author     Bruce Momjian    2003-08-04 00:43:34 +0000
committer  Bruce Momjian    2003-08-04 00:43:34 +0000
commit     089003fb462fcce46c02bf47322b429f73c33c50 (patch)
tree       77d78bc3a149df06f5603f60200a6ab363336624
parent     63354a0228a1dbc4a0d5ddc8ecdd8326349d2100 (diff)
pgindent run.
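
This commit is a routine run of PostgreSQL's pgindent source formatter: every hunk in the diff below is a mechanical whitespace, brace-placement, or declaration-alignment change, with no change in behavior. As a minimal sketch of the layout the tool normalizes toward (the helper and variable names here are invented for illustration and do not appear in the commit; the conventions themselves are inferred from the hunks below):

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Hypothetical example of post-pgindent layout: a space between "if" and
     * its parenthesis, the body of a one-line "if" moved to its own line,
     * braces on their own lines, declarations aligned in a column, and a
     * blank line separating declarations from code.
     */
    static size_t
    clamp_len(size_t n, size_t max)
    {
    	size_t		result;

    	result = n;
    	if (result > max)
    		result = max;
    	return result;
    }

    int
    main(void)
    {
    	printf("%zu\n", clamp_len(10, 4));	/* prints 4 */
    	return 0;
    }

The same transformations appear verbatim in the hunks that follow, for example "if(rcon)" becoming "if (rcon)" and "for (i = 0; i < c->dim; i++) {" being split so the brace sits on its own line.
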
-rw-r--r--contrib/btree_gist/btree_common.c2
-rw-r--r--contrib/btree_gist/btree_gist.h1
-rw-r--r--contrib/cube/cube.c76
-rw-r--r--contrib/dbase/dbf2pg.c10
-rw-r--r--contrib/dblink/dblink.c163
-rw-r--r--contrib/dbmirror/pending.c29
-rw-r--r--contrib/dbsize/dbsize.c2
-rw-r--r--contrib/earthdistance/earthdistance.c13
-rw-r--r--contrib/findoidjoins/findoidjoins.c85
-rw-r--r--contrib/fulltextindex/fti.c6
-rw-r--r--contrib/fuzzystrmatch/fuzzystrmatch.c6
-rw-r--r--contrib/intagg/int_aggregate.c6
-rw-r--r--contrib/intarray/_int.h66
-rw-r--r--contrib/intarray/_int_bool.c5
-rw-r--r--contrib/intarray/_int_gist.c53
-rw-r--r--contrib/intarray/_int_tool.c3
-rw-r--r--contrib/ltree/_ltree_op.c54
-rw-r--r--contrib/ltree/lquery_op.c52
-rw-r--r--contrib/ltree/ltree_io.c24
-rw-r--r--contrib/ltree/ltree_op.c119
-rw-r--r--contrib/oid2name/oid2name.c4
-rw-r--r--contrib/pg_autovacuum/pg_autovacuum.c1861
-rw-r--r--contrib/pg_autovacuum/pg_autovacuum.h129
-rw-r--r--contrib/pg_dumplo/main.c4
-rw-r--r--contrib/pgbench/pgbench.c4
-rw-r--r--contrib/pgcrypto/openssl.c79
-rw-r--r--contrib/pgcrypto/pgcrypto.c4
-rw-r--r--contrib/pgstattuple/pgstattuple.c4
-rw-r--r--contrib/spi/autoinc.c4
-rw-r--r--contrib/spi/insert_username.c6
-rw-r--r--contrib/spi/moddatetime.c4
-rw-r--r--contrib/spi/refint.c10
-rw-r--r--contrib/spi/timetravel.c239
-rw-r--r--contrib/tablefunc/tablefunc.c234
-rw-r--r--contrib/tsearch/morph.c5
-rw-r--r--contrib/tsearch2/common.c101
-rw-r--r--contrib/tsearch2/common.h16
-rw-r--r--contrib/tsearch2/dict.c323
-rw-r--r--contrib/tsearch2/dict.h57
-rw-r--r--contrib/tsearch2/dict_ex.c58
-rw-r--r--contrib/tsearch2/dict_ispell.c162
-rw-r--r--contrib/tsearch2/dict_snowball.c111
-rw-r--r--contrib/tsearch2/dict_syn.c179
-rw-r--r--contrib/tsearch2/ispell/spell.c876
-rw-r--r--contrib/tsearch2/ispell/spell.h91
-rw-r--r--contrib/tsearch2/prs_dcfg.c200
-rw-r--r--contrib/tsearch2/query.c163
-rw-r--r--contrib/tsearch2/query.h10
-rw-r--r--contrib/tsearch2/rank.c753
-rw-r--r--contrib/tsearch2/snmap.c94
-rw-r--r--contrib/tsearch2/snmap.h30
-rw-r--r--contrib/tsearch2/snowball/api.c92
-rw-r--r--contrib/tsearch2/snowball/api.h25
-rw-r--r--contrib/tsearch2/snowball/english_stem.c1812
-rw-r--r--contrib/tsearch2/snowball/english_stem.h5
-rw-r--r--contrib/tsearch2/snowball/header.h52
-rw-r--r--contrib/tsearch2/snowball/russian_stem.c1250
-rw-r--r--contrib/tsearch2/snowball/russian_stem.h5
-rw-r--r--contrib/tsearch2/snowball/utilities.c695
-rw-r--r--contrib/tsearch2/stopword.c129
-rw-r--r--contrib/tsearch2/ts_cfg.c659
-rw-r--r--contrib/tsearch2/ts_cfg.h111
-rw-r--r--contrib/tsearch2/ts_stat.c551
-rw-r--r--contrib/tsearch2/ts_stat.h14
-rw-r--r--contrib/tsearch2/tsvector.c510
-rw-r--r--contrib/tsearch2/tsvector.h37
-rw-r--r--contrib/tsearch2/tsvector_op.c358
-rw-r--r--contrib/tsearch2/wordparser/deflex.c5
-rw-r--r--contrib/tsearch2/wparser.c614
-rw-r--r--contrib/tsearch2/wparser.h38
-rw-r--r--contrib/tsearch2/wparser_def.c356
-rw-r--r--contrib/xml/pgxml_dom.c8
-rw-r--r--src/backend/access/common/heaptuple.c4
-rw-r--r--src/backend/access/common/indextuple.c8
-rw-r--r--src/backend/access/common/printtup.c58
-rw-r--r--src/backend/access/gist/gistscan.c9
-rw-r--r--src/backend/access/hash/hashfunc.c20
-rw-r--r--src/backend/access/hash/hashovfl.c6
-rw-r--r--src/backend/access/heap/heapam.c16
-rw-r--r--src/backend/access/index/indexam.c51
-rw-r--r--src/backend/access/nbtree/nbtinsert.c55
-rw-r--r--src/backend/access/nbtree/nbtpage.c149
-rw-r--r--src/backend/access/nbtree/nbtree.c90
-rw-r--r--src/backend/access/nbtree/nbtsearch.c91
-rw-r--r--src/backend/access/nbtree/nbtsort.c6
-rw-r--r--src/backend/access/nbtree/nbtxlog.c224
-rw-r--r--src/backend/access/rtree/rtscan.c10
-rw-r--r--src/backend/access/transam/clog.c4
-rw-r--r--src/backend/access/transam/rmgr.c6
-rw-r--r--src/backend/access/transam/slru.c94
-rw-r--r--src/backend/access/transam/xact.c58
-rw-r--r--src/backend/access/transam/xlog.c251
-rw-r--r--src/backend/bootstrap/bootstrap.c44
-rw-r--r--src/backend/catalog/aclchk.c61
-rw-r--r--src/backend/catalog/dependency.c86
-rw-r--r--src/backend/catalog/heap.c41
-rw-r--r--src/backend/catalog/index.c65
-rw-r--r--src/backend/catalog/namespace.c32
-rw-r--r--src/backend/catalog/pg_aggregate.c42
-rw-r--r--src/backend/catalog/pg_constraint.c19
-rw-r--r--src/backend/catalog/pg_conversion.c10
-rw-r--r--src/backend/catalog/pg_operator.c8
-rw-r--r--src/backend/catalog/pg_proc.c44
-rw-r--r--src/backend/catalog/pg_type.c27
-rw-r--r--src/backend/commands/aggregatecmds.c10
-rw-r--r--src/backend/commands/alter.c82
-rw-r--r--src/backend/commands/analyze.c8
-rw-r--r--src/backend/commands/async.c13
-rw-r--r--src/backend/commands/cluster.c133
-rw-r--r--src/backend/commands/comment.c35
-rw-r--r--src/backend/commands/conversioncmds.c8
-rw-r--r--src/backend/commands/copy.c209
-rw-r--r--src/backend/commands/dbcommands.c60
-rw-r--r--src/backend/commands/define.c5
-rw-r--r--src/backend/commands/explain.c40
-rw-r--r--src/backend/commands/functioncmds.c51
-rw-r--r--src/backend/commands/indexcmds.c85
-rw-r--r--src/backend/commands/opclasscmds.c18
-rw-r--r--src/backend/commands/operatorcmds.c6
-rw-r--r--src/backend/commands/portalcmds.c64
-rw-r--r--src/backend/commands/prepare.c58
-rw-r--r--src/backend/commands/proclang.c12
-rw-r--r--src/backend/commands/schemacmds.c14
-rw-r--r--src/backend/commands/sequence.c54
-rw-r--r--src/backend/commands/tablecmds.c285
-rw-r--r--src/backend/commands/trigger.c228
-rw-r--r--src/backend/commands/typecmds.c326
-rw-r--r--src/backend/commands/user.c56
-rw-r--r--src/backend/commands/vacuum.c61
-rw-r--r--src/backend/commands/vacuumlazy.c30
-rw-r--r--src/backend/commands/variable.c85
-rw-r--r--src/backend/commands/view.c6
-rw-r--r--src/backend/executor/execAmi.c28
-rw-r--r--src/backend/executor/execGrouping.c12
-rw-r--r--src/backend/executor/execMain.c123
-rw-r--r--src/backend/executor/execProcnode.c18
-rw-r--r--src/backend/executor/execQual.c363
-rw-r--r--src/backend/executor/execScan.c23
-rw-r--r--src/backend/executor/execTuples.c8
-rw-r--r--src/backend/executor/execUtils.c62
-rw-r--r--src/backend/executor/functions.c28
-rw-r--r--src/backend/executor/nodeAgg.c168
-rw-r--r--src/backend/executor/nodeAppend.c4
-rw-r--r--src/backend/executor/nodeHash.c4
-rw-r--r--src/backend/executor/nodeHashjoin.c44
-rw-r--r--src/backend/executor/nodeIndexscan.c7
-rw-r--r--src/backend/executor/nodeLimit.c41
-rw-r--r--src/backend/executor/nodeMaterial.c21
-rw-r--r--src/backend/executor/nodeMergejoin.c28
-rw-r--r--src/backend/executor/nodeNestloop.c10
-rw-r--r--src/backend/executor/nodeResult.c4
-rw-r--r--src/backend/executor/nodeSeqscan.c20
-rw-r--r--src/backend/executor/nodeSubplan.c275
-rw-r--r--src/backend/executor/nodeSubqueryscan.c17
-rw-r--r--src/backend/executor/nodeUnique.c8
-rw-r--r--src/backend/executor/spi.c29
-rw-r--r--src/backend/executor/tstoreReceiver.c10
-rw-r--r--src/backend/lib/stringinfo.c12
-rw-r--r--src/backend/libpq/auth.c42
-rw-r--r--src/backend/libpq/be-fsstubs.c6
-rw-r--r--src/backend/libpq/be-secure.c23
-rw-r--r--src/backend/libpq/crypt.c7
-rw-r--r--src/backend/libpq/hba.c73
-rw-r--r--src/backend/libpq/ip.c153
-rw-r--r--src/backend/libpq/md5.c6
-rw-r--r--src/backend/libpq/pqcomm.c88
-rw-r--r--src/backend/libpq/pqformat.c81
-rw-r--r--src/backend/main/main.c21
-rw-r--r--src/backend/nodes/bitmapset.c174
-rw-r--r--src/backend/nodes/copyfuncs.c64
-rw-r--r--src/backend/nodes/equalfuncs.c109
-rw-r--r--src/backend/nodes/list.c19
-rw-r--r--src/backend/nodes/nodes.c4
-rw-r--r--src/backend/nodes/outfuncs.c48
-rw-r--r--src/backend/nodes/readfuncs.c59
-rw-r--r--src/backend/optimizer/geqo/geqo_eval.c25
-rw-r--r--src/backend/optimizer/geqo/geqo_main.c6
-rw-r--r--src/backend/optimizer/geqo/geqo_misc.c4
-rw-r--r--src/backend/optimizer/path/allpaths.c31
-rw-r--r--src/backend/optimizer/path/costsize.c286
-rw-r--r--src/backend/optimizer/path/indxpath.c191
-rw-r--r--src/backend/optimizer/path/joinpath.c41
-rw-r--r--src/backend/optimizer/path/joinrels.c42
-rw-r--r--src/backend/optimizer/path/orindxpath.c4
-rw-r--r--src/backend/optimizer/path/pathkeys.c57
-rw-r--r--src/backend/optimizer/path/tidpath.c6
-rw-r--r--src/backend/optimizer/plan/createplan.c183
-rw-r--r--src/backend/optimizer/plan/initsplan.c165
-rw-r--r--src/backend/optimizer/plan/planmain.c15
-rw-r--r--src/backend/optimizer/plan/planner.c222
-rw-r--r--src/backend/optimizer/plan/setrefs.c74
-rw-r--r--src/backend/optimizer/plan/subselect.c173
-rw-r--r--src/backend/optimizer/prep/prepjointree.c107
-rw-r--r--src/backend/optimizer/prep/prepqual.c18
-rw-r--r--src/backend/optimizer/prep/preptlist.c24
-rw-r--r--src/backend/optimizer/prep/prepunion.c20
-rw-r--r--src/backend/optimizer/util/clauses.c236
-rw-r--r--src/backend/optimizer/util/joininfo.c14
-rw-r--r--src/backend/optimizer/util/pathnode.c29
-rw-r--r--src/backend/optimizer/util/plancat.c28
-rw-r--r--src/backend/optimizer/util/relnode.c9
-rw-r--r--src/backend/optimizer/util/restrictinfo.c17
-rw-r--r--src/backend/optimizer/util/tlist.c6
-rw-r--r--src/backend/optimizer/util/var.c43
-rw-r--r--src/backend/parser/analyze.c249
-rw-r--r--src/backend/parser/parse_agg.c99
-rw-r--r--src/backend/parser/parse_clause.c54
-rw-r--r--src/backend/parser/parse_coerce.c212
-rw-r--r--src/backend/parser/parse_expr.c164
-rw-r--r--src/backend/parser/parse_func.c82
-rw-r--r--src/backend/parser/parse_node.c10
-rw-r--r--src/backend/parser/parse_oper.c65
-rw-r--r--src/backend/parser/parse_relation.c43
-rw-r--r--src/backend/parser/parse_target.c22
-rw-r--r--src/backend/parser/parse_type.c8
-rw-r--r--src/backend/port/beos/sem.c4
-rw-r--r--src/backend/port/dynloader/darwin.c4
-rw-r--r--src/backend/port/dynloader/linux.c4
-rw-r--r--src/backend/port/dynloader/linux.h3
-rw-r--r--src/backend/port/dynloader/win32.c8
-rw-r--r--src/backend/port/ipc_test.c14
-rw-r--r--src/backend/port/posix_sema.c3
-rw-r--r--src/backend/port/sysv_sema.c10
-rw-r--r--src/backend/port/sysv_shmem.c34
-rw-r--r--src/backend/port/win32/sema.c13
-rw-r--r--src/backend/postmaster/pgstat.c67
-rw-r--r--src/backend/postmaster/postmaster.c231
-rw-r--r--src/backend/regex/regc_color.c412
-rw-r--r--src/backend/regex/regc_cvec.c197
-rw-r--r--src/backend/regex/regc_lex.c1492
-rw-r--r--src/backend/regex/regc_locale.c982
-rw-r--r--src/backend/regex/regc_nfa.c858
-rw-r--r--src/backend/regex/regcomp.c1836
-rw-r--r--src/backend/regex/rege_dfa.c416
-rw-r--r--src/backend/regex/regerror.c118
-rw-r--r--src/backend/regex/regexec.c776
-rw-r--r--src/backend/regex/regfree.c16
-rw-r--r--src/backend/rewrite/rewriteDefine.c26
-rw-r--r--src/backend/rewrite/rewriteHandler.c113
-rw-r--r--src/backend/rewrite/rewriteManip.c17
-rw-r--r--src/backend/storage/buffer/bufmgr.c22
-rw-r--r--src/backend/storage/file/fd.c14
-rw-r--r--src/backend/storage/freespace/freespace.c215
-rw-r--r--src/backend/storage/ipc/ipc.c6
-rw-r--r--src/backend/storage/ipc/ipci.c3
-rw-r--r--src/backend/storage/ipc/sinval.c9
-rw-r--r--src/backend/storage/lmgr/deadlock.c35
-rw-r--r--src/backend/storage/lmgr/lock.c65
-rw-r--r--src/backend/storage/lmgr/proc.c25
-rw-r--r--src/backend/storage/page/bufpage.c15
-rw-r--r--src/backend/storage/smgr/md.c15
-rw-r--r--src/backend/tcop/dest.c9
-rw-r--r--src/backend/tcop/fastpath.c56
-rw-r--r--src/backend/tcop/postgres.c406
-rw-r--r--src/backend/tcop/pquery.c229
-rw-r--r--src/backend/tcop/utility.c145
-rw-r--r--src/backend/utils/adt/acl.c89
-rw-r--r--src/backend/utils/adt/array_userfuncs.c91
-rw-r--r--src/backend/utils/adt/arrayfuncs.c230
-rw-r--r--src/backend/utils/adt/ascii.c22
-rw-r--r--src/backend/utils/adt/char.c4
-rw-r--r--src/backend/utils/adt/date.c70
-rw-r--r--src/backend/utils/adt/datetime.c86
-rw-r--r--src/backend/utils/adt/float.c10
-rw-r--r--src/backend/utils/adt/formatting.c81
-rw-r--r--src/backend/utils/adt/geo_ops.c16
-rw-r--r--src/backend/utils/adt/inet_net_ntop.c180
-rw-r--r--src/backend/utils/adt/inet_net_pton.c116
-rw-r--r--src/backend/utils/adt/int.c6
-rw-r--r--src/backend/utils/adt/int8.c6
-rw-r--r--src/backend/utils/adt/like.c4
-rw-r--r--src/backend/utils/adt/like_match.c4
-rw-r--r--src/backend/utils/adt/mac.c8
-rw-r--r--src/backend/utils/adt/nabstime.c22
-rw-r--r--src/backend/utils/adt/name.c4
-rw-r--r--src/backend/utils/adt/network.c258
-rw-r--r--src/backend/utils/adt/not_in.c4
-rw-r--r--src/backend/utils/adt/numeric.c285
-rw-r--r--src/backend/utils/adt/numutils.c4
-rw-r--r--src/backend/utils/adt/oid.c6
-rw-r--r--src/backend/utils/adt/oracle_compat.c17
-rw-r--r--src/backend/utils/adt/pg_locale.c8
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c14
-rw-r--r--src/backend/utils/adt/pseudotypes.c4
-rw-r--r--src/backend/utils/adt/regexp.c32
-rw-r--r--src/backend/utils/adt/regproc.c14
-rw-r--r--src/backend/utils/adt/ri_triggers.c193
-rw-r--r--src/backend/utils/adt/ruleutils.c582
-rw-r--r--src/backend/utils/adt/selfuncs.c160
-rw-r--r--src/backend/utils/adt/sets.c7
-rw-r--r--src/backend/utils/adt/timestamp.c170
-rw-r--r--src/backend/utils/adt/varbit.c14
-rw-r--r--src/backend/utils/adt/varchar.c20
-rw-r--r--src/backend/utils/adt/varlena.c69
-rw-r--r--src/backend/utils/adt/xid.c4
-rw-r--r--src/backend/utils/cache/catcache.c6
-rw-r--r--src/backend/utils/cache/inval.c4
-rw-r--r--src/backend/utils/cache/lsyscache.c10
-rw-r--r--src/backend/utils/cache/relcache.c62
-rw-r--r--src/backend/utils/error/elog.c173
-rw-r--r--src/backend/utils/fmgr/dfmgr.c8
-rw-r--r--src/backend/utils/fmgr/fmgr.c32
-rw-r--r--src/backend/utils/init/findbe.c9
-rw-r--r--src/backend/utils/init/miscinit.c94
-rw-r--r--src/backend/utils/init/postinit.c28
-rw-r--r--src/backend/utils/mb/conv.c10
-rw-r--r--src/backend/utils/mb/mbutils.c26
-rw-r--r--src/backend/utils/mb/wchar.c10
-rw-r--r--src/backend/utils/misc/guc.c135
-rw-r--r--src/backend/utils/misc/help_config.c24
-rw-r--r--src/backend/utils/mmgr/aset.c22
-rw-r--r--src/backend/utils/mmgr/mcxt.c7
-rw-r--r--src/backend/utils/mmgr/portalmem.c53
-rw-r--r--src/backend/utils/sort/logtape.c6
-rw-r--r--src/backend/utils/sort/tuplesort.c14
-rw-r--r--src/backend/utils/sort/tuplestore.c37
-rw-r--r--src/backend/utils/time/tqual.c34
-rw-r--r--src/bin/pg_controldata/pg_controldata.c24
-rw-r--r--src/bin/pg_dump/common.c8
-rw-r--r--src/bin/pg_dump/dumputils.c75
-rw-r--r--src/bin/pg_dump/dumputils.h12
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.c54
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.h12
-rw-r--r--src/bin/pg_dump/pg_backup_custom.c20
-rw-r--r--src/bin/pg_dump/pg_backup_tar.c23
-rw-r--r--src/bin/pg_dump/pg_dump.c161
-rw-r--r--src/bin/pg_dump/pg_dump.h6
-rw-r--r--src/bin/pg_dump/pg_dumpall.c14
-rw-r--r--src/bin/pg_dump/pg_restore.c6
-rw-r--r--src/bin/pg_encoding/pg_encoding.c10
-rw-r--r--src/bin/pg_resetxlog/pg_resetxlog.c6
-rw-r--r--src/bin/psql/command.c32
-rw-r--r--src/bin/psql/common.c224
-rw-r--r--src/bin/psql/copy.c17
-rw-r--r--src/bin/psql/describe.c61
-rw-r--r--src/bin/psql/help.c14
-rw-r--r--src/bin/psql/input.c56
-rw-r--r--src/bin/psql/large_obj.c6
-rw-r--r--src/bin/psql/mainloop.c67
-rw-r--r--src/bin/psql/mbprint.c8
-rw-r--r--src/bin/psql/print.c20
-rw-r--r--src/bin/psql/print.h5
-rw-r--r--src/bin/psql/prompt.c60
-rw-r--r--src/bin/psql/sprompt.c13
-rw-r--r--src/bin/psql/startup.c8
-rw-r--r--src/bin/psql/stringutils.c31
-rw-r--r--src/bin/psql/tab-complete.c163
-rw-r--r--src/bin/psql/variables.c56
-rw-r--r--src/bin/psql/variables.h37
-rw-r--r--src/bin/scripts/clusterdb.c30
-rw-r--r--src/bin/scripts/common.c6
-rw-r--r--src/bin/scripts/common.h19
-rw-r--r--src/bin/scripts/createdb.c4
-rw-r--r--src/bin/scripts/createlang.c20
-rw-r--r--src/bin/scripts/createuser.c11
-rw-r--r--src/bin/scripts/dropdb.c4
-rw-r--r--src/bin/scripts/droplang.c15
-rw-r--r--src/bin/scripts/vacuumdb.c34
-rw-r--r--src/include/access/genam.h12
-rw-r--r--src/include/access/heapam.h4
-rw-r--r--src/include/access/nbtree.h33
-rw-r--r--src/include/access/printtup.h8
-rw-r--r--src/include/access/relscan.h6
-rw-r--r--src/include/access/slru.h22
-rw-r--r--src/include/access/xact.h12
-rw-r--r--src/include/access/xlog.h4
-rw-r--r--src/include/c.h25
-rw-r--r--src/include/catalog/catversion.h4
-rw-r--r--src/include/catalog/dependency.h10
-rw-r--r--src/include/catalog/pg_am.h4
-rw-r--r--src/include/catalog/pg_amproc.h28
-rw-r--r--src/include/catalog/pg_attribute.h8
-rw-r--r--src/include/catalog/pg_cast.h18
-rw-r--r--src/include/catalog/pg_constraint.h13
-rw-r--r--src/include/catalog/pg_opclass.h6
-rw-r--r--src/include/catalog/pg_operator.h32
-rw-r--r--src/include/catalog/pg_proc.h114
-rw-r--r--src/include/catalog/pg_statistic.h8
-rw-r--r--src/include/catalog/pg_type.h32
-rw-r--r--src/include/commands/alter.h4
-rw-r--r--src/include/commands/dbcommands.h6
-rw-r--r--src/include/commands/explain.h4
-rw-r--r--src/include/commands/portalcmds.h6
-rw-r--r--src/include/commands/prepare.h18
-rw-r--r--src/include/commands/sequence.h4
-rw-r--r--src/include/commands/trigger.h28
-rw-r--r--src/include/commands/typecmds.h4
-rw-r--r--src/include/executor/execdesc.h8
-rw-r--r--src/include/executor/executor.h104
-rw-r--r--src/include/executor/hashjoin.h7
-rw-r--r--src/include/executor/nodeHash.h4
-rw-r--r--src/include/executor/nodeSeqscan.h12
-rw-r--r--src/include/executor/nodeSubplan.h16
-rw-r--r--src/include/executor/spi.h6
-rw-r--r--src/include/executor/spi_priv.h7
-rw-r--r--src/include/executor/tstoreReceiver.h6
-rw-r--r--src/include/getaddrinfo.h35
-rw-r--r--src/include/getopt_long.h24
-rw-r--r--src/include/lib/stringinfo.h7
-rw-r--r--src/include/libpq/crypt.h4
-rw-r--r--src/include/libpq/ip.h34
-rw-r--r--src/include/libpq/libpq-be.h13
-rw-r--r--src/include/libpq/libpq.h18
-rw-r--r--src/include/libpq/pqcomm.h38
-rw-r--r--src/include/libpq/pqformat.h4
-rw-r--r--src/include/mb/pg_wchar.h4
-rw-r--r--src/include/miscadmin.h12
-rw-r--r--src/include/nodes/bitmapset.h58
-rw-r--r--src/include/nodes/execnodes.h151
-rw-r--r--src/include/nodes/makefuncs.h10
-rw-r--r--src/include/nodes/nodes.h6
-rw-r--r--src/include/nodes/params.h8
-rw-r--r--src/include/nodes/parsenodes.h45
-rw-r--r--src/include/nodes/pg_list.h14
-rw-r--r--src/include/nodes/plannodes.h10
-rw-r--r--src/include/nodes/primnodes.h79
-rw-r--r--src/include/nodes/relation.h37
-rw-r--r--src/include/optimizer/clauses.h20
-rw-r--r--src/include/optimizer/cost.h38
-rw-r--r--src/include/optimizer/geqo_misc.h3
-rw-r--r--src/include/optimizer/joininfo.h10
-rw-r--r--src/include/optimizer/pathnode.h16
-rw-r--r--src/include/optimizer/paths.h12
-rw-r--r--src/include/optimizer/plancat.h8
-rw-r--r--src/include/optimizer/planmain.h32
-rw-r--r--src/include/optimizer/prep.h4
-rw-r--r--src/include/optimizer/restrictinfo.h12
-rw-r--r--src/include/optimizer/tlist.h4
-rw-r--r--src/include/parser/analyze.h4
-rw-r--r--src/include/parser/parse_agg.h14
-rw-r--r--src/include/parser/parse_clause.h10
-rw-r--r--src/include/parser/parse_coerce.h44
-rw-r--r--src/include/parser/parse_func.h32
-rw-r--r--src/include/parser/parse_node.h5
-rw-r--r--src/include/parser/parse_oper.h18
-rw-r--r--src/include/pg_config_manual.h38
-rw-r--r--src/include/pgstat.h4
-rw-r--r--src/include/port.h54
-rw-r--r--src/include/port/bsdi.h1
-rw-r--r--src/include/port/cygwin.h3
-rw-r--r--src/include/port/freebsd.h1
-rw-r--r--src/include/port/hpux.h1
-rw-r--r--src/include/port/netbsd.h1
-rw-r--r--src/include/port/openbsd.h1
-rw-r--r--src/include/port/win32.h63
-rw-r--r--src/include/port/win32/dlfcn.h1
-rw-r--r--src/include/port/win32/grp.h1
-rw-r--r--src/include/port/win32/netdb.h1
-rw-r--r--src/include/port/win32/netinet/in.h1
-rw-r--r--src/include/port/win32/pwd.h1
-rw-r--r--src/include/port/win32/sys/socket.h1
-rw-r--r--src/include/port/win32/sys/wait.h1
-rw-r--r--src/include/postgres.h3
-rw-r--r--src/include/regex/regcustom.h52
-rw-r--r--src/include/regex/regerrs.h91
-rw-r--r--src/include/regex/regex.h169
-rw-r--r--src/include/regex/regguts.h390
-rw-r--r--src/include/storage/bufmgr.h6
-rw-r--r--src/include/storage/freespace.h16
-rw-r--r--src/include/storage/lock.h13
-rw-r--r--src/include/tcop/dest.h22
-rw-r--r--src/include/tcop/pquery.h22
-rw-r--r--src/include/tcop/tcopprot.h5
-rw-r--r--src/include/utils/acl.h10
-rw-r--r--src/include/utils/array.h53
-rw-r--r--src/include/utils/builtins.h4
-rw-r--r--src/include/utils/datetime.h6
-rw-r--r--src/include/utils/elog.h50
-rw-r--r--src/include/utils/errcodes.h24
-rw-r--r--src/include/utils/guc.h8
-rw-r--r--src/include/utils/guc_tables.h24
-rw-r--r--src/include/utils/help_config.h6
-rw-r--r--src/include/utils/inet.h4
-rw-r--r--src/include/utils/lsyscache.h20
-rw-r--r--src/include/utils/memutils.h7
-rw-r--r--src/include/utils/palloc.h6
-rw-r--r--src/include/utils/portal.h63
-rw-r--r--src/include/utils/rel.h6
-rw-r--r--src/include/utils/selfuncs.h8
-rw-r--r--src/include/utils/tuplestore.h8
-rw-r--r--src/interfaces/ecpg/compatlib/informix.c616
-rw-r--r--src/interfaces/ecpg/ecpglib/connect.c106
-rw-r--r--src/interfaces/ecpg/ecpglib/data.c47
-rw-r--r--src/interfaces/ecpg/ecpglib/descriptor.c4
-rw-r--r--src/interfaces/ecpg/ecpglib/error.c13
-rw-r--r--src/interfaces/ecpg/ecpglib/execute.c161
-rw-r--r--src/interfaces/ecpg/ecpglib/extern.h12
-rw-r--r--src/interfaces/ecpg/ecpglib/misc.c103
-rw-r--r--src/interfaces/ecpg/ecpglib/prepare.c20
-rw-r--r--src/interfaces/ecpg/include/datetime.h18
-rw-r--r--src/interfaces/ecpg/include/decimal.h31
-rw-r--r--src/interfaces/ecpg/include/ecpg_informix.h36
-rw-r--r--src/interfaces/ecpg/include/ecpgerrno.h4
-rw-r--r--src/interfaces/ecpg/include/ecpglib.h9
-rw-r--r--src/interfaces/ecpg/include/ecpgtype.h18
-rw-r--r--src/interfaces/ecpg/include/pgtypes_date.h15
-rw-r--r--src/interfaces/ecpg/include/pgtypes_error.h5
-rw-r--r--src/interfaces/ecpg/include/pgtypes_interval.h15
-rw-r--r--src/interfaces/ecpg/include/pgtypes_numeric.h88
-rw-r--r--src/interfaces/ecpg/include/pgtypes_timestamp.h10
-rw-r--r--src/interfaces/ecpg/include/sqlca.h2
-rw-r--r--src/interfaces/ecpg/include/sqlda.h1
-rw-r--r--src/interfaces/ecpg/include/sqltypes.h22
-rw-r--r--src/interfaces/ecpg/pgtypeslib/common.c90
-rw-r--r--src/interfaces/ecpg/pgtypeslib/datetime.c526
-rw-r--r--src/interfaces/ecpg/pgtypeslib/dt.h58
-rw-r--r--src/interfaces/ecpg/pgtypeslib/dt_common.c744
-rw-r--r--src/interfaces/ecpg/pgtypeslib/extern.h38
-rw-r--r--src/interfaces/ecpg/pgtypeslib/interval.c17
-rw-r--r--src/interfaces/ecpg/pgtypeslib/numeric.c245
-rw-r--r--src/interfaces/ecpg/pgtypeslib/timestamp.c319
-rw-r--r--src/interfaces/ecpg/preproc/c_keywords.c10
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.c40
-rw-r--r--src/interfaces/ecpg/preproc/extern.h8
-rw-r--r--src/interfaces/ecpg/preproc/type.c81
-rw-r--r--src/interfaces/ecpg/preproc/type.h41
-rw-r--r--src/interfaces/ecpg/preproc/variable.c158
-rw-r--r--src/interfaces/libpgtcl/pgtclCmds.c12
-rw-r--r--src/interfaces/libpgtcl/pgtclCmds.h40
-rw-r--r--src/interfaces/libpgtcl/pgtclId.c18
-rw-r--r--src/interfaces/libpgtcl/pgtclId.h6
-rw-r--r--src/interfaces/libpq/fe-auth.c28
-rw-r--r--src/interfaces/libpq/fe-connect.c240
-rw-r--r--src/interfaces/libpq/fe-exec.c108
-rw-r--r--src/interfaces/libpq/fe-lobj.c12
-rw-r--r--src/interfaces/libpq/fe-misc.c101
-rw-r--r--src/interfaces/libpq/fe-protocol2.c103
-rw-r--r--src/interfaces/libpq/fe-protocol3.c174
-rw-r--r--src/interfaces/libpq/fe-secure.c139
-rw-r--r--src/interfaces/libpq/libpq-fe.h64
-rw-r--r--src/interfaces/libpq/libpq-int.h70
-rw-r--r--src/pl/plperl/plperl.c23
-rw-r--r--src/pl/plpgsql/src/pl_comp.c129
-rw-r--r--src/pl/plpgsql/src/pl_exec.c159
-rw-r--r--src/pl/plpgsql/src/pl_funcs.c8
-rw-r--r--src/pl/plpgsql/src/pl_handler.c8
-rw-r--r--src/pl/plpgsql/src/plpgsql.h23
-rw-r--r--src/pl/plpython/plpython.c62
-rw-r--r--src/pl/tcl/pltcl.c16
-rw-r--r--src/port/copydir.c31
-rw-r--r--src/port/crypt.c859
-rw-r--r--src/port/dirmod.c22
-rw-r--r--src/port/fseeko.c9
-rw-r--r--src/port/getaddrinfo.c66
-rw-r--r--src/port/getopt.c2
-rw-r--r--src/port/getopt_long.c14
-rw-r--r--src/port/gettimeofday.c15
-rw-r--r--src/port/path.c24
-rw-r--r--src/port/threads.c4
-rw-r--r--src/test/examples/testlibpq.c8
-rw-r--r--src/test/examples/testlibpq2.c27
-rw-r--r--src/test/examples/testlibpq3.c12
-rw-r--r--src/test/regress/regress.c6
-rw-r--r--src/tutorial/complex.c2
554 files changed, 25000 insertions, 21357 deletions
diff --git a/contrib/btree_gist/btree_common.c b/contrib/btree_gist/btree_common.c
index 9873af911c3..9306076e0ae 100644
--- a/contrib/btree_gist/btree_common.c
+++ b/contrib/btree_gist/btree_common.c
@@ -1,7 +1,7 @@
#include "btree_gist.h"
PG_FUNCTION_INFO_V1(btree_decompress);
-Datum btree_decompress(PG_FUNCTION_ARGS);
+Datum btree_decompress(PG_FUNCTION_ARGS);
/*
** GiST DeCompress methods
diff --git a/contrib/btree_gist/btree_gist.h b/contrib/btree_gist/btree_gist.h
index ebcecf4cc53..ecbabc2dbb3 100644
--- a/contrib/btree_gist/btree_gist.h
+++ b/contrib/btree_gist/btree_gist.h
@@ -23,4 +23,3 @@ typedef struct rix
extern GIST_SPLITVEC *btree_picksplit(bytea *entryvec, GIST_SPLITVEC *v,
BINARY_UNION bu, CMPFUNC cmp);
-
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index 5d8bcf75b5d..396253261d9 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -28,11 +28,11 @@ extern int cube_yyparse();
NDBOX *cube_in(char *str);
NDBOX *cube(text *str);
char *cube_out(NDBOX * cube);
-NDBOX *cube_f8(double *);
-NDBOX *cube_f8_f8(double *, double *);
-NDBOX *cube_c_f8(NDBOX *, double *);
-NDBOX *cube_c_f8_f8(NDBOX *, double *, double *);
-int4 cube_dim(NDBOX * a);
+NDBOX *cube_f8(double *);
+NDBOX *cube_f8_f8(double *, double *);
+NDBOX *cube_c_f8(NDBOX *, double *);
+NDBOX *cube_c_f8_f8(NDBOX *, double *, double *);
+int4 cube_dim(NDBOX * a);
double *cube_ll_coord(NDBOX * a, int4 n);
double *cube_ur_coord(NDBOX * a, int4 n);
@@ -123,7 +123,7 @@ cube_out(NDBOX * cube)
bool equal = true;
int dim = cube->dim;
int i;
- int ndig;
+ int ndig;
initStringInfo(&buf);
@@ -131,7 +131,8 @@ cube_out(NDBOX * cube)
* Get the number of digits to display.
*/
ndig = DBL_DIG + extra_float_digits;
- if (ndig < 1) ndig = 1;
+ if (ndig < 1)
+ ndig = 1;
/*
* while printing the first (LL) corner, check if it is equal to the
@@ -1192,7 +1193,8 @@ cube_enlarge(NDBOX * a, double *r, int4 n)
j,
k;
- if (n > CUBE_MAX_DIM) n = CUBE_MAX_DIM;
+ if (n > CUBE_MAX_DIM)
+ n = CUBE_MAX_DIM;
if (*r > 0 && n > 0)
dim = n;
if (a->dim > dim)
@@ -1234,14 +1236,15 @@ NDBOX *
cube_f8(double *x1)
{
NDBOX *result;
- int size;
+ int size;
+
size = offsetof(NDBOX, x[0]) + sizeof(double) * 2;
result = (NDBOX *) palloc(size);
memset(result, 0, size);
result->size = size;
result->dim = 1;
- result->x[0] = *x1;
- result->x[1] = *x1;
+ result->x[0] = *x1;
+ result->x[1] = *x1;
return result;
}
@@ -1250,56 +1253,61 @@ NDBOX *
cube_f8_f8(double *x1, double *x2)
{
NDBOX *result;
- int size;
+ int size;
+
size = offsetof(NDBOX, x[0]) + sizeof(double) * 2;
result = (NDBOX *) palloc(size);
memset(result, 0, size);
result->size = size;
result->dim = 1;
- result->x[0] = *x1;
- result->x[1] = *x2;
+ result->x[0] = *x1;
+ result->x[1] = *x2;
return result;
}
/* Add a dimension to an existing cube with the same values for the new
coordinate */
NDBOX *
-cube_c_f8(NDBOX *c, double *x1)
+cube_c_f8(NDBOX * c, double *x1)
{
NDBOX *result;
- int size;
- int i;
- size = offsetof(NDBOX, x[0]) + sizeof(double) * (c->dim + 1) * 2;
+ int size;
+ int i;
+
+ size = offsetof(NDBOX, x[0]) + sizeof(double) * (c->dim + 1) *2;
result = (NDBOX *) palloc(size);
memset(result, 0, size);
result->size = size;
result->dim = c->dim + 1;
- for (i = 0; i < c->dim; i++) {
- result->x[i] = c->x[i];
- result->x[result->dim + i] = c->x[c->dim + i];
- }
- result->x[result->dim - 1] = *x1;
- result->x[2 * result->dim - 1] = *x1;
+ for (i = 0; i < c->dim; i++)
+ {
+ result->x[i] = c->x[i];
+ result->x[result->dim + i] = c->x[c->dim + i];
+ }
+ result->x[result->dim - 1] = *x1;
+ result->x[2 * result->dim - 1] = *x1;
return result;
}
/* Add a dimension to an existing cube */
NDBOX *
-cube_c_f8_f8(NDBOX *c, double *x1, double *x2)
+cube_c_f8_f8(NDBOX * c, double *x1, double *x2)
{
NDBOX *result;
- int size;
- int i;
- size = offsetof(NDBOX, x[0]) + sizeof(double) * (c->dim + 1) * 2;
+ int size;
+ int i;
+
+ size = offsetof(NDBOX, x[0]) + sizeof(double) * (c->dim + 1) *2;
result = (NDBOX *) palloc(size);
memset(result, 0, size);
result->size = size;
result->dim = c->dim + 1;
- for (i = 0; i < c->dim; i++) {
- result->x[i] = c->x[i];
- result->x[result->dim + i] = c->x[c->dim + i];
- }
- result->x[result->dim - 1] = *x1;
- result->x[2 * result->dim - 1] = *x2;
+ for (i = 0; i < c->dim; i++)
+ {
+ result->x[i] = c->x[i];
+ result->x[result->dim + i] = c->x[c->dim + i];
+ }
+ result->x[result->dim - 1] = *x1;
+ result->x[2 * result->dim - 1] = *x2;
return result;
}
diff --git a/contrib/dbase/dbf2pg.c b/contrib/dbase/dbf2pg.c
index 0ae53cd126d..d679eed1ad8 100644
--- a/contrib/dbase/dbf2pg.c
+++ b/contrib/dbase/dbf2pg.c
@@ -443,17 +443,15 @@ do_inserts(PGconn *conn, char *table, dbhead * dbh)
if (result == DBF_VALID)
{
query[0] = '\0';
- j = 0; /* counter for fields in the output */
+ j = 0; /* counter for fields in the output */
for (h = 0; h < dbh->db_nfields; h++)
{
- if (!strlen(fields[h].db_name)) /* When the new fieldname is empty, the field is skipped */
- {
+ if (!strlen(fields[h].db_name)) /* When the new fieldname
+ * is empty, the field is
+ * skipped */
continue;
- }
else
- {
j++;
- }
if (j > 1) /* not for the first field! */
strcat(query, "\t"); /* COPY statement field
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 158942a4c04..629f93f1c64 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -61,17 +61,17 @@
typedef struct remoteConn
{
- PGconn *con; /* Hold the remote connection */
- bool remoteTrFlag; /* Indicates whether or not a transaction
- * on remote database is in progress*/
-} remoteConn;
+ PGconn *con; /* Hold the remote connection */
+ bool remoteTrFlag; /* Indicates whether or not a transaction
+ * on remote database is in progress */
+} remoteConn;
/*
* Internal declarations
*/
static remoteConn *getConnectionByName(const char *name);
static HTAB *createConnHash(void);
-static void createNewConnection(const char *name,remoteConn *con);
+static void createNewConnection(const char *name, remoteConn * con);
static void deleteConnection(const char *name);
static char **get_pkey_attnames(Oid relid, int16 *numatts);
static char *get_sql_insert(Oid relid, int16 *pkattnums, int16 pknumatts, char **src_pkattvals, char **tgt_pkattvals);
@@ -86,15 +86,15 @@ static TupleDesc pgresultGetTupleDesc(PGresult *res);
static char *generate_relation_name(Oid relid);
/* Global */
-List *res_id = NIL;
-int res_id_index = 0;
-PGconn *persistent_conn = NULL;
-static HTAB *remoteConnHash=NULL;
+List *res_id = NIL;
+int res_id_index = 0;
+PGconn *persistent_conn = NULL;
+static HTAB *remoteConnHash = NULL;
-/*
+/*
Following is list that holds multiple remote connections.
Calling convention of each dblink function changes to accept
-connection name as the first parameter. The connection list is
+connection name as the first parameter. The connection list is
much like ecpg e.g. a mapping between a name and a PGconn object.
*/
@@ -102,7 +102,7 @@ typedef struct remoteConnHashEnt
{
char name[NAMEDATALEN];
remoteConn *rcon;
-} remoteConnHashEnt;
+} remoteConnHashEnt;
/* initial number of connection hashes */
#define NUMCONN 16
@@ -186,18 +186,18 @@ dblink_connect(PG_FUNCTION_ARGS)
PGconn *conn = NULL;
remoteConn *rcon = NULL;
- if(PG_NARGS()==2)
+ if (PG_NARGS() == 2)
{
connstr = GET_STR(PG_GETARG_TEXT_P(1));
connname = GET_STR(PG_GETARG_TEXT_P(0));
}
- else if(PG_NARGS()==1)
+ else if (PG_NARGS() == 1)
connstr = GET_STR(PG_GETARG_TEXT_P(0));
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
- if(connname)
- rcon=(remoteConn *) palloc(sizeof(remoteConn));
+ if (connname)
+ rcon = (remoteConn *) palloc(sizeof(remoteConn));
conn = PQconnectdb(connstr);
MemoryContextSwitchTo(oldcontext);
@@ -206,16 +206,16 @@ dblink_connect(PG_FUNCTION_ARGS)
{
msg = pstrdup(PQerrorMessage(conn));
PQfinish(conn);
- if(rcon)
+ if (rcon)
pfree(rcon);
ereport(ERROR,
- (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
- errmsg("could not establish connection"),
- errdetail("%s", msg)));
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not establish connection"),
+ errdetail("%s", msg)));
}
- if(connname)
+ if (connname)
{
rcon->con = conn;
createNewConnection(connname, rcon);
@@ -237,7 +237,7 @@ dblink_disconnect(PG_FUNCTION_ARGS)
remoteConn *rcon = NULL;
PGconn *conn = NULL;
- if (PG_NARGS() ==1 )
+ if (PG_NARGS() == 1)
{
conname = GET_STR(PG_GETARG_TEXT_P(0));
rcon = getConnectionByName(conname);
@@ -276,13 +276,13 @@ dblink_open(PG_FUNCTION_ARGS)
StringInfo str = makeStringInfo();
remoteConn *rcon = NULL;
- if(PG_NARGS() == 2)
+ if (PG_NARGS() == 2)
{
curname = GET_STR(PG_GETARG_TEXT_P(0));
sql = GET_STR(PG_GETARG_TEXT_P(1));
conn = persistent_conn;
}
- else if(PG_NARGS() == 3)
+ else if (PG_NARGS() == 3)
{
conname = GET_STR(PG_GETARG_TEXT_P(0));
curname = GET_STR(PG_GETARG_TEXT_P(1));
@@ -333,12 +333,12 @@ dblink_close(PG_FUNCTION_ARGS)
curname = GET_STR(PG_GETARG_TEXT_P(0));
conn = persistent_conn;
}
- else if (PG_NARGS()==2)
+ else if (PG_NARGS() == 2)
{
conname = GET_STR(PG_GETARG_TEXT_P(0));
curname = GET_STR(PG_GETARG_TEXT_P(1));
rcon = getConnectionByName(conname);
- if(rcon)
+ if (rcon)
conn = rcon->con;
}
@@ -381,7 +381,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
PGresult *res = NULL;
MemoryContext oldcontext;
char *conname = NULL;
- remoteConn *rcon=NULL;
+ remoteConn *rcon = NULL;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
@@ -401,7 +401,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
howmany = PG_GETARG_INT32(2);
rcon = getConnectionByName(conname);
- if(rcon)
+ if (rcon)
conn = rcon->con;
}
else if (PG_NARGS() == 2)
@@ -411,7 +411,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
conn = persistent_conn;
}
- if(!conn)
+ if (!conn)
DBLINK_CONN_NOT_AVAIL;
/* create a function context for cross-call persistence */
@@ -429,9 +429,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
if (!res ||
(PQresultStatus(res) != PGRES_COMMAND_OK &&
PQresultStatus(res) != PGRES_TUPLES_OK))
- {
DBLINK_RES_ERROR("sql error");
- }
else if (PQresultStatus(res) == PGRES_COMMAND_OK)
{
/* cursor does not exist - closed already or bad name */
@@ -549,7 +547,7 @@ dblink_record(PG_FUNCTION_ARGS)
char *connstr = NULL;
char *sql = NULL;
char *conname = NULL;
- remoteConn *rcon=NULL;
+ remoteConn *rcon = NULL;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
@@ -574,7 +572,7 @@ dblink_record(PG_FUNCTION_ARGS)
/* shouldn't happen */
elog(ERROR, "wrong number of arguments");
- if(!conn)
+ if (!conn)
DBLINK_CONN_NOT_AVAIL;
res = PQexec(conn, sql);
@@ -591,8 +589,8 @@ dblink_record(PG_FUNCTION_ARGS)
TEXTOID, -1, 0, false);
/*
- * and save a copy of the command status string to return
- * as our result tuple
+ * and save a copy of the command status string to return as
+ * our result tuple
*/
sql_cmd_status = PQcmdStatus(res);
funcctx->max_calls = 1;
@@ -707,7 +705,7 @@ dblink_exec(PG_FUNCTION_ARGS)
char *connstr = NULL;
char *sql = NULL;
char *conname = NULL;
- remoteConn *rcon=NULL;
+ remoteConn *rcon = NULL;
bool freeconn = true;
if (PG_NARGS() == 2)
@@ -724,7 +722,7 @@ dblink_exec(PG_FUNCTION_ARGS)
/* shouldn't happen */
elog(ERROR, "wrong number of arguments");
- if(!conn)
+ if (!conn)
DBLINK_CONN_NOT_AVAIL;
res = PQexec(conn, sql);
@@ -741,15 +739,15 @@ dblink_exec(PG_FUNCTION_ARGS)
TEXTOID, -1, 0, false);
/*
- * and save a copy of the command status string to return as
- * our result tuple
+ * and save a copy of the command status string to return as our
+ * result tuple
*/
sql_cmd_status = GET_TEXT(PQcmdStatus(res));
}
else
ereport(ERROR,
- (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
- errmsg("statement returning results not allowed")));
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("statement returning results not allowed")));
PQclear(res);
@@ -802,6 +800,7 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
(errcode(ERRCODE_UNDEFINED_TABLE),
errmsg("relation \"%s\" does not exist",
GET_STR(PG_GETARG_TEXT_P(0)))));
+
/*
* need a tuple descriptor representing one INT and one TEXT
* column
@@ -980,8 +979,8 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
if (src_nitems != pknumatts)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("source key array length must match number of key " \
- "attributes")));
+ errmsg("source key array length must match number of key " \
+ "attributes")));
/*
* get array of pointers to c-strings from the input source array
@@ -1013,8 +1012,8 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
if (tgt_nitems != pknumatts)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("target key array length must match number of key " \
- "attributes")));
+ errmsg("target key array length must match number of key " \
+ "attributes")));
/*
* get array of pointers to c-strings from the input target array
@@ -1126,8 +1125,8 @@ dblink_build_sql_delete(PG_FUNCTION_ARGS)
if (tgt_nitems != pknumatts)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("target key array length must match number of key " \
- "attributes")));
+ errmsg("target key array length must match number of key " \
+ "attributes")));
/*
* get array of pointers to c-strings from the input target array
@@ -1249,8 +1248,8 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
if (src_nitems != pknumatts)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("source key array length must match number of key " \
- "attributes")));
+ errmsg("source key array length must match number of key " \
+ "attributes")));
/*
* get array of pointers to c-strings from the input source array
@@ -1282,8 +1281,8 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
if (tgt_nitems != pknumatts)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("target key array length must match number of key " \
- "attributes")));
+ errmsg("target key array length must match number of key " \
+ "attributes")));
/*
* get array of pointers to c-strings from the input target array
@@ -1839,10 +1838,10 @@ pgresultGetTupleDesc(PGresult *res)
ereport(ERROR,
(errcode(ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH),
errmsg("field size mismatch"),
- errdetail("Size of remote field \"%s\" does not match " \
- "size of local type \"%s\".", attname,
- format_type_with_typemod(atttypid,
- atttypmod))));
+ errdetail("Size of remote field \"%s\" does not match " \
+ "size of local type \"%s\".", attname,
+ format_type_with_typemod(atttypid,
+ atttypmod))));
attdim = 0;
attisset = false;
@@ -1893,50 +1892,50 @@ generate_relation_name(Oid relid)
static remoteConn *
getConnectionByName(const char *name)
{
- remoteConnHashEnt *hentry;
- char key[NAMEDATALEN];
+ remoteConnHashEnt *hentry;
+ char key[NAMEDATALEN];
- if(!remoteConnHash)
- remoteConnHash=createConnHash();
+ if (!remoteConnHash)
+ remoteConnHash = createConnHash();
MemSet(key, 0, NAMEDATALEN);
snprintf(key, NAMEDATALEN - 1, "%s", name);
- hentry = (remoteConnHashEnt*) hash_search(remoteConnHash,
- key, HASH_FIND, NULL);
+ hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
+ key, HASH_FIND, NULL);
- if(hentry)
- return(hentry->rcon);
+ if (hentry)
+ return (hentry->rcon);
- return(NULL);
+ return (NULL);
}
static HTAB *
createConnHash(void)
{
- HASHCTL ctl;
- HTAB *ptr;
+ HASHCTL ctl;
+ HTAB *ptr;
ctl.keysize = NAMEDATALEN;
ctl.entrysize = sizeof(remoteConnHashEnt);
- ptr=hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
+ ptr = hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
- if(!ptr)
+ if (!ptr)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- return(ptr);
+ return (ptr);
}
static void
-createNewConnection(const char *name, remoteConn *con)
+createNewConnection(const char *name, remoteConn * con)
{
- remoteConnHashEnt *hentry;
- bool found;
- char key[NAMEDATALEN];
+ remoteConnHashEnt *hentry;
+ bool found;
+ char key[NAMEDATALEN];
- if(!remoteConnHash)
+ if (!remoteConnHash)
remoteConnHash = createConnHash();
MemSet(key, 0, NAMEDATALEN);
@@ -1944,12 +1943,12 @@ createNewConnection(const char *name, remoteConn *con)
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key,
HASH_ENTER, &found);
- if(!hentry)
+ if (!hentry)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- if(found)
+ if (found)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("duplicate connection name")));
@@ -1961,12 +1960,12 @@ createNewConnection(const char *name, remoteConn *con)
static void
deleteConnection(const char *name)
{
- remoteConnHashEnt *hentry;
- bool found;
- char key[NAMEDATALEN];
+ remoteConnHashEnt *hentry;
+ bool found;
+ char key[NAMEDATALEN];
- if(!remoteConnHash)
- remoteConnHash=createConnHash();
+ if (!remoteConnHash)
+ remoteConnHash = createConnHash();
MemSet(key, 0, NAMEDATALEN);
snprintf(key, NAMEDATALEN - 1, "%s", name);
@@ -1974,7 +1973,7 @@ deleteConnection(const char *name)
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
key, HASH_REMOVE, &found);
- if(!hentry)
+ if (!hentry)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("undefined connection name")));
diff --git a/contrib/dbmirror/pending.c b/contrib/dbmirror/pending.c
index 5eebd1a1ba0..cf00276a8f1 100644
--- a/contrib/dbmirror/pending.c
+++ b/contrib/dbmirror/pending.c
@@ -1,6 +1,6 @@
/****************************************************************************
* pending.c
- * $Id: pending.c,v 1.12 2003/07/24 17:52:20 tgl Exp $
+ * $Id: pending.c,v 1.13 2003/08/04 00:43:10 momjian Exp $
*
* This file contains a trigger for Postgresql-7.x to record changes to tables
* to a pending table for mirroring.
@@ -8,7 +8,7 @@
*
* Written by Steven Singer (ssinger@navtechinc.com)
* (c) 2001-2002 Navtech Systems Support Inc.
- * ALL RIGHTS RESERVED
+ * ALL RIGHTS RESERVED
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
@@ -79,8 +79,9 @@ recordchange(PG_FUNCTION_ARGS)
HeapTuple retTuple = NULL;
char *tblname;
char op = 0;
- char *schemaname;
+ char *schemaname;
char *fullyqualtblname;
+
if (fcinfo->context != NULL)
{
@@ -94,13 +95,13 @@ recordchange(PG_FUNCTION_ARGS)
tblname = SPI_getrelname(trigdata->tg_relation);
#ifndef NOSCHEMAS
schemaname = get_namespace_name(RelationGetNamespace(trigdata->tg_relation));
- fullyqualtblname = SPI_palloc(strlen(tblname) +
- strlen(schemaname) + 6);
- sprintf(fullyqualtblname,"\"%s\".\"%s\"",
- schemaname,tblname);
+ fullyqualtblname = SPI_palloc(strlen(tblname) +
+ strlen(schemaname) + 6);
+ sprintf(fullyqualtblname, "\"%s\".\"%s\"",
+ schemaname, tblname);
#else
fullyqualtblname = SPI_palloc(strlen(tblname) + 3);
- sprintf(fullyqualtblname,"\"%s\"",tblname);
+ sprintf(fullyqualtblname, "\"%s\"", tblname);
#endif
tupdesc = trigdata->tg_relation->rd_att;
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
@@ -166,8 +167,8 @@ storePending(char *cpTableName, HeapTuple tBeforeTuple,
int iResult = 0;
HeapTuple tCurTuple;
- //Points the current tuple(before or after)
- Datum saPlanData[4];
+ /* Points the current tuple(before or after) */
+ Datum saPlanData[4];
Oid taPlanArgTypes[3] = {NAMEOID, CHAROID, INT4OID};
void *vpPlan;
@@ -253,7 +254,7 @@ storeKeyInfo(char *cpTableName, HeapTuple tTupleData,
if (cpKeyData == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- /* cpTableName already contains quotes... */
+ /* cpTableName already contains quotes... */
errmsg("there is no PRIMARY KEY for table %s",
cpTableName)));
@@ -460,7 +461,7 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
}
else
{
- sprintf(cpFormatedPtr," ");
+ sprintf(cpFormatedPtr, " ");
iUsedDataBlock++;
cpFormatedPtr++;
continue;
@@ -508,8 +509,8 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
if (tpPKeys != NULL)
SPI_pfree(tpPKeys);
#if defined DEBUG_OUTPUT
- elog(NOTICE, "returning DataBlockSize:%d iUsedDataBlock:%d",iDataBlockSize,
- iUsedDataBlock);
+ elog(NOTICE, "returning DataBlockSize:%d iUsedDataBlock:%d", iDataBlockSize,
+ iUsedDataBlock);
#endif
memset(cpDataBlock + iUsedDataBlock, 0, iDataBlockSize - iUsedDataBlock);
diff --git a/contrib/dbsize/dbsize.c b/contrib/dbsize/dbsize.c
index c474930ca95..0037c14e706 100644
--- a/contrib/dbsize/dbsize.c
+++ b/contrib/dbsize/dbsize.c
@@ -54,7 +54,7 @@ database_size(PG_FUNCTION_ARGS)
if (!OidIsValid(dbid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\" does not exist", NameStr(*dbname))));
+ errmsg("database \"%s\" does not exist", NameStr(*dbname))));
dbpath = GetDatabasePath(dbid);
diff --git a/contrib/earthdistance/earthdistance.c b/contrib/earthdistance/earthdistance.c
index 19c81a5783f..28ce40378cf 100644
--- a/contrib/earthdistance/earthdistance.c
+++ b/contrib/earthdistance/earthdistance.c
@@ -6,8 +6,8 @@
/* Earth's radius is in statute miles. */
-const double EARTH_RADIUS = 3958.747716;
-const double TWO_PI = 2.0 * M_PI;
+const double EARTH_RADIUS = 3958.747716;
+const double TWO_PI = 2.0 * M_PI;
double *geo_distance(Point *pt1, Point *pt2);
@@ -66,10 +66,11 @@ geo_distance(Point *pt1, Point *pt2)
if (longdiff > M_PI)
longdiff = TWO_PI - longdiff;
- sino = sqrt(sin(fabs(lat1-lat2)/2.)*sin(fabs(lat1-lat2)/2.) +
- cos(lat1) * cos(lat2) * sin(longdiff/2.)*sin(longdiff/2.));
- if (sino > 1.) sino = 1.;
- *resultp = 2. * EARTH_RADIUS * asin(sino);
+ sino = sqrt(sin(fabs(lat1 - lat2) / 2.) * sin(fabs(lat1 - lat2) / 2.) +
+ cos(lat1) * cos(lat2) * sin(longdiff / 2.) * sin(longdiff / 2.));
+ if (sino > 1.)
+ sino = 1.;
+ *resultp = 2. * EARTH_RADIUS * asin(sino);
return resultp;
}
diff --git a/contrib/findoidjoins/findoidjoins.c b/contrib/findoidjoins/findoidjoins.c
index 8eb27e42a51..4843c1f3e5d 100644
--- a/contrib/findoidjoins/findoidjoins.c
+++ b/contrib/findoidjoins/findoidjoins.c
@@ -3,7 +3,7 @@
*
* Copyright 2002 by PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/contrib/findoidjoins/Attic/findoidjoins.c,v 1.20 2003/05/14 03:25:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/findoidjoins/Attic/findoidjoins.c,v 1.21 2003/08/04 00:43:10 momjian Exp $
*/
#include "postgres_fe.h"
@@ -14,23 +14,24 @@
int
main(int argc, char **argv)
{
- PGconn *conn;
- PQExpBufferData sql;
- PGresult *res;
- PGresult *pkrel_res;
- PGresult *fkrel_res;
- char *fk_relname;
- char *fk_nspname;
- char *fk_attname;
- char *pk_relname;
- char *pk_nspname;
- int fk, pk; /* loop counters */
+ PGconn *conn;
+ PQExpBufferData sql;
+ PGresult *res;
+ PGresult *pkrel_res;
+ PGresult *fkrel_res;
+ char *fk_relname;
+ char *fk_nspname;
+ char *fk_attname;
+ char *pk_relname;
+ char *pk_nspname;
+ int fk,
+ pk; /* loop counters */
if (argc != 2)
{
fprintf(stderr, "Usage: %s database\n", argv[0]);
exit(EXIT_FAILURE);
- }
+ }
initPQExpBuffer(&sql);
@@ -48,13 +49,13 @@ main(int argc, char **argv)
resetPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "%s",
- "SET search_path = public;"
- "SELECT c.relname, (SELECT nspname FROM "
- "pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname "
- "FROM pg_catalog.pg_class c "
- "WHERE c.relkind = 'r' "
- "AND c.relhasoids "
- "ORDER BY nspname, c.relname"
+ "SET search_path = public;"
+ "SELECT c.relname, (SELECT nspname FROM "
+ "pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname "
+ "FROM pg_catalog.pg_class c "
+ "WHERE c.relkind = 'r' "
+ "AND c.relhasoids "
+ "ORDER BY nspname, c.relname"
);
res = PQexec(conn, sql.data);
@@ -70,20 +71,20 @@ main(int argc, char **argv)
resetPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "%s",
- "SELECT c.relname, "
- "(SELECT nspname FROM pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname, "
- "a.attname "
- "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a "
- "WHERE a.attnum > 0 AND c.relkind = 'r' "
- "AND a.attrelid = c.oid "
- "AND a.atttypid IN ('pg_catalog.oid'::regtype, "
- " 'pg_catalog.regclass'::regtype, "
- " 'pg_catalog.regoper'::regtype, "
- " 'pg_catalog.regoperator'::regtype, "
- " 'pg_catalog.regproc'::regtype, "
- " 'pg_catalog.regprocedure'::regtype, "
- " 'pg_catalog.regtype'::regtype) "
- "ORDER BY nspname, c.relname, a.attnum"
+ "SELECT c.relname, "
+ "(SELECT nspname FROM pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname, "
+ "a.attname "
+ "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a "
+ "WHERE a.attnum > 0 AND c.relkind = 'r' "
+ "AND a.attrelid = c.oid "
+ "AND a.atttypid IN ('pg_catalog.oid'::regtype, "
+ " 'pg_catalog.regclass'::regtype, "
+ " 'pg_catalog.regoper'::regtype, "
+ " 'pg_catalog.regoperator'::regtype, "
+ " 'pg_catalog.regproc'::regtype, "
+ " 'pg_catalog.regprocedure'::regtype, "
+ " 'pg_catalog.regtype'::regtype) "
+ "ORDER BY nspname, c.relname, a.attnum"
);
res = PQexec(conn, sql.data);
@@ -95,8 +96,8 @@ main(int argc, char **argv)
fkrel_res = res;
/*
- * For each column and each relation-having-OIDs, look to see if
- * the column contains any values matching entries in the relation.
+ * For each column and each relation-having-OIDs, look to see if the
+ * column contains any values matching entries in the relation.
*/
for (fk = 0; fk < PQntuples(fkrel_res); fk++)
@@ -113,12 +114,12 @@ main(int argc, char **argv)
resetPQExpBuffer(&sql);
appendPQExpBuffer(&sql,
- "SELECT 1 "
- "FROM \"%s\".\"%s\" t1, "
- "\"%s\".\"%s\" t2 "
- "WHERE t1.\"%s\"::pg_catalog.oid = t2.oid "
- "LIMIT 1",
- fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname);
+ "SELECT 1 "
+ "FROM \"%s\".\"%s\" t1, "
+ "\"%s\".\"%s\" t2 "
+ "WHERE t1.\"%s\"::pg_catalog.oid = t2.oid "
+ "LIMIT 1",
+ fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname);
res = PQexec(conn, sql.data);
if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
diff --git a/contrib/fulltextindex/fti.c b/contrib/fulltextindex/fti.c
index 38dba1ce30e..e5095ff1a1d 100644
--- a/contrib/fulltextindex/fti.c
+++ b/contrib/fulltextindex/fti.c
@@ -304,7 +304,7 @@ fti(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of \"%s\" does not exist",
- args[i + 1], indexname)));
+ args[i + 1], indexname)));
/* Get the char* representation of the column */
column = SPI_getvalue(rettuple, tupdesc, colnum);
@@ -339,8 +339,8 @@ fti(PG_FUNCTION_ARGS)
ret = SPI_execp(*(plan->splan), values, NULL, 0);
if (ret != SPI_OK_INSERT)
ereport(ERROR,
- (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
- errmsg("error executing insert")));
+ (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
+ errmsg("error executing insert")));
}
pfree(buff);
pfree(data);
diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c
index 3f869180b86..cc4be6af1de 100644
--- a/contrib/fuzzystrmatch/fuzzystrmatch.c
+++ b/contrib/fuzzystrmatch/fuzzystrmatch.c
@@ -87,7 +87,7 @@ levenshtein(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument exceeds max length: %d",
- MAX_LEVENSHTEIN_STRLEN)));
+ MAX_LEVENSHTEIN_STRLEN)));
/*
* If either rows or cols is 0, the answer is the other value. This
@@ -220,7 +220,7 @@ metaphone(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument exceeds max length: %d",
- MAX_METAPHONE_STRLEN)));
+ MAX_METAPHONE_STRLEN)));
if (!(str_i_len > 0))
ereport(ERROR,
@@ -232,7 +232,7 @@ metaphone(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("output length exceeds max length: %d",
- MAX_METAPHONE_STRLEN)));
+ MAX_METAPHONE_STRLEN)));
if (!(reqlen > 0))
ereport(ERROR,
diff --git a/contrib/intagg/int_aggregate.c b/contrib/intagg/int_aggregate.c
index 4614b669a7a..2bb06ff73a4 100644
--- a/contrib/intagg/int_aggregate.c
+++ b/contrib/intagg/int_aggregate.c
@@ -132,9 +132,9 @@ ShrinkPGArray(PGARRAY * p)
/* use current transaction context */
pnew = palloc(cb);
+
/*
- * Fix up the fields in the new structure, so Postgres
- * understands
+ * Fix up the fields in the new structure, so Postgres understands
*/
memcpy(pnew, p, cb);
pnew->a.size = cb;
@@ -194,7 +194,7 @@ int_enum(PG_FUNCTION_ARGS)
if (!rsi || !IsA(rsi, ReturnSetInfo))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("int_enum called in context that cannot accept a set")));
+ errmsg("int_enum called in context that cannot accept a set")));
if (!p)
{
diff --git a/contrib/intarray/_int.h b/contrib/intarray/_int.h
index 5decd969e16..3e702ca1afe 100644
--- a/contrib/intarray/_int.h
+++ b/contrib/intarray/_int.h
@@ -91,19 +91,19 @@ typedef char *BITVECP;
*/
typedef struct
{
- int4 len;
- int4 flag;
- char data[1];
-} GISTTYPE;
+ int4 len;
+ int4 flag;
+ char data[1];
+} GISTTYPE;
-#define ALLISTRUE 0x04
+#define ALLISTRUE 0x04
-#define ISALLTRUE(x) ( ((GISTTYPE*)x)->flag & ALLISTRUE )
+#define ISALLTRUE(x) ( ((GISTTYPE*)x)->flag & ALLISTRUE )
-#define GTHDRSIZE ( sizeof(int4)*2 )
+#define GTHDRSIZE ( sizeof(int4)*2 )
#define CALCGTSIZE(flag) ( GTHDRSIZE+(((flag) & ALLISTRUE) ? 0 : SIGLEN) )
-#define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )
+#define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )
/*
** types for functions
@@ -114,22 +114,22 @@ typedef void (*formfloat) (ArrayType *, float *);
/*
** useful function
*/
-bool isort(int4 *a, const int len);
-ArrayType *new_intArrayType(int num);
-ArrayType *copy_intArrayType(ArrayType *a);
-ArrayType *resize_intArrayType(ArrayType *a, int num);
-int internal_size(int *a, int len);
-ArrayType *_int_unique(ArrayType *a);
-int32 intarray_match_first(ArrayType *a, int32 elem);
-ArrayType *intarray_add_elem(ArrayType *a, int32 elem);
-ArrayType *intarray_concat_arrays(ArrayType *a, ArrayType *b);
-ArrayType *int_to_intset(int32 elem);
-bool inner_int_overlap(ArrayType *a, ArrayType *b);
-bool inner_int_contains(ArrayType *a, ArrayType *b);
-ArrayType * inner_int_union(ArrayType *a, ArrayType *b);
-ArrayType * inner_int_inter(ArrayType *a, ArrayType *b);
-void rt__int_size(ArrayType *a, float *size);
-void gensign(BITVEC sign, int *a, int len);
+bool isort(int4 *a, const int len);
+ArrayType *new_intArrayType(int num);
+ArrayType *copy_intArrayType(ArrayType *a);
+ArrayType *resize_intArrayType(ArrayType *a, int num);
+int internal_size(int *a, int len);
+ArrayType *_int_unique(ArrayType *a);
+int32 intarray_match_first(ArrayType *a, int32 elem);
+ArrayType *intarray_add_elem(ArrayType *a, int32 elem);
+ArrayType *intarray_concat_arrays(ArrayType *a, ArrayType *b);
+ArrayType *int_to_intset(int32 elem);
+bool inner_int_overlap(ArrayType *a, ArrayType *b);
+bool inner_int_contains(ArrayType *a, ArrayType *b);
+ArrayType *inner_int_union(ArrayType *a, ArrayType *b);
+ArrayType *inner_int_inter(ArrayType *a, ArrayType *b);
+void rt__int_size(ArrayType *a, float *size);
+void gensign(BITVEC sign, int *a, int len);
/*****************************************************************************
@@ -160,18 +160,16 @@ typedef struct
#define COMPUTESIZE(size) ( HDRSIZEQT + size * sizeof(ITEM) )
#define GETQUERY(x) (ITEM*)( (char*)(x)+HDRSIZEQT )
-bool signconsistent(QUERYTYPE * query, BITVEC sign, bool calcnot);
-bool execconsistent(QUERYTYPE * query, ArrayType *array, bool calcnot);
-
-
+bool signconsistent(QUERYTYPE * query, BITVEC sign, bool calcnot);
+bool execconsistent(QUERYTYPE * query, ArrayType *array, bool calcnot);
-int compASC(const void *a, const void *b);
-int compDESC(const void *a, const void *b);
-#define QSORT(a, direction) \
-if (ARRNELEMS(a) > 1) \
- qsort((void*)ARRPTR(a), ARRNELEMS(a),sizeof(int4), \
- (direction) ? compASC : compDESC )
+int compASC(const void *a, const void *b);
+int compDESC(const void *a, const void *b);
+#define QSORT(a, direction) \
+if (ARRNELEMS(a) > 1) \
+ qsort((void*)ARRPTR(a), ARRNELEMS(a),sizeof(int4), \
+ (direction) ? compASC : compDESC )
diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c
index 35d6e6e2c00..d57a94e8709 100644
--- a/contrib/intarray/_int_bool.c
+++ b/contrib/intarray/_int_bool.c
@@ -299,7 +299,7 @@ signconsistent(QUERYTYPE * query, BITVEC sign, bool calcnot)
GETQUERY(query) + query->size - 1,
(void *) sign, calcnot,
checkcondition_bit
- );
+ );
}
bool
@@ -326,7 +326,7 @@ rboolop(PG_FUNCTION_ARGS)
boolop,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
- );
+ );
}
Datum
@@ -743,4 +743,3 @@ querytree(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(res);
}
-
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index fa6d502e891..39c9679addd 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -85,27 +85,31 @@ g_int_consistent(PG_FUNCTION_ARGS)
}
Datum
-g_int_union(PG_FUNCTION_ARGS) {
- bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
- int *size = (int *) PG_GETARG_POINTER(1);
- int4 i,len = (VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY);
- ArrayType *res;
- int totlen=0,*ptr;
+g_int_union(PG_FUNCTION_ARGS)
+{
+ bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
+ int *size = (int *) PG_GETARG_POINTER(1);
+ int4 i,
+ len = (VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY);
+ ArrayType *res;
+ int totlen = 0,
+ *ptr;
for (i = 0; i < len; i++)
- totlen+=ARRNELEMS( GETENTRY(entryvec,i) );
+ totlen += ARRNELEMS(GETENTRY(entryvec, i));
- res=new_intArrayType(totlen);
- ptr=ARRPTR(res);
+ res = new_intArrayType(totlen);
+ ptr = ARRPTR(res);
- for (i = 0; i < len; i++) {
- memcpy(ptr, ARRPTR( GETENTRY(entryvec,i) ), ARRNELEMS( GETENTRY(entryvec,i) )*sizeof(int4) );
- ptr+=ARRNELEMS( GETENTRY(entryvec,i) );
+ for (i = 0; i < len; i++)
+ {
+ memcpy(ptr, ARRPTR(GETENTRY(entryvec, i)), ARRNELEMS(GETENTRY(entryvec, i)) * sizeof(int4));
+ ptr += ARRNELEMS(GETENTRY(entryvec, i));
}
- QSORT(res,1);
- res=_int_unique(res);
- *size = VARSIZE(res);
+ QSORT(res, 1);
+ res = _int_unique(res);
+ *size = VARSIZE(res);
PG_RETURN_POINTER(res);
}
@@ -239,22 +243,23 @@ g_int_decompress(PG_FUNCTION_ARGS)
** The GiST Penalty method for _int arrays
*/
Datum
-g_int_penalty(PG_FUNCTION_ARGS) {
- GISTENTRY *origentry = (GISTENTRY *) PG_GETARG_POINTER(0);
- GISTENTRY *newentry = (GISTENTRY *) PG_GETARG_POINTER(1);
- float *result = (float *) PG_GETARG_POINTER(2);
+g_int_penalty(PG_FUNCTION_ARGS)
+{
+ GISTENTRY *origentry = (GISTENTRY *) PG_GETARG_POINTER(0);
+ GISTENTRY *newentry = (GISTENTRY *) PG_GETARG_POINTER(1);
+ float *result = (float *) PG_GETARG_POINTER(2);
ArrayType *ud;
float tmp1,
tmp2;
ud = inner_int_union((ArrayType *) DatumGetPointer(origentry->key),
- (ArrayType *) DatumGetPointer(newentry->key));
+ (ArrayType *) DatumGetPointer(newentry->key));
rt__int_size(ud, &tmp1);
rt__int_size((ArrayType *) DatumGetPointer(origentry->key), &tmp2);
*result = tmp1 - tmp2;
pfree(ud);
- PG_RETURN_POINTER (result);
+ PG_RETURN_POINTER(result);
}
@@ -311,8 +316,9 @@ comparecost(const void *a, const void *b)
** We use Guttman's poly time split algorithm
*/
Datum
-g_int_picksplit(PG_FUNCTION_ARGS) {
- bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
+g_int_picksplit(PG_FUNCTION_ARGS)
+{
+ bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber i,
j;
@@ -501,4 +507,3 @@ g_int_picksplit(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(v);
}
-
diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c
index 04ff5e436b6..d5f169f7c82 100644
--- a/contrib/intarray/_int_tool.c
+++ b/contrib/intarray/_int_tool.c
@@ -270,7 +270,7 @@ _int_unique(ArrayType *r)
*data;
int num = ARRNELEMS(r);
- if ( num<2 )
+ if (num < 2)
return r;
data = tmp = dr = ARRPTR(r);
@@ -367,4 +367,3 @@ compDESC(const void *a, const void *b)
return 0;
return (*(int4 *) a < *(int4 *) b) ? 1 : -1;
}
-
diff --git a/contrib/ltree/_ltree_op.c b/contrib/ltree/_ltree_op.c
index f55434d3876..3890769ce1a 100644
--- a/contrib/ltree/_ltree_op.c
+++ b/contrib/ltree/_ltree_op.c
@@ -45,8 +45,8 @@ array_iterator(ArrayType *la, PGCALL2 callback, void *param, ltree ** found)
if (ARR_NDIM(la) != 1)
ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("array must be one-dimensional")));
+ (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
+ errmsg("array must be one-dimensional")));
if (found)
*found = NULL;
@@ -133,38 +133,40 @@ _ltq_rregex(PG_FUNCTION_ARGS)
Datum
_lt_q_regex(PG_FUNCTION_ARGS)
{
- ArrayType *_tree = PG_GETARG_ARRAYTYPE_P(0);
- ArrayType *_query = PG_GETARG_ARRAYTYPE_P(1);
- lquery *query = (lquery *) ARR_DATA_PTR(_query);
- bool res = false;
- int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query));
-
- if (ARR_NDIM(_query) != 1)
- ereport(ERROR,
+ ArrayType *_tree = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *_query = PG_GETARG_ARRAYTYPE_P(1);
+ lquery *query = (lquery *) ARR_DATA_PTR(_query);
+ bool res = false;
+ int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query));
+
+ if (ARR_NDIM(_query) != 1)
+ ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must be one-dimensional")));
- while (num > 0) {
- if ( array_iterator(_tree, ltq_regex, (void*)query, NULL) ) {
- res = true;
- break;
- }
- num--;
- query = (lquery*)NEXTVAL(query);
- }
-
- PG_FREE_IF_COPY(_tree, 0);
- PG_FREE_IF_COPY(_query, 1);
- PG_RETURN_BOOL(res);
+ while (num > 0)
+ {
+ if (array_iterator(_tree, ltq_regex, (void *) query, NULL))
+ {
+ res = true;
+ break;
+ }
+ num--;
+ query = (lquery *) NEXTVAL(query);
+ }
+
+ PG_FREE_IF_COPY(_tree, 0);
+ PG_FREE_IF_COPY(_query, 1);
+ PG_RETURN_BOOL(res);
}
Datum
_lt_q_rregex(PG_FUNCTION_ARGS)
{
- PG_RETURN_DATUM(DirectFunctionCall2(_lt_q_regex,
- PG_GETARG_DATUM(1),
- PG_GETARG_DATUM(0)
- ));
+ PG_RETURN_DATUM(DirectFunctionCall2(_lt_q_regex,
+ PG_GETARG_DATUM(1),
+ PG_GETARG_DATUM(0)
+ ));
}
diff --git a/contrib/ltree/lquery_op.c b/contrib/ltree/lquery_op.c
index 72fd968d595..2328fcd2386 100644
--- a/contrib/ltree/lquery_op.c
+++ b/contrib/ltree/lquery_op.c
@@ -45,7 +45,7 @@ getlexem(char *start, char *end, int *len)
}
bool
-compare_subnode(ltree_level * t, char *qn, int len, int (*cmpptr) (const char *, const char *, size_t), bool anyend)
+ compare_subnode(ltree_level * t, char *qn, int len, int (*cmpptr) (const char *, const char *, size_t), bool anyend)
{
char *endt = t->name + t->len;
char *endq = qn + len;
@@ -123,10 +123,15 @@ printFieldNot(FieldNot *fn ) {
}
*/
-static struct {
- bool muse;
- uint32 high_pos;
-} SomeStack = {false,0,};
+static struct
+{
+ bool muse;
+ uint32 high_pos;
+} SomeStack =
+
+{
+ false, 0,
+};
static bool
checkCond(lquery_level * curq, int query_numlevel, ltree_level * curt, int tree_numlevel, FieldNot * ptr)
@@ -140,7 +145,8 @@ checkCond(lquery_level * curq, int query_numlevel, ltree_level * curt, int tree_
lquery_level *prevq = NULL;
ltree_level *prevt = NULL;
- if ( SomeStack.muse ) {
+ if (SomeStack.muse)
+ {
high_pos = SomeStack.high_pos;
qlen--;
prevq = curq;
@@ -200,13 +206,15 @@ checkCond(lquery_level * curq, int query_numlevel, ltree_level * curt, int tree_
curt = LEVEL_NEXT(curt);
tlen--;
cur_tpos++;
- if ( isok && prevq && prevq->numvar==0 && tlen>0 && cur_tpos <= high_pos ) {
- FieldNot tmpptr;
- if ( ptr )
- memcpy(&tmpptr,ptr,sizeof(FieldNot));
- SomeStack.high_pos = high_pos-cur_tpos;
+ if (isok && prevq && prevq->numvar == 0 && tlen > 0 && cur_tpos <= high_pos)
+ {
+ FieldNot tmpptr;
+
+ if (ptr)
+ memcpy(&tmpptr, ptr, sizeof(FieldNot));
+ SomeStack.high_pos = high_pos - cur_tpos;
SomeStack.muse = true;
- if ( checkCond(prevq, qlen+1, curt, tlen, (ptr) ? &tmpptr : NULL) )
+ if (checkCond(prevq, qlen + 1, curt, tlen, (ptr) ? &tmpptr : NULL))
return true;
}
if (!isok && ptr)
@@ -311,19 +319,21 @@ Datum
lt_q_regex(PG_FUNCTION_ARGS)
{
ltree *tree = PG_GETARG_LTREE(0);
- ArrayType *_query = PG_GETARG_ARRAYTYPE_P(1);
- lquery *query = (lquery *) ARR_DATA_PTR(_query);
- bool res = false;
- int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query));
+ ArrayType *_query = PG_GETARG_ARRAYTYPE_P(1);
+ lquery *query = (lquery *) ARR_DATA_PTR(_query);
+ bool res = false;
+ int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query));
- if (ARR_NDIM(_query) != 1)
- ereport(ERROR,
+ if (ARR_NDIM(_query) != 1)
+ ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must be one-dimensional")));
- while (num > 0) {
+ while (num > 0)
+ {
if (DatumGetBool(DirectFunctionCall2(ltq_regex,
- PointerGetDatum(tree), PointerGetDatum(query)))) {
+ PointerGetDatum(tree), PointerGetDatum(query))))
+ {
res = true;
break;
@@ -345,5 +355,3 @@ lt_q_rregex(PG_FUNCTION_ARGS)
PG_GETARG_DATUM(0)
));
}
-
-
diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c
index 24116b02424..62c033cc5df 100644
--- a/contrib/ltree/ltree_io.c
+++ b/contrib/ltree/ltree_io.c
@@ -22,7 +22,7 @@ Datum lquery_out(PG_FUNCTION_ARGS);
#define UNCHAR ereport(ERROR, \
(errcode(ERRCODE_SYNTAX_ERROR), \
- errmsg("syntax error at position %d near \"%c\"", \
+ errmsg("syntax error at position %d near \"%c\"", \
(int)(ptr-buf), *ptr)));
@@ -81,8 +81,8 @@ ltree_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("name length is %d, must " \
- "be < 256, in position %d",
- lptr->len, (int) (lptr->start - buf))));
+ "be < 256, in position %d",
+ lptr->len, (int) (lptr->start - buf))));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
@@ -105,8 +105,8 @@ ltree_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("name length is %d, must " \
- "be < 256, in position %d",
- lptr->len, (int) (lptr->start - buf))));
+ "be < 256, in position %d",
+ lptr->len, (int) (lptr->start - buf))));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
@@ -283,8 +283,8 @@ lquery_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("name length is %d, must " \
- "be < 256, in position %d",
- lptr->len, (int) (lptr->start - buf))));
+ "be < 256, in position %d",
+ lptr->len, (int) (lptr->start - buf))));
state = LQPRS_WAITVAR;
}
@@ -299,8 +299,8 @@ lquery_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("name length is %d, must " \
- "be < 256, in position %d",
- lptr->len, (int) (lptr->start - buf))));
+ "be < 256, in position %d",
+ lptr->len, (int) (lptr->start - buf))));
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
@@ -412,8 +412,8 @@ lquery_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("name length is %d, must " \
- "be < 256, in position %d",
- lptr->len, (int) (lptr->start - buf))));
+ "be < 256, in position %d",
+ lptr->len, (int) (lptr->start - buf))));
}
else if (state == LQPRS_WAITOPEN)
curqlevel->high = 0xffff;
@@ -442,7 +442,7 @@ lquery_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Low limit(%d) is greater than upper(%d).",
- curqlevel->low, curqlevel->high)));
+ curqlevel->low, curqlevel->high)));
curqlevel = NEXTLEV(curqlevel);
}
diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c
index 1b3c80b7266..4dcb558fe48 100644
--- a/contrib/ltree/ltree_op.c
+++ b/contrib/ltree/ltree_op.c
@@ -83,49 +83,49 @@ Datum
ltree_cmp(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_INT32(res);
+ PG_RETURN_INT32(res);
}
Datum
ltree_lt(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res < 0) ? true : false);
+ PG_RETURN_BOOL((res < 0) ? true : false);
}
Datum
ltree_le(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res <= 0) ? true : false);
+ PG_RETURN_BOOL((res <= 0) ? true : false);
}
Datum
ltree_eq(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res == 0) ? true : false);
+ PG_RETURN_BOOL((res == 0) ? true : false);
}
Datum
ltree_ge(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res >= 0) ? true : false);
+ PG_RETURN_BOOL((res >= 0) ? true : false);
}
Datum
ltree_gt(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res > 0) ? true : false);
+ PG_RETURN_BOOL((res > 0) ? true : false);
}
Datum
ltree_ne(PG_FUNCTION_ARGS)
{
RUNCMP
- PG_RETURN_BOOL((res != 0) ? true : false);
+ PG_RETURN_BOOL((res != 0) ? true : false);
}
Datum
@@ -331,46 +331,55 @@ ltree_index(PG_FUNCTION_ARGS)
{
ltree *a = PG_GETARG_LTREE(0);
ltree *b = PG_GETARG_LTREE(1);
- int start=(fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0;
- int i,j;
- ltree_level *startptr, *aptr, *bptr;
- bool found=false;
-
- if ( start < 0 ) {
- if ( -start >= a->numlevel )
- start=0;
- else
- start = (int)(a->numlevel)+start;
+ int start = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0;
+ int i,
+ j;
+ ltree_level *startptr,
+ *aptr,
+ *bptr;
+ bool found = false;
+
+ if (start < 0)
+ {
+ if (-start >= a->numlevel)
+ start = 0;
+ else
+ start = (int) (a->numlevel) + start;
}
- if ( a->numlevel - start < b->numlevel || a->numlevel==0 || b->numlevel==0 ) {
+ if (a->numlevel - start < b->numlevel || a->numlevel == 0 || b->numlevel == 0)
+ {
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_INT32(-1);
}
- startptr=LTREE_FIRST(a);
- for(i=0; i<=a->numlevel-b->numlevel; i++) {
- if ( i>=start ) {
- aptr=startptr;
- bptr=LTREE_FIRST(b);
- for(j=0;j<b->numlevel;j++) {
- if ( !(aptr->len==bptr->len && strncmp(aptr->name,bptr->name, aptr->len)==0) )
- break;
- aptr=LEVEL_NEXT(aptr);
- bptr=LEVEL_NEXT(bptr);
+ startptr = LTREE_FIRST(a);
+ for (i = 0; i <= a->numlevel - b->numlevel; i++)
+ {
+ if (i >= start)
+ {
+ aptr = startptr;
+ bptr = LTREE_FIRST(b);
+ for (j = 0; j < b->numlevel; j++)
+ {
+ if (!(aptr->len == bptr->len && strncmp(aptr->name, bptr->name, aptr->len) == 0))
+ break;
+ aptr = LEVEL_NEXT(aptr);
+ bptr = LEVEL_NEXT(bptr);
}
-
- if ( j==b->numlevel ) {
- found=true;
+
+ if (j == b->numlevel)
+ {
+ found = true;
break;
}
}
- startptr=LEVEL_NEXT(startptr);
+ startptr = LEVEL_NEXT(startptr);
}
-
- if ( !found )
- i=-1;
+
+ if (!found)
+ i = -1;
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
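ltree_index() above is essentially a substring search over label paths: slide a window of b->numlevel levels across a, starting no earlier than start (a negative start counts back from the end), and report the first offset at which every level matches, or -1 if none does. A rough standalone sketch of the same idea over arrays of C strings; the names and types here are illustrative, not part of the ltree code:

#include <stdio.h>
#include <string.h>

/*
 * Return the first index >= start at which the label sequence sub
 * (sublen entries) occurs inside path (pathlen entries), or -1.
 */
static int
path_index(char **path, int pathlen, char **sub, int sublen, int start)
{
	int		i,
			j;

	if (start < 0)
		start = (-start >= pathlen) ? 0 : pathlen + start;
	if (sublen == 0 || pathlen == 0 || pathlen - start < sublen)
		return -1;

	for (i = start; i <= pathlen - sublen; i++)
	{
		for (j = 0; j < sublen; j++)
			if (strcmp(path[i + j], sub[j]) != 0)
				break;
		if (j == sublen)
			return i;			/* every level matched */
	}
	return -1;
}

int
main(void)
{
	char	   *path[] = {"Top", "Science", "Astronomy", "Stars"};
	char	   *sub[] = {"Astronomy", "Stars"};

	printf("%d\n", path_index(path, 4, sub, 2, 0));		/* prints 2 */
	return 0;
}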
@@ -496,18 +505,18 @@ Datum
text2ltree(PG_FUNCTION_ARGS)
{
text *in = PG_GETARG_TEXT_P(0);
- char *s = (char *) palloc(VARSIZE(in) - VARHDRSZ + 1);
- ltree *out;
+ char *s = (char *) palloc(VARSIZE(in) - VARHDRSZ + 1);
+ ltree *out;
memcpy(s, VARDATA(in), VARSIZE(in) - VARHDRSZ);
s[VARSIZE(in) - VARHDRSZ] = '\0';
out = (ltree *) DatumGetPointer(DirectFunctionCall1(
- ltree_in,
- PointerGetDatum(s)
- ));
+ ltree_in,
+ PointerGetDatum(s)
+ ));
pfree(s);
- PG_FREE_IF_COPY(in,0);
+ PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(out);
}
@@ -516,16 +525,18 @@ Datum
ltree2text(PG_FUNCTION_ARGS)
{
ltree *in = PG_GETARG_LTREE(0);
- char *ptr;
- int i;
+ char *ptr;
+ int i;
ltree_level *curlevel;
- text *out;
-
- out=(text*)palloc(in->len+VARHDRSZ);
- ptr = VARDATA(out);
+ text *out;
+
+ out = (text *) palloc(in->len + VARHDRSZ);
+ ptr = VARDATA(out);
curlevel = LTREE_FIRST(in);
- for (i = 0; i < in->numlevel; i++) {
- if (i != 0) {
+ for (i = 0; i < in->numlevel; i++)
+ {
+ if (i != 0)
+ {
*ptr = '.';
ptr++;
}
@@ -533,13 +544,9 @@ ltree2text(PG_FUNCTION_ARGS)
ptr += curlevel->len;
curlevel = LEVEL_NEXT(curlevel);
}
-
- VARATT_SIZEP(out) = VARHDRSZ + (ptr-VARDATA(out));
+
+ VARATT_SIZEP(out) = VARHDRSZ + (ptr - VARDATA(out));
PG_FREE_IF_COPY(in, 0);
-
+
PG_RETURN_POINTER(out);
}
-
-
-
-
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index e2e2907267f..76369fd13cb 100644
--- a/contrib/oid2name/oid2name.c
+++ b/contrib/oid2name/oid2name.c
@@ -356,8 +356,8 @@ sql_exec_dumptable(PGconn *conn, int systables)
snprintf(todo, 1024, "select relfilenode,relname from pg_class order by relname");
else
snprintf(todo, 1024, "select relfilenode,relname from pg_class "
- "where relkind not in ('v','s', 'c') and "
- "relname not like 'pg_%%' order by relname");
+ "where relkind not in ('v','s', 'c') and "
+ "relname not like 'pg_%%' order by relname");
sql_exec(conn, todo, 0);
}
diff --git a/contrib/pg_autovacuum/pg_autovacuum.c b/contrib/pg_autovacuum/pg_autovacuum.c
index dce065d7b6f..01d22dbc6b1 100644
--- a/contrib/pg_autovacuum/pg_autovacuum.c
+++ b/contrib/pg_autovacuum/pg_autovacuum.c
@@ -6,324 +6,362 @@
#include "pg_autovacuum.h"
#define TIMEBUFF 256
-FILE *LOGOUTPUT;
-char timebuffer[TIMEBUFF];
-char logbuffer[4096];
+FILE *LOGOUTPUT;
+char timebuffer[TIMEBUFF];
+char logbuffer[4096];
void
-log_entry (const char *logentry)
+log_entry(const char *logentry)
{
- time_t curtime;
- struct tm *loctime;
- curtime = time (NULL);
- loctime = localtime (&curtime);
- strftime (timebuffer, TIMEBUFF, "%Y-%m-%d %r", loctime); /* cbb - %F is not always available */
- fprintf (LOGOUTPUT, "[%s] %s\n", timebuffer, logentry);
+ time_t curtime;
+ struct tm *loctime;
+
+ curtime = time(NULL);
+ loctime = localtime(&curtime);
+ strftime(timebuffer, TIMEBUFF, "%Y-%m-%d %r", loctime); /* cbb - %F is not
+ * always available */
+ fprintf(LOGOUTPUT, "[%s] %s\n", timebuffer, logentry);
}
/* Function used to detach the pg_autovacuum daemon from the tty and go into the background *
-* This code is mostly ripped directly from pm_dameonize in postmaster.c *
-* with unneeded code removed. */
-void daemonize ()
+* This code is mostly ripped directly from pm_daemonize in postmaster.c *
+* with unneeded code removed. */
+void
+daemonize()
{
- pid_t pid;
-
- pid = fork();
- if (pid == (pid_t) -1)
- {
- log_entry("Error: cannot disassociate from controlling TTY");
- fflush(LOGOUTPUT);
- _exit(1);
- }
- else if (pid)
- { /* parent */
- /* Parent should just exit, without doing any atexit cleanup */
- _exit(0);
- }
+ pid_t pid;
+
+ pid = fork();
+ if (pid == (pid_t) -1)
+ {
+ log_entry("Error: cannot disassociate from controlling TTY");
+ fflush(LOGOUTPUT);
+ _exit(1);
+ }
+ else if (pid)
+ { /* parent */
+ /* Parent should just exit, without doing any atexit cleanup */
+ _exit(0);
+ }
/* GH: If there's no setsid(), we hopefully don't need silent mode.
* Until there's a better solution. */
#ifdef HAVE_SETSID
- if (setsid() < 0)
- {
- log_entry("Error: cannot disassociate from controlling TTY");
- fflush(LOGOUTPUT);
- _exit(1);
- }
+ if (setsid() < 0)
+ {
+ log_entry("Error: cannot disassociate from controlling TTY");
+ fflush(LOGOUTPUT);
+ _exit(1);
+ }
#endif
}
/* Create and return a tbl_info struct initialized with values from the given row of res */
tbl_info *
-init_table_info (PGresult * res, int row, db_info *dbi)
+init_table_info(PGresult *res, int row, db_info * dbi)
{
- tbl_info *new_tbl = (tbl_info *) malloc (sizeof (tbl_info));
-
- if (!new_tbl) {
- log_entry ("init_table_info: Cannot get memory");
- fflush (LOGOUTPUT);
- return NULL;
- }
-
- if (NULL == res)
- return NULL;
-
- new_tbl->dbi = dbi; /* set pointer to db */
-
- new_tbl->schema_name = (char *)
- malloc (strlen (PQgetvalue (res, row, PQfnumber (res, "schemaname"))) + 1);
- if (!new_tbl->schema_name) {
- log_entry ("init_table_info: malloc failed on new_tbl->schema_name");
- fflush (LOGOUTPUT);
- return NULL;
- }
- strcpy (new_tbl->schema_name,
- PQgetvalue (res, row, PQfnumber (res, "schemaname")));
-
- new_tbl->table_name = (char *)
- malloc (strlen (PQgetvalue (res, row, PQfnumber (res, "relname"))) +
- strlen (new_tbl->schema_name) + 2);
- if (!new_tbl->table_name) {
- log_entry ("init_table_info: malloc failed on new_tbl->table_name");
- fflush (LOGOUTPUT);
- return NULL;
- }
- strcpy (new_tbl->table_name, new_tbl->schema_name);
- strcat (new_tbl->table_name, ".");
- strcat (new_tbl->table_name, PQgetvalue (res, row, PQfnumber (res, "relname")));
-
- new_tbl->CountAtLastAnalyze =
- (atol (PQgetvalue (res, row, PQfnumber (res, "n_tup_ins"))) +
- atol (PQgetvalue (res, row, PQfnumber (res, "n_tup_upd"))));
- new_tbl->curr_analyze_count = new_tbl->CountAtLastAnalyze;
-
- new_tbl->CountAtLastVacuum =
- (atol (PQgetvalue (res, row, PQfnumber (res, "n_tup_del"))) +
- atol (PQgetvalue (res, row, PQfnumber (res, "n_tup_upd"))));
- new_tbl->curr_vacuum_count = new_tbl->CountAtLastVacuum;
-
- new_tbl->relfilenode = atoi (PQgetvalue (res, row, PQfnumber (res, "relfilenode")));
- new_tbl->reltuples = atoi (PQgetvalue (res, row, PQfnumber (res, "reltuples")));
- new_tbl->relpages = atoi (PQgetvalue (res, row, PQfnumber (res, "relpages")));
-
- new_tbl->analyze_threshold =
- args->analyze_base_threshold + args->analyze_scaling_factor * new_tbl->reltuples;
- new_tbl->vacuum_threshold =
- args->vacuum_base_threshold + args->vacuum_scaling_factor * new_tbl->reltuples;
-
- if (args->debug >= 2) {
- print_table_info (new_tbl);
- }
-
- return new_tbl;
+ tbl_info *new_tbl = (tbl_info *) malloc(sizeof(tbl_info));
+
+ if (!new_tbl)
+ {
+ log_entry("init_table_info: Cannot get memory");
+ fflush(LOGOUTPUT);
+ return NULL;
+ }
+
+ if (NULL == res)
+ return NULL;
+
+ new_tbl->dbi = dbi; /* set pointer to db */
+
+ new_tbl->schema_name = (char *)
+ malloc(strlen(PQgetvalue(res, row, PQfnumber(res, "schemaname"))) + 1);
+ if (!new_tbl->schema_name)
+ {
+ log_entry("init_table_info: malloc failed on new_tbl->schema_name");
+ fflush(LOGOUTPUT);
+ return NULL;
+ }
+ strcpy(new_tbl->schema_name,
+ PQgetvalue(res, row, PQfnumber(res, "schemaname")));
+
+ new_tbl->table_name = (char *)
+ malloc(strlen(PQgetvalue(res, row, PQfnumber(res, "relname"))) +
+ strlen(new_tbl->schema_name) + 2);
+ if (!new_tbl->table_name)
+ {
+ log_entry("init_table_info: malloc failed on new_tbl->table_name");
+ fflush(LOGOUTPUT);
+ return NULL;
+ }
+ strcpy(new_tbl->table_name, new_tbl->schema_name);
+ strcat(new_tbl->table_name, ".");
+ strcat(new_tbl->table_name, PQgetvalue(res, row, PQfnumber(res, "relname")));
+
+ new_tbl->CountAtLastAnalyze =
+ (atol(PQgetvalue(res, row, PQfnumber(res, "n_tup_ins"))) +
+ atol(PQgetvalue(res, row, PQfnumber(res, "n_tup_upd"))));
+ new_tbl->curr_analyze_count = new_tbl->CountAtLastAnalyze;
+
+ new_tbl->CountAtLastVacuum =
+ (atol(PQgetvalue(res, row, PQfnumber(res, "n_tup_del"))) +
+ atol(PQgetvalue(res, row, PQfnumber(res, "n_tup_upd"))));
+ new_tbl->curr_vacuum_count = new_tbl->CountAtLastVacuum;
+
+ new_tbl->relfilenode = atoi(PQgetvalue(res, row, PQfnumber(res, "relfilenode")));
+ new_tbl->reltuples = atoi(PQgetvalue(res, row, PQfnumber(res, "reltuples")));
+ new_tbl->relpages = atoi(PQgetvalue(res, row, PQfnumber(res, "relpages")));
+
+ new_tbl->analyze_threshold =
+ args->analyze_base_threshold + args->analyze_scaling_factor * new_tbl->reltuples;
+ new_tbl->vacuum_threshold =
+ args->vacuum_base_threshold + args->vacuum_scaling_factor * new_tbl->reltuples;
+
+ if (args->debug >= 2)
+ print_table_info(new_tbl);
+
+ return new_tbl;
}
/* Set thresholds = base_value + scaling_factor * reltuples
Should be called after a vacuum since vacuum updates values in pg_class */
void
-update_table_thresholds (db_info * dbi, tbl_info * tbl,int vacuum_type)
+update_table_thresholds(db_info * dbi, tbl_info * tbl, int vacuum_type)
{
- PGresult *res = NULL;
- int disconnect = 0;
- char query[128];
-
- if (NULL == dbi->conn) {
- dbi->conn = db_connect (dbi);
- disconnect = 1;
- }
-
- if (NULL != dbi->conn) {
- snprintf (query, sizeof (query), PAGES_QUERY, tbl->relfilenode);
- res = send_query (query, dbi);
- if (NULL != res) {
- tbl->reltuples =
- atoi (PQgetvalue (res, 0, PQfnumber (res, "reltuples")));
- tbl->relpages = atoi (PQgetvalue (res, 0, PQfnumber (res, "relpages")));
-
- /* update vacuum thresholds only of we just did a vacuum analyze */
- if(VACUUM_ANALYZE == vacuum_type)
- {
- tbl->vacuum_threshold =
- (args->vacuum_base_threshold + args->vacuum_scaling_factor * tbl->reltuples);
- tbl->CountAtLastVacuum = tbl->curr_vacuum_count;
- }
-
- /* update analyze thresholds */
- tbl->analyze_threshold =
- (args->analyze_base_threshold + args->analyze_scaling_factor * tbl->reltuples);
- tbl->CountAtLastAnalyze = tbl->curr_analyze_count;
-
- PQclear (res);
-
- /* If the stats collector is reporting fewer updates then we have on record
- then the stats were probably reset, so we need to reset also */
- if ((tbl->curr_analyze_count < tbl->CountAtLastAnalyze) ||
- (tbl->curr_vacuum_count < tbl->CountAtLastVacuum))
- {
- tbl->CountAtLastAnalyze = tbl->curr_analyze_count;
- tbl->CountAtLastVacuum = tbl->curr_vacuum_count;
- }
- }
- }
- if (disconnect)
- db_disconnect (dbi);
+ PGresult *res = NULL;
+ int disconnect = 0;
+ char query[128];
+
+ if (NULL == dbi->conn)
+ {
+ dbi->conn = db_connect(dbi);
+ disconnect = 1;
+ }
+
+ if (NULL != dbi->conn)
+ {
+ snprintf(query, sizeof(query), PAGES_QUERY, tbl->relfilenode);
+ res = send_query(query, dbi);
+ if (NULL != res)
+ {
+ tbl->reltuples =
+ atoi(PQgetvalue(res, 0, PQfnumber(res, "reltuples")));
+ tbl->relpages = atoi(PQgetvalue(res, 0, PQfnumber(res, "relpages")));
+
+ /*
+			 * update vacuum thresholds only if we just did a vacuum
+ * analyze
+ */
+ if (VACUUM_ANALYZE == vacuum_type)
+ {
+ tbl->vacuum_threshold =
+ (args->vacuum_base_threshold + args->vacuum_scaling_factor * tbl->reltuples);
+ tbl->CountAtLastVacuum = tbl->curr_vacuum_count;
+ }
+
+ /* update analyze thresholds */
+ tbl->analyze_threshold =
+ (args->analyze_base_threshold + args->analyze_scaling_factor * tbl->reltuples);
+ tbl->CountAtLastAnalyze = tbl->curr_analyze_count;
+
+ PQclear(res);
+
+ /*
+			 * If the stats collector is reporting fewer updates than we
+			 * have on record, then the stats were probably reset, so we
+ * need to reset also
+ */
+ if ((tbl->curr_analyze_count < tbl->CountAtLastAnalyze) ||
+ (tbl->curr_vacuum_count < tbl->CountAtLastVacuum))
+ {
+ tbl->CountAtLastAnalyze = tbl->curr_analyze_count;
+ tbl->CountAtLastVacuum = tbl->curr_vacuum_count;
+ }
+ }
+ }
+ if (disconnect)
+ db_disconnect(dbi);
}
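The comment before update_table_thresholds() gives the whole rule: threshold = base_value + scaling_factor * reltuples, recomputed from pg_class after a vacuum. A tiny worked example with made-up settings (the shipped defaults live in pg_autovacuum.h, which this hunk does not show):

#include <stdio.h>

int
main(void)
{
	/* hypothetical settings, not the shipped defaults */
	long	vacuum_base = 1000;
	float	vacuum_scale = 2.0;
	long	reltuples = 50000;

	/* threshold = base_value + scaling_factor * reltuples */
	long	vacuum_threshold = vacuum_base + (long) (vacuum_scale * reltuples);

	printf("vacuum after ~%ld row updates/deletes\n", vacuum_threshold);	/* 101000 */
	return 0;
}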
void
-update_table_list (db_info * dbi)
+update_table_list(db_info * dbi)
{
- int disconnect = 0;
- PGresult *res = NULL;
- tbl_info *tbl = NULL;
- Dlelem *tbl_elem = DLGetHead (dbi->table_list);
- int i = 0, t = 0, found_match = 0;
-
- if (NULL == dbi->conn) {
- dbi->conn = db_connect (dbi);
- disconnect = 1;
- }
-
- if (NULL != dbi->conn) {
- /* Get a result set that has all the information
- we will need to both remove tables from the list
- that no longer exist and add tables to the list
- that are new */
- res = send_query (query_table_stats (dbi), dbi);
- t = PQntuples (res);
-
- /* First: use the tbl_list as the outer loop and
- the result set as the inner loop, this will
- determine what tables should be removed */
- while (NULL != tbl_elem) {
- tbl = ((tbl_info *) DLE_VAL (tbl_elem));
- found_match = 0;
-
- for (i = 0; i < t; i++) { /* loop through result set looking for a match */
- if (tbl->relfilenode == atoi (PQgetvalue (res, i, PQfnumber (res, "relfilenode")))) {
- found_match = 1;
- break;
- }
- }
- if (0 == found_match) { /* then we didn't find this tbl_elem in the result set */
- Dlelem *elem_to_remove = tbl_elem;
- tbl_elem = DLGetSucc (tbl_elem);
- remove_table_from_list (elem_to_remove);
- }
- else
- tbl_elem = DLGetSucc (tbl_elem);
- } /* Done removing dropped tables from the table_list */
-
- /* Then loop use result set as outer loop and
- tbl_list as the inner loop to determine
- what tables are new */
- for (i = 0; i < t; i++)
- {
- tbl_elem = DLGetHead (dbi->table_list);
- found_match = 0;
- while (NULL != tbl_elem)
- {
- tbl = ((tbl_info *) DLE_VAL (tbl_elem));
- if (tbl->relfilenode == atoi (PQgetvalue (res, i, PQfnumber (res, "relfilenode"))))
- {
- found_match = 1;
- break;
- }
- tbl_elem = DLGetSucc (tbl_elem);
- }
- if (0 == found_match) /*then we didn't find this result now in the tbl_list */
- {
- DLAddTail (dbi->table_list, DLNewElem (init_table_info (res, i, dbi)));
- if (args->debug >= 1)
- {
- sprintf (logbuffer, "added table: %s.%s", dbi->dbname,
- ((tbl_info *) DLE_VAL (DLGetTail (dbi->table_list)))->table_name);
- log_entry (logbuffer);
- }
- }
- } /* end of for loop that adds tables */
- fflush (LOGOUTPUT);
- PQclear (res);
- res = NULL;
- if (args->debug >= 3) {
- print_table_list (dbi->table_list);
- }
- if (disconnect)
- db_disconnect (dbi);
- }
+ int disconnect = 0;
+ PGresult *res = NULL;
+ tbl_info *tbl = NULL;
+ Dlelem *tbl_elem = DLGetHead(dbi->table_list);
+ int i = 0,
+ t = 0,
+ found_match = 0;
+
+ if (NULL == dbi->conn)
+ {
+ dbi->conn = db_connect(dbi);
+ disconnect = 1;
+ }
+
+ if (NULL != dbi->conn)
+ {
+ /*
+ * Get a result set that has all the information we will need to
+ * both remove tables from the list that no longer exist and add
+ * tables to the list that are new
+ */
+ res = send_query(query_table_stats(dbi), dbi);
+ t = PQntuples(res);
+
+ /*
+ * First: use the tbl_list as the outer loop and the result set as
+		 * the inner loop; this will determine what tables should be
+ * removed
+ */
+ while (NULL != tbl_elem)
+ {
+ tbl = ((tbl_info *) DLE_VAL(tbl_elem));
+ found_match = 0;
+
+ for (i = 0; i < t; i++)
+ { /* loop through result set looking for a
+ * match */
+ if (tbl->relfilenode == atoi(PQgetvalue(res, i, PQfnumber(res, "relfilenode"))))
+ {
+ found_match = 1;
+ break;
+ }
+ }
+ if (0 == found_match)
+ { /* then we didn't find this tbl_elem in
+ * the result set */
+ Dlelem *elem_to_remove = tbl_elem;
+
+ tbl_elem = DLGetSucc(tbl_elem);
+ remove_table_from_list(elem_to_remove);
+ }
+ else
+ tbl_elem = DLGetSucc(tbl_elem);
+ } /* Done removing dropped tables from the
+ * table_list */
+
+ /*
+		 * Then loop, using the result set as the outer loop and tbl_list as
+		 * the inner loop, to determine what tables are new
+ */
+ for (i = 0; i < t; i++)
+ {
+ tbl_elem = DLGetHead(dbi->table_list);
+ found_match = 0;
+ while (NULL != tbl_elem)
+ {
+ tbl = ((tbl_info *) DLE_VAL(tbl_elem));
+ if (tbl->relfilenode == atoi(PQgetvalue(res, i, PQfnumber(res, "relfilenode"))))
+ {
+ found_match = 1;
+ break;
+ }
+ tbl_elem = DLGetSucc(tbl_elem);
+ }
+			if (0 == found_match) /* then we didn't find this result
+												 * row in the tbl_list */
+ {
+ DLAddTail(dbi->table_list, DLNewElem(init_table_info(res, i, dbi)));
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "added table: %s.%s", dbi->dbname,
+ ((tbl_info *) DLE_VAL(DLGetTail(dbi->table_list)))->table_name);
+ log_entry(logbuffer);
+ }
+ }
+ } /* end of for loop that adds tables */
+ fflush(LOGOUTPUT);
+ PQclear(res);
+ res = NULL;
+ if (args->debug >= 3)
+ print_table_list(dbi->table_list);
+ if (disconnect)
+ db_disconnect(dbi);
+ }
}
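update_table_list() above and update_db_list() further down share the same two-pass reconciliation: first walk the existing list and drop entries whose relfilenode (or oid) no longer appears in the query result, then walk the result and append rows that are not yet on the list. A compact sketch of that pattern over plain integer keys, using a hypothetical singly linked list instead of the Dllist API:

#include <stdio.h>
#include <stdlib.h>

typedef struct node
{
	int			key;
	struct node *next;
} node;

static int
in_result(int key, const int *res, int n)
{
	int			i;

	for (i = 0; i < n; i++)
		if (res[i] == key)
			return 1;
	return 0;
}

static int
in_list(int key, const node *head)
{
	for (; head; head = head->next)
		if (head->key == key)
			return 1;
	return 0;
}

/* Pass 1: drop list entries missing from res; pass 2: add new res rows. */
static node *
reconcile(node *head, const int *res, int n)
{
	node	  **link = &head;
	int			i;

	while (*link)
	{
		if (!in_result((*link)->key, res, n))
		{
			node	   *dead = *link;

			*link = dead->next;
			free(dead);
		}
		else
			link = &(*link)->next;
	}
	for (i = 0; i < n; i++)
		if (!in_list(res[i], head))
		{
			node	   *fresh = malloc(sizeof(node));

			fresh->key = res[i];
			fresh->next = head;
			head = fresh;
		}
	return head;
}

int
main(void)
{
	int			res[] = {2, 3, 4};
	node	   *head = NULL,
			   *p;

	head = reconcile(head, res, 3);
	for (p = head; p; p = p->next)
		printf("%d ", p->key);	/* prints: 4 3 2 */
	printf("\n");
	return 0;
}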
/* Free memory, and remove the node from the list */
void
-remove_table_from_list (Dlelem * tbl_to_remove)
+remove_table_from_list(Dlelem *tbl_to_remove)
{
- tbl_info *tbl = ((tbl_info *) DLE_VAL (tbl_to_remove));
-
- if (args->debug >= 1) {
- sprintf (logbuffer, "Removing table: %s from list.", tbl->table_name);
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- }
- DLRemove (tbl_to_remove);
-
- if (tbl->schema_name) {
- free (tbl->schema_name);
- tbl->schema_name = NULL;
- }
- if (tbl->table_name) {
- free (tbl->table_name);
- tbl->table_name = NULL;
- }
- if (tbl) {
- free (tbl);
- tbl = NULL;
- }
- DLFreeElem (tbl_to_remove);
+ tbl_info *tbl = ((tbl_info *) DLE_VAL(tbl_to_remove));
+
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "Removing table: %s from list.", tbl->table_name);
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ }
+ DLRemove(tbl_to_remove);
+
+ if (tbl->schema_name)
+ {
+ free(tbl->schema_name);
+ tbl->schema_name = NULL;
+ }
+ if (tbl->table_name)
+ {
+ free(tbl->table_name);
+ tbl->table_name = NULL;
+ }
+ if (tbl)
+ {
+ free(tbl);
+ tbl = NULL;
+ }
+ DLFreeElem(tbl_to_remove);
}
/* Free the entire table list */
void
-free_tbl_list (Dllist * tbl_list)
+free_tbl_list(Dllist *tbl_list)
{
- Dlelem *tbl_elem = DLGetHead (tbl_list);
- Dlelem *tbl_elem_to_remove = NULL;
- while (NULL != tbl_elem) {
- tbl_elem_to_remove = tbl_elem;
- tbl_elem = DLGetSucc (tbl_elem);
- remove_table_from_list (tbl_elem_to_remove);
- }
- DLFreeList (tbl_list);
+ Dlelem *tbl_elem = DLGetHead(tbl_list);
+ Dlelem *tbl_elem_to_remove = NULL;
+
+ while (NULL != tbl_elem)
+ {
+ tbl_elem_to_remove = tbl_elem;
+ tbl_elem = DLGetSucc(tbl_elem);
+ remove_table_from_list(tbl_elem_to_remove);
+ }
+ DLFreeList(tbl_list);
}
void
-print_table_list (Dllist * table_list)
+print_table_list(Dllist *table_list)
{
- Dlelem *table_elem = DLGetHead (table_list);
- while (NULL != table_elem) {
- print_table_info (((tbl_info *) DLE_VAL (table_elem)));
- table_elem = DLGetSucc (table_elem);
- }
+ Dlelem *table_elem = DLGetHead(table_list);
+
+ while (NULL != table_elem)
+ {
+ print_table_info(((tbl_info *) DLE_VAL(table_elem)));
+ table_elem = DLGetSucc(table_elem);
+ }
}
void
-print_table_info (tbl_info * tbl)
+print_table_info(tbl_info * tbl)
{
- sprintf (logbuffer, " table name: %s.%s", tbl->dbi->dbname, tbl->table_name);
- log_entry (logbuffer);
- sprintf (logbuffer, " relfilenode: %i",tbl->relfilenode);
- log_entry (logbuffer);
- sprintf (logbuffer, " reltuples: %i; relpages: %i", tbl->reltuples, tbl->relpages);
- log_entry (logbuffer);
- sprintf (logbuffer, " curr_analyze_count: %li; cur_delete_count: %li",
- tbl->curr_analyze_count, tbl->curr_vacuum_count);
- log_entry (logbuffer);
- sprintf (logbuffer, " ins_at_last_analyze: %li; del_at_last_vacuum: %li",
- tbl->CountAtLastAnalyze, tbl->CountAtLastVacuum);
- log_entry (logbuffer);
- sprintf (logbuffer, " insert_threshold: %li; delete_threshold %li",
- tbl->analyze_threshold, tbl->vacuum_threshold);
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
+ sprintf(logbuffer, " table name: %s.%s", tbl->dbi->dbname, tbl->table_name);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " relfilenode: %i", tbl->relfilenode);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " reltuples: %i; relpages: %i", tbl->reltuples, tbl->relpages);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " curr_analyze_count: %li; cur_delete_count: %li",
+ tbl->curr_analyze_count, tbl->curr_vacuum_count);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " ins_at_last_analyze: %li; del_at_last_vacuum: %li",
+ tbl->CountAtLastAnalyze, tbl->CountAtLastVacuum);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " insert_threshold: %li; delete_threshold %li",
+ tbl->analyze_threshold, tbl->vacuum_threshold);
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
}
/* End of table Management Functions */
@@ -332,161 +370,188 @@ print_table_info (tbl_info * tbl)
/* init_db_list() creates the db_list and initializes template1 */
Dllist *
-init_db_list ()
+init_db_list()
{
- Dllist *db_list = DLNewList ();
- db_info *dbs = NULL;
- PGresult *res = NULL;
-
- DLAddHead (db_list, DLNewElem (init_dbinfo ((char *) "template1", 0, 0)));
- if (NULL == DLGetHead (db_list)) { /* Make sure init_dbinfo was successful */
- log_entry ("init_db_list(): Error creating db_list for db: template1.");
- fflush (LOGOUTPUT);
- return NULL;
- }
-
- /* We do this just so we can set the proper oid for the template1 database */
- dbs = ((db_info *) DLE_VAL (DLGetHead (db_list)));
- dbs->conn = db_connect (dbs);
-
- if (NULL != dbs->conn) {
- res = send_query (FROZENOID_QUERY, dbs);
- dbs->oid = atoi (PQgetvalue (res, 0, PQfnumber (res, "oid")));
- dbs->age = atoi (PQgetvalue (res, 0, PQfnumber (res, "age")));
- if (res)
- PQclear (res);
-
- if (args->debug >= 2) {
- print_db_list (db_list, 0);
- }
- }
- return db_list;
+ Dllist *db_list = DLNewList();
+ db_info *dbs = NULL;
+ PGresult *res = NULL;
+
+ DLAddHead(db_list, DLNewElem(init_dbinfo((char *) "template1", 0, 0)));
+ if (NULL == DLGetHead(db_list))
+ { /* Make sure init_dbinfo was successful */
+ log_entry("init_db_list(): Error creating db_list for db: template1.");
+ fflush(LOGOUTPUT);
+ return NULL;
+ }
+
+ /*
+ * We do this just so we can set the proper oid for the template1
+ * database
+ */
+ dbs = ((db_info *) DLE_VAL(DLGetHead(db_list)));
+ dbs->conn = db_connect(dbs);
+
+ if (NULL != dbs->conn)
+ {
+ res = send_query(FROZENOID_QUERY, dbs);
+ dbs->oid = atoi(PQgetvalue(res, 0, PQfnumber(res, "oid")));
+ dbs->age = atoi(PQgetvalue(res, 0, PQfnumber(res, "age")));
+ if (res)
+ PQclear(res);
+
+ if (args->debug >= 2)
+ print_db_list(db_list, 0);
+ }
+ return db_list;
}
/* Simple function to create an instance of the dbinfo struct
- Initalizes all the pointers and connects to the database */
+   Initializes all the pointers and connects to the database */
db_info *
-init_dbinfo (char *dbname, int oid, int age)
+init_dbinfo(char *dbname, int oid, int age)
{
- db_info *newdbinfo = (db_info *) malloc (sizeof (db_info));
- newdbinfo->analyze_threshold = args->vacuum_base_threshold;
- newdbinfo->vacuum_threshold = args->analyze_base_threshold;
- newdbinfo->dbname = (char *) malloc (strlen (dbname) + 1);
- strcpy (newdbinfo->dbname, dbname);
- newdbinfo->username = NULL;
- if (NULL != args->user) {
- newdbinfo->username = (char *) malloc (strlen (args->user) + 1);
- strcpy (newdbinfo->username, args->user);
- }
- newdbinfo->password = NULL;
- if (NULL != args->password) {
- newdbinfo->password = (char *) malloc (strlen (args->password) + 1);
- strcpy (newdbinfo->password, args->password);
- }
- newdbinfo->oid = oid;
- newdbinfo->age = age;
- newdbinfo->table_list = DLNewList ();
- newdbinfo->conn = NULL;
-
- if (args->debug >= 2) {
- print_table_list (newdbinfo->table_list);
- }
-
- return newdbinfo;
+ db_info *newdbinfo = (db_info *) malloc(sizeof(db_info));
+
+ newdbinfo->analyze_threshold = args->vacuum_base_threshold;
+ newdbinfo->vacuum_threshold = args->analyze_base_threshold;
+ newdbinfo->dbname = (char *) malloc(strlen(dbname) + 1);
+ strcpy(newdbinfo->dbname, dbname);
+ newdbinfo->username = NULL;
+ if (NULL != args->user)
+ {
+ newdbinfo->username = (char *) malloc(strlen(args->user) + 1);
+ strcpy(newdbinfo->username, args->user);
+ }
+ newdbinfo->password = NULL;
+ if (NULL != args->password)
+ {
+ newdbinfo->password = (char *) malloc(strlen(args->password) + 1);
+ strcpy(newdbinfo->password, args->password);
+ }
+ newdbinfo->oid = oid;
+ newdbinfo->age = age;
+ newdbinfo->table_list = DLNewList();
+ newdbinfo->conn = NULL;
+
+ if (args->debug >= 2)
+ print_table_list(newdbinfo->table_list);
+
+ return newdbinfo;
}
/* Function adds and removes databases from the db_list as appropriate */
void
-update_db_list (Dllist * db_list)
+update_db_list(Dllist *db_list)
{
- int disconnect = 0;
- PGresult *res = NULL;
- Dlelem *db_elem = DLGetHead (db_list);
- db_info *dbi = NULL;
- db_info *dbi_template1 = DLE_VAL (db_elem);
- int i = 0, t = 0, found_match = 0;
-
- if (args->debug >= 2) {
- log_entry ("updating the database list");
- fflush (LOGOUTPUT);
- }
-
- if (NULL == dbi_template1->conn) {
- dbi_template1->conn = db_connect (dbi_template1);
- disconnect = 1;
- }
-
- if (NULL != dbi_template1->conn) {
- /* Get a result set that has all the information
- we will need to both remove databasews from the list
- that no longer exist and add databases to the list
- that are new */
- res = send_query (FROZENOID_QUERY2, dbi_template1);
- t = PQntuples (res);
-
- /* First: use the db_list as the outer loop and
- the result set as the inner loop, this will
- determine what databases should be removed */
- while (NULL != db_elem) {
- dbi = ((db_info *) DLE_VAL (db_elem));
- found_match = 0;
-
- for (i = 0; i < t; i++) { /* loop through result set looking for a match */
- if (dbi->oid == atoi (PQgetvalue (res, i, PQfnumber (res, "oid")))) {
- found_match = 1;
- /* update the dbi->age so that we ensure xid_wraparound won't happen */
- dbi->age = atoi (PQgetvalue (res, i, PQfnumber (res, "age")));
- break;
- }
- }
- if (0 == found_match) { /*then we didn't find this db_elem in the result set */
- Dlelem *elem_to_remove = db_elem;
- db_elem = DLGetSucc (db_elem);
- remove_db_from_list (elem_to_remove);
- }
- else
- db_elem = DLGetSucc (db_elem);
- } /* Done removing dropped databases from the table_list */
-
- /* Then loop use result set as outer loop and
- db_list as the inner loop to determine
- what databases are new */
- for (i = 0; i < t; i++)
- {
- db_elem = DLGetHead (db_list);
- found_match = 0;
- while (NULL != db_elem)
- {
- dbi = ((db_info *) DLE_VAL (db_elem));
- if (dbi->oid == atoi (PQgetvalue (res, i, PQfnumber (res, "oid"))))
- {
- found_match = 1;
- break;
- }
- db_elem = DLGetSucc (db_elem);
- }
- if (0 == found_match) /*then we didn't find this result now in the tbl_list */
- {
- DLAddTail (db_list, DLNewElem (init_dbinfo
- (PQgetvalue(res, i, PQfnumber (res, "datname")),
- atoi (PQgetvalue(res, i, PQfnumber (res, "oid"))),
- atoi (PQgetvalue(res, i, PQfnumber (res, "age"))))));
- if (args->debug >= 1)
- {
- sprintf (logbuffer, "added database: %s",((db_info *) DLE_VAL (DLGetTail (db_list)))->dbname);
- log_entry (logbuffer);
- }
- }
- } /* end of for loop that adds tables */
- fflush (LOGOUTPUT);
- PQclear (res);
- res = NULL;
- if (args->debug >= 3) {
- print_db_list (db_list, 0);
- }
- if (disconnect)
- db_disconnect (dbi_template1);
- }
+ int disconnect = 0;
+ PGresult *res = NULL;
+ Dlelem *db_elem = DLGetHead(db_list);
+ db_info *dbi = NULL;
+ db_info *dbi_template1 = DLE_VAL(db_elem);
+ int i = 0,
+ t = 0,
+ found_match = 0;
+
+ if (args->debug >= 2)
+ {
+ log_entry("updating the database list");
+ fflush(LOGOUTPUT);
+ }
+
+ if (NULL == dbi_template1->conn)
+ {
+ dbi_template1->conn = db_connect(dbi_template1);
+ disconnect = 1;
+ }
+
+ if (NULL != dbi_template1->conn)
+ {
+ /*
+ * Get a result set that has all the information we will need to
+	 * both remove databases from the list that no longer exist and
+ * add databases to the list that are new
+ */
+ res = send_query(FROZENOID_QUERY2, dbi_template1);
+ t = PQntuples(res);
+
+ /*
+ * First: use the db_list as the outer loop and the result set as
+		 * the inner loop; this will determine what databases should be
+ * removed
+ */
+ while (NULL != db_elem)
+ {
+ dbi = ((db_info *) DLE_VAL(db_elem));
+ found_match = 0;
+
+ for (i = 0; i < t; i++)
+ { /* loop through result set looking for a
+ * match */
+ if (dbi->oid == atoi(PQgetvalue(res, i, PQfnumber(res, "oid"))))
+ {
+ found_match = 1;
+
+ /*
+ * update the dbi->age so that we ensure
+ * xid_wraparound won't happen
+ */
+ dbi->age = atoi(PQgetvalue(res, i, PQfnumber(res, "age")));
+ break;
+ }
+ }
+ if (0 == found_match)
+ { /* then we didn't find this db_elem in the
+ * result set */
+ Dlelem *elem_to_remove = db_elem;
+
+ db_elem = DLGetSucc(db_elem);
+ remove_db_from_list(elem_to_remove);
+ }
+ else
+ db_elem = DLGetSucc(db_elem);
+ } /* Done removing dropped databases from
+ * the table_list */
+
+ /*
+		 * Then loop, using the result set as the outer loop and db_list as
+		 * the inner loop, to determine what databases are new
+ */
+ for (i = 0; i < t; i++)
+ {
+ db_elem = DLGetHead(db_list);
+ found_match = 0;
+ while (NULL != db_elem)
+ {
+ dbi = ((db_info *) DLE_VAL(db_elem));
+ if (dbi->oid == atoi(PQgetvalue(res, i, PQfnumber(res, "oid"))))
+ {
+ found_match = 1;
+ break;
+ }
+ db_elem = DLGetSucc(db_elem);
+ }
+			if (0 == found_match) /* then we didn't find this result
+												 * row in the db_list */
+ {
+ DLAddTail(db_list, DLNewElem(init_dbinfo
+ (PQgetvalue(res, i, PQfnumber(res, "datname")),
+ atoi(PQgetvalue(res, i, PQfnumber(res, "oid"))),
+ atoi(PQgetvalue(res, i, PQfnumber(res, "age"))))));
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "added database: %s", ((db_info *) DLE_VAL(DLGetTail(db_list)))->dbname);
+ log_entry(logbuffer);
+ }
+ }
+ } /* end of for loop that adds tables */
+ fflush(LOGOUTPUT);
+ PQclear(res);
+ res = NULL;
+ if (args->debug >= 3)
+ print_db_list(db_list, 0);
+ if (disconnect)
+ db_disconnect(dbi_template1);
+ }
}
/* xid_wraparound_check
@@ -504,101 +569,116 @@ return 0 if nothing happened,
return 1 if the database needed a database wide vacuum
*/
int
-xid_wraparound_check (db_info * dbi)
+xid_wraparound_check(db_info * dbi)
{
- /* FIXME: should probably do something better here so that we don't vacuum all the
- databases on the server at the same time. We have 500million xacts to work with so
- we should be able to spread the load of full database vacuums a bit */
- if (1500000000 < dbi->age) {
- PGresult *res = NULL;
- res = send_query ("vacuum", dbi);
- /* FIXME: Perhaps should add a check for PQ_COMMAND_OK */
- PQclear (res);
- return 1;
- }
- return 0;
+ /*
+ * FIXME: should probably do something better here so that we don't
+ * vacuum all the databases on the server at the same time. We have
+	 * 500 million xacts to work with, so we should be able to spread the
+ * load of full database vacuums a bit
+ */
+ if (1500000000 < dbi->age)
+ {
+ PGresult *res = NULL;
+
+ res = send_query("vacuum", dbi);
+ /* FIXME: Perhaps should add a check for PQ_COMMAND_OK */
+ PQclear(res);
+ return 1;
+ }
+ return 0;
}
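xid_wraparound_check() boils down to one hard-coded cutoff: once a database's transaction-id age exceeds 1500000000, it gets an unconditional database-wide VACUUM. A trimmed sketch of just that decision (issuing the actual query is left out):

#include <stdio.h>

#define XID_AGE_LIMIT 1500000000L	/* same cutoff as the check above */

/* Returns 1 when a database-wide vacuum would be triggered. */
static int
needs_wraparound_vacuum(long age)
{
	return age > XID_AGE_LIMIT;
}

int
main(void)
{
	printf("%d\n", needs_wraparound_vacuum(1400000000L));	/* 0 */
	printf("%d\n", needs_wraparound_vacuum(1600000000L));	/* 1 */
	return 0;
}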
/* Close DB connection, free memory, and remove the node from the list */
void
-remove_db_from_list (Dlelem * db_to_remove)
+remove_db_from_list(Dlelem *db_to_remove)
{
- db_info *dbi = ((db_info *) DLE_VAL (db_to_remove));
-
- if (args->debug >= 1) {
- sprintf (logbuffer, "Removing db: %s from list.", dbi->dbname);
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- }
- DLRemove (db_to_remove);
- if (dbi->conn)
- db_disconnect (dbi);
- if (dbi->dbname) {
- free (dbi->dbname);
- dbi->dbname = NULL;
- }
- if (dbi->username) {
- free (dbi->username);
- dbi->username = NULL;
- }
- if (dbi->password) {
- free (dbi->password);
- dbi->password = NULL;
- }
- if (dbi->table_list) {
- free_tbl_list (dbi->table_list);
- dbi->table_list = NULL;
- }
- if (dbi) {
- free (dbi);
- dbi = NULL;
- }
- DLFreeElem (db_to_remove);
+ db_info *dbi = ((db_info *) DLE_VAL(db_to_remove));
+
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "Removing db: %s from list.", dbi->dbname);
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ }
+ DLRemove(db_to_remove);
+ if (dbi->conn)
+ db_disconnect(dbi);
+ if (dbi->dbname)
+ {
+ free(dbi->dbname);
+ dbi->dbname = NULL;
+ }
+ if (dbi->username)
+ {
+ free(dbi->username);
+ dbi->username = NULL;
+ }
+ if (dbi->password)
+ {
+ free(dbi->password);
+ dbi->password = NULL;
+ }
+ if (dbi->table_list)
+ {
+ free_tbl_list(dbi->table_list);
+ dbi->table_list = NULL;
+ }
+ if (dbi)
+ {
+ free(dbi);
+ dbi = NULL;
+ }
+ DLFreeElem(db_to_remove);
}
/* Function is called before program exit to free all memory
mostly it's just to keep valgrind happy */
void
-free_db_list (Dllist * db_list)
+free_db_list(Dllist *db_list)
{
- Dlelem *db_elem = DLGetHead (db_list);
- Dlelem *db_elem_to_remove = NULL;
- while (NULL != db_elem) {
- db_elem_to_remove = db_elem;
- db_elem = DLGetSucc (db_elem);
- remove_db_from_list (db_elem_to_remove);
- db_elem_to_remove = NULL;
- }
- DLFreeList (db_list);
+ Dlelem *db_elem = DLGetHead(db_list);
+ Dlelem *db_elem_to_remove = NULL;
+
+ while (NULL != db_elem)
+ {
+ db_elem_to_remove = db_elem;
+ db_elem = DLGetSucc(db_elem);
+ remove_db_from_list(db_elem_to_remove);
+ db_elem_to_remove = NULL;
+ }
+ DLFreeList(db_list);
}
void
-print_db_list (Dllist * db_list, int print_table_lists)
+print_db_list(Dllist *db_list, int print_table_lists)
{
- Dlelem *db_elem = DLGetHead (db_list);
- while (NULL != db_elem) {
- print_db_info (((db_info *) DLE_VAL (db_elem)), print_table_lists);
- db_elem = DLGetSucc (db_elem);
- }
+ Dlelem *db_elem = DLGetHead(db_list);
+
+ while (NULL != db_elem)
+ {
+ print_db_info(((db_info *) DLE_VAL(db_elem)), print_table_lists);
+ db_elem = DLGetSucc(db_elem);
+ }
}
void
-print_db_info (db_info * dbi, int print_tbl_list)
+print_db_info(db_info * dbi, int print_tbl_list)
{
- sprintf (logbuffer, "dbname: %s Username %s Passwd %s", dbi->dbname,
- dbi->username, dbi->password);
- log_entry (logbuffer);
- sprintf (logbuffer, " oid %i InsertThresh: %i DeleteThresh: %i", dbi->oid,
- dbi->analyze_threshold, dbi->vacuum_threshold);
- log_entry (logbuffer);
- if (NULL != dbi->conn)
- log_entry (" conn is valid, we are connected");
- else
- log_entry (" conn is null, we are not connected.");
-
- fflush (LOGOUTPUT);
- if (0 < print_tbl_list)
- print_table_list (dbi->table_list);
+ sprintf(logbuffer, "dbname: %s Username %s Passwd %s", dbi->dbname,
+ dbi->username, dbi->password);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " oid %i InsertThresh: %i DeleteThresh: %i", dbi->oid,
+ dbi->analyze_threshold, dbi->vacuum_threshold);
+ log_entry(logbuffer);
+ if (NULL != dbi->conn)
+ log_entry(" conn is valid, we are connected");
+ else
+ log_entry(" conn is null, we are not connected.");
+
+ fflush(LOGOUTPUT);
+ if (0 < print_tbl_list)
+ print_table_list(dbi->table_list);
}
/* End of DB List Management Function */
@@ -607,406 +687,475 @@ print_db_info (db_info * dbi, int print_tbl_list)
char *
-query_table_stats (db_info * dbi)
+query_table_stats(db_info * dbi)
{
- if (!strcmp (dbi->dbname, "template1")) /* Use template1 to monitor the system tables */
- return (char *) TABLE_STATS_ALL;
- else
- return (char *) TABLE_STATS_USER;
+ if (!strcmp(dbi->dbname, "template1")) /* Use template1 to
+ * monitor the system
+ * tables */
+ return (char *) TABLE_STATS_ALL;
+ else
+ return (char *) TABLE_STATS_USER;
}
/* Perhaps add some test to this function to make sure that the stats we need are available */
PGconn *
-db_connect (db_info * dbi)
+db_connect(db_info * dbi)
{
- PGconn *db_conn =
- PQsetdbLogin (args->host, args->port, NULL, NULL, dbi->dbname,
- dbi->username, dbi->password);
-
- if (CONNECTION_OK != PQstatus (db_conn)) {
- sprintf (logbuffer, "Failed connection to database %s with error: %s.",
- dbi->dbname, PQerrorMessage (db_conn));
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- PQfinish (db_conn);
- db_conn = NULL;
- }
- return db_conn;
-} /* end of db_connect() */
+ PGconn *db_conn =
+ PQsetdbLogin(args->host, args->port, NULL, NULL, dbi->dbname,
+ dbi->username, dbi->password);
+
+ if (CONNECTION_OK != PQstatus(db_conn))
+ {
+ sprintf(logbuffer, "Failed connection to database %s with error: %s.",
+ dbi->dbname, PQerrorMessage(db_conn));
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ PQfinish(db_conn);
+ db_conn = NULL;
+ }
+ return db_conn;
+} /* end of db_connect() */
void
-db_disconnect (db_info * dbi)
+db_disconnect(db_info * dbi)
{
- if (NULL != dbi->conn) {
- PQfinish (dbi->conn);
- dbi->conn = NULL;
- }
+ if (NULL != dbi->conn)
+ {
+ PQfinish(dbi->conn);
+ dbi->conn = NULL;
+ }
}
int
-check_stats_enabled (db_info * dbi)
+check_stats_enabled(db_info * dbi)
{
- PGresult *res = NULL;
- int ret = 0;
- res = send_query ("show stats_row_level", dbi);
- ret =
- strcmp ("on", PQgetvalue (res, 0, PQfnumber (res, "stats_row_level")));
- PQclear (res);
- return ret;
+ PGresult *res = NULL;
+ int ret = 0;
+
+ res = send_query("show stats_row_level", dbi);
+ ret =
+ strcmp("on", PQgetvalue(res, 0, PQfnumber(res, "stats_row_level")));
+ PQclear(res);
+ return ret;
}
PGresult *
-send_query (const char *query, db_info * dbi)
+send_query(const char *query, db_info * dbi)
{
- PGresult *res;
-
- if (NULL == dbi->conn)
- return NULL;
-
- res = PQexec (dbi->conn, query);
-
- if (!res) {
- sprintf (logbuffer,
- "Fatal error occured while sending query (%s) to database %s",
- query, dbi->dbname);
- log_entry (logbuffer);
- sprintf (logbuffer, "The error is [%s]", PQresultErrorMessage (res));
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- return NULL;
- }
- if (PQresultStatus (res) != PGRES_TUPLES_OK
- && PQresultStatus (res) != PGRES_COMMAND_OK) {
- sprintf (logbuffer,
- "Can not refresh statistics information from the database %s.",
- dbi->dbname);
- log_entry (logbuffer);
- sprintf (logbuffer, "The error is [%s]", PQresultErrorMessage (res));
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- PQclear (res);
- return NULL;
- }
- return res;
-} /* End of send_query() */
+ PGresult *res;
+
+ if (NULL == dbi->conn)
+ return NULL;
+
+ res = PQexec(dbi->conn, query);
+
+ if (!res)
+ {
+ sprintf(logbuffer,
+				"Fatal error occurred while sending query (%s) to database %s",
+ query, dbi->dbname);
+ log_entry(logbuffer);
+ sprintf(logbuffer, "The error is [%s]", PQresultErrorMessage(res));
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ return NULL;
+ }
+ if (PQresultStatus(res) != PGRES_TUPLES_OK
+ && PQresultStatus(res) != PGRES_COMMAND_OK)
+ {
+ sprintf(logbuffer,
+ "Can not refresh statistics information from the database %s.",
+ dbi->dbname);
+ log_entry(logbuffer);
+ sprintf(logbuffer, "The error is [%s]", PQresultErrorMessage(res));
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ PQclear(res);
+ return NULL;
+ }
+ return res;
+} /* End of send_query() */
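Callers of send_query() read the returned PGresult with the usual libpq accessors and then PQclear() it, just as the surrounding code does with PQntuples()/PQfnumber()/PQgetvalue(). A minimal standalone example of that consumption pattern; the connection string and query are placeholders, not taken from pg_autovacuum:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	/* placeholder connection string; adjust for a real server */
	PGconn	   *conn = PQconnectdb("dbname=template1");
	PGresult   *res;
	int			i;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	res = PQexec(conn, "SELECT oid, datname FROM pg_database");
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
	{
		for (i = 0; i < PQntuples(res); i++)
			printf("%s\t%s\n",
				   PQgetvalue(res, i, PQfnumber(res, "oid")),
				   PQgetvalue(res, i, PQfnumber(res, "datname")));
	}
	else
		fprintf(stderr, "query failed: %s", PQresultErrorMessage(res));

	PQclear(res);
	PQfinish(conn);
	return 0;
}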
void
-free_cmd_args ()
+free_cmd_args()
{
- if (NULL != args) {
- if (NULL != args->user)
- free (args->user);
- if (NULL != args->user)
- free (args->password);
- free (args);
- }
+ if (NULL != args)
+ {
+ if (NULL != args->user)
+ free(args->user);
+		if (NULL != args->password)
+ free(args->password);
+ free(args);
+ }
}
cmd_args *
-get_cmd_args (int argc, char *argv[])
+get_cmd_args(int argc, char *argv[])
{
- int c;
- args = (cmd_args *) malloc (sizeof (cmd_args));
- args->sleep_base_value = SLEEPBASEVALUE;
- args->sleep_scaling_factor = SLEEPSCALINGFACTOR;
- args->vacuum_base_threshold = VACBASETHRESHOLD;
- args->vacuum_scaling_factor = VACSCALINGFACTOR;
- args->analyze_base_threshold = -1;
- args->analyze_scaling_factor = -1;
- args->debug = AUTOVACUUM_DEBUG;
- args->daemonize = 0;
-
- /* Fixme: Should add some sanity checking such as positive integer values etc */
- while (-1 != (c = getopt (argc, argv, "s:S:v:V:a:A:d:U:P:H:L:p:hD"))) {
- switch (c) {
- case 's':
- args->sleep_base_value = atoi (optarg);
- break;
- case 'S':
- args->sleep_scaling_factor = atof (optarg);
- break;
- case 'v':
- args->vacuum_base_threshold = atoi (optarg);
- break;
- case 'V':
- args->vacuum_scaling_factor = atof (optarg);
- break;
- case 'a':
- args->analyze_base_threshold = atoi (optarg);
- break;
- case 'A':
- args->analyze_scaling_factor = atof (optarg);
- break;
- case 'D':
- args->daemonize++;
- break;
- case 'd':
- args->debug = atoi (optarg);
- break;
- case 'U':
- args->user = optarg;
- break;
- case 'P':
- args->password = optarg;
- break;
- case 'H':
- args->host = optarg;
- break;
- case 'L':
- args->logfile = optarg;
- break;
- case 'p':
- args->port = optarg;
- break;
- case 'h':
- usage();
- exit (0);
- default:
- /* It's here that we know that things are invalid...
- It is not forcibly an error to call usage */
- fprintf (stderr, "Error: Invalid Command Line Options.\n");
- usage();
- exit (1);
- break;
- }
- /* if values for insert thresholds are not specified,
- then they default to 1/2 of the delete values */
- if(-1 == args->analyze_base_threshold)
- args->analyze_base_threshold = args->vacuum_base_threshold / 2;
- if(-1 == args->analyze_scaling_factor)
- args->analyze_scaling_factor = args->vacuum_scaling_factor / 2;
- }
- return args;
+ int c;
+
+ args = (cmd_args *) malloc(sizeof(cmd_args));
+ args->sleep_base_value = SLEEPBASEVALUE;
+ args->sleep_scaling_factor = SLEEPSCALINGFACTOR;
+ args->vacuum_base_threshold = VACBASETHRESHOLD;
+ args->vacuum_scaling_factor = VACSCALINGFACTOR;
+ args->analyze_base_threshold = -1;
+ args->analyze_scaling_factor = -1;
+ args->debug = AUTOVACUUM_DEBUG;
+ args->daemonize = 0;
+
+ /*
+ * Fixme: Should add some sanity checking such as positive integer
+ * values etc
+ */
+ while (-1 != (c = getopt(argc, argv, "s:S:v:V:a:A:d:U:P:H:L:p:hD")))
+ {
+ switch (c)
+ {
+ case 's':
+ args->sleep_base_value = atoi(optarg);
+ break;
+ case 'S':
+ args->sleep_scaling_factor = atof(optarg);
+ break;
+ case 'v':
+ args->vacuum_base_threshold = atoi(optarg);
+ break;
+ case 'V':
+ args->vacuum_scaling_factor = atof(optarg);
+ break;
+ case 'a':
+ args->analyze_base_threshold = atoi(optarg);
+ break;
+ case 'A':
+ args->analyze_scaling_factor = atof(optarg);
+ break;
+ case 'D':
+ args->daemonize++;
+ break;
+ case 'd':
+ args->debug = atoi(optarg);
+ break;
+ case 'U':
+ args->user = optarg;
+ break;
+ case 'P':
+ args->password = optarg;
+ break;
+ case 'H':
+ args->host = optarg;
+ break;
+ case 'L':
+ args->logfile = optarg;
+ break;
+ case 'p':
+ args->port = optarg;
+ break;
+ case 'h':
+ usage();
+ exit(0);
+ default:
+
+ /*
+ * It's here that we know that things are invalid... It is
+				 * not necessarily an error to call usage
+ */
+ fprintf(stderr, "Error: Invalid Command Line Options.\n");
+ usage();
+ exit(1);
+ break;
+ }
+
+ /*
+ * if values for insert thresholds are not specified, then they
+ * default to 1/2 of the delete values
+ */
+ if (-1 == args->analyze_base_threshold)
+ args->analyze_base_threshold = args->vacuum_base_threshold / 2;
+ if (-1 == args->analyze_scaling_factor)
+ args->analyze_scaling_factor = args->vacuum_scaling_factor / 2;
+ }
+ return args;
}
-void usage()
+void
+usage()
{
- int i=0;
- float f=0;
- fprintf (stderr, "usage: pg_autovacuum \n");
- fprintf (stderr, " [-D] Daemonize (Detach from tty and run in the background)\n");
- i=AUTOVACUUM_DEBUG;
- fprintf (stderr, " [-d] debug (debug level=0,1,2,3; default=%i)\n",i);
-
- i=SLEEPBASEVALUE;
- fprintf (stderr, " [-s] sleep base value (default=%i)\n",i);
- f=SLEEPSCALINGFACTOR;
- fprintf (stderr, " [-S] sleep scaling factor (default=%f)\n",f);
-
- i=VACBASETHRESHOLD;
- fprintf (stderr, " [-v] vacuum base threshold (default=%i)\n",i);
- f=VACSCALINGFACTOR;
- fprintf (stderr, " [-V] vacuum scaling factor (default=%f)\n",f);
- i=i/2;
- fprintf (stderr, " [-a] analyze base threshold (default=%i)\n",i);
- f=f/2;
- fprintf (stderr, " [-A] analyze scaling factor (default=%f)\n",f);
-
- fprintf (stderr, " [-L] logfile (default=none)\n");
-
- fprintf (stderr, " [-U] username (libpq default)\n");
- fprintf (stderr, " [-P] password (libpq default)\n");
- fprintf (stderr, " [-H] host (libpq default)\n");
- fprintf (stderr, " [-p] port (libpq default)\n");
-
- fprintf (stderr, " [-h] help (Show this output)\n");
+ int i = 0;
+ float f = 0;
+
+ fprintf(stderr, "usage: pg_autovacuum \n");
+ fprintf(stderr, " [-D] Daemonize (Detach from tty and run in the background)\n");
+ i = AUTOVACUUM_DEBUG;
+ fprintf(stderr, " [-d] debug (debug level=0,1,2,3; default=%i)\n", i);
+
+ i = SLEEPBASEVALUE;
+ fprintf(stderr, " [-s] sleep base value (default=%i)\n", i);
+ f = SLEEPSCALINGFACTOR;
+ fprintf(stderr, " [-S] sleep scaling factor (default=%f)\n", f);
+
+ i = VACBASETHRESHOLD;
+ fprintf(stderr, " [-v] vacuum base threshold (default=%i)\n", i);
+ f = VACSCALINGFACTOR;
+ fprintf(stderr, " [-V] vacuum scaling factor (default=%f)\n", f);
+ i = i / 2;
+ fprintf(stderr, " [-a] analyze base threshold (default=%i)\n", i);
+ f = f / 2;
+ fprintf(stderr, " [-A] analyze scaling factor (default=%f)\n", f);
+
+ fprintf(stderr, " [-L] logfile (default=none)\n");
+
+ fprintf(stderr, " [-U] username (libpq default)\n");
+ fprintf(stderr, " [-P] password (libpq default)\n");
+ fprintf(stderr, " [-H] host (libpq default)\n");
+ fprintf(stderr, " [-p] port (libpq default)\n");
+
+ fprintf(stderr, " [-h] help (Show this output)\n");
}
void
-print_cmd_args ()
+print_cmd_args()
{
- sprintf (logbuffer, "Printing command_args");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->host=%s", (args->host) ? args->host : "(null)");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->port=%s", (args->port) ? args->port : "(null)");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->user=%s", (args->user) ? args->user : "(null)");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->password=%s",(args->password) ? args->password : "(null)");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->logfile=%s",(args->logfile) ? args->logfile : "(null)");
- log_entry (logbuffer);
- sprintf (logbuffer, " args->daemonize=%i",args->daemonize);
- log_entry (logbuffer);
-
- sprintf (logbuffer, " args->sleep_base_value=%i", args->sleep_base_value);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->sleep_scaling_factor=%f",args->sleep_scaling_factor);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->vacuum_base_threshold=%i",args->vacuum_base_threshold);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->vacuum_scaling_factor=%f",args->vacuum_scaling_factor);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->analyze_base_threshold=%i",args->analyze_base_threshold);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->analyze_scaling_factor=%f",args->analyze_scaling_factor);
- log_entry (logbuffer);
- sprintf (logbuffer, " args->debug=%i", args->debug);
- log_entry (logbuffer);
-
- fflush (LOGOUTPUT);
+ sprintf(logbuffer, "Printing command_args");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->host=%s", (args->host) ? args->host : "(null)");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->port=%s", (args->port) ? args->port : "(null)");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->user=%s", (args->user) ? args->user : "(null)");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->password=%s", (args->password) ? args->password : "(null)");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->logfile=%s", (args->logfile) ? args->logfile : "(null)");
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->daemonize=%i", args->daemonize);
+ log_entry(logbuffer);
+
+ sprintf(logbuffer, " args->sleep_base_value=%i", args->sleep_base_value);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->sleep_scaling_factor=%f", args->sleep_scaling_factor);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->vacuum_base_threshold=%i", args->vacuum_base_threshold);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->vacuum_scaling_factor=%f", args->vacuum_scaling_factor);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->analyze_base_threshold=%i", args->analyze_base_threshold);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->analyze_scaling_factor=%f", args->analyze_scaling_factor);
+ log_entry(logbuffer);
+ sprintf(logbuffer, " args->debug=%i", args->debug);
+ log_entry(logbuffer);
+
+ fflush(LOGOUTPUT);
}
/* Beginning of AutoVacuum Main Program */
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- char buf[256];
- int j = 0, loops = 0;
-/* int numInserts, numDeletes, */
- int sleep_secs;
- Dllist *db_list;
- Dlelem *db_elem, *tbl_elem;
- db_info *dbs;
- tbl_info *tbl;
- PGresult *res=NULL;
- long long diff = 0;
- struct timeval now, then;
-
- args = get_cmd_args (argc, argv); /* Get Command Line Args and put them in the args struct */
-
- /* Dameonize if requested */
- if (1 == args->daemonize){ daemonize(); }
-
- if (args->logfile) {
- LOGOUTPUT = fopen (args->logfile, "a");
- if (!LOGOUTPUT) {
- fprintf (stderr, "Could not open log file - [%s]\n", args->logfile);
- exit(-1);
- }
- }
- else {
- LOGOUTPUT = stderr;
- }
- if (args->debug >= 2) {
- print_cmd_args ();
- }
-
- /* Init the db list with template1 */
- db_list = init_db_list ();
- if (NULL == db_list)
- return 1;
-
- if (0 != check_stats_enabled (((db_info *) DLE_VAL (DLGetHead (db_list))))) {
- log_entry ("Error: GUC variable stats_row_level must be enabled.");
- log_entry (" Please fix the problems and try again.");
- fflush (LOGOUTPUT);
-
- exit (1);
- }
-
- gettimeofday (&then, 0); /* for use later to caluculate sleep time */
-
- while (1) { /* Main Loop */
- db_elem = DLGetHead (db_list); /* Reset cur_db_node to the beginning of the db_list */
-
- dbs = ((db_info *) DLE_VAL (db_elem)); /* get pointer to cur_db's db_info struct */
- if (NULL == dbs->conn) {
- dbs->conn = db_connect (dbs);
- if (NULL == dbs->conn) { /* Serious problem: We can't connect to template1 */
- log_entry ("Error: Cannot connect to template1, exiting.");
- fflush (LOGOUTPUT);
- fclose (LOGOUTPUT);
- exit (1);
- }
- }
-
- if (0 == (loops % UPDATE_INTERVAL)) /* Update the list if it's time */
- update_db_list (db_list); /* Add and remove databases from the list */
-
- while (NULL != db_elem) { /* Loop through databases in list */
- dbs = ((db_info *) DLE_VAL (db_elem)); /* get pointer to cur_db's db_info struct */
- if (NULL == dbs->conn)
- dbs->conn = db_connect (dbs);
-
- if (NULL != dbs->conn) {
- if (0 == (loops % UPDATE_INTERVAL)) /* Update the list if it's time */
- update_table_list (dbs); /* Add and remove tables from the list */
-
- if (0 == xid_wraparound_check (dbs));
- {
- res = send_query (query_table_stats (dbs), dbs); /* Get an updated snapshot of this dbs table stats */
- for (j = 0; j < PQntuples (res); j++) { /* loop through result set */
- tbl_elem = DLGetHead (dbs->table_list); /* Reset tbl_elem to top of dbs->table_list */
- while (NULL != tbl_elem) { /* Loop through tables in list */
- tbl = ((tbl_info *) DLE_VAL (tbl_elem)); /* set tbl_info = current_table */
- if (tbl->relfilenode == atoi (PQgetvalue(res, j, PQfnumber (res, "relfilenode")))) {
- tbl->curr_analyze_count =
- (atol (PQgetvalue (res, j, PQfnumber (res, "n_tup_ins"))) +
- atol (PQgetvalue (res, j, PQfnumber (res, "n_tup_upd"))) +
- atol (PQgetvalue (res, j, PQfnumber (res, "n_tup_del"))));
- tbl->curr_vacuum_count =
- (atol (PQgetvalue (res, j, PQfnumber (res, "n_tup_del"))) +
- atol (PQgetvalue (res, j, PQfnumber (res, "n_tup_upd"))));
-
- /* Check numDeletes to see if we need to vacuum, if so:
- Run vacuum analyze (adding analyze is small so we might as well)
- Update table thresholds and related information
- if numDeletes is not big enough for vacuum then check numInserts for analyze */
- if ((tbl->curr_vacuum_count - tbl->CountAtLastVacuum) >= tbl->vacuum_threshold)
- {
- snprintf (buf, sizeof (buf), "vacuum analyze %s", tbl->table_name);
- if (args->debug >= 1) {
- sprintf (logbuffer, "Performing: %s", buf);
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- }
- send_query (buf, dbs);
- update_table_thresholds (dbs, tbl, VACUUM_ANALYZE);
- if (args->debug >= 2) {print_table_info (tbl);}
- }
- else if ((tbl->curr_analyze_count - tbl->CountAtLastAnalyze) >= tbl->analyze_threshold)
- {
- snprintf (buf, sizeof (buf), "analyze %s", tbl->table_name);
- if (args->debug >= 1) {
- sprintf (logbuffer, "Performing: %s", buf);
- log_entry (logbuffer);
- fflush (LOGOUTPUT);
- }
- send_query (buf, dbs);
- update_table_thresholds (dbs, tbl, ANALYZE_ONLY);
- if (args->debug >= 2) { print_table_info (tbl); }
- }
-
- break; /* once we have found a match, no need to keep checking. */
- }
- /* Advance the table pointers for the next loop */
- tbl_elem = DLGetSucc (tbl_elem);
-
- } /* end for table while loop */
- } /* end for j loop (tuples in PGresult) */
- } /* close of if(xid_wraparound_check()) */
- /* Done working on this db, Clean up, then advance cur_db */
- PQclear (res);
- res = NULL;
- db_disconnect (dbs);
- }
- db_elem = DLGetSucc (db_elem); /* move on to next DB regardless */
- } /* end of db_list while loop */
-
- /* Figure out how long to sleep etc ... */
- gettimeofday (&now, 0);
- diff = (now.tv_sec - then.tv_sec) * 1000000 + (now.tv_usec - then.tv_usec);
-
- sleep_secs = args->sleep_base_value + args->sleep_scaling_factor * diff / 1000000;
- loops++;
- if (args->debug >= 2) {
- sprintf (logbuffer,
- "%i All DBs checked in: %lld usec, will sleep for %i secs.",
- loops, diff, sleep_secs);
- log_entry (logbuffer);
- }
-
- sleep (sleep_secs); /* Larger Pause between outer loops */
-
- gettimeofday (&then, 0); /* Reset time counter */
-
- } /* end of while loop */
-
- /* program is exiting, this should never run, but is here to make compiler / valgrind happy */
- free_db_list (db_list);
- free_cmd_args ();
- return EXIT_SUCCESS;
+ char buf[256];
+ int j = 0,
+ loops = 0;
+
+/* int numInserts, numDeletes, */
+ int sleep_secs;
+ Dllist *db_list;
+ Dlelem *db_elem,
+ *tbl_elem;
+ db_info *dbs;
+ tbl_info *tbl;
+ PGresult *res = NULL;
+ long long diff = 0;
+ struct timeval now,
+ then;
+
+ args = get_cmd_args(argc, argv); /* Get Command Line Args and put
+ * them in the args struct */
+
+ /* Daemonize if requested */
+ if (1 == args->daemonize)
+ daemonize();
+
+ if (args->logfile)
+ {
+ LOGOUTPUT = fopen(args->logfile, "a");
+ if (!LOGOUTPUT)
+ {
+ fprintf(stderr, "Could not open log file - [%s]\n", args->logfile);
+ exit(-1);
+ }
+ }
+ else
+ LOGOUTPUT = stderr;
+ if (args->debug >= 2)
+ print_cmd_args();
+
+ /* Init the db list with template1 */
+ db_list = init_db_list();
+ if (NULL == db_list)
+ return 1;
+
+ if (0 != check_stats_enabled(((db_info *) DLE_VAL(DLGetHead(db_list)))))
+ {
+ log_entry("Error: GUC variable stats_row_level must be enabled.");
+ log_entry(" Please fix the problems and try again.");
+ fflush(LOGOUTPUT);
+
+ exit(1);
+ }
+
+ gettimeofday(&then, 0); /* for use later to calculate sleep time */
+
+ while (1)
+ { /* Main Loop */
+ db_elem = DLGetHead(db_list); /* Reset cur_db_node to the
+ * beginning of the db_list */
+
+ dbs = ((db_info *) DLE_VAL(db_elem)); /* get pointer to cur_db's
+ * db_info struct */
+ if (NULL == dbs->conn)
+ {
+ dbs->conn = db_connect(dbs);
+ if (NULL == dbs->conn)
+ { /* Serious problem: We can't connect to
+ * template1 */
+ log_entry("Error: Cannot connect to template1, exiting.");
+ fflush(LOGOUTPUT);
+ fclose(LOGOUTPUT);
+ exit(1);
+ }
+ }
+
+ if (0 == (loops % UPDATE_INTERVAL)) /* Update the list if it's
+ * time */
+ update_db_list(db_list); /* Add and remove databases from
+ * the list */
+
+ while (NULL != db_elem)
+ { /* Loop through databases in list */
+ dbs = ((db_info *) DLE_VAL(db_elem)); /* get pointer to
+ * cur_db's db_info
+ * struct */
+ if (NULL == dbs->conn)
+ dbs->conn = db_connect(dbs);
+
+ if (NULL != dbs->conn)
+ {
+ if (0 == (loops % UPDATE_INTERVAL)) /* Update the list if
+ * it's time */
+ update_table_list(dbs); /* Add and remove tables
+ * from the list */
+
+ if (0 == xid_wraparound_check(dbs))
+ {
+ res = send_query(query_table_stats(dbs), dbs); /* Get an updated
+ * snapshot of this dbs
+ * table stats */
+ for (j = 0; j < PQntuples(res); j++)
+ { /* loop through result set */
+ tbl_elem = DLGetHead(dbs->table_list); /* Reset tbl_elem to top
+ * of dbs->table_list */
+ while (NULL != tbl_elem)
+ { /* Loop through tables in list */
+ tbl = ((tbl_info *) DLE_VAL(tbl_elem)); /* set tbl_info =
+ * current_table */
+ if (tbl->relfilenode == atoi(PQgetvalue(res, j, PQfnumber(res, "relfilenode"))))
+ {
+ tbl->curr_analyze_count =
+ (atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_ins"))) +
+ atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd"))) +
+ atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_del"))));
+ tbl->curr_vacuum_count =
+ (atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_del"))) +
+ atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd"))));
+
+ /*
+ * Check numDeletes to see if we need to
+ * vacuum; if so, run a vacuum analyze
+ * (adding the analyze is cheap, so we
+ * might as well) and update the table
+ * thresholds and related information.
+ * If numDeletes is not big enough for a
+ * vacuum, check numInserts for analyze.
+ */
+ if ((tbl->curr_vacuum_count - tbl->CountAtLastVacuum) >= tbl->vacuum_threshold)
+ {
+ snprintf(buf, sizeof(buf), "vacuum analyze %s", tbl->table_name);
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "Performing: %s", buf);
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ }
+ send_query(buf, dbs);
+ update_table_thresholds(dbs, tbl, VACUUM_ANALYZE);
+ if (args->debug >= 2)
+ print_table_info(tbl);
+ }
+ else if ((tbl->curr_analyze_count - tbl->CountAtLastAnalyze) >= tbl->analyze_threshold)
+ {
+ snprintf(buf, sizeof(buf), "analyze %s", tbl->table_name);
+ if (args->debug >= 1)
+ {
+ sprintf(logbuffer, "Performing: %s", buf);
+ log_entry(logbuffer);
+ fflush(LOGOUTPUT);
+ }
+ send_query(buf, dbs);
+ update_table_thresholds(dbs, tbl, ANALYZE_ONLY);
+ if (args->debug >= 2)
+ print_table_info(tbl);
+ }
+
+ break; /* once we have found a match, no
+ * need to keep checking. */
+ }
+
+ /*
+ * Advance the table pointers for the next
+ * loop
+ */
+ tbl_elem = DLGetSucc(tbl_elem);
+
+ } /* end for table while loop */
+ } /* end for j loop (tuples in PGresult) */
+ } /* close of if(xid_wraparound_check()) */
+ /* Done working on this db, Clean up, then advance cur_db */
+ PQclear(res);
+ res = NULL;
+ db_disconnect(dbs);
+ }
+ db_elem = DLGetSucc(db_elem); /* move on to next DB
+ * regardless */
+ } /* end of db_list while loop */
+
+ /* Figure out how long to sleep etc ... */
+ gettimeofday(&now, 0);
+ diff = (now.tv_sec - then.tv_sec) * 1000000 + (now.tv_usec - then.tv_usec);
+
+ sleep_secs = args->sleep_base_value + args->sleep_scaling_factor * diff / 1000000;
+ loops++;
+ if (args->debug >= 2)
+ {
+ sprintf(logbuffer,
+ "%i All DBs checked in: %lld usec, will sleep for %i secs.",
+ loops, diff, sleep_secs);
+ log_entry(logbuffer);
+ }
+
+ sleep(sleep_secs); /* Larger Pause between outer loops */
+
+ gettimeofday(&then, 0); /* Reset time counter */
+
+ } /* end of while loop */
+
+ /*
+ * The program is exiting; this code should never run, but it is
+ * here to keep the compiler / valgrind happy.
+ */
+ free_db_list(db_list);
+ free_cmd_args();
+ return EXIT_SUCCESS;
}
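
A side note on the pacing logic in main() above: the daemon measures how long a full pass over all databases took (in microseconds) and then sleeps for sleep_base_value + sleep_scaling_factor * elapsed / 1000000 seconds. The standalone sketch below is not part of the patch; it simply restates that formula with the compiled-in defaults from pg_autovacuum.h and a made-up elapsed time.

#include <stdio.h>

/* Illustrative re-statement of the loop-pacing formula used in main():
 * sleep_secs = sleep_base_value + sleep_scaling_factor * elapsed_usec / 1000000
 * The elapsed time below is invented for the example. */
int
main(void)
{
	int			sleep_base_value = 300;		/* matches SLEEPBASEVALUE */
	float		sleep_scaling_factor = 2;	/* matches SLEEPSCALINGFACTOR */
	long long	elapsed_usec = 4500000;		/* pretend the pass took 4.5 secs */
	int			sleep_secs;

	sleep_secs = sleep_base_value + sleep_scaling_factor * elapsed_usec / 1000000;
	printf("will sleep for %d secs\n", sleep_secs);		/* prints 309 */
	return 0;
}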
diff --git a/contrib/pg_autovacuum/pg_autovacuum.h b/contrib/pg_autovacuum/pg_autovacuum.h
index ac97f63bec0..fbdaf699122 100644
--- a/contrib/pg_autovacuum/pg_autovacuum.h
+++ b/contrib/pg_autovacuum/pg_autovacuum.h
@@ -23,19 +23,19 @@
#include "/usr/include/pgsql/server/lib/dllist.h"
*/
-#define AUTOVACUUM_DEBUG 1
-#define VACBASETHRESHOLD 1000
-#define VACSCALINGFACTOR 2
-#define SLEEPBASEVALUE 300
-#define SLEEPSCALINGFACTOR 2
-#define UPDATE_INTERVAL 2
+#define AUTOVACUUM_DEBUG 1
+#define VACBASETHRESHOLD 1000
+#define VACSCALINGFACTOR 2
+#define SLEEPBASEVALUE 300
+#define SLEEPSCALINGFACTOR 2
+#define UPDATE_INTERVAL 2
/* these two constants are used to tell update_table_thresholds what operation we just performed */
-#define VACUUM_ANALYZE 0
-#define ANALYZE_ONLY 1
+#define VACUUM_ANALYZE 0
+#define ANALYZE_ONLY 1
-#define TABLE_STATS_ALL "select a.relfilenode,a.relname,a.relnamespace,a.relpages,a.reltuples,b.schemaname,b.n_tup_ins,b.n_tup_upd,b.n_tup_del from pg_class a, pg_stat_all_tables b where a.relfilenode=b.relid"
-#define TABLE_STATS_USER "select a.relfilenode,a.relname,a.relnamespace,a.relpages,a.reltuples,b.schemaname,b.n_tup_ins,b.n_tup_upd,b.n_tup_del from pg_class a, pg_stat_user_tables b where a.relfilenode=b.relid"
+#define TABLE_STATS_ALL "select a.relfilenode,a.relname,a.relnamespace,a.relpages,a.reltuples,b.schemaname,b.n_tup_ins,b.n_tup_upd,b.n_tup_del from pg_class a, pg_stat_all_tables b where a.relfilenode=b.relid"
+#define TABLE_STATS_USER "select a.relfilenode,a.relname,a.relnamespace,a.relpages,a.reltuples,b.schemaname,b.n_tup_ins,b.n_tup_upd,b.n_tup_del from pg_class a, pg_stat_user_tables b where a.relfilenode=b.relid"
#define FRONTEND
#define PAGES_QUERY "select relfilenode,reltuples,relpages from pg_class where relfilenode=%i"
#define FROZENOID_QUERY "select oid,age(datfrozenxid) from pg_database where datname = 'template1'"
@@ -44,71 +44,96 @@
/* define the cmd_args structure */
struct cmdargs
{
- int vacuum_base_threshold, analyze_base_threshold, sleep_base_value, debug, daemonize;
- float vacuum_scaling_factor, analyze_scaling_factor, sleep_scaling_factor;
- char *user, *password, *host, *logfile, *port;
+ int vacuum_base_threshold,
+ analyze_base_threshold,
+ sleep_base_value,
+ debug,
+ daemonize;
+ float vacuum_scaling_factor,
+ analyze_scaling_factor,
+ sleep_scaling_factor;
+ char *user,
+ *password,
+ *host,
+ *logfile,
+ *port;
};
typedef struct cmdargs cmd_args;
/* define cmd_args as global so we can get to them everywhere */
-cmd_args *args;
+cmd_args *args;
/* Might need to add a time value for the last time the whole database was vacuumed.
- I think we need to guarantee this happens approx every 1Million TX's */
+ I think we need to guarantee this happens approx every 1Million TX's */
struct dbinfo
{
- int oid, age;
- int analyze_threshold, vacuum_threshold; /* Use these as defaults for table thresholds */
- PGconn *conn;
- char *dbname, *username, *password;
- Dllist *table_list;
+ int oid,
+ age;
+ int analyze_threshold,
+ vacuum_threshold; /* Use these as defaults for table
+ * thresholds */
+ PGconn *conn;
+ char *dbname,
+ *username,
+ *password;
+ Dllist *table_list;
};
typedef struct dbinfo db_info;
struct tableinfo
{
- char *schema_name, *table_name;
- int relfilenode, reltuples, relpages;
- long analyze_threshold, vacuum_threshold;
- long CountAtLastAnalyze; /* equal to: inserts + updates as of the last analyze or initial values at startup */
- long CountAtLastVacuum; /* equal to: deletes + updates as of the last vacuum or initial values at startup */
- long curr_analyze_count, curr_vacuum_count; /* Latest values from stats system */
- db_info *dbi; /* pointer to the database that this table belongs to */
+ char *schema_name,
+ *table_name;
+ int relfilenode,
+ reltuples,
+ relpages;
+ long analyze_threshold,
+ vacuum_threshold;
+ long CountAtLastAnalyze; /* equal to: inserts + updates as
+ * of the last analyze or initial
+ * values at startup */
+ long CountAtLastVacuum; /* equal to: deletes + updates as
+ * of the last vacuum or initial
+ * values at startup */
+ long curr_analyze_count,
+ curr_vacuum_count; /* Latest values from stats system */
+ db_info *dbi; /* pointer to the database that this table
+ * belongs to */
};
typedef struct tableinfo tbl_info;
/* Functions for dealing with command line arguments */
-static cmd_args *get_cmd_args (int argc, char *argv[]);
-static void print_cmd_args (void);
-static void free_cmd_args (void);
-static void usage (void);
+static cmd_args *get_cmd_args(int argc, char *argv[]);
+static void print_cmd_args(void);
+static void free_cmd_args(void);
+static void usage(void);
/* Functions for managing database lists */
-static Dllist *init_db_list (void);
-static db_info *init_dbinfo (char *dbname, int oid, int age);
-static void update_db_list (Dllist * db_list);
-static void remove_db_from_list (Dlelem * db_to_remove);
-static void print_db_info (db_info * dbi, int print_table_list);
-static void print_db_list (Dllist * db_list, int print_table_lists);
-static int xid_wraparound_check (db_info * dbi);
-static void free_db_list (Dllist * db_list);
+static Dllist *init_db_list(void);
+static db_info *init_dbinfo(char *dbname, int oid, int age);
+static void update_db_list(Dllist *db_list);
+static void remove_db_from_list(Dlelem *db_to_remove);
+static void print_db_info(db_info * dbi, int print_table_list);
+static void print_db_list(Dllist *db_list, int print_table_lists);
+static int xid_wraparound_check(db_info * dbi);
+static void free_db_list(Dllist *db_list);
/* Functions for managing table lists */
-static tbl_info *init_table_info (PGresult * conn, int row, db_info *dbi);
-static void update_table_list (db_info * dbi);
-static void remove_table_from_list (Dlelem * tbl_to_remove);
-static void print_table_list (Dllist * tbl_node);
-static void print_table_info (tbl_info * tbl);
-static void update_table_thresholds (db_info * dbi, tbl_info * tbl, int vacuum_type);
-static void free_tbl_list (Dllist * tbl_list);
+static tbl_info *init_table_info(PGresult *conn, int row, db_info * dbi);
+static void update_table_list(db_info * dbi);
+static void remove_table_from_list(Dlelem *tbl_to_remove);
+static void print_table_list(Dllist *tbl_node);
+static void print_table_info(tbl_info * tbl);
+static void update_table_thresholds(db_info * dbi, tbl_info * tbl, int vacuum_type);
+static void free_tbl_list(Dllist *tbl_list);
/* A few database helper functions */
-static int check_stats_enabled (db_info * dbi);
-static PGconn *db_connect (db_info * dbi);
-static void db_disconnect (db_info * dbi);
-static PGresult *send_query (const char *query, db_info * dbi);
-static char *query_table_stats (db_info * dbi);
+static int check_stats_enabled(db_info * dbi);
+static PGconn *db_connect(db_info * dbi);
+static void db_disconnect(db_info * dbi);
+static PGresult *send_query(const char *query, db_info * dbi);
+static char *query_table_stats(db_info * dbi);
/* Other Generally needed Functions */
static void daemonize(void);
-static void log_entry (const char *logentry);
+static void log_entry(const char *logentry);
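
The header defaults above combine with the get_cmd_args() rule that unspecified analyze settings fall back to half of the vacuum settings. The minimal program below (not part of the patch) just shows the effective analyze defaults under that assumption.

#include <stdio.h>

#define VACBASETHRESHOLD 1000
#define VACSCALINGFACTOR 2

int
main(void)
{
	int		vacuum_base_threshold = VACBASETHRESHOLD;
	float	vacuum_scaling_factor = VACSCALINGFACTOR;

	/* same rule as get_cmd_args(): unspecified analyze settings
	 * default to half of the corresponding vacuum settings */
	int		analyze_base_threshold = vacuum_base_threshold / 2;
	float	analyze_scaling_factor = vacuum_scaling_factor / 2;

	printf("analyze base=%d scaling=%f\n",
		   analyze_base_threshold, analyze_scaling_factor);	/* 500 and 1.0 */
	return 0;
}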
diff --git a/contrib/pg_dumplo/main.c b/contrib/pg_dumplo/main.c
index b6783c23efa..09a71a586dc 100644
--- a/contrib/pg_dumplo/main.c
+++ b/contrib/pg_dumplo/main.c
@@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.16 2003/05/14 03:25:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.17 2003/08/04 00:43:11 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@@ -26,7 +26,7 @@
#ifndef HAVE_GETOPT_LONG
#include "getopt_long.h"
-int optreset;
+int optreset;
#endif
char *progname = NULL;
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index 3253dab4b62..ede1b02f6fd 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -1,5 +1,5 @@
/*
- * $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.25 2003/08/01 02:21:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.26 2003/08/04 00:43:11 momjian Exp $
*
* pgbench: a simple TPC-B like benchmark program for PostgreSQL
* written by Tatsuo Ishii
@@ -122,7 +122,7 @@ doConnect()
{
PGconn *con;
PGresult *res;
-
+
con = PQsetdbLogin(pghost, pgport, pgoptions, pgtty, dbName,
login, pwd);
if (con == NULL)
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index 3e7915d8bb0..4b3dc593cef 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: openssl.c,v 1.11 2002/11/15 02:54:44 momjian Exp $
+ * $Id: openssl.c,v 1.12 2003/08/04 00:43:11 momjian Exp $
*/
#include <postgres.h>
@@ -130,18 +130,19 @@ px_find_digest(const char *name, PX_MD ** res)
* of functions does not allow enough flexibility
* and forces some of the parameters (keylen,
* padding) to SSL defaults.
- *
+ *
* So need to manage ciphers ourselves.
*/
-struct ossl_cipher {
- int (*init) (PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv);
- int (*encrypt) (PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *res);
- int (*decrypt) (PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *res);
+struct ossl_cipher
+{
+ int (*init) (PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv);
+ int (*encrypt) (PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *res);
+ int (*decrypt) (PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *res);
- int block_size;
- int max_key_size;
- int stream_cipher;
+ int block_size;
+ int max_key_size;
+ int stream_cipher;
};
typedef struct
@@ -224,7 +225,7 @@ static int
bf_ecb_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *res)
{
unsigned bs = gen_ossl_block_size(c);
- unsigned i;
+ unsigned i;
ossldata *od = c->ptr;
for (i = 0; i < dlen / bs; i++)
@@ -288,13 +289,13 @@ static int
ossl_des_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
{
ossldata *od = c->ptr;
- des_cblock xkey;
+ des_cblock xkey;
memset(&xkey, 0, sizeof(xkey));
memcpy(&xkey, key, klen > 8 ? 8 : klen);
des_set_key(&xkey, od->u.des.key_schedule);
memset(&xkey, 0, sizeof(xkey));
-
+
if (iv)
memcpy(od->iv, iv, 8);
else
@@ -304,53 +305,53 @@ ossl_des_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
static int
ossl_des_ecb_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
- uint8 *res)
+ uint8 *res)
{
unsigned bs = gen_ossl_block_size(c);
- unsigned i;
+ unsigned i;
ossldata *od = c->ptr;
for (i = 0; i < dlen / bs; i++)
- des_ecb_encrypt((des_cblock*)(data + i * bs),
- (des_cblock*)(res + i * bs),
- od->u.des.key_schedule, 1);
+ des_ecb_encrypt((des_cblock *) (data + i * bs),
+ (des_cblock *) (res + i * bs),
+ od->u.des.key_schedule, 1);
return 0;
}
static int
ossl_des_ecb_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
- uint8 *res)
+ uint8 *res)
{
unsigned bs = gen_ossl_block_size(c);
- unsigned i;
+ unsigned i;
ossldata *od = c->ptr;
for (i = 0; i < dlen / bs; i++)
- des_ecb_encrypt((des_cblock*)(data + i * bs),
- (des_cblock*)(res + i * bs),
- od->u.des.key_schedule, 0);
+ des_ecb_encrypt((des_cblock *) (data + i * bs),
+ (des_cblock *) (res + i * bs),
+ od->u.des.key_schedule, 0);
return 0;
}
static int
ossl_des_cbc_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
- uint8 *res)
+ uint8 *res)
{
ossldata *od = c->ptr;
des_ncbc_encrypt(data, res, dlen, od->u.des.key_schedule,
- (des_cblock*)od->iv, 1);
+ (des_cblock *) od->iv, 1);
return 0;
}
static int
ossl_des_cbc_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
- uint8 *res)
+ uint8 *res)
{
ossldata *od = c->ptr;
des_ncbc_encrypt(data, res, dlen, od->u.des.key_schedule,
- (des_cblock*)od->iv, 0);
+ (des_cblock *) od->iv, 0);
return 0;
}
@@ -375,7 +376,7 @@ ossl_cast_ecb_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *re
{
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
- const uint8 *end = data + dlen - bs;
+ const uint8 *end = data + dlen - bs;
for (; data <= end; data += bs, res += bs)
CAST_ecb_encrypt(data, res, &od->u.cast_key, CAST_ENCRYPT);
@@ -387,7 +388,7 @@ ossl_cast_ecb_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen, uint8 *re
{
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
- const uint8 *end = data + dlen - bs;
+ const uint8 *end = data + dlen - bs;
for (; data <= end; data += bs, res += bs)
CAST_ecb_encrypt(data, res, &od->u.cast_key, CAST_DECRYPT);
@@ -429,37 +430,37 @@ static PX_Alias ossl_aliases[] = {
static const struct ossl_cipher ossl_bf_cbc = {
bf_init, bf_cbc_encrypt, bf_cbc_decrypt,
- 64/8, 448/8, 0
+ 64 / 8, 448 / 8, 0
};
static const struct ossl_cipher ossl_bf_ecb = {
bf_init, bf_ecb_encrypt, bf_ecb_decrypt,
- 64/8, 448/8, 0
+ 64 / 8, 448 / 8, 0
};
static const struct ossl_cipher ossl_bf_cfb = {
bf_init, bf_cfb64_encrypt, bf_cfb64_decrypt,
- 64/8, 448/8, 1
+ 64 / 8, 448 / 8, 1
};
static const struct ossl_cipher ossl_des_ecb = {
ossl_des_init, ossl_des_ecb_encrypt, ossl_des_ecb_decrypt,
- 64/8, 64/8, 0
+ 64 / 8, 64 / 8, 0
};
static const struct ossl_cipher ossl_des_cbc = {
ossl_des_init, ossl_des_cbc_encrypt, ossl_des_cbc_decrypt,
- 64/8, 64/8, 0
+ 64 / 8, 64 / 8, 0
};
static const struct ossl_cipher ossl_cast_ecb = {
ossl_cast_init, ossl_cast_ecb_encrypt, ossl_cast_ecb_decrypt,
- 64/8, 128/8, 0
+ 64 / 8, 128 / 8, 0
};
static const struct ossl_cipher ossl_cast_cbc = {
ossl_cast_init, ossl_cast_cbc_encrypt, ossl_cast_cbc_decrypt,
- 64/8, 128/8, 0
+ 64 / 8, 128 / 8, 0
};
/*
@@ -467,7 +468,7 @@ static const struct ossl_cipher ossl_cast_cbc = {
*/
static const struct
{
- const char *name;
+ const char *name;
const struct ossl_cipher *ciph;
} ossl_cipher_types[] =
@@ -510,8 +511,10 @@ px_find_cipher(const char *name, PX_Cipher ** res)
const struct ossl_cipher *ossl_ciph = NULL;
name = px_resolve_alias(ossl_aliases, name);
- for (i = 0; ossl_cipher_types[i].name; i++) {
- if (!strcmp(ossl_cipher_types[i].name, name)) {
+ for (i = 0; ossl_cipher_types[i].name; i++)
+ {
+ if (!strcmp(ossl_cipher_types[i].name, name))
+ {
ossl_ciph = ossl_cipher_types[i].ciph;
break;
}
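
px_find_cipher() above resolves a cipher name by walking the ossl_cipher_types table and comparing names. The sketch below mirrors that lookup pattern in isolation; cipher_entry, find_cipher and the table contents are hypothetical stand-ins for illustration, not pgcrypto's actual definitions.

#include <stdio.h>
#include <string.h>

/* Illustrative name-to-cipher lookup in the style of px_find_cipher() */
struct cipher_entry
{
	const char *name;
	int			block_size;		/* bytes, e.g. 64 / 8 */
	int			max_key_size;	/* bytes */
};

static const struct cipher_entry cipher_types[] = {
	{"bf-cbc", 64 / 8, 448 / 8},
	{"des-cbc", 64 / 8, 64 / 8},
	{"cast5-ecb", 64 / 8, 128 / 8},
	{NULL, 0, 0}
};

static const struct cipher_entry *
find_cipher(const char *name)
{
	int			i;

	for (i = 0; cipher_types[i].name; i++)
	{
		if (!strcmp(cipher_types[i].name, name))
			return &cipher_types[i];
	}
	return NULL;				/* caller reports "no such cipher" */
}

int
main(void)
{
	const struct cipher_entry *c = find_cipher("des-cbc");

	if (c)
		printf("%s: block %d bytes, max key %d bytes\n",
			   c->name, c->block_size, c->max_key_size);
	return 0;
}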
diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c
index de0d426f462..3abc6f12b10 100644
--- a/contrib/pgcrypto/pgcrypto.c
+++ b/contrib/pgcrypto/pgcrypto.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: pgcrypto.c,v 1.13 2003/07/24 17:52:33 tgl Exp $
+ * $Id: pgcrypto.c,v 1.14 2003/08/04 00:43:11 momjian Exp $
*/
#include <postgres.h>
@@ -241,7 +241,7 @@ pg_gen_salt_rounds(PG_FUNCTION_ARGS)
if (len == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("no such crypt algorithm or bad number of rounds")));
+ errmsg("no such crypt algorithm or bad number of rounds")));
res = (text *) palloc(len + VARHDRSZ);
VARATT_SIZEP(res) = len + VARHDRSZ;
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index 110de6dff45..94160605dc7 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -1,5 +1,5 @@
/*
- * $Header: /cvsroot/pgsql/contrib/pgstattuple/pgstattuple.c,v 1.11 2003/08/01 02:21:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pgstattuple/pgstattuple.c,v 1.12 2003/08/04 00:43:11 momjian Exp $
*
* Copyright (c) 2001,2002 Tatsuo Ishii
*
@@ -221,5 +221,5 @@ pgstattuple_real(Relation rel)
pfree(values[i]);
pfree(values);
- return(result);
+ return (result);
}
diff --git a/contrib/spi/autoinc.c b/contrib/spi/autoinc.c
index 31bd21b04ed..b6e0c643316 100644
--- a/contrib/spi/autoinc.c
+++ b/contrib/spi/autoinc.c
@@ -73,8 +73,8 @@ autoinc(PG_FUNCTION_ARGS)
if (SPI_gettypeid(tupdesc, attnum) != INT4OID)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
- errmsg("attribute \"%s\" of \"%s\" must be type INT4",
- args[i], relname)));
+ errmsg("attribute \"%s\" of \"%s\" must be type INT4",
+ args[i], relname)));
val = DatumGetInt32(SPI_getbinval(rettuple, tupdesc, attnum, &isnull));
diff --git a/contrib/spi/insert_username.c b/contrib/spi/insert_username.c
index 6fa9c4c6f88..237b14d25c2 100644
--- a/contrib/spi/insert_username.c
+++ b/contrib/spi/insert_username.c
@@ -65,17 +65,17 @@ insert_username(PG_FUNCTION_ARGS)
if (attnum < 0)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
- errmsg("\"%s\" has no attribute \"%s\"", relname, args[0])));
+ errmsg("\"%s\" has no attribute \"%s\"", relname, args[0])));
if (SPI_gettypeid(tupdesc, attnum) != TEXTOID)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("attribute \"%s\" of \"%s\" must be type TEXT",
- args[0], relname)));
+ args[0], relname)));
/* create fields containing name */
newval = DirectFunctionCall1(textin,
- CStringGetDatum(GetUserNameFromId(GetUserId())));
+ CStringGetDatum(GetUserNameFromId(GetUserId())));
/* construct new tuple */
rettuple = SPI_modifytuple(rel, rettuple, 1, &attnum, &newval, NULL);
diff --git a/contrib/spi/moddatetime.c b/contrib/spi/moddatetime.c
index 6d80bab55e6..f3a97e1c81b 100644
--- a/contrib/spi/moddatetime.c
+++ b/contrib/spi/moddatetime.c
@@ -100,8 +100,8 @@ moddatetime(PG_FUNCTION_ARGS)
if (SPI_gettypeid(tupdesc, attnum) != TIMESTAMPOID)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
- errmsg("attribute \"%s\" of \"%s\" must be type TIMESTAMP",
- args[0], relname)));
+ errmsg("attribute \"%s\" of \"%s\" must be type TIMESTAMP",
+ args[0], relname)));
/* 1 is the number of items in the arrays attnum and newdt.
attnum is the positional number of the field to be updated.
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
index c9baa9c249d..c409a269cac 100644
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -134,8 +134,8 @@ check_primary_key(PG_FUNCTION_ARGS)
if (fnumber < 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("there is no attribute \"%s\" in relation \"%s\"",
- args[i], SPI_getrelname(rel))));
+ errmsg("there is no attribute \"%s\" in relation \"%s\"",
+ args[i], SPI_getrelname(rel))));
/* Well, get binary (in internal format) value of column */
kvals[i] = SPI_getbinval(tuple, tupdesc, fnumber, &isnull);
@@ -365,8 +365,8 @@ check_foreign_key(PG_FUNCTION_ARGS)
if (fnumber < 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("there is no attribute \"%s\" in relation \"%s\"",
- args[i], SPI_getrelname(rel))));
+ errmsg("there is no attribute \"%s\" in relation \"%s\"",
+ args[i], SPI_getrelname(rel))));
/* Well, get binary (in internal format) value of column */
kvals[i] = SPI_getbinval(trigtuple, tupdesc, fnumber, &isnull);
@@ -591,7 +591,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("\"%s\": tuple is referenced in \"%s\"",
- trigger->tgname, relname)));
+ trigger->tgname, relname)));
}
else
{
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
index aa8d0a47b46..8ffa56edc75 100644
--- a/contrib/spi/timetravel.c
+++ b/contrib/spi/timetravel.c
@@ -6,11 +6,11 @@
/* Modified by BÖJTHE Zoltán, Hungary, mailto:urdesobt@axelero.hu */
#include "executor/spi.h" /* this is what you need to work with SPI */
-#include "commands/trigger.h" /* -"- and triggers */
+#include "commands/trigger.h" /* -"- and triggers */
#include "miscadmin.h" /* for GetPgUserName() */
-#include <ctype.h> /* tolower () */
+#include <ctype.h> /* tolower () */
-#define ABSTIMEOID 702 /* it should be in pg_type.h */
+#define ABSTIMEOID 702 /* it should be in pg_type.h */
/* AbsoluteTime currabstime(void); */
Datum timetravel(PG_FUNCTION_ARGS);
@@ -28,13 +28,13 @@ static int nPlans = 0;
typedef struct _TTOffList
{
- struct _TTOffList *next;
- char name[1];
-} TTOffList;
+ struct _TTOffList *next;
+ char name[1];
+} TTOffList;
-static TTOffList TTOff = {NULL,{0}};
+static TTOffList TTOff = {NULL, {0}};
-static int findTTStatus(char *name);
+static int findTTStatus(char *name);
static EPlan *find_plan(char *ident, EPlan ** eplan, int *nplans);
/*
@@ -71,57 +71,57 @@ static EPlan *find_plan(char *ident, EPlan ** eplan, int *nplans);
PG_FUNCTION_INFO_V1(timetravel);
-Datum /* have to return HeapTuple to Executor */
+Datum /* have to return HeapTuple to Executor */
timetravel(PG_FUNCTION_ARGS)
{
- TriggerData *trigdata = (TriggerData *) fcinfo->context;
- Trigger *trigger; /* to get trigger name */
- int argc;
- char **args; /* arguments */
- int attnum[MaxAttrNum]; /* fnumbers of start/stop columns */
+ TriggerData *trigdata = (TriggerData *) fcinfo->context;
+ Trigger *trigger; /* to get trigger name */
+ int argc;
+ char **args; /* arguments */
+ int attnum[MaxAttrNum]; /* fnumbers of start/stop columns */
Datum oldtimeon,
- oldtimeoff;
+ oldtimeoff;
Datum newtimeon,
- newtimeoff,
- newuser,
- nulltext;
- Datum *cvals; /* column values */
- char *cnulls; /* column nulls */
- char *relname; /* triggered relation name */
+ newtimeoff,
+ newuser,
+ nulltext;
+ Datum *cvals; /* column values */
+ char *cnulls; /* column nulls */
+ char *relname; /* triggered relation name */
Relation rel; /* triggered relation */
HeapTuple trigtuple;
HeapTuple newtuple = NULL;
HeapTuple rettuple;
TupleDesc tupdesc; /* tuple description */
- int natts; /* # of attributes */
- EPlan *plan; /* prepared plan */
+ int natts; /* # of attributes */
+ EPlan *plan; /* prepared plan */
char ident[2 * NAMEDATALEN];
bool isnull; /* to know is some column NULL or not */
bool isinsert = false;
- int ret;
- int i;
+ int ret;
+ int i;
/*
* Some checks first...
*/
/* Called by trigger manager ? */
- if(!CALLED_AS_TRIGGER(fcinfo))
+ if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "timetravel: not fired by trigger manager");
/* Should be called for ROW trigger */
- if(TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
+ if (TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
elog(ERROR, "timetravel: can't process STATEMENT events");
/* Should be called BEFORE */
- if(TRIGGER_FIRED_AFTER(trigdata->tg_event))
+ if (TRIGGER_FIRED_AFTER(trigdata->tg_event))
elog(ERROR, "timetravel: must be fired before event");
/* INSERT ? */
- if(TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
+ if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
isinsert = true;
- if(TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
+ if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
newtuple = trigdata->tg_newtuple;
trigtuple = trigdata->tg_trigtuple;
@@ -130,7 +130,7 @@ timetravel(PG_FUNCTION_ARGS)
relname = SPI_getrelname(rel);
/* check if TT is OFF for this relation */
- if(0==findTTStatus(relname))
+ if (0 == findTTStatus(relname))
{
/* OFF - nothing to do */
pfree(relname);
@@ -140,7 +140,7 @@ timetravel(PG_FUNCTION_ARGS)
trigger = trigdata->tg_trigger;
argc = trigger->tgnargs;
- if(argc != MinAttrNum && argc != MaxAttrNum)
+ if (argc != MinAttrNum && argc != MaxAttrNum)
elog(ERROR, "timetravel (%s): invalid (!= %d or %d) number of arguments %d",
relname, MinAttrNum, MaxAttrNum, trigger->tgnargs);
@@ -148,39 +148,39 @@ timetravel(PG_FUNCTION_ARGS)
tupdesc = rel->rd_att;
natts = tupdesc->natts;
- for(i = 0 ; i < MinAttrNum ; i++)
+ for (i = 0; i < MinAttrNum; i++)
{
attnum[i] = SPI_fnumber(tupdesc, args[i]);
- if(attnum[i] < 0)
+ if (attnum[i] < 0)
elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]);
- if(SPI_gettypeid(tupdesc, attnum[i]) != ABSTIMEOID)
+ if (SPI_gettypeid(tupdesc, attnum[i]) != ABSTIMEOID)
elog(ERROR, "timetravel (%s): attribute %s must be of abstime type",
relname, args[i]);
}
- for( ; i < argc ; i++)
+ for (; i < argc; i++)
{
attnum[i] = SPI_fnumber(tupdesc, args[i]);
- if(attnum[i] < 0)
+ if (attnum[i] < 0)
elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]);
- if(SPI_gettypeid(tupdesc, attnum[i]) != TEXTOID)
+ if (SPI_gettypeid(tupdesc, attnum[i]) != TEXTOID)
elog(ERROR, "timetravel (%s): attribute %s must be of text type",
relname, args[i]);
}
/* create fields containing name */
- newuser = DirectFunctionCall1(textin, CStringGetDatum(GetUserNameFromId(GetUserId())));
+ newuser = DirectFunctionCall1(textin, CStringGetDatum(GetUserNameFromId(GetUserId())));
- nulltext = (Datum)NULL;
+ nulltext = (Datum) NULL;
- if(isinsert)
- { /* INSERT */
- int chnattrs = 0;
- int chattrs[MaxAttrNum];
- Datum newvals[MaxAttrNum];
- char newnulls[MaxAttrNum];
+ if (isinsert)
+ { /* INSERT */
+ int chnattrs = 0;
+ int chattrs[MaxAttrNum];
+ Datum newvals[MaxAttrNum];
+ char newnulls[MaxAttrNum];
oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull);
- if(isnull)
+ if (isnull)
{
newvals[chnattrs] = GetCurrentAbsoluteTime();
newnulls[chnattrs] = ' ';
@@ -189,10 +189,10 @@ timetravel(PG_FUNCTION_ARGS)
}
oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull);
- if(isnull)
+ if (isnull)
{
- if((chnattrs == 0 && DatumGetInt32(oldtimeon) >= NOEND_ABSTIME) ||
- (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) >= NOEND_ABSTIME))
+ if ((chnattrs == 0 && DatumGetInt32(oldtimeon) >= NOEND_ABSTIME) ||
+ (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) >= NOEND_ABSTIME))
elog(ERROR, "timetravel (%s): %s is infinity", relname, args[a_time_on]);
newvals[chnattrs] = NOEND_ABSTIME;
newnulls[chnattrs] = ' ';
@@ -201,16 +201,16 @@ timetravel(PG_FUNCTION_ARGS)
}
else
{
- if((chnattrs == 0 && DatumGetInt32(oldtimeon) > DatumGetInt32(oldtimeoff)) ||
- (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) > DatumGetInt32(oldtimeoff)))
+ if ((chnattrs == 0 && DatumGetInt32(oldtimeon) > DatumGetInt32(oldtimeoff)) ||
+ (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) > DatumGetInt32(oldtimeoff)))
elog(ERROR, "timetravel (%s): %s gt %s", relname, args[a_time_on], args[a_time_off]);
}
pfree(relname);
- if(chnattrs <= 0)
+ if (chnattrs <= 0)
return PointerGetDatum(trigtuple);
- if(argc == MaxAttrNum)
+ if (argc == MaxAttrNum)
{
/* clear update_user value */
newvals[chnattrs] = nulltext;
@@ -235,33 +235,34 @@ timetravel(PG_FUNCTION_ARGS)
/* UPDATE/DELETE: */
oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull);
- if(isnull)
+ if (isnull)
elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]);
oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull);
- if(isnull)
+ if (isnull)
elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]);
/*
* If DELETE/UPDATE of a tuple with stop_date neq INFINITY, then tell
* the upper Executor to skip the operation for this tuple
*/
- if(newtuple != NULL)
- { /* UPDATE */
+ if (newtuple != NULL)
+ { /* UPDATE */
newtimeon = SPI_getbinval(newtuple, tupdesc, attnum[a_time_on], &isnull);
- if(isnull)
+ if (isnull)
elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]);
newtimeoff = SPI_getbinval(newtuple, tupdesc, attnum[a_time_off], &isnull);
- if(isnull)
+ if (isnull)
elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]);
- if(oldtimeon != newtimeon || oldtimeoff != newtimeoff)
+ if (oldtimeon != newtimeon || oldtimeoff != newtimeoff)
elog(ERROR, "timetravel (%s): you can't change %s and/or %s columns (use set_timetravel)",
relname, args[a_time_on], args[a_time_off]);
}
- if(oldtimeoff != NOEND_ABSTIME)
- { /* current record is a deleted/updated record */
+ if (oldtimeoff != NOEND_ABSTIME)
+ { /* current record is a deleted/updated
+ * record */
pfree(relname);
return PointerGetDatum(NULL);
}
@@ -269,27 +270,28 @@ timetravel(PG_FUNCTION_ARGS)
newtimeoff = GetCurrentAbsoluteTime();
/* Connect to SPI manager */
- if((ret = SPI_connect()) < 0)
+ if ((ret = SPI_connect()) < 0)
elog(ERROR, "timetravel (%s): SPI_connect returned %d", relname, ret);
/* Fetch tuple values and nulls */
cvals = (Datum *) palloc(natts * sizeof(Datum));
cnulls = (char *) palloc(natts * sizeof(char));
- for(i = 0; i < natts; i++)
+ for (i = 0; i < natts; i++)
{
cvals[i] = SPI_getbinval(trigtuple, tupdesc, i + 1, &isnull);
cnulls[i] = (isnull) ? 'n' : ' ';
}
/* change date column(s) */
- cvals[attnum[a_time_off] - 1] = newtimeoff; /* stop_date eq current date */
+ cvals[attnum[a_time_off] - 1] = newtimeoff; /* stop_date eq current
+ * date */
cnulls[attnum[a_time_off] - 1] = ' ';
- if(!newtuple)
- { /* DELETE */
- if(argc == MaxAttrNum)
+ if (!newtuple)
+ { /* DELETE */
+ if (argc == MaxAttrNum)
{
- cvals[attnum[a_del_user] - 1] = newuser; /* set delete user */
+ cvals[attnum[a_del_user] - 1] = newuser; /* set delete user */
cnulls[attnum[a_del_user] - 1] = ' ';
}
}
@@ -302,11 +304,11 @@ timetravel(PG_FUNCTION_ARGS)
plan = find_plan(ident, &Plans, &nPlans);
/* if there is no plan ... */
- if(plan->splan == NULL)
+ if (plan->splan == NULL)
{
- void *pplan;
- Oid *ctypes;
- char sql[8192];
+ void *pplan;
+ Oid *ctypes;
+ char sql[8192];
/* allocate ctypes for preparation */
ctypes = (Oid *) palloc(natts * sizeof(Oid));
@@ -315,15 +317,15 @@ timetravel(PG_FUNCTION_ARGS)
* Construct query: INSERT INTO _relation_ VALUES ($1, ...)
*/
snprintf(sql, sizeof(sql), "INSERT INTO %s VALUES (", relname);
- for(i = 1; i <= natts; i++)
+ for (i = 1; i <= natts; i++)
{
ctypes[i - 1] = SPI_gettypeid(tupdesc, i);
- if(!(tupdesc->attrs[i - 1]->attisdropped)) /* skip dropped columns */
- snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "$%d%s",
- i, (i < natts) ? ", " : ")" );
+ if (!(tupdesc->attrs[i - 1]->attisdropped)) /* skip dropped columns */
+ snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "$%d%s",
+ i, (i < natts) ? ", " : ")");
#if 0
- snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "$%d /* %d */ %s",
- i, ctypes[i-1], (i < natts) ? ", " : ")" );
+ snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "$%d /* %d */ %s",
+ i, ctypes[i - 1], (i < natts) ? ", " : ")");
#endif
}
@@ -331,7 +333,7 @@ timetravel(PG_FUNCTION_ARGS)
/* Prepare plan for query */
pplan = SPI_prepare(sql, natts, ctypes);
- if(pplan == NULL)
+ if (pplan == NULL)
elog(ERROR, "timetravel (%s): SPI_prepare returned %d", relname, SPI_result);
/*
@@ -340,7 +342,7 @@ timetravel(PG_FUNCTION_ARGS)
* use.
*/
pplan = SPI_saveplan(pplan);
- if(pplan == NULL)
+ if (pplan == NULL)
elog(ERROR, "timetravel (%s): SPI_saveplan returned %d", relname, SPI_result);
plan->splan = pplan;
@@ -351,14 +353,14 @@ timetravel(PG_FUNCTION_ARGS)
*/
ret = SPI_execp(plan->splan, cvals, cnulls, 0);
- if(ret < 0)
+ if (ret < 0)
elog(ERROR, "timetravel (%s): SPI_execp returned %d", relname, ret);
/* Tuple to return to upper Executor ... */
- if(newtuple)
- { /* UPDATE */
- int chnattrs = 0;
- int chattrs[MaxAttrNum];
+ if (newtuple)
+ { /* UPDATE */
+ int chnattrs = 0;
+ int chattrs[MaxAttrNum];
Datum newvals[MaxAttrNum];
char newnulls[MaxAttrNum];
@@ -372,7 +374,7 @@ timetravel(PG_FUNCTION_ARGS)
chattrs[chnattrs] = attnum[a_time_off];
chnattrs++;
- if(argc == MaxAttrNum)
+ if (argc == MaxAttrNum)
{
/* set update_user value */
newvals[chnattrs] = newuser;
@@ -399,7 +401,8 @@ timetravel(PG_FUNCTION_ARGS)
*/
/* SPI_pfree(tmptuple); */
}
- else /* DELETE case */
+ else
+/* DELETE case */
rettuple = trigtuple;
SPI_finish(); /* don't forget to say goodbye to the SPI mgr */
@@ -417,23 +420,24 @@ PG_FUNCTION_INFO_V1(set_timetravel);
Datum
set_timetravel(PG_FUNCTION_ARGS)
{
- Name relname = PG_GETARG_NAME(0);
- int32 on = PG_GETARG_INT32(1);
- char *rname;
- char *d;
- char *s;
+ Name relname = PG_GETARG_NAME(0);
+ int32 on = PG_GETARG_INT32(1);
+ char *rname;
+ char *d;
+ char *s;
int32 ret;
- TTOffList *p,*pp;
+ TTOffList *p,
+ *pp;
- for(pp = (p = &TTOff)->next; pp; pp=(p=pp)->next)
+ for (pp = (p = &TTOff)->next; pp; pp = (p = pp)->next)
{
- if(namestrcmp(relname, pp->name) == 0)
+ if (namestrcmp(relname, pp->name) == 0)
break;
}
- if(pp)
+ if (pp)
{
/* OFF currently */
- if(on != 0)
+ if (on != 0)
{
/* turn ON */
p->next = pp->next;
@@ -444,20 +448,20 @@ set_timetravel(PG_FUNCTION_ARGS)
else
{
/* ON currently */
- if(on == 0)
+ if (on == 0)
{
/* turn OFF */
s = rname = DatumGetCString(DirectFunctionCall1(nameout, NameGetDatum(relname)));
- if(s)
+ if (s)
{
- pp = malloc(sizeof(TTOffList)+strlen(rname));
- if(pp)
+ pp = malloc(sizeof(TTOffList) + strlen(rname));
+ if (pp)
{
pp->next = NULL;
p->next = pp;
d = pp->name;
while (*s)
- *d++ = tolower((unsigned char)*s++);
+ *d++ = tolower((unsigned char) *s++);
*d = '\0';
}
pfree(rname);
@@ -470,7 +474,7 @@ set_timetravel(PG_FUNCTION_ARGS)
/*
* get_timetravel (relname) --
- * get timetravel status for specified relation (ON/OFF)
+ * get timetravel status for specified relation (ON/OFF)
*/
PG_FUNCTION_INFO_V1(get_timetravel);
@@ -478,11 +482,11 @@ Datum
get_timetravel(PG_FUNCTION_ARGS)
{
Name relname = PG_GETARG_NAME(0);
- TTOffList *pp;
+ TTOffList *pp;
- for(pp = TTOff.next; pp; pp = pp->next)
+ for (pp = TTOff.next; pp; pp = pp->next)
{
- if(namestrcmp(relname, pp->name) == 0)
+ if (namestrcmp(relname, pp->name) == 0)
PG_RETURN_INT32(0);
}
PG_RETURN_INT32(1);
@@ -491,9 +495,10 @@ get_timetravel(PG_FUNCTION_ARGS)
static int
findTTStatus(char *name)
{
- TTOffList* pp;
- for(pp = TTOff.next; pp; pp = pp->next)
- if(strcasecmp(name, pp->name) == 0)
+ TTOffList *pp;
+
+ for (pp = TTOff.next; pp; pp = pp->next)
+ if (strcasecmp(name, pp->name) == 0)
return 0;
return 1;
}
@@ -509,17 +514,17 @@ currabstime()
static EPlan *
find_plan(char *ident, EPlan ** eplan, int *nplans)
{
- EPlan *newp;
- int i;
+ EPlan *newp;
+ int i;
- if(*nplans > 0)
+ if (*nplans > 0)
{
- for(i = 0; i < *nplans; i++)
+ for (i = 0; i < *nplans; i++)
{
- if(strcmp((*eplan)[i].ident, ident) == 0)
+ if (strcmp((*eplan)[i].ident, ident) == 0)
break;
}
- if(i != *nplans)
+ if (i != *nplans)
return (*eplan + i);
*eplan = (EPlan *) realloc(*eplan, (i + 1) * sizeof(EPlan));
newp = *eplan + i;
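
The timetravel trigger above keeps a per-relation ON/OFF switch in the TTOffList singly linked list, which findTTStatus() consults with a case-insensitive compare. Below is a self-contained sketch of that pattern only; turn_off and is_on are illustrative names, not functions from the module.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>			/* strcasecmp */

/* Relations whose name appears in the list have timetravel turned OFF. */
typedef struct OffNode
{
	struct OffNode *next;
	char		name[1];		/* over-allocated to hold the full name */
}	OffNode;

static OffNode head = {NULL, {0}};

static void
turn_off(const char *name)
{
	OffNode    *n = malloc(sizeof(OffNode) + strlen(name));

	if (n == NULL)
		return;
	strcpy(n->name, name);
	n->next = head.next;
	head.next = n;
}

static int
is_on(const char *name)			/* mirrors findTTStatus(): 0 = OFF, 1 = ON */
{
	OffNode    *p;

	for (p = head.next; p; p = p->next)
		if (strcasecmp(name, p->name) == 0)
			return 0;
	return 1;
}

int
main(void)
{
	turn_off("mytable");
	printf("mytable: %s\n", is_on("mytable") ? "ON" : "OFF");	/* OFF */
	printf("other:   %s\n", is_on("other") ? "ON" : "OFF");		/* ON */
	return 0;
}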
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index d883793aace..17a3a6a8579 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -42,11 +42,11 @@
#include "tablefunc.h"
-static int load_categories_hash(char *cats_sql, MemoryContext per_query_ctx);
+static int load_categories_hash(char *cats_sql, MemoryContext per_query_ctx);
static Tuplestorestate *get_crosstab_tuplestore(char *sql,
- int num_categories,
- TupleDesc tupdesc,
- MemoryContext per_query_ctx);
+ int num_categories,
+ TupleDesc tupdesc,
+ MemoryContext per_query_ctx);
static void validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial);
static bool compatCrosstabTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
static bool compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
@@ -56,7 +56,7 @@ static TupleDesc make_crosstab_tupledesc(TupleDesc spi_tupdesc,
static Tuplestorestate *connectby(char *relname,
char *key_fld,
char *parent_key_fld,
- char *orderby_fld,
+ char *orderby_fld,
char *branch_delim,
char *start_with,
int max_depth,
@@ -115,7 +115,7 @@ static HTAB *crosstab_HashTable;
typedef struct crosstab_cat_desc
{
char *catname;
- int attidx; /* zero based */
+ int attidx; /* zero based */
} crosstab_cat_desc;
#define MAX_CATNAME_LEN NAMEDATALEN
@@ -157,9 +157,9 @@ do { \
/* hash table */
typedef struct crosstab_hashent
{
- char internal_catname[MAX_CATNAME_LEN];
- crosstab_cat_desc *catdesc;
-} crosstab_HashEnt;
+ char internal_catname[MAX_CATNAME_LEN];
+ crosstab_cat_desc *catdesc;
+} crosstab_HashEnt;
/*
* normal_rand - return requested number of random values
@@ -414,7 +414,7 @@ crosstab(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid source data SQL statement"),
errdetail("The provided SQL must return 3 " \
- " columns; rowid, category, and values.")));
+ " columns; rowid, category, and values.")));
}
else
{
@@ -667,10 +667,10 @@ crosstab(PG_FUNCTION_ARGS)
}
/*
- * crosstab_hash - reimplement crosstab as materialized function and
+ * crosstab_hash - reimplement crosstab as materialized function and
* properly deal with missing values (i.e. don't pack remaining
* values to the left)
- *
+ *
* crosstab - create a crosstab of rowids and values columns from a
* SQL statement returning one rowid column, one category column,
* and one value column.
@@ -705,13 +705,13 @@ PG_FUNCTION_INFO_V1(crosstab_hash);
Datum
crosstab_hash(PG_FUNCTION_ARGS)
{
- char *sql = GET_STR(PG_GETARG_TEXT_P(0));
- char *cats_sql = GET_STR(PG_GETARG_TEXT_P(1));
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- int num_categories;
+ char *sql = GET_STR(PG_GETARG_TEXT_P(0));
+ char *cats_sql = GET_STR(PG_GETARG_TEXT_P(1));
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ int num_categories;
/* check to see if caller supports us returning a tuplestore */
if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize))
@@ -729,9 +729,9 @@ crosstab_hash(PG_FUNCTION_ARGS)
/*
* Check to make sure we have a reasonable tuple descriptor
*
- * Note we will attempt to coerce the values into whatever
- * the return attribute type is and depend on the "in"
- * function to complain if needed.
+ * Note we will attempt to coerce the values into whatever the return
+ * attribute type is and depend on the "in" function to complain if
+ * needed.
*/
if (tupdesc->natts < 2)
ereport(ERROR,
@@ -770,19 +770,19 @@ crosstab_hash(PG_FUNCTION_ARGS)
static int
load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
{
- HASHCTL ctl;
- int ret;
- int proc;
- MemoryContext SPIcontext;
- int num_categories = 0;
+ HASHCTL ctl;
+ int ret;
+ int proc;
+ MemoryContext SPIcontext;
+ int num_categories = 0;
/* initialize the category hash table */
ctl.keysize = MAX_CATNAME_LEN;
ctl.entrysize = sizeof(crosstab_HashEnt);
/*
- * use INIT_CATS, defined above as a guess of how
- * many hash table entries to create, initially
+ * use INIT_CATS, defined above as a guess of how many hash table
+ * entries to create, initially
*/
crosstab_HashTable = hash_create("crosstab hash", INIT_CATS, &ctl, HASH_ELEM);
@@ -798,13 +798,13 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
/* Check for qualifying tuples */
if ((ret == SPI_OK_SELECT) && (proc > 0))
{
- SPITupleTable *spi_tuptable = SPI_tuptable;
- TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
- int i;
+ SPITupleTable *spi_tuptable = SPI_tuptable;
+ TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
+ int i;
/*
- * The provided categories SQL query must always return one column:
- * category - the label or identifier for each column
+ * The provided categories SQL query must always return one
+ * column: category - the label or identifier for each column
*/
if (spi_tupdesc->natts != 1)
ereport(ERROR,
@@ -814,9 +814,9 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
for (i = 0; i < proc; i++)
{
- crosstab_cat_desc *catdesc;
- char *catname;
- HeapTuple spi_tuple;
+ crosstab_cat_desc *catdesc;
+ char *catname;
+ HeapTuple spi_tuple;
/* get the next sql result tuple */
spi_tuple = spi_tuptable->vals[i];
@@ -862,13 +862,13 @@ get_crosstab_tuplestore(char *sql,
TupleDesc tupdesc,
MemoryContext per_query_ctx)
{
- Tuplestorestate *tupstore;
- AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
- char **values;
- HeapTuple tuple;
- int ret;
- int proc;
- MemoryContext SPIcontext;
+ Tuplestorestate *tupstore;
+ AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
+ char **values;
+ HeapTuple tuple;
+ int ret;
+ int proc;
+ MemoryContext SPIcontext;
/* initialize our tuplestore */
tupstore = tuplestore_begin_heap(true, false, SortMem);
@@ -885,33 +885,36 @@ get_crosstab_tuplestore(char *sql,
/* Check for qualifying tuples */
if ((ret == SPI_OK_SELECT) && (proc > 0))
{
- SPITupleTable *spi_tuptable = SPI_tuptable;
- TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
- int ncols = spi_tupdesc->natts;
- char *rowid;
- char *lastrowid = NULL;
- int i, j;
- int result_ncols;
+ SPITupleTable *spi_tuptable = SPI_tuptable;
+ TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
+ int ncols = spi_tupdesc->natts;
+ char *rowid;
+ char *lastrowid = NULL;
+ int i,
+ j;
+ int result_ncols;
/*
- * The provided SQL query must always return at least three columns:
+ * The provided SQL query must always return at least three
+ * columns:
*
* 1. rowname the label for each row - column 1 in the final result
- * 2. category the label for each value-column in the final result
- * 3. value the values used to populate the value-columns
- *
+ * 2. category the label for each value-column in the final result
+ * 3. value the values used to populate the value-columns
+ *
* If there are more than three columns, the last two are taken as
- * "category" and "values". The first column is taken as "rowname".
- * Additional columns (2 thru N-2) are assumed the same for the same
- * "rowname", and are copied into the result tuple from the first
- * time we encounter a particular rowname.
+ * "category" and "values". The first column is taken as
+ * "rowname". Additional columns (2 thru N-2) are assumed the same
+ * for the same "rowname", and are copied into the result tuple
+ * from the first time we encounter a particular rowname.
*/
if (ncols < 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid source data SQL statement"),
errdetail("The provided SQL must return 3 " \
- " columns; rowid, category, and values.")));
+ " columns; rowid, category, and values.")));
result_ncols = (ncols - 2) + num_categories;
@@ -922,7 +925,7 @@ get_crosstab_tuplestore(char *sql,
errmsg("invalid return type"),
errdetail("query-specified return " \
"tuple has %d columns but crosstab " \
- "returns %d", tupdesc->natts, result_ncols)));
+ "returns %d", tupdesc->natts, result_ncols)));
/* allocate space */
values = (char **) palloc(result_ncols * sizeof(char *));
@@ -932,9 +935,9 @@ get_crosstab_tuplestore(char *sql,
for (i = 0; i < proc; i++)
{
- HeapTuple spi_tuple;
- crosstab_cat_desc *catdesc;
- char *catname;
+ HeapTuple spi_tuple;
+ crosstab_cat_desc *catdesc;
+ char *catname;
/* get the next sql result tuple */
spi_tuple = spi_tuptable->vals[i];
@@ -958,7 +961,10 @@ get_crosstab_tuplestore(char *sql,
*/
if (lastrowid != NULL)
{
- /* switch to appropriate context while storing the tuple */
+ /*
+ * switch to appropriate context while storing the
+ * tuple
+ */
SPIcontext = MemoryContextSwitchTo(per_query_ctx);
/* rowid changed, flush the previous output row */
@@ -984,7 +990,7 @@ get_crosstab_tuplestore(char *sql,
crosstab_HashTableLookup(catname, catdesc);
if (catdesc)
- values[catdesc->attidx + ncols - 2] =
+ values[catdesc->attidx + ncols - 2] =
SPI_getvalue(spi_tuple, spi_tupdesc, ncols);
}
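
The loop above pivots (rowid, category, value) result rows into one output row per rowid, looking up each category's destination column and leaving missing categories NULL rather than packing values to the left. Below is a schematic, self-contained sketch of that pivot, with a linear search standing in for the category hash and purely illustrative data.

#include <stdio.h>
#include <string.h>

/* Schematic pivot in the spirit of get_crosstab_tuplestore() above. */
static const char *categories[] = {"cat1", "cat2", "cat3"};
#define NCATS 3

static int
cat_index(const char *name)
{
	int			i;

	for (i = 0; i < NCATS; i++)
		if (strcmp(categories[i], name) == 0)
			return i;
	return -1;
}

int
main(void)
{
	/* input rows for a single rowid, as (category, value) pairs */
	const char *rows[][2] = {{"cat1", "10"}, {"cat3", "30"}};
	const char *values[NCATS] = {NULL, NULL, NULL};
	int			i;

	for (i = 0; i < 2; i++)
	{
		int			idx = cat_index(rows[i][0]);

		if (idx >= 0)
			values[idx] = rows[i][1];	/* missing categories stay NULL */
	}

	for (i = 0; i < NCATS; i++)
		printf("%s=%s ", categories[i], values[i] ? values[i] : "NULL");
	printf("\n");				/* cat1=10 cat2=NULL cat3=30 */
	return 0;
}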
@@ -1026,29 +1032,29 @@ get_crosstab_tuplestore(char *sql,
*
* keyid parent_keyid pos
* ------+------------+--
- * row1 NULL 0
- * row2 row1 0
- * row3 row1 0
- * row4 row2 1
- * row5 row2 0
- * row6 row4 0
- * row7 row3 0
- * row8 row6 0
- * row9 row5 0
+ * row1 NULL 0
+ * row2 row1 0
+ * row3 row1 0
+ * row4 row2 1
+ * row5 row2 0
+ * row6 row4 0
+ * row7 row3 0
+ * row8 row6 0
+ * row9 row5 0
*
*
* connectby(text relname, text keyid_fld, text parent_keyid_fld
- * [, text orderby_fld], text start_with, int max_depth
- * [, text branch_delim])
+ * [, text orderby_fld], text start_with, int max_depth
+ * [, text branch_delim])
* connectby('foo', 'keyid', 'parent_keyid', 'pos', 'row2', 0, '~') returns:
*
- * keyid parent_id level branch serial
+ * keyid parent_id level branch serial
* ------+-----------+--------+-----------------------
- * row2 NULL 0 row2 1
- * row5 row2 1 row2~row5 2
- * row9 row5 2 row2~row5~row9 3
- * row4 row2 1 row2~row4 4
- * row6 row4 2 row2~row4~row6 5
+ * row2 NULL 0 row2 1
+ * row5 row2 1 row2~row5 2
+ * row9 row5 2 row2~row5~row9 3
+ * row4 row2 1 row2~row4 4
+ * row6 row4 2 row2~row4~row6 5
* row8 row6 3 row2~row4~row6~row8 6
*
*/
@@ -1114,7 +1120,7 @@ connectby_text(PG_FUNCTION_ARGS)
rsinfo->setResult = connectby(relname,
key_fld,
parent_key_fld,
- NULL,
+ NULL,
branch_delim,
start_with,
max_depth,
@@ -1149,7 +1155,7 @@ connectby_text_serial(PG_FUNCTION_ARGS)
char *branch_delim = NULL;
bool show_branch = false;
bool show_serial = true;
-
+
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
AttInMetadata *attinmeta;
@@ -1192,7 +1198,7 @@ connectby_text_serial(PG_FUNCTION_ARGS)
rsinfo->setResult = connectby(relname,
key_fld,
parent_key_fld,
- orderby_fld,
+ orderby_fld,
branch_delim,
start_with,
max_depth,
@@ -1222,12 +1228,12 @@ static Tuplestorestate *
connectby(char *relname,
char *key_fld,
char *parent_key_fld,
- char *orderby_fld,
+ char *orderby_fld,
char *branch_delim,
char *start_with,
int max_depth,
bool show_branch,
- bool show_serial,
+ bool show_serial,
MemoryContext per_query_ctx,
AttInMetadata *attinmeta)
{
@@ -1235,7 +1241,7 @@ connectby(char *relname,
int ret;
MemoryContext oldcontext;
- int serial = 1;
+ int serial = 1;
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)
@@ -1303,25 +1309,25 @@ build_tuplestore_recursively(char *key_fld,
if (!show_serial)
{
appendStringInfo(sql, "SELECT %s, %s FROM %s WHERE %s = '%s' AND %s IS NOT NULL",
- key_fld,
- parent_key_fld,
- relname,
- parent_key_fld,
- start_with,
- key_fld);
- serial_column=0;
+ key_fld,
+ parent_key_fld,
+ relname,
+ parent_key_fld,
+ start_with,
+ key_fld);
+ serial_column = 0;
}
else
{
appendStringInfo(sql, "SELECT %s, %s FROM %s WHERE %s = '%s' AND %s IS NOT NULL ORDER BY %s",
- key_fld,
- parent_key_fld,
- relname,
- parent_key_fld,
- start_with,
- key_fld,
- orderby_fld);
- serial_column=1;
+ key_fld,
+ parent_key_fld,
+ relname,
+ parent_key_fld,
+ start_with,
+ key_fld,
+ orderby_fld);
+ serial_column = 1;
}
/* Retrieve the desired rows */
@@ -1371,8 +1377,8 @@ build_tuplestore_recursively(char *key_fld,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
- errdetail("Return and SQL tuple descriptions are " \
- "incompatible.")));
+ errdetail("Return and SQL tuple descriptions are " \
+ "incompatible.")));
/* root value is the one we initially start with */
values[0] = start_with;
@@ -1395,7 +1401,7 @@ build_tuplestore_recursively(char *key_fld,
if (show_branch)
values[4] = serial_str;
else
- values[3] = serial_str;
+ values[3] = serial_str;
}
/* construct the tuple */
@@ -1508,11 +1514,11 @@ build_tuplestore_recursively(char *key_fld,
static void
validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial)
{
- int serial_column=0;
+ int serial_column = 0;
if (show_serial)
- serial_column=1;
-
+ serial_column = 1;
+
/* are there the correct number of columns */
if (show_branch)
{
@@ -1546,7 +1552,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Third column must be type %s.",
- format_type_be(INT4OID))));
+ format_type_be(INT4OID))));
/* check that the type of the fourth column is TEXT if applicable */
if (show_branch && tupdesc->attrs[3]->atttypid != TEXTOID)
@@ -1554,7 +1560,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Fourth column must be type %s.",
- format_type_be(TEXTOID))));
+ format_type_be(TEXTOID))));
/* check that the type of the fifth column is INT4 */
if (show_branch && show_serial && tupdesc->attrs[4]->atttypid != INT4OID)
@@ -1565,7 +1571,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial
if (!show_branch && show_serial && tupdesc->attrs[3]->atttypid != INT4OID)
elog(ERROR, "Query-specified return tuple not valid for Connectby: "
"fourth column must be type %s", format_type_be(INT4OID));
-
+
/* OK, the tupdesc is valid for our purposes */
}
@@ -1596,7 +1602,7 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("SQL parent key field datatype does " \
- "not match return parent key field datatype.")));
+ "not match return parent key field datatype.")));
/* OK, the two tupdescs are compatible for our purposes */
return true;
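
To complement the connectby() example in the comment block above, here is a minimal standalone sketch of the recursive expansion that build_tuplestore_recursively() performs. The in-memory foo[] table, walk() and the printf output are illustrative only: the real function issues SPI queries, appends to a tuplestore, and orders siblings by the orderby field, so the row order and serial numbers it produces differ from this array-order walk.

#include <stdio.h>
#include <string.h>

typedef struct { const char *key; const char *parent; } Row;

static const Row foo[] = {
    {"row1", NULL},   {"row2", "row1"}, {"row3", "row1"},
    {"row4", "row2"}, {"row5", "row2"}, {"row6", "row4"},
    {"row7", "row3"}, {"row8", "row6"}, {"row9", "row5"},
};

static int serial = 1;

static void
walk(const char *start, const char *branch, int level, int max_depth)
{
    size_t      i;

    if (max_depth > 0 && level > max_depth)     /* 0 means "no depth limit" */
        return;

    for (i = 0; i < sizeof(foo) / sizeof(foo[0]); i++)
    {
        char        newbranch[256];

        if (foo[i].parent == NULL || strcmp(foo[i].parent, start) != 0)
            continue;
        /* extend the branch string with the '~' delimiter, as connectby does */
        snprintf(newbranch, sizeof(newbranch), "%s~%s", branch, foo[i].key);
        printf("%-6s %-6s %d %-22s %d\n",
               foo[i].key, start, level, newbranch, serial++);
        walk(foo[i].key, newbranch, level + 1, max_depth);
    }
}

int
main(void)
{
    /* roughly connectby('foo','keyid','parent_keyid','pos','row2',0,'~') */
    printf("%-6s %-6s %d %-22s %d\n", "row2", "NULL", 0, "row2", serial++);
    walk("row2", "row2", 1, 0);
    return 0;
}

The branch string doubles as the cycle guard in spirit: each recursion only descends into rows whose parent matches the key just emitted, so the depth of the walk is bounded by the data.
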
diff --git a/contrib/tsearch/morph.c b/contrib/tsearch/morph.c
index 2455a72d8dd..b827fd652d9 100644
--- a/contrib/tsearch/morph.c
+++ b/contrib/tsearch/morph.c
@@ -51,7 +51,8 @@ DICT dicts[] = {
#undef DICT_TABLE
/* array for storing dictionary's objects (if needed) */
-void *dictobjs[lengthof(dicts)];
+void *dictobjs[
+ lengthof(dicts)];
#define STOPLEXEM -2
#define BYLOCALE -1
@@ -175,7 +176,7 @@ lemmatize(char *word, int *len, int type)
}
else if (nd == BYLOCALE)
{
- continue; /* no dict for current locale */
+ continue; /* no dict for current locale */
}
else
{
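
The dictobjs[] hunk above is only a reflow, but the pattern it touches -- a cache array sized with lengthof() so it always stays in step with the dicts[] table -- is worth a line of illustration. A tiny compilable sketch with made-up names (not the tsearch definitions):

#include <stdio.h>

#define lengthof(array) (sizeof(array) / sizeof((array)[0]))

static const char *dict_names[] = {"en_stem", "ru_stem", "simple"};
static void *dict_objs[lengthof(dict_names)];   /* one cached object per entry */

int
main(void)
{
    dict_objs[0] = NULL;        /* slots start empty, filled on first use */
    printf("%zu dictionaries, %zu cache slots\n",
           lengthof(dict_names), lengthof(dict_objs));
    return 0;
}
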
diff --git a/contrib/tsearch2/common.c b/contrib/tsearch2/common.c
index 917dced87fa..30062180076 100644
--- a/contrib/tsearch2/common.c
+++ b/contrib/tsearch2/common.c
@@ -4,80 +4,99 @@
#include "ts_cfg.h"
#include "dict.h"
-text*
-char2text(char* in) {
+text *
+char2text(char *in)
+{
return charl2text(in, strlen(in));
}
-text* charl2text(char* in, int len) {
- text *out=(text*)palloc(len+VARHDRSZ);
+text *
+charl2text(char *in, int len)
+{
+ text *out = (text *) palloc(len + VARHDRSZ);
+
memcpy(VARDATA(out), in, len);
- VARATT_SIZEP(out) = len+VARHDRSZ;
+ VARATT_SIZEP(out) = len + VARHDRSZ;
return out;
}
-char
-*text2char(text* in) {
- char *out=palloc( VARSIZE(in) );
- memcpy(out, VARDATA(in), VARSIZE(in)-VARHDRSZ);
- out[ VARSIZE(in)-VARHDRSZ ] ='\0';
+char
+ *
+text2char(text *in)
+{
+ char *out = palloc(VARSIZE(in));
+
+ memcpy(out, VARDATA(in), VARSIZE(in) - VARHDRSZ);
+ out[VARSIZE(in) - VARHDRSZ] = '\0';
return out;
}
-char
-*pnstrdup(char* in, int len) {
- char *out=palloc( len+1 );
+char
+ *
+pnstrdup(char *in, int len)
+{
+ char *out = palloc(len + 1);
+
memcpy(out, in, len);
- out[len]='\0';
+ out[len] = '\0';
return out;
}
-text
-*ptextdup(text* in) {
- text *out=(text*)palloc( VARSIZE(in) );
- memcpy(out,in,VARSIZE(in));
+text
+ *
+ptextdup(text *in)
+{
+ text *out = (text *) palloc(VARSIZE(in));
+
+ memcpy(out, in, VARSIZE(in));
return out;
}
-text
-*mtextdup(text* in) {
- text *out=(text*)malloc( VARSIZE(in) );
- if ( !out )
+text
+ *
+mtextdup(text *in)
+{
+ text *out = (text *) malloc(VARSIZE(in));
+
+ if (!out)
ts_error(ERROR, "No memory");
- memcpy(out,in,VARSIZE(in));
+ memcpy(out, in, VARSIZE(in));
return out;
}
-void
-ts_error(int state, const char *format, ...) {
- va_list args;
- int tlen = 128, len=0;
- char *buf;
-
+void
+ts_error(int state, const char *format,...)
+{
+ va_list args;
+ int tlen = 128,
+ len = 0;
+ char *buf;
+
reset_cfg();
reset_dict();
reset_prs();
va_start(args, format);
buf = palloc(tlen);
- len = vsnprintf(buf, tlen-1, format, args);
- if ( len >= tlen ) {
- tlen=len+1;
- buf = repalloc( buf, tlen );
- vsnprintf(buf, tlen-1, format, args);
+ len = vsnprintf(buf, tlen - 1, format, args);
+ if (len >= tlen)
+ {
+ tlen = len + 1;
+ buf = repalloc(buf, tlen);
+ vsnprintf(buf, tlen - 1, format, args);
}
va_end(args);
-
+
/* ?? internal error ?? */
elog(state, "%s", buf);
pfree(buf);
}
-int
-text_cmp(text *a, text *b) {
- if ( VARSIZE(a) == VARSIZE(b) )
- return strncmp( VARDATA(a), VARDATA(b), VARSIZE(a)-VARHDRSZ );
- return (int)VARSIZE(a) - (int)VARSIZE(b);
+int
+text_cmp(text *a, text *b)
+{
+ if (VARSIZE(a) == VARSIZE(b))
+ return strncmp(VARDATA(a), VARDATA(b), VARSIZE(a) - VARHDRSZ);
+ return (int) VARSIZE(a) - (int) VARSIZE(b);
}
-
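
The functions above all revolve around PostgreSQL's length-prefixed text type. As a rough standalone illustration (outside the backend, so palloc/VARHDRSZ/VARATT_SIZEP are replaced by a toy mini_text type and malloc), this is the layout charl2text() builds and text2char() unpacks: a 4-byte total size followed by the payload, with no trailing NUL.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef struct
{
    int32_t     vl_len;         /* total size, header included */
    char        vl_dat[1];      /* payload bytes follow, no trailing NUL */
} mini_text;

#define MINI_VARHDRSZ ((int32_t) sizeof(int32_t))

static mini_text *
make_text(const char *in, int len)
{
    mini_text  *out = malloc(MINI_VARHDRSZ + len);

    if (!out)
        abort();
    out->vl_len = MINI_VARHDRSZ + len;
    memcpy(out->vl_dat, in, len);
    return out;
}

int
main(void)
{
    mini_text  *t = make_text("tsearch2", 8);

    /* like text2char(): pull the payload back out, bounded by the header */
    printf("%.*s (%d payload bytes)\n",
           (int) (t->vl_len - MINI_VARHDRSZ), t->vl_dat,
           (int) (t->vl_len - MINI_VARHDRSZ));
    free(t);
    return 0;
}
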
diff --git a/contrib/tsearch2/common.h b/contrib/tsearch2/common.h
index 70313fa4d24..481f00405bb 100644
--- a/contrib/tsearch2/common.h
+++ b/contrib/tsearch2/common.h
@@ -7,18 +7,18 @@
#define PG_NARGS() (fcinfo->nargs)
#endif
-text* char2text(char* in);
-text* charl2text(char* in, int len);
-char *text2char(text* in);
-char *pnstrdup(char* in, int len);
-text *ptextdup(text* in);
-text *mtextdup(text* in);
+text *char2text(char *in);
+text *charl2text(char *in, int len);
+char *text2char(text *in);
+char *pnstrdup(char *in, int len);
+text *ptextdup(text *in);
+text *mtextdup(text *in);
-int text_cmp(text *a, text *b);
+int text_cmp(text *a, text *b);
#define NEXTVAL(x) ( (text*)( (char*)(x) + INTALIGN( VARSIZE(x) ) ) )
#define ARRNELEMS(x) ArrayGetNItems( ARR_NDIM(x), ARR_DIMS(x))
-void ts_error(int state, const char *format, ...);
+void ts_error(int state, const char *format,...);
#endif
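
The NEXTVAL() and ARRNELEMS() macros above encode how packed variable-length items are walked: the next item begins at the current pointer plus the current item's int-aligned size. A toy, self-contained version of that walk, with plain 4-byte alignment and a local item struct standing in for text/INTALIGN:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ALIGN4(len) (((len) + 3) & ~3)

typedef struct
{
    int32_t     size;           /* total size including this header */
    char        data[];
} item;

#define NEXT_ITEM(p) ((item *) ((char *) (p) + ALIGN4((p)->size)))

int
main(void)
{
    static int32_t storage[16]; /* int-aligned scratch space */
    item       *a = (item *) storage;
    item       *b;

    a->size = (int32_t) sizeof(int32_t) + 3;
    memcpy(a->data, "cat", 3);

    b = NEXT_ITEM(a);           /* lands on the next aligned slot */
    b->size = (int32_t) sizeof(int32_t) + 5;
    memcpy(b->data, "mouse", 5);

    printf("first=%.*s second=%.*s\n",
           (int) (a->size - 4), a->data, (int) (b->size - 4), b->data);
    return 0;
}
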
diff --git a/contrib/tsearch2/dict.c b/contrib/tsearch2/dict.c
index 8f4cad5c417..9ceb78ffb83 100644
--- a/contrib/tsearch2/dict.c
+++ b/contrib/tsearch2/dict.c
@@ -1,5 +1,5 @@
-/*
- * interface functions to dictionary
+/*
+ * interface functions to dictionary
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <errno.h>
@@ -19,260 +19,285 @@
/*********top interface**********/
-static void *plan_getdict=NULL;
+static void *plan_getdict = NULL;
void
-init_dict(Oid id, DictInfo *dict) {
- Oid arg[1]={ OIDOID };
- bool isnull;
- Datum pars[1]={ ObjectIdGetDatum(id) };
- int stat;
-
- memset(dict,0,sizeof(DictInfo));
+init_dict(Oid id, DictInfo * dict)
+{
+ Oid arg[1] = {OIDOID};
+ bool isnull;
+ Datum pars[1] = {ObjectIdGetDatum(id)};
+ int stat;
+
+ memset(dict, 0, sizeof(DictInfo));
SPI_connect();
- if ( !plan_getdict ) {
- plan_getdict = SPI_saveplan( SPI_prepare( "select dict_init, dict_initoption, dict_lexize from pg_ts_dict where oid = $1" , 1, arg ) );
- if ( !plan_getdict )
+ if (!plan_getdict)
+ {
+ plan_getdict = SPI_saveplan(SPI_prepare("select dict_init, dict_initoption, dict_lexize from pg_ts_dict where oid = $1", 1, arg));
+ if (!plan_getdict)
ts_error(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_getdict, pars, " ", 1);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 ) {
- Datum opt;
- Oid oid=InvalidOid;
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- if ( !(isnull || oid==InvalidOid) ) {
- opt=SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
- dict->dictionary=(void*)DatumGetPointer(OidFunctionCall1(oid, opt));
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ {
+ Datum opt;
+ Oid oid = InvalidOid;
+
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ if (!(isnull || oid == InvalidOid))
+ {
+ opt = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
+ dict->dictionary = (void *) DatumGetPointer(OidFunctionCall1(oid, opt));
}
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull) );
- if ( isnull || oid==InvalidOid )
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull));
+ if (isnull || oid == InvalidOid)
ts_error(ERROR, "Null dict_lexize for dictonary %d", id);
fmgr_info_cxt(oid, &(dict->lexize_info), TopMemoryContext);
- dict->dict_id=id;
- } else
+ dict->dict_id = id;
+ }
+ else
ts_error(ERROR, "No dictionary with id %d", id);
SPI_finish();
}
-typedef struct {
- DictInfo *last_dict;
- int len;
- int reallen;
- DictInfo *list;
+typedef struct
+{
+ DictInfo *last_dict;
+ int len;
+ int reallen;
+ DictInfo *list;
SNMap name2id_map;
-} DictList;
+} DictList;
-static DictList DList = {NULL,0,0,NULL,{0,0,NULL}};
+static DictList DList = {NULL, 0, 0, NULL, {0, 0, NULL}};
void
-reset_dict(void) {
- freeSNMap( &(DList.name2id_map) );
+reset_dict(void)
+{
+ freeSNMap(&(DList.name2id_map));
/* XXX need to free DList.list[*].dictionary */
- if ( DList.list )
+ if (DList.list)
free(DList.list);
- memset(&DList,0,sizeof(DictList));
+ memset(&DList, 0, sizeof(DictList));
}
static int
-comparedict(const void *a, const void *b) {
- return ((DictInfo*)a)->dict_id - ((DictInfo*)b)->dict_id;
+comparedict(const void *a, const void *b)
+{
+ return ((DictInfo *) a)->dict_id - ((DictInfo *) b)->dict_id;
}
DictInfo *
-finddict(Oid id) {
+finddict(Oid id)
+{
/* last used dict */
- if ( DList.last_dict && DList.last_dict->dict_id==id )
+ if (DList.last_dict && DList.last_dict->dict_id == id)
return DList.last_dict;
/* already used dict */
- if ( DList.len != 0 ) {
- DictInfo key;
- key.dict_id=id;
+ if (DList.len != 0)
+ {
+ DictInfo key;
+
+ key.dict_id = id;
DList.last_dict = bsearch(&key, DList.list, DList.len, sizeof(DictInfo), comparedict);
- if ( DList.last_dict != NULL )
+ if (DList.last_dict != NULL)
return DList.last_dict;
}
/* last chance */
- if ( DList.len==DList.reallen ) {
- DictInfo *tmp;
- int reallen = ( DList.reallen ) ? 2*DList.reallen : 16;
- tmp=(DictInfo*)realloc(DList.list,sizeof(DictInfo)*reallen);
- if ( !tmp )
- ts_error(ERROR,"No memory");
- DList.reallen=reallen;
- DList.list=tmp;
+ if (DList.len == DList.reallen)
+ {
+ DictInfo *tmp;
+ int reallen = (DList.reallen) ? 2 * DList.reallen : 16;
+
+ tmp = (DictInfo *) realloc(DList.list, sizeof(DictInfo) * reallen);
+ if (!tmp)
+ ts_error(ERROR, "No memory");
+ DList.reallen = reallen;
+ DList.list = tmp;
}
- DList.last_dict=&(DList.list[DList.len]);
+ DList.last_dict = &(DList.list[DList.len]);
init_dict(id, DList.last_dict);
DList.len++;
qsort(DList.list, DList.len, sizeof(DictInfo), comparedict);
- return finddict(id); /* qsort changed order!! */;
+ return finddict(id); /* qsort changed order!! */ ;
}
-static void *plan_name2id=NULL;
+static void *plan_name2id = NULL;
Oid
-name2id_dict(text *name) {
- Oid arg[1]={ TEXTOID };
- bool isnull;
- Datum pars[1]={ PointerGetDatum(name) };
- int stat;
- Oid id=findSNMap_t( &(DList.name2id_map), name );
-
- if ( id )
+name2id_dict(text *name)
+{
+ Oid arg[1] = {TEXTOID};
+ bool isnull;
+ Datum pars[1] = {PointerGetDatum(name)};
+ int stat;
+ Oid id = findSNMap_t(&(DList.name2id_map), name);
+
+ if (id)
return id;
-
+
SPI_connect();
- if ( !plan_name2id ) {
- plan_name2id = SPI_saveplan( SPI_prepare( "select oid from pg_ts_dict where dict_name = $1" , 1, arg ) );
- if ( !plan_name2id )
+ if (!plan_name2id)
+ {
+ plan_name2id = SPI_saveplan(SPI_prepare("select oid from pg_ts_dict where dict_name = $1", 1, arg));
+ if (!plan_name2id)
ts_error(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_name2id, pars, " ", 1);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 )
- id=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- else
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ id = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ else
ts_error(ERROR, "No dictionary with name '%s'", text2char(name));
SPI_finish();
- addSNMap_t( &(DList.name2id_map), name, id );
+ addSNMap_t(&(DList.name2id_map), name, id);
return id;
}
/******sql-level interface******/
PG_FUNCTION_INFO_V1(lexize);
-Datum lexize(PG_FUNCTION_ARGS);
+Datum lexize(PG_FUNCTION_ARGS);
Datum
-lexize(PG_FUNCTION_ARGS) {
- text *in=PG_GETARG_TEXT_P(1);
- DictInfo *dict = finddict( PG_GETARG_OID(0) );
- char **res, **ptr;
- Datum *da;
- ArrayType *a;
-
-
- ptr = res = (char**)DatumGetPointer(
- FunctionCall3(&(dict->lexize_info),
- PointerGetDatum(dict->dictionary),
- PointerGetDatum(VARDATA(in)),
- Int32GetDatum(VARSIZE(in)-VARHDRSZ)
- )
- );
+lexize(PG_FUNCTION_ARGS)
+{
+ text *in = PG_GETARG_TEXT_P(1);
+ DictInfo *dict = finddict(PG_GETARG_OID(0));
+ char **res,
+ **ptr;
+ Datum *da;
+ ArrayType *a;
+
+
+ ptr = res = (char **) DatumGetPointer(
+ FunctionCall3(&(dict->lexize_info),
+ PointerGetDatum(dict->dictionary),
+ PointerGetDatum(VARDATA(in)),
+ Int32GetDatum(VARSIZE(in) - VARHDRSZ)
+ )
+ );
PG_FREE_IF_COPY(in, 1);
- if ( !res ) {
- if (PG_NARGS() > 2)
+ if (!res)
+ {
+ if (PG_NARGS() > 2)
PG_RETURN_POINTER(NULL);
else
PG_RETURN_NULL();
}
- while(*ptr) ptr++;
- da = (Datum*)palloc(sizeof(Datum)*(ptr-res+1));
- ptr=res;
- while(*ptr) {
- da[ ptr-res ] = PointerGetDatum( char2text(*ptr) );
+ while (*ptr)
+ ptr++;
+ da = (Datum *) palloc(sizeof(Datum) * (ptr - res + 1));
+ ptr = res;
+ while (*ptr)
+ {
+ da[ptr - res] = PointerGetDatum(char2text(*ptr));
ptr++;
}
a = construct_array(
- da,
- ptr-res,
- TEXTOID,
- -1,
- false,
- 'i'
- );
-
- ptr=res;
- while(*ptr) {
- pfree( DatumGetPointer(da[ ptr-res ]) );
- pfree( *ptr );
+ da,
+ ptr - res,
+ TEXTOID,
+ -1,
+ false,
+ 'i'
+ );
+
+ ptr = res;
+ while (*ptr)
+ {
+ pfree(DatumGetPointer(da[ptr - res]));
+ pfree(*ptr);
ptr++;
}
pfree(res);
pfree(da);
-
- PG_RETURN_POINTER(a);
+
+ PG_RETURN_POINTER(a);
}
PG_FUNCTION_INFO_V1(lexize_byname);
-Datum lexize_byname(PG_FUNCTION_ARGS);
-Datum
-lexize_byname(PG_FUNCTION_ARGS) {
- text *dictname=PG_GETARG_TEXT_P(0);
- Datum res;
+Datum lexize_byname(PG_FUNCTION_ARGS);
+Datum
+lexize_byname(PG_FUNCTION_ARGS)
+{
+ text *dictname = PG_GETARG_TEXT_P(0);
+ Datum res;
strdup("simple");
- res=DirectFunctionCall3(
- lexize,
- ObjectIdGetDatum(name2id_dict(dictname)),
- PG_GETARG_DATUM(1),
- (Datum)0
- );
+ res = DirectFunctionCall3(
+ lexize,
+ ObjectIdGetDatum(name2id_dict(dictname)),
+ PG_GETARG_DATUM(1),
+ (Datum) 0
+ );
PG_FREE_IF_COPY(dictname, 0);
- if (res)
- PG_RETURN_DATUM(res);
- else
+ if (res)
+ PG_RETURN_DATUM(res);
+ else
PG_RETURN_NULL();
}
-static Oid currect_dictionary_id=0;
+static Oid currect_dictionary_id = 0;
PG_FUNCTION_INFO_V1(set_curdict);
-Datum set_curdict(PG_FUNCTION_ARGS);
+Datum set_curdict(PG_FUNCTION_ARGS);
Datum
-set_curdict(PG_FUNCTION_ARGS) {
+set_curdict(PG_FUNCTION_ARGS)
+{
finddict(PG_GETARG_OID(0));
- currect_dictionary_id=PG_GETARG_OID(0);
+ currect_dictionary_id = PG_GETARG_OID(0);
PG_RETURN_VOID();
}
PG_FUNCTION_INFO_V1(set_curdict_byname);
-Datum set_curdict_byname(PG_FUNCTION_ARGS);
+Datum set_curdict_byname(PG_FUNCTION_ARGS);
Datum
-set_curdict_byname(PG_FUNCTION_ARGS) {
- text *dictname=PG_GETARG_TEXT_P(0);
+set_curdict_byname(PG_FUNCTION_ARGS)
+{
+ text *dictname = PG_GETARG_TEXT_P(0);
DirectFunctionCall1(
- set_curdict,
- ObjectIdGetDatum( name2id_dict(dictname) )
- );
+ set_curdict,
+ ObjectIdGetDatum(name2id_dict(dictname))
+ );
PG_FREE_IF_COPY(dictname, 0);
PG_RETURN_VOID();
}
PG_FUNCTION_INFO_V1(lexize_bycurrent);
-Datum lexize_bycurrent(PG_FUNCTION_ARGS);
-Datum
-lexize_bycurrent(PG_FUNCTION_ARGS) {
- Datum res;
- if ( currect_dictionary_id == 0 )
+Datum lexize_bycurrent(PG_FUNCTION_ARGS);
+Datum
+lexize_bycurrent(PG_FUNCTION_ARGS)
+{
+ Datum res;
+
+ if (currect_dictionary_id == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("no currect dictionary"),
errhint("Execute select set_curdict().")));
res = DirectFunctionCall3(
- lexize,
- ObjectIdGetDatum(currect_dictionary_id),
- PG_GETARG_DATUM(0),
- (Datum)0
- );
- if (res)
+ lexize,
+ ObjectIdGetDatum(currect_dictionary_id),
+ PG_GETARG_DATUM(0),
+ (Datum) 0
+ );
+ if (res)
PG_RETURN_DATUM(res);
- else
+ else
PG_RETURN_NULL();
}
-
-
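
finddict() above implements a small self-sorting cache: bsearch() the already-loaded dictionaries, and on a miss grow the array (16 slots, then doubling), append, qsort(), and look again. A standalone sketch of that strategy, with an illustrative Entry type in place of DictInfo and a payload line standing in for the SPI work that init_dict() does:

#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned id; int payload; } Entry;

static Entry *list = NULL;
static int len = 0, reallen = 0;

static int
cmp_entry(const void *a, const void *b)
{
    return (int) (((const Entry *) a)->id - ((const Entry *) b)->id);
}

static Entry *
find_entry(unsigned id)
{
    Entry       key, *hit;

    key.id = id;
    if (len > 0 &&
        (hit = bsearch(&key, list, len, sizeof(Entry), cmp_entry)) != NULL)
        return hit;

    if (len == reallen)                 /* grow: 16, 32, 64, ... */
    {
        reallen = reallen ? 2 * reallen : 16;
        list = realloc(list, reallen * sizeof(Entry));
        if (!list)
            abort();
    }
    list[len].id = id;
    list[len].payload = (int) id * 10;  /* stand-in for init_dict() work */
    len++;
    qsort(list, len, sizeof(Entry), cmp_entry);
    return find_entry(id);              /* qsort changed order, re-search */
}

int
main(void)
{
    printf("%d %d %d\n",
           find_entry(42)->payload, find_entry(7)->payload,
           find_entry(42)->payload);
    return 0;
}

The tail-recursive retry mirrors the "qsort changed order!!" comment in finddict(): after sorting, the freshly appended slot may have moved, so the pointer has to be located again rather than returned directly.
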
diff --git a/contrib/tsearch2/dict.h b/contrib/tsearch2/dict.h
index bbbbfc47a51..86ea42263e5 100644
--- a/contrib/tsearch2/dict.h
+++ b/contrib/tsearch2/dict.h
@@ -3,36 +3,39 @@
#include "postgres.h"
#include "fmgr.h"
-typedef struct {
- int len;
- char **stop;
- char* (*wordop)(char*);
-} StopList;
-
-void sortstoplist(StopList *s);
-void freestoplist(StopList *s);
-void readstoplist(text *in, StopList *s);
-bool searchstoplist(StopList *s, char *key);
-char* lowerstr(char *str);
-
-typedef struct {
- Oid dict_id;
- FmgrInfo lexize_info;
- void *dictionary;
-} DictInfo;
-
-void init_dict(Oid id, DictInfo *dict);
-DictInfo* finddict(Oid id);
-Oid name2id_dict(text *name);
-void reset_dict(void);
+typedef struct
+{
+ int len;
+ char **stop;
+ char *(*wordop) (char *);
+} StopList;
+
+void sortstoplist(StopList * s);
+void freestoplist(StopList * s);
+void readstoplist(text *in, StopList * s);
+bool searchstoplist(StopList * s, char *key);
+char *lowerstr(char *str);
+
+typedef struct
+{
+ Oid dict_id;
+ FmgrInfo lexize_info;
+ void *dictionary;
+} DictInfo;
+
+void init_dict(Oid id, DictInfo * dict);
+DictInfo *finddict(Oid id);
+Oid name2id_dict(text *name);
+void reset_dict(void);
/* simple parser of cfg string */
-typedef struct {
- char *key;
- char *value;
-} Map;
+typedef struct
+{
+ char *key;
+ char *value;
+} Map;
-void parse_cfgdict(text *in, Map **m);
+void parse_cfgdict(text *in, Map ** m);
#endif
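
StopList above carries a normalization callback as a function pointer (wordop, which the dictionaries set to lowerstr). A small self-contained sketch of that kind of hook, with illustrative names; this hunk does not show where the real code invokes it (load time, lookup time, or both), so here it is simply applied before the comparison, and the linear scan stands in for the sorted list plus bsearch() the real stop-list code uses.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    int         len;
    const char **stop;
    char       *(*wordop) (char *);
} MiniStopList;

static char *
to_lower_ascii(char *str)
{
    char       *p;

    for (p = str; *p; p++)
        *p = (char) tolower((unsigned char) *p);
    return str;
}

static int
in_stoplist(const MiniStopList *s, char *key)
{
    int         i;

    if (s->wordop)
        key = s->wordop(key);       /* normalize before comparing */
    for (i = 0; i < s->len; i++)
        if (strcmp(s->stop[i], key) == 0)
            return 1;
    return 0;
}

int
main(void)
{
    static const char *words[] = {"and", "of", "the"};
    MiniStopList sl = {3, words, to_lower_ascii};
    char        w1[] = "The", w2[] = "tsearch";

    printf("%d %d\n", in_stoplist(&sl, w1), in_stoplist(&sl, w2));
    return 0;
}
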
diff --git a/contrib/tsearch2/dict_ex.c b/contrib/tsearch2/dict_ex.c
index ead96106871..a8fb20453ba 100644
--- a/contrib/tsearch2/dict_ex.c
+++ b/contrib/tsearch2/dict_ex.c
@@ -1,5 +1,5 @@
-/*
- * example of dictionary
+/*
+ * example of dictionary
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <errno.h>
@@ -11,30 +11,35 @@
#include "dict.h"
#include "common.h"
-typedef struct {
+typedef struct
+{
StopList stoplist;
-} DictExample;
+} DictExample;
PG_FUNCTION_INFO_V1(dex_init);
-Datum dex_init(PG_FUNCTION_ARGS);
+Datum dex_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(dex_lexize);
-Datum dex_lexize(PG_FUNCTION_ARGS);
+Datum dex_lexize(PG_FUNCTION_ARGS);
-Datum
-dex_init(PG_FUNCTION_ARGS) {
- DictExample *d = (DictExample*)malloc( sizeof(DictExample) );
+Datum
+dex_init(PG_FUNCTION_ARGS)
+{
+ DictExample *d = (DictExample *) malloc(sizeof(DictExample));
- if ( !d )
+ if (!d)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- memset(d,0,sizeof(DictExample));
+ memset(d, 0, sizeof(DictExample));
+
+ d->stoplist.wordop = lowerstr;
+
+ if (!PG_ARGISNULL(0) && PG_GETARG_POINTER(0) != NULL)
+ {
+ text *in = PG_GETARG_TEXT_P(0);
- d->stoplist.wordop=lowerstr;
-
- if ( !PG_ARGISNULL(0) && PG_GETARG_POINTER(0)!=NULL ) {
- text *in = PG_GETARG_TEXT_P(0);
readstoplist(in, &(d->stoplist));
sortstoplist(&(d->stoplist));
PG_FREE_IF_COPY(in, 0);
@@ -44,18 +49,21 @@ dex_init(PG_FUNCTION_ARGS) {
}
Datum
-dex_lexize(PG_FUNCTION_ARGS) {
- DictExample *d = (DictExample*)PG_GETARG_POINTER(0);
- char *in = (char*)PG_GETARG_POINTER(1);
- char *txt = pnstrdup(in, PG_GETARG_INT32(2));
- char **res=palloc(sizeof(char*)*2);
+dex_lexize(PG_FUNCTION_ARGS)
+{
+ DictExample *d = (DictExample *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ char *txt = pnstrdup(in, PG_GETARG_INT32(2));
+ char **res = palloc(sizeof(char *) * 2);
- if ( *txt=='\0' || searchstoplist(&(d->stoplist),txt) ) {
+ if (*txt == '\0' || searchstoplist(&(d->stoplist), txt))
+ {
pfree(txt);
- res[0]=NULL;
- } else
- res[0]=txt;
- res[1]=NULL;
+ res[0] = NULL;
+ }
+ else
+ res[0] = txt;
+ res[1] = NULL;
PG_RETURN_POINTER(res);
}
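
dex_lexize() above shows the result contract a tsearch2 dictionary follows: it hands back a NULL-terminated array of lexeme strings, and an empty array (res[0] == NULL) means the input was a stop word. A standalone sketch of that contract and of the walk a caller does over it; toy_lexize()/consume() are illustrative stand-ins for the fmgr-called dictionary and its caller, and plain malloc/free replace palloc/pfree:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **
toy_lexize(const char *word)
{
    char      **res = malloc(2 * sizeof(char *));

    if (strcmp(word, "the") == 0)   /* pretend "the" is a stop word */
        res[0] = NULL;
    else
        res[0] = strdup(word);
    res[1] = NULL;                  /* array terminator, as in dex_lexize */
    return res;
}

static void
consume(const char *word)
{
    char      **res = toy_lexize(word);
    char      **ptr;

    for (ptr = res; *ptr; ptr++)    /* same walk lexize() does over the result */
        printf("lexeme: %s\n", *ptr);
    for (ptr = res; *ptr; ptr++)
        free(*ptr);
    free(res);
}

int
main(void)
{
    consume("databases");
    consume("the");
    return 0;
}
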
diff --git a/contrib/tsearch2/dict_ispell.c b/contrib/tsearch2/dict_ispell.c
index c053adfad0b..e3a100fa013 100644
--- a/contrib/tsearch2/dict_ispell.c
+++ b/contrib/tsearch2/dict_ispell.c
@@ -1,4 +1,4 @@
-/*
+/*
* ISpell interface
* Teodor Sigaev <teodor@sigaev.ru>
*/
@@ -12,96 +12,117 @@
#include "common.h"
#include "ispell/spell.h"
-typedef struct {
+typedef struct
+{
StopList stoplist;
IspellDict obj;
-} DictISpell;
+} DictISpell;
PG_FUNCTION_INFO_V1(spell_init);
-Datum spell_init(PG_FUNCTION_ARGS);
+Datum spell_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(spell_lexize);
-Datum spell_lexize(PG_FUNCTION_ARGS);
+Datum spell_lexize(PG_FUNCTION_ARGS);
static void
-freeDictISpell(DictISpell *d) {
+freeDictISpell(DictISpell * d)
+{
FreeIspell(&(d->obj));
freestoplist(&(d->stoplist));
free(d);
}
-Datum
-spell_init(PG_FUNCTION_ARGS) {
- DictISpell *d;
- Map *cfg, *pcfg;
- text *in;
- bool affloaded=false, dictloaded=false, stoploaded=false;
-
- if ( PG_ARGISNULL(0) || PG_GETARG_POINTER(0)==NULL )
+Datum
+spell_init(PG_FUNCTION_ARGS)
+{
+ DictISpell *d;
+ Map *cfg,
+ *pcfg;
+ text *in;
+ bool affloaded = false,
+ dictloaded = false,
+ stoploaded = false;
+
+ if (PG_ARGISNULL(0) || PG_GETARG_POINTER(0) == NULL)
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("ISpell confguration error")));
-
- d = (DictISpell*)malloc( sizeof(DictISpell) );
- if ( !d )
+
+ d = (DictISpell *) malloc(sizeof(DictISpell));
+ if (!d)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- memset(d,0,sizeof(DictISpell));
- d->stoplist.wordop=lowerstr;
+ memset(d, 0, sizeof(DictISpell));
+ d->stoplist.wordop = lowerstr;
in = PG_GETARG_TEXT_P(0);
- parse_cfgdict(in,&cfg);
+ parse_cfgdict(in, &cfg);
PG_FREE_IF_COPY(in, 0);
- pcfg=cfg;
- while(pcfg->key) {
- if ( strcasecmp("DictFile", pcfg->key) == 0 ) {
- if ( dictloaded ) {
+ pcfg = cfg;
+ while (pcfg->key)
+ {
+ if (strcasecmp("DictFile", pcfg->key) == 0)
+ {
+ if (dictloaded)
+ {
freeDictISpell(d);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("dictionary already loaded")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("dictionary already loaded")));
}
- if ( ImportDictionary(&(d->obj), pcfg->value) ) {
+ if (ImportDictionary(&(d->obj), pcfg->value))
+ {
freeDictISpell(d);
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not load dictionary file \"%s\"",
pcfg->value)));
}
- dictloaded=true;
- } else if ( strcasecmp("AffFile", pcfg->key) == 0 ) {
- if ( affloaded ) {
+ dictloaded = true;
+ }
+ else if (strcasecmp("AffFile", pcfg->key) == 0)
+ {
+ if (affloaded)
+ {
freeDictISpell(d);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("affixes already loaded")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("affixes already loaded")));
}
- if ( ImportAffixes(&(d->obj), pcfg->value) ) {
+ if (ImportAffixes(&(d->obj), pcfg->value))
+ {
freeDictISpell(d);
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not load affix file \"%s\"",
pcfg->value)));
}
- affloaded=true;
- } else if ( strcasecmp("StopFile", pcfg->key) == 0 ) {
- text *tmp=char2text(pcfg->value);
- if ( stoploaded ) {
+ affloaded = true;
+ }
+ else if (strcasecmp("StopFile", pcfg->key) == 0)
+ {
+ text *tmp = char2text(pcfg->value);
+
+ if (stoploaded)
+ {
freeDictISpell(d);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("stop words already loaded")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("stop words already loaded")));
}
readstoplist(tmp, &(d->stoplist));
sortstoplist(&(d->stoplist));
pfree(tmp);
- stoploaded=true;
- } else {
+ stoploaded = true;
+ }
+ else
+ {
freeDictISpell(d);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unrecognized option: %s => %s",
- pcfg->key, pcfg->value)));
+ pcfg->key, pcfg->value)));
}
pfree(pcfg->key);
pfree(pcfg->value);
@@ -109,15 +130,20 @@ spell_init(PG_FUNCTION_ARGS) {
}
pfree(cfg);
- if ( affloaded && dictloaded ) {
+ if (affloaded && dictloaded)
+ {
SortDictionary(&(d->obj));
SortAffixes(&(d->obj));
- } else if ( !affloaded ) {
+ }
+ else if (!affloaded)
+ {
freeDictISpell(d);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("no affixes")));
- } else {
+ }
+ else
+ {
freeDictISpell(d);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -128,37 +154,43 @@ spell_init(PG_FUNCTION_ARGS) {
}
Datum
-spell_lexize(PG_FUNCTION_ARGS) {
- DictISpell *d = (DictISpell*)PG_GETARG_POINTER(0);
- char *in = (char*)PG_GETARG_POINTER(1);
- char *txt;
- char **res;
- char **ptr, **cptr;
-
- if ( !PG_GETARG_INT32(2) )
+spell_lexize(PG_FUNCTION_ARGS)
+{
+ DictISpell *d = (DictISpell *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ char *txt;
+ char **res;
+ char **ptr,
+ **cptr;
+
+ if (!PG_GETARG_INT32(2))
PG_RETURN_POINTER(NULL);
- res=palloc(sizeof(char*)*2);
+ res = palloc(sizeof(char *) * 2);
txt = pnstrdup(in, PG_GETARG_INT32(2));
- res=NormalizeWord(&(d->obj), txt);
+ res = NormalizeWord(&(d->obj), txt);
pfree(txt);
- if ( res==NULL )
+ if (res == NULL)
PG_RETURN_POINTER(NULL);
- ptr=cptr=res;
- while(*ptr) {
- if ( searchstoplist(&(d->stoplist),*ptr) ) {
+ ptr = cptr = res;
+ while (*ptr)
+ {
+ if (searchstoplist(&(d->stoplist), *ptr))
+ {
pfree(*ptr);
- *ptr=NULL;
+ *ptr = NULL;
+ ptr++;
+ }
+ else
+ {
+ *cptr = *ptr;
+ cptr++;
ptr++;
- } else {
- *cptr=*ptr;
- cptr++; ptr++;
}
}
- *cptr=NULL;
+ *cptr = NULL;
PG_RETURN_POINTER(res);
}
-
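
The tail of spell_lexize() above compacts its result in place with two pointers: ptr reads every entry of the NULL-terminated array, cptr writes back only the entries that are not stop words, and the array is re-terminated at the end. The same idea in a self-contained form; is_stop() and the sample array are illustrative, and the real code also pfrees the lexemes it drops.

#include <stdio.h>
#include <string.h>

static int
is_stop(const char *w)
{
    return strcmp(w, "and") == 0 || strcmp(w, "the") == 0;
}

int
main(void)
{
    char       *res[] = {"quick", "and", "brown", "the", "fox", NULL};
    char      **ptr = res,
              **cptr = res;

    while (*ptr)
    {
        if (is_stop(*ptr))
            ptr++;              /* skip (the real code also frees it) */
        else
            *cptr++ = *ptr++;   /* keep: copy down and advance both */
    }
    *cptr = NULL;

    for (ptr = res; *ptr; ptr++)
        printf("%s\n", *ptr);
    return 0;
}
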
diff --git a/contrib/tsearch2/dict_snowball.c b/contrib/tsearch2/dict_snowball.c
index 103d87d7f99..51dba044499 100644
--- a/contrib/tsearch2/dict_snowball.c
+++ b/contrib/tsearch2/dict_snowball.c
@@ -1,6 +1,6 @@
-/*
+/*
* example of Snowball dictionary
- * http://snowball.tartarus.org/
+ * http://snowball.tartarus.org/
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <stdlib.h>
@@ -14,103 +14,118 @@
#include "snowball/english_stem.h"
#include "snowball/russian_stem.h"
-typedef struct {
+typedef struct
+{
struct SN_env *z;
StopList stoplist;
- int (*stem)(struct SN_env * z);
-} DictSnowball;
+ int (*stem) (struct SN_env * z);
+} DictSnowball;
PG_FUNCTION_INFO_V1(snb_en_init);
-Datum snb_en_init(PG_FUNCTION_ARGS);
+Datum snb_en_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(snb_ru_init);
-Datum snb_ru_init(PG_FUNCTION_ARGS);
+Datum snb_ru_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(snb_lexize);
-Datum snb_lexize(PG_FUNCTION_ARGS);
+Datum snb_lexize(PG_FUNCTION_ARGS);
-Datum
-snb_en_init(PG_FUNCTION_ARGS) {
- DictSnowball *d = (DictSnowball*)malloc( sizeof(DictSnowball) );
+Datum
+snb_en_init(PG_FUNCTION_ARGS)
+{
+ DictSnowball *d = (DictSnowball *) malloc(sizeof(DictSnowball));
- if ( !d )
+ if (!d)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- memset(d,0,sizeof(DictSnowball));
- d->stoplist.wordop=lowerstr;
-
- if ( !PG_ARGISNULL(0) && PG_GETARG_POINTER(0)!=NULL ) {
- text *in = PG_GETARG_TEXT_P(0);
+ memset(d, 0, sizeof(DictSnowball));
+ d->stoplist.wordop = lowerstr;
+
+ if (!PG_ARGISNULL(0) && PG_GETARG_POINTER(0) != NULL)
+ {
+ text *in = PG_GETARG_TEXT_P(0);
+
readstoplist(in, &(d->stoplist));
sortstoplist(&(d->stoplist));
PG_FREE_IF_COPY(in, 0);
}
d->z = english_create_env();
- if (!d->z) {
+ if (!d->z)
+ {
freestoplist(&(d->stoplist));
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- d->stem=english_stem;
+ d->stem = english_stem;
PG_RETURN_POINTER(d);
}
-Datum
-snb_ru_init(PG_FUNCTION_ARGS) {
- DictSnowball *d = (DictSnowball*)malloc( sizeof(DictSnowball) );
+Datum
+snb_ru_init(PG_FUNCTION_ARGS)
+{
+ DictSnowball *d = (DictSnowball *) malloc(sizeof(DictSnowball));
- if ( !d )
+ if (!d)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- memset(d,0,sizeof(DictSnowball));
- d->stoplist.wordop=lowerstr;
-
- if ( !PG_ARGISNULL(0) && PG_GETARG_POINTER(0)!=NULL ) {
- text *in = PG_GETARG_TEXT_P(0);
+ memset(d, 0, sizeof(DictSnowball));
+ d->stoplist.wordop = lowerstr;
+
+ if (!PG_ARGISNULL(0) && PG_GETARG_POINTER(0) != NULL)
+ {
+ text *in = PG_GETARG_TEXT_P(0);
+
readstoplist(in, &(d->stoplist));
sortstoplist(&(d->stoplist));
PG_FREE_IF_COPY(in, 0);
}
d->z = russian_create_env();
- if (!d->z) {
+ if (!d->z)
+ {
freestoplist(&(d->stoplist));
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- d->stem=russian_stem;
+ d->stem = russian_stem;
PG_RETURN_POINTER(d);
}
Datum
-snb_lexize(PG_FUNCTION_ARGS) {
- DictSnowball *d = (DictSnowball*)PG_GETARG_POINTER(0);
- char *in = (char*)PG_GETARG_POINTER(1);
- char *txt = pnstrdup(in, PG_GETARG_INT32(2));
- char **res=palloc(sizeof(char*)*2);
-
- if ( *txt=='\0' || searchstoplist(&(d->stoplist),txt) ) {
+snb_lexize(PG_FUNCTION_ARGS)
+{
+ DictSnowball *d = (DictSnowball *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ char *txt = pnstrdup(in, PG_GETARG_INT32(2));
+ char **res = palloc(sizeof(char *) * 2);
+
+ if (*txt == '\0' || searchstoplist(&(d->stoplist), txt))
+ {
pfree(txt);
- res[0]=NULL;
- } else {
+ res[0] = NULL;
+ }
+ else
+ {
SN_set_current(d->z, strlen(txt), txt);
- (d->stem)(d->z);
- if ( d->z->p && d->z->l ) {
- txt=repalloc(txt, d->z->l+1);
- memcpy( txt, d->z->p, d->z->l);
- txt[d->z->l]='\0';
- }
- res[0]=txt;
+ (d->stem) (d->z);
+ if (d->z->p && d->z->l)
+ {
+ txt = repalloc(txt, d->z->l + 1);
+ memcpy(txt, d->z->p, d->z->l);
+ txt[d->z->l] = '\0';
+ }
+ res[0] = txt;
}
- res[1]=NULL;
+ res[1] = NULL;
PG_RETURN_POINTER(res);
}
-
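
snb_lexize() above drives the stemmer through a function pointer stored in the dictionary object and then copies the result, which comes back as a buffer plus a length with no terminating NUL. A standalone sketch of that calling pattern; toy_env and toy_english_stem() are crude illustrative stand-ins for the Snowball SN_env machinery and english_stem(), and strdup/realloc replace pnstrdup/repalloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char       *p;              /* stemmer output buffer */
    int         l;              /* output length, not NUL-terminated */
} toy_env;

typedef struct
{
    toy_env    *z;
    int         (*stem) (toy_env *z);
} ToyDict;

static toy_env env;

static int
toy_english_stem(toy_env *z)
{
    static char out[64];
    int         n = z->l;

    /* crude stand-in: strip a trailing "ing" */
    if (n > 3 && strncmp(z->p + n - 3, "ing", 3) == 0)
        n -= 3;
    memcpy(out, z->p, n);
    z->p = out;
    z->l = n;
    return 1;
}

int
main(void)
{
    ToyDict     d = {&env, toy_english_stem};
    char       *txt = strdup("searching");

    d.z->p = txt;
    d.z->l = (int) strlen(txt);
    (d.stem) (d.z);
    if (d.z->p && d.z->l)
    {
        txt = realloc(txt, d.z->l + 1);     /* as snb_lexize does with repalloc */
        memcpy(txt, d.z->p, d.z->l);
        txt[d.z->l] = '\0';
    }
    printf("%s\n", txt);
    free(txt);
    return 0;
}
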
diff --git a/contrib/tsearch2/dict_syn.c b/contrib/tsearch2/dict_syn.c
index 34f74cf0ff7..8364223be40 100644
--- a/contrib/tsearch2/dict_syn.c
+++ b/contrib/tsearch2/dict_syn.c
@@ -1,4 +1,4 @@
-/*
+/*
* ISpell interface
* Teodor Sigaev <teodor@sigaev.ru>
*/
@@ -13,93 +13,106 @@
#include "common.h"
#define SYNBUFLEN 4096
-typedef struct {
- char *in;
- char *out;
-} Syn;
-
-typedef struct {
- int len;
- Syn *syn;
-} DictSyn;
+typedef struct
+{
+ char *in;
+ char *out;
+} Syn;
+
+typedef struct
+{
+ int len;
+ Syn *syn;
+} DictSyn;
PG_FUNCTION_INFO_V1(syn_init);
-Datum syn_init(PG_FUNCTION_ARGS);
+Datum syn_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(syn_lexize);
-Datum syn_lexize(PG_FUNCTION_ARGS);
+Datum syn_lexize(PG_FUNCTION_ARGS);
static char *
-findwrd(char *in, char **end) {
- char *start;
+findwrd(char *in, char **end)
+{
+ char *start;
- *end=NULL;
- while(*in && isspace(*in))
+ *end = NULL;
+ while (*in && isspace(*in))
in++;
- if ( !in )
+ if (!in)
return NULL;
- start=in;
+ start = in;
- while(*in && !isspace(*in))
+ while (*in && !isspace(*in))
in++;
- *end=in;
+ *end = in;
return start;
}
static int
-compareSyn(const void *a, const void *b) {
- return strcmp( ((Syn*)a)->in, ((Syn*)b)->in );
+compareSyn(const void *a, const void *b)
+{
+ return strcmp(((Syn *) a)->in, ((Syn *) b)->in);
}
-Datum
-syn_init(PG_FUNCTION_ARGS) {
- text *in;
- DictSyn *d;
- int cur=0;
- FILE *fin;
- char *filename;
- char buf[SYNBUFLEN];
- char *starti,*starto,*end=NULL;
- int slen;
-
- if ( PG_ARGISNULL(0) || PG_GETARG_POINTER(0)==NULL )
+Datum
+syn_init(PG_FUNCTION_ARGS)
+{
+ text *in;
+ DictSyn *d;
+ int cur = 0;
+ FILE *fin;
+ char *filename;
+ char buf[SYNBUFLEN];
+ char *starti,
+ *starto,
+ *end = NULL;
+ int slen;
+
+ if (PG_ARGISNULL(0) || PG_GETARG_POINTER(0) == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("NULL config")));
in = PG_GETARG_TEXT_P(0);
- if ( VARSIZE(in) - VARHDRSZ == 0 )
+ if (VARSIZE(in) - VARHDRSZ == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("VOID config")));
- filename=text2char(in);
+ filename = text2char(in);
PG_FREE_IF_COPY(in, 0);
- if ( (fin=fopen(filename,"r")) == NULL )
+ if ((fin = fopen(filename, "r")) == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m",
- filename)));
+ filename)));
- d = (DictSyn*)malloc( sizeof(DictSyn) );
- if ( !d ) {
+ d = (DictSyn *) malloc(sizeof(DictSyn));
+ if (!d)
+ {
fclose(fin);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- memset(d,0,sizeof(DictSyn));
+ memset(d, 0, sizeof(DictSyn));
- while( fgets(buf,SYNBUFLEN,fin) ) {
- slen = strlen(buf)-1;
+ while (fgets(buf, SYNBUFLEN, fin))
+ {
+ slen = strlen(buf) - 1;
buf[slen] = '\0';
- if ( *buf=='\0' ) continue;
- if (cur==d->len) {
- d->len = (d->len) ? 2*d->len : 16;
- d->syn=(Syn*)realloc( d->syn, sizeof(Syn)*d->len );
- if ( !d->syn ) {
+ if (*buf == '\0')
+ continue;
+ if (cur == d->len)
+ {
+ d->len = (d->len) ? 2 * d->len : 16;
+ d->syn = (Syn *) realloc(d->syn, sizeof(Syn) * d->len);
+ if (!d->syn)
+ {
fclose(fin);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -107,64 +120,66 @@ syn_init(PG_FUNCTION_ARGS) {
}
}
- starti=findwrd(buf,&end);
- if ( !starti )
+ starti = findwrd(buf, &end);
+ if (!starti)
continue;
- *end='\0';
- if ( end >= buf+slen )
+ *end = '\0';
+ if (end >= buf + slen)
continue;
- starto= findwrd(end+1, &end);
- if ( !starto )
+ starto = findwrd(end + 1, &end);
+ if (!starto)
continue;
- *end='\0';
+ *end = '\0';
- d->syn[cur].in=strdup(lowerstr(starti));
- d->syn[cur].out=strdup(lowerstr(starto));
- if ( !(d->syn[cur].in && d->syn[cur].out) ) {
+ d->syn[cur].in = strdup(lowerstr(starti));
+ d->syn[cur].out = strdup(lowerstr(starto));
+ if (!(d->syn[cur].in && d->syn[cur].out))
+ {
fclose(fin);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- cur++;
+ cur++;
}
-
- fclose(fin);
-
- d->len=cur;
- if ( cur>1 )
- qsort(d->syn, d->len, sizeof(Syn), compareSyn);
+
+ fclose(fin);
+
+ d->len = cur;
+ if (cur > 1)
+ qsort(d->syn, d->len, sizeof(Syn), compareSyn);
pfree(filename);
- PG_RETURN_POINTER(d);
+ PG_RETURN_POINTER(d);
}
Datum
-syn_lexize(PG_FUNCTION_ARGS) {
- DictSyn *d = (DictSyn*)PG_GETARG_POINTER(0);
- char *in = (char*)PG_GETARG_POINTER(1);
- Syn key,*found;
- char **res=NULL;
-
- if ( !PG_GETARG_INT32(2) )
+syn_lexize(PG_FUNCTION_ARGS)
+{
+ DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ Syn key,
+ *found;
+ char **res = NULL;
+
+ if (!PG_GETARG_INT32(2))
PG_RETURN_POINTER(NULL);
- key.out=NULL;
- key.in=lowerstr(pnstrdup(in, PG_GETARG_INT32(2)));
+ key.out = NULL;
+ key.in = lowerstr(pnstrdup(in, PG_GETARG_INT32(2)));
- found=(Syn*)bsearch(&key, d->syn, d->len, sizeof(Syn), compareSyn);
+ found = (Syn *) bsearch(&key, d->syn, d->len, sizeof(Syn), compareSyn);
pfree(key.in);
- if ( !found )
+ if (!found)
PG_RETURN_POINTER(NULL);
- res=palloc(sizeof(char*)*2);
+ res = palloc(sizeof(char *) * 2);
- res[0]=pstrdup(found->out);
- res[1]=NULL;
+ res[0] = pstrdup(found->out);
+ res[1] = NULL;
- PG_RETURN_POINTER(res);
+ PG_RETURN_POINTER(res);
}
-
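
syn_lexize() above is a straight bsearch() over the Syn array that syn_init() loaded and qsorted on the .in member. A self-contained sketch of the same lookup; the hard-coded table[] replaces the file parsing in syn_init(), and the entries and probe words are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    const char *in;
    const char *out;
} ToySyn;

static int
compare_syn(const void *a, const void *b)
{
    return strcmp(((const ToySyn *) a)->in, ((const ToySyn *) b)->in);
}

static ToySyn table[] = {
    {"postgres", "postgresql"},
    {"colour", "color"},
    {"pgsql", "postgresql"},
};

static const char *
lookup(const char *word)
{
    ToySyn      key,
               *found;

    key.in = word;
    key.out = NULL;
    found = bsearch(&key, table, sizeof(table) / sizeof(table[0]),
                    sizeof(ToySyn), compare_syn);
    return found ? found->out : NULL;
}

int
main(void)
{
    const char *hit;

    /* syn_init() sorts once after loading the file; do the same here */
    qsort(table, sizeof(table) / sizeof(table[0]), sizeof(ToySyn), compare_syn);

    hit = lookup("colour");
    printf("colour -> %s\n", hit ? hit : "(no synonym)");
    hit = lookup("elephant");
    printf("elephant -> %s\n", hit ? hit : "(no synonym)");
    return 0;
}
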
diff --git a/contrib/tsearch2/ispell/spell.c b/contrib/tsearch2/ispell/spell.c
index c5ab2601496..45786cca652 100644
--- a/contrib/tsearch2/ispell/spell.c
+++ b/contrib/tsearch2/ispell/spell.c
@@ -9,518 +9,648 @@
#define MAXNORMLEN 56
-#define STRNCASECMP(x,y) (strncasecmp(x,y,strlen(y)))
+#define STRNCASECMP(x,y) (strncasecmp(x,y,strlen(y)))
-static int cmpspell(const void *s1,const void *s2){
- return(strcmp(((const SPELL*)s1)->word,((const SPELL*)s2)->word));
+static int
+cmpspell(const void *s1, const void *s2)
+{
+ return (strcmp(((const SPELL *) s1)->word, ((const SPELL *) s2)->word));
}
-static void
-strlower( char * str ) {
- unsigned char *ptr = (unsigned char *)str;
- while ( *ptr ) {
- *ptr = tolower( *ptr );
+static void
+strlower(char *str)
+{
+ unsigned char *ptr = (unsigned char *) str;
+
+ while (*ptr)
+ {
+ *ptr = tolower(*ptr);
ptr++;
}
}
/* backward string compaire for suffix tree operations */
-static int
-strbcmp(const char *s1, const char *s2) {
- int l1 = strlen(s1)-1, l2 = strlen(s2)-1;
- while (l1 >= 0 && l2 >= 0) {
- if (s1[l1] < s2[l2]) return -1;
- if (s1[l1] > s2[l2]) return 1;
- l1--; l2--;
+static int
+strbcmp(const char *s1, const char *s2)
+{
+ int l1 = strlen(s1) - 1,
+ l2 = strlen(s2) - 1;
+
+ while (l1 >= 0 && l2 >= 0)
+ {
+ if (s1[l1] < s2[l2])
+ return -1;
+ if (s1[l1] > s2[l2])
+ return 1;
+ l1--;
+ l2--;
}
- if (l1 < l2) return -1;
- if (l1 > l2) return 1;
+ if (l1 < l2)
+ return -1;
+ if (l1 > l2)
+ return 1;
return 0;
}
-static int
-strbncmp(const char *s1, const char *s2, size_t count) {
- int l1 = strlen(s1) - 1, l2 = strlen(s2) - 1, l = count;
- while (l1 >= 0 && l2 >= 0 && l > 0) {
- if (s1[l1] < s2[l2]) return -1;
- if (s1[l1] > s2[l2]) return 1;
+static int
+strbncmp(const char *s1, const char *s2, size_t count)
+{
+ int l1 = strlen(s1) - 1,
+ l2 = strlen(s2) - 1,
+ l = count;
+
+ while (l1 >= 0 && l2 >= 0 && l > 0)
+ {
+ if (s1[l1] < s2[l2])
+ return -1;
+ if (s1[l1] > s2[l2])
+ return 1;
l1--;
l2--;
l--;
}
- if (l == 0) return 0;
- if (l1 < l2) return -1;
- if (l1 > l2) return 1;
+ if (l == 0)
+ return 0;
+ if (l1 < l2)
+ return -1;
+ if (l1 > l2)
+ return 1;
return 0;
}
-static int
-cmpaffix(const void *s1,const void *s2){
- if (((const AFFIX*)s1)->type < ((const AFFIX*)s2)->type) return -1;
- if (((const AFFIX*)s1)->type > ((const AFFIX*)s2)->type) return 1;
- if (((const AFFIX*)s1)->type == 'p')
- return(strcmp(((const AFFIX*)s1)->repl,((const AFFIX*)s2)->repl));
- else
- return(strbcmp(((const AFFIX*)s1)->repl,((const AFFIX*)s2)->repl));
+static int
+cmpaffix(const void *s1, const void *s2)
+{
+ if (((const AFFIX *) s1)->type < ((const AFFIX *) s2)->type)
+ return -1;
+ if (((const AFFIX *) s1)->type > ((const AFFIX *) s2)->type)
+ return 1;
+ if (((const AFFIX *) s1)->type == 'p')
+ return (strcmp(((const AFFIX *) s1)->repl, ((const AFFIX *) s2)->repl));
+ else
+ return (strbcmp(((const AFFIX *) s1)->repl, ((const AFFIX *) s2)->repl));
}
-int
-AddSpell(IspellDict * Conf,const char * word,const char *flag){
- if(Conf->nspell>=Conf->mspell){
- if(Conf->mspell){
- Conf->mspell+=1024*20;
- Conf->Spell=(SPELL *)realloc(Conf->Spell,Conf->mspell*sizeof(SPELL));
- }else{
- Conf->mspell=1024*20;
- Conf->Spell=(SPELL *)malloc(Conf->mspell*sizeof(SPELL));
+int
+AddSpell(IspellDict * Conf, const char *word, const char *flag)
+{
+ if (Conf->nspell >= Conf->mspell)
+ {
+ if (Conf->mspell)
+ {
+ Conf->mspell += 1024 * 20;
+ Conf->Spell = (SPELL *) realloc(Conf->Spell, Conf->mspell * sizeof(SPELL));
+ }
+ else
+ {
+ Conf->mspell = 1024 * 20;
+ Conf->Spell = (SPELL *) malloc(Conf->mspell * sizeof(SPELL));
}
- if ( Conf->Spell == NULL )
+ if (Conf->Spell == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- Conf->Spell[Conf->nspell].word=strdup(word);
- if ( !Conf->Spell[Conf->nspell].word )
+ Conf->Spell[Conf->nspell].word = strdup(word);
+ if (!Conf->Spell[Conf->nspell].word)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- strncpy(Conf->Spell[Conf->nspell].flag,flag,10);
+ strncpy(Conf->Spell[Conf->nspell].flag, flag, 10);
Conf->nspell++;
- return(0);
+ return (0);
}
-int
-ImportDictionary(IspellDict * Conf,const char *filename){
- unsigned char str[BUFSIZ];
- FILE *dict;
+int
+ImportDictionary(IspellDict * Conf, const char *filename)
+{
+ unsigned char str[BUFSIZ];
+ FILE *dict;
- if(!(dict=fopen(filename,"r")))return(1);
- while(fgets(str,sizeof(str),dict)){
+ if (!(dict = fopen(filename, "r")))
+ return (1);
+ while (fgets(str, sizeof(str), dict))
+ {
unsigned char *s;
const unsigned char *flag;
- flag = NULL;
- if((s=strchr(str,'/'))){
- *s=0;
- s++;flag=s;
- while(*s){
- if (((*s>='A')&&(*s<='Z'))||((*s>='a')&&(*s<='z')))
+ flag = NULL;
+ if ((s = strchr(str, '/')))
+ {
+ *s = 0;
+ s++;
+ flag = s;
+ while (*s)
+ {
+ if (((*s >= 'A') && (*s <= 'Z')) || ((*s >= 'a') && (*s <= 'z')))
s++;
- else {
- *s=0;
+ else
+ {
+ *s = 0;
break;
}
}
- }else{
- flag="";
}
+ else
+ flag = "";
strlower(str);
/* Dont load words if first letter is not required */
/* It allows to optimize loading at search time */
- s=str;
- while(*s){
- if(*s=='\r')*s=0;
- if(*s=='\n')*s=0;
+ s = str;
+ while (*s)
+ {
+ if (*s == '\r')
+ *s = 0;
+ if (*s == '\n')
+ *s = 0;
s++;
}
- AddSpell(Conf,str,flag);
+ AddSpell(Conf, str, flag);
}
fclose(dict);
- return(0);
+ return (0);
}
-static SPELL *
-FindWord(IspellDict * Conf, const char *word, int affixflag) {
- int l,c,r,resc,resl,resr, i;
+static SPELL *
+FindWord(IspellDict * Conf, const char *word, int affixflag)
+{
+ int l,
+ c,
+ r,
+ resc,
+ resl,
+ resr,
+ i;
- i = (int)(*word) & 255;
+ i = (int) (*word) & 255;
l = Conf->SpellTree.Left[i];
r = Conf->SpellTree.Right[i];
- if (l == -1) return (NULL);
- while(l<=r){
+ if (l == -1)
+ return (NULL);
+ while (l <= r)
+ {
c = (l + r) >> 1;
resc = strcmp(Conf->Spell[c].word, word);
- if( (resc == 0) &&
- ((affixflag == 0) || (strchr(Conf->Spell[c].flag, affixflag) != NULL)) ) {
- return(&Conf->Spell[c]);
- }
+ if ((resc == 0) &&
+ ((affixflag == 0) || (strchr(Conf->Spell[c].flag, affixflag) != NULL)))
+ return (&Conf->Spell[c]);
resl = strcmp(Conf->Spell[l].word, word);
- if( (resl == 0) &&
- ((affixflag == 0) || (strchr(Conf->Spell[l].flag, affixflag) != NULL)) ) {
- return(&Conf->Spell[l]);
- }
+ if ((resl == 0) &&
+ ((affixflag == 0) || (strchr(Conf->Spell[l].flag, affixflag) != NULL)))
+ return (&Conf->Spell[l]);
resr = strcmp(Conf->Spell[r].word, word);
- if( (resr == 0) &&
- ((affixflag == 0) || (strchr(Conf->Spell[r].flag, affixflag) != NULL)) ) {
- return(&Conf->Spell[r]);
- }
- if(resc < 0){
+ if ((resr == 0) &&
+ ((affixflag == 0) || (strchr(Conf->Spell[r].flag, affixflag) != NULL)))
+ return (&Conf->Spell[r]);
+ if (resc < 0)
+ {
l = c + 1;
r--;
- } else if(resc > 0){
+ }
+ else if (resc > 0)
+ {
r = c - 1;
l++;
- } else {
+ }
+ else
+ {
l++;
r--;
}
}
- return(NULL);
+ return (NULL);
}
-int
-AddAffix(IspellDict * Conf,int flag,const char *mask,const char *find,const char *repl,int type) {
- if(Conf->naffixes>=Conf->maffixes){
- if(Conf->maffixes){
- Conf->maffixes+=16;
- Conf->Affix = (AFFIX*)realloc((void*)Conf->Affix,Conf->maffixes*sizeof(AFFIX));
- }else{
- Conf->maffixes=16;
- Conf->Affix = (AFFIX*)malloc(Conf->maffixes * sizeof(AFFIX));
+int
+AddAffix(IspellDict * Conf, int flag, const char *mask, const char *find, const char *repl, int type)
+{
+ if (Conf->naffixes >= Conf->maffixes)
+ {
+ if (Conf->maffixes)
+ {
+ Conf->maffixes += 16;
+ Conf->Affix = (AFFIX *) realloc((void *) Conf->Affix, Conf->maffixes * sizeof(AFFIX));
+ }
+ else
+ {
+ Conf->maffixes = 16;
+ Conf->Affix = (AFFIX *) malloc(Conf->maffixes * sizeof(AFFIX));
}
- if ( Conf->Affix == NULL )
+ if (Conf->Affix == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- if (type=='s') {
- sprintf(Conf->Affix[Conf->naffixes].mask,"%s$",mask);
- } else {
- sprintf(Conf->Affix[Conf->naffixes].mask,"^%s",mask);
- }
+ if (type == 's')
+ sprintf(Conf->Affix[Conf->naffixes].mask, "%s$", mask);
+ else
+ sprintf(Conf->Affix[Conf->naffixes].mask, "^%s", mask);
Conf->Affix[Conf->naffixes].compile = 1;
- Conf->Affix[Conf->naffixes].flag=flag;
- Conf->Affix[Conf->naffixes].type=type;
-
- strcpy(Conf->Affix[Conf->naffixes].find,find);
- strcpy(Conf->Affix[Conf->naffixes].repl,repl);
- Conf->Affix[Conf->naffixes].replen=strlen(repl);
+ Conf->Affix[Conf->naffixes].flag = flag;
+ Conf->Affix[Conf->naffixes].type = type;
+
+ strcpy(Conf->Affix[Conf->naffixes].find, find);
+ strcpy(Conf->Affix[Conf->naffixes].repl, repl);
+ Conf->Affix[Conf->naffixes].replen = strlen(repl);
Conf->naffixes++;
- return(0);
+ return (0);
}
-static char *
-remove_spaces(char *dist,char *src){
-char *d,*s;
- d=dist;
- s=src;
- while(*s){
- if(*s!=' '&&*s!='-'&&*s!='\t'){
- *d=*s;
+static char *
+remove_spaces(char *dist, char *src)
+{
+ char *d,
+ *s;
+
+ d = dist;
+ s = src;
+ while (*s)
+ {
+ if (*s != ' ' && *s != '-' && *s != '\t')
+ {
+ *d = *s;
d++;
}
s++;
}
- *d=0;
- return(dist);
+ *d = 0;
+ return (dist);
}
-int
-ImportAffixes(IspellDict * Conf,const char *filename){
+int
+ImportAffixes(IspellDict * Conf, const char *filename)
+{
unsigned char str[BUFSIZ];
- unsigned char flag=0;
- unsigned char mask[BUFSIZ]="";
- unsigned char find[BUFSIZ]="";
- unsigned char repl[BUFSIZ]="";
+ unsigned char flag = 0;
+ unsigned char mask[BUFSIZ] = "";
+ unsigned char find[BUFSIZ] = "";
+ unsigned char repl[BUFSIZ] = "";
unsigned char *s;
- int i;
- int suffixes=0;
- int prefixes=0;
- FILE *affix;
-
- if(!(affix=fopen(filename,"r")))
- return(1);
-
- while(fgets(str,sizeof(str),affix)){
- if(!STRNCASECMP(str,"suffixes")){
- suffixes=1;
- prefixes=0;
+ int i;
+ int suffixes = 0;
+ int prefixes = 0;
+ FILE *affix;
+
+ if (!(affix = fopen(filename, "r")))
+ return (1);
+
+ while (fgets(str, sizeof(str), affix))
+ {
+ if (!STRNCASECMP(str, "suffixes"))
+ {
+ suffixes = 1;
+ prefixes = 0;
continue;
}
- if(!STRNCASECMP(str,"prefixes")){
- suffixes=0;
- prefixes=1;
+ if (!STRNCASECMP(str, "prefixes"))
+ {
+ suffixes = 0;
+ prefixes = 1;
continue;
}
- if(!STRNCASECMP(str,"flag ")){
- s=str+5;
- while(strchr("* ",*s))
+ if (!STRNCASECMP(str, "flag "))
+ {
+ s = str + 5;
+ while (strchr("* ", *s))
s++;
- flag=*s;
+ flag = *s;
continue;
}
- if((!suffixes)&&(!prefixes))continue;
- if((s=strchr(str,'#')))*s=0;
- if(!*str)continue;
+ if ((!suffixes) && (!prefixes))
+ continue;
+ if ((s = strchr(str, '#')))
+ *s = 0;
+ if (!*str)
+ continue;
strlower(str);
- strcpy(mask,"");
- strcpy(find,"");
- strcpy(repl,"");
- i=sscanf(str,"%[^>\n]>%[^,\n],%[^\n]",mask,find,repl);
- remove_spaces(str,repl);strcpy(repl,str);
- remove_spaces(str,find);strcpy(find,str);
- remove_spaces(str,mask);strcpy(mask,str);
- switch(i){
+ strcpy(mask, "");
+ strcpy(find, "");
+ strcpy(repl, "");
+ i = sscanf(str, "%[^>\n]>%[^,\n],%[^\n]", mask, find, repl);
+ remove_spaces(str, repl);
+ strcpy(repl, str);
+ remove_spaces(str, find);
+ strcpy(find, str);
+ remove_spaces(str, mask);
+ strcpy(mask, str);
+ switch (i)
+ {
case 3:
break;
case 2:
- if(*find != '\0'){
- strcpy(repl,find);
- strcpy(find,"");
+ if (*find != '\0')
+ {
+ strcpy(repl, find);
+ strcpy(find, "");
}
break;
default:
continue;
}
-
- AddAffix(Conf,(int)flag,mask,find,repl,suffixes?'s':'p');
-
+
+ AddAffix(Conf, (int) flag, mask, find, repl, suffixes ? 's' : 'p');
+
}
fclose(affix);
-
- return(0);
+
+ return (0);
}
-void
-SortDictionary(IspellDict * Conf){
- int CurLet = -1, Let;size_t i;
+void
+SortDictionary(IspellDict * Conf)
+{
+ int CurLet = -1,
+ Let;
+ size_t i;
- qsort((void*)Conf->Spell,Conf->nspell,sizeof(SPELL),cmpspell);
+ qsort((void *) Conf->Spell, Conf->nspell, sizeof(SPELL), cmpspell);
- for(i = 0; i < 256 ; i++ )
+ for (i = 0; i < 256; i++)
Conf->SpellTree.Left[i] = -1;
- for(i = 0; i < Conf->nspell; i++) {
- Let = (int)(*(Conf->Spell[i].word)) & 255;
- if (CurLet != Let) {
- Conf->SpellTree.Left[Let] = i;
- CurLet = Let;
- }
- Conf->SpellTree.Right[Let] = i;
+ for (i = 0; i < Conf->nspell; i++)
+ {
+ Let = (int) (*(Conf->Spell[i].word)) & 255;
+ if (CurLet != Let)
+ {
+ Conf->SpellTree.Left[Let] = i;
+ CurLet = Let;
+ }
+ Conf->SpellTree.Right[Let] = i;
}
}
-void
-SortAffixes(IspellDict * Conf) {
- int CurLetP = -1, CurLetS = -1, Let;
- AFFIX *Affix; size_t i;
-
- if (Conf->naffixes > 1)
- qsort((void*)Conf->Affix,Conf->naffixes,sizeof(AFFIX),cmpaffix);
- for(i = 0; i < 256; i++) {
- Conf->PrefixTree.Left[i] = Conf->PrefixTree.Right[i] = -1;
- Conf->SuffixTree.Left[i] = Conf->SuffixTree.Right[i] = -1;
- }
-
- for(i = 0; i < Conf->naffixes; i++) {
- Affix = &(((AFFIX*)Conf->Affix)[i]);
- if(Affix->type == 'p') {
- Let = (int)(*(Affix->repl)) & 255;
- if (CurLetP != Let) {
- Conf->PrefixTree.Left[Let] = i;
- CurLetP = Let;
- }
- Conf->PrefixTree.Right[Let] = i;
- } else {
- Let = (Affix->replen) ? (int)(Affix->repl[Affix->replen-1]) & 255 : 0;
- if (CurLetS != Let) {
- Conf->SuffixTree.Left[Let] = i;
- CurLetS = Let;
- }
- Conf->SuffixTree.Right[Let] = i;
- }
- }
+void
+SortAffixes(IspellDict * Conf)
+{
+ int CurLetP = -1,
+ CurLetS = -1,
+ Let;
+ AFFIX *Affix;
+ size_t i;
+
+ if (Conf->naffixes > 1)
+ qsort((void *) Conf->Affix, Conf->naffixes, sizeof(AFFIX), cmpaffix);
+ for (i = 0; i < 256; i++)
+ {
+ Conf->PrefixTree.Left[i] = Conf->PrefixTree.Right[i] = -1;
+ Conf->SuffixTree.Left[i] = Conf->SuffixTree.Right[i] = -1;
+ }
+
+ for (i = 0; i < Conf->naffixes; i++)
+ {
+ Affix = &(((AFFIX *) Conf->Affix)[i]);
+ if (Affix->type == 'p')
+ {
+ Let = (int) (*(Affix->repl)) & 255;
+ if (CurLetP != Let)
+ {
+ Conf->PrefixTree.Left[Let] = i;
+ CurLetP = Let;
+ }
+ Conf->PrefixTree.Right[Let] = i;
+ }
+ else
+ {
+ Let = (Affix->replen) ? (int) (Affix->repl[Affix->replen - 1]) & 255 : 0;
+ if (CurLetS != Let)
+ {
+ Conf->SuffixTree.Left[Let] = i;
+ CurLetS = Let;
+ }
+ Conf->SuffixTree.Right[Let] = i;
+ }
+ }
}
-static char *
-CheckSuffix(const char *word, size_t len, AFFIX *Affix, int *res, IspellDict *Conf) {
- regmatch_t subs[2]; /* workaround for apache&linux */
- char newword[2*MAXNORMLEN] = "";
- int err;
-
- *res = strbncmp(word, Affix->repl, Affix->replen);
- if (*res < 0) {
- return NULL;
- }
- if (*res > 0) {
- return NULL;
- }
- strcpy(newword, word);
- strcpy(newword+len-Affix->replen, Affix->find);
-
- if (Affix->compile) {
- err = regcomp(&(Affix->reg),Affix->mask,REG_EXTENDED|REG_ICASE|REG_NOSUB);
- if(err){
- /*regerror(err, &(Affix->reg), regerrstr, ERRSTRSIZE);*/
- regfree(&(Affix->reg));
- return(NULL);
- }
- Affix->compile = 0;
- }
- if(!(err=regexec(&(Affix->reg),newword,1,subs,0))){
- if(FindWord(Conf, newword, Affix->flag))
- return pstrdup(newword);
- }
- return NULL;
+static char *
+CheckSuffix(const char *word, size_t len, AFFIX * Affix, int *res, IspellDict * Conf)
+{
+ regmatch_t subs[2]; /* workaround for apache&linux */
+ char newword[2 * MAXNORMLEN] = "";
+ int err;
+
+ *res = strbncmp(word, Affix->repl, Affix->replen);
+ if (*res < 0)
+ return NULL;
+ if (*res > 0)
+ return NULL;
+ strcpy(newword, word);
+ strcpy(newword + len - Affix->replen, Affix->find);
+
+ if (Affix->compile)
+ {
+ err = regcomp(&(Affix->reg), Affix->mask, REG_EXTENDED | REG_ICASE | REG_NOSUB);
+ if (err)
+ {
+ /* regerror(err, &(Affix->reg), regerrstr, ERRSTRSIZE); */
+ regfree(&(Affix->reg));
+ return (NULL);
+ }
+ Affix->compile = 0;
+ }
+ if (!(err = regexec(&(Affix->reg), newword, 1, subs, 0)))
+ {
+ if (FindWord(Conf, newword, Affix->flag))
+ return pstrdup(newword);
+ }
+ return NULL;
}
#define NS 1
#define MAX_NORM 512
-static int
-CheckPrefix(const char *word, size_t len, AFFIX *Affix, IspellDict *Conf, int pi,
- char **forms, char ***cur ) {
- regmatch_t subs[NS*2];
- char newword[2*MAXNORMLEN] = "";
- int err, ls, res, lres;
- size_t newlen;
- AFFIX *CAffix = Conf->Affix;
-
- res = strncmp(word, Affix->repl, Affix->replen);
- if (res != 0) {
- return res;
- }
- strcpy(newword, Affix->find);
- strcat(newword, word+Affix->replen);
-
- if (Affix->compile) {
- err = regcomp(&(Affix->reg),Affix->mask,REG_EXTENDED|REG_ICASE|REG_NOSUB);
- if(err){
- /*regerror(err, &(Affix->reg), regerrstr, ERRSTRSIZE);*/
- regfree(&(Affix->reg));
- return (0);
- }
- Affix->compile = 0;
- }
- if(!(err=regexec(&(Affix->reg),newword,1,subs,0))){
- SPELL * curspell;
-
- if((curspell=FindWord(Conf, newword, Affix->flag))){
- if ((*cur - forms) < (MAX_NORM-1)) {
- **cur = pstrdup(newword);
- (*cur)++; **cur = NULL;
- }
- }
- newlen = strlen(newword);
- ls = Conf->SuffixTree.Left[pi];
- if ( ls>=0 && ((*cur - forms) < (MAX_NORM-1)) ) {
- **cur = CheckSuffix(newword, newlen, &CAffix[ls], &lres, Conf);
- if (**cur) {
- (*cur)++; **cur = NULL;
+static int
+CheckPrefix(const char *word, size_t len, AFFIX * Affix, IspellDict * Conf, int pi,
+ char **forms, char ***cur)
+{
+ regmatch_t subs[NS * 2];
+ char newword[2 * MAXNORMLEN] = "";
+ int err,
+ ls,
+ res,
+ lres;
+ size_t newlen;
+ AFFIX *CAffix = Conf->Affix;
+
+ res = strncmp(word, Affix->repl, Affix->replen);
+ if (res != 0)
+ return res;
+ strcpy(newword, Affix->find);
+ strcat(newword, word + Affix->replen);
+
+ if (Affix->compile)
+ {
+ err = regcomp(&(Affix->reg), Affix->mask, REG_EXTENDED | REG_ICASE | REG_NOSUB);
+ if (err)
+ {
+ /* regerror(err, &(Affix->reg), regerrstr, ERRSTRSIZE); */
+ regfree(&(Affix->reg));
+ return (0);
+ }
+ Affix->compile = 0;
}
- }
- }
- return 0;
+ if (!(err = regexec(&(Affix->reg), newword, 1, subs, 0)))
+ {
+ SPELL *curspell;
+
+ if ((curspell = FindWord(Conf, newword, Affix->flag)))
+ {
+ if ((*cur - forms) < (MAX_NORM - 1))
+ {
+ **cur = pstrdup(newword);
+ (*cur)++;
+ **cur = NULL;
+ }
+ }
+ newlen = strlen(newword);
+ ls = Conf->SuffixTree.Left[pi];
+ if (ls >= 0 && ((*cur - forms) < (MAX_NORM - 1)))
+ {
+ **cur = CheckSuffix(newword, newlen, &CAffix[ls], &lres, Conf);
+ if (**cur)
+ {
+ (*cur)++;
+ **cur = NULL;
+ }
+ }
+ }
+ return 0;
}
-char **
-NormalizeWord(IspellDict * Conf,char *word){
+char **
+NormalizeWord(IspellDict * Conf, char *word)
+{
/*regmatch_t subs[NS];*/
-size_t len;
-char ** forms;
-char **cur;
-AFFIX * Affix;
-int ri, pi, ipi, lp, rp, cp, ls, rs;
-int lres, rres, cres = 0;
- SPELL *spell;
-
- len=strlen(word);
+ size_t len;
+ char **forms;
+ char **cur;
+ AFFIX *Affix;
+ int ri,
+ pi,
+ ipi,
+ lp,
+ rp,
+ cp,
+ ls,
+ rs;
+ int lres,
+ rres,
+ cres = 0;
+ SPELL *spell;
+
+ len = strlen(word);
if (len > MAXNORMLEN)
- return(NULL);
+ return (NULL);
strlower(word);
- forms=(char **) palloc(MAX_NORM*sizeof(char **));
- cur=forms;*cur=NULL;
+ forms = (char **) palloc(MAX_NORM * sizeof(char **));
+ cur = forms;
+ *cur = NULL;
- ri = (int)(*word) & 255;
- pi = (int)(word[strlen(word)-1]) & 255;
- Affix=(AFFIX*)Conf->Affix;
+ ri = (int) (*word) & 255;
+ pi = (int) (word[strlen(word) - 1]) & 255;
+ Affix = (AFFIX *) Conf->Affix;
/* Check that the word itself is normal form */
- if((spell = FindWord(Conf, word, 0))){
- *cur=pstrdup(word);
- cur++;*cur=NULL;
+ if ((spell = FindWord(Conf, word, 0)))
+ {
+ *cur = pstrdup(word);
+ cur++;
+ *cur = NULL;
}
/* Find all other NORMAL forms of the 'word' */
- for (ipi = 0; ipi <= pi; ipi += pi) {
-
- /* check prefix */
- lp = Conf->PrefixTree.Left[ri];
- rp = Conf->PrefixTree.Right[ri];
- while (lp >= 0 && lp <= rp) {
- cp = (lp + rp) >> 1;
- cres = 0;
- if ((cur - forms) < (MAX_NORM-1)) {
- cres = CheckPrefix(word, len, &Affix[cp], Conf, ipi, forms, &cur);
- }
- if ((lp < cp) && ((cur - forms) < (MAX_NORM-1)) ) {
- lres = CheckPrefix(word, len, &Affix[lp], Conf, ipi, forms, &cur);
- }
- if ( (rp > cp) && ((cur - forms) < (MAX_NORM-1)) ) {
- rres = CheckPrefix(word, len, &Affix[rp], Conf, ipi, forms, &cur);
- }
- if (cres < 0) {
- rp = cp - 1;
- lp++;
- } else if (cres > 0) {
- lp = cp + 1;
- rp--;
- } else {
- lp++;
- rp--;
- }
- }
-
- /* check suffix */
- ls = Conf->SuffixTree.Left[ipi];
- rs = Conf->SuffixTree.Right[ipi];
- while (ls >= 0 && ls <= rs) {
- if ( ((cur - forms) < (MAX_NORM-1)) ) {
- *cur = CheckSuffix(word, len, &Affix[ls], &lres, Conf);
- if (*cur) {
- cur++; *cur = NULL;
- }
- }
- if ( (rs > ls) && ((cur - forms) < (MAX_NORM-1)) ) {
- *cur = CheckSuffix(word, len, &Affix[rs], &rres, Conf);
- if (*cur) {
- cur++; *cur = NULL;
+ for (ipi = 0; ipi <= pi; ipi += pi)
+ {
+
+ /* check prefix */
+ lp = Conf->PrefixTree.Left[ri];
+ rp = Conf->PrefixTree.Right[ri];
+ while (lp >= 0 && lp <= rp)
+ {
+ cp = (lp + rp) >> 1;
+ cres = 0;
+ if ((cur - forms) < (MAX_NORM - 1))
+ cres = CheckPrefix(word, len, &Affix[cp], Conf, ipi, forms, &cur);
+ if ((lp < cp) && ((cur - forms) < (MAX_NORM - 1)))
+ lres = CheckPrefix(word, len, &Affix[lp], Conf, ipi, forms, &cur);
+ if ((rp > cp) && ((cur - forms) < (MAX_NORM - 1)))
+ rres = CheckPrefix(word, len, &Affix[rp], Conf, ipi, forms, &cur);
+ if (cres < 0)
+ {
+ rp = cp - 1;
+ lp++;
+ }
+ else if (cres > 0)
+ {
+ lp = cp + 1;
+ rp--;
+ }
+ else
+ {
+ lp++;
+ rp--;
+ }
}
- }
- ls++;
- rs--;
- } /* end while */
-
- } /* for ipi */
-
- if(cur==forms){
+
+ /* check suffix */
+ ls = Conf->SuffixTree.Left[ipi];
+ rs = Conf->SuffixTree.Right[ipi];
+ while (ls >= 0 && ls <= rs)
+ {
+ if (((cur - forms) < (MAX_NORM - 1)))
+ {
+ *cur = CheckSuffix(word, len, &Affix[ls], &lres, Conf);
+ if (*cur)
+ {
+ cur++;
+ *cur = NULL;
+ }
+ }
+ if ((rs > ls) && ((cur - forms) < (MAX_NORM - 1)))
+ {
+ *cur = CheckSuffix(word, len, &Affix[rs], &rres, Conf);
+ if (*cur)
+ {
+ cur++;
+ *cur = NULL;
+ }
+ }
+ ls++;
+ rs--;
+ } /* end while */
+
+ } /* for ipi */
+
+ if (cur == forms)
+ {
pfree(forms);
- return(NULL);
+ return (NULL);
}
- return(forms);
+ return (forms);
}
-void
-FreeIspell (IspellDict *Conf) {
- int i;
- AFFIX *Affix = (AFFIX *)Conf->Affix;
-
- for (i = 0; i < Conf->naffixes; i++) {
- if (Affix[i].compile == 0) {
- regfree(&(Affix[i].reg));
- }
- }
- for (i = 0; i < Conf->naffixes; i++) {
- free( Conf->Spell[i].word );
- }
- free(Conf->Affix);
- free(Conf->Spell);
- memset( (void*)Conf, 0, sizeof(IspellDict) );
- return;
+void
+FreeIspell(IspellDict * Conf)
+{
+ int i;
+ AFFIX *Affix = (AFFIX *) Conf->Affix;
+
+ for (i = 0; i < Conf->naffixes; i++)
+ {
+ if (Affix[i].compile == 0)
+ regfree(&(Affix[i].reg));
+ }
+ for (i = 0; i < Conf->naffixes; i++)
+ free(Conf->Spell[i].word);
+ free(Conf->Affix);
+ free(Conf->Spell);
+ memset((void *) Conf, 0, sizeof(IspellDict));
+ return;
}
diff --git a/contrib/tsearch2/ispell/spell.h b/contrib/tsearch2/ispell/spell.h
index 3034ca6709d..baf5052f026 100644
--- a/contrib/tsearch2/ispell/spell.h
+++ b/contrib/tsearch2/ispell/spell.h
@@ -4,48 +4,53 @@
#include <sys/types.h>
#include <regex.h>
-typedef struct spell_struct {
- char * word;
- char flag[10];
-} SPELL;
-
-typedef struct aff_struct {
- char flag;
- char type;
- char mask[33];
- char find[16];
- char repl[16];
- regex_t reg;
- size_t replen;
- char compile;
-} AFFIX;
-
-typedef struct Tree_struct {
- int Left[256], Right[256];
-} Tree_struct;
-
-typedef struct {
- int maffixes;
- int naffixes;
- AFFIX * Affix;
-
- int nspell;
- int mspell;
- SPELL *Spell;
- Tree_struct SpellTree;
- Tree_struct PrefixTree;
- Tree_struct SuffixTree;
-
-} IspellDict;
-
-char ** NormalizeWord(IspellDict * Conf,char *word);
-int ImportAffixes(IspellDict * Conf, const char *filename);
-int ImportDictionary(IspellDict * Conf,const char *filename);
-
-int AddSpell(IspellDict * Conf,const char * word,const char *flag);
-int AddAffix(IspellDict * Conf,int flag,const char *mask,const char *find,const char *repl,int type);
-void SortDictionary(IspellDict * Conf);
-void SortAffixes(IspellDict * Conf);
-void FreeIspell (IspellDict *Conf);
+typedef struct spell_struct
+{
+ char *word;
+ char flag[10];
+} SPELL;
+
+typedef struct aff_struct
+{
+ char flag;
+ char type;
+ char mask[33];
+ char find[16];
+ char repl[16];
+ regex_t reg;
+ size_t replen;
+ char compile;
+} AFFIX;
+
+typedef struct Tree_struct
+{
+ int Left[256],
+ Right[256];
+} Tree_struct;
+
+typedef struct
+{
+ int maffixes;
+ int naffixes;
+ AFFIX *Affix;
+
+ int nspell;
+ int mspell;
+ SPELL *Spell;
+ Tree_struct SpellTree;
+ Tree_struct PrefixTree;
+ Tree_struct SuffixTree;
+
+} IspellDict;
+
+char **NormalizeWord(IspellDict * Conf, char *word);
+int ImportAffixes(IspellDict * Conf, const char *filename);
+int ImportDictionary(IspellDict * Conf, const char *filename);
+
+int AddSpell(IspellDict * Conf, const char *word, const char *flag);
+int AddAffix(IspellDict * Conf, int flag, const char *mask, const char *find, const char *repl, int type);
+void SortDictionary(IspellDict * Conf);
+void SortAffixes(IspellDict * Conf);
+void FreeIspell(IspellDict * Conf);
#endif
diff --git a/contrib/tsearch2/prs_dcfg.c b/contrib/tsearch2/prs_dcfg.c
index 783cf43ceb7..6b553e45256 100644
--- a/contrib/tsearch2/prs_dcfg.c
+++ b/contrib/tsearch2/prs_dcfg.c
@@ -1,5 +1,5 @@
-/*
- * Simple config parser
+/*
+ * Simple config parser
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <stdlib.h>
@@ -16,126 +16,164 @@
#define CS_WAITEQ 2
#define CS_WAITVALUE 3
#define CS_INVALUE 4
-#define CS_IN2VALUE 5
+#define CS_IN2VALUE 5
#define CS_WAITDELIM 6
#define CS_INESC 7
#define CS_IN2ESC 8
static char *
-nstrdup(char *ptr, int len) {
- char *res=palloc(len+1), *cptr;
- memcpy(res,ptr,len);
- res[len]='\0';
+nstrdup(char *ptr, int len)
+{
+ char *res = palloc(len + 1),
+ *cptr;
+
+ memcpy(res, ptr, len);
+ res[len] = '\0';
cptr = ptr = res;
- while(*ptr) {
- if ( *ptr == '\\' )
+ while (*ptr)
+ {
+ if (*ptr == '\\')
ptr++;
- *cptr=*ptr; ptr++; cptr++;
+ *cptr = *ptr;
+ ptr++;
+ cptr++;
}
- *cptr='\0';
+ *cptr = '\0';
return res;
}
void
-parse_cfgdict(text *in, Map **m) {
- Map *mptr;
- char *ptr=VARDATA(in), *begin=NULL;
- char num=0;
- int state=CS_WAITKEY;
+parse_cfgdict(text *in, Map ** m)
+{
+ Map *mptr;
+ char *ptr = VARDATA(in),
+ *begin = NULL;
+ char num = 0;
+ int state = CS_WAITKEY;
- while( ptr-VARDATA(in) < VARSIZE(in) - VARHDRSZ ) {
- if ( *ptr==',' ) num++;
+ while (ptr - VARDATA(in) < VARSIZE(in) - VARHDRSZ)
+ {
+ if (*ptr == ',')
+ num++;
ptr++;
}
- *m=mptr=(Map*)palloc( sizeof(Map)*(num+2) );
- memset(mptr, 0, sizeof(Map)*(num+2) );
- ptr=VARDATA(in);
- while( ptr-VARDATA(in) < VARSIZE(in) - VARHDRSZ ) {
- if (state==CS_WAITKEY) {
- if (isalpha(*ptr)) {
- begin=ptr;
- state=CS_INKEY;
- } else if ( !isspace(*ptr) )
+ *m = mptr = (Map *) palloc(sizeof(Map) * (num + 2));
+ memset(mptr, 0, sizeof(Map) * (num + 2));
+ ptr = VARDATA(in);
+ while (ptr - VARDATA(in) < VARSIZE(in) - VARHDRSZ)
+ {
+ if (state == CS_WAITKEY)
+ {
+ if (isalpha(*ptr))
+ {
+ begin = ptr;
+ state = CS_INKEY;
+ }
+ else if (!isspace(*ptr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
- errdetail("Syntax error in position %d near \"%c\"",
- (int) (ptr-VARDATA(in)), *ptr)));
- } else if (state==CS_INKEY) {
- if ( isspace(*ptr) ) {
- mptr->key=nstrdup(begin, ptr-begin);
- state=CS_WAITEQ;
- } else if ( *ptr=='=' ) {
- mptr->key=nstrdup(begin, ptr-begin);
- state=CS_WAITVALUE;
- } else if ( !isalpha(*ptr) )
+ errdetail("Syntax error in position %d near \"%c\"",
+ (int) (ptr - VARDATA(in)), *ptr)));
+ }
+ else if (state == CS_INKEY)
+ {
+ if (isspace(*ptr))
+ {
+ mptr->key = nstrdup(begin, ptr - begin);
+ state = CS_WAITEQ;
+ }
+ else if (*ptr == '=')
+ {
+ mptr->key = nstrdup(begin, ptr - begin);
+ state = CS_WAITVALUE;
+ }
+ else if (!isalpha(*ptr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
- errdetail("Syntax error in position %d near \"%c\"",
- (int) (ptr-VARDATA(in)), *ptr)));
- } else if ( state==CS_WAITEQ ) {
- if ( *ptr=='=' )
- state=CS_WAITVALUE;
- else if ( !isspace(*ptr) )
+ errdetail("Syntax error in position %d near \"%c\"",
+ (int) (ptr - VARDATA(in)), *ptr)));
+ }
+ else if (state == CS_WAITEQ)
+ {
+ if (*ptr == '=')
+ state = CS_WAITVALUE;
+ else if (!isspace(*ptr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
- errdetail("Syntax error in position %d near \"%c\"",
- (int) (ptr-VARDATA(in)), *ptr)));
- } else if ( state==CS_WAITVALUE ) {
- if ( *ptr=='"' ) {
- begin=ptr+1;
- state=CS_INVALUE;
- } else if ( !isspace(*ptr) ) {
- begin=ptr;
- state=CS_IN2VALUE;
+ errdetail("Syntax error in position %d near \"%c\"",
+ (int) (ptr - VARDATA(in)), *ptr)));
+ }
+ else if (state == CS_WAITVALUE)
+ {
+ if (*ptr == '"')
+ {
+ begin = ptr + 1;
+ state = CS_INVALUE;
+ }
+ else if (!isspace(*ptr))
+ {
+ begin = ptr;
+ state = CS_IN2VALUE;
}
- } else if ( state==CS_INVALUE ) {
- if ( *ptr=='"' ) {
- mptr->value = nstrdup(begin, ptr-begin);
+ }
+ else if (state == CS_INVALUE)
+ {
+ if (*ptr == '"')
+ {
+ mptr->value = nstrdup(begin, ptr - begin);
mptr++;
- state=CS_WAITDELIM;
- } else if ( *ptr=='\\' )
- state=CS_INESC;
- } else if ( state==CS_IN2VALUE ) {
- if ( isspace(*ptr) || *ptr==',' ) {
- mptr->value = nstrdup(begin, ptr-begin);
+ state = CS_WAITDELIM;
+ }
+ else if (*ptr == '\\')
+ state = CS_INESC;
+ }
+ else if (state == CS_IN2VALUE)
+ {
+ if (isspace(*ptr) || *ptr == ',')
+ {
+ mptr->value = nstrdup(begin, ptr - begin);
mptr++;
- state=( *ptr==',' ) ? CS_WAITKEY : CS_WAITDELIM;
- } else if ( *ptr=='\\' )
- state=CS_INESC;
- } else if ( state==CS_WAITDELIM ) {
- if ( *ptr==',' )
- state=CS_WAITKEY;
- else if ( !isspace(*ptr) )
+ state = (*ptr == ',') ? CS_WAITKEY : CS_WAITDELIM;
+ }
+ else if (*ptr == '\\')
+ state = CS_INESC;
+ }
+ else if (state == CS_WAITDELIM)
+ {
+ if (*ptr == ',')
+ state = CS_WAITKEY;
+ else if (!isspace(*ptr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
- errdetail("Syntax error in position %d near \"%c\"",
- (int) (ptr-VARDATA(in)), *ptr)));
- } else if ( state == CS_INESC ) {
- state=CS_INVALUE;
- } else if ( state == CS_IN2ESC ) {
- state=CS_IN2VALUE;
- } else
+ errdetail("Syntax error in position %d near \"%c\"",
+ (int) (ptr - VARDATA(in)), *ptr)));
+ }
+ else if (state == CS_INESC)
+ state = CS_INVALUE;
+ else if (state == CS_IN2ESC)
+ state = CS_IN2VALUE;
+ else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("bad parser state"),
errdetail("%d at position %d near \"%c\"",
- state, (int) (ptr-VARDATA(in)), *ptr)));
+ state, (int) (ptr - VARDATA(in)), *ptr)));
ptr++;
}
- if (state==CS_IN2VALUE) {
- mptr->value = nstrdup(begin, ptr-begin);
+ if (state == CS_IN2VALUE)
+ {
+ mptr->value = nstrdup(begin, ptr - begin);
mptr++;
- } else if ( !(state==CS_WAITDELIM || state==CS_WAITKEY) )
+ }
+ else if (!(state == CS_WAITDELIM || state == CS_WAITKEY))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unexpected end of line")));
}
-
-
diff --git a/contrib/tsearch2/query.c b/contrib/tsearch2/query.c
index 14564c62895..0019b04f166 100644
--- a/contrib/tsearch2/query.c
+++ b/contrib/tsearch2/query.c
@@ -99,28 +99,40 @@ typedef struct
TI_IN_STATE valstate;
/* tscfg */
- int cfg_id;
+ int cfg_id;
} QPRS_STATE;
-static char*
-get_weight(char *buf, int2 *weight) {
+static char *
+get_weight(char *buf, int2 *weight)
+{
*weight = 0;
- if ( *buf != ':' )
+ if (*buf != ':')
return buf;
buf++;
- while( *buf ) {
- switch(tolower(*buf)) {
- case 'a': *weight |= 1<<3; break;
- case 'b': *weight |= 1<<2; break;
- case 'c': *weight |= 1<<1; break;
- case 'd': *weight |= 1; break;
- default: return buf;
+ while (*buf)
+ {
+ switch (tolower(*buf))
+ {
+ case 'a':
+ *weight |= 1 << 3;
+ break;
+ case 'b':
+ *weight |= 1 << 2;
+ break;
+ case 'c':
+ *weight |= 1 << 1;
+ break;
+ case 'd':
+ *weight |= 1;
+ break;
+ default:
+ return buf;
}
buf++;
}
-
+
return buf;
}
@@ -146,11 +158,15 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
state->count++;
(state->buf)++;
return OPEN;
- } else if ( *(state->buf) == ':' ) {
+ }
+ else if (*(state->buf) == ':')
+ {
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("error at start of operand")));
- } else if (*(state->buf) != ' ') {
+ }
+ else if (*(state->buf) != ' ')
+ {
state->valstate.prsbuf = state->buf;
state->state = WAITOPERATOR;
if (gettoken_tsvector(&(state->valstate)))
@@ -257,7 +273,7 @@ static void
pushval_morph(QPRS_STATE * state, int typeval, char *strval, int lenval, int2 weight)
{
int4 count = 0;
- PRSTEXT prs;
+ PRSTEXT prs;
prs.lenwords = 32;
prs.curwords = 0;
@@ -266,16 +282,17 @@ pushval_morph(QPRS_STATE * state, int typeval, char *strval, int lenval, int2 we
parsetext_v2(findcfg(state->cfg_id), &prs, strval, lenval);
- for(count=0;count<prs.curwords;count++) {
+ for (count = 0; count < prs.curwords; count++)
+ {
pushval_asis(state, VAL, prs.words[count].word, prs.words[count].len, weight);
- pfree( prs.words[count].word );
+ pfree(prs.words[count].word);
if (count)
- pushquery(state, OPR, (int4) '&', 0, 0, 0 );
- }
+ pushquery(state, OPR, (int4) '&', 0, 0, 0);
+ }
pfree(prs.words);
/* XXX */
- if ( prs.curwords==0 )
+ if (prs.curwords == 0)
pushval_asis(state, VALTRUE, 0, 0, 0);
}
@@ -381,15 +398,18 @@ ValCompare(CHKVAL * chkval, WordEntry * ptr, ITEM * item)
* check weight info
*/
static bool
-checkclass_str(CHKVAL * chkval, WordEntry * val, ITEM * item) {
- WordEntryPos *ptr = (WordEntryPos*) (chkval->values+val->pos+SHORTALIGN(val->len)+sizeof(uint16));
- uint16 len = *( (uint16*) (chkval->values+val->pos+SHORTALIGN(val->len)) );
- while (len--) {
- if ( item->weight & ( 1<<ptr->weight ) )
+checkclass_str(CHKVAL * chkval, WordEntry * val, ITEM * item)
+{
+ WordEntryPos *ptr = (WordEntryPos *) (chkval->values + val->pos + SHORTALIGN(val->len) + sizeof(uint16));
+ uint16 len = *((uint16 *) (chkval->values + val->pos + SHORTALIGN(val->len)));
+
+ while (len--)
+ {
+ if (item->weight & (1 << ptr->weight))
return true;
ptr++;
}
- return false;
+ return false;
}
/*
@@ -410,8 +430,8 @@ checkcondition_str(void *checkval, ITEM * val)
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
difference = ValCompare((CHKVAL *) checkval, StopMiddle, val);
if (difference == 0)
- return ( val->weight && StopMiddle->haspos ) ?
- checkclass_str((CHKVAL *) checkval,StopMiddle, val) : true;
+ return (val->weight && StopMiddle->haspos) ?
+ checkclass_str((CHKVAL *) checkval, StopMiddle, val) : true;
else if (difference < 0)
StopLow = StopMiddle + 1;
else
@@ -468,7 +488,7 @@ rexectsq(PG_FUNCTION_ARGS)
Datum
exectsq(PG_FUNCTION_ARGS)
{
- tsvector *val = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));
+ tsvector *val = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
CHKVAL chkval;
bool result;
@@ -485,10 +505,10 @@ exectsq(PG_FUNCTION_ARGS)
chkval.values = STRPTR(val);
chkval.operand = GETOPERAND(query);
result = TS_execute(
- GETQUERY(query),
- &chkval,
- true,
- checkcondition_str
+ GETQUERY(query),
+ &chkval,
+ true,
+ checkcondition_str
);
PG_FREE_IF_COPY(val, 0);
@@ -534,7 +554,7 @@ findoprnd(ITEM * ptr, int4 *pos)
* input
*/
static QUERYTYPE *
-queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id)
+ queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id)
{
QPRS_STATE state;
int4 i;
@@ -555,7 +575,7 @@ queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int c
state.count = 0;
state.num = 0;
state.str = NULL;
- state.cfg_id=cfg_id;
+ state.cfg_id = cfg_id;
/* init value parser's state */
state.valstate.oprisdelim = true;
@@ -678,12 +698,30 @@ infix(INFIX * in, bool first)
}
*(in->cur) = '\'';
in->cur++;
- if ( in->curpol->weight ) {
- *(in->cur) = ':'; in->cur++;
- if ( in->curpol->weight & (1<<3) ) { *(in->cur) = 'A'; in->cur++; }
- if ( in->curpol->weight & (1<<2) ) { *(in->cur) = 'B'; in->cur++; }
- if ( in->curpol->weight & (1<<1) ) { *(in->cur) = 'C'; in->cur++; }
- if ( in->curpol->weight & 1 ) { *(in->cur) = 'D'; in->cur++; }
+ if (in->curpol->weight)
+ {
+ *(in->cur) = ':';
+ in->cur++;
+ if (in->curpol->weight & (1 << 3))
+ {
+ *(in->cur) = 'A';
+ in->cur++;
+ }
+ if (in->curpol->weight & (1 << 2))
+ {
+ *(in->cur) = 'B';
+ in->cur++;
+ }
+ if (in->curpol->weight & (1 << 1))
+ {
+ *(in->cur) = 'C';
+ in->cur++;
+ }
+ if (in->curpol->weight & 1)
+ {
+ *(in->cur) = 'D';
+ in->cur++;
+ }
}
*(in->cur) = '\0';
in->curpol++;
@@ -827,15 +865,16 @@ tsquerytree(PG_FUNCTION_ARGS)
}
Datum
-to_tsquery(PG_FUNCTION_ARGS) {
- text *in = PG_GETARG_TEXT_P(1);
- char *str;
+to_tsquery(PG_FUNCTION_ARGS)
+{
+ text *in = PG_GETARG_TEXT_P(1);
+ char *str;
QUERYTYPE *query;
ITEM *res;
int4 len;
- str=text2char(in);
- PG_FREE_IF_COPY(in,1);
+ str = text2char(in);
+ PG_FREE_IF_COPY(in, 1);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0));
res = clean_fakeval_v2(GETQUERY(query), &len);
@@ -851,25 +890,25 @@ to_tsquery(PG_FUNCTION_ARGS) {
}
Datum
-to_tsquery_name(PG_FUNCTION_ARGS) {
- text *name=PG_GETARG_TEXT_P(0);
- Datum res= DirectFunctionCall2(
- to_tsquery,
- Int32GetDatum( name2id_cfg(name) ),
- PG_GETARG_DATUM(1)
+to_tsquery_name(PG_FUNCTION_ARGS)
+{
+ text *name = PG_GETARG_TEXT_P(0);
+ Datum res = DirectFunctionCall2(
+ to_tsquery,
+ Int32GetDatum(name2id_cfg(name)),
+ PG_GETARG_DATUM(1)
);
-
- PG_FREE_IF_COPY(name,1);
+
+ PG_FREE_IF_COPY(name, 1);
PG_RETURN_DATUM(res);
}
Datum
-to_tsquery_current(PG_FUNCTION_ARGS) {
- PG_RETURN_DATUM( DirectFunctionCall2(
- to_tsquery,
- Int32GetDatum( get_currcfg() ),
- PG_GETARG_DATUM(0)
- ));
+to_tsquery_current(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_DATUM(DirectFunctionCall2(
+ to_tsquery,
+ Int32GetDatum(get_currcfg()),
+ PG_GETARG_DATUM(0)
+ ));
}
-
-
diff --git a/contrib/tsearch2/query.h b/contrib/tsearch2/query.h
index c0715a2a037..4a79efdc539 100644
--- a/contrib/tsearch2/query.h
+++ b/contrib/tsearch2/query.h
@@ -16,10 +16,10 @@ typedef struct ITEM
int2 left;
int4 val;
/* user-friendly value, must correlate with WordEntry */
- uint32
- unused:1,
- length:11,
- distance:20;
+ uint32
+ unused:1,
+ length:11,
+ distance:20;
} ITEM;
/*
@@ -50,6 +50,6 @@ typedef struct
#define VALFALSE 7
bool TS_execute(ITEM * curitem, void *checkval,
- bool calcnot, bool (*chkcond) (void *checkval, ITEM * val));
+ bool calcnot, bool (*chkcond) (void *checkval, ITEM * val));
#endif
diff --git a/contrib/tsearch2/rank.c b/contrib/tsearch2/rank.c
index 0840eb83470..5b62c9810bc 100644
--- a/contrib/tsearch2/rank.c
+++ b/contrib/tsearch2/rank.c
@@ -37,29 +37,35 @@ Datum rank_cd_def(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(get_covers);
Datum get_covers(PG_FUNCTION_ARGS);
-static float weights[]={0.1, 0.2, 0.4, 1.0};
+static float weights[] = {0.1, 0.2, 0.4, 1.0};
#define wpos(wep) ( w[ ((WordEntryPos*)(wep))->weight ] )
-#define DEF_NORM_METHOD 0
+#define DEF_NORM_METHOD 0
/*
* Returns a weight of a word collocation
*/
-static float4 word_distance ( int4 w ) {
- if ( w>100 )
- return 1e-30;
+static float4
+word_distance(int4 w)
+{
+ if (w > 100)
+ return 1e-30;
- return 1.0/(1.005+0.05*exp( ((float4)w)/1.5-2) );
+ return 1.0 / (1.005 + 0.05 * exp(((float4) w) / 1.5 - 2));
}
static int
-cnt_length( tsvector *t ) {
- WordEntry *ptr=ARRPTR(t), *end=(WordEntry*)STRPTR(t);
- int len = 0, clen;
-
- while(ptr < end) {
- if ( (clen=POSDATALEN(t, ptr)) == 0 )
+cnt_length(tsvector * t)
+{
+ WordEntry *ptr = ARRPTR(t),
+ *end = (WordEntry *) STRPTR(t);
+ int len = 0,
+ clen;
+
+ while (ptr < end)
+ {
+ if ((clen = POSDATALEN(t, ptr)) == 0)
len += 1;
else
len += clen;
@@ -70,191 +76,225 @@ cnt_length( tsvector *t ) {
}
static int4
-WordECompareITEM(char *eval, char *qval, WordEntry * ptr, ITEM * item) {
- if (ptr->len == item->length)
- return strncmp(
- eval + ptr->pos,
- qval + item->distance,
- item->length);
-
- return (ptr->len > item->length) ? 1 : -1;
+WordECompareITEM(char *eval, char *qval, WordEntry * ptr, ITEM * item)
+{
+ if (ptr->len == item->length)
+ return strncmp(
+ eval + ptr->pos,
+ qval + item->distance,
+ item->length);
+
+ return (ptr->len > item->length) ? 1 : -1;
}
-static WordEntry*
-find_wordentry(tsvector *t, QUERYTYPE *q, ITEM *item) {
- WordEntry *StopLow = ARRPTR(t);
- WordEntry *StopHigh = (WordEntry*)STRPTR(t);
- WordEntry *StopMiddle;
- int difference;
-
- /* Loop invariant: StopLow <= item < StopHigh */
-
- while (StopLow < StopHigh)
- {
- StopMiddle = StopLow + (StopHigh - StopLow) / 2;
- difference = WordECompareITEM(STRPTR(t), GETOPERAND(q), StopMiddle, item);
- if (difference == 0)
- return StopMiddle;
- else if (difference < 0)
- StopLow = StopMiddle + 1;
- else
- StopHigh = StopMiddle;
- }
-
- return NULL;
+static WordEntry *
+find_wordentry(tsvector * t, QUERYTYPE * q, ITEM * item)
+{
+ WordEntry *StopLow = ARRPTR(t);
+ WordEntry *StopHigh = (WordEntry *) STRPTR(t);
+ WordEntry *StopMiddle;
+ int difference;
+
+ /* Loop invariant: StopLow <= item < StopHigh */
+
+ while (StopLow < StopHigh)
+ {
+ StopMiddle = StopLow + (StopHigh - StopLow) / 2;
+ difference = WordECompareITEM(STRPTR(t), GETOPERAND(q), StopMiddle, item);
+ if (difference == 0)
+ return StopMiddle;
+ else if (difference < 0)
+ StopLow = StopMiddle + 1;
+ else
+ StopHigh = StopMiddle;
+ }
+
+ return NULL;
}
-static WordEntryPos POSNULL[]={
- {0,0},
- {0,MAXENTRYPOS-1}
+static WordEntryPos POSNULL[] = {
+ {0, 0},
+ {0, MAXENTRYPOS - 1}
};
static float
-calc_rank_and(float *w, tsvector *t, QUERYTYPE *q) {
- uint16 **pos=(uint16**)palloc(sizeof(uint16*) * q->size);
- int i,k,l,p;
- WordEntry *entry;
- WordEntryPos *post,*ct;
- int4 dimt,lenct,dist;
- float res=-1.0;
- ITEM *item=GETQUERY(q);
-
- memset(pos,0,sizeof(uint16**) * q->size);
- *(uint16*)POSNULL = lengthof(POSNULL)-1;
-
- for(i=0; i<q->size; i++) {
-
- if ( item[i].type != VAL )
+calc_rank_and(float *w, tsvector * t, QUERYTYPE * q)
+{
+ uint16 **pos = (uint16 **) palloc(sizeof(uint16 *) * q->size);
+ int i,
+ k,
+ l,
+ p;
+ WordEntry *entry;
+ WordEntryPos *post,
+ *ct;
+ int4 dimt,
+ lenct,
+ dist;
+ float res = -1.0;
+ ITEM *item = GETQUERY(q);
+
+ memset(pos, 0, sizeof(uint16 **) * q->size);
+ *(uint16 *) POSNULL = lengthof(POSNULL) - 1;
+
+ for (i = 0; i < q->size; i++)
+ {
+
+ if (item[i].type != VAL)
continue;
- entry=find_wordentry(t,q,&(item[i]));
- if ( !entry )
+ entry = find_wordentry(t, q, &(item[i]));
+ if (!entry)
continue;
- if ( entry->haspos )
- pos[i] = (uint16*)_POSDATAPTR(t,entry);
+ if (entry->haspos)
+ pos[i] = (uint16 *) _POSDATAPTR(t, entry);
else
- pos[i] = (uint16*)POSNULL;
-
-
- dimt = *(uint16*)(pos[i]);
- post = (WordEntryPos*)(pos[i]+1);
- for( k=0; k<i; k++ ) {
- if ( !pos[k] ) continue;
- lenct = *(uint16*)(pos[k]);
- ct = (WordEntryPos*)(pos[k]+1);
- for(l=0; l<dimt; l++) {
- for(p=0; p<lenct; p++) {
- dist = abs( post[l].pos - ct[p].pos );
- if ( dist || (dist==0 && (pos[i]==(uint16*)POSNULL || pos[k]==(uint16*)POSNULL) ) ) {
- float curw;
- if ( !dist ) dist=MAXENTRYPOS;
- curw= sqrt( wpos(&(post[l])) * wpos( &(ct[p]) ) * word_distance(dist) );
- res = ( res < 0 ) ? curw : 1.0 - ( 1.0 - res ) * ( 1.0 - curw );
+ pos[i] = (uint16 *) POSNULL;
+
+
+ dimt = *(uint16 *) (pos[i]);
+ post = (WordEntryPos *) (pos[i] + 1);
+ for (k = 0; k < i; k++)
+ {
+ if (!pos[k])
+ continue;
+ lenct = *(uint16 *) (pos[k]);
+ ct = (WordEntryPos *) (pos[k] + 1);
+ for (l = 0; l < dimt; l++)
+ {
+ for (p = 0; p < lenct; p++)
+ {
+ dist = abs(post[l].pos - ct[p].pos);
+ if (dist || (dist == 0 && (pos[i] == (uint16 *) POSNULL || pos[k] == (uint16 *) POSNULL)))
+ {
+ float curw;
+
+ if (!dist)
+ dist = MAXENTRYPOS;
+ curw = sqrt(wpos(&(post[l])) * wpos(&(ct[p])) * word_distance(dist));
+ res = (res < 0) ? curw : 1.0 - (1.0 - res) * (1.0 - curw);
}
}
}
}
}
pfree(pos);
- return res;
+ return res;
}
static float
-calc_rank_or(float *w, tsvector *t, QUERYTYPE *q) {
- WordEntry *entry;
- WordEntryPos *post;
- int4 dimt,j,i;
- float res=-1.0;
- ITEM *item=GETQUERY(q);
-
- *(uint16*)POSNULL = lengthof(POSNULL)-1;
-
- for(i=0; i<q->size; i++) {
- if ( item[i].type != VAL )
+calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
+{
+ WordEntry *entry;
+ WordEntryPos *post;
+ int4 dimt,
+ j,
+ i;
+ float res = -1.0;
+ ITEM *item = GETQUERY(q);
+
+ *(uint16 *) POSNULL = lengthof(POSNULL) - 1;
+
+ for (i = 0; i < q->size; i++)
+ {
+ if (item[i].type != VAL)
continue;
- entry=find_wordentry(t,q,&(item[i]));
- if ( !entry )
+ entry = find_wordentry(t, q, &(item[i]));
+ if (!entry)
continue;
- if ( entry->haspos ) {
- dimt = POSDATALEN(t,entry);
- post = POSDATAPTR(t,entry);
- } else {
- dimt = *(uint16*)POSNULL;
- post = POSNULL+1;
+ if (entry->haspos)
+ {
+ dimt = POSDATALEN(t, entry);
+ post = POSDATAPTR(t, entry);
+ }
+ else
+ {
+ dimt = *(uint16 *) POSNULL;
+ post = POSNULL + 1;
}
- for(j=0;j<dimt;j++) {
- if ( res < 0 )
- res = wpos( &(post[j]) );
+ for (j = 0; j < dimt; j++)
+ {
+ if (res < 0)
+ res = wpos(&(post[j]));
else
- res = 1.0 - ( 1.0-res ) * ( 1.0-wpos( &(post[j]) ) );
+ res = 1.0 - (1.0 - res) * (1.0 - wpos(&(post[j])));
}
}
return res;
}
static float
-calc_rank(float *w, tsvector *t, QUERYTYPE *q, int4 method) {
- ITEM *item = GETQUERY(q);
- float res=0.0;
+calc_rank(float *w, tsvector * t, QUERYTYPE * q, int4 method)
+{
+ ITEM *item = GETQUERY(q);
+ float res = 0.0;
if (!t->size || !q->size)
return 0.0;
- res = ( item->type != VAL && item->val == (int4) '&' ) ?
- calc_rank_and(w,t,q) : calc_rank_or(w,t,q);
+ res = (item->type != VAL && item->val == (int4) '&') ?
+ calc_rank_and(w, t, q) : calc_rank_or(w, t, q);
- if ( res < 0 )
+ if (res < 0)
res = 1e-20;
- switch(method) {
- case 0: break;
- case 1: res /= log((float)cnt_length(t)); break;
- case 2: res /= (float)cnt_length(t); break;
+ switch (method)
+ {
+ case 0:
+ break;
+ case 1:
+ res /= log((float) cnt_length(t));
+ break;
+ case 2:
+ res /= (float) cnt_length(t);
+ break;
default:
- /* internal error */
- elog(ERROR,"unrecognized normalization method: %d", method);
- }
+ /* internal error */
+ elog(ERROR, "unrecognized normalization method: %d", method);
+ }
return res;
}
Datum
-rank(PG_FUNCTION_ARGS) {
+rank(PG_FUNCTION_ARGS)
+{
ArrayType *win = (ArrayType *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
- tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
+ tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
QUERYTYPE *query = (QUERYTYPE *) PG_DETOAST_DATUM(PG_GETARG_DATUM(2));
- int method=DEF_NORM_METHOD;
- float res=0.0;
- float ws[ lengthof(weights) ];
- int i;
+ int method = DEF_NORM_METHOD;
+ float res = 0.0;
+ float ws[lengthof(weights)];
+ int i;
- if ( ARR_NDIM(win) != 1 )
+ if (ARR_NDIM(win) != 1)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array of weight must be one-dimensional")));
- if ( ARRNELEMS(win) < lengthof(weights) )
+ if (ARRNELEMS(win) < lengthof(weights))
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array of weight is too short")));
- for(i=0;i<lengthof(weights);i++) {
- ws[ i ] = ( ((float4*)ARR_DATA_PTR(win))[i] >= 0 ) ? ((float4*)ARR_DATA_PTR(win))[i] : weights[i];
- if ( ws[ i ] > 1.0 )
+ for (i = 0; i < lengthof(weights); i++)
+ {
+ ws[i] = (((float4 *) ARR_DATA_PTR(win))[i] >= 0) ? ((float4 *) ARR_DATA_PTR(win))[i] : weights[i];
+ if (ws[i] > 1.0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("weight out of range")));
- }
+ }
+
+ if (PG_NARGS() == 4)
+ method = PG_GETARG_INT32(3);
- if ( PG_NARGS() == 4 )
- method=PG_GETARG_INT32(3);
+ res = calc_rank(ws, txt, query, method);
- res=calc_rank(ws, txt, query, method);
-
PG_FREE_IF_COPY(win, 0);
PG_FREE_IF_COPY(txt, 1);
PG_FREE_IF_COPY(query, 2);
@@ -262,108 +302,127 @@ rank(PG_FUNCTION_ARGS) {
}
Datum
-rank_def(PG_FUNCTION_ARGS) {
- tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+rank_def(PG_FUNCTION_ARGS)
+{
+ tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
QUERYTYPE *query = (QUERYTYPE *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
- float res=0.0;
- int method=DEF_NORM_METHOD;
+ float res = 0.0;
+ int method = DEF_NORM_METHOD;
- if ( PG_NARGS() == 3 )
- method=PG_GETARG_INT32(2);
+ if (PG_NARGS() == 3)
+ method = PG_GETARG_INT32(2);
+
+ res = calc_rank(weights, txt, query, method);
- res=calc_rank(weights, txt, query, method);
-
PG_FREE_IF_COPY(txt, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_FLOAT4(res);
}
-typedef struct {
- ITEM *item;
- int32 pos;
-} DocRepresentation;
+typedef struct
+{
+ ITEM *item;
+ int32 pos;
+} DocRepresentation;
static int
-compareDocR(const void *a, const void *b) {
- if ( ((DocRepresentation *) a)->pos == ((DocRepresentation *) b)->pos )
+compareDocR(const void *a, const void *b)
+{
+ if (((DocRepresentation *) a)->pos == ((DocRepresentation *) b)->pos)
return 1;
- return ( ((DocRepresentation *) a)->pos > ((DocRepresentation *) b)->pos ) ? 1 : -1;
+ return (((DocRepresentation *) a)->pos > ((DocRepresentation *) b)->pos) ? 1 : -1;
}
-typedef struct {
+typedef struct
+{
DocRepresentation *doc;
- int len;
+ int len;
} ChkDocR;
static bool
-checkcondition_DR(void *checkval, ITEM *val) {
- DocRepresentation *ptr = ((ChkDocR*)checkval)->doc;
+checkcondition_DR(void *checkval, ITEM * val)
+{
+ DocRepresentation *ptr = ((ChkDocR *) checkval)->doc;
- while( ptr - ((ChkDocR*)checkval)->doc < ((ChkDocR*)checkval)->len ) {
- if ( val == ptr->item )
+ while (ptr - ((ChkDocR *) checkval)->doc < ((ChkDocR *) checkval)->len)
+ {
+ if (val == ptr->item)
return true;
ptr++;
- }
+ }
return false;
}
static bool
-Cover(DocRepresentation *doc, int len, QUERYTYPE *query, int *pos, int *p, int *q) {
- int i;
- DocRepresentation *ptr,*f=(DocRepresentation*)0xffffffff;
- ITEM *item=GETQUERY(query);
- int lastpos=*pos;
- int oldq=*q;
-
- *p=0x7fffffff;
- *q=0;
-
- for(i=0; i<query->size; i++) {
- if ( item->type != VAL ) {
+Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int *q)
+{
+ int i;
+ DocRepresentation *ptr,
+ *f = (DocRepresentation *) 0xffffffff;
+ ITEM *item = GETQUERY(query);
+ int lastpos = *pos;
+ int oldq = *q;
+
+ *p = 0x7fffffff;
+ *q = 0;
+
+ for (i = 0; i < query->size; i++)
+ {
+ if (item->type != VAL)
+ {
item++;
continue;
}
ptr = doc + *pos;
- while(ptr-doc<len) {
- if ( ptr->item == item ) {
- if ( ptr->pos > *q ) {
+ while (ptr - doc < len)
+ {
+ if (ptr->item == item)
+ {
+ if (ptr->pos > *q)
+ {
*q = ptr->pos;
- lastpos= ptr - doc;
- }
+ lastpos = ptr - doc;
+ }
break;
- }
+ }
ptr++;
}
item++;
}
- if (*q==0 )
+ if (*q == 0)
return false;
- if (*q==oldq) { /* already check this pos */
+ if (*q == oldq)
+ { /* already check this pos */
(*pos)++;
- return Cover(doc, len, query, pos,p,q);
- }
+ return Cover(doc, len, query, pos, p, q);
+ }
- item=GETQUERY(query);
- for(i=0; i<query->size; i++) {
- if ( item->type != VAL ) {
+ item = GETQUERY(query);
+ for (i = 0; i < query->size; i++)
+ {
+ if (item->type != VAL)
+ {
item++;
continue;
}
ptr = doc + lastpos;
- while(ptr>=doc+*pos) {
- if ( ptr->item == item ) {
- if ( ptr->pos < *p ) {
+ while (ptr >= doc + *pos)
+ {
+ if (ptr->item == item)
+ {
+ if (ptr->pos < *p)
+ {
*p = ptr->pos;
- f=ptr;
+ f = ptr;
}
break;
}
@@ -371,106 +430,135 @@ Cover(DocRepresentation *doc, int len, QUERYTYPE *query, int *pos, int *p, int *
}
item++;
}
-
- if ( *p<=*q ) {
- ChkDocR ch = { f, (doc + lastpos)-f+1 };
- *pos = f-doc+1;
- if ( TS_execute(GETQUERY(query), &ch, false, checkcondition_DR) ) {
- /*elog(NOTICE,"OP:%d NP:%d P:%d Q:%d", *pos, lastpos, *p, *q);*/
+
+ if (*p <= *q)
+ {
+ ChkDocR ch = {f, (doc + lastpos) - f + 1};
+
+ *pos = f - doc + 1;
+ if (TS_execute(GETQUERY(query), &ch, false, checkcondition_DR))
+ {
+ /*
+ * elog(NOTICE,"OP:%d NP:%d P:%d Q:%d", *pos, lastpos, *p,
+ * *q);
+ */
return true;
- } else
- return Cover(doc, len, query, pos,p,q);
+ }
+ else
+ return Cover(doc, len, query, pos, p, q);
}
-
+
return false;
}
-static DocRepresentation*
-get_docrep(tsvector *txt, QUERYTYPE *query, int *doclen) {
- ITEM *item=GETQUERY(query);
- WordEntry *entry;
- WordEntryPos *post;
- int4 dimt,j,i;
- int len=query->size*4,cur=0;
+static DocRepresentation *
+get_docrep(tsvector * txt, QUERYTYPE * query, int *doclen)
+{
+ ITEM *item = GETQUERY(query);
+ WordEntry *entry;
+ WordEntryPos *post;
+ int4 dimt,
+ j,
+ i;
+ int len = query->size * 4,
+ cur = 0;
DocRepresentation *doc;
- *(uint16*)POSNULL = lengthof(POSNULL)-1;
- doc = (DocRepresentation*)palloc(sizeof(DocRepresentation)*len);
- for(i=0; i<query->size; i++) {
- if ( item[i].type != VAL )
+ *(uint16 *) POSNULL = lengthof(POSNULL) - 1;
+ doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len);
+ for (i = 0; i < query->size; i++)
+ {
+ if (item[i].type != VAL)
continue;
- entry=find_wordentry(txt,query,&(item[i]));
- if ( !entry )
+ entry = find_wordentry(txt, query, &(item[i]));
+ if (!entry)
continue;
- if ( entry->haspos ) {
- dimt = POSDATALEN(txt,entry);
- post = POSDATAPTR(txt,entry);
- } else {
- dimt = *(uint16*)POSNULL;
- post = POSNULL+1;
+ if (entry->haspos)
+ {
+ dimt = POSDATALEN(txt, entry);
+ post = POSDATAPTR(txt, entry);
+ }
+ else
+ {
+ dimt = *(uint16 *) POSNULL;
+ post = POSNULL + 1;
}
- while( cur+dimt >= len ) {
- len*=2;
- doc = (DocRepresentation*)repalloc(doc,sizeof(DocRepresentation)*len);
+ while (cur + dimt >= len)
+ {
+ len *= 2;
+ doc = (DocRepresentation *) repalloc(doc, sizeof(DocRepresentation) * len);
}
- for(j=0;j<dimt;j++) {
- doc[cur].item=&(item[i]);
- doc[cur].pos=post[j].pos;
+ for (j = 0; j < dimt; j++)
+ {
+ doc[cur].item = &(item[i]);
+ doc[cur].pos = post[j].pos;
cur++;
}
}
- *doclen=cur;
-
- if ( cur>0 ) {
- if ( cur>1 )
+ *doclen = cur;
+
+ if (cur > 0)
+ {
+ if (cur > 1)
qsort((void *) doc, cur, sizeof(DocRepresentation), compareDocR);
return doc;
}
-
+
pfree(doc);
return NULL;
}
Datum
-rank_cd(PG_FUNCTION_ARGS) {
- int K = PG_GETARG_INT32(0);
- tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
+rank_cd(PG_FUNCTION_ARGS)
+{
+ int K = PG_GETARG_INT32(0);
+ tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
QUERYTYPE *query = (QUERYTYPE *) PG_DETOAST_DATUM(PG_GETARG_DATUM(2));
- int method=DEF_NORM_METHOD;
- DocRepresentation *doc;
- float res=0.0;
- int p=0,q=0,len,cur;
+ int method = DEF_NORM_METHOD;
+ DocRepresentation *doc;
+ float res = 0.0;
+ int p = 0,
+ q = 0,
+ len,
+ cur;
doc = get_docrep(txt, query, &len);
- if ( !doc ) {
+ if (!doc)
+ {
PG_FREE_IF_COPY(txt, 1);
PG_FREE_IF_COPY(query, 2);
PG_RETURN_FLOAT4(0.0);
}
- cur=0;
- if (K<=0)
- K=4;
- while( Cover(doc, len, query, &cur, &p, &q) )
- res += ( q-p+1 > K ) ? ((float)K)/((float)(q-p+1)) : 1.0;
-
- if ( PG_NARGS() == 4 )
- method=PG_GETARG_INT32(3);
-
- switch(method) {
- case 0: break;
- case 1: res /= log((float)cnt_length(txt)); break;
- case 2: res /= (float)cnt_length(txt); break;
+ cur = 0;
+ if (K <= 0)
+ K = 4;
+ while (Cover(doc, len, query, &cur, &p, &q))
+ res += (q - p + 1 > K) ? ((float) K) / ((float) (q - p + 1)) : 1.0;
+
+ if (PG_NARGS() == 4)
+ method = PG_GETARG_INT32(3);
+
+ switch (method)
+ {
+ case 0:
+ break;
+ case 1:
+ res /= log((float) cnt_length(txt));
+ break;
+ case 2:
+ res /= (float) cnt_length(txt);
+ break;
default:
- /* internal error */
- elog(ERROR,"unrecognized normalization method: %d", method);
- }
+ /* internal error */
+ elog(ERROR, "unrecognized normalization method: %d", method);
+ }
pfree(doc);
PG_FREE_IF_COPY(txt, 1);
@@ -481,120 +569,141 @@ rank_cd(PG_FUNCTION_ARGS) {
Datum
-rank_cd_def(PG_FUNCTION_ARGS) {
- PG_RETURN_DATUM( DirectFunctionCall4(
- rank_cd,
- Int32GetDatum(-1),
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- ( PG_NARGS() == 3 ) ? PG_GETARG_DATUM(2) : Int32GetDatum(DEF_NORM_METHOD)
- ));
+rank_cd_def(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_DATUM(DirectFunctionCall4(
+ rank_cd,
+ Int32GetDatum(-1),
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ (PG_NARGS() == 3) ? PG_GETARG_DATUM(2) : Int32GetDatum(DEF_NORM_METHOD)
+ ));
}
/**************debug*************/
-typedef struct {
- char *w;
- int2 len;
- int2 pos;
- int2 start;
- int2 finish;
-} DocWord;
+typedef struct
+{
+ char *w;
+ int2 len;
+ int2 pos;
+ int2 start;
+ int2 finish;
+} DocWord;
static int
-compareDocWord(const void *a, const void *b) {
- if ( ((DocWord *) a)->pos == ((DocWord *) b)->pos )
+compareDocWord(const void *a, const void *b)
+{
+ if (((DocWord *) a)->pos == ((DocWord *) b)->pos)
return 1;
- return ( ((DocWord *) a)->pos > ((DocWord *) b)->pos ) ? 1 : -1;
+ return (((DocWord *) a)->pos > ((DocWord *) b)->pos) ? 1 : -1;
}
-Datum
-get_covers(PG_FUNCTION_ARGS) {
- tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+Datum
+get_covers(PG_FUNCTION_ARGS)
+{
+ tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
QUERYTYPE *query = (QUERYTYPE *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
- WordEntry *pptr=ARRPTR(txt);
- int i,dlen=0,j,cur=0,len=0,rlen;
- DocWord *dw,*dwptr;
- text *out;
- char *cptr;
+ WordEntry *pptr = ARRPTR(txt);
+ int i,
+ dlen = 0,
+ j,
+ cur = 0,
+ len = 0,
+ rlen;
+ DocWord *dw,
+ *dwptr;
+ text *out;
+ char *cptr;
DocRepresentation *doc;
- int pos=0,p,q,olddwpos=0;
- int ncover=1;
+ int pos = 0,
+ p,
+ q,
+ olddwpos = 0;
+ int ncover = 1;
doc = get_docrep(txt, query, &rlen);
- if ( !doc ) {
- out=palloc(VARHDRSZ);
+ if (!doc)
+ {
+ out = palloc(VARHDRSZ);
VARATT_SIZEP(out) = VARHDRSZ;
- PG_FREE_IF_COPY(txt,0);
- PG_FREE_IF_COPY(query,1);
+ PG_FREE_IF_COPY(txt, 0);
+ PG_FREE_IF_COPY(query, 1);
PG_RETURN_POINTER(out);
}
- for(i=0;i<txt->size;i++) {
+ for (i = 0; i < txt->size; i++)
+ {
if (!pptr[i].haspos)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("no pos info")));
- dlen += POSDATALEN(txt,&(pptr[i]));
+ dlen += POSDATALEN(txt, &(pptr[i]));
}
- dwptr=dw=palloc(sizeof(DocWord)*dlen);
- memset(dw,0,sizeof(DocWord)*dlen);
+ dwptr = dw = palloc(sizeof(DocWord) * dlen);
+ memset(dw, 0, sizeof(DocWord) * dlen);
+
+ for (i = 0; i < txt->size; i++)
+ {
+ WordEntryPos *posdata = POSDATAPTR(txt, &(pptr[i]));
- for(i=0;i<txt->size;i++) {
- WordEntryPos *posdata = POSDATAPTR(txt,&(pptr[i]));
- for(j=0;j<POSDATALEN(txt,&(pptr[i]));j++) {
- dw[cur].w=STRPTR(txt)+pptr[i].pos;
- dw[cur].len=pptr[i].len;
- dw[cur].pos=posdata[j].pos;
+ for (j = 0; j < POSDATALEN(txt, &(pptr[i])); j++)
+ {
+ dw[cur].w = STRPTR(txt) + pptr[i].pos;
+ dw[cur].len = pptr[i].len;
+ dw[cur].pos = posdata[j].pos;
cur++;
}
- len+=(pptr[i].len + 1) * (int)POSDATALEN(txt,&(pptr[i]));
+ len += (pptr[i].len + 1) * (int) POSDATALEN(txt, &(pptr[i]));
}
qsort((void *) dw, dlen, sizeof(DocWord), compareDocWord);
- while( Cover(doc, rlen, query, &pos, &p, &q) ) {
- dwptr=dw+olddwpos;
- while(dwptr->pos < p && dwptr-dw<dlen)
+ while (Cover(doc, rlen, query, &pos, &p, &q))
+ {
+ dwptr = dw + olddwpos;
+ while (dwptr->pos < p && dwptr - dw < dlen)
dwptr++;
- olddwpos=dwptr-dw;
- dwptr->start=ncover;
- while(dwptr->pos < q+1 && dwptr-dw<dlen)
+ olddwpos = dwptr - dw;
+ dwptr->start = ncover;
+ while (dwptr->pos < q + 1 && dwptr - dw < dlen)
dwptr++;
- (dwptr-1)->finish=ncover;
- len+= 4 /* {}+two spaces */ + 2*16 /*numbers*/;
- ncover++;
- }
-
- out=palloc(VARHDRSZ+len);
- cptr=((char*)out)+VARHDRSZ;
- dwptr=dw;
-
- while( dwptr-dw < dlen) {
- if ( dwptr->start ) {
- sprintf(cptr,"{%d ",dwptr->start);
- cptr=strchr(cptr,'\0');
+ (dwptr - 1)->finish = ncover;
+ len += 4 /* {}+two spaces */ + 2 * 16 /* numbers */ ;
+ ncover++;
+ }
+
+ out = palloc(VARHDRSZ + len);
+ cptr = ((char *) out) + VARHDRSZ;
+ dwptr = dw;
+
+ while (dwptr - dw < dlen)
+ {
+ if (dwptr->start)
+ {
+ sprintf(cptr, "{%d ", dwptr->start);
+ cptr = strchr(cptr, '\0');
}
- memcpy(cptr,dwptr->w,dwptr->len);
- cptr+=dwptr->len;
- *cptr=' ';
+ memcpy(cptr, dwptr->w, dwptr->len);
+ cptr += dwptr->len;
+ *cptr = ' ';
cptr++;
- if ( dwptr->finish ) {
- sprintf(cptr,"}%d ",dwptr->finish);
- cptr=strchr(cptr,'\0');
+ if (dwptr->finish)
+ {
+ sprintf(cptr, "}%d ", dwptr->finish);
+ cptr = strchr(cptr, '\0');
}
dwptr++;
- }
+ }
+
+ VARATT_SIZEP(out) = cptr - ((char *) out);
- VARATT_SIZEP(out) = cptr - ((char*)out);
-
pfree(dw);
pfree(doc);
- PG_FREE_IF_COPY(txt,0);
- PG_FREE_IF_COPY(query,1);
+ PG_FREE_IF_COPY(txt, 0);
+ PG_FREE_IF_COPY(query, 1);
PG_RETURN_POINTER(out);
}
-
diff --git a/contrib/tsearch2/snmap.c b/contrib/tsearch2/snmap.c
index 023c9eb5f9f..2cd3f53e497 100644
--- a/contrib/tsearch2/snmap.c
+++ b/contrib/tsearch2/snmap.c
@@ -1,4 +1,4 @@
-/*
+/*
* simple but fast map from str to Oid
* Teodor Sigaev <teodor@sigaev.ru>
*/
@@ -11,69 +11,85 @@
#include "common.h"
static int
-compareSNMapEntry(const void *a, const void *b) {
- return strcmp( ((SNMapEntry*)a)->key, ((SNMapEntry*)b)->key );
+compareSNMapEntry(const void *a, const void *b)
+{
+ return strcmp(((SNMapEntry *) a)->key, ((SNMapEntry *) b)->key);
}
-void
-addSNMap( SNMap *map, char *key, Oid value ) {
- if (map->len>=map->reallen) {
+void
+addSNMap(SNMap * map, char *key, Oid value)
+{
+ if (map->len >= map->reallen)
+ {
SNMapEntry *tmp;
- int len = (map->reallen) ? 2*map->reallen : 16;
- tmp=(SNMapEntry*)realloc(map->list, sizeof(SNMapEntry) * len);
- if ( !tmp )
+ int len = (map->reallen) ? 2 * map->reallen : 16;
+
+ tmp = (SNMapEntry *) realloc(map->list, sizeof(SNMapEntry) * len);
+ if (!tmp)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- map->reallen=len;
- map->list=tmp;
+ map->reallen = len;
+ map->list = tmp;
}
- map->list[ map->len ].key = strdup(key);
- if ( ! map->list[ map->len ].key )
+ map->list[map->len].key = strdup(key);
+ if (!map->list[map->len].key)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- map->list[ map->len ].value=value;
+ map->list[map->len].value = value;
map->len++;
- if ( map->len>1 ) qsort(map->list, map->len, sizeof(SNMapEntry), compareSNMapEntry);
+ if (map->len > 1)
+ qsort(map->list, map->len, sizeof(SNMapEntry), compareSNMapEntry);
}
-void
-addSNMap_t( SNMap *map, text *key, Oid value ) {
- char *k=text2char( key );
+void
+addSNMap_t(SNMap * map, text *key, Oid value)
+{
+ char *k = text2char(key);
+
addSNMap(map, k, value);
pfree(k);
}
-Oid
-findSNMap( SNMap *map, char *key ) {
+Oid
+findSNMap(SNMap * map, char *key)
+{
SNMapEntry *ptr;
- SNMapEntry ks = {key, 0};
- if ( map->len==0 || !map->list )
- return 0;
- ptr = (SNMapEntry*) bsearch(&ks, map->list, map->len, sizeof(SNMapEntry), compareSNMapEntry);
+ SNMapEntry ks = {key, 0};
+
+ if (map->len == 0 || !map->list)
+ return 0;
+ ptr = (SNMapEntry *) bsearch(&ks, map->list, map->len, sizeof(SNMapEntry), compareSNMapEntry);
return (ptr) ? ptr->value : 0;
}
-Oid
-findSNMap_t( SNMap *map, text *key ) {
- char *k=text2char(key);
- int res;
- res= findSNMap(map, k);
+Oid
+findSNMap_t(SNMap * map, text *key)
+{
+ char *k = text2char(key);
+ int res;
+
+ res = findSNMap(map, k);
pfree(k);
return res;
}
-void freeSNMap( SNMap *map ) {
- SNMapEntry *entry=map->list;
- if ( map->list ) {
- while( map->len ) {
- if ( entry->key ) free(entry->key);
- entry++; map->len--;
+void
+freeSNMap(SNMap * map)
+{
+ SNMapEntry *entry = map->list;
+
+ if (map->list)
+ {
+ while (map->len)
+ {
+ if (entry->key)
+ free(entry->key);
+ entry++;
+ map->len--;
}
- free( map->list );
+ free(map->list);
}
- memset(map,0,sizeof(SNMap));
+ memset(map, 0, sizeof(SNMap));
}
-
-
diff --git a/contrib/tsearch2/snmap.h b/contrib/tsearch2/snmap.h
index b4856019c9e..ae188b66a4c 100644
--- a/contrib/tsearch2/snmap.h
+++ b/contrib/tsearch2/snmap.h
@@ -3,21 +3,23 @@
#include "postgres.h"
-typedef struct {
- char *key;
- Oid value;
-} SNMapEntry;
+typedef struct
+{
+ char *key;
+ Oid value;
+} SNMapEntry;
-typedef struct {
- int len;
- int reallen;
- SNMapEntry *list;
-} SNMap;
+typedef struct
+{
+ int len;
+ int reallen;
+ SNMapEntry *list;
+} SNMap;
-void addSNMap( SNMap *map, char *key, Oid value );
-void addSNMap_t( SNMap *map, text *key, Oid value );
-Oid findSNMap( SNMap *map, char *key );
-Oid findSNMap_t( SNMap *map, text *key );
-void freeSNMap( SNMap *map );
+void addSNMap(SNMap * map, char *key, Oid value);
+void addSNMap_t(SNMap * map, text *key, Oid value);
+Oid findSNMap(SNMap * map, char *key);
+Oid findSNMap_t(SNMap * map, text *key);
+void freeSNMap(SNMap * map);
#endif
diff --git a/contrib/tsearch2/snowball/api.c b/contrib/tsearch2/snowball/api.c
index 27bf31a588e..5cbf37d73bf 100644
--- a/contrib/tsearch2/snowball/api.c
+++ b/contrib/tsearch2/snowball/api.c
@@ -2,48 +2,64 @@
#include "header.h"
-extern struct SN_env * SN_create_env(int S_size, int I_size, int B_size)
-{ struct SN_env * z = (struct SN_env *) calloc(1, sizeof(struct SN_env));
- z->p = create_s();
- if (S_size)
- { z->S = (symbol * *) calloc(S_size, sizeof(symbol *));
- { int i;
- for (i = 0; i < S_size; i++) z->S[i] = create_s();
- }
- z->S_size = S_size;
- }
-
- if (I_size)
- { z->I = (int *) calloc(I_size, sizeof(int));
- z->I_size = I_size;
- }
-
- if (B_size)
- { z->B = (symbol *) calloc(B_size, sizeof(symbol));
- z->B_size = B_size;
- }
-
- return z;
+extern struct SN_env *
+SN_create_env(int S_size, int I_size, int B_size)
+{
+ struct SN_env *z = (struct SN_env *) calloc(1, sizeof(struct SN_env));
+
+ z->p = create_s();
+ if (S_size)
+ {
+ z->S = (symbol * *) calloc(S_size, sizeof(symbol *));
+ {
+ int i;
+
+ for (i = 0; i < S_size; i++)
+ z->S[i] = create_s();
+ }
+ z->S_size = S_size;
+ }
+
+ if (I_size)
+ {
+ z->I = (int *) calloc(I_size, sizeof(int));
+ z->I_size = I_size;
+ }
+
+ if (B_size)
+ {
+ z->B = (symbol *) calloc(B_size, sizeof(symbol));
+ z->B_size = B_size;
+ }
+
+ return z;
}
-extern void SN_close_env(struct SN_env * z)
+extern void
+SN_close_env(struct SN_env * z)
{
- if (z->S_size)
- {
- { int i;
- for (i = 0; i < z->S_size; i++) lose_s(z->S[i]);
- }
- free(z->S);
- }
- if (z->I_size) free(z->I);
- if (z->B_size) free(z->B);
- if (z->p) lose_s(z->p);
- free(z);
+ if (z->S_size)
+ {
+ {
+ int i;
+
+ for (i = 0; i < z->S_size; i++)
+ lose_s(z->S[i]);
+ }
+ free(z->S);
+ }
+ if (z->I_size)
+ free(z->I);
+ if (z->B_size)
+ free(z->B);
+ if (z->p)
+ lose_s(z->p);
+ free(z);
}
-extern void SN_set_current(struct SN_env * z, int size, const symbol * s)
+extern void
+SN_set_current(struct SN_env * z, int size, const symbol * s)
{
- replace_s(z, 0, z->l, size, s);
- z->c = 0;
+ replace_s(z, 0, z->l, size, s);
+ z->c = 0;
}
-
diff --git a/contrib/tsearch2/snowball/api.h b/contrib/tsearch2/snowball/api.h
index 3e8b6e18517..efad537b9e8 100644
--- a/contrib/tsearch2/snowball/api.h
+++ b/contrib/tsearch2/snowball/api.h
@@ -11,17 +11,24 @@ typedef unsigned char symbol;
*/
-struct SN_env {
- symbol * p;
- int c; int a; int l; int lb; int bra; int ket;
- int S_size; int I_size; int B_size;
- symbol * * S;
- int * I;
- symbol * B;
+struct SN_env
+{
+ symbol *p;
+ int c;
+ int a;
+ int l;
+ int lb;
+ int bra;
+ int ket;
+ int S_size;
+ int I_size;
+ int B_size;
+ symbol **S;
+ int *I;
+ symbol *B;
};
-extern struct SN_env * SN_create_env(int S_size, int I_size, int B_size);
+extern struct SN_env *SN_create_env(int S_size, int I_size, int B_size);
extern void SN_close_env(struct SN_env * z);
extern void SN_set_current(struct SN_env * z, int size, const symbol * s);
-
diff --git a/contrib/tsearch2/snowball/english_stem.c b/contrib/tsearch2/snowball/english_stem.c
index f6ceb5c576f..310eddf97d8 100644
--- a/contrib/tsearch2/snowball/english_stem.c
+++ b/contrib/tsearch2/snowball/english_stem.c
@@ -3,891 +3,1103 @@
#include "header.h"
-extern int english_stem(struct SN_env * z);
-static int r_exception2(struct SN_env * z);
-static int r_exception1(struct SN_env * z);
-static int r_Step_5(struct SN_env * z);
-static int r_Step_4(struct SN_env * z);
-static int r_Step_3(struct SN_env * z);
-static int r_Step_2(struct SN_env * z);
-static int r_Step_1c(struct SN_env * z);
-static int r_Step_1b(struct SN_env * z);
-static int r_Step_1a(struct SN_env * z);
-static int r_R2(struct SN_env * z);
-static int r_R1(struct SN_env * z);
-static int r_shortv(struct SN_env * z);
-static int r_mark_regions(struct SN_env * z);
-static int r_postlude(struct SN_env * z);
-static int r_prelude(struct SN_env * z);
-
-extern struct SN_env * english_create_env(void);
+extern int english_stem(struct SN_env * z);
+static int r_exception2(struct SN_env * z);
+static int r_exception1(struct SN_env * z);
+static int r_Step_5(struct SN_env * z);
+static int r_Step_4(struct SN_env * z);
+static int r_Step_3(struct SN_env * z);
+static int r_Step_2(struct SN_env * z);
+static int r_Step_1c(struct SN_env * z);
+static int r_Step_1b(struct SN_env * z);
+static int r_Step_1a(struct SN_env * z);
+static int r_R2(struct SN_env * z);
+static int r_R1(struct SN_env * z);
+static int r_shortv(struct SN_env * z);
+static int r_mark_regions(struct SN_env * z);
+static int r_postlude(struct SN_env * z);
+static int r_prelude(struct SN_env * z);
+
+extern struct SN_env *english_create_env(void);
extern void english_close_env(struct SN_env * z);
-static symbol s_0_0[5] = { 'g', 'e', 'n', 'e', 'r' };
+static symbol s_0_0[5] = {'g', 'e', 'n', 'e', 'r'};
static struct among a_0[1] =
{
-/* 0 */ { 5, s_0_0, -1, -1, 0}
+ /* 0 */ {5, s_0_0, -1, -1, 0}
};
-static symbol s_1_0[3] = { 'i', 'e', 'd' };
-static symbol s_1_1[1] = { 's' };
-static symbol s_1_2[3] = { 'i', 'e', 's' };
-static symbol s_1_3[4] = { 's', 's', 'e', 's' };
-static symbol s_1_4[2] = { 's', 's' };
-static symbol s_1_5[2] = { 'u', 's' };
+static symbol s_1_0[3] = {'i', 'e', 'd'};
+static symbol s_1_1[1] = {'s'};
+static symbol s_1_2[3] = {'i', 'e', 's'};
+static symbol s_1_3[4] = {'s', 's', 'e', 's'};
+static symbol s_1_4[2] = {'s', 's'};
+static symbol s_1_5[2] = {'u', 's'};
static struct among a_1[6] =
{
-/* 0 */ { 3, s_1_0, -1, 2, 0},
-/* 1 */ { 1, s_1_1, -1, 3, 0},
-/* 2 */ { 3, s_1_2, 1, 2, 0},
-/* 3 */ { 4, s_1_3, 1, 1, 0},
-/* 4 */ { 2, s_1_4, 1, -1, 0},
-/* 5 */ { 2, s_1_5, 1, -1, 0}
+ /* 0 */ {3, s_1_0, -1, 2, 0},
+ /* 1 */ {1, s_1_1, -1, 3, 0},
+ /* 2 */ {3, s_1_2, 1, 2, 0},
+ /* 3 */ {4, s_1_3, 1, 1, 0},
+ /* 4 */ {2, s_1_4, 1, -1, 0},
+ /* 5 */ {2, s_1_5, 1, -1, 0}
};
-static symbol s_2_1[2] = { 'b', 'b' };
-static symbol s_2_2[2] = { 'd', 'd' };
-static symbol s_2_3[2] = { 'f', 'f' };
-static symbol s_2_4[2] = { 'g', 'g' };
-static symbol s_2_5[2] = { 'b', 'l' };
-static symbol s_2_6[2] = { 'm', 'm' };
-static symbol s_2_7[2] = { 'n', 'n' };
-static symbol s_2_8[2] = { 'p', 'p' };
-static symbol s_2_9[2] = { 'r', 'r' };
-static symbol s_2_10[2] = { 'a', 't' };
-static symbol s_2_11[2] = { 't', 't' };
-static symbol s_2_12[2] = { 'i', 'z' };
+static symbol s_2_1[2] = {'b', 'b'};
+static symbol s_2_2[2] = {'d', 'd'};
+static symbol s_2_3[2] = {'f', 'f'};
+static symbol s_2_4[2] = {'g', 'g'};
+static symbol s_2_5[2] = {'b', 'l'};
+static symbol s_2_6[2] = {'m', 'm'};
+static symbol s_2_7[2] = {'n', 'n'};
+static symbol s_2_8[2] = {'p', 'p'};
+static symbol s_2_9[2] = {'r', 'r'};
+static symbol s_2_10[2] = {'a', 't'};
+static symbol s_2_11[2] = {'t', 't'};
+static symbol s_2_12[2] = {'i', 'z'};
static struct among a_2[13] =
{
-/* 0 */ { 0, 0, -1, 3, 0},
-/* 1 */ { 2, s_2_1, 0, 2, 0},
-/* 2 */ { 2, s_2_2, 0, 2, 0},
-/* 3 */ { 2, s_2_3, 0, 2, 0},
-/* 4 */ { 2, s_2_4, 0, 2, 0},
-/* 5 */ { 2, s_2_5, 0, 1, 0},
-/* 6 */ { 2, s_2_6, 0, 2, 0},
-/* 7 */ { 2, s_2_7, 0, 2, 0},
-/* 8 */ { 2, s_2_8, 0, 2, 0},
-/* 9 */ { 2, s_2_9, 0, 2, 0},
-/* 10 */ { 2, s_2_10, 0, 1, 0},
-/* 11 */ { 2, s_2_11, 0, 2, 0},
-/* 12 */ { 2, s_2_12, 0, 1, 0}
+ /* 0 */ {0, 0, -1, 3, 0},
+ /* 1 */ {2, s_2_1, 0, 2, 0},
+ /* 2 */ {2, s_2_2, 0, 2, 0},
+ /* 3 */ {2, s_2_3, 0, 2, 0},
+ /* 4 */ {2, s_2_4, 0, 2, 0},
+ /* 5 */ {2, s_2_5, 0, 1, 0},
+ /* 6 */ {2, s_2_6, 0, 2, 0},
+ /* 7 */ {2, s_2_7, 0, 2, 0},
+ /* 8 */ {2, s_2_8, 0, 2, 0},
+ /* 9 */ {2, s_2_9, 0, 2, 0},
+ /* 10 */ {2, s_2_10, 0, 1, 0},
+ /* 11 */ {2, s_2_11, 0, 2, 0},
+ /* 12 */ {2, s_2_12, 0, 1, 0}
};
-static symbol s_3_0[2] = { 'e', 'd' };
-static symbol s_3_1[3] = { 'e', 'e', 'd' };
-static symbol s_3_2[3] = { 'i', 'n', 'g' };
-static symbol s_3_3[4] = { 'e', 'd', 'l', 'y' };
-static symbol s_3_4[5] = { 'e', 'e', 'd', 'l', 'y' };
-static symbol s_3_5[5] = { 'i', 'n', 'g', 'l', 'y' };
+static symbol s_3_0[2] = {'e', 'd'};
+static symbol s_3_1[3] = {'e', 'e', 'd'};
+static symbol s_3_2[3] = {'i', 'n', 'g'};
+static symbol s_3_3[4] = {'e', 'd', 'l', 'y'};
+static symbol s_3_4[5] = {'e', 'e', 'd', 'l', 'y'};
+static symbol s_3_5[5] = {'i', 'n', 'g', 'l', 'y'};
static struct among a_3[6] =
{
-/* 0 */ { 2, s_3_0, -1, 2, 0},
-/* 1 */ { 3, s_3_1, 0, 1, 0},
-/* 2 */ { 3, s_3_2, -1, 2, 0},
-/* 3 */ { 4, s_3_3, -1, 2, 0},
-/* 4 */ { 5, s_3_4, 3, 1, 0},
-/* 5 */ { 5, s_3_5, -1, 2, 0}
+ /* 0 */ {2, s_3_0, -1, 2, 0},
+ /* 1 */ {3, s_3_1, 0, 1, 0},
+ /* 2 */ {3, s_3_2, -1, 2, 0},
+ /* 3 */ {4, s_3_3, -1, 2, 0},
+ /* 4 */ {5, s_3_4, 3, 1, 0},
+ /* 5 */ {5, s_3_5, -1, 2, 0}
};
-static symbol s_4_0[4] = { 'a', 'n', 'c', 'i' };
-static symbol s_4_1[4] = { 'e', 'n', 'c', 'i' };
-static symbol s_4_2[3] = { 'o', 'g', 'i' };
-static symbol s_4_3[2] = { 'l', 'i' };
-static symbol s_4_4[3] = { 'b', 'l', 'i' };
-static symbol s_4_5[4] = { 'a', 'b', 'l', 'i' };
-static symbol s_4_6[4] = { 'a', 'l', 'l', 'i' };
-static symbol s_4_7[5] = { 'f', 'u', 'l', 'l', 'i' };
-static symbol s_4_8[6] = { 'l', 'e', 's', 's', 'l', 'i' };
-static symbol s_4_9[5] = { 'o', 'u', 's', 'l', 'i' };
-static symbol s_4_10[5] = { 'e', 'n', 't', 'l', 'i' };
-static symbol s_4_11[5] = { 'a', 'l', 'i', 't', 'i' };
-static symbol s_4_12[6] = { 'b', 'i', 'l', 'i', 't', 'i' };
-static symbol s_4_13[5] = { 'i', 'v', 'i', 't', 'i' };
-static symbol s_4_14[6] = { 't', 'i', 'o', 'n', 'a', 'l' };
-static symbol s_4_15[7] = { 'a', 't', 'i', 'o', 'n', 'a', 'l' };
-static symbol s_4_16[5] = { 'a', 'l', 'i', 's', 'm' };
-static symbol s_4_17[5] = { 'a', 't', 'i', 'o', 'n' };
-static symbol s_4_18[7] = { 'i', 'z', 'a', 't', 'i', 'o', 'n' };
-static symbol s_4_19[4] = { 'i', 'z', 'e', 'r' };
-static symbol s_4_20[4] = { 'a', 't', 'o', 'r' };
-static symbol s_4_21[7] = { 'i', 'v', 'e', 'n', 'e', 's', 's' };
-static symbol s_4_22[7] = { 'f', 'u', 'l', 'n', 'e', 's', 's' };
-static symbol s_4_23[7] = { 'o', 'u', 's', 'n', 'e', 's', 's' };
+static symbol s_4_0[4] = {'a', 'n', 'c', 'i'};
+static symbol s_4_1[4] = {'e', 'n', 'c', 'i'};
+static symbol s_4_2[3] = {'o', 'g', 'i'};
+static symbol s_4_3[2] = {'l', 'i'};
+static symbol s_4_4[3] = {'b', 'l', 'i'};
+static symbol s_4_5[4] = {'a', 'b', 'l', 'i'};
+static symbol s_4_6[4] = {'a', 'l', 'l', 'i'};
+static symbol s_4_7[5] = {'f', 'u', 'l', 'l', 'i'};
+static symbol s_4_8[6] = {'l', 'e', 's', 's', 'l', 'i'};
+static symbol s_4_9[5] = {'o', 'u', 's', 'l', 'i'};
+static symbol s_4_10[5] = {'e', 'n', 't', 'l', 'i'};
+static symbol s_4_11[5] = {'a', 'l', 'i', 't', 'i'};
+static symbol s_4_12[6] = {'b', 'i', 'l', 'i', 't', 'i'};
+static symbol s_4_13[5] = {'i', 'v', 'i', 't', 'i'};
+static symbol s_4_14[6] = {'t', 'i', 'o', 'n', 'a', 'l'};
+static symbol s_4_15[7] = {'a', 't', 'i', 'o', 'n', 'a', 'l'};
+static symbol s_4_16[5] = {'a', 'l', 'i', 's', 'm'};
+static symbol s_4_17[5] = {'a', 't', 'i', 'o', 'n'};
+static symbol s_4_18[7] = {'i', 'z', 'a', 't', 'i', 'o', 'n'};
+static symbol s_4_19[4] = {'i', 'z', 'e', 'r'};
+static symbol s_4_20[4] = {'a', 't', 'o', 'r'};
+static symbol s_4_21[7] = {'i', 'v', 'e', 'n', 'e', 's', 's'};
+static symbol s_4_22[7] = {'f', 'u', 'l', 'n', 'e', 's', 's'};
+static symbol s_4_23[7] = {'o', 'u', 's', 'n', 'e', 's', 's'};
static struct among a_4[24] =
{
-/* 0 */ { 4, s_4_0, -1, 3, 0},
-/* 1 */ { 4, s_4_1, -1, 2, 0},
-/* 2 */ { 3, s_4_2, -1, 13, 0},
-/* 3 */ { 2, s_4_3, -1, 16, 0},
-/* 4 */ { 3, s_4_4, 3, 12, 0},
-/* 5 */ { 4, s_4_5, 4, 4, 0},
-/* 6 */ { 4, s_4_6, 3, 8, 0},
-/* 7 */ { 5, s_4_7, 3, 14, 0},
-/* 8 */ { 6, s_4_8, 3, 15, 0},
-/* 9 */ { 5, s_4_9, 3, 10, 0},
-/* 10 */ { 5, s_4_10, 3, 5, 0},
-/* 11 */ { 5, s_4_11, -1, 8, 0},
-/* 12 */ { 6, s_4_12, -1, 12, 0},
-/* 13 */ { 5, s_4_13, -1, 11, 0},
-/* 14 */ { 6, s_4_14, -1, 1, 0},
-/* 15 */ { 7, s_4_15, 14, 7, 0},
-/* 16 */ { 5, s_4_16, -1, 8, 0},
-/* 17 */ { 5, s_4_17, -1, 7, 0},
-/* 18 */ { 7, s_4_18, 17, 6, 0},
-/* 19 */ { 4, s_4_19, -1, 6, 0},
-/* 20 */ { 4, s_4_20, -1, 7, 0},
-/* 21 */ { 7, s_4_21, -1, 11, 0},
-/* 22 */ { 7, s_4_22, -1, 9, 0},
-/* 23 */ { 7, s_4_23, -1, 10, 0}
+ /* 0 */ {4, s_4_0, -1, 3, 0},
+ /* 1 */ {4, s_4_1, -1, 2, 0},
+ /* 2 */ {3, s_4_2, -1, 13, 0},
+ /* 3 */ {2, s_4_3, -1, 16, 0},
+ /* 4 */ {3, s_4_4, 3, 12, 0},
+ /* 5 */ {4, s_4_5, 4, 4, 0},
+ /* 6 */ {4, s_4_6, 3, 8, 0},
+ /* 7 */ {5, s_4_7, 3, 14, 0},
+ /* 8 */ {6, s_4_8, 3, 15, 0},
+ /* 9 */ {5, s_4_9, 3, 10, 0},
+ /* 10 */ {5, s_4_10, 3, 5, 0},
+ /* 11 */ {5, s_4_11, -1, 8, 0},
+ /* 12 */ {6, s_4_12, -1, 12, 0},
+ /* 13 */ {5, s_4_13, -1, 11, 0},
+ /* 14 */ {6, s_4_14, -1, 1, 0},
+ /* 15 */ {7, s_4_15, 14, 7, 0},
+ /* 16 */ {5, s_4_16, -1, 8, 0},
+ /* 17 */ {5, s_4_17, -1, 7, 0},
+ /* 18 */ {7, s_4_18, 17, 6, 0},
+ /* 19 */ {4, s_4_19, -1, 6, 0},
+ /* 20 */ {4, s_4_20, -1, 7, 0},
+ /* 21 */ {7, s_4_21, -1, 11, 0},
+ /* 22 */ {7, s_4_22, -1, 9, 0},
+ /* 23 */ {7, s_4_23, -1, 10, 0}
};
-static symbol s_5_0[5] = { 'i', 'c', 'a', 't', 'e' };
-static symbol s_5_1[5] = { 'a', 't', 'i', 'v', 'e' };
-static symbol s_5_2[5] = { 'a', 'l', 'i', 'z', 'e' };
-static symbol s_5_3[5] = { 'i', 'c', 'i', 't', 'i' };
-static symbol s_5_4[4] = { 'i', 'c', 'a', 'l' };
-static symbol s_5_5[6] = { 't', 'i', 'o', 'n', 'a', 'l' };
-static symbol s_5_6[7] = { 'a', 't', 'i', 'o', 'n', 'a', 'l' };
-static symbol s_5_7[3] = { 'f', 'u', 'l' };
-static symbol s_5_8[4] = { 'n', 'e', 's', 's' };
+static symbol s_5_0[5] = {'i', 'c', 'a', 't', 'e'};
+static symbol s_5_1[5] = {'a', 't', 'i', 'v', 'e'};
+static symbol s_5_2[5] = {'a', 'l', 'i', 'z', 'e'};
+static symbol s_5_3[5] = {'i', 'c', 'i', 't', 'i'};
+static symbol s_5_4[4] = {'i', 'c', 'a', 'l'};
+static symbol s_5_5[6] = {'t', 'i', 'o', 'n', 'a', 'l'};
+static symbol s_5_6[7] = {'a', 't', 'i', 'o', 'n', 'a', 'l'};
+static symbol s_5_7[3] = {'f', 'u', 'l'};
+static symbol s_5_8[4] = {'n', 'e', 's', 's'};
static struct among a_5[9] =
{
-/* 0 */ { 5, s_5_0, -1, 4, 0},
-/* 1 */ { 5, s_5_1, -1, 6, 0},
-/* 2 */ { 5, s_5_2, -1, 3, 0},
-/* 3 */ { 5, s_5_3, -1, 4, 0},
-/* 4 */ { 4, s_5_4, -1, 4, 0},
-/* 5 */ { 6, s_5_5, -1, 1, 0},
-/* 6 */ { 7, s_5_6, 5, 2, 0},
-/* 7 */ { 3, s_5_7, -1, 5, 0},
-/* 8 */ { 4, s_5_8, -1, 5, 0}
+ /* 0 */ {5, s_5_0, -1, 4, 0},
+ /* 1 */ {5, s_5_1, -1, 6, 0},
+ /* 2 */ {5, s_5_2, -1, 3, 0},
+ /* 3 */ {5, s_5_3, -1, 4, 0},
+ /* 4 */ {4, s_5_4, -1, 4, 0},
+ /* 5 */ {6, s_5_5, -1, 1, 0},
+ /* 6 */ {7, s_5_6, 5, 2, 0},
+ /* 7 */ {3, s_5_7, -1, 5, 0},
+ /* 8 */ {4, s_5_8, -1, 5, 0}
};
-static symbol s_6_0[2] = { 'i', 'c' };
-static symbol s_6_1[4] = { 'a', 'n', 'c', 'e' };
-static symbol s_6_2[4] = { 'e', 'n', 'c', 'e' };
-static symbol s_6_3[4] = { 'a', 'b', 'l', 'e' };
-static symbol s_6_4[4] = { 'i', 'b', 'l', 'e' };
-static symbol s_6_5[3] = { 'a', 't', 'e' };
-static symbol s_6_6[3] = { 'i', 'v', 'e' };
-static symbol s_6_7[3] = { 'i', 'z', 'e' };
-static symbol s_6_8[3] = { 'i', 't', 'i' };
-static symbol s_6_9[2] = { 'a', 'l' };
-static symbol s_6_10[3] = { 'i', 's', 'm' };
-static symbol s_6_11[3] = { 'i', 'o', 'n' };
-static symbol s_6_12[2] = { 'e', 'r' };
-static symbol s_6_13[3] = { 'o', 'u', 's' };
-static symbol s_6_14[3] = { 'a', 'n', 't' };
-static symbol s_6_15[3] = { 'e', 'n', 't' };
-static symbol s_6_16[4] = { 'm', 'e', 'n', 't' };
-static symbol s_6_17[5] = { 'e', 'm', 'e', 'n', 't' };
+static symbol s_6_0[2] = {'i', 'c'};
+static symbol s_6_1[4] = {'a', 'n', 'c', 'e'};
+static symbol s_6_2[4] = {'e', 'n', 'c', 'e'};
+static symbol s_6_3[4] = {'a', 'b', 'l', 'e'};
+static symbol s_6_4[4] = {'i', 'b', 'l', 'e'};
+static symbol s_6_5[3] = {'a', 't', 'e'};
+static symbol s_6_6[3] = {'i', 'v', 'e'};
+static symbol s_6_7[3] = {'i', 'z', 'e'};
+static symbol s_6_8[3] = {'i', 't', 'i'};
+static symbol s_6_9[2] = {'a', 'l'};
+static symbol s_6_10[3] = {'i', 's', 'm'};
+static symbol s_6_11[3] = {'i', 'o', 'n'};
+static symbol s_6_12[2] = {'e', 'r'};
+static symbol s_6_13[3] = {'o', 'u', 's'};
+static symbol s_6_14[3] = {'a', 'n', 't'};
+static symbol s_6_15[3] = {'e', 'n', 't'};
+static symbol s_6_16[4] = {'m', 'e', 'n', 't'};
+static symbol s_6_17[5] = {'e', 'm', 'e', 'n', 't'};
static struct among a_6[18] =
{
-/* 0 */ { 2, s_6_0, -1, 1, 0},
-/* 1 */ { 4, s_6_1, -1, 1, 0},
-/* 2 */ { 4, s_6_2, -1, 1, 0},
-/* 3 */ { 4, s_6_3, -1, 1, 0},
-/* 4 */ { 4, s_6_4, -1, 1, 0},
-/* 5 */ { 3, s_6_5, -1, 1, 0},
-/* 6 */ { 3, s_6_6, -1, 1, 0},
-/* 7 */ { 3, s_6_7, -1, 1, 0},
-/* 8 */ { 3, s_6_8, -1, 1, 0},
-/* 9 */ { 2, s_6_9, -1, 1, 0},
-/* 10 */ { 3, s_6_10, -1, 1, 0},
-/* 11 */ { 3, s_6_11, -1, 2, 0},
-/* 12 */ { 2, s_6_12, -1, 1, 0},
-/* 13 */ { 3, s_6_13, -1, 1, 0},
-/* 14 */ { 3, s_6_14, -1, 1, 0},
-/* 15 */ { 3, s_6_15, -1, 1, 0},
-/* 16 */ { 4, s_6_16, 15, 1, 0},
-/* 17 */ { 5, s_6_17, 16, 1, 0}
+ /* 0 */ {2, s_6_0, -1, 1, 0},
+ /* 1 */ {4, s_6_1, -1, 1, 0},
+ /* 2 */ {4, s_6_2, -1, 1, 0},
+ /* 3 */ {4, s_6_3, -1, 1, 0},
+ /* 4 */ {4, s_6_4, -1, 1, 0},
+ /* 5 */ {3, s_6_5, -1, 1, 0},
+ /* 6 */ {3, s_6_6, -1, 1, 0},
+ /* 7 */ {3, s_6_7, -1, 1, 0},
+ /* 8 */ {3, s_6_8, -1, 1, 0},
+ /* 9 */ {2, s_6_9, -1, 1, 0},
+ /* 10 */ {3, s_6_10, -1, 1, 0},
+ /* 11 */ {3, s_6_11, -1, 2, 0},
+ /* 12 */ {2, s_6_12, -1, 1, 0},
+ /* 13 */ {3, s_6_13, -1, 1, 0},
+ /* 14 */ {3, s_6_14, -1, 1, 0},
+ /* 15 */ {3, s_6_15, -1, 1, 0},
+ /* 16 */ {4, s_6_16, 15, 1, 0},
+ /* 17 */ {5, s_6_17, 16, 1, 0}
};
-static symbol s_7_0[1] = { 'e' };
-static symbol s_7_1[1] = { 'l' };
+static symbol s_7_0[1] = {'e'};
+static symbol s_7_1[1] = {'l'};
static struct among a_7[2] =
{
-/* 0 */ { 1, s_7_0, -1, 1, 0},
-/* 1 */ { 1, s_7_1, -1, 2, 0}
+ /* 0 */ {1, s_7_0, -1, 1, 0},
+ /* 1 */ {1, s_7_1, -1, 2, 0}
};
-static symbol s_8_0[7] = { 's', 'u', 'c', 'c', 'e', 'e', 'd' };
-static symbol s_8_1[7] = { 'p', 'r', 'o', 'c', 'e', 'e', 'd' };
-static symbol s_8_2[6] = { 'e', 'x', 'c', 'e', 'e', 'd' };
-static symbol s_8_3[7] = { 'c', 'a', 'n', 'n', 'i', 'n', 'g' };
-static symbol s_8_4[6] = { 'i', 'n', 'n', 'i', 'n', 'g' };
-static symbol s_8_5[7] = { 'e', 'a', 'r', 'r', 'i', 'n', 'g' };
-static symbol s_8_6[7] = { 'h', 'e', 'r', 'r', 'i', 'n', 'g' };
-static symbol s_8_7[6] = { 'o', 'u', 't', 'i', 'n', 'g' };
+static symbol s_8_0[7] = {'s', 'u', 'c', 'c', 'e', 'e', 'd'};
+static symbol s_8_1[7] = {'p', 'r', 'o', 'c', 'e', 'e', 'd'};
+static symbol s_8_2[6] = {'e', 'x', 'c', 'e', 'e', 'd'};
+static symbol s_8_3[7] = {'c', 'a', 'n', 'n', 'i', 'n', 'g'};
+static symbol s_8_4[6] = {'i', 'n', 'n', 'i', 'n', 'g'};
+static symbol s_8_5[7] = {'e', 'a', 'r', 'r', 'i', 'n', 'g'};
+static symbol s_8_6[7] = {'h', 'e', 'r', 'r', 'i', 'n', 'g'};
+static symbol s_8_7[6] = {'o', 'u', 't', 'i', 'n', 'g'};
static struct among a_8[8] =
{
-/* 0 */ { 7, s_8_0, -1, -1, 0},
-/* 1 */ { 7, s_8_1, -1, -1, 0},
-/* 2 */ { 6, s_8_2, -1, -1, 0},
-/* 3 */ { 7, s_8_3, -1, -1, 0},
-/* 4 */ { 6, s_8_4, -1, -1, 0},
-/* 5 */ { 7, s_8_5, -1, -1, 0},
-/* 6 */ { 7, s_8_6, -1, -1, 0},
-/* 7 */ { 6, s_8_7, -1, -1, 0}
+ /* 0 */ {7, s_8_0, -1, -1, 0},
+ /* 1 */ {7, s_8_1, -1, -1, 0},
+ /* 2 */ {6, s_8_2, -1, -1, 0},
+ /* 3 */ {7, s_8_3, -1, -1, 0},
+ /* 4 */ {6, s_8_4, -1, -1, 0},
+ /* 5 */ {7, s_8_5, -1, -1, 0},
+ /* 6 */ {7, s_8_6, -1, -1, 0},
+ /* 7 */ {6, s_8_7, -1, -1, 0}
};
-static symbol s_9_0[5] = { 'a', 'n', 'd', 'e', 's' };
-static symbol s_9_1[5] = { 'a', 't', 'l', 'a', 's' };
-static symbol s_9_2[4] = { 'b', 'i', 'a', 's' };
-static symbol s_9_3[6] = { 'c', 'o', 's', 'm', 'o', 's' };
-static symbol s_9_4[5] = { 'd', 'y', 'i', 'n', 'g' };
-static symbol s_9_5[5] = { 'e', 'a', 'r', 'l', 'y' };
-static symbol s_9_6[6] = { 'g', 'e', 'n', 't', 'l', 'y' };
-static symbol s_9_7[4] = { 'h', 'o', 'w', 'e' };
-static symbol s_9_8[4] = { 'i', 'd', 'l', 'y' };
-static symbol s_9_9[5] = { 'l', 'y', 'i', 'n', 'g' };
-static symbol s_9_10[4] = { 'n', 'e', 'w', 's' };
-static symbol s_9_11[4] = { 'o', 'n', 'l', 'y' };
-static symbol s_9_12[6] = { 's', 'i', 'n', 'g', 'l', 'y' };
-static symbol s_9_13[5] = { 's', 'k', 'i', 'e', 's' };
-static symbol s_9_14[4] = { 's', 'k', 'i', 's' };
-static symbol s_9_15[3] = { 's', 'k', 'y' };
-static symbol s_9_16[5] = { 't', 'y', 'i', 'n', 'g' };
-static symbol s_9_17[4] = { 'u', 'g', 'l', 'y' };
+static symbol s_9_0[5] = {'a', 'n', 'd', 'e', 's'};
+static symbol s_9_1[5] = {'a', 't', 'l', 'a', 's'};
+static symbol s_9_2[4] = {'b', 'i', 'a', 's'};
+static symbol s_9_3[6] = {'c', 'o', 's', 'm', 'o', 's'};
+static symbol s_9_4[5] = {'d', 'y', 'i', 'n', 'g'};
+static symbol s_9_5[5] = {'e', 'a', 'r', 'l', 'y'};
+static symbol s_9_6[6] = {'g', 'e', 'n', 't', 'l', 'y'};
+static symbol s_9_7[4] = {'h', 'o', 'w', 'e'};
+static symbol s_9_8[4] = {'i', 'd', 'l', 'y'};
+static symbol s_9_9[5] = {'l', 'y', 'i', 'n', 'g'};
+static symbol s_9_10[4] = {'n', 'e', 'w', 's'};
+static symbol s_9_11[4] = {'o', 'n', 'l', 'y'};
+static symbol s_9_12[6] = {'s', 'i', 'n', 'g', 'l', 'y'};
+static symbol s_9_13[5] = {'s', 'k', 'i', 'e', 's'};
+static symbol s_9_14[4] = {'s', 'k', 'i', 's'};
+static symbol s_9_15[3] = {'s', 'k', 'y'};
+static symbol s_9_16[5] = {'t', 'y', 'i', 'n', 'g'};
+static symbol s_9_17[4] = {'u', 'g', 'l', 'y'};
static struct among a_9[18] =
{
-/* 0 */ { 5, s_9_0, -1, -1, 0},
-/* 1 */ { 5, s_9_1, -1, -1, 0},
-/* 2 */ { 4, s_9_2, -1, -1, 0},
-/* 3 */ { 6, s_9_3, -1, -1, 0},
-/* 4 */ { 5, s_9_4, -1, 3, 0},
-/* 5 */ { 5, s_9_5, -1, 9, 0},
-/* 6 */ { 6, s_9_6, -1, 7, 0},
-/* 7 */ { 4, s_9_7, -1, -1, 0},
-/* 8 */ { 4, s_9_8, -1, 6, 0},
-/* 9 */ { 5, s_9_9, -1, 4, 0},
-/* 10 */ { 4, s_9_10, -1, -1, 0},
-/* 11 */ { 4, s_9_11, -1, 10, 0},
-/* 12 */ { 6, s_9_12, -1, 11, 0},
-/* 13 */ { 5, s_9_13, -1, 2, 0},
-/* 14 */ { 4, s_9_14, -1, 1, 0},
-/* 15 */ { 3, s_9_15, -1, -1, 0},
-/* 16 */ { 5, s_9_16, -1, 5, 0},
-/* 17 */ { 4, s_9_17, -1, 8, 0}
+ /* 0 */ {5, s_9_0, -1, -1, 0},
+ /* 1 */ {5, s_9_1, -1, -1, 0},
+ /* 2 */ {4, s_9_2, -1, -1, 0},
+ /* 3 */ {6, s_9_3, -1, -1, 0},
+ /* 4 */ {5, s_9_4, -1, 3, 0},
+ /* 5 */ {5, s_9_5, -1, 9, 0},
+ /* 6 */ {6, s_9_6, -1, 7, 0},
+ /* 7 */ {4, s_9_7, -1, -1, 0},
+ /* 8 */ {4, s_9_8, -1, 6, 0},
+ /* 9 */ {5, s_9_9, -1, 4, 0},
+ /* 10 */ {4, s_9_10, -1, -1, 0},
+ /* 11 */ {4, s_9_11, -1, 10, 0},
+ /* 12 */ {6, s_9_12, -1, 11, 0},
+ /* 13 */ {5, s_9_13, -1, 2, 0},
+ /* 14 */ {4, s_9_14, -1, 1, 0},
+ /* 15 */ {3, s_9_15, -1, -1, 0},
+ /* 16 */ {5, s_9_16, -1, 5, 0},
+ /* 17 */ {4, s_9_17, -1, 8, 0}
};
-static unsigned char g_v[] = { 17, 65, 16, 1 };
-
-static unsigned char g_v_WXY[] = { 1, 17, 65, 208, 1 };
-
-static unsigned char g_valid_LI[] = { 55, 141, 2 };
-
-static symbol s_0[] = { 'y' };
-static symbol s_1[] = { 'Y' };
-static symbol s_2[] = { 'y' };
-static symbol s_3[] = { 'Y' };
-static symbol s_4[] = { 's', 's' };
-static symbol s_5[] = { 'i', 'e' };
-static symbol s_6[] = { 'i' };
-static symbol s_7[] = { 'e', 'e' };
-static symbol s_8[] = { 'e' };
-static symbol s_9[] = { 'e' };
-static symbol s_10[] = { 'y' };
-static symbol s_11[] = { 'Y' };
-static symbol s_12[] = { 'i' };
-static symbol s_13[] = { 't', 'i', 'o', 'n' };
-static symbol s_14[] = { 'e', 'n', 'c', 'e' };
-static symbol s_15[] = { 'a', 'n', 'c', 'e' };
-static symbol s_16[] = { 'a', 'b', 'l', 'e' };
-static symbol s_17[] = { 'e', 'n', 't' };
-static symbol s_18[] = { 'i', 'z', 'e' };
-static symbol s_19[] = { 'a', 't', 'e' };
-static symbol s_20[] = { 'a', 'l' };
-static symbol s_21[] = { 'f', 'u', 'l' };
-static symbol s_22[] = { 'o', 'u', 's' };
-static symbol s_23[] = { 'i', 'v', 'e' };
-static symbol s_24[] = { 'b', 'l', 'e' };
-static symbol s_25[] = { 'l' };
-static symbol s_26[] = { 'o', 'g' };
-static symbol s_27[] = { 'f', 'u', 'l' };
-static symbol s_28[] = { 'l', 'e', 's', 's' };
-static symbol s_29[] = { 't', 'i', 'o', 'n' };
-static symbol s_30[] = { 'a', 't', 'e' };
-static symbol s_31[] = { 'a', 'l' };
-static symbol s_32[] = { 'i', 'c' };
-static symbol s_33[] = { 's' };
-static symbol s_34[] = { 't' };
-static symbol s_35[] = { 'l' };
-static symbol s_36[] = { 's', 'k', 'i' };
-static symbol s_37[] = { 's', 'k', 'y' };
-static symbol s_38[] = { 'd', 'i', 'e' };
-static symbol s_39[] = { 'l', 'i', 'e' };
-static symbol s_40[] = { 't', 'i', 'e' };
-static symbol s_41[] = { 'i', 'd', 'l' };
-static symbol s_42[] = { 'g', 'e', 'n', 't', 'l' };
-static symbol s_43[] = { 'u', 'g', 'l', 'i' };
-static symbol s_44[] = { 'e', 'a', 'r', 'l', 'i' };
-static symbol s_45[] = { 'o', 'n', 'l', 'i' };
-static symbol s_46[] = { 's', 'i', 'n', 'g', 'l' };
-static symbol s_47[] = { 'Y' };
-static symbol s_48[] = { 'y' };
-
-static int r_prelude(struct SN_env * z) {
- z->B[0] = 0; /* unset Y_found, line 24 */
- { int c = z->c; /* do, line 25 */
- z->bra = z->c; /* [, line 25 */
- if (!(eq_s(z, 1, s_0))) goto lab0;
- z->ket = z->c; /* ], line 25 */
- if (!(in_grouping(z, g_v, 97, 121))) goto lab0;
- slice_from_s(z, 1, s_1); /* <-, line 25 */
- z->B[0] = 1; /* set Y_found, line 25 */
- lab0:
- z->c = c;
- }
- { int c = z->c; /* do, line 26 */
- while(1) { /* repeat, line 26 */
- int c = z->c;
- while(1) { /* goto, line 26 */
- int c = z->c;
- if (!(in_grouping(z, g_v, 97, 121))) goto lab3;
- z->bra = z->c; /* [, line 26 */
- if (!(eq_s(z, 1, s_2))) goto lab3;
- z->ket = z->c; /* ], line 26 */
- z->c = c;
- break;
- lab3:
- z->c = c;
- if (z->c >= z->l) goto lab2;
- z->c++;
- }
- slice_from_s(z, 1, s_3); /* <-, line 26 */
- z->B[0] = 1; /* set Y_found, line 26 */
- continue;
- lab2:
- z->c = c;
- break;
- }
- z->c = c;
- }
- return 1;
+static unsigned char g_v[] = {17, 65, 16, 1};
+
+static unsigned char g_v_WXY[] = {1, 17, 65, 208, 1};
+
+static unsigned char g_valid_LI[] = {55, 141, 2};
+
+static symbol s_0[] = {'y'};
+static symbol s_1[] = {'Y'};
+static symbol s_2[] = {'y'};
+static symbol s_3[] = {'Y'};
+static symbol s_4[] = {'s', 's'};
+static symbol s_5[] = {'i', 'e'};
+static symbol s_6[] = {'i'};
+static symbol s_7[] = {'e', 'e'};
+static symbol s_8[] = {'e'};
+static symbol s_9[] = {'e'};
+static symbol s_10[] = {'y'};
+static symbol s_11[] = {'Y'};
+static symbol s_12[] = {'i'};
+static symbol s_13[] = {'t', 'i', 'o', 'n'};
+static symbol s_14[] = {'e', 'n', 'c', 'e'};
+static symbol s_15[] = {'a', 'n', 'c', 'e'};
+static symbol s_16[] = {'a', 'b', 'l', 'e'};
+static symbol s_17[] = {'e', 'n', 't'};
+static symbol s_18[] = {'i', 'z', 'e'};
+static symbol s_19[] = {'a', 't', 'e'};
+static symbol s_20[] = {'a', 'l'};
+static symbol s_21[] = {'f', 'u', 'l'};
+static symbol s_22[] = {'o', 'u', 's'};
+static symbol s_23[] = {'i', 'v', 'e'};
+static symbol s_24[] = {'b', 'l', 'e'};
+static symbol s_25[] = {'l'};
+static symbol s_26[] = {'o', 'g'};
+static symbol s_27[] = {'f', 'u', 'l'};
+static symbol s_28[] = {'l', 'e', 's', 's'};
+static symbol s_29[] = {'t', 'i', 'o', 'n'};
+static symbol s_30[] = {'a', 't', 'e'};
+static symbol s_31[] = {'a', 'l'};
+static symbol s_32[] = {'i', 'c'};
+static symbol s_33[] = {'s'};
+static symbol s_34[] = {'t'};
+static symbol s_35[] = {'l'};
+static symbol s_36[] = {'s', 'k', 'i'};
+static symbol s_37[] = {'s', 'k', 'y'};
+static symbol s_38[] = {'d', 'i', 'e'};
+static symbol s_39[] = {'l', 'i', 'e'};
+static symbol s_40[] = {'t', 'i', 'e'};
+static symbol s_41[] = {'i', 'd', 'l'};
+static symbol s_42[] = {'g', 'e', 'n', 't', 'l'};
+static symbol s_43[] = {'u', 'g', 'l', 'i'};
+static symbol s_44[] = {'e', 'a', 'r', 'l', 'i'};
+static symbol s_45[] = {'o', 'n', 'l', 'i'};
+static symbol s_46[] = {'s', 'i', 'n', 'g', 'l'};
+static symbol s_47[] = {'Y'};
+static symbol s_48[] = {'y'};
+
+static int
+r_prelude(struct SN_env * z)
+{
+ z->B[0] = 0; /* unset Y_found, line 24 */
+ {
+ int c = z->c; /* do, line 25 */
+
+ z->bra = z->c; /* [, line 25 */
+ if (!(eq_s(z, 1, s_0)))
+ goto lab0;
+ z->ket = z->c; /* ], line 25 */
+ if (!(in_grouping(z, g_v, 97, 121)))
+ goto lab0;
+ slice_from_s(z, 1, s_1); /* <-, line 25 */
+ z->B[0] = 1; /* set Y_found, line 25 */
+lab0:
+ z->c = c;
+ }
+ {
+ int c = z->c; /* do, line 26 */
+
+ while (1)
+ { /* repeat, line 26 */
+ int c = z->c;
+
+ while (1)
+ { /* goto, line 26 */
+ int c = z->c;
+
+ if (!(in_grouping(z, g_v, 97, 121)))
+ goto lab3;
+ z->bra = z->c; /* [, line 26 */
+ if (!(eq_s(z, 1, s_2)))
+ goto lab3;
+ z->ket = z->c; /* ], line 26 */
+ z->c = c;
+ break;
+ lab3:
+ z->c = c;
+ if (z->c >= z->l)
+ goto lab2;
+ z->c++;
+ }
+ slice_from_s(z, 1, s_3); /* <-, line 26 */
+ z->B[0] = 1; /* set Y_found, line 26 */
+ continue;
+ lab2:
+ z->c = c;
+ break;
+ }
+ z->c = c;
+ }
+ return 1;
}
-static int r_mark_regions(struct SN_env * z) {
- z->I[0] = z->l;
- z->I[1] = z->l;
- { int c = z->c; /* do, line 32 */
- { int c = z->c; /* or, line 36 */
- if (!(find_among(z, a_0, 1))) goto lab2; /* among, line 33 */
- goto lab1;
- lab2:
- z->c = c;
- while(1) { /* gopast, line 36 */
- if (!(in_grouping(z, g_v, 97, 121))) goto lab3;
- break;
- lab3:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- while(1) { /* gopast, line 36 */
- if (!(out_grouping(z, g_v, 97, 121))) goto lab4;
- break;
- lab4:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- }
- lab1:
- z->I[0] = z->c; /* setmark p1, line 37 */
- while(1) { /* gopast, line 38 */
- if (!(in_grouping(z, g_v, 97, 121))) goto lab5;
- break;
- lab5:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- while(1) { /* gopast, line 38 */
- if (!(out_grouping(z, g_v, 97, 121))) goto lab6;
- break;
- lab6:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- z->I[1] = z->c; /* setmark p2, line 38 */
- lab0:
- z->c = c;
- }
- return 1;
+static int
+r_mark_regions(struct SN_env * z)
+{
+ z->I[0] = z->l;
+ z->I[1] = z->l;
+ {
+ int c = z->c; /* do, line 32 */
+
+ {
+ int c = z->c; /* or, line 36 */
+
+ if (!(find_among(z, a_0, 1)))
+ goto lab2; /* among, line 33 */
+ goto lab1;
+ lab2:
+ z->c = c;
+ while (1)
+ { /* gopast, line 36 */
+ if (!(in_grouping(z, g_v, 97, 121)))
+ goto lab3;
+ break;
+ lab3:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ while (1)
+ { /* gopast, line 36 */
+ if (!(out_grouping(z, g_v, 97, 121)))
+ goto lab4;
+ break;
+ lab4:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ }
+lab1:
+ z->I[0] = z->c; /* setmark p1, line 37 */
+ while (1)
+ { /* gopast, line 38 */
+ if (!(in_grouping(z, g_v, 97, 121)))
+ goto lab5;
+ break;
+ lab5:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ while (1)
+ { /* gopast, line 38 */
+ if (!(out_grouping(z, g_v, 97, 121)))
+ goto lab6;
+ break;
+ lab6:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ z->I[1] = z->c; /* setmark p2, line 38 */
+lab0:
+ z->c = c;
+ }
+ return 1;
}
-static int r_shortv(struct SN_env * z) {
- { int m = z->l - z->c; /* or, line 46 */
- if (!(out_grouping_b(z, g_v_WXY, 89, 121))) goto lab1;
- if (!(in_grouping_b(z, g_v, 97, 121))) goto lab1;
- if (!(out_grouping_b(z, g_v, 97, 121))) goto lab1;
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!(out_grouping_b(z, g_v, 97, 121))) return 0;
- if (!(in_grouping_b(z, g_v, 97, 121))) return 0;
- if (z->c > z->lb) return 0; /* atlimit, line 47 */
- }
+static int
+r_shortv(struct SN_env * z)
+{
+ {
+ int m = z->l - z->c; /* or, line 46 */
+
+ if (!(out_grouping_b(z, g_v_WXY, 89, 121)))
+ goto lab1;
+ if (!(in_grouping_b(z, g_v, 97, 121)))
+ goto lab1;
+ if (!(out_grouping_b(z, g_v, 97, 121)))
+ goto lab1;
+ goto lab0;
+lab1:
+ z->c = z->l - m;
+ if (!(out_grouping_b(z, g_v, 97, 121)))
+ return 0;
+ if (!(in_grouping_b(z, g_v, 97, 121)))
+ return 0;
+ if (z->c > z->lb)
+ return 0; /* atlimit, line 47 */
+ }
lab0:
- return 1;
+ return 1;
}
-static int r_R1(struct SN_env * z) {
- if (!(z->I[0] <= z->c)) return 0;
- return 1;
+static int
+r_R1(struct SN_env * z)
+{
+ if (!(z->I[0] <= z->c))
+ return 0;
+ return 1;
}
-static int r_R2(struct SN_env * z) {
- if (!(z->I[1] <= z->c)) return 0;
- return 1;
+static int
+r_R2(struct SN_env * z)
+{
+ if (!(z->I[1] <= z->c))
+ return 0;
+ return 1;
}
-static int r_Step_1a(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 54 */
- among_var = find_among_b(z, a_1, 6); /* substring, line 54 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 54 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_from_s(z, 2, s_4); /* <-, line 55 */
- break;
- case 2:
- { int m = z->l - z->c; /* or, line 57 */
- if (z->c <= z->lb) goto lab1;
- z->c--; /* next, line 57 */
- if (z->c > z->lb) goto lab1; /* atlimit, line 57 */
- slice_from_s(z, 2, s_5); /* <-, line 57 */
- goto lab0;
- lab1:
- z->c = z->l - m;
- slice_from_s(z, 1, s_6); /* <-, line 57 */
- }
- lab0:
- break;
- case 3:
- if (z->c <= z->lb) return 0;
- z->c--; /* next, line 58 */
- while(1) { /* gopast, line 58 */
- if (!(in_grouping_b(z, g_v, 97, 121))) goto lab2;
- break;
- lab2:
- if (z->c <= z->lb) return 0;
- z->c--;
- }
- slice_del(z); /* delete, line 58 */
- break;
- }
- return 1;
+static int
+r_Step_1a(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 54 */
+ among_var = find_among_b(z, a_1, 6); /* substring, line 54 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 54 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_from_s(z, 2, s_4); /* <-, line 55 */
+ break;
+ case 2:
+ {
+ int m = z->l - z->c; /* or, line 57 */
+
+ if (z->c <= z->lb)
+ goto lab1;
+ z->c--; /* next, line 57 */
+ if (z->c > z->lb)
+ goto lab1; /* atlimit, line 57 */
+ slice_from_s(z, 2, s_5); /* <-, line 57 */
+ goto lab0;
+ lab1:
+ z->c = z->l - m;
+ slice_from_s(z, 1, s_6); /* <-, line 57 */
+ }
+ lab0:
+ break;
+ case 3:
+ if (z->c <= z->lb)
+ return 0;
+ z->c--; /* next, line 58 */
+ while (1)
+ { /* gopast, line 58 */
+ if (!(in_grouping_b(z, g_v, 97, 121)))
+ goto lab2;
+ break;
+ lab2:
+ if (z->c <= z->lb)
+ return 0;
+ z->c--;
+ }
+ slice_del(z); /* delete, line 58 */
+ break;
+ }
+ return 1;
}
-static int r_Step_1b(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 64 */
- among_var = find_among_b(z, a_3, 6); /* substring, line 64 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 64 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- if (!r_R1(z)) return 0; /* call R1, line 66 */
- slice_from_s(z, 2, s_7); /* <-, line 66 */
- break;
- case 2:
- { int m_test = z->l - z->c; /* test, line 69 */
- while(1) { /* gopast, line 69 */
- if (!(in_grouping_b(z, g_v, 97, 121))) goto lab0;
- break;
- lab0:
- if (z->c <= z->lb) return 0;
- z->c--;
- }
- z->c = z->l - m_test;
- }
- slice_del(z); /* delete, line 69 */
- { int m_test = z->l - z->c; /* test, line 70 */
- among_var = find_among_b(z, a_2, 13); /* substring, line 70 */
- if (!(among_var)) return 0;
- z->c = z->l - m_test;
- }
- switch(among_var) {
- case 0: return 0;
- case 1:
- { int c = z->c;
- insert_s(z, z->c, z->c, 1, s_8); /* <+, line 72 */
- z->c = c;
- }
- break;
- case 2:
- z->ket = z->c; /* [, line 75 */
- if (z->c <= z->lb) return 0;
- z->c--; /* next, line 75 */
- z->bra = z->c; /* ], line 75 */
- slice_del(z); /* delete, line 75 */
- break;
- case 3:
- if (z->c != z->I[0]) return 0; /* atmark, line 76 */
- { int m_test = z->l - z->c; /* test, line 76 */
- if (!r_shortv(z)) return 0; /* call shortv, line 76 */
- z->c = z->l - m_test;
- }
- { int c = z->c;
- insert_s(z, z->c, z->c, 1, s_9); /* <+, line 76 */
- z->c = c;
- }
- break;
- }
- break;
- }
- return 1;
+static int
+r_Step_1b(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 64 */
+ among_var = find_among_b(z, a_3, 6); /* substring, line 64 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 64 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ if (!r_R1(z))
+ return 0; /* call R1, line 66 */
+ slice_from_s(z, 2, s_7); /* <-, line 66 */
+ break;
+ case 2:
+ {
+ int m_test = z->l - z->c; /* test, line 69 */
+
+ while (1)
+ { /* gopast, line 69 */
+ if (!(in_grouping_b(z, g_v, 97, 121)))
+ goto lab0;
+ break;
+ lab0:
+ if (z->c <= z->lb)
+ return 0;
+ z->c--;
+ }
+ z->c = z->l - m_test;
+ }
+ slice_del(z); /* delete, line 69 */
+ {
+ int m_test = z->l - z->c; /* test, line 70 */
+
+ among_var = find_among_b(z, a_2, 13); /* substring, line 70 */
+ if (!(among_var))
+ return 0;
+ z->c = z->l - m_test;
+ }
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ {
+ int c = z->c;
+
+ insert_s(z, z->c, z->c, 1, s_8); /* <+, line 72 */
+ z->c = c;
+ }
+ break;
+ case 2:
+ z->ket = z->c; /* [, line 75 */
+ if (z->c <= z->lb)
+ return 0;
+ z->c--; /* next, line 75 */
+ z->bra = z->c; /* ], line 75 */
+ slice_del(z); /* delete, line 75 */
+ break;
+ case 3:
+ if (z->c != z->I[0])
+ return 0; /* atmark, line 76 */
+ {
+ int m_test = z->l - z->c; /* test, line 76 */
+
+ if (!r_shortv(z))
+ return 0; /* call shortv, line 76 */
+ z->c = z->l - m_test;
+ }
+ {
+ int c = z->c;
+
+ insert_s(z, z->c, z->c, 1, s_9); /* <+, line 76 */
+ z->c = c;
+ }
+ break;
+ }
+ break;
+ }
+ return 1;
}
-static int r_Step_1c(struct SN_env * z) {
- z->ket = z->c; /* [, line 83 */
- { int m = z->l - z->c; /* or, line 83 */
- if (!(eq_s_b(z, 1, s_10))) goto lab1;
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!(eq_s_b(z, 1, s_11))) return 0;
- }
+static int
+r_Step_1c(struct SN_env * z)
+{
+ z->ket = z->c; /* [, line 83 */
+ {
+ int m = z->l - z->c; /* or, line 83 */
+
+ if (!(eq_s_b(z, 1, s_10)))
+ goto lab1;
+ goto lab0;
+lab1:
+ z->c = z->l - m;
+ if (!(eq_s_b(z, 1, s_11)))
+ return 0;
+ }
lab0:
- z->bra = z->c; /* ], line 83 */
- if (!(out_grouping_b(z, g_v, 97, 121))) return 0;
- { int m = z->l - z->c; /* not, line 84 */
- if (z->c > z->lb) goto lab2; /* atlimit, line 84 */
- return 0;
- lab2:
- z->c = z->l - m;
- }
- slice_from_s(z, 1, s_12); /* <-, line 85 */
- return 1;
-}
+ z->bra = z->c; /* ], line 83 */
+ if (!(out_grouping_b(z, g_v, 97, 121)))
+ return 0;
+ {
+ int m = z->l - z->c; /* not, line 84 */

-static int r_Step_2(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 89 */
- among_var = find_among_b(z, a_4, 24); /* substring, line 89 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 89 */
- if (!r_R1(z)) return 0; /* call R1, line 89 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_from_s(z, 4, s_13); /* <-, line 90 */
- break;
- case 2:
- slice_from_s(z, 4, s_14); /* <-, line 91 */
- break;
- case 3:
- slice_from_s(z, 4, s_15); /* <-, line 92 */
- break;
- case 4:
- slice_from_s(z, 4, s_16); /* <-, line 93 */
- break;
- case 5:
- slice_from_s(z, 3, s_17); /* <-, line 94 */
- break;
- case 6:
- slice_from_s(z, 3, s_18); /* <-, line 96 */
- break;
- case 7:
- slice_from_s(z, 3, s_19); /* <-, line 98 */
- break;
- case 8:
- slice_from_s(z, 2, s_20); /* <-, line 100 */
- break;
- case 9:
- slice_from_s(z, 3, s_21); /* <-, line 101 */
- break;
- case 10:
- slice_from_s(z, 3, s_22); /* <-, line 103 */
- break;
- case 11:
- slice_from_s(z, 3, s_23); /* <-, line 105 */
- break;
- case 12:
- slice_from_s(z, 3, s_24); /* <-, line 107 */
- break;
- case 13:
- if (!(eq_s_b(z, 1, s_25))) return 0;
- slice_from_s(z, 2, s_26); /* <-, line 108 */
- break;
- case 14:
- slice_from_s(z, 3, s_27); /* <-, line 109 */
- break;
- case 15:
- slice_from_s(z, 4, s_28); /* <-, line 110 */
- break;
- case 16:
- if (!(in_grouping_b(z, g_valid_LI, 99, 116))) return 0;
- slice_del(z); /* delete, line 111 */
- break;
- }
- return 1;
+ if (z->c > z->lb)
+ goto lab2; /* atlimit, line 84 */
+ return 0;
+lab2:
+ z->c = z->l - m;
+ }
+ slice_from_s(z, 1, s_12); /* <-, line 85 */
+ return 1;
}
-static int r_Step_3(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 116 */
- among_var = find_among_b(z, a_5, 9); /* substring, line 116 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 116 */
- if (!r_R1(z)) return 0; /* call R1, line 116 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_from_s(z, 4, s_29); /* <-, line 117 */
- break;
- case 2:
- slice_from_s(z, 3, s_30); /* <-, line 118 */
- break;
- case 3:
- slice_from_s(z, 2, s_31); /* <-, line 119 */
- break;
- case 4:
- slice_from_s(z, 2, s_32); /* <-, line 121 */
- break;
- case 5:
- slice_del(z); /* delete, line 123 */
- break;
- case 6:
- if (!r_R2(z)) return 0; /* call R2, line 125 */
- slice_del(z); /* delete, line 125 */
- break;
- }
- return 1;
+static int
+r_Step_2(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 89 */
+ among_var = find_among_b(z, a_4, 24); /* substring, line 89 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 89 */
+ if (!r_R1(z))
+ return 0; /* call R1, line 89 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_from_s(z, 4, s_13); /* <-, line 90 */
+ break;
+ case 2:
+ slice_from_s(z, 4, s_14); /* <-, line 91 */
+ break;
+ case 3:
+ slice_from_s(z, 4, s_15); /* <-, line 92 */
+ break;
+ case 4:
+ slice_from_s(z, 4, s_16); /* <-, line 93 */
+ break;
+ case 5:
+ slice_from_s(z, 3, s_17); /* <-, line 94 */
+ break;
+ case 6:
+ slice_from_s(z, 3, s_18); /* <-, line 96 */
+ break;
+ case 7:
+ slice_from_s(z, 3, s_19); /* <-, line 98 */
+ break;
+ case 8:
+ slice_from_s(z, 2, s_20); /* <-, line 100 */
+ break;
+ case 9:
+ slice_from_s(z, 3, s_21); /* <-, line 101 */
+ break;
+ case 10:
+ slice_from_s(z, 3, s_22); /* <-, line 103 */
+ break;
+ case 11:
+ slice_from_s(z, 3, s_23); /* <-, line 105 */
+ break;
+ case 12:
+ slice_from_s(z, 3, s_24); /* <-, line 107 */
+ break;
+ case 13:
+ if (!(eq_s_b(z, 1, s_25)))
+ return 0;
+ slice_from_s(z, 2, s_26); /* <-, line 108 */
+ break;
+ case 14:
+ slice_from_s(z, 3, s_27); /* <-, line 109 */
+ break;
+ case 15:
+ slice_from_s(z, 4, s_28); /* <-, line 110 */
+ break;
+ case 16:
+ if (!(in_grouping_b(z, g_valid_LI, 99, 116)))
+ return 0;
+ slice_del(z); /* delete, line 111 */
+ break;
+ }
+ return 1;
}
-static int r_Step_4(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 130 */
- among_var = find_among_b(z, a_6, 18); /* substring, line 130 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 130 */
- if (!r_R2(z)) return 0; /* call R2, line 130 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 133 */
- break;
- case 2:
- { int m = z->l - z->c; /* or, line 134 */
- if (!(eq_s_b(z, 1, s_33))) goto lab1;
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!(eq_s_b(z, 1, s_34))) return 0;
- }
- lab0:
- slice_del(z); /* delete, line 134 */
- break;
- }
- return 1;
+static int
+r_Step_3(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 116 */
+ among_var = find_among_b(z, a_5, 9); /* substring, line 116 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 116 */
+ if (!r_R1(z))
+ return 0; /* call R1, line 116 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_from_s(z, 4, s_29); /* <-, line 117 */
+ break;
+ case 2:
+ slice_from_s(z, 3, s_30); /* <-, line 118 */
+ break;
+ case 3:
+ slice_from_s(z, 2, s_31); /* <-, line 119 */
+ break;
+ case 4:
+ slice_from_s(z, 2, s_32); /* <-, line 121 */
+ break;
+ case 5:
+ slice_del(z); /* delete, line 123 */
+ break;
+ case 6:
+ if (!r_R2(z))
+ return 0; /* call R2, line 125 */
+ slice_del(z); /* delete, line 125 */
+ break;
+ }
+ return 1;
}
-static int r_Step_5(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 139 */
- among_var = find_among_b(z, a_7, 2); /* substring, line 139 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 139 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- { int m = z->l - z->c; /* or, line 140 */
- if (!r_R2(z)) goto lab1; /* call R2, line 140 */
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!r_R1(z)) return 0; /* call R1, line 140 */
- { int m = z->l - z->c; /* not, line 140 */
- if (!r_shortv(z)) goto lab2; /* call shortv, line 140 */
- return 0;
- lab2:
- z->c = z->l - m;
- }
- }
- lab0:
- slice_del(z); /* delete, line 140 */
- break;
- case 2:
- if (!r_R2(z)) return 0; /* call R2, line 141 */
- if (!(eq_s_b(z, 1, s_35))) return 0;
- slice_del(z); /* delete, line 141 */
- break;
- }
- return 1;
+static int
+r_Step_4(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 130 */
+ among_var = find_among_b(z, a_6, 18); /* substring, line 130 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 130 */
+ if (!r_R2(z))
+ return 0; /* call R2, line 130 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 133 */
+ break;
+ case 2:
+ {
+ int m = z->l - z->c; /* or, line 134 */
+
+ if (!(eq_s_b(z, 1, s_33)))
+ goto lab1;
+ goto lab0;
+ lab1:
+ z->c = z->l - m;
+ if (!(eq_s_b(z, 1, s_34)))
+ return 0;
+ }
+ lab0:
+ slice_del(z); /* delete, line 134 */
+ break;
+ }
+ return 1;
}
-static int r_exception2(struct SN_env * z) {
- z->ket = z->c; /* [, line 147 */
- if (!(find_among_b(z, a_8, 8))) return 0; /* substring, line 147 */
- z->bra = z->c; /* ], line 147 */
- if (z->c > z->lb) return 0; /* atlimit, line 147 */
- return 1;
+static int
+r_Step_5(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 139 */
+ among_var = find_among_b(z, a_7, 2); /* substring, line 139 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 139 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ {
+ int m = z->l - z->c; /* or, line 140 */
+
+ if (!r_R2(z))
+ goto lab1; /* call R2, line 140 */
+ goto lab0;
+ lab1:
+ z->c = z->l - m;
+ if (!r_R1(z))
+ return 0; /* call R1, line 140 */
+ {
+ int m = z->l - z->c; /* not, line 140 */
+
+ if (!r_shortv(z))
+ goto lab2; /* call shortv, line 140 */
+ return 0;
+ lab2:
+ z->c = z->l - m;
+ }
+ }
+ lab0:
+ slice_del(z); /* delete, line 140 */
+ break;
+ case 2:
+ if (!r_R2(z))
+ return 0; /* call R2, line 141 */
+ if (!(eq_s_b(z, 1, s_35)))
+ return 0;
+ slice_del(z); /* delete, line 141 */
+ break;
+ }
+ return 1;
}
-static int r_exception1(struct SN_env * z) {
- int among_var;
- z->bra = z->c; /* [, line 159 */
- among_var = find_among(z, a_9, 18); /* substring, line 159 */
- if (!(among_var)) return 0;
- z->ket = z->c; /* ], line 159 */
- if (z->c < z->l) return 0; /* atlimit, line 159 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_from_s(z, 3, s_36); /* <-, line 163 */
- break;
- case 2:
- slice_from_s(z, 3, s_37); /* <-, line 164 */
- break;
- case 3:
- slice_from_s(z, 3, s_38); /* <-, line 165 */
- break;
- case 4:
- slice_from_s(z, 3, s_39); /* <-, line 166 */
- break;
- case 5:
- slice_from_s(z, 3, s_40); /* <-, line 167 */
- break;
- case 6:
- slice_from_s(z, 3, s_41); /* <-, line 171 */
- break;
- case 7:
- slice_from_s(z, 5, s_42); /* <-, line 172 */
- break;
- case 8:
- slice_from_s(z, 4, s_43); /* <-, line 173 */
- break;
- case 9:
- slice_from_s(z, 5, s_44); /* <-, line 174 */
- break;
- case 10:
- slice_from_s(z, 4, s_45); /* <-, line 175 */
- break;
- case 11:
- slice_from_s(z, 5, s_46); /* <-, line 176 */
- break;
- }
- return 1;
+static int
+r_exception2(struct SN_env * z)
+{
+ z->ket = z->c; /* [, line 147 */
+ if (!(find_among_b(z, a_8, 8)))
+ return 0; /* substring, line 147 */
+ z->bra = z->c; /* ], line 147 */
+ if (z->c > z->lb)
+ return 0; /* atlimit, line 147 */
+ return 1;
}
-static int r_postlude(struct SN_env * z) {
- if (!(z->B[0])) return 0; /* Boolean test Y_found, line 192 */
- while(1) { /* repeat, line 192 */
- int c = z->c;
- while(1) { /* goto, line 192 */
- int c = z->c;
- z->bra = z->c; /* [, line 192 */
- if (!(eq_s(z, 1, s_47))) goto lab1;
- z->ket = z->c; /* ], line 192 */
- z->c = c;
- break;
- lab1:
- z->c = c;
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- slice_from_s(z, 1, s_48); /* <-, line 192 */
- continue;
- lab0:
- z->c = c;
- break;
- }
- return 1;
+static int
+r_exception1(struct SN_env * z)
+{
+ int among_var;
+
+ z->bra = z->c; /* [, line 159 */
+ among_var = find_among(z, a_9, 18); /* substring, line 159 */
+ if (!(among_var))
+ return 0;
+ z->ket = z->c; /* ], line 159 */
+ if (z->c < z->l)
+ return 0; /* atlimit, line 159 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_from_s(z, 3, s_36); /* <-, line 163 */
+ break;
+ case 2:
+ slice_from_s(z, 3, s_37); /* <-, line 164 */
+ break;
+ case 3:
+ slice_from_s(z, 3, s_38); /* <-, line 165 */
+ break;
+ case 4:
+ slice_from_s(z, 3, s_39); /* <-, line 166 */
+ break;
+ case 5:
+ slice_from_s(z, 3, s_40); /* <-, line 167 */
+ break;
+ case 6:
+ slice_from_s(z, 3, s_41); /* <-, line 171 */
+ break;
+ case 7:
+ slice_from_s(z, 5, s_42); /* <-, line 172 */
+ break;
+ case 8:
+ slice_from_s(z, 4, s_43); /* <-, line 173 */
+ break;
+ case 9:
+ slice_from_s(z, 5, s_44); /* <-, line 174 */
+ break;
+ case 10:
+ slice_from_s(z, 4, s_45); /* <-, line 175 */
+ break;
+ case 11:
+ slice_from_s(z, 5, s_46); /* <-, line 176 */
+ break;
+ }
+ return 1;
}
-extern int english_stem(struct SN_env * z) {
- { int c = z->c; /* or, line 196 */
- if (!r_exception1(z)) goto lab1; /* call exception1, line 196 */
- goto lab0;
- lab1:
- z->c = c;
- { int c_test = z->c; /* test, line 198 */
- { int c = z->c + 3;
- if (0 > c || c > z->l) return 0;
- z->c = c; /* hop, line 198 */
- }
- z->c = c_test;
- }
- { int c = z->c; /* do, line 199 */
- if (!r_prelude(z)) goto lab2; /* call prelude, line 199 */
- lab2:
- z->c = c;
- }
- { int c = z->c; /* do, line 200 */
- if (!r_mark_regions(z)) goto lab3; /* call mark_regions, line 200 */
- lab3:
- z->c = c;
- }
- z->lb = z->c; z->c = z->l; /* backwards, line 201 */
-
- { int m = z->l - z->c; /* do, line 203 */
- if (!r_Step_1a(z)) goto lab4; /* call Step_1a, line 203 */
- lab4:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* or, line 205 */
- if (!r_exception2(z)) goto lab6; /* call exception2, line 205 */
- goto lab5;
- lab6:
- z->c = z->l - m;
- { int m = z->l - z->c; /* do, line 207 */
- if (!r_Step_1b(z)) goto lab7; /* call Step_1b, line 207 */
- lab7:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 208 */
- if (!r_Step_1c(z)) goto lab8; /* call Step_1c, line 208 */
- lab8:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 210 */
- if (!r_Step_2(z)) goto lab9; /* call Step_2, line 210 */
- lab9:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 211 */
- if (!r_Step_3(z)) goto lab10; /* call Step_3, line 211 */
- lab10:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 212 */
- if (!r_Step_4(z)) goto lab11; /* call Step_4, line 212 */
- lab11:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 214 */
- if (!r_Step_5(z)) goto lab12; /* call Step_5, line 214 */
- lab12:
- z->c = z->l - m;
- }
- }
- lab5:
- z->c = z->lb;
- { int c = z->c; /* do, line 217 */
- if (!r_postlude(z)) goto lab13; /* call postlude, line 217 */
- lab13:
- z->c = c;
- }
- }
+static int
+r_postlude(struct SN_env * z)
+{
+ if (!(z->B[0]))
+ return 0; /* Boolean test Y_found, line 192 */
+ while (1)
+ { /* repeat, line 192 */
+ int c = z->c;
+
+ while (1)
+ { /* goto, line 192 */
+ int c = z->c;
+
+ z->bra = z->c; /* [, line 192 */
+ if (!(eq_s(z, 1, s_47)))
+ goto lab1;
+ z->ket = z->c; /* ], line 192 */
+ z->c = c;
+ break;
+ lab1:
+ z->c = c;
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ slice_from_s(z, 1, s_48); /* <-, line 192 */
+ continue;
lab0:
- return 1;
+ z->c = c;
+ break;
+ }
+ return 1;
}
-extern struct SN_env * english_create_env(void) { return SN_create_env(0, 2, 1); }
+extern int
+english_stem(struct SN_env * z)
+{
+ {
+ int c = z->c; /* or, line 196 */
+
+ if (!r_exception1(z))
+ goto lab1; /* call exception1, line 196 */
+ goto lab0;
+lab1:
+ z->c = c;
+ {
+ int c_test = z->c; /* test, line 198 */
+
+ {
+ int c = z->c + 3;
+
+ if (0 > c || c > z->l)
+ return 0;
+ z->c = c; /* hop, line 198 */
+ }
+ z->c = c_test;
+ }
+ {
+ int c = z->c; /* do, line 199 */
+
+ if (!r_prelude(z))
+ goto lab2; /* call prelude, line 199 */
+ lab2:
+ z->c = c;
+ }
+ {
+ int c = z->c; /* do, line 200 */

-extern void english_close_env(struct SN_env * z) { SN_close_env(z); }
+ if (!r_mark_regions(z))
+ goto lab3; /* call mark_regions, line 200 */
+ lab3:
+ z->c = c;
+ }
+ z->lb = z->c;
+ z->c = z->l; /* backwards, line 201 */
+ {
+ int m = z->l - z->c; /* do, line 203 */
+
+ if (!r_Step_1a(z))
+ goto lab4; /* call Step_1a, line 203 */
+ lab4:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* or, line 205 */
+
+ if (!r_exception2(z))
+ goto lab6; /* call exception2, line 205 */
+ goto lab5;
+ lab6:
+ z->c = z->l - m;
+ {
+ int m = z->l - z->c; /* do, line 207 */
+
+ if (!r_Step_1b(z))
+ goto lab7; /* call Step_1b, line 207 */
+ lab7:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 208 */
+
+ if (!r_Step_1c(z))
+ goto lab8; /* call Step_1c, line 208 */
+ lab8:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 210 */
+
+ if (!r_Step_2(z))
+ goto lab9; /* call Step_2, line 210 */
+ lab9:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 211 */
+
+ if (!r_Step_3(z))
+ goto lab10; /* call Step_3, line 211 */
+ lab10:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 212 */
+
+ if (!r_Step_4(z))
+ goto lab11; /* call Step_4, line 212 */
+ lab11:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 214 */
+
+ if (!r_Step_5(z))
+ goto lab12; /* call Step_5, line 214 */
+ lab12:
+ z->c = z->l - m;
+ }
+ }
+lab5:
+ z->c = z->lb;
+ {
+ int c = z->c; /* do, line 217 */
+
+ if (!r_postlude(z))
+ goto lab13; /* call postlude, line 217 */
+ lab13:
+ z->c = c;
+ }
+ }
+lab0:
+ return 1;
+}
+
+extern struct SN_env *english_create_env(void)
+{
+ return SN_create_env(0, 2, 1);
+}
+
+extern void english_close_env(struct SN_env * z)
+{
+ SN_close_env(z);
+}
diff --git a/contrib/tsearch2/snowball/english_stem.h b/contrib/tsearch2/snowball/english_stem.h
index bfefcd56565..7a52f9cde13 100644
--- a/contrib/tsearch2/snowball/english_stem.h
+++ b/contrib/tsearch2/snowball/english_stem.h
@@ -1,8 +1,7 @@
/* This file was generated automatically by the Snowball to ANSI C compiler */
-extern struct SN_env * english_create_env(void);
+extern struct SN_env *english_create_env(void);
extern void english_close_env(struct SN_env * z);
-extern int english_stem(struct SN_env * z);
-
+extern int english_stem(struct SN_env * z);
diff --git a/contrib/tsearch2/snowball/header.h b/contrib/tsearch2/snowball/header.h
index 6b636972022..4d4f127d6a1 100644
--- a/contrib/tsearch2/snowball/header.h
+++ b/contrib/tsearch2/snowball/header.h
@@ -2,41 +2,42 @@
#define HEAD 2*sizeof(int)
-#define SIZE(p) ((int *)(p))[-1]
+#define SIZE(p) ((int *)(p))[-1]
#define SET_SIZE(p, n) ((int *)(p))[-1] = n
#define CAPACITY(p) ((int *)(p))[-2]
struct among
-{ int s_size; /* number of chars in string */
- symbol * s; /* search string */
- int substring_i;/* index to longest matching substring */
- int result; /* result of the lookup */
- int (* function)(struct SN_env *);
+{
+ int s_size; /* number of chars in string */
+ symbol *s; /* search string */
+ int substring_i; /* index to longest matching substring */
+ int result; /* result of the lookup */
+ int (*function) (struct SN_env *);
};
-extern symbol * create_s(void);
+extern symbol *create_s(void);
extern void lose_s(symbol * p);
-extern int in_grouping(struct SN_env * z, unsigned char * s, int min, int max);
-extern int in_grouping_b(struct SN_env * z, unsigned char * s, int min, int max);
-extern int out_grouping(struct SN_env * z, unsigned char * s, int min, int max);
-extern int out_grouping_b(struct SN_env * z, unsigned char * s, int min, int max);
+extern int in_grouping(struct SN_env * z, unsigned char *s, int min, int max);
+extern int in_grouping_b(struct SN_env * z, unsigned char *s, int min, int max);
+extern int out_grouping(struct SN_env * z, unsigned char *s, int min, int max);
+extern int out_grouping_b(struct SN_env * z, unsigned char *s, int min, int max);

-extern int in_range(struct SN_env * z, int min, int max);
-extern int in_range_b(struct SN_env * z, int min, int max);
-extern int out_range(struct SN_env * z, int min, int max);
-extern int out_range_b(struct SN_env * z, int min, int max);
+extern int in_range(struct SN_env * z, int min, int max);
+extern int in_range_b(struct SN_env * z, int min, int max);
+extern int out_range(struct SN_env * z, int min, int max);
+extern int out_range_b(struct SN_env * z, int min, int max);

-extern int eq_s(struct SN_env * z, int s_size, symbol * s);
-extern int eq_s_b(struct SN_env * z, int s_size, symbol * s);
-extern int eq_v(struct SN_env * z, symbol * p);
-extern int eq_v_b(struct SN_env * z, symbol * p);
+extern int eq_s(struct SN_env * z, int s_size, symbol * s);
+extern int eq_s_b(struct SN_env * z, int s_size, symbol * s);
+extern int eq_v(struct SN_env * z, symbol * p);
+extern int eq_v_b(struct SN_env * z, symbol * p);

-extern int find_among(struct SN_env * z, struct among * v, int v_size);
-extern int find_among_b(struct SN_env * z, struct among * v, int v_size);
+extern int find_among(struct SN_env * z, struct among * v, int v_size);
+extern int find_among_b(struct SN_env * z, struct among * v, int v_size);

-extern symbol * increase_size(symbol * p, int n);
-extern int replace_s(struct SN_env * z, int c_bra, int c_ket, int s_size, const symbol * s);
+extern symbol *increase_size(symbol * p, int n);
+extern int replace_s(struct SN_env * z, int c_bra, int c_ket, int s_size, const symbol * s);
extern void slice_from_s(struct SN_env * z, int s_size, symbol * s);
extern void slice_from_v(struct SN_env * z, symbol * p);
extern void slice_del(struct SN_env * z);
@@ -44,8 +45,7 @@ extern void slice_del(struct SN_env * z);
extern void insert_s(struct SN_env * z, int bra, int ket, int s_size, symbol * s);
extern void insert_v(struct SN_env * z, int bra, int ket, symbol * p);
-extern symbol * slice_to(struct SN_env * z, symbol * p);
-extern symbol * assign_to(struct SN_env * z, symbol * p);
+extern symbol *slice_to(struct SN_env * z, symbol * p);
+extern symbol *assign_to(struct SN_env * z, symbol * p);
extern void debug(struct SN_env * z, int number, int line_count);
-
diff --git a/contrib/tsearch2/snowball/russian_stem.c b/contrib/tsearch2/snowball/russian_stem.c
index 14fd49156b7..db698ce53fb 100644
--- a/contrib/tsearch2/snowball/russian_stem.c
+++ b/contrib/tsearch2/snowball/russian_stem.c
@@ -3,624 +3,762 @@
#include "header.h"
-extern int russian_stem(struct SN_env * z);
-static int r_tidy_up(struct SN_env * z);
-static int r_derivational(struct SN_env * z);
-static int r_noun(struct SN_env * z);
-static int r_verb(struct SN_env * z);
-static int r_reflexive(struct SN_env * z);
-static int r_adjectival(struct SN_env * z);
-static int r_adjective(struct SN_env * z);
-static int r_perfective_gerund(struct SN_env * z);
-static int r_R2(struct SN_env * z);
-static int r_mark_regions(struct SN_env * z);
-
-extern struct SN_env * russian_create_env(void);
+extern int russian_stem(struct SN_env * z);
+static int r_tidy_up(struct SN_env * z);
+static int r_derivational(struct SN_env * z);
+static int r_noun(struct SN_env * z);
+static int r_verb(struct SN_env * z);
+static int r_reflexive(struct SN_env * z);
+static int r_adjectival(struct SN_env * z);
+static int r_adjective(struct SN_env * z);
+static int r_perfective_gerund(struct SN_env * z);
+static int r_R2(struct SN_env * z);
+static int r_mark_regions(struct SN_env * z);
+
+extern struct SN_env *russian_create_env(void);
extern void russian_close_env(struct SN_env * z);
-static symbol s_0_0[3] = { 215, 219, 201 };
-static symbol s_0_1[4] = { 201, 215, 219, 201 };
-static symbol s_0_2[4] = { 217, 215, 219, 201 };
-static symbol s_0_3[1] = { 215 };
-static symbol s_0_4[2] = { 201, 215 };
-static symbol s_0_5[2] = { 217, 215 };
-static symbol s_0_6[5] = { 215, 219, 201, 211, 216 };
-static symbol s_0_7[6] = { 201, 215, 219, 201, 211, 216 };
-static symbol s_0_8[6] = { 217, 215, 219, 201, 211, 216 };
+static symbol s_0_0[3] = {215, 219, 201};
+static symbol s_0_1[4] = {201, 215, 219, 201};
+static symbol s_0_2[4] = {217, 215, 219, 201};
+static symbol s_0_3[1] = {215};
+static symbol s_0_4[2] = {201, 215};
+static symbol s_0_5[2] = {217, 215};
+static symbol s_0_6[5] = {215, 219, 201, 211, 216};
+static symbol s_0_7[6] = {201, 215, 219, 201, 211, 216};
+static symbol s_0_8[6] = {217, 215, 219, 201, 211, 216};
static struct among a_0[9] =
{
-/* 0 */ { 3, s_0_0, -1, 1, 0},
-/* 1 */ { 4, s_0_1, 0, 2, 0},
-/* 2 */ { 4, s_0_2, 0, 2, 0},
-/* 3 */ { 1, s_0_3, -1, 1, 0},
-/* 4 */ { 2, s_0_4, 3, 2, 0},
-/* 5 */ { 2, s_0_5, 3, 2, 0},
-/* 6 */ { 5, s_0_6, -1, 1, 0},
-/* 7 */ { 6, s_0_7, 6, 2, 0},
-/* 8 */ { 6, s_0_8, 6, 2, 0}
+ /* 0 */ {3, s_0_0, -1, 1, 0},
+ /* 1 */ {4, s_0_1, 0, 2, 0},
+ /* 2 */ {4, s_0_2, 0, 2, 0},
+ /* 3 */ {1, s_0_3, -1, 1, 0},
+ /* 4 */ {2, s_0_4, 3, 2, 0},
+ /* 5 */ {2, s_0_5, 3, 2, 0},
+ /* 6 */ {5, s_0_6, -1, 1, 0},
+ /* 7 */ {6, s_0_7, 6, 2, 0},
+ /* 8 */ {6, s_0_8, 6, 2, 0}
};
-static symbol s_1_0[2] = { 192, 192 };
-static symbol s_1_1[2] = { 197, 192 };
-static symbol s_1_2[2] = { 207, 192 };
-static symbol s_1_3[2] = { 213, 192 };
-static symbol s_1_4[2] = { 197, 197 };
-static symbol s_1_5[2] = { 201, 197 };
-static symbol s_1_6[2] = { 207, 197 };
-static symbol s_1_7[2] = { 217, 197 };
-static symbol s_1_8[2] = { 201, 200 };
-static symbol s_1_9[2] = { 217, 200 };
-static symbol s_1_10[3] = { 201, 205, 201 };
-static symbol s_1_11[3] = { 217, 205, 201 };
-static symbol s_1_12[2] = { 197, 202 };
-static symbol s_1_13[2] = { 201, 202 };
-static symbol s_1_14[2] = { 207, 202 };
-static symbol s_1_15[2] = { 217, 202 };
-static symbol s_1_16[2] = { 197, 205 };
-static symbol s_1_17[2] = { 201, 205 };
-static symbol s_1_18[2] = { 207, 205 };
-static symbol s_1_19[2] = { 217, 205 };
-static symbol s_1_20[3] = { 197, 199, 207 };
-static symbol s_1_21[3] = { 207, 199, 207 };
-static symbol s_1_22[2] = { 193, 209 };
-static symbol s_1_23[2] = { 209, 209 };
-static symbol s_1_24[3] = { 197, 205, 213 };
-static symbol s_1_25[3] = { 207, 205, 213 };
+static symbol s_1_0[2] = {192, 192};
+static symbol s_1_1[2] = {197, 192};
+static symbol s_1_2[2] = {207, 192};
+static symbol s_1_3[2] = {213, 192};
+static symbol s_1_4[2] = {197, 197};
+static symbol s_1_5[2] = {201, 197};
+static symbol s_1_6[2] = {207, 197};
+static symbol s_1_7[2] = {217, 197};
+static symbol s_1_8[2] = {201, 200};
+static symbol s_1_9[2] = {217, 200};
+static symbol s_1_10[3] = {201, 205, 201};
+static symbol s_1_11[3] = {217, 205, 201};
+static symbol s_1_12[2] = {197, 202};
+static symbol s_1_13[2] = {201, 202};
+static symbol s_1_14[2] = {207, 202};
+static symbol s_1_15[2] = {217, 202};
+static symbol s_1_16[2] = {197, 205};
+static symbol s_1_17[2] = {201, 205};
+static symbol s_1_18[2] = {207, 205};
+static symbol s_1_19[2] = {217, 205};
+static symbol s_1_20[3] = {197, 199, 207};
+static symbol s_1_21[3] = {207, 199, 207};
+static symbol s_1_22[2] = {193, 209};
+static symbol s_1_23[2] = {209, 209};
+static symbol s_1_24[3] = {197, 205, 213};
+static symbol s_1_25[3] = {207, 205, 213};
static struct among a_1[26] =
{
-/* 0 */ { 2, s_1_0, -1, 1, 0},
-/* 1 */ { 2, s_1_1, -1, 1, 0},
-/* 2 */ { 2, s_1_2, -1, 1, 0},
-/* 3 */ { 2, s_1_3, -1, 1, 0},
-/* 4 */ { 2, s_1_4, -1, 1, 0},
-/* 5 */ { 2, s_1_5, -1, 1, 0},
-/* 6 */ { 2, s_1_6, -1, 1, 0},
-/* 7 */ { 2, s_1_7, -1, 1, 0},
-/* 8 */ { 2, s_1_8, -1, 1, 0},
-/* 9 */ { 2, s_1_9, -1, 1, 0},
-/* 10 */ { 3, s_1_10, -1, 1, 0},
-/* 11 */ { 3, s_1_11, -1, 1, 0},
-/* 12 */ { 2, s_1_12, -1, 1, 0},
-/* 13 */ { 2, s_1_13, -1, 1, 0},
-/* 14 */ { 2, s_1_14, -1, 1, 0},
-/* 15 */ { 2, s_1_15, -1, 1, 0},
-/* 16 */ { 2, s_1_16, -1, 1, 0},
-/* 17 */ { 2, s_1_17, -1, 1, 0},
-/* 18 */ { 2, s_1_18, -1, 1, 0},
-/* 19 */ { 2, s_1_19, -1, 1, 0},
-/* 20 */ { 3, s_1_20, -1, 1, 0},
-/* 21 */ { 3, s_1_21, -1, 1, 0},
-/* 22 */ { 2, s_1_22, -1, 1, 0},
-/* 23 */ { 2, s_1_23, -1, 1, 0},
-/* 24 */ { 3, s_1_24, -1, 1, 0},
-/* 25 */ { 3, s_1_25, -1, 1, 0}
+ /* 0 */ {2, s_1_0, -1, 1, 0},
+ /* 1 */ {2, s_1_1, -1, 1, 0},
+ /* 2 */ {2, s_1_2, -1, 1, 0},
+ /* 3 */ {2, s_1_3, -1, 1, 0},
+ /* 4 */ {2, s_1_4, -1, 1, 0},
+ /* 5 */ {2, s_1_5, -1, 1, 0},
+ /* 6 */ {2, s_1_6, -1, 1, 0},
+ /* 7 */ {2, s_1_7, -1, 1, 0},
+ /* 8 */ {2, s_1_8, -1, 1, 0},
+ /* 9 */ {2, s_1_9, -1, 1, 0},
+ /* 10 */ {3, s_1_10, -1, 1, 0},
+ /* 11 */ {3, s_1_11, -1, 1, 0},
+ /* 12 */ {2, s_1_12, -1, 1, 0},
+ /* 13 */ {2, s_1_13, -1, 1, 0},
+ /* 14 */ {2, s_1_14, -1, 1, 0},
+ /* 15 */ {2, s_1_15, -1, 1, 0},
+ /* 16 */ {2, s_1_16, -1, 1, 0},
+ /* 17 */ {2, s_1_17, -1, 1, 0},
+ /* 18 */ {2, s_1_18, -1, 1, 0},
+ /* 19 */ {2, s_1_19, -1, 1, 0},
+ /* 20 */ {3, s_1_20, -1, 1, 0},
+ /* 21 */ {3, s_1_21, -1, 1, 0},
+ /* 22 */ {2, s_1_22, -1, 1, 0},
+ /* 23 */ {2, s_1_23, -1, 1, 0},
+ /* 24 */ {3, s_1_24, -1, 1, 0},
+ /* 25 */ {3, s_1_25, -1, 1, 0}
};
-static symbol s_2_0[2] = { 197, 205 };
-static symbol s_2_1[2] = { 206, 206 };
-static symbol s_2_2[2] = { 215, 219 };
-static symbol s_2_3[3] = { 201, 215, 219 };
-static symbol s_2_4[3] = { 217, 215, 219 };
-static symbol s_2_5[1] = { 221 };
-static symbol s_2_6[2] = { 192, 221 };
-static symbol s_2_7[3] = { 213, 192, 221 };
+static symbol s_2_0[2] = {197, 205};
+static symbol s_2_1[2] = {206, 206};
+static symbol s_2_2[2] = {215, 219};
+static symbol s_2_3[3] = {201, 215, 219};
+static symbol s_2_4[3] = {217, 215, 219};
+static symbol s_2_5[1] = {221};
+static symbol s_2_6[2] = {192, 221};
+static symbol s_2_7[3] = {213, 192, 221};
static struct among a_2[8] =
{
-/* 0 */ { 2, s_2_0, -1, 1, 0},
-/* 1 */ { 2, s_2_1, -1, 1, 0},
-/* 2 */ { 2, s_2_2, -1, 1, 0},
-/* 3 */ { 3, s_2_3, 2, 2, 0},
-/* 4 */ { 3, s_2_4, 2, 2, 0},
-/* 5 */ { 1, s_2_5, -1, 1, 0},
-/* 6 */ { 2, s_2_6, 5, 1, 0},
-/* 7 */ { 3, s_2_7, 6, 2, 0}
+ /* 0 */ {2, s_2_0, -1, 1, 0},
+ /* 1 */ {2, s_2_1, -1, 1, 0},
+ /* 2 */ {2, s_2_2, -1, 1, 0},
+ /* 3 */ {3, s_2_3, 2, 2, 0},
+ /* 4 */ {3, s_2_4, 2, 2, 0},
+ /* 5 */ {1, s_2_5, -1, 1, 0},
+ /* 6 */ {2, s_2_6, 5, 1, 0},
+ /* 7 */ {3, s_2_7, 6, 2, 0}
};
-static symbol s_3_0[2] = { 211, 209 };
-static symbol s_3_1[2] = { 211, 216 };
+static symbol s_3_0[2] = {211, 209};
+static symbol s_3_1[2] = {211, 216};
static struct among a_3[2] =
{
-/* 0 */ { 2, s_3_0, -1, 1, 0},
-/* 1 */ { 2, s_3_1, -1, 1, 0}
+ /* 0 */ {2, s_3_0, -1, 1, 0},
+ /* 1 */ {2, s_3_1, -1, 1, 0}
};
-static symbol s_4_0[1] = { 192 };
-static symbol s_4_1[2] = { 213, 192 };
-static symbol s_4_2[2] = { 204, 193 };
-static symbol s_4_3[3] = { 201, 204, 193 };
-static symbol s_4_4[3] = { 217, 204, 193 };
-static symbol s_4_5[2] = { 206, 193 };
-static symbol s_4_6[3] = { 197, 206, 193 };
-static symbol s_4_7[3] = { 197, 212, 197 };
-static symbol s_4_8[3] = { 201, 212, 197 };
-static symbol s_4_9[3] = { 202, 212, 197 };
-static symbol s_4_10[4] = { 197, 202, 212, 197 };
-static symbol s_4_11[4] = { 213, 202, 212, 197 };
-static symbol s_4_12[2] = { 204, 201 };
-static symbol s_4_13[3] = { 201, 204, 201 };
-static symbol s_4_14[3] = { 217, 204, 201 };
-static symbol s_4_15[1] = { 202 };
-static symbol s_4_16[2] = { 197, 202 };
-static symbol s_4_17[2] = { 213, 202 };
-static symbol s_4_18[1] = { 204 };
-static symbol s_4_19[2] = { 201, 204 };
-static symbol s_4_20[2] = { 217, 204 };
-static symbol s_4_21[2] = { 197, 205 };
-static symbol s_4_22[2] = { 201, 205 };
-static symbol s_4_23[2] = { 217, 205 };
-static symbol s_4_24[1] = { 206 };
-static symbol s_4_25[2] = { 197, 206 };
-static symbol s_4_26[2] = { 204, 207 };
-static symbol s_4_27[3] = { 201, 204, 207 };
-static symbol s_4_28[3] = { 217, 204, 207 };
-static symbol s_4_29[2] = { 206, 207 };
-static symbol s_4_30[3] = { 197, 206, 207 };
-static symbol s_4_31[3] = { 206, 206, 207 };
-static symbol s_4_32[2] = { 192, 212 };
-static symbol s_4_33[3] = { 213, 192, 212 };
-static symbol s_4_34[2] = { 197, 212 };
-static symbol s_4_35[3] = { 213, 197, 212 };
-static symbol s_4_36[2] = { 201, 212 };
-static symbol s_4_37[2] = { 209, 212 };
-static symbol s_4_38[2] = { 217, 212 };
-static symbol s_4_39[2] = { 212, 216 };
-static symbol s_4_40[3] = { 201, 212, 216 };
-static symbol s_4_41[3] = { 217, 212, 216 };
-static symbol s_4_42[3] = { 197, 219, 216 };
-static symbol s_4_43[3] = { 201, 219, 216 };
-static symbol s_4_44[2] = { 206, 217 };
-static symbol s_4_45[3] = { 197, 206, 217 };
+static symbol s_4_0[1] = {192};
+static symbol s_4_1[2] = {213, 192};
+static symbol s_4_2[2] = {204, 193};
+static symbol s_4_3[3] = {201, 204, 193};
+static symbol s_4_4[3] = {217, 204, 193};
+static symbol s_4_5[2] = {206, 193};
+static symbol s_4_6[3] = {197, 206, 193};
+static symbol s_4_7[3] = {197, 212, 197};
+static symbol s_4_8[3] = {201, 212, 197};
+static symbol s_4_9[3] = {202, 212, 197};
+static symbol s_4_10[4] = {197, 202, 212, 197};
+static symbol s_4_11[4] = {213, 202, 212, 197};
+static symbol s_4_12[2] = {204, 201};
+static symbol s_4_13[3] = {201, 204, 201};
+static symbol s_4_14[3] = {217, 204, 201};
+static symbol s_4_15[1] = {202};
+static symbol s_4_16[2] = {197, 202};
+static symbol s_4_17[2] = {213, 202};
+static symbol s_4_18[1] = {204};
+static symbol s_4_19[2] = {201, 204};
+static symbol s_4_20[2] = {217, 204};
+static symbol s_4_21[2] = {197, 205};
+static symbol s_4_22[2] = {201, 205};
+static symbol s_4_23[2] = {217, 205};
+static symbol s_4_24[1] = {206};
+static symbol s_4_25[2] = {197, 206};
+static symbol s_4_26[2] = {204, 207};
+static symbol s_4_27[3] = {201, 204, 207};
+static symbol s_4_28[3] = {217, 204, 207};
+static symbol s_4_29[2] = {206, 207};
+static symbol s_4_30[3] = {197, 206, 207};
+static symbol s_4_31[3] = {206, 206, 207};
+static symbol s_4_32[2] = {192, 212};
+static symbol s_4_33[3] = {213, 192, 212};
+static symbol s_4_34[2] = {197, 212};
+static symbol s_4_35[3] = {213, 197, 212};
+static symbol s_4_36[2] = {201, 212};
+static symbol s_4_37[2] = {209, 212};
+static symbol s_4_38[2] = {217, 212};
+static symbol s_4_39[2] = {212, 216};
+static symbol s_4_40[3] = {201, 212, 216};
+static symbol s_4_41[3] = {217, 212, 216};
+static symbol s_4_42[3] = {197, 219, 216};
+static symbol s_4_43[3] = {201, 219, 216};
+static symbol s_4_44[2] = {206, 217};
+static symbol s_4_45[3] = {197, 206, 217};
static struct among a_4[46] =
{
-/* 0 */ { 1, s_4_0, -1, 2, 0},
-/* 1 */ { 2, s_4_1, 0, 2, 0},
-/* 2 */ { 2, s_4_2, -1, 1, 0},
-/* 3 */ { 3, s_4_3, 2, 2, 0},
-/* 4 */ { 3, s_4_4, 2, 2, 0},
-/* 5 */ { 2, s_4_5, -1, 1, 0},
-/* 6 */ { 3, s_4_6, 5, 2, 0},
-/* 7 */ { 3, s_4_7, -1, 1, 0},
-/* 8 */ { 3, s_4_8, -1, 2, 0},
-/* 9 */ { 3, s_4_9, -1, 1, 0},
-/* 10 */ { 4, s_4_10, 9, 2, 0},
-/* 11 */ { 4, s_4_11, 9, 2, 0},
-/* 12 */ { 2, s_4_12, -1, 1, 0},
-/* 13 */ { 3, s_4_13, 12, 2, 0},
-/* 14 */ { 3, s_4_14, 12, 2, 0},
-/* 15 */ { 1, s_4_15, -1, 1, 0},
-/* 16 */ { 2, s_4_16, 15, 2, 0},
-/* 17 */ { 2, s_4_17, 15, 2, 0},
-/* 18 */ { 1, s_4_18, -1, 1, 0},
-/* 19 */ { 2, s_4_19, 18, 2, 0},
-/* 20 */ { 2, s_4_20, 18, 2, 0},
-/* 21 */ { 2, s_4_21, -1, 1, 0},
-/* 22 */ { 2, s_4_22, -1, 2, 0},
-/* 23 */ { 2, s_4_23, -1, 2, 0},
-/* 24 */ { 1, s_4_24, -1, 1, 0},
-/* 25 */ { 2, s_4_25, 24, 2, 0},
-/* 26 */ { 2, s_4_26, -1, 1, 0},
-/* 27 */ { 3, s_4_27, 26, 2, 0},
-/* 28 */ { 3, s_4_28, 26, 2, 0},
-/* 29 */ { 2, s_4_29, -1, 1, 0},
-/* 30 */ { 3, s_4_30, 29, 2, 0},
-/* 31 */ { 3, s_4_31, 29, 1, 0},
-/* 32 */ { 2, s_4_32, -1, 1, 0},
-/* 33 */ { 3, s_4_33, 32, 2, 0},
-/* 34 */ { 2, s_4_34, -1, 1, 0},
-/* 35 */ { 3, s_4_35, 34, 2, 0},
-/* 36 */ { 2, s_4_36, -1, 2, 0},
-/* 37 */ { 2, s_4_37, -1, 2, 0},
-/* 38 */ { 2, s_4_38, -1, 2, 0},
-/* 39 */ { 2, s_4_39, -1, 1, 0},
-/* 40 */ { 3, s_4_40, 39, 2, 0},
-/* 41 */ { 3, s_4_41, 39, 2, 0},
-/* 42 */ { 3, s_4_42, -1, 1, 0},
-/* 43 */ { 3, s_4_43, -1, 2, 0},
-/* 44 */ { 2, s_4_44, -1, 1, 0},
-/* 45 */ { 3, s_4_45, 44, 2, 0}
+ /* 0 */ {1, s_4_0, -1, 2, 0},
+ /* 1 */ {2, s_4_1, 0, 2, 0},
+ /* 2 */ {2, s_4_2, -1, 1, 0},
+ /* 3 */ {3, s_4_3, 2, 2, 0},
+ /* 4 */ {3, s_4_4, 2, 2, 0},
+ /* 5 */ {2, s_4_5, -1, 1, 0},
+ /* 6 */ {3, s_4_6, 5, 2, 0},
+ /* 7 */ {3, s_4_7, -1, 1, 0},
+ /* 8 */ {3, s_4_8, -1, 2, 0},
+ /* 9 */ {3, s_4_9, -1, 1, 0},
+ /* 10 */ {4, s_4_10, 9, 2, 0},
+ /* 11 */ {4, s_4_11, 9, 2, 0},
+ /* 12 */ {2, s_4_12, -1, 1, 0},
+ /* 13 */ {3, s_4_13, 12, 2, 0},
+ /* 14 */ {3, s_4_14, 12, 2, 0},
+ /* 15 */ {1, s_4_15, -1, 1, 0},
+ /* 16 */ {2, s_4_16, 15, 2, 0},
+ /* 17 */ {2, s_4_17, 15, 2, 0},
+ /* 18 */ {1, s_4_18, -1, 1, 0},
+ /* 19 */ {2, s_4_19, 18, 2, 0},
+ /* 20 */ {2, s_4_20, 18, 2, 0},
+ /* 21 */ {2, s_4_21, -1, 1, 0},
+ /* 22 */ {2, s_4_22, -1, 2, 0},
+ /* 23 */ {2, s_4_23, -1, 2, 0},
+ /* 24 */ {1, s_4_24, -1, 1, 0},
+ /* 25 */ {2, s_4_25, 24, 2, 0},
+ /* 26 */ {2, s_4_26, -1, 1, 0},
+ /* 27 */ {3, s_4_27, 26, 2, 0},
+ /* 28 */ {3, s_4_28, 26, 2, 0},
+ /* 29 */ {2, s_4_29, -1, 1, 0},
+ /* 30 */ {3, s_4_30, 29, 2, 0},
+ /* 31 */ {3, s_4_31, 29, 1, 0},
+ /* 32 */ {2, s_4_32, -1, 1, 0},
+ /* 33 */ {3, s_4_33, 32, 2, 0},
+ /* 34 */ {2, s_4_34, -1, 1, 0},
+ /* 35 */ {3, s_4_35, 34, 2, 0},
+ /* 36 */ {2, s_4_36, -1, 2, 0},
+ /* 37 */ {2, s_4_37, -1, 2, 0},
+ /* 38 */ {2, s_4_38, -1, 2, 0},
+ /* 39 */ {2, s_4_39, -1, 1, 0},
+ /* 40 */ {3, s_4_40, 39, 2, 0},
+ /* 41 */ {3, s_4_41, 39, 2, 0},
+ /* 42 */ {3, s_4_42, -1, 1, 0},
+ /* 43 */ {3, s_4_43, -1, 2, 0},
+ /* 44 */ {2, s_4_44, -1, 1, 0},
+ /* 45 */ {3, s_4_45, 44, 2, 0}
};
-static symbol s_5_0[1] = { 192 };
-static symbol s_5_1[2] = { 201, 192 };
-static symbol s_5_2[2] = { 216, 192 };
-static symbol s_5_3[1] = { 193 };
-static symbol s_5_4[1] = { 197 };
-static symbol s_5_5[2] = { 201, 197 };
-static symbol s_5_6[2] = { 216, 197 };
-static symbol s_5_7[2] = { 193, 200 };
-static symbol s_5_8[2] = { 209, 200 };
-static symbol s_5_9[3] = { 201, 209, 200 };
-static symbol s_5_10[1] = { 201 };
-static symbol s_5_11[2] = { 197, 201 };
-static symbol s_5_12[2] = { 201, 201 };
-static symbol s_5_13[3] = { 193, 205, 201 };
-static symbol s_5_14[3] = { 209, 205, 201 };
-static symbol s_5_15[4] = { 201, 209, 205, 201 };
-static symbol s_5_16[1] = { 202 };
-static symbol s_5_17[2] = { 197, 202 };
-static symbol s_5_18[3] = { 201, 197, 202 };
-static symbol s_5_19[2] = { 201, 202 };
-static symbol s_5_20[2] = { 207, 202 };
-static symbol s_5_21[2] = { 193, 205 };
-static symbol s_5_22[2] = { 197, 205 };
-static symbol s_5_23[3] = { 201, 197, 205 };
-static symbol s_5_24[2] = { 207, 205 };
-static symbol s_5_25[2] = { 209, 205 };
-static symbol s_5_26[3] = { 201, 209, 205 };
-static symbol s_5_27[1] = { 207 };
-static symbol s_5_28[1] = { 209 };
-static symbol s_5_29[2] = { 201, 209 };
-static symbol s_5_30[2] = { 216, 209 };
-static symbol s_5_31[1] = { 213 };
-static symbol s_5_32[2] = { 197, 215 };
-static symbol s_5_33[2] = { 207, 215 };
-static symbol s_5_34[1] = { 216 };
-static symbol s_5_35[1] = { 217 };
+static symbol s_5_0[1] = {192};
+static symbol s_5_1[2] = {201, 192};
+static symbol s_5_2[2] = {216, 192};
+static symbol s_5_3[1] = {193};
+static symbol s_5_4[1] = {197};
+static symbol s_5_5[2] = {201, 197};
+static symbol s_5_6[2] = {216, 197};
+static symbol s_5_7[2] = {193, 200};
+static symbol s_5_8[2] = {209, 200};
+static symbol s_5_9[3] = {201, 209, 200};
+static symbol s_5_10[1] = {201};
+static symbol s_5_11[2] = {197, 201};
+static symbol s_5_12[2] = {201, 201};
+static symbol s_5_13[3] = {193, 205, 201};
+static symbol s_5_14[3] = {209, 205, 201};
+static symbol s_5_15[4] = {201, 209, 205, 201};
+static symbol s_5_16[1] = {202};
+static symbol s_5_17[2] = {197, 202};
+static symbol s_5_18[3] = {201, 197, 202};
+static symbol s_5_19[2] = {201, 202};
+static symbol s_5_20[2] = {207, 202};
+static symbol s_5_21[2] = {193, 205};
+static symbol s_5_22[2] = {197, 205};
+static symbol s_5_23[3] = {201, 197, 205};
+static symbol s_5_24[2] = {207, 205};
+static symbol s_5_25[2] = {209, 205};
+static symbol s_5_26[3] = {201, 209, 205};
+static symbol s_5_27[1] = {207};
+static symbol s_5_28[1] = {209};
+static symbol s_5_29[2] = {201, 209};
+static symbol s_5_30[2] = {216, 209};
+static symbol s_5_31[1] = {213};
+static symbol s_5_32[2] = {197, 215};
+static symbol s_5_33[2] = {207, 215};
+static symbol s_5_34[1] = {216};
+static symbol s_5_35[1] = {217};
static struct among a_5[36] =
{
-/* 0 */ { 1, s_5_0, -1, 1, 0},
-/* 1 */ { 2, s_5_1, 0, 1, 0},
-/* 2 */ { 2, s_5_2, 0, 1, 0},
-/* 3 */ { 1, s_5_3, -1, 1, 0},
-/* 4 */ { 1, s_5_4, -1, 1, 0},
-/* 5 */ { 2, s_5_5, 4, 1, 0},
-/* 6 */ { 2, s_5_6, 4, 1, 0},
-/* 7 */ { 2, s_5_7, -1, 1, 0},
-/* 8 */ { 2, s_5_8, -1, 1, 0},
-/* 9 */ { 3, s_5_9, 8, 1, 0},
-/* 10 */ { 1, s_5_10, -1, 1, 0},
-/* 11 */ { 2, s_5_11, 10, 1, 0},
-/* 12 */ { 2, s_5_12, 10, 1, 0},
-/* 13 */ { 3, s_5_13, 10, 1, 0},
-/* 14 */ { 3, s_5_14, 10, 1, 0},
-/* 15 */ { 4, s_5_15, 14, 1, 0},
-/* 16 */ { 1, s_5_16, -1, 1, 0},
-/* 17 */ { 2, s_5_17, 16, 1, 0},
-/* 18 */ { 3, s_5_18, 17, 1, 0},
-/* 19 */ { 2, s_5_19, 16, 1, 0},
-/* 20 */ { 2, s_5_20, 16, 1, 0},
-/* 21 */ { 2, s_5_21, -1, 1, 0},
-/* 22 */ { 2, s_5_22, -1, 1, 0},
-/* 23 */ { 3, s_5_23, 22, 1, 0},
-/* 24 */ { 2, s_5_24, -1, 1, 0},
-/* 25 */ { 2, s_5_25, -1, 1, 0},
-/* 26 */ { 3, s_5_26, 25, 1, 0},
-/* 27 */ { 1, s_5_27, -1, 1, 0},
-/* 28 */ { 1, s_5_28, -1, 1, 0},
-/* 29 */ { 2, s_5_29, 28, 1, 0},
-/* 30 */ { 2, s_5_30, 28, 1, 0},
-/* 31 */ { 1, s_5_31, -1, 1, 0},
-/* 32 */ { 2, s_5_32, -1, 1, 0},
-/* 33 */ { 2, s_5_33, -1, 1, 0},
-/* 34 */ { 1, s_5_34, -1, 1, 0},
-/* 35 */ { 1, s_5_35, -1, 1, 0}
+ /* 0 */ {1, s_5_0, -1, 1, 0},
+ /* 1 */ {2, s_5_1, 0, 1, 0},
+ /* 2 */ {2, s_5_2, 0, 1, 0},
+ /* 3 */ {1, s_5_3, -1, 1, 0},
+ /* 4 */ {1, s_5_4, -1, 1, 0},
+ /* 5 */ {2, s_5_5, 4, 1, 0},
+ /* 6 */ {2, s_5_6, 4, 1, 0},
+ /* 7 */ {2, s_5_7, -1, 1, 0},
+ /* 8 */ {2, s_5_8, -1, 1, 0},
+ /* 9 */ {3, s_5_9, 8, 1, 0},
+ /* 10 */ {1, s_5_10, -1, 1, 0},
+ /* 11 */ {2, s_5_11, 10, 1, 0},
+ /* 12 */ {2, s_5_12, 10, 1, 0},
+ /* 13 */ {3, s_5_13, 10, 1, 0},
+ /* 14 */ {3, s_5_14, 10, 1, 0},
+ /* 15 */ {4, s_5_15, 14, 1, 0},
+ /* 16 */ {1, s_5_16, -1, 1, 0},
+ /* 17 */ {2, s_5_17, 16, 1, 0},
+ /* 18 */ {3, s_5_18, 17, 1, 0},
+ /* 19 */ {2, s_5_19, 16, 1, 0},
+ /* 20 */ {2, s_5_20, 16, 1, 0},
+ /* 21 */ {2, s_5_21, -1, 1, 0},
+ /* 22 */ {2, s_5_22, -1, 1, 0},
+ /* 23 */ {3, s_5_23, 22, 1, 0},
+ /* 24 */ {2, s_5_24, -1, 1, 0},
+ /* 25 */ {2, s_5_25, -1, 1, 0},
+ /* 26 */ {3, s_5_26, 25, 1, 0},
+ /* 27 */ {1, s_5_27, -1, 1, 0},
+ /* 28 */ {1, s_5_28, -1, 1, 0},
+ /* 29 */ {2, s_5_29, 28, 1, 0},
+ /* 30 */ {2, s_5_30, 28, 1, 0},
+ /* 31 */ {1, s_5_31, -1, 1, 0},
+ /* 32 */ {2, s_5_32, -1, 1, 0},
+ /* 33 */ {2, s_5_33, -1, 1, 0},
+ /* 34 */ {1, s_5_34, -1, 1, 0},
+ /* 35 */ {1, s_5_35, -1, 1, 0}
};
-static symbol s_6_0[3] = { 207, 211, 212 };
-static symbol s_6_1[4] = { 207, 211, 212, 216 };
+static symbol s_6_0[3] = {207, 211, 212};
+static symbol s_6_1[4] = {207, 211, 212, 216};
static struct among a_6[2] =
{
-/* 0 */ { 3, s_6_0, -1, 1, 0},
-/* 1 */ { 4, s_6_1, -1, 1, 0}
+ /* 0 */ {3, s_6_0, -1, 1, 0},
+ /* 1 */ {4, s_6_1, -1, 1, 0}
};
-static symbol s_7_0[4] = { 197, 202, 219, 197 };
-static symbol s_7_1[1] = { 206 };
-static symbol s_7_2[1] = { 216 };
-static symbol s_7_3[3] = { 197, 202, 219 };
+static symbol s_7_0[4] = {197, 202, 219, 197};
+static symbol s_7_1[1] = {206};
+static symbol s_7_2[1] = {216};
+static symbol s_7_3[3] = {197, 202, 219};
static struct among a_7[4] =
{
-/* 0 */ { 4, s_7_0, -1, 1, 0},
-/* 1 */ { 1, s_7_1, -1, 2, 0},
-/* 2 */ { 1, s_7_2, -1, 3, 0},
-/* 3 */ { 3, s_7_3, -1, 1, 0}
+ /* 0 */ {4, s_7_0, -1, 1, 0},
+ /* 1 */ {1, s_7_1, -1, 2, 0},
+ /* 2 */ {1, s_7_2, -1, 3, 0},
+ /* 3 */ {3, s_7_3, -1, 1, 0}
};
-static unsigned char g_v[] = { 35, 130, 34, 18 };
-
-static symbol s_0[] = { 193 };
-static symbol s_1[] = { 209 };
-static symbol s_2[] = { 193 };
-static symbol s_3[] = { 209 };
-static symbol s_4[] = { 193 };
-static symbol s_5[] = { 209 };
-static symbol s_6[] = { 206 };
-static symbol s_7[] = { 206 };
-static symbol s_8[] = { 206 };
-static symbol s_9[] = { 201 };
-
-static int r_mark_regions(struct SN_env * z) {
- z->I[0] = z->l;
- z->I[1] = z->l;
- { int c = z->c; /* do, line 100 */
- while(1) { /* gopast, line 101 */
- if (!(in_grouping(z, g_v, 192, 220))) goto lab1;
- break;
- lab1:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- z->I[0] = z->c; /* setmark pV, line 101 */
- while(1) { /* gopast, line 101 */
- if (!(out_grouping(z, g_v, 192, 220))) goto lab2;
- break;
- lab2:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- while(1) { /* gopast, line 102 */
- if (!(in_grouping(z, g_v, 192, 220))) goto lab3;
- break;
- lab3:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- while(1) { /* gopast, line 102 */
- if (!(out_grouping(z, g_v, 192, 220))) goto lab4;
- break;
- lab4:
- if (z->c >= z->l) goto lab0;
- z->c++;
- }
- z->I[1] = z->c; /* setmark p2, line 102 */
- lab0:
- z->c = c;
- }
- return 1;
+static unsigned char g_v[] = {35, 130, 34, 18};
+
+static symbol s_0[] = {193};
+static symbol s_1[] = {209};
+static symbol s_2[] = {193};
+static symbol s_3[] = {209};
+static symbol s_4[] = {193};
+static symbol s_5[] = {209};
+static symbol s_6[] = {206};
+static symbol s_7[] = {206};
+static symbol s_8[] = {206};
+static symbol s_9[] = {201};
+
+static int
+r_mark_regions(struct SN_env * z)
+{
+ z->I[0] = z->l;
+ z->I[1] = z->l;
+ {
+ int c = z->c; /* do, line 100 */
+
+ while (1)
+ { /* gopast, line 101 */
+ if (!(in_grouping(z, g_v, 192, 220)))
+ goto lab1;
+ break;
+ lab1:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ z->I[0] = z->c; /* setmark pV, line 101 */
+ while (1)
+ { /* gopast, line 101 */
+ if (!(out_grouping(z, g_v, 192, 220)))
+ goto lab2;
+ break;
+ lab2:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ while (1)
+ { /* gopast, line 102 */
+ if (!(in_grouping(z, g_v, 192, 220)))
+ goto lab3;
+ break;
+ lab3:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ while (1)
+ { /* gopast, line 102 */
+ if (!(out_grouping(z, g_v, 192, 220)))
+ goto lab4;
+ break;
+ lab4:
+ if (z->c >= z->l)
+ goto lab0;
+ z->c++;
+ }
+ z->I[1] = z->c; /* setmark p2, line 102 */
+lab0:
+ z->c = c;
+ }
+ return 1;
}
-static int r_R2(struct SN_env * z) {
- if (!(z->I[1] <= z->c)) return 0;
- return 1;
+static int
+r_R2(struct SN_env * z)
+{
+ if (!(z->I[1] <= z->c))
+ return 0;
+ return 1;
}
-static int r_perfective_gerund(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 111 */
- among_var = find_among_b(z, a_0, 9); /* substring, line 111 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 111 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- { int m = z->l - z->c; /* or, line 115 */
- if (!(eq_s_b(z, 1, s_0))) goto lab1;
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!(eq_s_b(z, 1, s_1))) return 0;
- }
- lab0:
- slice_del(z); /* delete, line 115 */
- break;
- case 2:
- slice_del(z); /* delete, line 122 */
- break;
- }
- return 1;
+static int
+r_perfective_gerund(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 111 */
+ among_var = find_among_b(z, a_0, 9); /* substring, line 111 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 111 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ {
+ int m = z->l - z->c; /* or, line 115 */
+
+ if (!(eq_s_b(z, 1, s_0)))
+ goto lab1;
+ goto lab0;
+ lab1:
+ z->c = z->l - m;
+ if (!(eq_s_b(z, 1, s_1)))
+ return 0;
+ }
+ lab0:
+ slice_del(z); /* delete, line 115 */
+ break;
+ case 2:
+ slice_del(z); /* delete, line 122 */
+ break;
+ }
+ return 1;
}
-static int r_adjective(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 127 */
- among_var = find_among_b(z, a_1, 26); /* substring, line 127 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 127 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 136 */
- break;
- }
- return 1;
+static int
+r_adjective(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 127 */
+ among_var = find_among_b(z, a_1, 26); /* substring, line 127 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 127 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 136 */
+ break;
+ }
+ return 1;
}
-static int r_adjectival(struct SN_env * z) {
- int among_var;
- if (!r_adjective(z)) return 0; /* call adjective, line 141 */
- { int m = z->l - z->c; /* try, line 148 */
- z->ket = z->c; /* [, line 149 */
- among_var = find_among_b(z, a_2, 8); /* substring, line 149 */
- if (!(among_var)) { z->c = z->l - m; goto lab0; }
- z->bra = z->c; /* ], line 149 */
- switch(among_var) {
- case 0: { z->c = z->l - m; goto lab0; }
- case 1:
- { int m = z->l - z->c; /* or, line 154 */
- if (!(eq_s_b(z, 1, s_2))) goto lab2;
- goto lab1;
- lab2:
- z->c = z->l - m;
- if (!(eq_s_b(z, 1, s_3))) { z->c = z->l - m; goto lab0; }
- }
- lab1:
- slice_del(z); /* delete, line 154 */
- break;
- case 2:
- slice_del(z); /* delete, line 161 */
- break;
- }
- lab0:
- ;
- }
- return 1;
+static int
+r_adjectival(struct SN_env * z)
+{
+ int among_var;
+
+ if (!r_adjective(z))
+ return 0; /* call adjective, line 141 */
+ {
+ int m = z->l - z->c; /* try, line 148 */
+
+ z->ket = z->c; /* [, line 149 */
+ among_var = find_among_b(z, a_2, 8); /* substring, line 149 */
+ if (!(among_var))
+ {
+ z->c = z->l - m;
+ goto lab0;
+ }
+ z->bra = z->c; /* ], line 149 */
+ switch (among_var)
+ {
+ case 0:
+ {
+ z->c = z->l - m;
+ goto lab0;
+ }
+ case 1:
+ {
+ int m = z->l - z->c; /* or, line 154 */
+
+ if (!(eq_s_b(z, 1, s_2)))
+ goto lab2;
+ goto lab1;
+ lab2:
+ z->c = z->l - m;
+ if (!(eq_s_b(z, 1, s_3)))
+ {
+ z->c = z->l - m;
+ goto lab0;
+ }
+ }
+ lab1:
+ slice_del(z); /* delete, line 154 */
+ break;
+ case 2:
+ slice_del(z); /* delete, line 161 */
+ break;
+ }
+lab0:
+ ;
+ }
+ return 1;
}
-static int r_reflexive(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 168 */
- among_var = find_among_b(z, a_3, 2); /* substring, line 168 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 168 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 171 */
- break;
- }
- return 1;
+static int
+r_reflexive(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 168 */
+ among_var = find_among_b(z, a_3, 2); /* substring, line 168 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 168 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 171 */
+ break;
+ }
+ return 1;
}
-static int r_verb(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 176 */
- among_var = find_among_b(z, a_4, 46); /* substring, line 176 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 176 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- { int m = z->l - z->c; /* or, line 182 */
- if (!(eq_s_b(z, 1, s_4))) goto lab1;
- goto lab0;
- lab1:
- z->c = z->l - m;
- if (!(eq_s_b(z, 1, s_5))) return 0;
- }
- lab0:
- slice_del(z); /* delete, line 182 */
- break;
- case 2:
- slice_del(z); /* delete, line 190 */
- break;
- }
- return 1;
+static int
+r_verb(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 176 */
+ among_var = find_among_b(z, a_4, 46); /* substring, line 176 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 176 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ {
+ int m = z->l - z->c; /* or, line 182 */
+
+ if (!(eq_s_b(z, 1, s_4)))
+ goto lab1;
+ goto lab0;
+ lab1:
+ z->c = z->l - m;
+ if (!(eq_s_b(z, 1, s_5)))
+ return 0;
+ }
+ lab0:
+ slice_del(z); /* delete, line 182 */
+ break;
+ case 2:
+ slice_del(z); /* delete, line 190 */
+ break;
+ }
+ return 1;
}
-static int r_noun(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 199 */
- among_var = find_among_b(z, a_5, 36); /* substring, line 199 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 199 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 206 */
- break;
- }
- return 1;
+static int
+r_noun(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 199 */
+ among_var = find_among_b(z, a_5, 36); /* substring, line 199 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 199 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 206 */
+ break;
+ }
+ return 1;
}
-static int r_derivational(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 215 */
- among_var = find_among_b(z, a_6, 2); /* substring, line 215 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 215 */
- if (!r_R2(z)) return 0; /* call R2, line 215 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 218 */
- break;
- }
- return 1;
+static int
+r_derivational(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 215 */
+ among_var = find_among_b(z, a_6, 2); /* substring, line 215 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 215 */
+ if (!r_R2(z))
+ return 0; /* call R2, line 215 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 218 */
+ break;
+ }
+ return 1;
}
-static int r_tidy_up(struct SN_env * z) {
- int among_var;
- z->ket = z->c; /* [, line 223 */
- among_var = find_among_b(z, a_7, 4); /* substring, line 223 */
- if (!(among_var)) return 0;
- z->bra = z->c; /* ], line 223 */
- switch(among_var) {
- case 0: return 0;
- case 1:
- slice_del(z); /* delete, line 227 */
- z->ket = z->c; /* [, line 228 */
- if (!(eq_s_b(z, 1, s_6))) return 0;
- z->bra = z->c; /* ], line 228 */
- if (!(eq_s_b(z, 1, s_7))) return 0;
- slice_del(z); /* delete, line 228 */
- break;
- case 2:
- if (!(eq_s_b(z, 1, s_8))) return 0;
- slice_del(z); /* delete, line 231 */
- break;
- case 3:
- slice_del(z); /* delete, line 233 */
- break;
- }
- return 1;
+static int
+r_tidy_up(struct SN_env * z)
+{
+ int among_var;
+
+ z->ket = z->c; /* [, line 223 */
+ among_var = find_among_b(z, a_7, 4); /* substring, line 223 */
+ if (!(among_var))
+ return 0;
+ z->bra = z->c; /* ], line 223 */
+ switch (among_var)
+ {
+ case 0:
+ return 0;
+ case 1:
+ slice_del(z); /* delete, line 227 */
+ z->ket = z->c; /* [, line 228 */
+ if (!(eq_s_b(z, 1, s_6)))
+ return 0;
+ z->bra = z->c; /* ], line 228 */
+ if (!(eq_s_b(z, 1, s_7)))
+ return 0;
+ slice_del(z); /* delete, line 228 */
+ break;
+ case 2:
+ if (!(eq_s_b(z, 1, s_8)))
+ return 0;
+ slice_del(z); /* delete, line 231 */
+ break;
+ case 3:
+ slice_del(z); /* delete, line 233 */
+ break;
+ }
+ return 1;
}
-extern int russian_stem(struct SN_env * z) {
- { int c = z->c; /* do, line 240 */
- if (!r_mark_regions(z)) goto lab0; /* call mark_regions, line 240 */
- lab0:
- z->c = c;
- }
- z->lb = z->c; z->c = z->l; /* backwards, line 241 */
-
- { int m = z->l - z->c; /* setlimit, line 241 */
- int m3;
- if (z->c < z->I[0]) return 0;
- z->c = z->I[0]; /* tomark, line 241 */
- m3 = z->lb; z->lb = z->c;
- z->c = z->l - m;
- { int m = z->l - z->c; /* do, line 242 */
- { int m = z->l - z->c; /* or, line 243 */
- if (!r_perfective_gerund(z)) goto lab3; /* call perfective_gerund, line 243 */
- goto lab2;
- lab3:
- z->c = z->l - m;
- { int m = z->l - z->c; /* try, line 244 */
- if (!r_reflexive(z)) { z->c = z->l - m; goto lab4; } /* call reflexive, line 244 */
- lab4:
- ;
- }
- { int m = z->l - z->c; /* or, line 245 */
- if (!r_adjectival(z)) goto lab6; /* call adjectival, line 245 */
- goto lab5;
- lab6:
- z->c = z->l - m;
- if (!r_verb(z)) goto lab7; /* call verb, line 245 */
- goto lab5;
- lab7:
- z->c = z->l - m;
- if (!r_noun(z)) goto lab1; /* call noun, line 245 */
- }
- lab5:
- ;
- }
- lab2:
- lab1:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* try, line 248 */
- z->ket = z->c; /* [, line 248 */
- if (!(eq_s_b(z, 1, s_9))) { z->c = z->l - m; goto lab8; }
- z->bra = z->c; /* ], line 248 */
- slice_del(z); /* delete, line 248 */
- lab8:
- ;
- }
- { int m = z->l - z->c; /* do, line 251 */
- if (!r_derivational(z)) goto lab9; /* call derivational, line 251 */
- lab9:
- z->c = z->l - m;
- }
- { int m = z->l - z->c; /* do, line 252 */
- if (!r_tidy_up(z)) goto lab10; /* call tidy_up, line 252 */
- lab10:
- z->c = z->l - m;
- }
- z->lb = m3;
- }
- z->c = z->lb;
- return 1;
+extern int
+russian_stem(struct SN_env * z)
+{
+ {
+ int c = z->c; /* do, line 240 */
+
+ if (!r_mark_regions(z))
+ goto lab0; /* call mark_regions, line 240 */
+lab0:
+ z->c = c;
+ }
+ z->lb = z->c;
+ z->c = z->l; /* backwards, line 241 */
+
+ {
+ int m = z->l - z->c; /* setlimit, line 241 */
+ int m3;
+
+ if (z->c < z->I[0])
+ return 0;
+ z->c = z->I[0]; /* tomark, line 241 */
+ m3 = z->lb;
+ z->lb = z->c;
+ z->c = z->l - m;
+ {
+ int m = z->l - z->c; /* do, line 242 */
+
+ {
+ int m = z->l - z->c; /* or, line 243 */
+
+ if (!r_perfective_gerund(z))
+ goto lab3; /* call perfective_gerund, line 243 */
+ goto lab2;
+ lab3:
+ z->c = z->l - m;
+ {
+ int m = z->l - z->c; /* try, line 244 */
+
+ if (!r_reflexive(z))
+ {
+ z->c = z->l - m;
+ goto lab4;
+ } /* call reflexive, line 244 */
+ lab4:
+ ;
+ }
+ {
+ int m = z->l - z->c; /* or, line 245 */
+
+ if (!r_adjectival(z))
+ goto lab6; /* call adjectival, line 245 */
+ goto lab5;
+ lab6:
+ z->c = z->l - m;
+ if (!r_verb(z))
+ goto lab7; /* call verb, line 245 */
+ goto lab5;
+ lab7:
+ z->c = z->l - m;
+ if (!r_noun(z))
+ goto lab1; /* call noun, line 245 */
+ }
+ lab5:
+ ;
+ }
+ lab2:
+ lab1:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* try, line 248 */
+
+ z->ket = z->c; /* [, line 248 */
+ if (!(eq_s_b(z, 1, s_9)))
+ {
+ z->c = z->l - m;
+ goto lab8;
+ }
+ z->bra = z->c; /* ], line 248 */
+ slice_del(z); /* delete, line 248 */
+ lab8:
+ ;
+ }
+ {
+ int m = z->l - z->c; /* do, line 251 */
+
+ if (!r_derivational(z))
+ goto lab9; /* call derivational, line 251 */
+ lab9:
+ z->c = z->l - m;
+ }
+ {
+ int m = z->l - z->c; /* do, line 252 */
+
+ if (!r_tidy_up(z))
+ goto lab10; /* call tidy_up, line 252 */
+ lab10:
+ z->c = z->l - m;
+ }
+ z->lb = m3;
+ }
+ z->c = z->lb;
+ return 1;
}
-extern struct SN_env * russian_create_env(void) { return SN_create_env(0, 2, 0); }
-
-extern void russian_close_env(struct SN_env * z) { SN_close_env(z); }
+extern struct SN_env *russian_create_env(void)
+{
+ return SN_create_env(0, 2, 0);
+}
+extern void russian_close_env(struct SN_env * z)
+{
+ SN_close_env(z);
+}
diff --git a/contrib/tsearch2/snowball/russian_stem.h b/contrib/tsearch2/snowball/russian_stem.h
index 7dc26d45c82..69369e295b9 100644
--- a/contrib/tsearch2/snowball/russian_stem.h
+++ b/contrib/tsearch2/snowball/russian_stem.h
@@ -1,8 +1,7 @@
/* This file was generated automatically by the Snowball to ANSI C compiler */
-extern struct SN_env * russian_create_env(void);
+extern struct SN_env *russian_create_env(void);
extern void russian_close_env(struct SN_env * z);
-extern int russian_stem(struct SN_env * z);
-
+extern int russian_stem(struct SN_env * z);
diff --git a/contrib/tsearch2/snowball/utilities.c b/contrib/tsearch2/snowball/utilities.c
index 5dc752445b5..374d2c11bb7 100644
--- a/contrib/tsearch2/snowball/utilities.c
+++ b/contrib/tsearch2/snowball/utilities.c
@@ -9,320 +9,507 @@
#define CREATE_SIZE 1
-extern symbol * create_s(void)
-{ symbol * p = (symbol *) (HEAD + (char *) malloc(HEAD + (CREATE_SIZE + 1) * sizeof(symbol)));
- CAPACITY(p) = CREATE_SIZE;
- SET_SIZE(p, CREATE_SIZE);
- return p;
+extern symbol *
+create_s(void)
+{
+ symbol *p = (symbol *) (HEAD + (char *) malloc(HEAD + (CREATE_SIZE + 1) * sizeof(symbol)));
+
+ CAPACITY(p) = CREATE_SIZE;
+ SET_SIZE(p, CREATE_SIZE);
+ return p;
}
-extern void lose_s(symbol * p) { free((char *) p - HEAD); }
+extern void lose_s(symbol * p)
+{
+ free((char *) p - HEAD);
+}
-extern int in_grouping(struct SN_env * z, unsigned char * s, int min, int max)
-{ if (z->c >= z->l) return 0;
- { int ch = z->p[z->c];
- if
- (ch > max || (ch -= min) < 0 ||
- (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
- }
- z->c++; return 1;
+extern int
+in_grouping(struct SN_env * z, unsigned char *s, int min, int max)
+{
+ if (z->c >= z->l)
+ return 0;
+ {
+ int ch = z->p[z->c];
+
+ if
+ (ch > max || (ch -= min) < 0 ||
+ (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
+ return 0;
+ }
+ z->c++;
+ return 1;
}
-extern int in_grouping_b(struct SN_env * z, unsigned char * s, int min, int max)
-{ if (z->c <= z->lb) return 0;
- { int ch = z->p[z->c - 1];
- if
- (ch > max || (ch -= min) < 0 ||
- (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
- }
- z->c--; return 1;
+extern int
+in_grouping_b(struct SN_env * z, unsigned char *s, int min, int max)
+{
+ if (z->c <= z->lb)
+ return 0;
+ {
+ int ch = z->p[z->c - 1];
+
+ if
+ (ch > max || (ch -= min) < 0 ||
+ (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0)
+ return 0;
+ }
+ z->c--;
+ return 1;
}
-extern int out_grouping(struct SN_env * z, unsigned char * s, int min, int max)
-{ if (z->c >= z->l) return 0;
- { int ch = z->p[z->c];
- unless
- (ch > max || (ch -= min) < 0 ||
- (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
- }
- z->c++; return 1;
+extern int
+out_grouping(struct SN_env * z, unsigned char *s, int min, int max)
+{
+ if (z->c >= z->l)
+ return 0;
+ {
+ int ch = z->p[z->c];
+
+ unless
+ (ch > max || (ch -= min) < 0 ||
+ (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
+ }
+ z->c++;
+ return 1;
}
-extern int out_grouping_b(struct SN_env * z, unsigned char * s, int min, int max)
-{ if (z->c <= z->lb) return 0;
- { int ch = z->p[z->c - 1];
- unless
- (ch > max || (ch -= min) < 0 ||
- (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
- }
- z->c--; return 1;
+extern int
+out_grouping_b(struct SN_env * z, unsigned char *s, int min, int max)
+{
+ if (z->c <= z->lb)
+ return 0;
+ {
+ int ch = z->p[z->c - 1];
+
+ unless
+ (ch > max || (ch -= min) < 0 ||
+ (s[ch >> 3] & (0X1 << (ch & 0X7))) == 0) return 0;
+ }
+ z->c--;
+ return 1;
}
-extern int in_range(struct SN_env * z, int min, int max)
-{ if (z->c >= z->l) return 0;
- { int ch = z->p[z->c];
- if
- (ch > max || ch < min) return 0;
- }
- z->c++; return 1;
+extern int
+in_range(struct SN_env * z, int min, int max)
+{
+ if (z->c >= z->l)
+ return 0;
+ {
+ int ch = z->p[z->c];
+
+ if
+ (ch > max || ch < min)
+ return 0;
+ }
+ z->c++;
+ return 1;
}
-extern int in_range_b(struct SN_env * z, int min, int max)
-{ if (z->c <= z->lb) return 0;
- { int ch = z->p[z->c - 1];
- if
- (ch > max || ch < min) return 0;
- }
- z->c--; return 1;
+extern int
+in_range_b(struct SN_env * z, int min, int max)
+{
+ if (z->c <= z->lb)
+ return 0;
+ {
+ int ch = z->p[z->c - 1];
+
+ if
+ (ch > max || ch < min)
+ return 0;
+ }
+ z->c--;
+ return 1;
}
-extern int out_range(struct SN_env * z, int min, int max)
-{ if (z->c >= z->l) return 0;
- { int ch = z->p[z->c];
- unless
- (ch > max || ch < min) return 0;
- }
- z->c++; return 1;
+extern int
+out_range(struct SN_env * z, int min, int max)
+{
+ if (z->c >= z->l)
+ return 0;
+ {
+ int ch = z->p[z->c];
+
+ unless
+ (ch > max || ch < min) return 0;
+ }
+ z->c++;
+ return 1;
}
-extern int out_range_b(struct SN_env * z, int min, int max)
-{ if (z->c <= z->lb) return 0;
- { int ch = z->p[z->c - 1];
- unless
- (ch > max || ch < min) return 0;
- }
- z->c--; return 1;
+extern int
+out_range_b(struct SN_env * z, int min, int max)
+{
+ if (z->c <= z->lb)
+ return 0;
+ {
+ int ch = z->p[z->c - 1];
+
+ unless
+ (ch > max || ch < min) return 0;
+ }
+ z->c--;
+ return 1;
}
-extern int eq_s(struct SN_env * z, int s_size, symbol * s)
-{ if (z->l - z->c < s_size ||
- memcmp(z->p + z->c, s, s_size * sizeof(symbol)) != 0) return 0;
- z->c += s_size; return 1;
+extern int
+eq_s(struct SN_env * z, int s_size, symbol * s)
+{
+ if (z->l - z->c < s_size ||
+ memcmp(z->p + z->c, s, s_size * sizeof(symbol)) != 0)
+ return 0;
+ z->c += s_size;
+ return 1;
}
-extern int eq_s_b(struct SN_env * z, int s_size, symbol * s)
-{ if (z->c - z->lb < s_size ||
- memcmp(z->p + z->c - s_size, s, s_size * sizeof(symbol)) != 0) return 0;
- z->c -= s_size; return 1;
+extern int
+eq_s_b(struct SN_env * z, int s_size, symbol * s)
+{
+ if (z->c - z->lb < s_size ||
+ memcmp(z->p + z->c - s_size, s, s_size * sizeof(symbol)) != 0)
+ return 0;
+ z->c -= s_size;
+ return 1;
}
-extern int eq_v(struct SN_env * z, symbol * p)
-{ return eq_s(z, SIZE(p), p);
+extern int
+eq_v(struct SN_env * z, symbol * p)
+{
+ return eq_s(z, SIZE(p), p);
}
-extern int eq_v_b(struct SN_env * z, symbol * p)
-{ return eq_s_b(z, SIZE(p), p);
+extern int
+eq_v_b(struct SN_env * z, symbol * p)
+{
+ return eq_s_b(z, SIZE(p), p);
}
-extern int find_among(struct SN_env * z, struct among * v, int v_size)
+extern int
+find_among(struct SN_env * z, struct among * v, int v_size)
{
- int i = 0;
- int j = v_size;
-
- int c = z->c; int l = z->l;
- symbol * q = z->p + c;
-
- struct among * w;
-
- int common_i = 0;
- int common_j = 0;
-
- int first_key_inspected = 0;
-
- while(1)
- { int k = i + ((j - i) >> 1);
- int diff = 0;
- int common = common_i < common_j ? common_i : common_j; /* smaller */
- w = v + k;
- { int i; for (i = common; i < w->s_size; i++)
- { if (c + common == l) { diff = -1; break; }
- diff = q[common] - w->s[i];
- if (diff != 0) break;
- common++;
- }
- }
- if (diff < 0) { j = k; common_j = common; }
- else { i = k; common_i = common; }
- if (j - i <= 1)
- { if (i > 0) break; /* v->s has been inspected */
- if (j == i) break; /* only one item in v */
-
- /* - but now we need to go round once more to get
- v->s inspected. This looks messy, but is actually
- the optimal approach. */
-
- if (first_key_inspected) break;
- first_key_inspected = 1;
- }
- }
- while(1)
- { w = v + i;
- if (common_i >= w->s_size)
- { z->c = c + w->s_size;
- if (w->function == 0) return w->result;
- { int res = w->function(z);
- z->c = c + w->s_size;
- if (res) return w->result;
- }
- }
- i = w->substring_i;
- if (i < 0) return 0;
- }
+ int i = 0;
+ int j = v_size;
+
+ int c = z->c;
+ int l = z->l;
+ symbol *q = z->p + c;
+
+ struct among *w;
+
+ int common_i = 0;
+ int common_j = 0;
+
+ int first_key_inspected = 0;
+
+ while (1)
+ {
+ int k = i + ((j - i) >> 1);
+ int diff = 0;
+ int common = common_i < common_j ? common_i : common_j; /* smaller */
+
+ w = v + k;
+ {
+ int i;
+
+ for (i = common; i < w->s_size; i++)
+ {
+ if (c + common == l)
+ {
+ diff = -1;
+ break;
+ }
+ diff = q[common] - w->s[i];
+ if (diff != 0)
+ break;
+ common++;
+ }
+ }
+ if (diff < 0)
+ {
+ j = k;
+ common_j = common;
+ }
+ else
+ {
+ i = k;
+ common_i = common;
+ }
+ if (j - i <= 1)
+ {
+ if (i > 0)
+ break; /* v->s has been inspected */
+ if (j == i)
+ break; /* only one item in v */
+
+ /*
+ * - but now we need to go round once more to get v->s
+ * inspected. This looks messy, but is actually the optimal
+ * approach.
+ */
+
+ if (first_key_inspected)
+ break;
+ first_key_inspected = 1;
+ }
+ }
+ while (1)
+ {
+ w = v + i;
+ if (common_i >= w->s_size)
+ {
+ z->c = c + w->s_size;
+ if (w->function == 0)
+ return w->result;
+ {
+ int res = w->function(z);
+
+ z->c = c + w->s_size;
+ if (res)
+ return w->result;
+ }
+ }
+ i = w->substring_i;
+ if (i < 0)
+ return 0;
+ }
}
/* find_among_b is for backwards processing. Same comments apply */
-extern int find_among_b(struct SN_env * z, struct among * v, int v_size)
+extern int
+find_among_b(struct SN_env * z, struct among * v, int v_size)
{
- int i = 0;
- int j = v_size;
-
- int c = z->c; int lb = z->lb;
- symbol * q = z->p + c - 1;
-
- struct among * w;
-
- int common_i = 0;
- int common_j = 0;
-
- int first_key_inspected = 0;
-
- while(1)
- { int k = i + ((j - i) >> 1);
- int diff = 0;
- int common = common_i < common_j ? common_i : common_j;
- w = v + k;
- { int i; for (i = w->s_size - 1 - common; i >= 0; i--)
- { if (c - common == lb) { diff = -1; break; }
- diff = q[- common] - w->s[i];
- if (diff != 0) break;
- common++;
- }
- }
- if (diff < 0) { j = k; common_j = common; }
- else { i = k; common_i = common; }
- if (j - i <= 1)
- { if (i > 0) break;
- if (j == i) break;
- if (first_key_inspected) break;
- first_key_inspected = 1;
- }
- }
- while(1)
- { w = v + i;
- if (common_i >= w->s_size)
- { z->c = c - w->s_size;
- if (w->function == 0) return w->result;
- { int res = w->function(z);
- z->c = c - w->s_size;
- if (res) return w->result;
- }
- }
- i = w->substring_i;
- if (i < 0) return 0;
- }
+ int i = 0;
+ int j = v_size;
+
+ int c = z->c;
+ int lb = z->lb;
+ symbol *q = z->p + c - 1;
+
+ struct among *w;
+
+ int common_i = 0;
+ int common_j = 0;
+
+ int first_key_inspected = 0;
+
+ while (1)
+ {
+ int k = i + ((j - i) >> 1);
+ int diff = 0;
+ int common = common_i < common_j ? common_i : common_j;
+
+ w = v + k;
+ {
+ int i;
+
+ for (i = w->s_size - 1 - common; i >= 0; i--)
+ {
+ if (c - common == lb)
+ {
+ diff = -1;
+ break;
+ }
+ diff = q[-common] - w->s[i];
+ if (diff != 0)
+ break;
+ common++;
+ }
+ }
+ if (diff < 0)
+ {
+ j = k;
+ common_j = common;
+ }
+ else
+ {
+ i = k;
+ common_i = common;
+ }
+ if (j - i <= 1)
+ {
+ if (i > 0)
+ break;
+ if (j == i)
+ break;
+ if (first_key_inspected)
+ break;
+ first_key_inspected = 1;
+ }
+ }
+ while (1)
+ {
+ w = v + i;
+ if (common_i >= w->s_size)
+ {
+ z->c = c - w->s_size;
+ if (w->function == 0)
+ return w->result;
+ {
+ int res = w->function(z);
+
+ z->c = c - w->s_size;
+ if (res)
+ return w->result;
+ }
+ }
+ i = w->substring_i;
+ if (i < 0)
+ return 0;
+ }
}
-extern symbol * increase_size(symbol * p, int n)
-{ int new_size = n + 20;
- symbol * q = (symbol *) (HEAD + (char *) malloc(HEAD + (new_size + 1) * sizeof(symbol)));
- CAPACITY(q) = new_size;
- memmove(q, p, CAPACITY(p) * sizeof(symbol)); lose_s(p); return q;
+extern symbol *
+increase_size(symbol * p, int n)
+{
+ int new_size = n + 20;
+ symbol *q = (symbol *) (HEAD + (char *) malloc(HEAD + (new_size + 1) * sizeof(symbol)));
+
+ CAPACITY(q) = new_size;
+ memmove(q, p, CAPACITY(p) * sizeof(symbol));
+ lose_s(p);
+ return q;
}
/* to replace symbols between c_bra and c_ket in z->p by the
s_size symbols at s
*/
-extern int replace_s(struct SN_env * z, int c_bra, int c_ket, int s_size, const symbol * s)
-{ int adjustment = s_size - (c_ket - c_bra);
- int len = SIZE(z->p);
- if (adjustment != 0)
- { if (adjustment + len > CAPACITY(z->p)) z->p = increase_size(z->p, adjustment + len);
- memmove(z->p + c_ket + adjustment, z->p + c_ket, (len - c_ket) * sizeof(symbol));
- SET_SIZE(z->p, adjustment + len);
- z->l += adjustment;
- if (z->c >= c_ket) z->c += adjustment; else
- if (z->c > c_bra) z->c = c_bra;
- }
- unless (s_size == 0) memmove(z->p + c_bra, s, s_size * sizeof(symbol));
- return adjustment;
+extern int
+replace_s(struct SN_env * z, int c_bra, int c_ket, int s_size, const symbol * s)
+{
+ int adjustment = s_size - (c_ket - c_bra);
+ int len = SIZE(z->p);
+
+ if (adjustment != 0)
+ {
+ if (adjustment + len > CAPACITY(z->p))
+ z->p = increase_size(z->p, adjustment + len);
+ memmove(z->p + c_ket + adjustment, z->p + c_ket, (len - c_ket) * sizeof(symbol));
+ SET_SIZE(z->p, adjustment + len);
+ z->l += adjustment;
+ if (z->c >= c_ket)
+ z->c += adjustment;
+ else if (z->c > c_bra)
+ z->c = c_bra;
+ }
+ unless(s_size == 0) memmove(z->p + c_bra, s, s_size * sizeof(symbol));
+ return adjustment;
}
-static void slice_check(struct SN_env * z)
+static void
+slice_check(struct SN_env * z)
{
- if (!(0 <= z->bra &&
- z->bra <= z->ket &&
- z->ket <= z->l &&
- z->l <= SIZE(z->p))) /* this line could be removed */
- {
- fprintf(stderr, "faulty slice operation:\n");
- debug(z, -1, 0);
- exit(1);
- }
+ if (!(0 <= z->bra &&
+ z->bra <= z->ket &&
+ z->ket <= z->l &&
+ z->l <= SIZE(z->p))) /* this line could be removed */
+ {
+ fprintf(stderr, "faulty slice operation:\n");
+ debug(z, -1, 0);
+ exit(1);
+ }
}
-extern void slice_from_s(struct SN_env * z, int s_size, symbol * s)
-{ slice_check(z);
- replace_s(z, z->bra, z->ket, s_size, s);
+extern void
+slice_from_s(struct SN_env * z, int s_size, symbol * s)
+{
+ slice_check(z);
+ replace_s(z, z->bra, z->ket, s_size, s);
}
-extern void slice_from_v(struct SN_env * z, symbol * p)
-{ slice_from_s(z, SIZE(p), p);
+extern void
+slice_from_v(struct SN_env * z, symbol * p)
+{
+ slice_from_s(z, SIZE(p), p);
}
-extern void slice_del(struct SN_env * z)
-{ slice_from_s(z, 0, 0);
+extern void
+slice_del(struct SN_env * z)
+{
+ slice_from_s(z, 0, 0);
}
-extern void insert_s(struct SN_env * z, int bra, int ket, int s_size, symbol * s)
-{ int adjustment = replace_s(z, bra, ket, s_size, s);
- if (bra <= z->bra) z->bra += adjustment;
- if (bra <= z->ket) z->ket += adjustment;
+extern void
+insert_s(struct SN_env * z, int bra, int ket, int s_size, symbol * s)
+{
+ int adjustment = replace_s(z, bra, ket, s_size, s);
+
+ if (bra <= z->bra)
+ z->bra += adjustment;
+ if (bra <= z->ket)
+ z->ket += adjustment;
}
-extern void insert_v(struct SN_env * z, int bra, int ket, symbol * p)
-{ int adjustment = replace_s(z, bra, ket, SIZE(p), p);
- if (bra <= z->bra) z->bra += adjustment;
- if (bra <= z->ket) z->ket += adjustment;
+extern void
+insert_v(struct SN_env * z, int bra, int ket, symbol * p)
+{
+ int adjustment = replace_s(z, bra, ket, SIZE(p), p);
+
+ if (bra <= z->bra)
+ z->bra += adjustment;
+ if (bra <= z->ket)
+ z->ket += adjustment;
}
-extern symbol * slice_to(struct SN_env * z, symbol * p)
-{ slice_check(z);
- { int len = z->ket - z->bra;
- if (CAPACITY(p) < len) p = increase_size(p, len);
- memmove(p, z->p + z->bra, len * sizeof(symbol));
- SET_SIZE(p, len);
- }
- return p;
+extern symbol *
+slice_to(struct SN_env * z, symbol * p)
+{
+ slice_check(z);
+ {
+ int len = z->ket - z->bra;
+
+ if (CAPACITY(p) < len)
+ p = increase_size(p, len);
+ memmove(p, z->p + z->bra, len * sizeof(symbol));
+ SET_SIZE(p, len);
+ }
+ return p;
}
-extern symbol * assign_to(struct SN_env * z, symbol * p)
-{ int len = z->l;
- if (CAPACITY(p) < len) p = increase_size(p, len);
- memmove(p, z->p, len * sizeof(symbol));
- SET_SIZE(p, len);
- return p;
+extern symbol *
+assign_to(struct SN_env * z, symbol * p)
+{
+ int len = z->l;
+
+ if (CAPACITY(p) < len)
+ p = increase_size(p, len);
+ memmove(p, z->p, len * sizeof(symbol));
+ SET_SIZE(p, len);
+ return p;
}
-extern void debug(struct SN_env * z, int number, int line_count)
-{ int i;
- int limit = SIZE(z->p);
- /*if (number >= 0) printf("%3d (line %4d): '", number, line_count);*/
- if (number >= 0) printf("%3d (line %4d): [%d]'", number, line_count,limit);
- for (i = 0; i <= limit; i++)
- { if (z->lb == i) printf("{");
- if (z->bra == i) printf("[");
- if (z->c == i) printf("|");
- if (z->ket == i) printf("]");
- if (z->l == i) printf("}");
- if (i < limit)
- { int ch = z->p[i];
- if (ch == 0) ch = '#';
- printf("%c", ch);
- }
- }
- printf("'\n");
+extern void
+debug(struct SN_env * z, int number, int line_count)
+{
+ int i;
+ int limit = SIZE(z->p);
+
+ /* if (number >= 0) printf("%3d (line %4d): '", number, line_count); */
+ if (number >= 0)
+ printf("%3d (line %4d): [%d]'", number, line_count, limit);
+ for (i = 0; i <= limit; i++)
+ {
+ if (z->lb == i)
+ printf("{");
+ if (z->bra == i)
+ printf("[");
+ if (z->c == i)
+ printf("|");
+ if (z->ket == i)
+ printf("]");
+ if (z->l == i)
+ printf("}");
+ if (i < limit)
+ {
+ int ch = z->p[i];
+
+ if (ch == 0)
+ ch = '#';
+ printf("%c", ch);
+ }
+ }
+ printf("'\n");
}
diff --git a/contrib/tsearch2/stopword.c b/contrib/tsearch2/stopword.c
index 2c2874b7f88..f91abeb460e 100644
--- a/contrib/tsearch2/stopword.c
+++ b/contrib/tsearch2/stopword.c
@@ -1,4 +1,4 @@
-/*
+/*
* stopword library
* Teodor Sigaev <teodor@sigaev.ru>
*/
@@ -13,97 +13,114 @@
#define STOPBUFLEN 4096
-char*
-lowerstr(char *str) {
- char *ptr=str;
- while(*ptr) {
- *ptr = tolower(*(unsigned char*)ptr);
+char *
+lowerstr(char *str)
+{
+ char *ptr = str;
+
+ while (*ptr)
+ {
+ *ptr = tolower(*(unsigned char *) ptr);
ptr++;
}
return str;
}
void
-freestoplist(StopList *s) {
- char **ptr=s->stop;
- if ( ptr )
- while( *ptr && s->len >0 ) {
+freestoplist(StopList * s)
+{
+ char **ptr = s->stop;
+
+ if (ptr)
+ while (*ptr && s->len > 0)
+ {
free(*ptr);
- ptr++; s->len--;
- free(s->stop);
- }
- memset(s,0,sizeof(StopList));
+ ptr++;
+ s->len--;
+ free(s->stop);
+ }
+ memset(s, 0, sizeof(StopList));
}
void
-readstoplist(text *in, StopList *s) {
- char **stop=NULL;
- s->len=0;
- if ( in && VARSIZE(in) - VARHDRSZ > 0 ) {
- char *filename=text2char(in);
- FILE *hin=NULL;
- char buf[STOPBUFLEN];
- int reallen=0;
-
- if ( (hin=fopen(filename,"r")) == NULL )
+readstoplist(text *in, StopList * s)
+{
+ char **stop = NULL;
+
+ s->len = 0;
+ if (in && VARSIZE(in) - VARHDRSZ > 0)
+ {
+ char *filename = text2char(in);
+ FILE *hin = NULL;
+ char buf[STOPBUFLEN];
+ int reallen = 0;
+
+ if ((hin = fopen(filename, "r")) == NULL)
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not open file \"%s\": %m",
- filename)));
+ filename)));
- while( fgets(buf,STOPBUFLEN,hin) ) {
- buf[strlen(buf)-1] = '\0';
- if ( *buf=='\0' ) continue;
+ while (fgets(buf, STOPBUFLEN, hin))
+ {
+ buf[strlen(buf) - 1] = '\0';
+ if (*buf == '\0')
+ continue;
- if ( s->len>= reallen ) {
- char **tmp;
- reallen=(reallen) ? reallen*2 : 16;
- tmp=(char**)realloc((void*)stop, sizeof(char*)*reallen);
- if (!tmp) {
+ if (s->len >= reallen)
+ {
+ char **tmp;
+
+ reallen = (reallen) ? reallen * 2 : 16;
+ tmp = (char **) realloc((void *) stop, sizeof(char *) * reallen);
+ if (!tmp)
+ {
freestoplist(s);
- fclose(hin);
+ fclose(hin);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- stop=tmp;
+ stop = tmp;
}
-
- stop[s->len]=strdup(buf);
- if ( !stop[s->len] ) {
+
+ stop[s->len] = strdup(buf);
+ if (!stop[s->len])
+ {
freestoplist(s);
- fclose(hin);
+ fclose(hin);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
- if ( s->wordop )
- stop[s->len]=(s->wordop)(stop[s->len]);
+ if (s->wordop)
+ stop[s->len] = (s->wordop) (stop[s->len]);
- (s->len)++;
+ (s->len)++;
}
fclose(hin);
- pfree(filename);
+ pfree(filename);
}
- s->stop=stop;
-}
+ s->stop = stop;
+}
static int
-comparestr(const void *a, const void *b) {
- return strcmp( *(char**)a, *(char**)b );
+comparestr(const void *a, const void *b)
+{
+ return strcmp(*(char **) a, *(char **) b);
}
void
-sortstoplist(StopList *s) {
- if (s->stop && s->len>0)
- qsort(s->stop, s->len, sizeof(char*), comparestr);
+sortstoplist(StopList * s)
+{
+ if (s->stop && s->len > 0)
+ qsort(s->stop, s->len, sizeof(char *), comparestr);
}
bool
-searchstoplist(StopList *s, char *key) {
- if ( s->wordop )
- key=(*(s->wordop))(key);
- return ( s->stop && s->len>0 && bsearch(&key, s->stop, s->len, sizeof(char*), comparestr) ) ? true : false;
+searchstoplist(StopList * s, char *key)
+{
+ if (s->wordop)
+ key = (*(s->wordop)) (key);
+ return (s->stop && s->len > 0 && bsearch(&key, s->stop, s->len, sizeof(char *), comparestr)) ? true : false;
}
-
-
diff --git a/contrib/tsearch2/ts_cfg.c b/contrib/tsearch2/ts_cfg.c
index d964aae573e..ad06d90d9ab 100644
--- a/contrib/tsearch2/ts_cfg.c
+++ b/contrib/tsearch2/ts_cfg.c
@@ -1,5 +1,5 @@
-/*
- * interface functions to tscfg
+/*
+ * interface functions to tscfg
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <errno.h>
@@ -23,263 +23,299 @@
/*********top interface**********/
-static void *plan_getcfg_bylocale=NULL;
-static void *plan_getcfg=NULL;
-static void *plan_getmap=NULL;
-static void *plan_name2id=NULL;
-static Oid current_cfg_id=0;
+static void *plan_getcfg_bylocale = NULL;
+static void *plan_getcfg = NULL;
+static void *plan_getmap = NULL;
+static void *plan_name2id = NULL;
+static Oid current_cfg_id = 0;
void
-init_cfg(Oid id, TSCfgInfo *cfg) {
- Oid arg[2]={ OIDOID, OIDOID };
- bool isnull;
- Datum pars[2]={ ObjectIdGetDatum(id), ObjectIdGetDatum(id) } ;
- int stat,i,j;
- text *ptr;
- text *prsname=NULL;
- MemoryContext oldcontext;
-
- memset(cfg,0,sizeof(TSCfgInfo));
+init_cfg(Oid id, TSCfgInfo * cfg)
+{
+ Oid arg[2] = {OIDOID, OIDOID};
+ bool isnull;
+ Datum pars[2] = {ObjectIdGetDatum(id), ObjectIdGetDatum(id)};
+ int stat,
+ i,
+ j;
+ text *ptr;
+ text *prsname = NULL;
+ MemoryContext oldcontext;
+
+ memset(cfg, 0, sizeof(TSCfgInfo));
SPI_connect();
- if ( !plan_getcfg ) {
- plan_getcfg = SPI_saveplan( SPI_prepare( "select prs_name from pg_ts_cfg where oid = $1" , 1, arg ) );
- if ( !plan_getcfg )
+ if (!plan_getcfg)
+ {
+ plan_getcfg = SPI_saveplan(SPI_prepare("select prs_name from pg_ts_cfg where oid = $1", 1, arg));
+ if (!plan_getcfg)
ts_error(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_getcfg, pars, " ", 1);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 ) {
- prsname = (text*) DatumGetPointer(
- SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)
- );
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ {
+ prsname = (text *) DatumGetPointer(
+ SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull)
+ );
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
- prsname = ptextdup( prsname );
+ prsname = ptextdup(prsname);
MemoryContextSwitchTo(oldcontext);
-
- cfg->id=id;
- } else
+
+ cfg->id = id;
+ }
+ else
ts_error(ERROR, "No tsearch cfg with id %d", id);
- arg[0]=TEXTOID;
- if ( !plan_getmap ) {
- plan_getmap = SPI_saveplan( SPI_prepare( "select lt.tokid, pg_ts_cfgmap.dict_name from pg_ts_cfgmap, pg_ts_cfg, token_type( $1 ) as lt where lt.alias = pg_ts_cfgmap.tok_alias and pg_ts_cfgmap.ts_name = pg_ts_cfg.ts_name and pg_ts_cfg.oid= $2 order by lt.tokid desc;" , 2, arg ) );
- if ( !plan_getmap )
+ arg[0] = TEXTOID;
+ if (!plan_getmap)
+ {
+ plan_getmap = SPI_saveplan(SPI_prepare("select lt.tokid, pg_ts_cfgmap.dict_name from pg_ts_cfgmap, pg_ts_cfg, token_type( $1 ) as lt where lt.alias = pg_ts_cfgmap.tok_alias and pg_ts_cfgmap.ts_name = pg_ts_cfg.ts_name and pg_ts_cfg.oid= $2 order by lt.tokid desc;", 2, arg));
+ if (!plan_getmap)
ts_error(ERROR, "SPI_prepare() failed");
}
- pars[0]=PointerGetDatum( prsname );
+ pars[0] = PointerGetDatum(prsname);
stat = SPI_execp(plan_getmap, pars, " ", 0);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed <= 0 )
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed <= 0)
ts_error(ERROR, "No parser with id %d", id);
- for(i=0;i<SPI_processed;i++) {
- int lexid = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull));
- ArrayType *toasted_a = (ArrayType*)PointerGetDatum(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 2, &isnull));
- ArrayType *a;
-
- if ( !cfg->map ) {
- cfg->len=lexid+1;
- cfg->map = (ListDictionary*)malloc( sizeof(ListDictionary)*cfg->len );
- if ( !cfg->map )
+ for (i = 0; i < SPI_processed; i++)
+ {
+ int lexid = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull));
+ ArrayType *toasted_a = (ArrayType *) PointerGetDatum(SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 2, &isnull));
+ ArrayType *a;
+
+ if (!cfg->map)
+ {
+ cfg->len = lexid + 1;
+ cfg->map = (ListDictionary *) malloc(sizeof(ListDictionary) * cfg->len);
+ if (!cfg->map)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
- memset( cfg->map, 0, sizeof(ListDictionary)*cfg->len );
+ memset(cfg->map, 0, sizeof(ListDictionary) * cfg->len);
}
if (isnull)
continue;
- a=(ArrayType*)PointerGetDatum( PG_DETOAST_DATUM( DatumGetPointer(toasted_a) ) );
-
- if ( ARR_NDIM(a) != 1 )
- ts_error(ERROR,"Wrong dimension");
- if ( ARRNELEMS(a) < 1 )
+ a = (ArrayType *) PointerGetDatum(PG_DETOAST_DATUM(DatumGetPointer(toasted_a)));
+
+ if (ARR_NDIM(a) != 1)
+ ts_error(ERROR, "Wrong dimension");
+ if (ARRNELEMS(a) < 1)
continue;
- cfg->map[lexid].len=ARRNELEMS(a);
- cfg->map[lexid].dict_id=(Datum*)malloc( sizeof(Datum)*cfg->map[lexid].len );
- memset(cfg->map[lexid].dict_id,0,sizeof(Datum)*cfg->map[lexid].len );
- ptr=(text*)ARR_DATA_PTR(a);
+ cfg->map[lexid].len = ARRNELEMS(a);
+ cfg->map[lexid].dict_id = (Datum *) malloc(sizeof(Datum) * cfg->map[lexid].len);
+ memset(cfg->map[lexid].dict_id, 0, sizeof(Datum) * cfg->map[lexid].len);
+ ptr = (text *) ARR_DATA_PTR(a);
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
- for(j=0;j<cfg->map[lexid].len;j++) {
+ for (j = 0; j < cfg->map[lexid].len; j++)
+ {
cfg->map[lexid].dict_id[j] = PointerGetDatum(ptextdup(ptr));
- ptr=NEXTVAL(ptr);
- }
+ ptr = NEXTVAL(ptr);
+ }
MemoryContextSwitchTo(oldcontext);
- if ( a != toasted_a )
+ if (a != toasted_a)
pfree(a);
}
-
+
SPI_finish();
- cfg->prs_id = name2id_prs( prsname );
+ cfg->prs_id = name2id_prs(prsname);
pfree(prsname);
- for(i=0;i<cfg->len;i++) {
- for(j=0;j<cfg->map[i].len;j++) {
- ptr = (text*)DatumGetPointer( cfg->map[i].dict_id[j] );
- cfg->map[i].dict_id[j] = ObjectIdGetDatum( name2id_dict(ptr) );
+ for (i = 0; i < cfg->len; i++)
+ {
+ for (j = 0; j < cfg->map[i].len; j++)
+ {
+ ptr = (text *) DatumGetPointer(cfg->map[i].dict_id[j]);
+ cfg->map[i].dict_id[j] = ObjectIdGetDatum(name2id_dict(ptr));
pfree(ptr);
}
}
}
-typedef struct {
- TSCfgInfo *last_cfg;
- int len;
- int reallen;
- TSCfgInfo *list;
+typedef struct
+{
+ TSCfgInfo *last_cfg;
+ int len;
+ int reallen;
+ TSCfgInfo *list;
SNMap name2id_map;
-} CFGList;
+} CFGList;
-static CFGList CList = {NULL,0,0,NULL,{0,0,NULL}};
+static CFGList CList = {NULL, 0, 0, NULL, {0, 0, NULL}};
void
-reset_cfg(void) {
- freeSNMap( &(CList.name2id_map) );
- if ( CList.list ) {
- int i,j;
- for(i=0;i<CList.len;i++)
- if ( CList.list[i].map ) {
- for(j=0;j<CList.list[i].len;j++)
- if ( CList.list[i].map[j].dict_id )
+reset_cfg(void)
+{
+ freeSNMap(&(CList.name2id_map));
+ if (CList.list)
+ {
+ int i,
+ j;
+
+ for (i = 0; i < CList.len; i++)
+ if (CList.list[i].map)
+ {
+ for (j = 0; j < CList.list[i].len; j++)
+ if (CList.list[i].map[j].dict_id)
free(CList.list[i].map[j].dict_id);
- free( CList.list[i].map );
+ free(CList.list[i].map);
}
- free(CList.list);
+ free(CList.list);
}
- memset(&CList,0,sizeof(CFGList));
+ memset(&CList, 0, sizeof(CFGList));
}
static int
-comparecfg(const void *a, const void *b) {
- return ((TSCfgInfo*)a)->id - ((TSCfgInfo*)b)->id;
+comparecfg(const void *a, const void *b)
+{
+ return ((TSCfgInfo *) a)->id - ((TSCfgInfo *) b)->id;
}
TSCfgInfo *
-findcfg(Oid id) {
+findcfg(Oid id)
+{
/* last used cfg */
- if ( CList.last_cfg && CList.last_cfg->id==id )
+ if (CList.last_cfg && CList.last_cfg->id == id)
return CList.last_cfg;
/* already used cfg */
- if ( CList.len != 0 ) {
- TSCfgInfo key;
- key.id=id;
+ if (CList.len != 0)
+ {
+ TSCfgInfo key;
+
+ key.id = id;
CList.last_cfg = bsearch(&key, CList.list, CList.len, sizeof(TSCfgInfo), comparecfg);
- if ( CList.last_cfg != NULL )
+ if (CList.last_cfg != NULL)
return CList.last_cfg;
}
/* last chance */
- if ( CList.len==CList.reallen ) {
- TSCfgInfo *tmp;
- int reallen = ( CList.reallen ) ? 2*CList.reallen : 16;
- tmp=(TSCfgInfo*)realloc(CList.list,sizeof(TSCfgInfo)*reallen);
- if ( !tmp )
- ts_error(ERROR,"No memory");
- CList.reallen=reallen;
- CList.list=tmp;
+ if (CList.len == CList.reallen)
+ {
+ TSCfgInfo *tmp;
+ int reallen = (CList.reallen) ? 2 * CList.reallen : 16;
+
+ tmp = (TSCfgInfo *) realloc(CList.list, sizeof(TSCfgInfo) * reallen);
+ if (!tmp)
+ ts_error(ERROR, "No memory");
+ CList.reallen = reallen;
+ CList.list = tmp;
}
- CList.last_cfg=&(CList.list[CList.len]);
+ CList.last_cfg = &(CList.list[CList.len]);
init_cfg(id, CList.last_cfg);
CList.len++;
qsort(CList.list, CList.len, sizeof(TSCfgInfo), comparecfg);
- return findcfg(id); /* qsort changed order!! */;
+ return findcfg(id); /* qsort changed order!! */ ;
}
Oid
-name2id_cfg(text *name) {
- Oid arg[1]={ TEXTOID };
- bool isnull;
- Datum pars[1]={ PointerGetDatum(name) };
- int stat;
- Oid id=findSNMap_t( &(CList.name2id_map), name );
-
- if ( id )
+name2id_cfg(text *name)
+{
+ Oid arg[1] = {TEXTOID};
+ bool isnull;
+ Datum pars[1] = {PointerGetDatum(name)};
+ int stat;
+ Oid id = findSNMap_t(&(CList.name2id_map), name);
+
+ if (id)
return id;
-
+
SPI_connect();
- if ( !plan_name2id ) {
- plan_name2id = SPI_saveplan( SPI_prepare( "select oid from pg_ts_cfg where ts_name = $1" , 1, arg ) );
- if ( !plan_name2id )
+ if (!plan_name2id)
+ {
+ plan_name2id = SPI_saveplan(SPI_prepare("select oid from pg_ts_cfg where ts_name = $1", 1, arg));
+ if (!plan_name2id)
/* internal error */
elog(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_name2id, pars, " ", 1);
- if ( stat < 0 )
+ if (stat < 0)
/* internal error */
- elog (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 ) {
- id=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- if ( isnull )
+ elog(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ {
+ id = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ if (isnull)
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("null id for tsearch config")));
- } else
+ }
+ else
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("no tsearch config")));
SPI_finish();
- addSNMap_t( &(CList.name2id_map), name, id );
+ addSNMap_t(&(CList.name2id_map), name, id);
return id;
}
-void
-parsetext_v2(TSCfgInfo *cfg, PRSTEXT * prs, char *buf, int4 buflen) {
- int type, lenlemm, i;
- char *lemm=NULL;
+void
+parsetext_v2(TSCfgInfo * cfg, PRSTEXT * prs, char *buf, int4 buflen)
+{
+ int type,
+ lenlemm,
+ i;
+ char *lemm = NULL;
WParserInfo *prsobj = findprs(cfg->prs_id);
- prsobj->prs=(void*)DatumGetPointer(
- FunctionCall2(
- &(prsobj->start_info),
- PointerGetDatum(buf),
- Int32GetDatum(buflen)
- )
- );
-
- while( ( type=DatumGetInt32(FunctionCall3(
- &(prsobj->getlexeme_info),
- PointerGetDatum(prsobj->prs),
- PointerGetDatum(&lemm),
- PointerGetDatum(&lenlemm))) ) != 0 ) {
-
- if ( lenlemm >= MAXSTRLEN )
+ prsobj->prs = (void *) DatumGetPointer(
+ FunctionCall2(
+ &(prsobj->start_info),
+ PointerGetDatum(buf),
+ Int32GetDatum(buflen)
+ )
+ );
+
+ while ((type = DatumGetInt32(FunctionCall3(
+ &(prsobj->getlexeme_info),
+ PointerGetDatum(prsobj->prs),
+ PointerGetDatum(&lemm),
+ PointerGetDatum(&lenlemm)))) != 0)
+ {
+
+ if (lenlemm >= MAXSTRLEN)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
- if ( type >= cfg->len ) /* skip this type of lexem */
- continue;
-
- for(i=0;i<cfg->map[type].len;i++) {
- DictInfo *dict=finddict( DatumGetObjectId(cfg->map[type].dict_id[i]) );
- char **norms, **ptr;
-
- norms = ptr = (char**)DatumGetPointer(
- FunctionCall3(
- &(dict->lexize_info),
- PointerGetDatum(dict->dictionary),
- PointerGetDatum(lemm),
- PointerGetDatum(lenlemm)
- )
- );
- if ( !norms ) /* dictionary doesn't know this lexem */
+ if (type >= cfg->len) /* skip this type of lexem */
+ continue;
+
+ for (i = 0; i < cfg->map[type].len; i++)
+ {
+ DictInfo *dict = finddict(DatumGetObjectId(cfg->map[type].dict_id[i]));
+ char **norms,
+ **ptr;
+
+ norms = ptr = (char **) DatumGetPointer(
+ FunctionCall3(
+ &(dict->lexize_info),
+ PointerGetDatum(dict->dictionary),
+ PointerGetDatum(lemm),
+ PointerGetDatum(lenlemm)
+ )
+ );
+ if (!norms) /* dictionary doesn't know this lexem */
continue;
- prs->pos++; /*set pos*/
+ prs->pos++; /* set pos */
- while( *ptr ) {
- if (prs->curwords == prs->lenwords) {
+ while (*ptr)
+ {
+ if (prs->curwords == prs->lenwords)
+ {
prs->lenwords *= 2;
prs->words = (WORD *) repalloc((void *) prs->words, prs->lenwords * sizeof(WORD));
}
@@ -292,191 +328,220 @@ parsetext_v2(TSCfgInfo *cfg, PRSTEXT * prs, char *buf, int4 buflen) {
prs->curwords++;
}
pfree(norms);
- break; /* lexem already normalized or is stop word*/
+ break; /* lexem already normalized or is stop
+ * word */
}
}
FunctionCall1(
- &(prsobj->end_info),
- PointerGetDatum(prsobj->prs)
- );
+ &(prsobj->end_info),
+ PointerGetDatum(prsobj->prs)
+ );
}
static void
-hladdword(HLPRSTEXT * prs, char *buf, int4 buflen, int type) {
- while (prs->curwords >= prs->lenwords) {
+hladdword(HLPRSTEXT * prs, char *buf, int4 buflen, int type)
+{
+ while (prs->curwords >= prs->lenwords)
+ {
prs->lenwords *= 2;
prs->words = (HLWORD *) repalloc((void *) prs->words, prs->lenwords * sizeof(HLWORD));
}
- memset( &(prs->words[prs->curwords]), 0, sizeof(HLWORD) );
- prs->words[prs->curwords].type = (uint8)type;
- prs->words[prs->curwords].len = buflen;
+ memset(&(prs->words[prs->curwords]), 0, sizeof(HLWORD));
+ prs->words[prs->curwords].type = (uint8) type;
+ prs->words[prs->curwords].len = buflen;
prs->words[prs->curwords].word = palloc(buflen);
memcpy(prs->words[prs->curwords].word, buf, buflen);
- prs->curwords++;
+ prs->curwords++;
}
static void
-hlfinditem(HLPRSTEXT * prs, QUERYTYPE *query, char *buf, int buflen ) {
- int i;
- ITEM *item=GETQUERY(query);
- HLWORD *word=&( prs->words[prs->curwords-1] );
-
- while (prs->curwords + query->size >= prs->lenwords) {
+hlfinditem(HLPRSTEXT * prs, QUERYTYPE * query, char *buf, int buflen)
+{
+ int i;
+ ITEM *item = GETQUERY(query);
+ HLWORD *word = &(prs->words[prs->curwords - 1]);
+
+ while (prs->curwords + query->size >= prs->lenwords)
+ {
prs->lenwords *= 2;
prs->words = (HLWORD *) repalloc((void *) prs->words, prs->lenwords * sizeof(HLWORD));
}
- for(i=0; i<query->size; i++) {
- if ( item->type == VAL && item->length == buflen && strncmp( GETOPERAND(query) + item->distance, buf, buflen )==0 ) {
- if ( word->item ) {
- memcpy( &(prs->words[prs->curwords]), word, sizeof(HLWORD) );
- prs->words[prs->curwords].item=item;
- prs->words[prs->curwords].repeated=1;
+ for (i = 0; i < query->size; i++)
+ {
+ if (item->type == VAL && item->length == buflen && strncmp(GETOPERAND(query) + item->distance, buf, buflen) == 0)
+ {
+ if (word->item)
+ {
+ memcpy(&(prs->words[prs->curwords]), word, sizeof(HLWORD));
+ prs->words[prs->curwords].item = item;
+ prs->words[prs->curwords].repeated = 1;
prs->curwords++;
- } else
- word->item=item;
+ }
+ else
+ word->item = item;
}
item++;
}
}
-void
-hlparsetext(TSCfgInfo *cfg, HLPRSTEXT * prs, QUERYTYPE *query, char *buf, int4 buflen) {
- int type, lenlemm, i;
- char *lemm=NULL;
+void
+hlparsetext(TSCfgInfo * cfg, HLPRSTEXT * prs, QUERYTYPE * query, char *buf, int4 buflen)
+{
+ int type,
+ lenlemm,
+ i;
+ char *lemm = NULL;
WParserInfo *prsobj = findprs(cfg->prs_id);
- prsobj->prs=(void*)DatumGetPointer(
- FunctionCall2(
- &(prsobj->start_info),
- PointerGetDatum(buf),
- Int32GetDatum(buflen)
- )
- );
-
- while( ( type=DatumGetInt32(FunctionCall3(
- &(prsobj->getlexeme_info),
- PointerGetDatum(prsobj->prs),
- PointerGetDatum(&lemm),
- PointerGetDatum(&lenlemm))) ) != 0 ) {
-
- if ( lenlemm >= MAXSTRLEN )
+ prsobj->prs = (void *) DatumGetPointer(
+ FunctionCall2(
+ &(prsobj->start_info),
+ PointerGetDatum(buf),
+ Int32GetDatum(buflen)
+ )
+ );
+
+ while ((type = DatumGetInt32(FunctionCall3(
+ &(prsobj->getlexeme_info),
+ PointerGetDatum(prsobj->prs),
+ PointerGetDatum(&lemm),
+ PointerGetDatum(&lenlemm)))) != 0)
+ {
+
+ if (lenlemm >= MAXSTRLEN)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
- hladdword(prs,lemm,lenlemm,type);
-
- if ( type >= cfg->len )
- continue;
-
- for(i=0;i<cfg->map[type].len;i++) {
- DictInfo *dict=finddict( DatumGetObjectId(cfg->map[type].dict_id[i]) );
- char **norms, **ptr;
-
- norms = ptr = (char**)DatumGetPointer(
- FunctionCall3(
- &(dict->lexize_info),
- PointerGetDatum(dict->dictionary),
- PointerGetDatum(lemm),
- PointerGetDatum(lenlemm)
- )
- );
- if ( !norms ) /* dictionary doesn't know this lexem */
+ hladdword(prs, lemm, lenlemm, type);
+
+ if (type >= cfg->len)
+ continue;
+
+ for (i = 0; i < cfg->map[type].len; i++)
+ {
+ DictInfo *dict = finddict(DatumGetObjectId(cfg->map[type].dict_id[i]));
+ char **norms,
+ **ptr;
+
+ norms = ptr = (char **) DatumGetPointer(
+ FunctionCall3(
+ &(dict->lexize_info),
+ PointerGetDatum(dict->dictionary),
+ PointerGetDatum(lemm),
+ PointerGetDatum(lenlemm)
+ )
+ );
+ if (!norms) /* dictionary doesn't know this lexem */
continue;
- while( *ptr ) {
- hlfinditem(prs,query,*ptr,strlen(*ptr));
+ while (*ptr)
+ {
+ hlfinditem(prs, query, *ptr, strlen(*ptr));
pfree(*ptr);
ptr++;
}
pfree(norms);
- break; /* lexem already normalized or is stop word*/
+ break; /* lexem already normalized or is stop
+ * word */
}
}
FunctionCall1(
- &(prsobj->end_info),
- PointerGetDatum(prsobj->prs)
- );
+ &(prsobj->end_info),
+ PointerGetDatum(prsobj->prs)
+ );
}
-text*
-genhl(HLPRSTEXT * prs) {
- text *out;
- int len=128;
- char *ptr;
- HLWORD *wrd=prs->words;
+text *
+genhl(HLPRSTEXT * prs)
+{
+ text *out;
+ int len = 128;
+ char *ptr;
+ HLWORD *wrd = prs->words;
- out = (text*)palloc( len );
- ptr=((char*)out) + VARHDRSZ;
+ out = (text *) palloc(len);
+ ptr = ((char *) out) + VARHDRSZ;
- while( wrd - prs->words < prs->curwords ) {
- while ( wrd->len + prs->stopsellen + prs->startsellen + (ptr - ((char*)out)) >= len ) {
- int dist = ptr - ((char*)out);
- len*= 2;
+ while (wrd - prs->words < prs->curwords)
+ {
+ while (wrd->len + prs->stopsellen + prs->startsellen + (ptr - ((char *) out)) >= len)
+ {
+ int dist = ptr - ((char *) out);
+
+ len *= 2;
out = (text *) repalloc(out, len);
- ptr=((char*)out) + dist;
+ ptr = ((char *) out) + dist;
}
- if ( wrd->in && !wrd->skip && !wrd->repeated ) {
- if ( wrd->replace ) {
- *ptr=' ';
+ if (wrd->in && !wrd->skip && !wrd->repeated)
+ {
+ if (wrd->replace)
+ {
+ *ptr = ' ';
ptr++;
- } else {
- if (wrd->selected) {
- memcpy(ptr,prs->startsel,prs->startsellen);
- ptr+=prs->startsellen;
+ }
+ else
+ {
+ if (wrd->selected)
+ {
+ memcpy(ptr, prs->startsel, prs->startsellen);
+ ptr += prs->startsellen;
}
- memcpy(ptr,wrd->word,wrd->len);
- ptr+=wrd->len;
- if (wrd->selected) {
- memcpy(ptr,prs->stopsel,prs->stopsellen);
- ptr+=prs->stopsellen;
+ memcpy(ptr, wrd->word, wrd->len);
+ ptr += wrd->len;
+ if (wrd->selected)
+ {
+ memcpy(ptr, prs->stopsel, prs->stopsellen);
+ ptr += prs->stopsellen;
}
}
}
- if ( !wrd->repeated )
+ if (!wrd->repeated)
pfree(wrd->word);
wrd++;
}
- VARATT_SIZEP(out)=ptr - ((char*)out);
- return out;
+ VARATT_SIZEP(out) = ptr - ((char *) out);
+ return out;
}
-int
-get_currcfg(void) {
- Oid arg[1]={ TEXTOID };
+int
+get_currcfg(void)
+{
+ Oid arg[1] = {TEXTOID};
const char *curlocale;
- Datum pars[1];
- bool isnull;
- int stat;
+ Datum pars[1];
+ bool isnull;
+ int stat;
- if ( current_cfg_id > 0 )
+ if (current_cfg_id > 0)
return current_cfg_id;
SPI_connect();
- if ( !plan_getcfg_bylocale ) {
- plan_getcfg_bylocale=SPI_saveplan( SPI_prepare( "select oid from pg_ts_cfg where locale = $1 ", 1, arg ) );
- if ( !plan_getcfg_bylocale )
+ if (!plan_getcfg_bylocale)
+ {
+ plan_getcfg_bylocale = SPI_saveplan(SPI_prepare("select oid from pg_ts_cfg where locale = $1 ", 1, arg));
+ if (!plan_getcfg_bylocale)
/* internal error */
elog(ERROR, "SPI_prepare() failed");
}
curlocale = setlocale(LC_CTYPE, NULL);
- pars[0] = PointerGetDatum( char2text((char*)curlocale) );
+ pars[0] = PointerGetDatum(char2text((char *) curlocale));
stat = SPI_execp(plan_getcfg_bylocale, pars, " ", 1);
- if ( stat < 0 )
+ if (stat < 0)
/* internal error */
- elog (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 )
- current_cfg_id = DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- else
+ elog(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ current_cfg_id = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ else
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not find tsearch config by locale")));
@@ -487,39 +552,43 @@ get_currcfg(void) {
}
PG_FUNCTION_INFO_V1(set_curcfg);
-Datum set_curcfg(PG_FUNCTION_ARGS);
+Datum set_curcfg(PG_FUNCTION_ARGS);
Datum
-set_curcfg(PG_FUNCTION_ARGS) {
- findcfg(PG_GETARG_OID(0));
- current_cfg_id=PG_GETARG_OID(0);
- PG_RETURN_VOID();
+set_curcfg(PG_FUNCTION_ARGS)
+{
+ findcfg(PG_GETARG_OID(0));
+ current_cfg_id = PG_GETARG_OID(0);
+ PG_RETURN_VOID();
}
-
+
PG_FUNCTION_INFO_V1(set_curcfg_byname);
-Datum set_curcfg_byname(PG_FUNCTION_ARGS);
+Datum set_curcfg_byname(PG_FUNCTION_ARGS);
Datum
-set_curcfg_byname(PG_FUNCTION_ARGS) {
- text *name=PG_GETARG_TEXT_P(0);
-
- DirectFunctionCall1(
- set_curcfg,
- ObjectIdGetDatum( name2id_cfg(name) )
- );
- PG_FREE_IF_COPY(name, 0);
- PG_RETURN_VOID();
-}
+set_curcfg_byname(PG_FUNCTION_ARGS)
+{
+ text *name = PG_GETARG_TEXT_P(0);
+
+ DirectFunctionCall1(
+ set_curcfg,
+ ObjectIdGetDatum(name2id_cfg(name))
+ );
+ PG_FREE_IF_COPY(name, 0);
+ PG_RETURN_VOID();
+}
PG_FUNCTION_INFO_V1(show_curcfg);
-Datum show_curcfg(PG_FUNCTION_ARGS);
+Datum show_curcfg(PG_FUNCTION_ARGS);
Datum
-show_curcfg(PG_FUNCTION_ARGS) {
- PG_RETURN_OID( get_currcfg() );
+show_curcfg(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_OID(get_currcfg());
}
PG_FUNCTION_INFO_V1(reset_tsearch);
-Datum reset_tsearch(PG_FUNCTION_ARGS);
+Datum reset_tsearch(PG_FUNCTION_ARGS);
Datum
-reset_tsearch(PG_FUNCTION_ARGS) {
- ts_error(NOTICE,"TSearch cache cleaned");
- PG_RETURN_VOID();
+reset_tsearch(PG_FUNCTION_ARGS)
+{
+ ts_error(NOTICE, "TSearch cache cleaned");
+ PG_RETURN_VOID();
}
diff --git a/contrib/tsearch2/ts_cfg.h b/contrib/tsearch2/ts_cfg.h
index 01006c1f93c..cde35ca2a6e 100644
--- a/contrib/tsearch2/ts_cfg.h
+++ b/contrib/tsearch2/ts_cfg.h
@@ -3,66 +3,73 @@
#include "postgres.h"
#include "query.h"
-typedef struct {
- int len;
- Datum *dict_id;
-} ListDictionary;
+typedef struct
+{
+ int len;
+ Datum *dict_id;
+} ListDictionary;
-typedef struct {
- Oid id;
- Oid prs_id;
- int len;
- ListDictionary *map;
+typedef struct
+{
+ Oid id;
+ Oid prs_id;
+ int len;
+ ListDictionary *map;
} TSCfgInfo;
-Oid name2id_cfg(text *name);
-TSCfgInfo * findcfg(Oid id);
-void init_cfg(Oid id, TSCfgInfo *cfg);
-void reset_cfg(void);
+Oid name2id_cfg(text *name);
+TSCfgInfo *findcfg(Oid id);
+void init_cfg(Oid id, TSCfgInfo * cfg);
+void reset_cfg(void);
-typedef struct {
- uint16 len;
- union {
+typedef struct
+{
+ uint16 len;
+ union
+ {
uint16 pos;
- uint16 *apos;
- } pos;
- char *word;
- uint32 alen;
-} WORD;
-
-typedef struct {
- WORD *words;
- int4 lenwords;
- int4 curwords;
+ uint16 *apos;
+ } pos;
+ char *word;
+ uint32 alen;
+} WORD;
+
+typedef struct
+{
+ WORD *words;
+ int4 lenwords;
+ int4 curwords;
int4 pos;
-} PRSTEXT;
+} PRSTEXT;
+
+typedef struct
+{
+ uint16 len;
+ uint8 selected:1,
+ in:1,
+ skip:1,
+ replace:1,
+ repeated:1;
+ uint8 type;
+ char *word;
+ ITEM *item;
+} HLWORD;
-typedef struct {
- uint16 len;
- uint8 selected:1,
- in:1,
- skip:1,
- replace:1,
- repeated:1;
- uint8 type;
- char *word;
- ITEM *item;
-} HLWORD;
-
-typedef struct {
- HLWORD *words;
- int4 lenwords;
- int4 curwords;
- char *startsel;
- char *stopsel;
- int2 startsellen;
- int2 stopsellen;
-} HLPRSTEXT;
+typedef struct
+{
+ HLWORD *words;
+ int4 lenwords;
+ int4 curwords;
+ char *startsel;
+ char *stopsel;
+ int2 startsellen;
+ int2 stopsellen;
+} HLPRSTEXT;
-void hlparsetext(TSCfgInfo *cfg, HLPRSTEXT * prs, QUERYTYPE *query, char *buf, int4 buflen);
-text* genhl(HLPRSTEXT * prs);
+void hlparsetext(TSCfgInfo * cfg, HLPRSTEXT * prs, QUERYTYPE * query, char *buf, int4 buflen);
+text *genhl(HLPRSTEXT * prs);
-void parsetext_v2(TSCfgInfo *cfg, PRSTEXT * prs, char *buf, int4 buflen);
-int get_currcfg(void);
+void parsetext_v2(TSCfgInfo * cfg, PRSTEXT * prs, char *buf, int4 buflen);
+int get_currcfg(void);
#endif
diff --git a/contrib/tsearch2/ts_stat.c b/contrib/tsearch2/ts_stat.c
index a09e0572143..47353fc579e 100644
--- a/contrib/tsearch2/ts_stat.c
+++ b/contrib/tsearch2/ts_stat.c
@@ -10,108 +10,128 @@
#include "common.h"
PG_FUNCTION_INFO_V1(tsstat_in);
-Datum tsstat_in(PG_FUNCTION_ARGS);
-Datum
-tsstat_in(PG_FUNCTION_ARGS) {
- tsstat *stat=palloc(STATHDRSIZE);
- stat->len=STATHDRSIZE;
- stat->size=0;
+Datum tsstat_in(PG_FUNCTION_ARGS);
+Datum
+tsstat_in(PG_FUNCTION_ARGS)
+{
+ tsstat *stat = palloc(STATHDRSIZE);
+
+ stat->len = STATHDRSIZE;
+ stat->size = 0;
PG_RETURN_POINTER(stat);
}
PG_FUNCTION_INFO_V1(tsstat_out);
-Datum tsstat_out(PG_FUNCTION_ARGS);
-Datum
-tsstat_out(PG_FUNCTION_ARGS) {
+Datum tsstat_out(PG_FUNCTION_ARGS);
+Datum
+tsstat_out(PG_FUNCTION_ARGS)
+{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tsstat_out not implemented")));
PG_RETURN_NULL();
}
-static WordEntry**
-SEI_realloc( WordEntry** in, uint32 *len ) {
- if ( *len==0 || in==NULL ) {
- *len=8;
- in=palloc( sizeof(WordEntry*)* (*len) );
- } else {
+static WordEntry **
+SEI_realloc(WordEntry ** in, uint32 *len)
+{
+ if (*len == 0 || in == NULL)
+ {
+ *len = 8;
+ in = palloc(sizeof(WordEntry *) * (*len));
+ }
+ else
+ {
*len *= 2;
- in=repalloc( in, sizeof(WordEntry*)* (*len) );
+ in = repalloc(in, sizeof(WordEntry *) * (*len));
}
return in;
}
static int
-compareStatWord(StatEntry *a, WordEntry *b, tsstat *stat, tsvector *txt) {
- if ( a->len == b->len )
+compareStatWord(StatEntry * a, WordEntry * b, tsstat * stat, tsvector * txt)
+{
+ if (a->len == b->len)
return strncmp(
- STATSTRPTR(stat) + a->pos,
- STRPTR(txt) + b->pos,
- a->len
+ STATSTRPTR(stat) + a->pos,
+ STRPTR(txt) + b->pos,
+ a->len
);
- return ( a->len > b->len ) ? 1 : -1;
+ return (a->len > b->len) ? 1 : -1;
}
-static tsstat*
-formstat(tsstat *stat, tsvector *txt, WordEntry** entry, uint32 len) {
- tsstat *newstat;
- uint32 totallen, nentry;
- uint32 slen=0;
- WordEntry **ptr=entry;
- char *curptr;
- StatEntry *sptr,*nptr;
-
- while(ptr-entry<len) {
+static tsstat *
+formstat(tsstat * stat, tsvector * txt, WordEntry ** entry, uint32 len)
+{
+ tsstat *newstat;
+ uint32 totallen,
+ nentry;
+ uint32 slen = 0;
+ WordEntry **ptr = entry;
+ char *curptr;
+ StatEntry *sptr,
+ *nptr;
+
+ while (ptr - entry < len)
+ {
slen += (*ptr)->len;
ptr++;
}
- nentry=stat->size + len;
- slen+=STATSTRSIZE(stat);
- totallen=CALCSTATSIZE(nentry,slen);
- newstat=palloc(totallen);
- newstat->len=totallen;
- newstat->size=nentry;
+ nentry = stat->size + len;
+ slen += STATSTRSIZE(stat);
+ totallen = CALCSTATSIZE(nentry, slen);
+ newstat = palloc(totallen);
+ newstat->len = totallen;
+ newstat->size = nentry;
memcpy(STATSTRPTR(newstat), STATSTRPTR(stat), STATSTRSIZE(stat));
- curptr=STATSTRPTR(newstat) + STATSTRSIZE(stat);
+ curptr = STATSTRPTR(newstat) + STATSTRSIZE(stat);
- ptr=entry;
- sptr=STATPTR(stat);
- nptr=STATPTR(newstat);
+ ptr = entry;
+ sptr = STATPTR(stat);
+ nptr = STATPTR(newstat);
- if ( len == 1 ) {
- StatEntry *StopLow = STATPTR(stat);
- StatEntry *StopHigh = (StatEntry*)STATSTRPTR(stat);
+ if (len == 1)
+ {
+ StatEntry *StopLow = STATPTR(stat);
+ StatEntry *StopHigh = (StatEntry *) STATSTRPTR(stat);
- while (StopLow < StopHigh) {
- sptr=StopLow + (StopHigh - StopLow) / 2;
- if ( compareStatWord(sptr,*ptr,stat,txt) < 0 )
+ while (StopLow < StopHigh)
+ {
+ sptr = StopLow + (StopHigh - StopLow) / 2;
+ if (compareStatWord(sptr, *ptr, stat, txt) < 0)
StopLow = sptr + 1;
else
- StopHigh = sptr;
+ StopHigh = sptr;
}
- nptr =STATPTR(newstat) + (StopLow-STATPTR(stat));
- memcpy( STATPTR(newstat), STATPTR(stat), sizeof(StatEntry) * (StopLow-STATPTR(stat)) );
- nptr->nentry=POSDATALEN(txt,*ptr);
- if ( nptr->nentry==0 )
- nptr->nentry=1;
- nptr->ndoc=1;
- nptr->len=(*ptr)->len;
+ nptr = STATPTR(newstat) + (StopLow - STATPTR(stat));
+ memcpy(STATPTR(newstat), STATPTR(stat), sizeof(StatEntry) * (StopLow - STATPTR(stat)));
+ nptr->nentry = POSDATALEN(txt, *ptr);
+ if (nptr->nentry == 0)
+ nptr->nentry = 1;
+ nptr->ndoc = 1;
+ nptr->len = (*ptr)->len;
memcpy(curptr, STRPTR(txt) + (*ptr)->pos, nptr->len);
nptr->pos = curptr - STATSTRPTR(newstat);
- memcpy( nptr+1, StopLow, sizeof(StatEntry) * ( ((StatEntry*)STATSTRPTR(stat))-StopLow ) );
- } else {
- while( sptr-STATPTR(stat) < stat->size && ptr-entry<len) {
- if ( compareStatWord(sptr,*ptr,stat,txt) < 0 ) {
+ memcpy(nptr + 1, StopLow, sizeof(StatEntry) * (((StatEntry *) STATSTRPTR(stat)) - StopLow));
+ }
+ else
+ {
+ while (sptr - STATPTR(stat) < stat->size && ptr - entry < len)
+ {
+ if (compareStatWord(sptr, *ptr, stat, txt) < 0)
+ {
memcpy(nptr, sptr, sizeof(StatEntry));
sptr++;
- } else {
- nptr->nentry=POSDATALEN(txt,*ptr);
- if ( nptr->nentry==0 )
- nptr->nentry=1;
- nptr->ndoc=1;
- nptr->len=(*ptr)->len;
+ }
+ else
+ {
+ nptr->nentry = POSDATALEN(txt, *ptr);
+ if (nptr->nentry == 0)
+ nptr->nentry = 1;
+ nptr->ndoc = 1;
+ nptr->len = (*ptr)->len;
memcpy(curptr, STRPTR(txt) + (*ptr)->pos, nptr->len);
nptr->pos = curptr - STATSTRPTR(newstat);
curptr += nptr->len;
@@ -120,138 +140,168 @@ formstat(tsstat *stat, tsvector *txt, WordEntry** entry, uint32 len) {
nptr++;
}
- memcpy( nptr, sptr, sizeof(StatEntry)*( stat->size - (sptr-STATPTR(stat)) ) );
-
- while(ptr-entry<len) {
- nptr->nentry=POSDATALEN(txt,*ptr);
- if ( nptr->nentry==0 )
- nptr->nentry=1;
- nptr->ndoc=1;
- nptr->len=(*ptr)->len;
+ memcpy(nptr, sptr, sizeof(StatEntry) * (stat->size - (sptr - STATPTR(stat))));
+
+ while (ptr - entry < len)
+ {
+ nptr->nentry = POSDATALEN(txt, *ptr);
+ if (nptr->nentry == 0)
+ nptr->nentry = 1;
+ nptr->ndoc = 1;
+ nptr->len = (*ptr)->len;
memcpy(curptr, STRPTR(txt) + (*ptr)->pos, nptr->len);
nptr->pos = curptr - STATSTRPTR(newstat);
curptr += nptr->len;
- ptr++; nptr++;
+ ptr++;
+ nptr++;
}
}
return newstat;
-}
+}
PG_FUNCTION_INFO_V1(ts_accum);
-Datum ts_accum(PG_FUNCTION_ARGS);
-Datum
-ts_accum(PG_FUNCTION_ARGS) {
- tsstat *newstat,*stat= (tsstat*)PG_GETARG_POINTER(0);
- tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
- WordEntry **newentry=NULL;
- uint32 len=0, cur=0;
- StatEntry *sptr;
- WordEntry *wptr;
-
- if ( stat==NULL || PG_ARGISNULL(0) ) { /* Init in first */
- stat=palloc(STATHDRSIZE);
- stat->len=STATHDRSIZE;
- stat->size=0;
+Datum ts_accum(PG_FUNCTION_ARGS);
+Datum
+ts_accum(PG_FUNCTION_ARGS)
+{
+ tsstat *newstat,
+ *stat = (tsstat *) PG_GETARG_POINTER(0);
+ tsvector *txt = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
+ WordEntry **newentry = NULL;
+ uint32 len = 0,
+ cur = 0;
+ StatEntry *sptr;
+ WordEntry *wptr;
+
+ if (stat == NULL || PG_ARGISNULL(0))
+ { /* Init in first */
+ stat = palloc(STATHDRSIZE);
+ stat->len = STATHDRSIZE;
+ stat->size = 0;
}
/* simple check of correctness */
- if ( txt==NULL || PG_ARGISNULL(1) || txt->size==0 ) {
- PG_FREE_IF_COPY(txt,1);
+ if (txt == NULL || PG_ARGISNULL(1) || txt->size == 0)
+ {
+ PG_FREE_IF_COPY(txt, 1);
PG_RETURN_POINTER(stat);
}
- sptr=STATPTR(stat);
- wptr=ARRPTR(txt);
+ sptr = STATPTR(stat);
+ wptr = ARRPTR(txt);
- if ( stat->size < 100*txt->size ) { /* merge */
- while( sptr-STATPTR(stat) < stat->size && wptr-ARRPTR(txt) < txt->size ) {
- int cmp = compareStatWord(sptr,wptr,stat,txt);
- if ( cmp<0 ) {
+ if (stat->size < 100 * txt->size)
+ { /* merge */
+ while (sptr - STATPTR(stat) < stat->size && wptr - ARRPTR(txt) < txt->size)
+ {
+ int cmp = compareStatWord(sptr, wptr, stat, txt);
+
+ if (cmp < 0)
sptr++;
- } else if ( cmp==0 ) {
- int n=POSDATALEN(txt,wptr);
-
- if (n==0) n=1;
+ else if (cmp == 0)
+ {
+ int n = POSDATALEN(txt, wptr);
+
+ if (n == 0)
+ n = 1;
sptr->ndoc++;
- sptr->nentry +=n ;
- sptr++; wptr++;
- } else {
- if ( cur==len )
- newentry=SEI_realloc(newentry, &len);
- newentry[cur]=wptr;
- wptr++; cur++;
+ sptr->nentry += n;
+ sptr++;
+ wptr++;
+ }
+ else
+ {
+ if (cur == len)
+ newentry = SEI_realloc(newentry, &len);
+ newentry[cur] = wptr;
+ wptr++;
+ cur++;
}
}
- while( wptr-ARRPTR(txt) < txt->size ) {
- if ( cur==len )
- newentry=SEI_realloc(newentry, &len);
- newentry[cur]=wptr;
- wptr++; cur++;
+ while (wptr - ARRPTR(txt) < txt->size)
+ {
+ if (cur == len)
+ newentry = SEI_realloc(newentry, &len);
+ newentry[cur] = wptr;
+ wptr++;
+ cur++;
}
- } else { /* search */
- while( wptr-ARRPTR(txt) < txt->size ) {
- StatEntry *StopLow = STATPTR(stat);
- StatEntry *StopHigh = (StatEntry*)STATSTRPTR(stat);
- int cmp;
-
- while (StopLow < StopHigh) {
- sptr=StopLow + (StopHigh - StopLow) / 2;
- cmp = compareStatWord(sptr,wptr,stat,txt);
- if (cmp==0) {
- int n=POSDATALEN(txt,wptr);
- if (n==0) n=1;
+ }
+ else
+ { /* search */
+ while (wptr - ARRPTR(txt) < txt->size)
+ {
+ StatEntry *StopLow = STATPTR(stat);
+ StatEntry *StopHigh = (StatEntry *) STATSTRPTR(stat);
+ int cmp;
+
+ while (StopLow < StopHigh)
+ {
+ sptr = StopLow + (StopHigh - StopLow) / 2;
+ cmp = compareStatWord(sptr, wptr, stat, txt);
+ if (cmp == 0)
+ {
+ int n = POSDATALEN(txt, wptr);
+
+ if (n == 0)
+ n = 1;
sptr->ndoc++;
- sptr->nentry +=n ;
+ sptr->nentry += n;
break;
- } else if ( cmp < 0 )
+ }
+ else if (cmp < 0)
StopLow = sptr + 1;
else
- StopHigh = sptr;
+ StopHigh = sptr;
}
-
- if ( StopLow >= StopHigh ) { /* not found */
- if ( cur==len )
- newentry=SEI_realloc(newentry, &len);
- newentry[cur]=wptr;
+
+ if (StopLow >= StopHigh)
+ { /* not found */
+ if (cur == len)
+ newentry = SEI_realloc(newentry, &len);
+ newentry[cur] = wptr;
cur++;
}
wptr++;
- }
+ }
}
-
- if ( cur==0 ) { /* no new words */
- PG_FREE_IF_COPY(txt,1);
+
+ if (cur == 0)
+ { /* no new words */
+ PG_FREE_IF_COPY(txt, 1);
PG_RETURN_POINTER(stat);
}
newstat = formstat(stat, txt, newentry, cur);
pfree(newentry);
- PG_FREE_IF_COPY(txt,1);
+ PG_FREE_IF_COPY(txt, 1);
/* pfree(stat); */
PG_RETURN_POINTER(newstat);
}
-typedef struct {
- uint32 cur;
- tsvector *stat;
-} StatStorage;
+typedef struct
+{
+ uint32 cur;
+ tsvector *stat;
+} StatStorage;
static void
-ts_setup_firstcall(FuncCallContext *funcctx, tsstat *stat) {
- TupleDesc tupdesc;
- MemoryContext oldcontext;
- StatStorage *st;
-
+ts_setup_firstcall(FuncCallContext *funcctx, tsstat * stat)
+{
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ StatStorage *st;
+
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
- st=palloc( sizeof(StatStorage) );
- st->cur=0;
- st->stat=palloc( stat->len );
+ st = palloc(sizeof(StatStorage));
+ st->cur = 0;
+ st->stat = palloc(stat->len);
memcpy(st->stat, stat, stat->len);
- funcctx->user_fctx = (void*)st;
+ funcctx->user_fctx = (void *) st;
tupdesc = RelationNameGetTupleDesc("statinfo");
funcctx->slot = TupleDescGetSlot(tupdesc);
funcctx->attinmeta = TupleDescGetAttInMetadata(tupdesc);
@@ -260,162 +310,175 @@ ts_setup_firstcall(FuncCallContext *funcctx, tsstat *stat) {
static Datum
-ts_process_call(FuncCallContext *funcctx) {
- StatStorage *st;
- st=(StatStorage*)funcctx->user_fctx;
-
- if ( st->cur < st->stat->size ) {
- Datum result;
- char* values[3];
- char ndoc[16];
- char nentry[16];
- StatEntry *entry=STATPTR(st->stat) + st->cur;
- HeapTuple tuple;
-
- values[1]=ndoc;
- sprintf(ndoc,"%d",entry->ndoc);
- values[2]=nentry;
- sprintf(nentry,"%d",entry->nentry);
- values[0]=palloc( entry->len+1 );
- memcpy( values[0], STATSTRPTR(st->stat)+entry->pos, entry->len);
- (values[0])[entry->len]='\0';
+ts_process_call(FuncCallContext *funcctx)
+{
+ StatStorage *st;
+
+ st = (StatStorage *) funcctx->user_fctx;
+
+ if (st->cur < st->stat->size)
+ {
+ Datum result;
+ char *values[3];
+ char ndoc[16];
+ char nentry[16];
+ StatEntry *entry = STATPTR(st->stat) + st->cur;
+ HeapTuple tuple;
+
+ values[1] = ndoc;
+ sprintf(ndoc, "%d", entry->ndoc);
+ values[2] = nentry;
+ sprintf(nentry, "%d", entry->nentry);
+ values[0] = palloc(entry->len + 1);
+ memcpy(values[0], STATSTRPTR(st->stat) + entry->pos, entry->len);
+ (values[0])[entry->len] = '\0';
tuple = BuildTupleFromCStrings(funcctx->attinmeta, values);
result = TupleGetDatum(funcctx->slot, tuple);
pfree(values[0]);
st->cur++;
- return result;
- } else {
+ return result;
+ }
+ else
+ {
pfree(st->stat);
pfree(st);
}
-
- return (Datum)0;
+
+ return (Datum) 0;
}
PG_FUNCTION_INFO_V1(ts_accum_finish);
-Datum ts_accum_finish(PG_FUNCTION_ARGS);
-Datum
-ts_accum_finish(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
-
- if (SRF_IS_FIRSTCALL()) {
+Datum ts_accum_finish(PG_FUNCTION_ARGS);
+Datum
+ts_accum_finish(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
funcctx = SRF_FIRSTCALL_INIT();
- ts_setup_firstcall(funcctx, (tsstat*)PG_GETARG_POINTER(0) );
+ ts_setup_firstcall(funcctx, (tsstat *) PG_GETARG_POINTER(0));
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=ts_process_call(funcctx)) != (Datum)0 )
+ if ((result = ts_process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
-static Oid tiOid=InvalidOid;
-static void
-get_ti_Oid(void) {
- int ret;
- bool isnull;
+static Oid tiOid = InvalidOid;
+static void
+get_ti_Oid(void)
+{
+ int ret;
+ bool isnull;
- if ( (ret = SPI_exec("select oid from pg_type where typname='tsvector'",1)) < 0 )
+ if ((ret = SPI_exec("select oid from pg_type where typname='tsvector'", 1)) < 0)
/* internal error */
elog(ERROR, "SPI_exec to get tsvector oid returns %d", ret);
- if ( SPI_processed<0 )
+ if (SPI_processed < 0)
/* internal error */
elog(ERROR, "There is no tsvector type");
- tiOid = DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- if ( tiOid==InvalidOid )
+ tiOid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ if (tiOid == InvalidOid)
/* internal error */
elog(ERROR, "tsvector type has InvalidOid");
}
-static tsstat*
-ts_stat_sql(text *txt) {
- char *query=text2char(txt);
- int i;
- tsstat *newstat,*stat;
- bool isnull;
- Portal portal;
- void *plan;
-
- if ( tiOid==InvalidOid )
+static tsstat *
+ts_stat_sql(text *txt)
+{
+ char *query = text2char(txt);
+ int i;
+ tsstat *newstat,
+ *stat;
+ bool isnull;
+ Portal portal;
+ void *plan;
+
+ if (tiOid == InvalidOid)
get_ti_Oid();
- if ( (plan = SPI_prepare(query,0,NULL))==NULL )
+ if ((plan = SPI_prepare(query, 0, NULL)) == NULL)
/* internal error */
- elog(ERROR, "SPI_prepare('%s') returns NULL",query);
+ elog(ERROR, "SPI_prepare('%s') returns NULL", query);
- if ( (portal = SPI_cursor_open(NULL, plan, NULL, NULL)) == NULL )
+ if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL)) == NULL)
/* internal error */
- elog(ERROR, "SPI_cursor_open('%s') returns NULL",query);
+ elog(ERROR, "SPI_cursor_open('%s') returns NULL", query);
SPI_cursor_fetch(portal, true, 100);
- if ( SPI_tuptable->tupdesc->natts != 1 )
+ if (SPI_tuptable->tupdesc->natts != 1)
/* internal error */
elog(ERROR, "number of fields doesn't equal to 1");
- if ( SPI_gettypeid(SPI_tuptable->tupdesc, 1) != tiOid )
+ if (SPI_gettypeid(SPI_tuptable->tupdesc, 1) != tiOid)
/* internal error */
elog(ERROR, "column isn't of tsvector type");
- stat=palloc(STATHDRSIZE);
- stat->len=STATHDRSIZE;
- stat->size=0;
-
- while(SPI_processed>0) {
- for(i=0;i<SPI_processed;i++) {
- Datum data=SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
-
- if ( !isnull ) {
- newstat = (tsstat*)DatumGetPointer(DirectFunctionCall2(
- ts_accum,
- PointerGetDatum(stat),
- data
- ));
- if ( stat!=newstat && stat )
+ stat = palloc(STATHDRSIZE);
+ stat->len = STATHDRSIZE;
+ stat->size = 0;
+
+ while (SPI_processed > 0)
+ {
+ for (i = 0; i < SPI_processed; i++)
+ {
+ Datum data = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
+
+ if (!isnull)
+ {
+ newstat = (tsstat *) DatumGetPointer(DirectFunctionCall2(
+ ts_accum,
+ PointerGetDatum(stat),
+ data
+ ));
+ if (stat != newstat && stat)
pfree(stat);
- stat=newstat;
+ stat = newstat;
}
- }
+ }
SPI_freetuptable(SPI_tuptable);
- SPI_cursor_fetch(portal, true, 100);
- }
+ SPI_cursor_fetch(portal, true, 100);
+ }
SPI_freetuptable(SPI_tuptable);
SPI_cursor_close(portal);
SPI_freeplan(plan);
pfree(query);
- return stat;
+ return stat;
}
PG_FUNCTION_INFO_V1(ts_stat);
-Datum ts_stat(PG_FUNCTION_ARGS);
-Datum
-ts_stat(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
-
- if (SRF_IS_FIRSTCALL()) {
- tsstat *stat;
- text *txt=PG_GETARG_TEXT_P(0);
-
+Datum ts_stat(PG_FUNCTION_ARGS);
+Datum
+ts_stat(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ tsstat *stat;
+ text *txt = PG_GETARG_TEXT_P(0);
+
funcctx = SRF_FIRSTCALL_INIT();
SPI_connect();
stat = ts_stat_sql(txt);
- PG_FREE_IF_COPY(txt,0);
- ts_setup_firstcall(funcctx, stat );
+ PG_FREE_IF_COPY(txt, 0);
+ ts_setup_firstcall(funcctx, stat);
SPI_finish();
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=ts_process_call(funcctx)) != (Datum)0 )
+ if ((result = ts_process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
-
-
diff --git a/contrib/tsearch2/ts_stat.h b/contrib/tsearch2/ts_stat.h
index 023a83346cb..37d1e7b660a 100644
--- a/contrib/tsearch2/ts_stat.h
+++ b/contrib/tsearch2/ts_stat.h
@@ -8,14 +8,16 @@
#include "utils/builtins.h"
#include "storage/bufpage.h"
-typedef struct {
- uint32 len;
- uint32 pos;
- uint32 ndoc;
- uint32 nentry;
+typedef struct
+{
+ uint32 len;
+ uint32 pos;
+ uint32 ndoc;
+ uint32 nentry;
} StatEntry;
-typedef struct {
+typedef struct
+{
int4 len;
int4 size;
char data[1];
diff --git a/contrib/tsearch2/tsvector.c b/contrib/tsearch2/tsvector.c
index 7c258f1305f..c8002c0ab3f 100644
--- a/contrib/tsearch2/tsvector.c
+++ b/contrib/tsearch2/tsvector.c
@@ -31,8 +31,10 @@ Datum tsvector_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(to_tsvector);
Datum to_tsvector(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(to_tsvector_current);
Datum to_tsvector_current(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(to_tsvector_name);
Datum to_tsvector_name(PG_FUNCTION_ARGS);
@@ -45,32 +47,38 @@ Datum tsvector_length(PG_FUNCTION_ARGS);
/*
* in/out text index type
*/
-static int
-comparePos(const void *a, const void *b) {
- if ( ((WordEntryPos *) a)->pos == ((WordEntryPos *) b)->pos )
+static int
+comparePos(const void *a, const void *b)
+{
+ if (((WordEntryPos *) a)->pos == ((WordEntryPos *) b)->pos)
return 1;
- return ( ((WordEntryPos *) a)->pos > ((WordEntryPos *) b)->pos ) ? 1 : -1;
+ return (((WordEntryPos *) a)->pos > ((WordEntryPos *) b)->pos) ? 1 : -1;
}
static int
-uniquePos(WordEntryPos *a, int4 l) {
- WordEntryPos *ptr, *res;
+uniquePos(WordEntryPos * a, int4 l)
+{
+ WordEntryPos *ptr,
+ *res;
- res=a;
- if (l==1)
+ res = a;
+ if (l == 1)
return l;
qsort((void *) a, l, sizeof(WordEntryPos), comparePos);
ptr = a + 1;
- while (ptr - a < l) {
- if ( ptr->pos != res->pos ) {
+ while (ptr - a < l)
+ {
+ if (ptr->pos != res->pos)
+ {
res++;
res->pos = ptr->pos;
res->weight = ptr->weight;
- if ( res-a >= MAXNUMPOS-1 || res->pos == MAXENTRYPOS-1 )
+ if (res - a >= MAXNUMPOS - 1 || res->pos == MAXENTRYPOS - 1)
break;
- } else if ( ptr->weight > res->weight )
+ }
+ else if (ptr->weight > res->weight)
res->weight = ptr->weight;
ptr++;
}
@@ -81,27 +89,29 @@ static char *BufferStr;
static int
compareentry(const void *a, const void *b)
{
- if ( ((WordEntryIN *) a)->entry.len == ((WordEntryIN *) b)->entry.len)
+ if (((WordEntryIN *) a)->entry.len == ((WordEntryIN *) b)->entry.len)
{
return strncmp(
&BufferStr[((WordEntryIN *) a)->entry.pos],
&BufferStr[((WordEntryIN *) b)->entry.pos],
((WordEntryIN *) a)->entry.len);
}
- return ( ((WordEntryIN *) a)->entry.len > ((WordEntryIN *) b)->entry.len ) ? 1 : -1;
+ return (((WordEntryIN *) a)->entry.len > ((WordEntryIN *) b)->entry.len) ? 1 : -1;
}
static int
uniqueentry(WordEntryIN * a, int4 l, char *buf, int4 *outbuflen)
{
- WordEntryIN *ptr,
+ WordEntryIN *ptr,
*res;
res = a;
- if (l == 1) {
- if ( a->entry.haspos ) {
- *(uint16*)(a->pos) = uniquePos( &(a->pos[1]), *(uint16*)(a->pos));
- *outbuflen = SHORTALIGN(res->entry.len) + (*(uint16*)(a->pos) +1 )*sizeof(WordEntryPos);
+ if (l == 1)
+ {
+ if (a->entry.haspos)
+ {
+ *(uint16 *) (a->pos) = uniquePos(&(a->pos[1]), *(uint16 *) (a->pos));
+ *outbuflen = SHORTALIGN(res->entry.len) + (*(uint16 *) (a->pos) + 1) * sizeof(WordEntryPos);
}
return l;
}
@@ -115,31 +125,39 @@ uniqueentry(WordEntryIN * a, int4 l, char *buf, int4 *outbuflen)
if (!(ptr->entry.len == res->entry.len &&
strncmp(&buf[ptr->entry.pos], &buf[res->entry.pos], res->entry.len) == 0))
{
- if ( res->entry.haspos ) {
- *(uint16*)(res->pos) = uniquePos( &(res->pos[1]), *(uint16*)(res->pos));
- *outbuflen += *(uint16*)(res->pos) * sizeof(WordEntryPos);
+ if (res->entry.haspos)
+ {
+ *(uint16 *) (res->pos) = uniquePos(&(res->pos[1]), *(uint16 *) (res->pos));
+ *outbuflen += *(uint16 *) (res->pos) * sizeof(WordEntryPos);
}
*outbuflen += SHORTALIGN(res->entry.len);
res++;
- memcpy(res,ptr,sizeof(WordEntryIN));
- } else if ( ptr->entry.haspos ){
- if ( res->entry.haspos ) {
- int4 len=*(uint16*)(ptr->pos) + 1 + *(uint16*)(res->pos);
- res->pos=(WordEntryPos*)repalloc( res->pos, len*sizeof(WordEntryPos));
- memcpy( &(res->pos[ *(uint16*)(res->pos) + 1 ]),
- &(ptr->pos[1]), *(uint16*)(ptr->pos) * sizeof(WordEntryPos));
- *(uint16*)(res->pos) += *(uint16*)(ptr->pos);
- pfree( ptr->pos );
- } else {
- res->entry.haspos=1;
+ memcpy(res, ptr, sizeof(WordEntryIN));
+ }
+ else if (ptr->entry.haspos)
+ {
+ if (res->entry.haspos)
+ {
+ int4 len = *(uint16 *) (ptr->pos) + 1 + *(uint16 *) (res->pos);
+
+ res->pos = (WordEntryPos *) repalloc(res->pos, len * sizeof(WordEntryPos));
+ memcpy(&(res->pos[*(uint16 *) (res->pos) + 1]),
+ &(ptr->pos[1]), *(uint16 *) (ptr->pos) * sizeof(WordEntryPos));
+ *(uint16 *) (res->pos) += *(uint16 *) (ptr->pos);
+ pfree(ptr->pos);
+ }
+ else
+ {
+ res->entry.haspos = 1;
res->pos = ptr->pos;
}
}
ptr++;
}
- if ( res->entry.haspos ) {
- *(uint16*)(res->pos) = uniquePos( &(res->pos[1]), *(uint16*)(res->pos));
- *outbuflen += *(uint16*)(res->pos) * sizeof(WordEntryPos);
+ if (res->entry.haspos)
+ {
+ *(uint16 *) (res->pos) = uniquePos(&(res->pos[1]), *(uint16 *) (res->pos));
+ *outbuflen += *(uint16 *) (res->pos) * sizeof(WordEntryPos);
}
*outbuflen += SHORTALIGN(res->entry.len);
@@ -150,7 +168,7 @@ uniqueentry(WordEntryIN * a, int4 l, char *buf, int4 *outbuflen)
#define WAITENDWORD 2
#define WAITNEXTCHAR 3
#define WAITENDCMPLX 4
-#define WAITPOSINFO 5
+#define WAITPOSINFO 5
#define INPOSINFO 6
#define WAITPOSDELIM 7
@@ -172,7 +190,7 @@ gettoken_tsvector(TI_IN_STATE * state)
state->curpos = state->word;
state->state = WAITWORD;
- state->alen=0;
+ state->alen = 0;
while (1)
{
@@ -228,14 +246,16 @@ gettoken_tsvector(TI_IN_STATE * state)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
*(state->curpos) = '\0';
- return 1;
- } else if ( *(state->prsbuf) == ':' ) {
+ return 1;
+ }
+ else if (*(state->prsbuf) == ':')
+ {
if (state->curpos == state->word)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
*(state->curpos) = '\0';
- if ( state->oprisdelim )
+ if (state->oprisdelim)
return 1;
else
state->state = INPOSINFO;
@@ -257,10 +277,12 @@ gettoken_tsvector(TI_IN_STATE * state)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- if ( state->oprisdelim ) {
+ if (state->oprisdelim)
+ {
state->prsbuf++;
return 1;
- } else
+ }
+ else
state->state = WAITPOSINFO;
}
else if (*(state->prsbuf) == '\\')
@@ -278,67 +300,87 @@ gettoken_tsvector(TI_IN_STATE * state)
*(state->curpos) = *(state->prsbuf);
state->curpos++;
}
- } else if (state->state == WAITPOSINFO) {
- if ( *(state->prsbuf) == ':' )
- state->state=INPOSINFO;
+ }
+ else if (state->state == WAITPOSINFO)
+ {
+ if (*(state->prsbuf) == ':')
+ state->state = INPOSINFO;
else
return 1;
- } else if (state->state == INPOSINFO) {
- if ( isdigit(*(state->prsbuf)) ) {
- if ( state->alen==0 ) {
- state->alen=4;
- state->pos = (WordEntryPos*)palloc( sizeof(WordEntryPos)*state->alen );
- *(uint16*)(state->pos)=0;
- } else if ( *(uint16*)(state->pos) +1 >= state->alen ) {
- state->alen *= 2;
- state->pos = (WordEntryPos*)repalloc( state->pos, sizeof(WordEntryPos)*state->alen );
+ }
+ else if (state->state == INPOSINFO)
+ {
+ if (isdigit(*(state->prsbuf)))
+ {
+ if (state->alen == 0)
+ {
+ state->alen = 4;
+ state->pos = (WordEntryPos *) palloc(sizeof(WordEntryPos) * state->alen);
+ *(uint16 *) (state->pos) = 0;
}
- ( *(uint16*)(state->pos) )++;
- state->pos[ *(uint16*)(state->pos) ].pos = LIMITPOS(atoi(state->prsbuf));
- if ( state->pos[ *(uint16*)(state->pos) ].pos == 0 )
+ else if (*(uint16 *) (state->pos) + 1 >= state->alen)
+ {
+ state->alen *= 2;
+ state->pos = (WordEntryPos *) repalloc(state->pos, sizeof(WordEntryPos) * state->alen);
+ }
+ (*(uint16 *) (state->pos))++;
+ state->pos[*(uint16 *) (state->pos)].pos = LIMITPOS(atoi(state->prsbuf));
+ if (state->pos[*(uint16 *) (state->pos)].pos == 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("wrong position info")));
- state->pos[ *(uint16*)(state->pos) ].weight = 0;
+ state->pos[*(uint16 *) (state->pos)].weight = 0;
state->state = WAITPOSDELIM;
- } else
+ }
+ else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- } else if (state->state == WAITPOSDELIM) {
- if ( *(state->prsbuf) == ',' ) {
+ }
+ else if (state->state == WAITPOSDELIM)
+ {
+ if (*(state->prsbuf) == ',')
state->state = INPOSINFO;
- } else if ( tolower(*(state->prsbuf)) == 'a' || *(state->prsbuf)=='*' ) {
- if ( state->pos[ *(uint16*)(state->pos) ].weight )
+ else if (tolower(*(state->prsbuf)) == 'a' || *(state->prsbuf) == '*')
+ {
+ if (state->pos[*(uint16 *) (state->pos)].weight)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- state->pos[ *(uint16*)(state->pos) ].weight = 3;
- } else if ( tolower(*(state->prsbuf)) == 'b' ) {
- if ( state->pos[ *(uint16*)(state->pos) ].weight )
+ state->pos[*(uint16 *) (state->pos)].weight = 3;
+ }
+ else if (tolower(*(state->prsbuf)) == 'b')
+ {
+ if (state->pos[*(uint16 *) (state->pos)].weight)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- state->pos[ *(uint16*)(state->pos) ].weight = 2;
- } else if ( tolower(*(state->prsbuf)) == 'c' ) {
- if ( state->pos[ *(uint16*)(state->pos) ].weight )
+ state->pos[*(uint16 *) (state->pos)].weight = 2;
+ }
+ else if (tolower(*(state->prsbuf)) == 'c')
+ {
+ if (state->pos[*(uint16 *) (state->pos)].weight)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- state->pos[ *(uint16*)(state->pos) ].weight = 1;
- } else if ( tolower(*(state->prsbuf)) == 'd' ) {
- if ( state->pos[ *(uint16*)(state->pos) ].weight )
+ state->pos[*(uint16 *) (state->pos)].weight = 1;
+ }
+ else if (tolower(*(state->prsbuf)) == 'd')
+ {
+ if (state->pos[*(uint16 *) (state->pos)].weight)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- state->pos[ *(uint16*)(state->pos) ].weight = 0;
- } else if ( isspace(*(state->prsbuf)) || *(state->prsbuf) == '\0' ) {
+ state->pos[*(uint16 *) (state->pos)].weight = 0;
+ }
+ else if (isspace(*(state->prsbuf)) || *(state->prsbuf) == '\0')
return 1;
- } else if ( !isdigit(*(state->prsbuf)) )
+ else if (!isdigit(*(state->prsbuf)))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
- } else
+ }
+ else
/* internal error */
elog(ERROR, "internal error");
state->prsbuf++;
@@ -352,11 +394,11 @@ tsvector_in(PG_FUNCTION_ARGS)
{
char *buf = PG_GETARG_CSTRING(0);
TI_IN_STATE state;
- WordEntryIN *arr;
+ WordEntryIN *arr;
WordEntry *inarr;
int4 len = 0,
totallen = 64;
- tsvector *in;
+ tsvector *in;
char *tmpbuf,
*cur;
int4 i,
@@ -388,28 +430,30 @@ tsvector_in(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
- arr[len].entry.len= state.curpos - state.word;
+ arr[len].entry.len = state.curpos - state.word;
if (cur - tmpbuf > MAXSTRPOS)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("too long value")));
- arr[len].entry.pos=cur - tmpbuf;
+ arr[len].entry.pos = cur - tmpbuf;
memcpy((void *) cur, (void *) state.word, arr[len].entry.len);
cur += arr[len].entry.len;
- if ( state.alen ) {
- arr[len].entry.haspos=1;
+ if (state.alen)
+ {
+ arr[len].entry.haspos = 1;
arr[len].pos = state.pos;
- } else
- arr[len].entry.haspos=0;
+ }
+ else
+ arr[len].entry.haspos = 0;
len++;
}
pfree(state.word);
- if ( len > 0 )
+ if (len > 0)
len = uniqueentry(arr, len, tmpbuf, &buflen);
totallen = CALCDATASIZE(len, buflen);
in = (tsvector *) palloc(totallen);
- memset(in,0,totallen);
+ memset(in, 0, totallen);
in->len = totallen;
in->size = len;
cur = STRPTR(in);
@@ -417,14 +461,15 @@ tsvector_in(PG_FUNCTION_ARGS)
for (i = 0; i < len; i++)
{
memcpy((void *) cur, (void *) &tmpbuf[arr[i].entry.pos], arr[i].entry.len);
- arr[i].entry.pos=cur - STRPTR(in);
+ arr[i].entry.pos = cur - STRPTR(in);
cur += SHORTALIGN(arr[i].entry.len);
- if ( arr[i].entry.haspos ) {
- memcpy( cur, arr[i].pos, (*(uint16*)arr[i].pos + 1) * sizeof(WordEntryPos));
- cur += (*(uint16*)arr[i].pos + 1) * sizeof(WordEntryPos);
- pfree( arr[i].pos );
+ if (arr[i].entry.haspos)
+ {
+ memcpy(cur, arr[i].pos, (*(uint16 *) arr[i].pos + 1) * sizeof(WordEntryPos));
+ cur += (*(uint16 *) arr[i].pos + 1) * sizeof(WordEntryPos);
+ pfree(arr[i].pos);
}
- memcpy( &(inarr[i]), &(arr[i].entry), sizeof(WordEntry) );
+ memcpy(&(inarr[i]), &(arr[i].entry), sizeof(WordEntry));
}
pfree(tmpbuf);
pfree(arr);
@@ -434,7 +479,7 @@ tsvector_in(PG_FUNCTION_ARGS)
Datum
tsvector_length(PG_FUNCTION_ARGS)
{
- tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
int4 ret = in->size;
PG_FREE_IF_COPY(in, 0);
@@ -444,26 +489,28 @@ tsvector_length(PG_FUNCTION_ARGS)
Datum
tsvector_out(PG_FUNCTION_ARGS)
{
- tsvector *out = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ tsvector *out = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
char *outbuf;
int4 i,
j,
- lenbuf = 0, pp;
+ lenbuf = 0,
+ pp;
WordEntry *ptr = ARRPTR(out);
char *curin,
*curout;
- lenbuf=out->size * 2 /* '' */ + out->size - 1 /* space */ + 2 /*\0*/;
- for (i = 0; i < out->size; i++) {
- lenbuf += ptr[i].len*2 /*for escape */;
- if ( ptr[i].haspos )
- lenbuf += 7*POSDATALEN(out, &(ptr[i]));
- }
+ lenbuf = out->size * 2 /* '' */ + out->size - 1 /* space */ + 2 /* \0 */ ;
+ for (i = 0; i < out->size; i++)
+ {
+ lenbuf += ptr[i].len * 2 /* for escape */ ;
+ if (ptr[i].haspos)
+ lenbuf += 7 * POSDATALEN(out, &(ptr[i]));
+ }
curout = outbuf = (char *) palloc(lenbuf);
for (i = 0; i < out->size; i++)
{
- curin = STRPTR(out)+ptr->pos;
+ curin = STRPTR(out) + ptr->pos;
if (i != 0)
*curout++ = ' ';
*curout++ = '\'';
@@ -481,27 +528,40 @@ tsvector_out(PG_FUNCTION_ARGS)
*curout++ = *curin++;
}
*curout++ = '\'';
- if ( (pp=POSDATALEN(out,ptr)) != 0 ) {
+ if ((pp = POSDATALEN(out, ptr)) != 0)
+ {
WordEntryPos *wptr;
+
*curout++ = ':';
- wptr=POSDATAPTR(out,ptr);
- while(pp) {
- sprintf(curout,"%d",wptr->pos);
- curout=strchr(curout,'\0');
- switch( wptr->weight ) {
- case 3: *curout++ = 'A'; break;
- case 2: *curout++ = 'B'; break;
- case 1: *curout++ = 'C'; break;
- case 0:
- default: break;
+ wptr = POSDATAPTR(out, ptr);
+ while (pp)
+ {
+ sprintf(curout, "%d", wptr->pos);
+ curout = strchr(curout, '\0');
+ switch (wptr->weight)
+ {
+ case 3:
+ *curout++ = 'A';
+ break;
+ case 2:
+ *curout++ = 'B';
+ break;
+ case 1:
+ *curout++ = 'C';
+ break;
+ case 0:
+ default:
+ break;
}
- if ( pp>1 ) *curout++ = ',';
- pp--; wptr++;
+ if (pp > 1)
+ *curout++ = ',';
+ pp--;
+ wptr++;
}
}
ptr++;
}
- *curout='\0';
+ *curout = '\0';
outbuf[lenbuf - 1] = '\0';
PG_FREE_IF_COPY(out, 0);
PG_RETURN_POINTER(outbuf);
@@ -510,13 +570,15 @@ tsvector_out(PG_FUNCTION_ARGS)
static int
compareWORD(const void *a, const void *b)
{
- if (((WORD *) a)->len == ((WORD *) b)->len) {
- int res = strncmp(
- ((WORD *) a)->word,
- ((WORD *) b)->word,
- ((WORD *) b)->len);
- if ( res==0 )
- return ( ((WORD *) a)->pos.pos > ((WORD *) b)->pos.pos ) ? 1 : -1;
+ if (((WORD *) a)->len == ((WORD *) b)->len)
+ {
+ int res = strncmp(
+ ((WORD *) a)->word,
+ ((WORD *) b)->word,
+ ((WORD *) b)->len);
+
+ if (res == 0)
+ return (((WORD *) a)->pos.pos > ((WORD *) b)->pos.pos) ? 1 : -1;
return res;
}
return (((WORD *) a)->len > ((WORD *) b)->len) ? 1 : -1;
@@ -527,14 +589,15 @@ uniqueWORD(WORD * a, int4 l)
{
WORD *ptr,
*res;
- int tmppos;
-
- if (l == 1) {
- tmppos=LIMITPOS(a->pos.pos);
- a->alen=2;
- a->pos.apos=(uint16*)palloc( sizeof(uint16)*a->alen );
- a->pos.apos[0]=1;
- a->pos.apos[1]=tmppos;
+ int tmppos;
+
+ if (l == 1)
+ {
+ tmppos = LIMITPOS(a->pos.pos);
+ a->alen = 2;
+ a->pos.apos = (uint16 *) palloc(sizeof(uint16) * a->alen);
+ a->pos.apos[0] = 1;
+ a->pos.apos[1] = tmppos;
return l;
}
@@ -542,11 +605,11 @@ uniqueWORD(WORD * a, int4 l)
ptr = a + 1;
qsort((void *) a, l, sizeof(WORD), compareWORD);
- tmppos=LIMITPOS(a->pos.pos);
- a->alen=2;
- a->pos.apos=(uint16*)palloc( sizeof(uint16)*a->alen );
- a->pos.apos[0]=1;
- a->pos.apos[1]=tmppos;
+ tmppos = LIMITPOS(a->pos.pos);
+ a->alen = 2;
+ a->pos.apos = (uint16 *) palloc(sizeof(uint16) * a->alen);
+ a->pos.apos[0] = 1;
+ a->pos.apos[1] = tmppos;
while (ptr - a < l)
{
@@ -556,20 +619,24 @@ uniqueWORD(WORD * a, int4 l)
res++;
res->len = ptr->len;
res->word = ptr->word;
- tmppos=LIMITPOS(ptr->pos.pos);
- res->alen=2;
- res->pos.apos=(uint16*)palloc( sizeof(uint16)*res->alen );
- res->pos.apos[0]=1;
- res->pos.apos[1]=tmppos;
- } else {
+ tmppos = LIMITPOS(ptr->pos.pos);
+ res->alen = 2;
+ res->pos.apos = (uint16 *) palloc(sizeof(uint16) * res->alen);
+ res->pos.apos[0] = 1;
+ res->pos.apos[1] = tmppos;
+ }
+ else
+ {
pfree(ptr->word);
- if ( res->pos.apos[0] < MAXNUMPOS-1 && res->pos.apos[ res->pos.apos[0] ] != MAXENTRYPOS-1 ) {
- if ( res->pos.apos[0]+1 >= res->alen ) {
- res->alen*=2;
- res->pos.apos=(uint16*)repalloc( res->pos.apos, sizeof(uint16)*res->alen );
+ if (res->pos.apos[0] < MAXNUMPOS - 1 && res->pos.apos[res->pos.apos[0]] != MAXENTRYPOS - 1)
+ {
+ if (res->pos.apos[0] + 1 >= res->alen)
+ {
+ res->alen *= 2;
+ res->pos.apos = (uint16 *) repalloc(res->pos.apos, sizeof(uint16) * res->alen);
}
- res->pos.apos[ res->pos.apos[0]+1 ] = LIMITPOS(ptr->pos.pos);
- res->pos.apos[0]++;
+ res->pos.apos[res->pos.apos[0] + 1] = LIMITPOS(ptr->pos.pos);
+ res->pos.apos[0]++;
}
}
ptr++;
@@ -584,25 +651,27 @@ uniqueWORD(WORD * a, int4 l)
static tsvector *
makevalue(PRSTEXT * prs)
{
- int4 i,j,
+ int4 i,
+ j,
lenstr = 0,
totallen;
- tsvector *in;
+ tsvector *in;
WordEntry *ptr;
char *str,
*cur;
prs->curwords = uniqueWORD(prs->words, prs->curwords);
- for (i = 0; i < prs->curwords; i++) {
+ for (i = 0; i < prs->curwords; i++)
+ {
lenstr += SHORTALIGN(prs->words[i].len);
- if ( prs->words[i].alen )
+ if (prs->words[i].alen)
lenstr += sizeof(uint16) + prs->words[i].pos.apos[0] * sizeof(WordEntryPos);
}
totallen = CALCDATASIZE(prs->curwords, lenstr);
in = (tsvector *) palloc(totallen);
- memset(in,0,totallen);
+ memset(in, 0, totallen);
in->len = totallen;
in->size = prs->curwords;
@@ -615,24 +684,27 @@ makevalue(PRSTEXT * prs)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("value is too big")));
- ptr->pos= cur - str;
+ ptr->pos = cur - str;
memcpy((void *) cur, (void *) prs->words[i].word, prs->words[i].len);
pfree(prs->words[i].word);
cur += SHORTALIGN(prs->words[i].len);
- if ( prs->words[i].alen ) {
+ if (prs->words[i].alen)
+ {
WordEntryPos *wptr;
-
- ptr->haspos=1;
- *(uint16*)cur = prs->words[i].pos.apos[0];
- wptr=POSDATAPTR(in,ptr);
- for(j=0;j<*(uint16*)cur;j++) {
- wptr[j].weight=0;
- wptr[j].pos=prs->words[i].pos.apos[j+1];
+
+ ptr->haspos = 1;
+ *(uint16 *) cur = prs->words[i].pos.apos[0];
+ wptr = POSDATAPTR(in, ptr);
+ for (j = 0; j < *(uint16 *) cur; j++)
+ {
+ wptr[j].weight = 0;
+ wptr[j].pos = prs->words[i].pos.apos[j + 1];
}
cur += sizeof(uint16) + prs->words[i].pos.apos[0] * sizeof(WordEntryPos);
pfree(prs->words[i].pos.apos);
- } else
- ptr->haspos=0;
+ }
+ else
+ ptr->haspos = 0;
ptr++;
}
pfree(prs->words);
@@ -645,70 +717,78 @@ to_tsvector(PG_FUNCTION_ARGS)
{
text *in = PG_GETARG_TEXT_P(1);
PRSTEXT prs;
- tsvector *out = NULL;
- TSCfgInfo *cfg=findcfg(PG_GETARG_INT32(0));
+ tsvector *out = NULL;
+ TSCfgInfo *cfg = findcfg(PG_GETARG_INT32(0));
prs.lenwords = 32;
prs.curwords = 0;
prs.pos = 0;
prs.words = (WORD *) palloc(sizeof(WORD) * prs.lenwords);
-
+
parsetext_v2(cfg, &prs, VARDATA(in), VARSIZE(in) - VARHDRSZ);
PG_FREE_IF_COPY(in, 1);
if (prs.curwords)
out = makevalue(&prs);
- else {
+ else
+ {
pfree(prs.words);
- out = palloc(CALCDATASIZE(0,0));
- out->len = CALCDATASIZE(0,0);
+ out = palloc(CALCDATASIZE(0, 0));
+ out->len = CALCDATASIZE(0, 0);
out->size = 0;
- }
+ }
PG_RETURN_POINTER(out);
}
Datum
-to_tsvector_name(PG_FUNCTION_ARGS) {
- text *cfg=PG_GETARG_TEXT_P(0);
- Datum res = DirectFunctionCall3(
- to_tsvector,
- Int32GetDatum( name2id_cfg( cfg ) ),
- PG_GETARG_DATUM(1),
- (Datum)0
+to_tsvector_name(PG_FUNCTION_ARGS)
+{
+ text *cfg = PG_GETARG_TEXT_P(0);
+ Datum res = DirectFunctionCall3(
+ to_tsvector,
+ Int32GetDatum(name2id_cfg(cfg)),
+ PG_GETARG_DATUM(1),
+ (Datum) 0
);
- PG_FREE_IF_COPY(cfg,0);
- PG_RETURN_DATUM(res);
+
+ PG_FREE_IF_COPY(cfg, 0);
+ PG_RETURN_DATUM(res);
}
Datum
-to_tsvector_current(PG_FUNCTION_ARGS) {
- Datum res = DirectFunctionCall3(
- to_tsvector,
- Int32GetDatum( get_currcfg() ),
- PG_GETARG_DATUM(0),
- (Datum)0
+to_tsvector_current(PG_FUNCTION_ARGS)
+{
+ Datum res = DirectFunctionCall3(
+ to_tsvector,
+ Int32GetDatum(get_currcfg()),
+ PG_GETARG_DATUM(0),
+ (Datum) 0
);
- PG_RETURN_DATUM(res);
+
+ PG_RETURN_DATUM(res);
}
static Oid
-findFunc(char *fname) {
- FuncCandidateList clist,ptr;
- Oid funcid = InvalidOid;
- List *names=makeList1(makeString(fname));
+findFunc(char *fname)
+{
+ FuncCandidateList clist,
+ ptr;
+ Oid funcid = InvalidOid;
+ List *names = makeList1(makeString(fname));
ptr = clist = FuncnameGetCandidates(names, 1);
freeList(names);
- if ( !ptr )
+ if (!ptr)
return funcid;
- while(ptr) {
- if ( ptr->args[0] == TEXTOID && funcid == InvalidOid )
- funcid=ptr->oid;
- clist=ptr->next;
+ while (ptr)
+ {
+ if (ptr->args[0] == TEXTOID && funcid == InvalidOid)
+ funcid = ptr->oid;
+ clist = ptr->next;
pfree(ptr);
- ptr=clist;
+ ptr = clist;
}
return funcid;
@@ -724,12 +804,12 @@ tsearch2(PG_FUNCTION_ARGS)
Trigger *trigger;
Relation rel;
HeapTuple rettuple = NULL;
- TSCfgInfo *cfg=findcfg(get_currcfg());
+ TSCfgInfo *cfg = findcfg(get_currcfg());
int numidxattr,
i;
PRSTEXT prs;
Datum datum = (Datum) 0;
- Oid funcoid = InvalidOid;
+ Oid funcoid = InvalidOid;
if (!CALLED_AS_TRIGGER(fcinfo))
/* internal error */
@@ -782,8 +862,8 @@ tsearch2(PG_FUNCTION_ARGS)
numattr = SPI_fnumber(rel->rd_att, trigger->tgargs[i]);
if (numattr == SPI_ERROR_NOATTRIBUTE)
{
- funcoid=findFunc(trigger->tgargs[i]);
- if ( funcoid==InvalidOid )
+ funcoid = findFunc(trigger->tgargs[i]);
+ if (funcoid == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("could not find function or field \"%s\"",
@@ -805,19 +885,22 @@ tsearch2(PG_FUNCTION_ARGS)
if (isnull)
continue;
- if ( funcoid!=InvalidOid ) {
- text *txttmp = (text *) DatumGetPointer( OidFunctionCall1(
- funcoid,
- PointerGetDatum(txt_toasted)
- ));
+ if (funcoid != InvalidOid)
+ {
+ text *txttmp = (text *) DatumGetPointer(OidFunctionCall1(
+ funcoid,
+ PointerGetDatum(txt_toasted)
+ ));
+
txt = (text *) DatumGetPointer(PG_DETOAST_DATUM(PointerGetDatum(txttmp)));
- if ( txt == txttmp )
+ if (txt == txttmp)
txt_toasted = PointerGetDatum(txt);
- } else
- txt = (text *) DatumGetPointer(PG_DETOAST_DATUM(PointerGetDatum(txt_toasted)));
+ }
+ else
+ txt = (text *) DatumGetPointer(PG_DETOAST_DATUM(PointerGetDatum(txt_toasted)));
parsetext_v2(cfg, &prs, VARDATA(txt), VARSIZE(txt) - VARHDRSZ);
- if (txt != (text*)DatumGetPointer(txt_toasted) )
+ if (txt != (text *) DatumGetPointer(txt_toasted))
pfree(txt);
}
@@ -831,8 +914,9 @@ tsearch2(PG_FUNCTION_ARGS)
}
else
{
- tsvector *out = palloc(CALCDATASIZE(0,0));
- out->len = CALCDATASIZE(0,0);
+ tsvector *out = palloc(CALCDATASIZE(0, 0));
+
+ out->len = CALCDATASIZE(0, 0);
out->size = 0;
datum = PointerGetDatum(out);
pfree(prs.words);
diff --git a/contrib/tsearch2/tsvector.h b/contrib/tsearch2/tsvector.h
index af06e3cf52f..bb695247463 100644
--- a/contrib/tsearch2/tsvector.h
+++ b/contrib/tsearch2/tsvector.h
@@ -12,23 +12,27 @@
#include "utils/builtins.h"
#include "storage/bufpage.h"
-typedef struct {
+typedef struct
+{
uint32
- haspos:1,
- len:11, /* MAX 2Kb */
- pos:20; /* MAX 1Mb */
+ haspos:1,
+ len:11, /* MAX 2Kb */
+ pos:20; /* MAX 1Mb */
} WordEntry;
+
#define MAXSTRLEN ( 1<<11 )
#define MAXSTRPOS ( 1<<20 )
-typedef struct {
+typedef struct
+{
uint16
- weight:2,
- pos:14;
-} WordEntryPos;
-#define MAXENTRYPOS (1<<14)
+ weight:2,
+ pos:14;
+} WordEntryPos;
+
+#define MAXENTRYPOS (1<<14)
#define MAXNUMPOS 256
-#define LIMITPOS(x) ( ( (x) >= MAXENTRYPOS ) ? (MAXENTRYPOS-1) : (x) )
+#define LIMITPOS(x) ( ( (x) >= MAXENTRYPOS ) ? (MAXENTRYPOS-1) : (x) )
typedef struct
{
@@ -43,13 +47,14 @@ typedef struct
#define STRPTR(x) ( (char*)x + DATAHDRSIZE + ( sizeof(WordEntry) * ((tsvector*)x)->size ) )
#define STRSIZE(x) ( ((tsvector*)x)->len - DATAHDRSIZE - ( sizeof(WordEntry) * ((tsvector*)x)->size ) )
#define _POSDATAPTR(x,e) (STRPTR(x)+((WordEntry*)(e))->pos+SHORTALIGN(((WordEntry*)(e))->len))
-#define POSDATALEN(x,e) ( ( ((WordEntry*)(e))->haspos ) ? (*(uint16*)_POSDATAPTR(x,e)) : 0 )
-#define POSDATAPTR(x,e) ( (WordEntryPos*)( _POSDATAPTR(x,e)+sizeof(uint16) ) )
+#define POSDATALEN(x,e) ( ( ((WordEntry*)(e))->haspos ) ? (*(uint16*)_POSDATAPTR(x,e)) : 0 )
+#define POSDATAPTR(x,e) ( (WordEntryPos*)( _POSDATAPTR(x,e)+sizeof(uint16) ) )
-typedef struct {
- WordEntry entry;
- WordEntryPos *pos;
+typedef struct
+{
+ WordEntry entry;
+ WordEntryPos *pos;
} WordEntryIN;
typedef struct
@@ -60,7 +65,7 @@ typedef struct
int4 len;
int4 state;
int4 alen;
- WordEntryPos *pos;
+ WordEntryPos *pos;
bool oprisdelim;
} TI_IN_STATE;
diff --git a/contrib/tsearch2/tsvector_op.c b/contrib/tsearch2/tsvector_op.c
index cfef02db85d..2ffd4ca5848 100644
--- a/contrib/tsearch2/tsvector_op.c
+++ b/contrib/tsearch2/tsvector_op.c
@@ -33,30 +33,33 @@ Datum concat(PG_FUNCTION_ARGS);
Datum
strip(PG_FUNCTION_ARGS)
{
- tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
- tsvector *out;
- int i,len=0;
- WordEntry *arrin=ARRPTR(in), *arrout;
- char *cur;
+ tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ tsvector *out;
+ int i,
+ len = 0;
+ WordEntry *arrin = ARRPTR(in),
+ *arrout;
+ char *cur;
- for(i=0;i<in->size;i++)
- len += SHORTALIGN( arrin[i].len );
+ for (i = 0; i < in->size; i++)
+ len += SHORTALIGN(arrin[i].len);
len = CALCDATASIZE(in->size, len);
- out=(tsvector*)palloc(len);
- memset(out,0,len);
- out->len=len;
- out->size=in->size;
- arrout=ARRPTR(out);
- cur=STRPTR(out);
- for(i=0;i<in->size;i++) {
- memcpy(cur, STRPTR(in)+arrin[i].pos, arrin[i].len);
+ out = (tsvector *) palloc(len);
+ memset(out, 0, len);
+ out->len = len;
+ out->size = in->size;
+ arrout = ARRPTR(out);
+ cur = STRPTR(out);
+ for (i = 0; i < in->size; i++)
+ {
+ memcpy(cur, STRPTR(in) + arrin[i].pos, arrin[i].len);
arrout[i].haspos = 0;
arrout[i].len = arrin[i].len;
arrout[i].pos = cur - STRPTR(out);
- cur += SHORTALIGN( arrout[i].len );
+ cur += SHORTALIGN(arrout[i].len);
}
-
+
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(out);
}
@@ -64,200 +67,263 @@ strip(PG_FUNCTION_ARGS)
Datum
setweight(PG_FUNCTION_ARGS)
{
- tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
- char cw = PG_GETARG_CHAR(1);
- tsvector *out;
- int i,j;
- WordEntry *entry;
+ tsvector *in = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ char cw = PG_GETARG_CHAR(1);
+ tsvector *out;
+ int i,
+ j;
+ WordEntry *entry;
WordEntryPos *p;
- int w=0;
+ int w = 0;
- switch(tolower(cw)) {
- case 'a': w=3; break;
- case 'b': w=2; break;
- case 'c': w=1; break;
- case 'd': w=0; break;
- /* internal error */
- default: elog(ERROR,"unrecognized weight");
+ switch (tolower(cw))
+ {
+ case 'a':
+ w = 3;
+ break;
+ case 'b':
+ w = 2;
+ break;
+ case 'c':
+ w = 1;
+ break;
+ case 'd':
+ w = 0;
+ break;
+ /* internal error */
+ default:
+ elog(ERROR, "unrecognized weight");
}
- out=(tsvector*)palloc(in->len);
- memcpy(out,in,in->len);
- entry=ARRPTR(out);
- i=out->size;
- while(i--) {
- if ( (j=POSDATALEN(out,entry)) != 0 ) {
- p=POSDATAPTR(out,entry);
- while(j--) {
- p->weight=w;
+ out = (tsvector *) palloc(in->len);
+ memcpy(out, in, in->len);
+ entry = ARRPTR(out);
+ i = out->size;
+ while (i--)
+ {
+ if ((j = POSDATALEN(out, entry)) != 0)
+ {
+ p = POSDATAPTR(out, entry);
+ while (j--)
+ {
+ p->weight = w;
p++;
}
}
entry++;
}
-
+
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(out);
}
static int
-compareEntry(char *ptra, WordEntry* a, char *ptrb, WordEntry* b)
+compareEntry(char *ptra, WordEntry * a, char *ptrb, WordEntry * b)
{
- if ( a->len == b->len)
- {
- return strncmp(
- ptra + a->pos,
- ptrb + b->pos,
- a->len);
- }
- return ( a->len > b->len ) ? 1 : -1;
+ if (a->len == b->len)
+ {
+ return strncmp(
+ ptra + a->pos,
+ ptrb + b->pos,
+ a->len);
+ }
+ return (a->len > b->len) ? 1 : -1;
}
static int4
-add_pos(tsvector *src, WordEntry *srcptr, tsvector *dest, WordEntry *destptr, int4 maxpos ) {
- uint16 *clen = (uint16*)_POSDATAPTR(dest,destptr);
- int i;
- uint16 slen = POSDATALEN(src, srcptr), startlen;
- WordEntryPos *spos=POSDATAPTR(src, srcptr), *dpos=POSDATAPTR(dest,destptr);
+add_pos(tsvector * src, WordEntry * srcptr, tsvector * dest, WordEntry * destptr, int4 maxpos)
+{
+ uint16 *clen = (uint16 *) _POSDATAPTR(dest, destptr);
+ int i;
+ uint16 slen = POSDATALEN(src, srcptr),
+ startlen;
+ WordEntryPos *spos = POSDATAPTR(src, srcptr),
+ *dpos = POSDATAPTR(dest, destptr);
- if ( ! destptr->haspos )
- *clen=0;
+ if (!destptr->haspos)
+ *clen = 0;
startlen = *clen;
- for(i=0; i<slen && *clen<MAXNUMPOS && ( *clen==0 || dpos[ *clen-1 ].pos != MAXENTRYPOS-1 ) ;i++) {
- dpos[ *clen ].weight = spos[i].weight;
- dpos[ *clen ].pos = LIMITPOS(spos[i].pos + maxpos);
+ for (i = 0; i < slen && *clen < MAXNUMPOS && (*clen == 0 || dpos[*clen - 1].pos != MAXENTRYPOS - 1); i++)
+ {
+ dpos[*clen].weight = spos[i].weight;
+ dpos[*clen].pos = LIMITPOS(spos[i].pos + maxpos);
(*clen)++;
}
- if ( *clen != startlen )
- destptr->haspos=1;
- return *clen - startlen;
+ if (*clen != startlen)
+ destptr->haspos = 1;
+ return *clen - startlen;
}
Datum
-concat(PG_FUNCTION_ARGS) {
- tsvector *in1 = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
- tsvector *in2 = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
- tsvector *out;
- WordEntry *ptr;
- WordEntry *ptr1,*ptr2;
+concat(PG_FUNCTION_ARGS)
+{
+ tsvector *in1 = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ tsvector *in2 = (tsvector *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
+ tsvector *out;
+ WordEntry *ptr;
+ WordEntry *ptr1,
+ *ptr2;
WordEntryPos *p;
- int maxpos=0,i,j,i1,i2;
- char *cur;
- char *data,*data1,*data2;
+ int maxpos = 0,
+ i,
+ j,
+ i1,
+ i2;
+ char *cur;
+ char *data,
+ *data1,
+ *data2;
- ptr=ARRPTR(in1);
- i=in1->size;
- while(i--) {
- if ( (j=POSDATALEN(in1,ptr)) != 0 ) {
- p=POSDATAPTR(in1,ptr);
- while(j--) {
- if ( p->pos > maxpos )
+ ptr = ARRPTR(in1);
+ i = in1->size;
+ while (i--)
+ {
+ if ((j = POSDATALEN(in1, ptr)) != 0)
+ {
+ p = POSDATAPTR(in1, ptr);
+ while (j--)
+ {
+ if (p->pos > maxpos)
maxpos = p->pos;
p++;
}
}
ptr++;
}
-
- ptr1=ARRPTR(in1); ptr2=ARRPTR(in2);
- data1=STRPTR(in1); data2=STRPTR(in2);
- i1=in1->size; i2=in2->size;
- out=(tsvector*)palloc( in1->len + in2->len );
- memset(out,0,in1->len + in2->len);
+
+ ptr1 = ARRPTR(in1);
+ ptr2 = ARRPTR(in2);
+ data1 = STRPTR(in1);
+ data2 = STRPTR(in2);
+ i1 = in1->size;
+ i2 = in2->size;
+ out = (tsvector *) palloc(in1->len + in2->len);
+ memset(out, 0, in1->len + in2->len);
out->len = in1->len + in2->len;
out->size = in1->size + in2->size;
- data=cur=STRPTR(out);
- ptr=ARRPTR(out);
- while( i1 && i2 ) {
- int cmp=compareEntry(data1,ptr1,data2,ptr2);
- if ( cmp < 0 ) { /* in1 first */
+ data = cur = STRPTR(out);
+ ptr = ARRPTR(out);
+ while (i1 && i2)
+ {
+ int cmp = compareEntry(data1, ptr1, data2, ptr2);
+
+ if (cmp < 0)
+ { /* in1 first */
ptr->haspos = ptr1->haspos;
ptr->len = ptr1->len;
- memcpy( cur, data1 + ptr1->pos, ptr1->len );
- ptr->pos = cur - data;
- cur+=SHORTALIGN(ptr1->len);
- if ( ptr->haspos ) {
- memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16));
- cur+=POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16);
+ memcpy(cur, data1 + ptr1->pos, ptr1->len);
+ ptr->pos = cur - data;
+ cur += SHORTALIGN(ptr1->len);
+ if (ptr->haspos)
+ {
+ memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
+ cur += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
}
- ptr++; ptr1++; i1--;
- } else if ( cmp>0 ) { /* in2 first */
+ ptr++;
+ ptr1++;
+ i1--;
+ }
+ else if (cmp > 0)
+ { /* in2 first */
ptr->haspos = ptr2->haspos;
ptr->len = ptr2->len;
- memcpy( cur, data2 + ptr2->pos, ptr2->len );
- ptr->pos = cur - data;
- cur+=SHORTALIGN(ptr2->len);
- if ( ptr->haspos ) {
- int addlen = add_pos(in2, ptr2, out, ptr, maxpos );
- if ( addlen == 0 )
- ptr->haspos=0;
+ memcpy(cur, data2 + ptr2->pos, ptr2->len);
+ ptr->pos = cur - data;
+ cur += SHORTALIGN(ptr2->len);
+ if (ptr->haspos)
+ {
+ int addlen = add_pos(in2, ptr2, out, ptr, maxpos);
+
+ if (addlen == 0)
+ ptr->haspos = 0;
else
- cur += addlen*sizeof(WordEntryPos) + sizeof(uint16);
+ cur += addlen * sizeof(WordEntryPos) + sizeof(uint16);
}
- ptr++; ptr2++; i2--;
- } else {
+ ptr++;
+ ptr2++;
+ i2--;
+ }
+ else
+ {
ptr->haspos = ptr1->haspos | ptr2->haspos;
ptr->len = ptr1->len;
- memcpy( cur, data1 + ptr1->pos, ptr1->len );
- ptr->pos = cur - data;
- cur+=SHORTALIGN(ptr1->len);
- if ( ptr->haspos ) {
- if ( ptr1->haspos ) {
- memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16));
- cur+=POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16);
- if ( ptr2->haspos )
- cur += add_pos(in2, ptr2, out, ptr, maxpos )*sizeof(WordEntryPos);
- } else if ( ptr2->haspos ) {
- int addlen = add_pos(in2, ptr2, out, ptr, maxpos );
- if ( addlen == 0 )
- ptr->haspos=0;
+ memcpy(cur, data1 + ptr1->pos, ptr1->len);
+ ptr->pos = cur - data;
+ cur += SHORTALIGN(ptr1->len);
+ if (ptr->haspos)
+ {
+ if (ptr1->haspos)
+ {
+ memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
+ cur += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
+ if (ptr2->haspos)
+ cur += add_pos(in2, ptr2, out, ptr, maxpos) * sizeof(WordEntryPos);
+ }
+ else if (ptr2->haspos)
+ {
+ int addlen = add_pos(in2, ptr2, out, ptr, maxpos);
+
+ if (addlen == 0)
+ ptr->haspos = 0;
else
- cur += addlen*sizeof(WordEntryPos) + sizeof(uint16);
+ cur += addlen * sizeof(WordEntryPos) + sizeof(uint16);
}
}
- ptr++; ptr1++; ptr2++; i1--; i2--;
+ ptr++;
+ ptr1++;
+ ptr2++;
+ i1--;
+ i2--;
}
}
- while(i1) {
+ while (i1)
+ {
ptr->haspos = ptr1->haspos;
ptr->len = ptr1->len;
- memcpy( cur, data1 + ptr1->pos, ptr1->len );
- ptr->pos = cur - data;
- cur+=SHORTALIGN(ptr1->len);
- if ( ptr->haspos ) {
- memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16));
- cur+=POSDATALEN(in1, ptr1)*sizeof(WordEntryPos) + sizeof(uint16);
+ memcpy(cur, data1 + ptr1->pos, ptr1->len);
+ ptr->pos = cur - data;
+ cur += SHORTALIGN(ptr1->len);
+ if (ptr->haspos)
+ {
+ memcpy(cur, _POSDATAPTR(in1, ptr1), POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16));
+ cur += POSDATALEN(in1, ptr1) * sizeof(WordEntryPos) + sizeof(uint16);
}
- ptr++; ptr1++; i1--;
+ ptr++;
+ ptr1++;
+ i1--;
}
- while(i2) {
+ while (i2)
+ {
ptr->haspos = ptr2->haspos;
ptr->len = ptr2->len;
- memcpy( cur, data2 + ptr2->pos, ptr2->len );
- ptr->pos = cur - data;
- cur+=SHORTALIGN(ptr2->len);
- if ( ptr->haspos ) {
- int addlen = add_pos(in2, ptr2, out, ptr, maxpos );
- if ( addlen == 0 )
- ptr->haspos=0;
+ memcpy(cur, data2 + ptr2->pos, ptr2->len);
+ ptr->pos = cur - data;
+ cur += SHORTALIGN(ptr2->len);
+ if (ptr->haspos)
+ {
+ int addlen = add_pos(in2, ptr2, out, ptr, maxpos);
+
+ if (addlen == 0)
+ ptr->haspos = 0;
else
- cur += addlen*sizeof(WordEntryPos) + sizeof(uint16);
+ cur += addlen * sizeof(WordEntryPos) + sizeof(uint16);
}
- ptr++; ptr2++; i2--;
+ ptr++;
+ ptr2++;
+ i2--;
}
-
- out->size=ptr-ARRPTR(out);
- out->len = CALCDATASIZE( out->size, cur-data );
- if ( data != STRPTR(out) )
- memmove( STRPTR(out), data, cur-data );
+
+ out->size = ptr - ARRPTR(out);
+ out->len = CALCDATASIZE(out->size, cur - data);
+ if (data != STRPTR(out))
+ memmove(STRPTR(out), data, cur - data);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
PG_RETURN_POINTER(out);
}
-
diff --git a/contrib/tsearch2/wordparser/deflex.c b/contrib/tsearch2/wordparser/deflex.c
index ea596c507ca..bbf3271b666 100644
--- a/contrib/tsearch2/wordparser/deflex.c
+++ b/contrib/tsearch2/wordparser/deflex.c
@@ -1,6 +1,6 @@
#include "deflex.h"
-const char *lex_descr[]={
+const char *lex_descr[] = {
"",
"Latin word",
"Non-latin word",
@@ -27,7 +27,7 @@ const char *lex_descr[]={
"HTML Entity"
};
-const char *tok_alias[]={
+const char *tok_alias[] = {
"",
"lword",
"nlword",
@@ -53,4 +53,3 @@ const char *tok_alias[]={
"uint",
"entity"
};
-
diff --git a/contrib/tsearch2/wparser.c b/contrib/tsearch2/wparser.c
index deff94ce904..b7e45e51885 100644
--- a/contrib/tsearch2/wparser.c
+++ b/contrib/tsearch2/wparser.c
@@ -1,5 +1,5 @@
-/*
- * interface functions to parser
+/*
+ * interface functions to parser
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <errno.h>
@@ -21,154 +21,171 @@
/*********top interface**********/
-static void *plan_getparser=NULL;
-static Oid current_parser_id=InvalidOid;
+static void *plan_getparser = NULL;
+static Oid current_parser_id = InvalidOid;
void
-init_prs(Oid id, WParserInfo *prs) {
- Oid arg[1]={ OIDOID };
- bool isnull;
- Datum pars[1]={ ObjectIdGetDatum(id) };
- int stat;
-
- memset(prs,0,sizeof(WParserInfo));
+init_prs(Oid id, WParserInfo * prs)
+{
+ Oid arg[1] = {OIDOID};
+ bool isnull;
+ Datum pars[1] = {ObjectIdGetDatum(id)};
+ int stat;
+
+ memset(prs, 0, sizeof(WParserInfo));
SPI_connect();
- if ( !plan_getparser ) {
- plan_getparser = SPI_saveplan( SPI_prepare( "select prs_start, prs_nexttoken, prs_end, prs_lextype, prs_headline from pg_ts_parser where oid = $1" , 1, arg ) );
- if ( !plan_getparser )
+ if (!plan_getparser)
+ {
+ plan_getparser = SPI_saveplan(SPI_prepare("select prs_start, prs_nexttoken, prs_end, prs_lextype, prs_headline from pg_ts_parser where oid = $1", 1, arg));
+ if (!plan_getparser)
ts_error(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_getparser, pars, " ", 1);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 ) {
- Oid oid=InvalidOid;
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ {
+ Oid oid = InvalidOid;
+
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
fmgr_info_cxt(oid, &(prs->start_info), TopMemoryContext);
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull) );
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull));
fmgr_info_cxt(oid, &(prs->getlexeme_info), TopMemoryContext);
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull) );
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull));
fmgr_info_cxt(oid, &(prs->end_info), TopMemoryContext);
- prs->lextype=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 4, &isnull) );
- oid=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 5, &isnull) );
+ prs->lextype = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 4, &isnull));
+ oid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 5, &isnull));
fmgr_info_cxt(oid, &(prs->headline_info), TopMemoryContext);
- prs->prs_id=id;
- } else
+ prs->prs_id = id;
+ }
+ else
ts_error(ERROR, "No parser with id %d", id);
SPI_finish();
}
-typedef struct {
- WParserInfo *last_prs;
- int len;
- int reallen;
- WParserInfo *list;
+typedef struct
+{
+ WParserInfo *last_prs;
+ int len;
+ int reallen;
+ WParserInfo *list;
SNMap name2id_map;
-} PrsList;
+} PrsList;
-static PrsList PList = {NULL,0,0,NULL,{0,0,NULL}};
+static PrsList PList = {NULL, 0, 0, NULL, {0, 0, NULL}};
-void
-reset_prs(void) {
- freeSNMap( &(PList.name2id_map) );
- if ( PList.list )
+void
+reset_prs(void)
+{
+ freeSNMap(&(PList.name2id_map));
+ if (PList.list)
free(PList.list);
- memset(&PList,0,sizeof(PrsList));
+ memset(&PList, 0, sizeof(PrsList));
}
static int
-compareprs(const void *a, const void *b) {
- return ((WParserInfo*)a)->prs_id - ((WParserInfo*)b)->prs_id;
+compareprs(const void *a, const void *b)
+{
+ return ((WParserInfo *) a)->prs_id - ((WParserInfo *) b)->prs_id;
}
WParserInfo *
-findprs(Oid id) {
+findprs(Oid id)
+{
/* last used prs */
- if ( PList.last_prs && PList.last_prs->prs_id==id )
+ if (PList.last_prs && PList.last_prs->prs_id == id)
return PList.last_prs;
/* already used prs */
- if ( PList.len != 0 ) {
+ if (PList.len != 0)
+ {
WParserInfo key;
- key.prs_id=id;
+
+ key.prs_id = id;
PList.last_prs = bsearch(&key, PList.list, PList.len, sizeof(WParserInfo), compareprs);
- if ( PList.last_prs != NULL )
+ if (PList.last_prs != NULL)
return PList.last_prs;
}
/* last chance */
- if ( PList.len==PList.reallen ) {
+ if (PList.len == PList.reallen)
+ {
WParserInfo *tmp;
- int reallen = ( PList.reallen ) ? 2*PList.reallen : 16;
- tmp=(WParserInfo*)realloc(PList.list,sizeof(WParserInfo)*reallen);
- if ( !tmp )
- ts_error(ERROR,"No memory");
- PList.reallen=reallen;
- PList.list=tmp;
+ int reallen = (PList.reallen) ? 2 * PList.reallen : 16;
+
+ tmp = (WParserInfo *) realloc(PList.list, sizeof(WParserInfo) * reallen);
+ if (!tmp)
+ ts_error(ERROR, "No memory");
+ PList.reallen = reallen;
+ PList.list = tmp;
}
- PList.last_prs=&(PList.list[PList.len]);
+ PList.last_prs = &(PList.list[PList.len]);
init_prs(id, PList.last_prs);
PList.len++;
qsort(PList.list, PList.len, sizeof(WParserInfo), compareprs);
- return findprs(id); /* qsort changed order!! */;
+ return findprs(id); /* qsort changed order!! */ ;
}
-static void *plan_name2id=NULL;
+static void *plan_name2id = NULL;
Oid
-name2id_prs(text *name) {
- Oid arg[1]={ TEXTOID };
- bool isnull;
- Datum pars[1]={ PointerGetDatum(name) };
- int stat;
- Oid id=findSNMap_t( &(PList.name2id_map), name );
-
- if ( id )
+name2id_prs(text *name)
+{
+ Oid arg[1] = {TEXTOID};
+ bool isnull;
+ Datum pars[1] = {PointerGetDatum(name)};
+ int stat;
+ Oid id = findSNMap_t(&(PList.name2id_map), name);
+
+ if (id)
return id;
-
+
SPI_connect();
- if ( !plan_name2id ) {
- plan_name2id = SPI_saveplan( SPI_prepare( "select oid from pg_ts_parser where prs_name = $1" , 1, arg ) );
- if ( !plan_name2id )
+ if (!plan_name2id)
+ {
+ plan_name2id = SPI_saveplan(SPI_prepare("select oid from pg_ts_parser where prs_name = $1", 1, arg));
+ if (!plan_name2id)
ts_error(ERROR, "SPI_prepare() failed");
}
stat = SPI_execp(plan_name2id, pars, " ", 1);
- if ( stat < 0 )
- ts_error (ERROR, "SPI_execp return %d", stat);
- if ( SPI_processed > 0 )
- id=DatumGetObjectId( SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull) );
- else
+ if (stat < 0)
+ ts_error(ERROR, "SPI_execp return %d", stat);
+ if (SPI_processed > 0)
+ id = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
+ else
ts_error(ERROR, "No parser '%s'", text2char(name));
SPI_finish();
- addSNMap_t( &(PList.name2id_map), name, id );
+ addSNMap_t(&(PList.name2id_map), name, id);
return id;
}
/******sql-level interface******/
-typedef struct {
- int cur;
- LexDescr *list;
-} TypeStorage;
+typedef struct
+{
+ int cur;
+ LexDescr *list;
+} TypeStorage;
static void
-setup_firstcall(FuncCallContext *funcctx, Oid prsid) {
- TupleDesc tupdesc;
- MemoryContext oldcontext;
- TypeStorage *st;
- WParserInfo *prs = findprs(prsid);
+setup_firstcall(FuncCallContext *funcctx, Oid prsid)
+{
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ TypeStorage *st;
+ WParserInfo *prs = findprs(prsid);
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
- st=(TypeStorage*)palloc( sizeof(TypeStorage) );
- st->cur=0;
- st->list = (LexDescr*)DatumGetPointer(
- OidFunctionCall1( prs->lextype, PointerGetDatum(prs->prs) )
- );
- funcctx->user_fctx = (void*)st;
+ st = (TypeStorage *) palloc(sizeof(TypeStorage));
+ st->cur = 0;
+ st->list = (LexDescr *) DatumGetPointer(
+ OidFunctionCall1(prs->lextype, PointerGetDatum(prs->prs))
+ );
+ funcctx->user_fctx = (void *) st;
tupdesc = RelationNameGetTupleDesc("tokentype");
funcctx->slot = TupleDescGetSlot(tupdesc);
funcctx->attinmeta = TupleDescGetAttInMetadata(tupdesc);
@@ -176,20 +193,22 @@ setup_firstcall(FuncCallContext *funcctx, Oid prsid) {
}
static Datum
-process_call(FuncCallContext *funcctx) {
- TypeStorage *st;
-
- st=(TypeStorage*)funcctx->user_fctx;
- if ( st->list && st->list[st->cur].lexid ) {
- Datum result;
- char* values[3];
- char txtid[16];
- HeapTuple tuple;
-
- values[0]=txtid;
- sprintf(txtid,"%d",st->list[st->cur].lexid);
- values[1]=st->list[st->cur].alias;
- values[2]=st->list[st->cur].descr;
+process_call(FuncCallContext *funcctx)
+{
+ TypeStorage *st;
+
+ st = (TypeStorage *) funcctx->user_fctx;
+ if (st->list && st->list[st->cur].lexid)
+ {
+ Datum result;
+ char *values[3];
+ char txtid[16];
+ HeapTuple tuple;
+
+ values[0] = txtid;
+ sprintf(txtid, "%d", st->list[st->cur].lexid);
+ values[1] = st->list[st->cur].alias;
+ values[2] = st->list[st->cur].descr;
tuple = BuildTupleFromCStrings(funcctx->attinmeta, values);
result = TupleGetDatum(funcctx->slot, tuple);
@@ -198,161 +217,179 @@ process_call(FuncCallContext *funcctx) {
pfree(values[2]);
st->cur++;
return result;
- } else {
- if ( st->list ) pfree(st->list);
+ }
+ else
+ {
+ if (st->list)
+ pfree(st->list);
pfree(st);
}
- return (Datum)0;
+ return (Datum) 0;
}
PG_FUNCTION_INFO_V1(token_type);
-Datum token_type(PG_FUNCTION_ARGS);
+Datum token_type(PG_FUNCTION_ARGS);
Datum
-token_type(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+token_type(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
- if (SRF_IS_FIRSTCALL()) {
+ if (SRF_IS_FIRSTCALL())
+ {
funcctx = SRF_FIRSTCALL_INIT();
- setup_firstcall(funcctx, PG_GETARG_OID(0) );
+ setup_firstcall(funcctx, PG_GETARG_OID(0));
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=process_call(funcctx)) != (Datum)0 )
+ if ((result = process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(token_type_byname);
-Datum token_type_byname(PG_FUNCTION_ARGS);
+Datum token_type_byname(PG_FUNCTION_ARGS);
Datum
-token_type_byname(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+token_type_byname(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ text *name = PG_GETARG_TEXT_P(0);
- if (SRF_IS_FIRSTCALL()) {
- text *name = PG_GETARG_TEXT_P(0);
funcctx = SRF_FIRSTCALL_INIT();
- setup_firstcall(funcctx, name2id_prs( name ) );
- PG_FREE_IF_COPY(name,0);
+ setup_firstcall(funcctx, name2id_prs(name));
+ PG_FREE_IF_COPY(name, 0);
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=process_call(funcctx)) != (Datum)0 )
+ if ((result = process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(token_type_current);
-Datum token_type_current(PG_FUNCTION_ARGS);
+Datum token_type_current(PG_FUNCTION_ARGS);
Datum
-token_type_current(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+token_type_current(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
- if (SRF_IS_FIRSTCALL()) {
+ if (SRF_IS_FIRSTCALL())
+ {
funcctx = SRF_FIRSTCALL_INIT();
- if ( current_parser_id==InvalidOid )
- current_parser_id = name2id_prs( char2text("default") );
- setup_firstcall(funcctx, current_parser_id );
+ if (current_parser_id == InvalidOid)
+ current_parser_id = name2id_prs(char2text("default"));
+ setup_firstcall(funcctx, current_parser_id);
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=process_call(funcctx)) != (Datum)0 )
+ if ((result = process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(set_curprs);
-Datum set_curprs(PG_FUNCTION_ARGS);
+Datum set_curprs(PG_FUNCTION_ARGS);
Datum
-set_curprs(PG_FUNCTION_ARGS) {
- findprs(PG_GETARG_OID(0));
- current_parser_id=PG_GETARG_OID(0);
- PG_RETURN_VOID();
+set_curprs(PG_FUNCTION_ARGS)
+{
+ findprs(PG_GETARG_OID(0));
+ current_parser_id = PG_GETARG_OID(0);
+ PG_RETURN_VOID();
}
PG_FUNCTION_INFO_V1(set_curprs_byname);
-Datum set_curprs_byname(PG_FUNCTION_ARGS);
+Datum set_curprs_byname(PG_FUNCTION_ARGS);
Datum
-set_curprs_byname(PG_FUNCTION_ARGS) {
- text *name=PG_GETARG_TEXT_P(0);
-
- DirectFunctionCall1(
- set_curprs,
- ObjectIdGetDatum( name2id_prs(name) )
- );
- PG_FREE_IF_COPY(name, 0);
- PG_RETURN_VOID();
+set_curprs_byname(PG_FUNCTION_ARGS)
+{
+ text *name = PG_GETARG_TEXT_P(0);
+
+ DirectFunctionCall1(
+ set_curprs,
+ ObjectIdGetDatum(name2id_prs(name))
+ );
+ PG_FREE_IF_COPY(name, 0);
+ PG_RETURN_VOID();
}
-typedef struct {
- int type;
- char *lexem;
-} LexemEntry;
+typedef struct
+{
+ int type;
+ char *lexem;
+} LexemEntry;
+
+typedef struct
+{
+ int cur;
+ int len;
+ LexemEntry *list;
+} PrsStorage;
-typedef struct {
- int cur;
- int len;
- LexemEntry *list;
-} PrsStorage;
-
static void
-prs_setup_firstcall(FuncCallContext *funcctx, int prsid, text *txt) {
- TupleDesc tupdesc;
- MemoryContext oldcontext;
- PrsStorage *st;
- WParserInfo *prs = findprs(prsid);
- char *lex=NULL;
- int llen=0, type=0;
+prs_setup_firstcall(FuncCallContext *funcctx, int prsid, text *txt)
+{
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ PrsStorage *st;
+ WParserInfo *prs = findprs(prsid);
+ char *lex = NULL;
+ int llen = 0,
+ type = 0;
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
- st=(PrsStorage*)palloc( sizeof(PrsStorage) );
- st->cur=0;
- st->len=16;
- st->list=(LexemEntry*)palloc( sizeof(LexemEntry)*st->len );
-
- prs->prs = (void*)DatumGetPointer(
- FunctionCall2(
- &(prs->start_info),
- PointerGetDatum(VARDATA(txt)),
- Int32GetDatum(VARSIZE(txt)-VARHDRSZ)
- )
- );
-
- while( ( type=DatumGetInt32(FunctionCall3(
- &(prs->getlexeme_info),
- PointerGetDatum(prs->prs),
- PointerGetDatum(&lex),
- PointerGetDatum(&llen))) ) != 0 ) {
-
- if ( st->cur>=st->len ) {
- st->len=2*st->len;
- st->list=(LexemEntry*)repalloc(st->list, sizeof(LexemEntry)*st->len);
+ st = (PrsStorage *) palloc(sizeof(PrsStorage));
+ st->cur = 0;
+ st->len = 16;
+ st->list = (LexemEntry *) palloc(sizeof(LexemEntry) * st->len);
+
+ prs->prs = (void *) DatumGetPointer(
+ FunctionCall2(
+ &(prs->start_info),
+ PointerGetDatum(VARDATA(txt)),
+ Int32GetDatum(VARSIZE(txt) - VARHDRSZ)
+ )
+ );
+
+ while ((type = DatumGetInt32(FunctionCall3(
+ &(prs->getlexeme_info),
+ PointerGetDatum(prs->prs),
+ PointerGetDatum(&lex),
+ PointerGetDatum(&llen)))) != 0)
+ {
+
+ if (st->cur >= st->len)
+ {
+ st->len = 2 * st->len;
+ st->list = (LexemEntry *) repalloc(st->list, sizeof(LexemEntry) * st->len);
}
- st->list[st->cur].lexem = palloc(llen+1);
- memcpy( st->list[st->cur].lexem, lex, llen);
- st->list[st->cur].lexem[llen]='\0';
- st->list[st->cur].type=type;
+ st->list[st->cur].lexem = palloc(llen + 1);
+ memcpy(st->list[st->cur].lexem, lex, llen);
+ st->list[st->cur].lexem[llen] = '\0';
+ st->list[st->cur].type = type;
st->cur++;
}
-
+
FunctionCall1(
- &(prs->end_info),
- PointerGetDatum(prs->prs)
- );
+ &(prs->end_info),
+ PointerGetDatum(prs->prs)
+ );
- st->len=st->cur;
- st->cur=0;
-
- funcctx->user_fctx = (void*)st;
+ st->len = st->cur;
+ st->cur = 0;
+
+ funcctx->user_fctx = (void *) st;
tupdesc = RelationNameGetTupleDesc("tokenout");
funcctx->slot = TupleDescGetSlot(tupdesc);
funcctx->attinmeta = TupleDescGetAttInMetadata(tupdesc);
@@ -360,132 +397,148 @@ prs_setup_firstcall(FuncCallContext *funcctx, int prsid, text *txt) {
}
static Datum
-prs_process_call(FuncCallContext *funcctx) {
- PrsStorage *st;
-
- st=(PrsStorage*)funcctx->user_fctx;
- if ( st->cur < st->len ) {
- Datum result;
- char* values[2];
- char tid[16];
- HeapTuple tuple;
-
- values[0]=tid;
- sprintf(tid,"%d",st->list[st->cur].type);
- values[1]=st->list[st->cur].lexem;
+prs_process_call(FuncCallContext *funcctx)
+{
+ PrsStorage *st;
+
+ st = (PrsStorage *) funcctx->user_fctx;
+ if (st->cur < st->len)
+ {
+ Datum result;
+ char *values[2];
+ char tid[16];
+ HeapTuple tuple;
+
+ values[0] = tid;
+ sprintf(tid, "%d", st->list[st->cur].type);
+ values[1] = st->list[st->cur].lexem;
tuple = BuildTupleFromCStrings(funcctx->attinmeta, values);
result = TupleGetDatum(funcctx->slot, tuple);
pfree(values[1]);
st->cur++;
return result;
- } else {
- if ( st->list ) pfree(st->list);
+ }
+ else
+ {
+ if (st->list)
+ pfree(st->list);
pfree(st);
}
- return (Datum)0;
+ return (Datum) 0;
}
-
+
PG_FUNCTION_INFO_V1(parse);
-Datum parse(PG_FUNCTION_ARGS);
+Datum parse(PG_FUNCTION_ARGS);
Datum
-parse(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+parse(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ text *txt = PG_GETARG_TEXT_P(1);
- if (SRF_IS_FIRSTCALL()) {
- text *txt = PG_GETARG_TEXT_P(1);
funcctx = SRF_FIRSTCALL_INIT();
- prs_setup_firstcall(funcctx, PG_GETARG_OID(0),txt );
- PG_FREE_IF_COPY(txt,1);
+ prs_setup_firstcall(funcctx, PG_GETARG_OID(0), txt);
+ PG_FREE_IF_COPY(txt, 1);
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=prs_process_call(funcctx)) != (Datum)0 )
+ if ((result = prs_process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(parse_byname);
-Datum parse_byname(PG_FUNCTION_ARGS);
+Datum parse_byname(PG_FUNCTION_ARGS);
Datum
-parse_byname(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+parse_byname(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ text *name = PG_GETARG_TEXT_P(0);
+ text *txt = PG_GETARG_TEXT_P(1);
- if (SRF_IS_FIRSTCALL()) {
- text *name = PG_GETARG_TEXT_P(0);
- text *txt = PG_GETARG_TEXT_P(1);
funcctx = SRF_FIRSTCALL_INIT();
- prs_setup_firstcall(funcctx, name2id_prs( name ),txt );
- PG_FREE_IF_COPY(name,0);
- PG_FREE_IF_COPY(txt,1);
+ prs_setup_firstcall(funcctx, name2id_prs(name), txt);
+ PG_FREE_IF_COPY(name, 0);
+ PG_FREE_IF_COPY(txt, 1);
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=prs_process_call(funcctx)) != (Datum)0 )
+ if ((result = prs_process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(parse_current);
-Datum parse_current(PG_FUNCTION_ARGS);
+Datum parse_current(PG_FUNCTION_ARGS);
Datum
-parse_current(PG_FUNCTION_ARGS) {
- FuncCallContext *funcctx;
- Datum result;
+parse_current(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ Datum result;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ text *txt = PG_GETARG_TEXT_P(0);
- if (SRF_IS_FIRSTCALL()) {
- text *txt = PG_GETARG_TEXT_P(0);
funcctx = SRF_FIRSTCALL_INIT();
- if ( current_parser_id==InvalidOid )
- current_parser_id = name2id_prs( char2text("default") );
- prs_setup_firstcall(funcctx, current_parser_id,txt );
- PG_FREE_IF_COPY(txt,0);
+ if (current_parser_id == InvalidOid)
+ current_parser_id = name2id_prs(char2text("default"));
+ prs_setup_firstcall(funcctx, current_parser_id, txt);
+ PG_FREE_IF_COPY(txt, 0);
}
funcctx = SRF_PERCALL_SETUP();
- if ( (result=prs_process_call(funcctx)) != (Datum)0 )
+ if ((result = prs_process_call(funcctx)) != (Datum) 0)
SRF_RETURN_NEXT(funcctx, result);
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(headline);
-Datum headline(PG_FUNCTION_ARGS);
+Datum headline(PG_FUNCTION_ARGS);
Datum
-headline(PG_FUNCTION_ARGS) {
- TSCfgInfo *cfg=findcfg(PG_GETARG_OID(0));
- text *in = PG_GETARG_TEXT_P(1);
+headline(PG_FUNCTION_ARGS)
+{
+ TSCfgInfo *cfg = findcfg(PG_GETARG_OID(0));
+ text *in = PG_GETARG_TEXT_P(1);
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(2)));
- text *opt=( PG_NARGS()>3 && PG_GETARG_POINTER(3) ) ? PG_GETARG_TEXT_P(3) : NULL;
+ text *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL;
HLPRSTEXT prs;
- text *out;
+ text *out;
WParserInfo *prsobj = findprs(cfg->prs_id);
- memset(&prs,0,sizeof(HLPRSTEXT));
+ memset(&prs, 0, sizeof(HLPRSTEXT));
prs.lenwords = 32;
prs.words = (HLWORD *) palloc(sizeof(HLWORD) * prs.lenwords);
hlparsetext(cfg, &prs, query, VARDATA(in), VARSIZE(in) - VARHDRSZ);
FunctionCall3(
- &(prsobj->headline_info),
- PointerGetDatum(&prs),
- PointerGetDatum(opt),
- PointerGetDatum(query)
- );
+ &(prsobj->headline_info),
+ PointerGetDatum(&prs),
+ PointerGetDatum(opt),
+ PointerGetDatum(query)
+ );
out = genhl(&prs);
- PG_FREE_IF_COPY(in,1);
- PG_FREE_IF_COPY(query,2);
- if ( opt ) PG_FREE_IF_COPY(opt,3);
+ PG_FREE_IF_COPY(in, 1);
+ PG_FREE_IF_COPY(query, 2);
+ if (opt)
+ PG_FREE_IF_COPY(opt, 3);
pfree(prs.words);
pfree(prs.startsel);
pfree(prs.stopsel);
@@ -495,35 +548,34 @@ headline(PG_FUNCTION_ARGS) {
PG_FUNCTION_INFO_V1(headline_byname);
-Datum headline_byname(PG_FUNCTION_ARGS);
+Datum headline_byname(PG_FUNCTION_ARGS);
Datum
-headline_byname(PG_FUNCTION_ARGS) {
- text *cfg=PG_GETARG_TEXT_P(0);
-
- Datum out=DirectFunctionCall4(
- headline,
- ObjectIdGetDatum(name2id_cfg( cfg ) ),
- PG_GETARG_DATUM(1),
- PG_GETARG_DATUM(2),
- ( PG_NARGS()>3 ) ? PG_GETARG_DATUM(3) : PointerGetDatum(NULL)
+headline_byname(PG_FUNCTION_ARGS)
+{
+ text *cfg = PG_GETARG_TEXT_P(0);
+
+ Datum out = DirectFunctionCall4(
+ headline,
+ ObjectIdGetDatum(name2id_cfg(cfg)),
+ PG_GETARG_DATUM(1),
+ PG_GETARG_DATUM(2),
+ (PG_NARGS() > 3) ? PG_GETARG_DATUM(3) : PointerGetDatum(NULL)
);
- PG_FREE_IF_COPY(cfg,0);
- PG_RETURN_DATUM(out);
+ PG_FREE_IF_COPY(cfg, 0);
+ PG_RETURN_DATUM(out);
}
PG_FUNCTION_INFO_V1(headline_current);
-Datum headline_current(PG_FUNCTION_ARGS);
+Datum headline_current(PG_FUNCTION_ARGS);
Datum
-headline_current(PG_FUNCTION_ARGS) {
+headline_current(PG_FUNCTION_ARGS)
+{
PG_RETURN_DATUM(DirectFunctionCall4(
- headline,
- ObjectIdGetDatum(get_currcfg()),
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- ( PG_NARGS()>2 ) ? PG_GETARG_DATUM(2) : PointerGetDatum(NULL)
- ));
+ headline,
+ ObjectIdGetDatum(get_currcfg()),
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ (PG_NARGS() > 2) ? PG_GETARG_DATUM(2) : PointerGetDatum(NULL)
+ ));
}
-
-
-
diff --git a/contrib/tsearch2/wparser.h b/contrib/tsearch2/wparser.h
index a8afc564a1a..c3c44151f0e 100644
--- a/contrib/tsearch2/wparser.h
+++ b/contrib/tsearch2/wparser.h
@@ -3,26 +3,28 @@
#include "postgres.h"
#include "fmgr.h"
-typedef struct {
- Oid prs_id;
- FmgrInfo start_info;
- FmgrInfo getlexeme_info;
- FmgrInfo end_info;
- FmgrInfo headline_info;
- Oid lextype;
- void *prs;
-} WParserInfo;
+typedef struct
+{
+ Oid prs_id;
+ FmgrInfo start_info;
+ FmgrInfo getlexeme_info;
+ FmgrInfo end_info;
+ FmgrInfo headline_info;
+ Oid lextype;
+ void *prs;
+} WParserInfo;
-void init_prs(Oid id, WParserInfo *prs);
-WParserInfo* findprs(Oid id);
-Oid name2id_prs(text *name);
-void reset_prs(void);
+void init_prs(Oid id, WParserInfo * prs);
+WParserInfo *findprs(Oid id);
+Oid name2id_prs(text *name);
+void reset_prs(void);
-typedef struct {
- int lexid;
- char *alias;
- char *descr;
-} LexDescr;
+typedef struct
+{
+ int lexid;
+ char *alias;
+ char *descr;
+} LexDescr;
#endif
diff --git a/contrib/tsearch2/wparser_def.c b/contrib/tsearch2/wparser_def.c
index c3b03067600..4680d746b3e 100644
--- a/contrib/tsearch2/wparser_def.c
+++ b/contrib/tsearch2/wparser_def.c
@@ -1,5 +1,5 @@
-/*
- * default word parser
+/*
+ * default word parser
* Teodor Sigaev <teodor@sigaev.ru>
*/
#include <errno.h>
@@ -17,40 +17,44 @@
#include "wordparser/deflex.h"
PG_FUNCTION_INFO_V1(prsd_lextype);
-Datum prsd_lextype(PG_FUNCTION_ARGS);
+Datum prsd_lextype(PG_FUNCTION_ARGS);
+
+Datum
+prsd_lextype(PG_FUNCTION_ARGS)
+{
+ LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (LASTNUM + 1));
+ int i;
+
+ for (i = 1; i <= LASTNUM; i++)
+ {
+ descr[i - 1].lexid = i;
+ descr[i - 1].alias = pstrdup(tok_alias[i]);
+ descr[i - 1].descr = pstrdup(lex_descr[i]);
+ }
-Datum
-prsd_lextype(PG_FUNCTION_ARGS) {
- LexDescr *descr=(LexDescr*)palloc(sizeof(LexDescr)*(LASTNUM+1));
- int i;
+ descr[LASTNUM].lexid = 0;
- for(i=1;i<=LASTNUM;i++) {
- descr[i-1].lexid = i;
- descr[i-1].alias = pstrdup(tok_alias[i]);
- descr[i-1].descr = pstrdup(lex_descr[i]);
- }
-
- descr[LASTNUM].lexid=0;
-
PG_RETURN_POINTER(descr);
}
PG_FUNCTION_INFO_V1(prsd_start);
-Datum prsd_start(PG_FUNCTION_ARGS);
-Datum
-prsd_start(PG_FUNCTION_ARGS) {
- start_parse_str( (char*)PG_GETARG_POINTER(0), PG_GETARG_INT32(1) );
+Datum prsd_start(PG_FUNCTION_ARGS);
+Datum
+prsd_start(PG_FUNCTION_ARGS)
+{
+ start_parse_str((char *) PG_GETARG_POINTER(0), PG_GETARG_INT32(1));
PG_RETURN_POINTER(NULL);
}
PG_FUNCTION_INFO_V1(prsd_getlexeme);
-Datum prsd_getlexeme(PG_FUNCTION_ARGS);
-Datum
-prsd_getlexeme(PG_FUNCTION_ARGS) {
+Datum prsd_getlexeme(PG_FUNCTION_ARGS);
+Datum
+prsd_getlexeme(PG_FUNCTION_ARGS)
+{
/* ParserState *p=(ParserState*)PG_GETARG_POINTER(0); */
- char **t=(char**)PG_GETARG_POINTER(1);
- int *tlen=(int*)PG_GETARG_POINTER(2);
- int type=tsearch2_yylex();
+ char **t = (char **) PG_GETARG_POINTER(1);
+ int *tlen = (int *) PG_GETARG_POINTER(2);
+ int type = tsearch2_yylex();
*t = token;
*tlen = tokenlen;
@@ -58,34 +62,39 @@ prsd_getlexeme(PG_FUNCTION_ARGS) {
}
PG_FUNCTION_INFO_V1(prsd_end);
-Datum prsd_end(PG_FUNCTION_ARGS);
-Datum
-prsd_end(PG_FUNCTION_ARGS) {
+Datum prsd_end(PG_FUNCTION_ARGS);
+Datum
+prsd_end(PG_FUNCTION_ARGS)
+{
/* ParserState *p=(ParserState*)PG_GETARG_POINTER(0); */
end_parse();
PG_RETURN_VOID();
}
#define LEAVETOKEN(x) ( (x)==12 )
-#define COMPLEXTOKEN(x) ( (x)==5 || (x)==15 || (x)==16 || (x)==17 )
-#define ENDPUNCTOKEN(x) ( (x)==12 )
+#define COMPLEXTOKEN(x) ( (x)==5 || (x)==15 || (x)==16 || (x)==17 )
+#define ENDPUNCTOKEN(x) ( (x)==12 )
#define IDIGNORE(x) ( (x)==13 || (x)==14 || (x)==12 || (x)==23 )
#define HLIDIGNORE(x) ( (x)==5 || (x)==13 || (x)==15 || (x)==16 || (x)==17 )
-#define NONWORDTOKEN(x) ( (x)==12 || HLIDIGNORE(x) )
+#define NONWORDTOKEN(x) ( (x)==12 || HLIDIGNORE(x) )
#define NOENDTOKEN(x) ( NONWORDTOKEN(x) || (x)==7 || (x)==8 || (x)==20 || (x)==21 || (x)==22 || IDIGNORE(x) )
-typedef struct {
- HLWORD *words;
- int len;
-} hlCheck;
+typedef struct
+{
+ HLWORD *words;
+ int len;
+} hlCheck;
static bool
-checkcondition_HL(void *checkval, ITEM *val) {
- int i;
- for(i=0;i<((hlCheck*)checkval)->len;i++) {
- if ( ((hlCheck*)checkval)->words[i].item==val )
+checkcondition_HL(void *checkval, ITEM * val)
+{
+ int i;
+
+ for (i = 0; i < ((hlCheck *) checkval)->len; i++)
+ {
+ if (((hlCheck *) checkval)->words[i].item == val)
return true;
}
return false;
@@ -93,21 +102,28 @@ checkcondition_HL(void *checkval, ITEM *val) {
static bool
-hlCover(HLPRSTEXT *prs, QUERYTYPE *query, int *p, int *q) {
- int i,j;
- ITEM *item=GETQUERY(query);
- int pos=*p;
- *q=0;
- *p=0x7fffffff;
-
- for(j=0;j<query->size;j++) {
- if ( item->type != VAL ) {
+hlCover(HLPRSTEXT * prs, QUERYTYPE * query, int *p, int *q)
+{
+ int i,
+ j;
+ ITEM *item = GETQUERY(query);
+ int pos = *p;
+
+ *q = 0;
+ *p = 0x7fffffff;
+
+ for (j = 0; j < query->size; j++)
+ {
+ if (item->type != VAL)
+ {
item++;
continue;
}
- for(i=pos;i<prs->curwords;i++) {
- if ( prs->words[i].item == item ) {
- if ( i>*q)
+ for (i = pos; i < prs->curwords; i++)
+ {
+ if (prs->words[i].item == item)
+ {
+ if (i > *q)
*q = i;
break;
}
@@ -115,32 +131,39 @@ hlCover(HLPRSTEXT *prs, QUERYTYPE *query, int *p, int *q) {
item++;
}
- if ( *q==0 )
+ if (*q == 0)
return false;
- item=GETQUERY(query);
- for(j=0;j<query->size;j++) {
- if ( item->type != VAL ) {
+ item = GETQUERY(query);
+ for (j = 0; j < query->size; j++)
+ {
+ if (item->type != VAL)
+ {
item++;
continue;
}
- for(i=*q;i>=pos;i--) {
- if ( prs->words[i].item == item ) {
- if ( i<*p )
- *p=i;
+ for (i = *q; i >= pos; i--)
+ {
+ if (prs->words[i].item == item)
+ {
+ if (i < *p)
+ *p = i;
break;
}
}
item++;
- }
+ }
+
+ if (*p <= *q)
+ {
+ hlCheck ch = {&(prs->words[*p]), *q - *p + 1};
- if ( *p<=*q ) {
- hlCheck ch={ &(prs->words[*p]), *q-*p+1 };
- if ( TS_execute(GETQUERY(query), &ch, false, checkcondition_HL) ) {
+ if (TS_execute(GETQUERY(query), &ch, false, checkcondition_HL))
return true;
- } else {
+ else
+ {
(*p)++;
- return hlCover(prs,query,p,q);
+ return hlCover(prs, query, p, q);
}
}
@@ -148,45 +171,54 @@ hlCover(HLPRSTEXT *prs, QUERYTYPE *query, int *p, int *q) {
}
PG_FUNCTION_INFO_V1(prsd_headline);
-Datum prsd_headline(PG_FUNCTION_ARGS);
-Datum
-prsd_headline(PG_FUNCTION_ARGS) {
- HLPRSTEXT *prs=(HLPRSTEXT*)PG_GETARG_POINTER(0);
- text *opt=(text*)PG_GETARG_POINTER(1); /* can't be toasted */
- QUERYTYPE *query=(QUERYTYPE*)PG_GETARG_POINTER(2); /* can't be toasted */
+Datum prsd_headline(PG_FUNCTION_ARGS);
+Datum
+prsd_headline(PG_FUNCTION_ARGS)
+{
+ HLPRSTEXT *prs = (HLPRSTEXT *) PG_GETARG_POINTER(0);
+ text *opt = (text *) PG_GETARG_POINTER(1); /* can't be toasted */
+ QUERYTYPE *query = (QUERYTYPE *) PG_GETARG_POINTER(2); /* can't be toasted */
+
/* from opt + start and and tag */
- int min_words=15;
- int max_words=35;
- int shortword=3;
-
- int p=0,q=0;
- int bestb=-1,beste=-1;
- int bestlen=-1;
- int pose=0, poslen, curlen;
-
- int i;
-
- /*config*/
- prs->startsel=NULL;
- prs->stopsel=NULL;
- if ( opt ) {
- Map *map,*mptr;
-
- parse_cfgdict(opt,&map);
- mptr=map;
-
- while(mptr && mptr->key) {
- if ( strcasecmp(mptr->key,"MaxWords")==0 )
- max_words=pg_atoi(mptr->value,4,1);
- else if ( strcasecmp(mptr->key,"MinWords")==0 )
- min_words=pg_atoi(mptr->value,4,1);
- else if ( strcasecmp(mptr->key,"ShortWord")==0 )
- shortword=pg_atoi(mptr->value,4,1);
- else if ( strcasecmp(mptr->key,"StartSel")==0 )
- prs->startsel=pstrdup(mptr->value);
- else if ( strcasecmp(mptr->key,"StopSel")==0 )
- prs->stopsel=pstrdup(mptr->value);
-
+ int min_words = 15;
+ int max_words = 35;
+ int shortword = 3;
+
+ int p = 0,
+ q = 0;
+ int bestb = -1,
+ beste = -1;
+ int bestlen = -1;
+ int pose = 0,
+ poslen,
+ curlen;
+
+ int i;
+
+ /* config */
+ prs->startsel = NULL;
+ prs->stopsel = NULL;
+ if (opt)
+ {
+ Map *map,
+ *mptr;
+
+ parse_cfgdict(opt, &map);
+ mptr = map;
+
+ while (mptr && mptr->key)
+ {
+ if (strcasecmp(mptr->key, "MaxWords") == 0)
+ max_words = pg_atoi(mptr->value, 4, 1);
+ else if (strcasecmp(mptr->key, "MinWords") == 0)
+ min_words = pg_atoi(mptr->value, 4, 1);
+ else if (strcasecmp(mptr->key, "ShortWord") == 0)
+ shortword = pg_atoi(mptr->value, 4, 1);
+ else if (strcasecmp(mptr->key, "StartSel") == 0)
+ prs->startsel = pstrdup(mptr->value);
+ else if (strcasecmp(mptr->key, "StopSel") == 0)
+ prs->stopsel = pstrdup(mptr->value);
+
pfree(mptr->key);
pfree(mptr->value);
@@ -194,104 +226,118 @@ prsd_headline(PG_FUNCTION_ARGS) {
}
pfree(map);
- if ( min_words >= max_words )
+ if (min_words >= max_words)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must be MinWords < MaxWords")));
- if ( min_words<=0 )
+ if (min_words <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must be MinWords > 0")));
- if ( shortword<0 )
+ if (shortword < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must be ShortWord >= 0")));
}
- while( hlCover(prs,query,&p,&q) ) {
+ while (hlCover(prs, query, &p, &q))
+ {
/* find cover len in words */
- curlen=0;
- poslen=0;
- for(i=p;i<=q && curlen < max_words ; i++) {
- if ( !NONWORDTOKEN(prs->words[i].type) )
+ curlen = 0;
+ poslen = 0;
+ for (i = p; i <= q && curlen < max_words; i++)
+ {
+ if (!NONWORDTOKEN(prs->words[i].type))
curlen++;
- if ( prs->words[i].item && !prs->words[i].repeated )
- poslen++;
- pose=i;
+ if (prs->words[i].item && !prs->words[i].repeated)
+ poslen++;
+ pose = i;
}
- if ( poslen<bestlen && !(NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword) ) {
+ if (poslen < bestlen && !(NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword))
+ {
/* best already finded, so try one more cover */
p++;
continue;
}
- if ( curlen < max_words ) { /* find good end */
- for(i=i-1 ;i<prs->curwords && curlen<max_words; i++) {
- if ( i!=q ) {
- if ( !NONWORDTOKEN(prs->words[i].type) )
+ if (curlen < max_words)
+ { /* find good end */
+ for (i = i - 1; i < prs->curwords && curlen < max_words; i++)
+ {
+ if (i != q)
+ {
+ if (!NONWORDTOKEN(prs->words[i].type))
curlen++;
- if ( prs->words[i].item && !prs->words[i].repeated )
+ if (prs->words[i].item && !prs->words[i].repeated)
poslen++;
}
- pose=i;
- if ( NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword )
+ pose = i;
+ if (NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword)
continue;
- if ( curlen>=min_words )
+ if (curlen >= min_words)
break;
}
- } else { /* shorter cover :((( */
- for(;curlen>min_words;i--) {
- if ( !NONWORDTOKEN(prs->words[i].type) )
+ }
+ else
+ { /* shorter cover :((( */
+ for (; curlen > min_words; i--)
+ {
+ if (!NONWORDTOKEN(prs->words[i].type))
curlen--;
- if ( prs->words[i].item && !prs->words[i].repeated )
+ if (prs->words[i].item && !prs->words[i].repeated)
poslen--;
- pose=i;
- if ( NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword )
+ pose = i;
+ if (NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword)
continue;
break;
}
}
- if ( bestlen <0 || (poslen>bestlen && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword)) ||
- ( bestlen>=0 && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword) &&
- (NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword) ) ) {
- bestb=p; beste=pose;
- bestlen=poslen;
- }
+ if (bestlen < 0 || (poslen > bestlen && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword)) ||
+ (bestlen >= 0 && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword) &&
+ (NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword)))
+ {
+ bestb = p;
+ beste = pose;
+ bestlen = poslen;
+ }
p++;
}
- if ( bestlen<0 ) {
- curlen=0;
- poslen=0;
- for(i=0;i<prs->curwords && curlen<min_words ; i++) {
- if ( !NONWORDTOKEN(prs->words[i].type) )
+ if (bestlen < 0)
+ {
+ curlen = 0;
+ poslen = 0;
+ for (i = 0; i < prs->curwords && curlen < min_words; i++)
+ {
+ if (!NONWORDTOKEN(prs->words[i].type))
curlen++;
- pose=i;
+ pose = i;
}
- bestb=0; beste=pose;
+ bestb = 0;
+ beste = pose;
}
- for(i=bestb;i<=beste;i++) {
- if ( prs->words[i].item )
- prs->words[i].selected=1;
- if ( prs->words[i].repeated )
- prs->words[i].skip=1;
- if ( HLIDIGNORE(prs->words[i].type) )
- prs->words[i].replace=1;
+ for (i = bestb; i <= beste; i++)
+ {
+ if (prs->words[i].item)
+ prs->words[i].selected = 1;
+ if (prs->words[i].repeated)
+ prs->words[i].skip = 1;
+ if (HLIDIGNORE(prs->words[i].type))
+ prs->words[i].replace = 1;
- prs->words[i].in=1;
+ prs->words[i].in = 1;
}
if (!prs->startsel)
- prs->startsel=pstrdup("<b>");
+ prs->startsel = pstrdup("<b>");
if (!prs->stopsel)
- prs->stopsel=pstrdup("</b>");
- prs->startsellen=strlen(prs->startsel);
- prs->stopsellen=strlen(prs->stopsel);
+ prs->stopsel = pstrdup("</b>");
+ prs->startsellen = strlen(prs->startsel);
+ prs->stopsellen = strlen(prs->stopsel);
PG_RETURN_POINTER(prs);
}
-
diff --git a/contrib/xml/pgxml_dom.c b/contrib/xml/pgxml_dom.c
index f79183824eb..2b11b1d6468 100644
--- a/contrib/xml/pgxml_dom.c
+++ b/contrib/xml/pgxml_dom.c
@@ -87,10 +87,10 @@ pgxml_parse(PG_FUNCTION_ARGS)
doctree = xmlParseMemory((char *) VARDATA(t), docsize);
if (doctree == NULL)
{
- xmlCleanupParser();
+ xmlCleanupParser();
PG_RETURN_BOOL(false); /* i.e. not well-formed */
}
- xmlCleanupParser();
+ xmlCleanupParser();
xmlFreeDoc(doctree);
PG_RETURN_BOOL(true);
}
@@ -202,8 +202,8 @@ pgxml_xpath(PG_FUNCTION_ARGS)
doctree = xmlParseMemory((char *) VARDATA(t), docsize);
if (doctree == NULL)
- { /* not well-formed */
- xmlCleanupParser();
+ { /* not well-formed */
+ xmlCleanupParser();
PG_RETURN_NULL();
}
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index ae1df582b0e..4f2fd0efd1e 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.85 2003/08/04 00:43:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -617,7 +617,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
td->t_natts = numberOfAttributes;
td->t_hoff = hoff;
- if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+ if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
DataFill((char *) td + hoff,
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index abf25915ab5..d0ee3798086 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.66 2003/08/04 00:43:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
if ((size & INDEX_SIZE_MASK) != size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index tuple requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index tuple requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 61ecdcd7e50..ecee11718d0 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.76 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,13 +24,13 @@
static void printtup_startup(DestReceiver *self, int operation,
- TupleDesc typeinfo);
+ TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_shutdown(DestReceiver *self);
static void printtup_destroy(DestReceiver *self);
@@ -81,8 +81,8 @@ printtup_create_DR(CommandDest dest, Portal portal)
else
{
/*
- * In protocol 2.0 the Bind message does not exist, so there is
- * no way for the columns to have different print formats; it's
+ * In protocol 2.0 the Bind message does not exist, so there is no
+ * way for the columns to have different print formats; it's
* sufficient to look at the first one.
*/
if (portal->formats && portal->formats[0] != 0)
@@ -111,12 +111,13 @@ static void
printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
DR_printtup *myState = (DR_printtup *) self;
- Portal portal = myState->portal;
+ Portal portal = myState->portal;
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
/*
- * Send portal name to frontend (obsolete cruft, gone in proto 3.0)
+ * Send portal name to frontend (obsolete cruft, gone in proto
+ * 3.0)
*
* If portal name not specified, use "blank" portal.
*/
@@ -129,8 +130,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
}
/*
- * If this is a retrieve, and we are supposed to emit row descriptions,
- * then we send back the tuple descriptor of the tuples.
+ * If this is a retrieve, and we are supposed to emit row
+ * descriptions, then we send back the tuple descriptor of the tuples.
*/
if (operation == CMD_SELECT && myState->sendDescrip)
{
@@ -163,7 +164,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
@@ -176,14 +177,14 @@ SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, int16 *formats)
int i;
StringInfoData buf;
- pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
- pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
+ pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
+ pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
for (i = 0; i < natts; ++i)
{
- Oid atttypid = attrs[i]->atttypid;
- int32 atttypmod = attrs[i]->atttypmod;
- Oid basetype;
+ Oid atttypid = attrs[i]->atttypid;
+ int32 atttypmod = attrs[i]->atttypmod;
+ Oid basetype;
pq_sendstring(&buf, NameStr(attrs[i]->attname));
/* column ID info appears in protocol 3.0 and up */
@@ -320,8 +321,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
}
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -347,7 +348,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -424,8 +425,8 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 0);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -536,9 +537,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena);
+
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -547,7 +549,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
value = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typelem),
+ ObjectIdGetDatum(typelem),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -627,8 +629,8 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 1);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -637,7 +639,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d3208e248e1..505fd762565 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.46 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,11 +104,12 @@ gistrescan(PG_FUNCTION_ARGS)
memmove(s->keyData,
key,
s->numberOfKeys * sizeof(ScanKeyData));
+
/*
* Play games here with the scan key to use the Consistent
- * function for all comparisons: 1) the sk_procedure field
- * will now be used to hold the strategy number 2) the
- * sk_func field will point to the Consistent function
+ * function for all comparisons: 1) the sk_procedure field will
+ * now be used to hold the strategy number 2) the sk_func field
+ * will point to the Consistent function
*/
for (i = 0; i < s->numberOfKeys; i++)
{
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index a82b8b32d55..4dd9d9df3ee 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.36 2003/06/22 22:04:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.37 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -60,9 +60,9 @@ hashfloat4(PG_FUNCTION_ARGS)
float4 key = PG_GETARG_FLOAT4(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float4) 0)
PG_RETURN_UINT32(0);
@@ -76,9 +76,9 @@ hashfloat8(PG_FUNCTION_ARGS)
float8 key = PG_GETARG_FLOAT8(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float8) 0)
PG_RETURN_UINT32(0);
@@ -121,9 +121,9 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena,
- * but it seems likely that we may need to do something different
- * in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but
+ * it seems likely that we may need to do something different in non-C
+ * locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
VARSIZE(key) - VARHDRSZ);
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index ed9459feb90..fd7fc158220 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.36 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -205,8 +205,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
if (++splitnum >= NCACHED)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("out of overflow pages in hash index \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
metap->hashm_ovflpoint = splitnum;
metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
metap->hashm_spares[splitnum - 1]--;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a0d191f8a9d..8b4b5590ca9 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.153 2003/08/04 00:43:14 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1132,6 +1132,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
xlhdr.t_natts = tup->t_data->t_natts;
xlhdr.t_infomask = tup->t_data->t_infomask;
xlhdr.t_hoff = tup->t_data->t_hoff;
+
/*
* note we mark rdata[1] as belonging to buffer; if XLogInsert
* decides to write the whole page to the xlog, we don't need to
@@ -1149,9 +1150,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
rdata[2].next = NULL;
/*
- * If this is the single and first tuple on page, we can reinit the
- * page instead of restoring the whole thing. Set flag, and hide
- * buffer references from XLogInsert.
+ * If this is the single and first tuple on page, we can reinit
+ * the page instead of restoring the whole thing. Set flag, and
+ * hide buffer references from XLogInsert.
*/
if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber &&
PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
@@ -1912,7 +1913,7 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)
/*
* The unused-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * that it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buffer;
@@ -1991,9 +1992,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
2 * sizeof(TransactionId));
hsize += 2 * sizeof(TransactionId);
}
+
/*
- * As with insert records, we need not store the rdata[2] segment
- * if we decide to store the whole buffer instead.
+ * As with insert records, we need not store the rdata[2] segment if
+ * we decide to store the whole buffer instead.
*/
rdata[2].buffer = newbuf;
rdata[2].data = (char *) &xlhdr;
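
The heapam.c comments above ("note we mark rdata[1] as belonging to buffer ...", "we need not store the rdata[2] segment if we decide to store the whole buffer instead") describe chaining WAL record data segments and skipping any segment that lives inside a buffer whenever the whole page image is logged. A standalone sketch of that pattern follows; the struct and serialize() routine are simplified stand-ins, not the backend's XLogRecData/XLogInsert machinery:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for a chain of WAL record data segments.  A
     * segment with in_buffer set points into a data page; if the whole
     * page is logged, such segments need not be copied separately. */
    typedef struct RecData
    {
        const void     *data;
        size_t          len;
        int             in_buffer;      /* lives inside the logged page? */
        struct RecData *next;
    } RecData;

    static size_t
    serialize(const RecData *rd, int whole_page_logged, char *out)
    {
        size_t  n = 0;

        for (; rd != NULL; rd = rd->next)
        {
            if (rd->in_buffer && whole_page_logged)
                continue;           /* already covered by the page image */
            memcpy(out + n, rd->data, rd->len);
            n += rd->len;
        }
        return n;
    }

    int
    main(void)
    {
        char    header[8] = "hdr", tupdata[16] = "tuple-body";
        RecData rdata[2];
        char    out[64];

        rdata[0].data = header;   rdata[0].len = sizeof(header);
        rdata[0].in_buffer = 0;   rdata[0].next = &rdata[1];
        rdata[1].data = tupdata;  rdata[1].len = sizeof(tupdata);
        rdata[1].in_buffer = 1;   rdata[1].next = NULL;  /* "belongs to buffer" */

        printf("bytes with page image: %zu, without: %zu\n",
               serialize(rdata, 1, out), serialize(rdata, 0, out));
        return 0;
    }

The second segment contributes nothing when the page image is included, which is the space saving the comment is pointing at.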
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 731c34b3ab6..ee93e8a7222 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.68 2003/08/04 00:43:15 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -300,7 +300,7 @@ index_beginscan(Relation heapRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@@ -394,8 +394,8 @@ index_restrpos(IndexScanDesc scan)
/*
* We do not reset got_tuple; so if the scan is actually being
- * short-circuited by index_getnext, the effective position restoration
- * is done by restoring unique_tuple_pos.
+ * short-circuited by index_getnext, the effective position
+ * restoration is done by restoring unique_tuple_pos.
*/
scan->unique_tuple_pos = scan->unique_tuple_mark;
@@ -427,24 +427,24 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
- * If we already got a tuple and it must be unique, there's no need
- * to make the index AM look through any additional tuples. (This can
+ * If we already got a tuple and it must be unique, there's no need to
+ * make the index AM look through any additional tuples. (This can
* save a useful amount of work in scenarios where there are many dead
* tuples due to heavy update activity.)
*
* To do this we must keep track of the logical scan position
* (before/on/after tuple). Also, we have to be sure to release scan
- * resources before returning NULL; if we fail to do so then a multi-index
- * scan can easily run the system out of free buffers. We can release
- * index-level resources fairly cheaply by calling index_rescan. This
- * means there are two persistent states as far as the index AM is
- * concerned: on-tuple and rescanned. If we are actually asked to
- * re-fetch the single tuple, we have to go through a fresh indexscan
- * startup, which penalizes that (infrequent) case.
+ * resources before returning NULL; if we fail to do so then a
+ * multi-index scan can easily run the system out of free buffers. We
+ * can release index-level resources fairly cheaply by calling
+ * index_rescan. This means there are two persistent states as far as
+ * the index AM is concerned: on-tuple and rescanned. If we are
+ * actually asked to re-fetch the single tuple, we have to go through
+ * a fresh indexscan startup, which penalizes that (infrequent) case.
*/
if (scan->keys_are_unique && scan->got_tuple)
{
- int new_tuple_pos = scan->unique_tuple_pos;
+ int new_tuple_pos = scan->unique_tuple_pos;
if (ScanDirectionIsForward(direction))
{
@@ -459,22 +459,23 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
if (new_tuple_pos == 0)
{
/*
- * We are moving onto the unique tuple from having been off it.
- * We just fall through and let the index AM do the work. Note
- * we should get the right answer regardless of scan direction.
+ * We are moving onto the unique tuple from having been off
+ * it. We just fall through and let the index AM do the work.
+ * Note we should get the right answer regardless of scan
+ * direction.
*/
- scan->unique_tuple_pos = 0; /* need to update position */
+ scan->unique_tuple_pos = 0; /* need to update position */
}
else
{
/*
- * Moving off the tuple; must do amrescan to release index-level
- * pins before we return NULL. Since index_rescan will reset
- * my state, must save and restore...
+ * Moving off the tuple; must do amrescan to release
+ * index-level pins before we return NULL. Since index_rescan
+ * will reset my state, must save and restore...
*/
- int unique_tuple_mark = scan->unique_tuple_mark;
+ int unique_tuple_mark = scan->unique_tuple_mark;
- index_rescan(scan, NULL /* no change to key */);
+ index_rescan(scan, NULL /* no change to key */ );
scan->keys_are_unique = true;
scan->got_tuple = true;
@@ -631,7 +632,7 @@ index_bulk_delete(Relation indexRelation,
*/
IndexBulkDeleteResult *
index_vacuum_cleanup(Relation indexRelation,
- IndexVacuumCleanupInfo *info,
+ IndexVacuumCleanupInfo * info,
IndexBulkDeleteResult *stats)
{
RegProcedure procedure;
@@ -649,7 +650,7 @@ index_vacuum_cleanup(Relation indexRelation,
DatumGetPointer(OidFunctionCall3(procedure,
PointerGetDatum(indexRelation),
PointerGetDatum((Pointer) info),
- PointerGetDatum((Pointer) stats)));
+ PointerGetDatum((Pointer) stats)));
return result;
}
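
The index_getnext() hunk above keeps a logical position relative to the single matching tuple (before / on / after) so a unique-key scan can be short-circuited without asking the index AM for more tuples. A standalone sketch of that bookkeeping; the names and return convention are hypothetical, not the real scan descriptor fields:

    #include <stdio.h>

    /* Logical position of a unique-key scan relative to its one tuple:
     * -1 = before, 0 = on, +1 = after (the before/on/after idea from
     * the hunk above). */
    typedef struct UniqueScanPos
    {
        int pos;
    } UniqueScanPos;

    /* Advance in the given direction, clamping at either end; returns 1
     * when the scan is now on the tuple (caller would return it), 0 when
     * it has run off either end (caller would release resources and
     * return NULL). */
    static int
    unique_scan_step(UniqueScanPos *s, int forward)
    {
        if (forward)
        {
            if (s->pos < 1)
                s->pos++;
        }
        else
        {
            if (s->pos > -1)
                s->pos--;
        }
        return s->pos == 0;
    }

    int
    main(void)
    {
        UniqueScanPos s = {-1};     /* start before the tuple */

        printf("forward: on=%d\n", unique_scan_step(&s, 1));   /* on=1 */
        printf("forward: on=%d\n", unique_scan_step(&s, 1));   /* on=0, past it */
        printf("backward: on=%d\n", unique_scan_step(&s, 0));  /* on=1 again */
        return 0;
    }

As the comment notes, only the transition back onto the tuple needs a fresh indexscan startup in the real code; moving off it just requires releasing index-level resources.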
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index dd8eda99b93..962d7a1822e 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.102 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.103 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -432,9 +432,9 @@ _bt_insertonpg(Relation rel,
*
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan
- * could fail to see our insertion. write locks on intermediate
- * dead pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * could fail to see our insertion. write locks on
+ * intermediate dead pages won't do because we don't know when
+ * they will get de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -523,9 +523,10 @@ _bt_insertonpg(Relation rel,
/*
* If we are doing this insert because we split a page that was
* the only one on its tree level, but was not the root, it may
- * have been the "fast root". We need to ensure that the fast root
- * link points at or above the current page. We can safely acquire
- * a lock on the metapage here --- see comments for _bt_newroot().
+ * have been the "fast root". We need to ensure that the fast
+ * root link points at or above the current page. We can safely
+ * acquire a lock on the metapage here --- see comments for
+ * _bt_newroot().
*/
if (split_only_page)
{
@@ -1135,7 +1136,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1155,19 +1156,19 @@ _bt_insert_parent(Relation rel,
bool is_only)
{
/*
- * Here we have to do something Lehman and Yao don't talk about:
- * deal with a root split and construction of a new root. If our
- * stack is empty then we have just split a node on what had been
- * the root level when we descended the tree. If it was still the
- * root then we perform a new-root construction. If it *wasn't*
- * the root anymore, search to find the next higher level that
- * someone constructed meanwhile, and find the right place to insert
- * as for the normal case.
+ * Here we have to do something Lehman and Yao don't talk about: deal
+ * with a root split and construction of a new root. If our stack is
+ * empty then we have just split a node on what had been the root
+ * level when we descended the tree. If it was still the root then we
+ * perform a new-root construction. If it *wasn't* the root anymore,
+ * search to find the next higher level that someone constructed
+ * meanwhile, and find the right place to insert as for the normal
+ * case.
*
- * If we have to search for the parent level, we do so by
- * re-descending from the root. This is not super-efficient,
- * but it's rare enough not to matter. (This path is also taken
- * when called from WAL recovery --- we have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough
+ * not to matter. (This path is also taken when called from WAL
+ * recovery --- we have no stack in that case.)
*/
if (is_root)
{
@@ -1222,9 +1223,9 @@ _bt_insert_parent(Relation rel,
/*
* Find the parent buffer and get the parent page.
*
- * Oops - if we were moved right then we need to change stack
- * item! We want to find parent pointing to where we are,
- * right ? - vadim 05/27/97
+ * Oops - if we were moved right then we need to change stack item!
+ * We want to find parent pointing to where we are, right ? -
+ * vadim 05/27/97
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -1296,16 +1297,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* start = InvalidOffsetNumber means "search the whole page".
- * We need this test anyway due to possibility that
- * page has a high key now when it didn't before.
+ * We need this test anyway due to possibility that page has a
+ * high key now when it didn't before.
*/
if (start < minoff)
start = minoff;
/*
* These loops will check every item on the page --- but in an
- * order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * order that's attuned to the probability of where it
+ * actually is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 33f85cd59a6..ace06f0a250 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.67 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -181,8 +181,8 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
- * page and start all over again. (Is that really true?
- * But it's hardly worth trying to optimize this case.)
+ * page and start all over again. (Is that really true? But
+ * it's hardly worth trying to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
return _bt_getroot(rel, access);
@@ -190,8 +190,8 @@ _bt_getroot(Relation rel, int access)
/*
* Get, initialize, write, and leave a lock of the appropriate
- * type on the new root page. Since this is the first page in
- * the tree, it's a leaf as well as the root.
+ * type on the new root page. Since this is the first page in the
+ * tree, it's a leaf as well as the root.
*/
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
rootblkno = BufferGetBlockNumber(rootbuf);
@@ -240,7 +240,7 @@ _bt_getroot(Relation rel, int access)
_bt_wrtnorelbuf(rel, rootbuf);
/*
- * swap root write lock for read lock. There is no danger of
+ * swap root write lock for read lock. There is no danger of
* anyone else accessing the new root page while it's unlocked,
* since no one else knows where it is yet.
*/
@@ -284,8 +284,8 @@ _bt_getroot(Relation rel, int access)
}
/*
- * By here, we have a pin and read lock on the root page, and no
- * lock set on the metadata page. Return the root page's buffer.
+ * By here, we have a pin and read lock on the root page, and no lock
+ * set on the metadata page. Return the root page's buffer.
*/
return rootbuf;
}
@@ -299,7 +299,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -406,9 +406,9 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check
- * that the page is still free. (For example, an already-free page
- * could have been re-used between the time the last VACUUM scanned
- * it and the time the VACUUM made its FSM updates.)
+ * that the page is still free. (For example, an already-free
+ * page could have been re-used between the time the last VACUUM
+ * scanned it and the time the VACUUM made its FSM updates.)
*/
for (;;)
{
@@ -431,10 +431,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* Extend the relation by one page.
*
- * We have to use a lock to ensure no one else is extending the rel at
- * the same time, else we will both try to initialize the same new
- * page. We can skip locking for new or temp relations, however,
- * since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel
+ * at the same time, else we will both try to initialize the same
+ * new page. We can skip locking for new or temp relations,
+ * however, since no one else could be accessing them.
*/
needLock = !(rel->rd_isnew || rel->rd_istemp);
@@ -444,8 +444,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
buf = ReadBuffer(rel, P_NEW);
/*
- * Release the file-extension lock; it's now OK for someone else to
- * extend the relation some more.
+ * Release the file-extension lock; it's now OK for someone else
+ * to extend the relation some more.
*/
if (needLock)
UnlockPage(rel, 0, ExclusiveLock);
@@ -484,7 +484,7 @@ _bt_relbuf(Relation rel, Buffer buf)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here; the real I/O happens later. This is okay since we are not
+ * dirty here; the real I/O happens later. This is okay since we are not
* relying on write ordering anyway. The WAL mechanism is responsible for
* guaranteeing correctness after a crash.
*/
@@ -534,13 +534,14 @@ _bt_page_recyclable(Page page)
BTPageOpaque opaque;
/*
- * It's possible to find an all-zeroes page in an index --- for example,
- * a backend might successfully extend the relation one page and then
- * crash before it is able to make a WAL entry for adding the page.
- * If we find a zeroed page then reclaim it.
+ * It's possible to find an all-zeroes page in an index --- for
+ * example, a backend might successfully extend the relation one page
+ * and then crash before it is able to make a WAL entry for adding the
+ * page. If we find a zeroed page then reclaim it.
*/
if (PageIsNew(page))
return true;
+
/*
* Otherwise, recycle if deleted and too old to have any processes
* interested in it.
@@ -565,7 +566,7 @@ _bt_page_recyclable(Page page)
* mistake. On exit, metapage data is correct and we no longer have
* a pin or lock on the metapage.
*
- * Actually this is not used for splitting on-the-fly anymore. It's only used
+ * Actually this is not used for splitting on-the-fly anymore. It's only used
* in nbtsort.c at the completion of btree building, where we know we have
* sole access to the index anyway.
*/
@@ -623,7 +624,7 @@ _bt_metaproot(Relation rel, BlockNumber rootbknum, uint32 level)
/*
* Delete item(s) from a btree page.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -646,9 +647,7 @@ _bt_delitems(Relation rel, Buffer buf,
* adjusting item numbers for previous deletions.
*/
for (i = nitems - 1; i >= 0; i--)
- {
PageIndexTupleDelete(page, itemnos[i]);
- }
/* XLOG stuff */
if (!rel->rd_istemp)
@@ -666,8 +665,8 @@ _bt_delitems(Relation rel, Buffer buf,
rdata[0].next = &(rdata[1]);
/*
- * The target-offsets array is not in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * The target-offsets array is not in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buf;
@@ -701,7 +700,7 @@ _bt_delitems(Relation rel, Buffer buf,
* may currently be trying to follow links leading to the page; they have to
* be allowed to use its right-link to recover. See nbtree/README.
*
- * On entry, the target buffer must be pinned and read-locked. This lock and
+ * On entry, the target buffer must be pinned and read-locked. This lock and
* pin will be dropped before exiting.
*
* Returns the number of pages successfully deleted (zero on failure; could
@@ -714,7 +713,7 @@ _bt_delitems(Relation rel, Buffer buf,
int
_bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
{
- BlockNumber target,
+ BlockNumber target,
leftsib,
rightsib,
parent;
@@ -740,17 +739,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it,
+ * We can never delete rightmost pages nor root pages. While at it,
* check that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
return 0;
}
+
/*
* Save info about page, including a copy of its high key (it must
* have one, being non-rightmost).
@@ -760,12 +760,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
leftsib = opaque->btpo_prev;
itemid = PageGetItemId(page, P_HIKEY);
targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
+
/*
* We need to get an approximate pointer to the page's parent page.
- * Use the standard search mechanism to search for the page's high key;
- * this will give us a link to either the current parent or someplace
- * to its left (if there are multiple equal high keys). To avoid
- * deadlocks, we'd better drop the target page lock first.
+ * Use the standard search mechanism to search for the page's high
+ * key; this will give us a link to either the current parent or
+ * someplace to its left (if there are multiple equal high keys). To
+ * avoid deadlocks, we'd better drop the target page lock first.
*/
_bt_relbuf(rel, buf);
/* we need a scan key to do our search, so build one */
@@ -775,9 +776,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
&lbuf, BT_READ);
/* don't need a pin on that either */
_bt_relbuf(rel, lbuf);
+
/*
* If we are trying to delete an interior page, _bt_search did more
- * than we needed. Locate the stack item pointing to our parent level.
+ * than we needed. Locate the stack item pointing to our parent
+ * level.
*/
ilevel = 0;
for (;;)
@@ -789,10 +792,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
stack = stack->bts_parent;
ilevel++;
}
+
/*
* We have to lock the pages we need to modify in the standard order:
- * moving right, then up. Else we will deadlock against other writers.
- *
+ * moving right, then up. Else we will deadlock against other
+ * writers.
+ *
* So, we need to find and write-lock the current left sibling of the
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if
@@ -823,21 +828,24 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
lbuf = InvalidBuffer;
+
/*
- * Next write-lock the target page itself. It should be okay to take just
- * a write lock not a superexclusive lock, since no scans would stop on an
- * empty page.
+ * Next write-lock the target page itself. It should be okay to take
+ * just a write lock not a superexclusive lock, since no scans would
+ * stop on an empty page.
*/
buf = _bt_getbuf(rel, target, BT_WRITE);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * Check page is still empty etc, else abandon deletion. The empty check
- * is necessary since someone else might have inserted into it while
- * we didn't have it locked; the others are just for paranoia's sake.
+ * Check page is still empty etc, else abandon deletion. The empty
+ * check is necessary since someone else might have inserted into it
+ * while we didn't have it locked; the others are just for paranoia's
+ * sake.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
if (BufferIsValid(lbuf))
@@ -846,14 +854,17 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
if (opaque->btpo_prev != leftsib)
elog(ERROR, "left link changed unexpectedly");
+
/*
* And next write-lock the (current) right sibling.
*/
rightsib = opaque->btpo_next;
rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
+
/*
* Next find and write-lock the current parent of the target page.
- * This is essentially the same as the corresponding step of splitting.
+ * This is essentially the same as the corresponding step of
+ * splitting.
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
target, P_HIKEY);
@@ -863,10 +874,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
RelationGetRelationName(rel));
parent = stack->bts_blkno;
poffset = stack->bts_offset;
+
/*
* If the target is the rightmost child of its parent, then we can't
- * delete, unless it's also the only child --- in which case the parent
- * changes to half-dead status.
+ * delete, unless it's also the only child --- in which case the
+ * parent changes to half-dead status.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -893,12 +905,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
parent_one_child = true;
}
+
/*
* If we are deleting the next-to-last page on the target's level,
- * then the rightsib is a candidate to become the new fast root.
- * (In theory, it might be possible to push the fast root even further
- * down, but the odds of doing so are slim, and the locking considerations
- * daunting.)
+ * then the rightsib is a candidate to become the new fast root. (In
+ * theory, it might be possible to push the fast root even further
+ * down, but the odds of doing so are slim, and the locking
+ * considerations daunting.)
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -914,12 +927,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
+
/*
* The expected case here is btm_fastlevel == targetlevel+1;
- * if the fastlevel is <= targetlevel, something is wrong, and we
- * choose to overwrite it to fix it.
+ * if the fastlevel is <= targetlevel, something is wrong, and
+ * we choose to overwrite it to fix it.
*/
- if (metad->btm_fastlevel > targetlevel+1)
+ if (metad->btm_fastlevel > targetlevel + 1)
{
/* no update wanted */
_bt_relbuf(rel, metabuf);
@@ -937,9 +951,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* Update parent. The normal case is a tad tricky because we want to
- * delete the target's downlink and the *following* key. Easiest way is
- * to copy the right sibling's downlink over the target downlink, and then
- * delete the following item.
+ * delete the target's downlink and the *following* key. Easiest way
+ * is to copy the right sibling's downlink over the target downlink,
+ * and then delete the following item.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -950,7 +964,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
{
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -968,8 +982,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * Update siblings' side-links. Note the target page's side-links will
- * continue to point to the siblings.
+ * Update siblings' side-links. Note the target page's side-links
+ * will continue to point to the siblings.
*/
if (BufferIsValid(lbuf))
{
@@ -1096,10 +1110,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
_bt_wrtbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to try to delete it. Otherwise,
- * if right sibling is empty and is now the last child of the parent,
- * recurse to try to delete it. (These cases cannot apply at the same
- * time, though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to try to delete it.
+ * Otherwise, if right sibling is empty and is now the last child of
+ * the parent, recurse to try to delete it. (These cases cannot apply
+ * at the same time, though the second case might itself recurse to
+ * the first.)
*/
if (parent_half_dead)
{
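
Several of the nbtpage.c hunks above rest on the rule _bt_pagedel spells out: lock pages "moving right, then up", i.e. every writer acquires locks in one agreed-upon global order so that no cycle of waiters can form. A standalone pthreads sketch of that principle only; the numeric ordering used here is illustrative, not the btree's actual positional rule:

    #include <pthread.h>
    #include <stdio.h>

    /* Two "pages", each protected by its own lock. */
    static pthread_mutex_t page_lock[2] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    /* Acquire two page locks in a fixed global order (lower number
     * first).  If every writer follows the same order, no deadlock is
     * possible -- the property the "right, then up" rule provides for
     * page deletion and splitting. */
    static void
    lock_pair(int a, int b)
    {
        int lo = a < b ? a : b;
        int hi = a < b ? b : a;

        pthread_mutex_lock(&page_lock[lo]);
        pthread_mutex_lock(&page_lock[hi]);
    }

    static void
    unlock_pair(int a, int b)
    {
        pthread_mutex_unlock(&page_lock[a]);
        pthread_mutex_unlock(&page_lock[b]);
    }

    static void *
    worker(void *arg)
    {
        int first = *(int *) arg;

        lock_pair(first, 1 - first);    /* each thread asks in a different order */
        unlock_pair(first, 1 - first);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t   t[2];
        int         ids[2] = {0, 1};

        pthread_create(&t[0], NULL, worker, &ids[0]);
        pthread_create(&t[1], NULL, worker, &ids[1]);
        pthread_join(t[0], NULL);
        pthread_join(t[1], NULL);
        puts("no deadlock: both writers locked pages in the same order");
        return 0;
    }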
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 3c814725fef..7d0dea4e788 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.104 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -580,19 +580,20 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* The outer loop iterates over index leaf pages, the inner over items
- * on a leaf page. We issue just one _bt_delitems() call per page,
- * so as to minimize WAL traffic.
+ * on a leaf page. We issue just one _bt_delitems() call per page, so
+ * as to minimize WAL traffic.
*
- * Note that we exclusive-lock every leaf page containing data items,
- * in sequence left to right. It sounds attractive to only exclusive-lock
- * those containing items we need to delete, but unfortunately that
- * is not safe: we could then pass a stopped indexscan, which could
- * in rare cases lead to deleting the item it needs to find when it
- * resumes. (See _bt_restscan --- this could only happen if an indexscan
- * stops on a deletable item and then a page split moves that item
- * into a page further to its right, which the indexscan will have no
- * pin on.) We can skip obtaining exclusive lock on empty pages
- * though, since no indexscan could be stopped on those.
+ * Note that we exclusive-lock every leaf page containing data items, in
+ * sequence left to right. It sounds attractive to only
+ * exclusive-lock those containing items we need to delete, but
+ * unfortunately that is not safe: we could then pass a stopped
+ * indexscan, which could in rare cases lead to deleting the item it
+ * needs to find when it resumes. (See _bt_restscan --- this could
+ * only happen if an indexscan stops on a deletable item and then a
+ * page split moves that item into a page further to its right, which
+ * the indexscan will have no pin on.) We can skip obtaining
+ * exclusive lock on empty pages though, since no indexscan could be
+ * stopped on those.
*/
buf = _bt_get_endpoint(rel, 0, false);
if (BufferIsValid(buf)) /* check for empty index */
@@ -604,7 +605,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
OffsetNumber offnum,
minoff,
maxoff;
- BlockNumber nextpage;
+ BlockNumber nextpage;
CHECK_FOR_INTERRUPTS();
@@ -622,12 +623,14 @@ btbulkdelete(PG_FUNCTION_ARGS)
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
+
/*
- * Recompute minoff/maxoff, both of which could have changed
- * while we weren't holding the lock.
+ * Recompute minoff/maxoff, both of which could have
+ * changed while we weren't holding the lock.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
+
/*
* Scan over all items to see which ones need deleted
* according to the callback function.
@@ -640,7 +643,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
ItemPointer htup;
btitem = (BTItem) PageGetItem(page,
- PageGetItemId(page, offnum));
+ PageGetItemId(page, offnum));
htup = &(btitem->bti_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -651,6 +654,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples += 1;
}
}
+
/*
* If we need to delete anything, do it and write the buffer;
* else just release the buffer.
@@ -662,9 +666,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
_bt_wrtbuf(rel, buf);
}
else
- {
_bt_relbuf(rel, buf);
- }
/* And advance to next page, if any */
if (nextpage == P_NONE)
break;
@@ -712,7 +714,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/* No point in remembering more than MaxFSMPages pages */
maxFreePages = MaxFSMPages;
if ((BlockNumber) maxFreePages > num_pages)
- maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
+ maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
freePages = (BlockNumber *) palloc(maxFreePages * sizeof(BlockNumber));
nFreePages = 0;
@@ -728,10 +730,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* after we start the scan will not be examined; this should be fine,
* since they can't possibly be empty.)
*/
- for (blkno = BTREE_METAPAGE+1; blkno < num_pages; blkno++)
+ for (blkno = BTREE_METAPAGE + 1; blkno < num_pages; blkno++)
{
- Buffer buf;
- Page page;
+ Buffer buf;
+ Page page;
BTPageOpaque opaque;
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -753,7 +755,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page))
{
/* Empty, try to delete */
- int ndel;
+ int ndel;
/* Run pagedel in a temp context to avoid memory leakage */
MemoryContextReset(mycontext);
@@ -768,7 +770,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During VACUUM FULL it's okay to recycle deleted pages
* immediately, since there can be no other transactions
- * scanning the index. Note that we will only recycle the
+ * scanning the index. Note that we will only recycle the
* current page and not any parent pages that _bt_pagedel
* might have recursed to; this seems reasonable in the name
* of simplicity. (Trying to do otherwise would mean we'd
@@ -787,16 +789,16 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
}
/*
- * During VACUUM FULL, we truncate off any recyclable pages at the
- * end of the index. In a normal vacuum it'd be unsafe to do this
- * except by acquiring exclusive lock on the index and then rechecking
- * all the pages; doesn't seem worth it.
+ * During VACUUM FULL, we truncate off any recyclable pages at the end
+ * of the index. In a normal vacuum it'd be unsafe to do this except
+ * by acquiring exclusive lock on the index and then rechecking all
+ * the pages; doesn't seem worth it.
*/
if (info->vacuum_full && nFreePages > 0)
{
- BlockNumber new_pages = num_pages;
+ BlockNumber new_pages = num_pages;
- while (nFreePages > 0 && freePages[nFreePages-1] == new_pages-1)
+ while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
{
new_pages--;
pages_deleted--;
@@ -810,9 +812,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Okay to truncate.
*
* First, flush any shared buffers for the blocks we intend to
- * delete. FlushRelationBuffers is a bit more than we need for
- * this, since it will also write out dirty buffers for blocks we
- * aren't deleting, but it's the closest thing in bufmgr's API.
+ * delete. FlushRelationBuffers is a bit more than we need
+ * for this, since it will also write out dirty buffers for
+ * blocks we aren't deleting, but it's the closest thing in
+ * bufmgr's API.
*/
i = FlushRelationBuffers(rel, new_pages);
if (i < 0)
@@ -822,7 +825,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Do the physical truncation.
*/
new_pages = smgrtruncate(DEFAULT_SMGR, rel, new_pages);
- rel->rd_nblocks = new_pages; /* update relcache immediately */
+ rel->rd_nblocks = new_pages; /* update relcache
+ * immediately */
rel->rd_targblock = InvalidBlockNumber;
num_pages = new_pages;
}
@@ -856,7 +860,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* and so no deletion can have occurred on that page.
*
* On entry, we have a pin but no read lock on the buffer that contained
- * the index tuple we stopped the scan on. On exit, we have pin and read
+ * the index tuple we stopped the scan on. On exit, we have pin and read
* lock on the buffer that now contains that index tuple, and the scandesc's
* current position is updated to point at it.
*/
@@ -877,8 +881,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Reacquire read lock on the buffer. (We should still have
- * a reference-count pin on it, so need not get that.)
+ * Reacquire read lock on the buffer. (We should still have a
+ * reference-count pin on it, so need not get that.)
*/
LockBuffer(buf, BT_READ);
@@ -921,11 +925,11 @@ _bt_restscan(IndexScanDesc scan)
/*
* The item we're looking for moved right at least one page, so
- * move right. We are careful here to pin and read-lock the next
- * non-dead page before releasing the current one. This ensures that
- * a concurrent btbulkdelete scan cannot pass our position --- if it
- * did, it might be able to reach and delete our target item before
- * we can find it again.
+ * move right. We are careful here to pin and read-lock the next
+ * non-dead page before releasing the current one. This ensures
+ * that a concurrent btbulkdelete scan cannot pass our position
+ * --- if it did, it might be able to reach and delete our target
+ * item before we can find it again.
*/
if (P_RIGHTMOST(opaque))
elog(ERROR, "failed to re-find previous key in \"%s\"",
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 457914adf73..80abe195cea 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.77 2003/07/29 22:18:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.78 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
/*
* Race -- the page we just grabbed may have split since we read
- * its pointer in the parent (or metapage). If it has, we may need
- * to move right to its new sibling. Do that.
+ * its pointer in the parent (or metapage). If it has, we may
+ * need to move right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@@ -87,14 +87,14 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the location of the index entry we chose in
- * the parent page on a stack. In case we split the tree, we'll
- * use the stack to work back up to the parent page. We also save
- * the actual downlink (TID) to uniquely identify the index entry,
- * in case it moves right while we're working lower in the
- * tree. See the paper by Lehman and Yao for how this is detected
- * and handled. (We use the child link to disambiguate duplicate
- * keys in the index -- Lehman and Yao disallow duplicate keys.)
+ * We need to save the location of the index entry we chose in the
+ * parent page on a stack. In case we split the tree, we'll use
+ * the stack to work back up to the parent page. We also save the
+ * actual downlink (TID) to uniquely identify the index entry, in
+ * case it moves right while we're working lower in the tree. See
+ * the paper by Lehman and Yao for how this is detected and
+ * handled. (We use the child link to disambiguate duplicate keys
+ * in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -151,8 +151,8 @@ _bt_moveright(Relation rel,
* might not need to move right; have to scan the page first anyway.)
* It could even have split more than once, so scan as far as needed.
*
- * We also have to move right if we followed a link that brought us to
- * a dead page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
while (!P_RIGHTMOST(opaque) &&
(P_IGNORE(opaque) ||
@@ -599,8 +599,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* At this point we are positioned at the first item >= scan key, or
* possibly at the end of a page on which all the existing items are
- * less than the scan key and we know that everything on later
- * pages is greater than or equal to scan key.
+ * less than the scan key and we know that everything on later pages
+ * is greater than or equal to scan key.
*
* We could step forward in the latter case, but that'd be a waste of
* time if we want to scan backwards. So, it's now time to examine
@@ -851,7 +851,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
}
}
- else /* backwards scan */
+ else
+/* backwards scan */
{
if (offnum > P_FIRSTDATAKEY(opaque))
offnum = OffsetNumberPrev(offnum);
@@ -860,9 +861,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* Walk left to the next page with data. This is much more
* complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * that the page to our left splits while we are in flight to
+ * it, plus the possibility that the page we were on gets
+ * deleted after we leave it. See nbtree/README for details.
*/
for (;;)
{
@@ -877,10 +878,11 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and not empty. Else loop back
- * and do it all again.
+ * Done if it's not half-dead and not empty. Else loop
+ * back and do it all again.
*/
if (!P_IGNORE(opaque))
{
@@ -946,17 +948,18 @@ _bt_walk_left(Relation rel, Buffer buf)
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If this isn't the page we want, walk right till we find
- * what we want --- but go no more than four hops (an
- * arbitrary limit). If we don't find the correct page by then,
- * the most likely bet is that the original page got deleted
- * and isn't in the sibling chain at all anymore, not that its
- * left sibling got split more than four times.
+ * If this isn't the page we want, walk right till we find what we
+ * want --- but go no more than four hops (an arbitrary limit).
+ * If we don't find the correct page by then, the most likely bet
+ * is that the original page got deleted and isn't in the sibling
+ * chain at all anymore, not that its left sibling got split more
+ * than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE
- * here, because half-dead pages are still in the sibling
- * chain. Caller must reject half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
@@ -983,8 +986,8 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page
- * (there must be one); that is the page that has acquired the
+ * It was deleted. Move right to first nondeleted page (there
+ * must be one); that is the page that has acquired the
* deleted one's keyspace, so stepping left from it will take
* us where we want to be.
*/
@@ -1001,18 +1004,18 @@ _bt_walk_left(Relation rel, Buffer buf)
if (!P_ISDELETED(opaque))
break;
}
+
/*
- * Now return to top of loop, resetting obknum to
- * point to this nondeleted page, and try again.
+ * Now return to top of loop, resetting obknum to point to
+ * this nondeleted page, and try again.
*/
}
else
{
/*
- * It wasn't deleted; the explanation had better be
- * that the page to the left got split or deleted.
- * Without this check, we'd go into an infinite loop
- * if there's anything wrong.
+ * It wasn't deleted; the explanation had better be that the
+ * page to the left got split or deleted. Without this check,
+ * we'd go into an infinite loop if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
elog(ERROR, "could not find left sibling in \"%s\"",
@@ -1028,7 +1031,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
@@ -1045,8 +1048,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
/*
* If we are looking for a leaf page, okay to descend from fast root;
- * otherwise better descend from true root. (There is no point in being
- * smarter about intermediate levels.)
+ * otherwise better descend from true root. (There is no point in
+ * being smarter about intermediate levels.)
*/
if (level == 0)
buf = _bt_getroot(rel, BT_READ);
@@ -1066,9 +1069,9 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
{
/*
* If we landed on a deleted page, step right to find a live page
- * (there must be one). Also, if we want the rightmost page,
- * step right if needed to get to it (this could happen if the
- * page split since we obtained a pointer to it).
+ * (there must be one). Also, if we want the rightmost page, step
+ * right if needed to get to it (this could happen if the page
+ * split since we obtained a pointer to it).
*/
while (P_IGNORE(opaque) ||
(rightmost && !P_RIGHTMOST(opaque)))
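
The _bt_walk_left() comments above cap the "walk right until we find the page whose right link points back at us" search at four hops, on the theory that needing more hops means the original page was deleted rather than that its left sibling split that many times. A standalone sketch of a bounded walk over a right-sibling chain (the list structure and constants are illustrative):

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal right-sibling chain: each "page" knows its block number
     * and its right neighbour. */
    typedef struct Page
    {
        unsigned     blkno;
        struct Page *right;
    } Page;

    /* Walk right from 'start' looking for the page whose right link is
     * 'target_blkno', giving up after at most 'max_hops' hops, like the
     * four-hop limit in _bt_walk_left().  Returns NULL when the search
     * is abandoned. */
    static Page *
    walk_right_bounded(Page *start, unsigned target_blkno, int max_hops)
    {
        Page   *p = start;
        int     hops;

        for (hops = 0; p != NULL && hops <= max_hops; hops++)
        {
            if (p->right != NULL && p->right->blkno == target_blkno)
                return p;
            p = p->right;
        }
        return NULL;            /* most likely: the target page was deleted */
    }

    int
    main(void)
    {
        Page    d = {40, NULL};
        Page    c = {30, &d};
        Page    b = {20, &c};
        Page    a = {10, &b};

        Page   *found = walk_right_bounded(&a, 30, 4);

        printf("left sibling of 30 is %u\n", found ? found->blkno : 0);
        printf("search for 99: %s\n",
               walk_right_bounded(&a, 99, 4) ? "found" : "abandoned");
        return 0;
    }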
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 92a73021f66..f8eb671df71 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.74 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef struct BTPageState
static void _bt_blnewpage(Relation index, Buffer *buf, Page *page,
- uint32 level);
+ uint32 level);
static BTPageState *_bt_pagestate(Relation index, uint32 level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
@@ -469,7 +469,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
oopaque->btpo_next = BufferGetBlockNumber(nbuf);
nopaque->btpo_prev = BufferGetBlockNumber(obuf);
- nopaque->btpo_next = P_NONE; /* redundant */
+ nopaque->btpo_next = P_NONE; /* redundant */
}
/*
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index a1a52571fe1..35e5ae6ccb8 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.3 2003/02/23 22:43:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,10 +29,10 @@
typedef struct bt_incomplete_split
{
RelFileNode node; /* the index */
- BlockNumber leftblk; /* left half of split */
- BlockNumber rightblk; /* right half of split */
+ BlockNumber leftblk; /* left half of split */
+ BlockNumber rightblk; /* right half of split */
bool is_root; /* we split the root */
-} bt_incomplete_split;
+} bt_incomplete_split;
static List *incomplete_splits;
@@ -107,7 +107,7 @@ _bt_restore_page(Page page, char *from, int len)
}
static void
-_bt_restore_meta(Relation reln, XLogRecPtr lsn,
+_bt_restore_meta(Relation reln, XLogRecPtr lsn,
BlockNumber root, uint32 level,
BlockNumber fastroot, uint32 fastlevel)
{
@@ -172,7 +172,7 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (!redo || !(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -183,13 +183,11 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo)
{
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
@@ -204,13 +202,9 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
elog(PANIC, "btree_insert_undo: bad page LSN");
if (!P_ISLEAF(pageop))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
- {
elog(PANIC, "btree_insert_undo: unimplemented");
- }
}
}
@@ -226,8 +220,8 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo && !isleaf && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
}
@@ -238,9 +232,9 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
{
xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln;
- BlockNumber targetblk;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber targetblk;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -338,9 +332,7 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
elog(PANIC, "btree_split_redo: uninitialized next right page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -357,8 +349,8 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
if (redo && xlrec->level > 0 && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
@@ -422,10 +414,10 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) XLogRecGetData(record);
Relation reln;
- BlockNumber parent;
- BlockNumber target;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber parent;
+ BlockNumber target;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -451,9 +443,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized parent page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
OffsetNumber poffset;
@@ -469,7 +459,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
ItemId itemid;
BTItem btitem;
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -494,9 +484,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized right sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -520,9 +508,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized left sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -799,116 +785,116 @@ btree_desc(char *buf, uint8 xl_info, char *rec)
switch (info)
{
case XLOG_BTREE_INSERT_LEAF:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_UPPER:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_upper: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_upper: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_META:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_meta: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_meta: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_SPLIT_L:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_L_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_DELETE:
- {
- xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+ {
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
- sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
- break;
- }
+ sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ break;
+ }
case XLOG_BTREE_DELETE_PAGE:
case XLOG_BTREE_DELETE_PAGE_META:
- {
- xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
+ {
+ xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
- strcat(buf, "delete_page: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "delete_page: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_NEWROOT:
- {
- xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+ {
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
- sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
- break;
- }
+ sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
+ break;
+ }
case XLOG_BTREE_NEWMETA:
- {
- xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
+ {
+ xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
- sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->meta.root, xlrec->meta.level,
- xlrec->meta.fastroot, xlrec->meta.fastlevel);
- break;
- }
+ sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->meta.root, xlrec->meta.level,
+ xlrec->meta.fastroot, xlrec->meta.fastlevel);
+ break;
+ }
case XLOG_BTREE_NEWPAGE:
- {
- xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
+ {
+ xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
- sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->blkno);
- break;
- }
+ sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->blkno);
+ break;
+ }
default:
strcat(buf, "UNKNOWN");
break;
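
The nbtxlog.c hunks above show WAL replay keeping a list of incomplete splits (bt_incomplete_split) and calling forget_matching_split() once the corresponding parent-level insert has been replayed. A standalone sketch of that "remember, then forget on match" bookkeeping; the linked list and matching rule here are simplified stand-ins for the backend's List machinery:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified record of a split whose parent-level insert has not
     * been replayed yet (cf. bt_incomplete_split above). */
    typedef struct IncompleteSplit
    {
        unsigned                leftblk;
        unsigned                rightblk;
        int                     is_root;
        struct IncompleteSplit *next;
    } IncompleteSplit;

    static IncompleteSplit *incomplete_splits = NULL;

    static void
    log_incomplete_split(unsigned leftblk, unsigned rightblk, int is_root)
    {
        IncompleteSplit *s = malloc(sizeof(*s));

        s->leftblk = leftblk;
        s->rightblk = rightblk;
        s->is_root = is_root;
        s->next = incomplete_splits;
        incomplete_splits = s;
    }

    /* Called when a parent-level insert (or new root) is replayed: drop
     * the matching entry so end-of-recovery cleanup won't redo the work. */
    static void
    forget_matching_split(unsigned downlink_blkno)
    {
        IncompleteSplit **prev = &incomplete_splits;
        IncompleteSplit  *s;

        for (s = incomplete_splits; s != NULL; prev = &s->next, s = s->next)
        {
            if (s->leftblk == downlink_blkno || s->rightblk == downlink_blkno)
            {
                *prev = s->next;
                free(s);
                return;
            }
        }
    }

    int
    main(void)
    {
        log_incomplete_split(5, 6, 0);
        log_incomplete_split(8, 9, 0);
        forget_matching_split(6);   /* parent insert for the 5/6 split replayed */
        printf("still incomplete: %s\n",
               incomplete_splits && incomplete_splits->leftblk == 8 ? "8/9 split" : "none");
        return 0;
    }

Anything still on the list at the end of recovery is a split whose parent insert never made it to the log, which is what the cleanup hook has to finish.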
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 6358d622e1f..4362835d700 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.46 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,10 +109,10 @@ rtrescan(PG_FUNCTION_ARGS)
s->numberOfKeys * sizeof(ScanKeyData));
/*
- * Scans on internal pages use different operators than they
- * do on leaf pages. For example, if the user wants all boxes
- * that exactly match (x1,y1,x2,y2), then on internal pages we
- * need to find all boxes that contain (x1,y1,x2,y2).
+ * Scans on internal pages use different operators than they do on
+ * leaf pages. For example, if the user wants all boxes that
+ * exactly match (x1,y1,x2,y2), then on internal pages we need to
+ * find all boxes that contain (x1,y1,x2,y2).
*/
for (i = 0; i < s->numberOfKeys; i++)
{
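The rtscan.c hunk above only re-wraps a comment, but that comment carries the key idea behind rtrescan's key setup: a query that asks for boxes exactly matching (x1,y1,x2,y2) must be turned into a containment test while descending internal pages, because an internal entry bounds everything stored beneath it. The sketch below shows only that remapping idea; the strategy numbers and names are hypothetical and do not come from the real rtree strategy tables.

#include <stdio.h>

/* Hypothetical strategy numbers; the real rtree strategy mapping lives in
 * the backend and uses different values and names. */
typedef enum
{
	RTStrategySame = 1,		/* leaf level: box exactly equals the key */
	RTStrategyContains = 2	/* internal level: box contains the key */
} RTStrategy;

/*
 * Map the user's leaf-level strategy to the one usable on internal pages.
 * If the user wants boxes equal to (x1,y1,x2,y2), an internal entry
 * qualifies when it *contains* that rectangle, since it bounds every leaf
 * entry below it.
 */
static RTStrategy
map_to_internal_strategy(RTStrategy leaf_strategy)
{
	switch (leaf_strategy)
	{
		case RTStrategySame:
			return RTStrategyContains;
		default:
			return leaf_strategy;	/* already usable on internal pages */
	}
}

int
main(void)
{
	printf("leaf 'same' maps to internal strategy %d\n",
		   map_to_internal_strategy(RTStrategySame));
	return 0;
}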
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 3653d05bc1e..6741e5436d5 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.16 2003/06/11 22:37:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.17 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@
static SlruCtlData ClogCtlData;
static SlruCtl ClogCtl = &ClogCtlData;
-
+
static int ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 59af2808026..444d2b97d7d 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -3,7 +3,7 @@
*
* Resource managers definition
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.10 2003/02/21 00:06:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.11 2003/08/04 00:43:15 momjian Exp $
*/
#include "postgres.h"
@@ -19,7 +19,7 @@
#include "commands/sequence.h"
-RmgrData RmgrTable[RM_MAX_ID+1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc, NULL, NULL},
{"Transaction", xact_redo, xact_undo, xact_desc, NULL, NULL},
{"Storage", smgr_redo, smgr_undo, smgr_desc, NULL, NULL},
@@ -32,7 +32,7 @@ RmgrData RmgrTable[RM_MAX_ID+1] = {
{"Reserved 9", NULL, NULL, NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc, NULL, NULL},
{"Btree", btree_redo, btree_undo, btree_desc,
- btree_xlog_startup, btree_xlog_cleanup},
+ btree_xlog_startup, btree_xlog_cleanup},
{"Hash", hash_redo, hash_undo, hash_desc, NULL, NULL},
{"Rtree", rtree_redo, rtree_undo, rtree_desc, NULL, NULL},
{"Gist", gist_redo, gist_undo, gist_desc, NULL, NULL},
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 5129dd3c7e5..1c290f2cf57 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.3 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef enum
SLRU_PAGE_CLEAN, /* page is valid and not dirty */
SLRU_PAGE_DIRTY, /* page is valid but needs write */
SLRU_PAGE_WRITE_IN_PROGRESS /* page is being written out */
-} SlruPageStatus;
+} SlruPageStatus;
/*
* Shared-memory state
@@ -117,7 +117,7 @@ typedef struct SlruSharedData
* swapping out the latest page.
*/
int latest_page_number;
-} SlruSharedData;
+} SlruSharedData;
typedef SlruSharedData *SlruShared;
@@ -145,7 +145,7 @@ typedef enum
SLRU_SEEK_FAILED,
SLRU_READ_FAILED,
SLRU_WRITE_FAILED
-} SlruErrorCause;
+} SlruErrorCause;
static SlruErrorCause slru_errcause;
static int slru_errno;
@@ -166,9 +166,9 @@ SimpleLruShmemSize(void)
{
return MAXALIGN(sizeof(SlruSharedData)) + BLCKSZ * NUM_CLOG_BUFFERS
#ifdef EXEC_BACKEND
- + MAXALIGN(sizeof(SlruLockData))
+ + MAXALIGN(sizeof(SlruLockData))
#endif
- ;
+ ;
}
void
@@ -183,12 +183,14 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
shared = (SlruShared) ptr;
#ifdef EXEC_BACKEND
+
/*
* Locks are in shared memory
*/
- locks = (SlruLock)(ptr + MAXALIGN(sizeof(SlruSharedData)) +
- BLCKSZ * NUM_CLOG_BUFFERS);
+ locks = (SlruLock) (ptr + MAXALIGN(sizeof(SlruSharedData)) +
+ BLCKSZ * NUM_CLOG_BUFFERS);
#else
+
/*
* Locks are in private memory
*/
@@ -199,7 +201,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
if (!IsUnderPostmaster)
- /* Initialize locks and shared memory area */
+ /* Initialize locks and shared memory area */
{
char *bufptr;
int slotno;
@@ -210,8 +212,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
memset(shared, 0, sizeof(SlruSharedData));
- bufptr = (char *)shared + MAXALIGN(sizeof(SlruSharedData));
-
+ bufptr = (char *) shared + MAXALIGN(sizeof(SlruSharedData));
+
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
locks->BufferLocks[slotno] = LWLockAssign();
@@ -247,7 +249,7 @@ int
SimpleLruZeroPage(SlruCtl ctl, int pageno)
{
int slotno;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Find a suitable buffer slot for the page */
slotno = SlruSelectLRUPage(ctl, pageno);
@@ -285,7 +287,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno)
char *
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid, bool forwrite)
{
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
@@ -383,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno)
{
int pageno;
bool ok;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@@ -539,13 +541,13 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno)
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
- * SimpleLruZeroPage is called for the first page of a segment. However,
- * if after a crash and restart the REDO logic elects to replay the
- * log from a checkpoint before the latest one, then it's possible
- * that we will get commands to set transaction status of transactions
- * that have already been truncated from the commit log. Easiest way
- * to deal with that is to accept references to nonexistent files here
- * and in SlruPhysicalReadPage.)
+ * SimpleLruZeroPage is called for the first page of a segment.
+ * However, if after a crash and restart the REDO logic elects to
+ * replay the log from a checkpoint before the latest one, then it's
+ * possible that we will get commands to set transaction status of
+ * transactions that have already been truncated from the commit log.
+ * Easiest way to deal with that is to accept references to
+ * nonexistent files here and in SlruPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@@ -608,37 +610,37 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("open of file \"%s\" failed: %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("creation of file \"%s\" failed: %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("lseek of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("lseek of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("read of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("read of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("write of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("write of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
default:
/* can't get here, we trust */
@@ -665,6 +667,7 @@ static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = (SlruShared) ctl->shared;
+
/* Outer loop handles restart after I/O */
for (;;)
{
@@ -689,7 +692,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
- shared->page_number[slotno] != shared->latest_page_number)
+ shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
@@ -705,12 +708,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it
* out, but it's possible in the worst case to have selected a
- * read-busy page. In that case we use SimpleLruReadPage to wait for
- * the read to complete.
+ * read-busy page. In that case we use SimpleLruReadPage to wait
+ * for the read to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
- InvalidTransactionId, false);
+ InvalidTransactionId, false);
else
SimpleLruWritePage(ctl, bestslot);
@@ -747,10 +750,11 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
SimpleLruWritePage(ctl, slotno);
+
/*
- * When called during a checkpoint,
- * we cannot assert that the slot is clean now, since another
- * process might have re-dirtied it already. That's okay.
+ * When called during a checkpoint, we cannot assert that the slot
+ * is clean now, since another process might have re-dirtied it
+ * already. That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@@ -792,10 +796,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
CreateCheckPoint(false, true);
/*
- * Scan shared memory and remove any pages preceding the cutoff
- * page, to ensure we won't rewrite them later. (Any dirty pages
- * should have been flushed already during the checkpoint, we're just
- * being extra careful here.)
+ * Scan shared memory and remove any pages preceding the cutoff page,
+ * to ensure we won't rewrite them later. (Any dirty pages should
+ * have been flushed already during the checkpoint, we're just being
+ * extra careful here.)
*/
LWLockAcquire(ctl->locks->ControlLock, LW_EXCLUSIVE);
@@ -870,7 +874,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (cldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not open directory \"%s\": %m", ctl->Dir)));
errno = 0;
while ((clde = readdir(cldir)) != NULL)
@@ -898,7 +902,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not read directory \"%s\": %m", ctl->Dir)));
closedir(cldir);
return found;
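Several slru.c hunks re-wrap the comments around SlruSelectLRUPage, whose rule is visible in the diff itself: return an empty slot if one exists, otherwise pick the slot with the highest LRU count, but never the slot holding the latest page, since that page is about to be used again. A self-contained sketch of that victim-selection rule, with a simplified slot array and hypothetical field names in place of the real SlruSharedData layout:

#include <stdio.h>
#include <stdbool.h>

#define NUM_SLOTS 8

/* Hypothetical, simplified slot state. */
typedef struct
{
	bool	empty;			/* slot holds no page */
	int		page_number;	/* which page is buffered here */
	int		lru_count;		/* larger = less recently used */
} SlotInfo;

/*
 * Pick a victim slot: prefer an empty slot; otherwise take the
 * least-recently-used slot, but never the one holding the latest page.
 * Returns -1 if no acceptable victim exists.
 */
static int
select_lru_slot(const SlotInfo *slots, int nslots, int latest_page_number)
{
	int		bestslot = -1;
	int		bestcount = -1;

	for (int i = 0; i < nslots; i++)
	{
		if (slots[i].empty)
			return i;
		if (slots[i].lru_count > bestcount &&
			slots[i].page_number != latest_page_number)
		{
			bestslot = i;
			bestcount = slots[i].lru_count;
		}
	}
	return bestslot;
}

int
main(void)
{
	SlotInfo	slots[NUM_SLOTS] = {
		{false, 10, 3}, {false, 11, 7}, {false, 12, 1}, {false, 13, 9},
		{false, 14, 2}, {false, 15, 5}, {false, 16, 4}, {false, 17, 6},
	};

	/* Page 13 is the latest page, so slot 3 is skipped even though it has
	 * the highest LRU count; slot 1 is chosen instead. */
	printf("victim slot = %d\n", select_lru_slot(slots, NUM_SLOTS, 13));
	return 0;
}

Skipping the latest page keeps the hottest buffer resident even when its LRU count happens to look stale.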
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 40b41519a93..550f2ae924b 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.150 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -92,7 +92,7 @@
* AbortTransactionBlock
*
* These are invoked only in response to a user "BEGIN WORK", "COMMIT",
- * or "ROLLBACK" command. The tricky part about these functions
+ * or "ROLLBACK" command. The tricky part about these functions
* is that they are called within the postgres main loop, in between
* the StartTransactionCommand() and CommitTransactionCommand().
*
@@ -197,8 +197,8 @@ static TransactionStateData CurrentTransactionStateData = {
0, /* scan command id */
0x0, /* start time */
TRANS_DEFAULT, /* transaction state */
- TBLOCK_DEFAULT /* transaction block state from
- the client perspective */
+ TBLOCK_DEFAULT /* transaction block state from the client
+ * perspective */
};
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
@@ -359,7 +359,7 @@ GetCurrentTransactionStartTimeUsec(int *msec)
* TransactionIdIsCurrentTransactionId
*
* During bootstrap, we cheat and say "it's not my transaction ID" even though
- * it is. Along with transam.c's cheat to say that the bootstrap XID is
+ * it is. Along with transam.c's cheat to say that the bootstrap XID is
* already committed, this causes the tqual.c routines to see previously
* inserted tuples as committed, which is what we need during bootstrap.
*/
@@ -561,13 +561,13 @@ RecordTransactionCommit(void)
/*
* We must mark the transaction committed in clog if its XID
- * appears either in permanent rels or in local temporary rels.
- * We test this by seeing if we made transaction-controlled
- * entries *OR* local-rel tuple updates. Note that if we made
- * only the latter, we have not emitted an XLOG record for our
- * commit, and so in the event of a crash the clog update might be
- * lost. This is okay because no one else will ever care whether
- * we committed.
+ * appears either in permanent rels or in local temporary rels. We
+ * test this by seeing if we made transaction-controlled entries
+ * *OR* local-rel tuple updates. Note that if we made only the
+ * latter, we have not emitted an XLOG record for our commit, and
+ * so in the event of a crash the clog update might be lost. This
+ * is okay because no one else will ever care whether we
+ * committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -755,9 +755,9 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
- * TopTransactionContext...). Note that it is possible for this
- * code to be called when we aren't in a transaction at all; go
- * directly to TopMemoryContext in that case.
+ * TopTransactionContext...). Note that it is possible for this code
+ * to be called when we aren't in a transaction at all; go directly to
+ * TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
{
@@ -891,8 +891,8 @@ CommitTransaction(void)
DeferredTriggerEndXact();
/*
- * Similarly, let ON COMMIT management do its thing before we start
- * to commit.
+ * Similarly, let ON COMMIT management do its thing before we start to
+ * commit.
*/
PreCommit_on_commit_actions();
@@ -953,10 +953,10 @@ CommitTransaction(void)
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is:
- * release resources visible to other backends (eg, files, buffer pins);
- * then release locks; then release backend-local resources. We want
- * to release locks at the point where any backend waiting for us will
- * see our transaction as being fully cleaned up.
+ * release resources visible to other backends (eg, files, buffer
+ * pins); then release locks; then release backend-local resources.
+ * We want to release locks at the point where any backend waiting for
+ * us will see our transaction as being fully cleaned up.
*/
smgrDoPendingDeletes(true);
@@ -1064,7 +1064,7 @@ AbortTransaction(void)
}
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering.
*/
@@ -1194,8 +1194,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TopTransactionContext before returning. This
- * is already done if we called StartTransaction, otherwise not.
+ * We must switch to TopTransactionContext before returning. This is
+ * already done if we called StartTransaction, otherwise not.
*/
Assert(TopTransactionContext != NULL);
MemoryContextSwitchTo(TopTransactionContext);
@@ -1370,9 +1370,10 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s cannot run inside a transaction block",
stmtType)));
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1381,8 +1382,8 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
- errmsg("%s cannot be executed from a function", stmtType)));
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
elog(ERROR, "cannot prevent transaction chain");
@@ -1414,6 +1415,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
*/
if (IsTransactionBlock())
return;
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1423,7 +1425,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s may only be used in BEGIN/END transaction blocks",
stmtType)));
}
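The RecordTransactionCommit comment reflowed above encodes a real decision rule: the XID is marked committed in clog only if the transaction made transaction-controlled XLOG entries or updated local temporary relations, and only the former case has written an XLOG commit record, so a temp-rel-only commit can be lost in a crash without anyone caring. A rough illustration of that branching, with placeholder flags standing in for MyLastRecPtr and MyXactMadeTempRelUpdate:

#include <stdio.h>
#include <stdbool.h>

/*
 * Placeholder logic mirroring the shape of the rule described in the
 * RecordTransactionCommit comment; this is not the real backend code.
 */
static void
record_commit(bool made_xlog_entries, bool made_temp_rel_update)
{
	if (made_xlog_entries)
		printf("write XLOG commit record, then mark XID committed in clog\n");
	else if (made_temp_rel_update)
		printf("mark XID committed in clog only; a crash may lose this, "
			   "but nobody else cares about a temp-rel-only transaction\n");
	else
		printf("nothing to record\n");
}

int
main(void)
{
	record_commit(true, false);
	record_commit(false, true);
	record_commit(false, false);
	return 0;
}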
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 0ceb8951cbe..45a2743ba97 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.120 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.121 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1046,8 +1046,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1162,8 +1162,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@@ -1266,7 +1266,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace < SizeOfXLogRecord) /* buffer is full */
+ if (freespace < SizeOfXLogRecord) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -1449,8 +1449,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
@@ -1563,14 +1563,14 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
return (fd);
@@ -1621,8 +1621,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (xldir == NULL)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLogDir)));
sprintf(lastoff, "%08X%08X", log, seg);
@@ -1654,15 +1654,15 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
true))
{
ereport(LOG,
- (errmsg("recycled transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
}
else
{
/* No need for any more future segments... */
ereport(LOG,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
}
@@ -1672,8 +1672,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (errno)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not read transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not read transaction log directory \"%s\": %m",
+ XLogDir)));
closedir(xldir);
}
@@ -1746,8 +1746,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("bad resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
@@ -1769,8 +1769,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
- (errmsg("bad checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
@@ -1931,7 +1931,7 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager id %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
@@ -2063,7 +2063,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@@ -2084,7 +2084,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_sui > lastReadSUI + 512)
{
ereport(emode,
- /* translator: SUI = startup id */
+ /* translator: SUI = startup id */
(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
hdr->xlp_sui, lastReadSUI,
readId, readSeg, readOff)));
@@ -2235,8 +2235,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
- " but the server was compiled with PG_CONTROL_VERSION %d.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
@@ -2265,75 +2265,75 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
- " but the server was compiled with CATALOG_VERSION_NO %d.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with BLCKSZ %d,"
- " but the server was compiled with BLCKSZ %d.",
- ControlFile->blcksz, BLCKSZ),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
- " but the server was compiled with RELSEG_SIZE %d.",
+ " but the server was compiled with RELSEG_SIZE %d.",
ControlFile->relseg_size, RELSEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with NAMEDATALEN %d,"
- " but the server was compiled with NAMEDATALEN %d.",
+ " but the server was compiled with NAMEDATALEN %d.",
ControlFile->nameDataLen, NAMEDATALEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
- " but the server was compiled with FUNC_MAX_ARGS %d.",
+ " but the server was compiled with FUNC_MAX_ARGS %d.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
- " but the server was compiled with HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
- " but the server was compiled without HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
- " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_collate),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_ctype),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
@@ -2602,10 +2602,10 @@ StartupXLOG(void)
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
- errhint("This probably means that some data is corrupted and"
- " you will have to use the last backup for recovery.")));
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -2637,12 +2637,12 @@ StartupXLOG(void)
checkPointLoc = ControlFile->prevCheckPoint;
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2665,11 +2665,12 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
/*
- * If it was a shutdown checkpoint, then any following WAL entries were
- * created under the next StartUpID; if it was a regular checkpoint then
- * any following WAL entries were created under the same StartUpID.
- * We must replay WAL entries using the same StartUpID they were created
- * under, so temporarily adopt that SUI (see also xlog_redo()).
+ * If it was a shutdown checkpoint, then any following WAL entries
+ * were created under the next StartUpID; if it was a regular
+ * checkpoint then any following WAL entries were created under the
+ * same StartUpID. We must replay WAL entries using the same StartUpID
+ * they were created under, so temporarily adopt that SUI (see also
+ * xlog_redo()).
*/
if (wasShutdown)
ThisStartUpID = checkPoint.ThisStartUpID + 1;
@@ -2690,7 +2691,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo/undo record in shutdown checkpoint")));
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -2699,7 +2700,7 @@ StartupXLOG(void)
/* REDO */
if (InRecovery)
{
- int rmid;
+ int rmid;
ereport(LOG,
(errmsg("database system was not properly shut down; "
@@ -2791,8 +2792,8 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block
- * is indeed the one we want to use.
+ * LastRec record spans, not the one it starts in. The last block is
+ * indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2818,11 +2819,12 @@ StartupXLOG(void)
else
{
/*
- * Whenever Write.LogwrtResult points to exactly the end of a page,
- * Write.curridx must point to the *next* page (see XLogWrite()).
+ * Whenever Write.LogwrtResult points to exactly the end of a
+ * page, Write.curridx must point to the *next* page (see
+ * XLogWrite()).
*
- * Note: it might seem we should do AdvanceXLInsertBuffer() here,
- * but we can't since we haven't yet determined the correct StartUpID
+ * Note: it might seem we should do AdvanceXLInsertBuffer() here, but
+ * we can't since we haven't yet determined the correct StartUpID
* to put into the new page's header. The first actual attempt to
* insert a log record will advance the insert state.
*/
@@ -2859,7 +2861,7 @@ StartupXLOG(void)
if (InRecovery)
{
- int rmid;
+ int rmid;
/*
* Allow resource managers to do any required cleanup.
@@ -2885,14 +2887,15 @@ StartupXLOG(void)
ThisStartUpID = ControlFile->checkPointCopy.ThisStartUpID;
/*
- * Perform a new checkpoint to update our recovery activity to disk.
+ * Perform a new checkpoint to update our recovery activity to
+ * disk.
*
* Note that we write a shutdown checkpoint. This is correct since
- * the records following it will use SUI one more than what is shown
- * in the checkpoint's ThisStartUpID.
+ * the records following it will use SUI one more than what is
+ * shown in the checkpoint's ThisStartUpID.
*
- * In case we had to use the secondary checkpoint, make sure that
- * it will still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -2907,10 +2910,10 @@ StartupXLOG(void)
else
{
/*
- * If we are not doing recovery, then we saw a checkpoint with nothing
- * after it, and we can safely use StartUpID equal to one more than
- * the checkpoint's SUI. But just for paranoia's sake, check against
- * pg_control too.
+ * If we are not doing recovery, then we saw a checkpoint with
+ * nothing after it, and we can safely use StartUpID equal to one
+ * more than the checkpoint's SUI. But just for paranoia's sake,
+ * check against pg_control too.
*/
ThisStartUpID = checkPoint.ThisStartUpID;
if (ThisStartUpID < ControlFile->checkPointCopy.ThisStartUpID)
@@ -2923,7 +2926,8 @@ StartupXLOG(void)
PreallocXlogFiles(EndOfLog);
/*
- * Advance StartUpID to one more than the highest value used previously.
+ * Advance StartUpID to one more than the highest value used
+ * previously.
*/
ThisStartUpID++;
XLogCtl->ThisStartUpID = ThisStartUpID;
@@ -2973,9 +2977,9 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (!XRecOffIsValid(RecPtr.xrecoff))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint link in control file",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
@@ -2984,34 +2988,34 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record == NULL)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
- (errmsg("invalid resource manager id in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid resource manager id in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid xl_info in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid length of %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
@@ -3112,10 +3116,11 @@ CreateCheckPoint(bool shutdown, bool force)
if (MyXactMadeXLogEntry)
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("checkpoint cannot be made inside transaction block")));
+ errmsg("checkpoint cannot be made inside transaction block")));
/*
- * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
+ * Acquire CheckpointLock to ensure only one checkpoint happens at a
+ * time.
*
* The CheckpointLock can be held for quite a while, which is not good
* because we won't respond to a cancel/die request while waiting for
@@ -3149,14 +3154,15 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * If this isn't a shutdown or forced checkpoint, and we have not inserted
- * any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
- * when the system is idle. That wastes log space, and more importantly it
- * exposes us to possible loss of both current and previous checkpoint
- * records if the machine crashes just as we're writing the update.
- * (Perhaps it'd make even more sense to checkpoint only when the previous
- * checkpoint record is in a different xlog page?)
+ * If this isn't a shutdown or forced checkpoint, and we have not
+ * inserted any XLOG records since the start of the last checkpoint,
+ * skip the checkpoint. The idea here is to avoid inserting duplicate
+ * checkpoints when the system is idle. That wastes log space, and
+ * more importantly it exposes us to possible loss of both current and
+ * previous checkpoint records if the machine crashes just as we're
+ * writing the update. (Perhaps it'd make even more sense to
+ * checkpoint only when the previous checkpoint record is in a
+ * different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must
@@ -3204,12 +3210,13 @@ CreateCheckPoint(bool shutdown, bool force)
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock AND the info_lck.
*
- * Note: if we fail to complete the checkpoint, RedoRecPtr will be
- * left pointing past where it really needs to point. This is okay;
- * the only consequence is that XLogInsert might back up whole buffers
- * that it didn't really need to. We can't postpone advancing RedoRecPtr
- * because XLogInserts that happen while we are dumping buffers must
- * assume that their buffer changes are not included in the checkpoint.
+ * Note: if we fail to complete the checkpoint, RedoRecPtr will be left
+ * pointing past where it really needs to point. This is okay; the
+ * only consequence is that XLogInsert might back up whole buffers
+ * that it didn't really need to. We can't postpone advancing
+ * RedoRecPtr because XLogInserts that happen while we are dumping
+ * buffers must assume that their buffer changes are not included in
+ * the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -3538,15 +3545,15 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
@@ -3570,16 +3577,16 @@ issue_xlog_fsync(void)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fdatasync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fdatasync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
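One reflowed xlog.c comment explains when CreateCheckPoint may skip its work entirely: for a checkpoint that is neither forced nor part of shutdown, if no XLOG records have been inserted since the previous checkpoint started, writing another checkpoint only wastes log space and risks losing both the current and previous checkpoint records to a crash mid-write. A toy version of that skip test; plain integers stand in for the XLogRecPtr comparisons the real code performs:

#include <stdio.h>
#include <stdbool.h>

/*
 * Toy skip test in the spirit of the CreateCheckPoint comment: skip the
 * checkpoint when nothing was written since the last one, unless it is a
 * shutdown or forced checkpoint.
 */
static bool
should_skip_checkpoint(bool shutdown, bool force,
					   long insert_ptr, long last_checkpoint_redo_ptr)
{
	if (shutdown || force)
		return false;
	/* "No new WAL" is approximated here as: the insert pointer has not
	 * moved since the previous checkpoint's redo pointer was taken. */
	return insert_ptr == last_checkpoint_redo_ptr;
}

int
main(void)
{
	printf("idle system: skip=%d\n",
		   should_skip_checkpoint(false, false, 1000, 1000));
	printf("busy system: skip=%d\n",
		   should_skip_checkpoint(false, false, 2048, 1000));
	printf("shutdown:    skip=%d\n",
		   should_skip_checkpoint(true, false, 1000, 1000));
	return 0;
}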
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index b02fa775ded..328f2ab9b38 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.163 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.164 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,7 +238,7 @@ BootstrapMain(int argc, char *argv[])
*
* If we are running under the postmaster, this is done already.
*/
- if (!IsUnderPostmaster /* when exec || ExecBackend */)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
MemoryContextInit();
/*
@@ -247,7 +247,7 @@ BootstrapMain(int argc, char *argv[])
/* Set defaults, to be overriden by explicit options below */
dbname = NULL;
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
InitializeGUCOptions();
potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
@@ -285,22 +285,22 @@ BootstrapMain(int argc, char *argv[])
xlogop = atoi(optarg);
break;
case 'p':
- {
- /* indicates fork from postmaster */
+ {
+ /* indicates fork from postmaster */
#ifdef EXEC_BACKEND
- char *p;
-
- sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
- p = strchr(optarg, ',');
- if (p)
- p = strchr(p+1, ',');
- if (p)
- dbname = strdup(p+1);
+ char *p;
+
+ sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
+ p = strchr(optarg, ',');
+ if (p)
+ p = strchr(p + 1, ',');
+ if (p)
+ dbname = strdup(p + 1);
#else
- dbname = strdup(optarg);
+ dbname = strdup(optarg);
#endif
- break;
- }
+ break;
+ }
case 'B':
SetConfigOption("shared_buffers", optarg, PGC_POSTMASTER, PGC_S_ARGV);
break;
@@ -346,12 +346,10 @@ BootstrapMain(int argc, char *argv[])
usage();
- if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */)
- {
+ if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */ )
AttachSharedMemoryAndSemaphores();
- }
-
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
if (!potential_DataDir)
{
@@ -473,8 +471,8 @@ BootstrapMain(int argc, char *argv[])
/*
* In NOP mode, all we really want to do is create shared memory and
- * semaphores (just to prove we can do it with the current GUC settings).
- * So, quit now.
+ * semaphores (just to prove we can do it with the current GUC
+ * settings). So, quit now.
*/
if (xlogop == BS_XLOG_NOP)
proc_exit(0);
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 77b1d3b2d7f..7ace67de6b2 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.85 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.86 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* See acl.h.
@@ -97,37 +97,40 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
if (grantee->username)
{
- aclitem.ai_grantee = get_usesysid(grantee->username);
+ aclitem. ai_grantee = get_usesysid(grantee->username);
+
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
- aclitem.ai_grantee = get_grosysid(grantee->groupname);
+ aclitem. ai_grantee = get_grosysid(grantee->groupname);
+
idtype = ACL_IDTYPE_GID;
}
else
{
- aclitem.ai_grantee = ACL_ID_WORLD;
+ aclitem. ai_grantee = ACL_ID_WORLD;
+
idtype = ACL_IDTYPE_WORLD;
}
/*
* Grant options can only be granted to individual users, not
- * groups or public. The reason is that if a user would
- * re-grant a privilege that he held through a group having a
- * grant option, and later the user is removed from the group,
- * the situation is impossible to clean up.
+ * groups or public. The reason is that if a user would re-grant
+ * a privilege that he held through a group having a grant option,
+ * and later the user is removed from the group, the situation is
+ * impossible to clean up.
*/
if (is_grant && idtype != ACL_IDTYPE_UID && grant_option)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to individual users")));
- aclitem.ai_grantor = GetUserId();
+ aclitem. ai_grantor = GetUserId();
ACLITEM_SET_PRIVS_IDTYPE(aclitem,
- (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
- (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
+ (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
+ (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
idtype);
new_acl = aclinsert3(new_acl, &aclitem, modechg, behavior);
@@ -247,7 +250,7 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -346,7 +349,7 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -443,7 +446,7 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -543,7 +546,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -619,7 +622,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple);
if (stmt->is_grant
- && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
+ && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
&& pg_namespace_aclcheck(HeapTupleGetOid(tuple), GetUserId(), ACL_GRANT_OPTION_FOR(privileges)) != ACLCHECK_OK)
aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_NAMESPACE,
nspname);
@@ -640,7 +643,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -805,7 +808,7 @@ in_group(AclId uid, AclId gid)
static AclResult
aclcheck(Acl *acl, AclId userid, AclMode mode)
{
- AclItem *aidat;
+ AclItem *aidat;
int i,
num;
@@ -833,10 +836,10 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
if (aidat[i].ai_privs & mode)
return ACLCHECK_OK;
}
-
+
/*
- * See if he has the permission via any group (do this in a
- * separate pass to avoid expensive(?) lookups in pg_group)
+ * See if he has the permission via any group (do this in a separate
+ * pass to avoid expensive(?) lookups in pg_group)
*/
for (i = 0; i < num; i++)
if (ACLITEM_GET_IDTYPE(aidat[i]) == ACL_IDTYPE_GID
@@ -856,7 +859,7 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
* supply strings that might be already quoted.
*/
-static const char * const no_priv_msg[MAX_ACL_KIND] =
+static const char *const no_priv_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("permission denied for relation %s"),
@@ -878,7 +881,7 @@ static const char * const no_priv_msg[MAX_ACL_KIND] =
gettext_noop("permission denied for conversion %s")
};
-static const char * const not_owner_msg[MAX_ACL_KIND] =
+static const char *const not_owner_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("must be owner of relation %s"),
@@ -972,7 +975,7 @@ pg_class_aclcheck(Oid table_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", table_oid)));
+ errmsg("relation with OID %u does not exist", table_oid)));
/*
* Deny anyone permission to update a system catalog unless
@@ -1124,7 +1127,7 @@ pg_proc_aclcheck(Oid proc_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl,
&isNull);
@@ -1179,7 +1182,7 @@ pg_language_aclcheck(Oid lang_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("language with OID %u does not exist", lang_oid)));
+ errmsg("language with OID %u does not exist", lang_oid)));
aclDatum = SysCacheGetAttr(LANGOID, tuple, Anum_pg_language_lanacl,
&isNull);
@@ -1288,7 +1291,7 @@ pg_class_ownercheck(Oid class_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", class_oid)));
+ errmsg("relation with OID %u does not exist", class_oid)));
owner_id = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
@@ -1344,7 +1347,7 @@ pg_oper_ownercheck(Oid oper_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator with OID %u does not exist", oper_oid)));
+ errmsg("operator with OID %u does not exist", oper_oid)));
owner_id = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner;
@@ -1372,7 +1375,7 @@ pg_proc_ownercheck(Oid proc_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
owner_id = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;
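The aclcheck hunks re-wrap a comment documenting a two-pass lookup: entries granted directly to the user (or to the world) are scanned first, and group entries are checked in a second pass only if needed, because resolving group membership means comparatively expensive pg_group lookups. A simplified standalone version of that ordering, using a hypothetical ACL entry type loosely modeled on AclItem:

#include <stdio.h>
#include <stdbool.h>

typedef enum { ID_USER, ID_GROUP, ID_WORLD } IdType;

/* Hypothetical ACL entry. */
typedef struct
{
	IdType		idtype;
	int			id;			/* user or group id; ignored for ID_WORLD */
	unsigned	privs;		/* bitmask of privileges */
} Entry;

/* Stub membership test standing in for the pg_group lookup. */
static bool
in_group(int userid, int groupid)
{
	return userid == 42 && groupid == 7;
}

/*
 * Two-pass check: user/world entries first (cheap), group entries second
 * (needs membership lookups).  Returns true if 'mode' is granted.
 */
static bool
acl_allows(const Entry *acl, int n, int userid, unsigned mode)
{
	for (int i = 0; i < n; i++)
		if ((acl[i].idtype == ID_WORLD ||
			 (acl[i].idtype == ID_USER && acl[i].id == userid)) &&
			(acl[i].privs & mode))
			return true;

	for (int i = 0; i < n; i++)
		if (acl[i].idtype == ID_GROUP &&
			in_group(userid, acl[i].id) &&
			(acl[i].privs & mode))
			return true;

	return false;
}

int
main(void)
{
	Entry		acl[] = {
		{ID_USER, 1, 0x4},		/* another user: one privilege bit */
		{ID_GROUP, 7, 0x2},		/* group 7: a different privilege bit */
	};

	printf("user 42, mode 0x2: %d\n", acl_allows(acl, 2, 42, 0x2));
	printf("user 42, mode 0x8: %d\n", acl_allows(acl, 2, 42, 0x8));
	return 0;
}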
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 2cdf4bc229c..251fb82d813 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.28 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.29 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,8 +93,8 @@ static Oid object_classes[MAX_OCLASS];
static void findAutoDeletableObjects(const ObjectAddress *object,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static bool recursiveDeletion(const ObjectAddress *object,
DropBehavior behavior,
int msglevel,
@@ -102,11 +102,11 @@ static bool recursiveDeletion(const ObjectAddress *object,
ObjectAddresses *oktodelete,
Relation depRel);
static bool deleteDependentObjects(const ObjectAddress *object,
- const char *objDescription,
- DropBehavior behavior,
- int msglevel,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ const char *objDescription,
+ DropBehavior behavior,
+ int msglevel,
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static void doDeletion(const ObjectAddress *object);
static bool find_expr_references_walker(Node *node,
find_expr_references_context *context);
@@ -118,7 +118,7 @@ static void add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
static void add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
static bool object_address_present(const ObjectAddress *object,
- ObjectAddresses *addrs);
+ ObjectAddresses *addrs);
static void term_object_addresses(ObjectAddresses *addrs);
static void init_object_classes(void);
static ObjectClasses getObjectClass(const ObjectAddress *object);
@@ -158,9 +158,9 @@ performDeletion(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -170,8 +170,8 @@ performDeletion(const ObjectAddress *object,
NULL, &oktodelete, depRel))
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because other objects depend on it",
- objDescription),
+ errmsg("cannot drop %s because other objects depend on it",
+ objDescription),
errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
term_object_addresses(&oktodelete);
@@ -184,7 +184,7 @@ performDeletion(const ObjectAddress *object,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -212,9 +212,9 @@ deleteWhatDependsOn(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -266,9 +266,9 @@ findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddress otherObject;
/*
- * If this object is already in oktodelete, then we already visited it;
- * don't do so again (this prevents infinite recursion if there's a loop
- * in pg_depend). Otherwise, add it.
+ * If this object is already in oktodelete, then we already visited
+ * it; don't do so again (this prevents infinite recursion if there's
+ * a loop in pg_depend). Otherwise, add it.
*/
if (object_address_present(object, oktodelete))
return;
@@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
/*
* Scan pg_depend records that link to this object, showing the things
- * that depend on it. For each one that is AUTO or INTERNAL, visit the
- * referencing object.
+ * that depend on it. For each one that is AUTO or INTERNAL, visit
+ * the referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
@@ -319,6 +319,7 @@ findAutoDeletableObjects(const ObjectAddress *object,
findAutoDeletableObjects(&otherObject, oktodelete, depRel);
break;
case DEPENDENCY_PIN:
+
/*
* For a PIN dependency we just ereport immediately; there
* won't be any others to examine, and we aren't ever
@@ -461,11 +462,11 @@ recursiveDeletion(const ObjectAddress *object,
char *otherObjDesc = getObjectDescription(&otherObject);
ereport(ERROR,
- (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because %s requires it",
- objDescription, otherObjDesc),
- errhint("You may drop %s instead.",
- otherObjDesc)));
+ (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
+ errmsg("cannot drop %s because %s requires it",
+ objDescription, otherObjDesc),
+ errhint("You may drop %s instead.",
+ otherObjDesc)));
}
/*
@@ -559,10 +560,9 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
- * Note it's important to delete the dependent objects
- * before the referenced one, since the deletion routines might do
- * things like try to update the pg_class record when deleting a check
- * constraint.
+ * Note it's important to delete the dependent objects before the
+ * referenced one, since the deletion routines might do things like
+ * try to update the pg_class record when deleting a check constraint.
*/
if (!deleteDependentObjects(object, objDescription,
behavior, msglevel,
@@ -674,11 +674,12 @@ deleteDependentObjects(const ObjectAddress *object,
switch (foundDep->deptype)
{
case DEPENDENCY_NORMAL:
+
/*
* Perhaps there was another dependency path that would
- * have allowed silent deletion of the otherObject, had
- * we only taken that path first.
- * In that case, act like this link is AUTO, too.
+ * have allowed silent deletion of the otherObject, had we
+ * only taken that path first. In that case, act like this
+ * link is AUTO, too.
*/
if (object_address_present(&otherObject, oktodelete))
ereport(DEBUG2,
@@ -872,7 +873,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
@@ -1001,7 +1002,7 @@ find_expr_references_walker(Node *node,
else if (rte->rtekind == RTE_JOIN)
{
/* Scan join output column to add references to join inputs */
- List *save_rtables;
+ List *save_rtables;
/* We must make the context appropriate for join's level */
save_rtables = context->rtables;
@@ -1026,7 +1027,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, OpExpr))
{
- OpExpr *opexpr = (OpExpr *) node;
+ OpExpr *opexpr = (OpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1034,7 +1035,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, DistinctExpr))
{
- DistinctExpr *distinctexpr = (DistinctExpr *) node;
+ DistinctExpr *distinctexpr = (DistinctExpr *) node;
add_object_address(OCLASS_OPERATOR, distinctexpr->opno, 0,
&context->addrs);
@@ -1042,7 +1043,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1066,7 +1067,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -1092,7 +1093,8 @@ find_expr_references_walker(Node *node,
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
- * to do that here. But keep it from looking at join alias lists.)
+ * to do that here. But keep it from looking at join alias
+ * lists.)
*/
foreach(rtable, query->rtable)
{
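
The findAutoDeletableObjects comment reflowed above depends on a standard cycle-safe traversal: each object is added to oktodelete before recursing, so a loop in pg_depend cannot cause infinite recursion. Below is a minimal standalone sketch of that pattern only; the integer object ids, the dep_from/dep_to arrays, and the visited[] flag array are hypothetical stand-ins, not the real ObjectAddresses machinery.

#include <stdbool.h>
#include <stdio.h>

#define NOBJECTS 8

/* hypothetical dependency edges: referenced -> dependent; note the 1-2-3-1 cycle */
static const int dep_from[] = {1, 2, 3};
static const int dep_to[]   = {2, 3, 1};
static bool visited[NOBJECTS];

static void
find_auto_deletable(int object)
{
    /* already visited: stop, so a dependency loop cannot recurse forever */
    if (visited[object])
        return;
    visited[object] = true;

    /* visit everything that depends on this object */
    for (int i = 0; i < (int) (sizeof(dep_from) / sizeof(dep_from[0])); i++)
        if (dep_from[i] == object)
            find_auto_deletable(dep_to[i]);
}

int
main(void)
{
    find_auto_deletable(1);
    for (int i = 0; i < NOBJECTS; i++)
        if (visited[i])
            printf("ok to delete: object %d\n", i);
    return 0;
}

Marking the node before descending, rather than after, is what makes the cycle harmless: the second arrival at object 1 returns immediately.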
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 15dbc50a13d..c8a411646fa 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.249 2003/07/29 17:21:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.250 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
* Warn user, but don't fail, if column to be created has UNKNOWN type
* (usually as a result of a 'retrieve into' - jolly)
*
- * Refuse any attempt to create a pseudo-type column or one that uses
- * a standalone composite type. (Eventually we should probably refuse
+ * Refuse any attempt to create a pseudo-type column or one that uses a
+ * standalone composite type. (Eventually we should probably refuse
* all references to complex types, but for now there's still some
* Berkeley-derived code that thinks it can do this...)
*/
@@ -439,7 +439,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
}
else if (att_typtype == 'c')
{
- Oid typrelid = get_typ_typrelid(atttypid);
+ Oid typrelid = get_typ_typrelid(atttypid);
if (get_rel_relkind(typrelid) == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
@@ -975,12 +975,13 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
attStruct->attisdropped = true;
/*
- * Set the type OID to invalid. A dropped attribute's type link cannot
- * be relied on (once the attribute is dropped, the type might be too).
- * Fortunately we do not need the type row --- the only really essential
- * information is the type's typlen and typalign, which are preserved in
- * the attribute's attlen and attalign. We set atttypid to zero here
- * as a means of catching code that incorrectly expects it to be valid.
+ * Set the type OID to invalid. A dropped attribute's type link
+ * cannot be relied on (once the attribute is dropped, the type might
+ * be too). Fortunately we do not need the type row --- the only
+ * really essential information is the type's typlen and typalign,
+ * which are preserved in the attribute's attlen and attalign. We set
+ * atttypid to zero here as a means of catching code that incorrectly
+ * expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
@@ -1401,7 +1402,7 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
@@ -1568,8 +1569,8 @@ AddRelationRawConstraints(Relation rel,
if (strcmp(cdef2->name, ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("CHECK constraint \"%s\" already exists",
- ccname)));
+ errmsg("CHECK constraint \"%s\" already exists",
+ ccname)));
}
}
else
@@ -1639,7 +1640,7 @@ AddRelationRawConstraints(Relation rel,
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in CHECK constraint")));
+ errmsg("cannot use sub-select in CHECK constraint")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@@ -1750,7 +1751,7 @@ cookDefault(ParseState *pstate,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use column references in DEFAULT clause")));
+ errmsg("cannot use column references in DEFAULT clause")));
/*
* It can't return a set either.
@@ -1773,9 +1774,9 @@ cookDefault(ParseState *pstate,
errmsg("cannot use aggregate in DEFAULT clause")));
/*
- * Coerce the expression to the correct type and typmod, if given. This
- * should match the parser's processing of non-defaulted expressions ---
- * see updateTargetListEntry().
+ * Coerce the expression to the correct type and typmod, if given.
+ * This should match the parser's processing of non-defaulted
+ * expressions --- see updateTargetListEntry().
*/
if (OidIsValid(atttypid))
{
@@ -1793,7 +1794,7 @@ cookDefault(ParseState *pstate,
attname,
format_type_be(atttypid),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return expr;
@@ -1952,7 +1953,7 @@ RelationTruncateIndexes(Oid heapId)
/*
* index_build will close both the heap and index relations (but
- * not give up the locks we hold on them). We're done with this
+ * not give up the locks we hold on them). We're done with this
* index, but we must re-open the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index cb5a78c3dac..0b03c630b55 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.212 2003/07/21 01:59:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.213 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -65,8 +65,8 @@
/* non-export function prototypes */
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
- IndexInfo *indexInfo,
- Oid *classObjectId);
+ IndexInfo *indexInfo,
+ Oid *classObjectId);
static void UpdateRelationRelation(Relation indexRelation);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
@@ -124,7 +124,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* For simple index columns, we copy the pg_attribute row from the
- * parent relation and modify it as necessary. For expressions we
+ * parent relation and modify it as necessary. For expressions we
* have to cons up a pg_attribute row the hard way.
*/
for (i = 0; i < numatts; i++)
@@ -149,7 +149,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* here we are indexing on a system attribute (-1...-n)
*/
from = SystemAttributeDefinition(atnum,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
}
else
{
@@ -162,8 +162,8 @@ ConstructTupleDescriptor(Relation heapRelation,
}
/*
- * now that we've determined the "from", let's copy the tuple desc
- * data...
+ * now that we've determined the "from", let's copy the tuple
+ * desc data...
*/
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
@@ -185,7 +185,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/* Expressional index */
Node *indexkey;
- if (indexprs == NIL) /* shouldn't happen */
+ if (indexprs == NIL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexprs);
indexprs = lnext(indexprs);
@@ -197,7 +197,8 @@ ConstructTupleDescriptor(Relation heapRelation,
sprintf(NameStr(to->attname), "pg_expression_%d", i + 1);
/*
- * Lookup the expression type in pg_type for the type length etc.
+ * Lookup the expression type in pg_type for the type length
+ * etc.
*/
keyType = exprType(indexkey);
tuple = SearchSysCache(TYPEOID,
@@ -534,7 +535,7 @@ index_create(Oid heapRelationId,
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared indexes cannot be created after initdb")));
+ errmsg("shared indexes cannot be created after initdb")));
if (get_relname_relid(indexRelationName, namespaceId))
ereport(ERROR,
@@ -668,7 +669,7 @@ index_create(Oid heapRelationId,
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
NULL, /* no check constraint */
NULL,
NULL);
@@ -709,7 +710,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Expressions)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Expressions,
+ (Node *) indexInfo->ii_Expressions,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -719,7 +720,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Predicate)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Predicate,
+ (Node *) indexInfo->ii_Predicate,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -831,8 +832,8 @@ index_drop(Oid indexId)
/*
* We are presently too lazy to attempt to compute the new correct
- * value of relhasindex (the next VACUUM will fix it if necessary).
- * So there is no need to update the pg_class tuple for the owning
+ * value of relhasindex (the next VACUUM will fix it if necessary). So
+ * there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
@@ -958,7 +959,7 @@ FormIndexDatum(IndexInfo *indexInfo,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexprs),
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
indexprs = lnext(indexprs);
@@ -1160,7 +1161,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_upd)
{
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
}
else
@@ -1170,7 +1171,7 @@ setNewRelfilenode(Relation relation)
ScanKeyEntryInitialize(&key[0], 0,
ObjectIdAttributeNumber,
F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ ObjectIdGetDatum(RelationGetRelid(relation)));
pg_class_scan = heap_beginscan(pg_class, SnapshotNow, 1, key);
tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
@@ -1325,9 +1326,9 @@ UpdateStats(Oid relid, double reltuples)
}
/*
- * Update statistics in pg_class, if they changed. (Avoiding an
- * unnecessary update is not just a tiny performance improvement;
- * it also reduces the window wherein concurrent CREATE INDEX commands
+ * Update statistics in pg_class, if they changed. (Avoiding an
+ * unnecessary update is not just a tiny performance improvement; it
+ * also reduces the window wherein concurrent CREATE INDEX commands
* may conflict.)
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
@@ -1338,8 +1339,9 @@ UpdateStats(Oid relid, double reltuples)
if (in_place_upd)
{
/*
- * At bootstrap time, we don't need to worry about concurrency or
- * visibility of changes, so we cheat. Also cheat if REINDEX.
+ * At bootstrap time, we don't need to worry about concurrency
+ * or visibility of changes, so we cheat. Also cheat if
+ * REINDEX.
*/
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
rd_rel->relpages = (int32) relpages;
@@ -1367,7 +1369,7 @@ UpdateStats(Oid relid, double reltuples)
/*
* We shouldn't have to do this, but we do... Modify the reldesc in
* place with the new values so that the cache contains the latest
- * copy. (XXX is this really still necessary? The relcache will get
+ * copy. (XXX is this really still necessary? The relcache will get
* fixed at next CommandCounterIncrement, so why bother here?)
*/
whichRel->rd_rel->relpages = (int32) relpages;
@@ -1454,8 +1456,8 @@ IndexBuildHeapScan(Relation heapRelation,
heapDescriptor = RelationGetDescr(heapRelation);
/*
- * Need an EState for evaluation of index expressions
- * and partial-index predicates.
+ * Need an EState for evaluation of index expressions and
+ * partial-index predicates.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1463,7 +1465,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
- * be in a slot of a TupleTable. Likewise if there are any expressions.
+ * be in a slot of a TupleTable. Likewise if there are any
+ * expressions.
*/
if (indexInfo->ii_Predicate != NIL || indexInfo->ii_Expressions != NIL)
{
@@ -1741,15 +1744,15 @@ reindex_index(Oid indexId, bool force, bool inplace)
* it's a nailed-in-cache index, we must do inplace processing because
* the relcache can't cope with changing its relfilenode.
*
- * In either of these cases, we are definitely processing a system
- * index, so we'd better be ignoring system indexes.
+ * In either of these cases, we are definitely processing a system index,
+ * so we'd better be ignoring system indexes.
*/
if (iRel->rd_rel->relisshared)
{
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is shared", indexId)));
+ errmsg("the target relation %u is shared", indexId)));
inplace = true;
}
if (iRel->rd_isnailed)
@@ -1757,7 +1760,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is nailed", indexId)));
+ errmsg("the target relation %u is nailed", indexId)));
inplace = true;
}
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 28b9859677d..6a39fc69016 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.55 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.56 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,7 +164,7 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (relation->schemaname)
@@ -217,7 +217,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (newRelation->istemp)
@@ -226,7 +226,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("TEMP tables may not specify a schema name")));
+ errmsg("TEMP tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@@ -1057,7 +1057,7 @@ OpclassIsVisible(Oid opcid)
Oid
ConversionGetConid(const char *conname)
{
- Oid conid;
+ Oid conid;
List *lptr;
recomputeNamespacePath();
@@ -1115,11 +1115,11 @@ ConversionIsVisible(Oid conid)
/*
* If it is in the path, it might still not be visible; it could
* be hidden by another conversion of the same name earlier in the
- * path. So we must do a slow check to see if this conversion would
- * be found by ConversionGetConid.
+ * path. So we must do a slow check to see if this conversion
+ * would be found by ConversionGetConid.
*/
char *conname = NameStr(conform->conname);
-
+
visible = (ConversionGetConid(conname) == conid);
}
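
The ConversionIsVisible comment reflowed above reduces to one rule: an object in the search path is visible only if a by-name lookup along that path would find that same object first. A standalone sketch of the check follows; the entry_t catalog, the search_path array, and lookup_by_name() are hypothetical stand-ins for pg_conversion and the real namespace-path lookup.

#include <stdio.h>
#include <string.h>

typedef struct
{
    const char *name;
    int         namespace_id;
    int         oid;
} entry_t;

/* hypothetical catalog: two conversions share a name in different schemas */
static const entry_t catalog[] = {
    {"latin1_to_utf8", 10, 101},
    {"latin1_to_utf8", 20, 102},
};
#define NCATALOG ((int) (sizeof(catalog) / sizeof(catalog[0])))

/* hypothetical search path: namespace 10 is searched before namespace 20 */
static const int search_path[] = {10, 20};
#define NPATH ((int) (sizeof(search_path) / sizeof(search_path[0])))

/* return the oid found first along the search path, or 0 if none */
static int
lookup_by_name(const char *name)
{
    for (int p = 0; p < NPATH; p++)
        for (int c = 0; c < NCATALOG; c++)
            if (catalog[c].namespace_id == search_path[p] &&
                strcmp(catalog[c].name, name) == 0)
                return catalog[c].oid;
    return 0;
}

/* visible only if the by-name lookup would return this very object */
static int
is_visible(const entry_t *obj)
{
    return lookup_by_name(obj->name) == obj->oid;
}

int
main(void)
{
    printf("oid 101 visible: %d\n", is_visible(&catalog[0]));   /* 1 */
    printf("oid 102 visible: %d\n", is_visible(&catalog[1]));   /* 0: shadowed */
    return 0;
}

The second entry sits on the path yet is not visible, because the earlier namespace shadows it; that is exactly the "slow check" the comment describes.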
@@ -1164,13 +1164,13 @@ DeconstructQualifiedName(List *names,
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1281,8 +1281,8 @@ makeRangeVarFromNameList(List *names)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper relation name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper relation name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1720,8 +1720,8 @@ RemoveTempRelations(Oid tempNamespaceId)
/*
* We want to get rid of everything in the target namespace, but not
- * the namespace itself (deleting it only to recreate it later would be
- * a waste of cycles). We do this by finding everything that has a
+ * the namespace itself (deleting it only to recreate it later would
+ * be a waste of cycles). We do this by finding everything that has a
* dependency on the namespace.
*/
object.classId = get_system_catalog_relid(NamespaceRelationName);
@@ -1797,7 +1797,7 @@ assign_search_path(const char *newval, bool doit, bool interactive)
0, 0, 0))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", curname)));
+ errmsg("schema \"%s\" does not exist", curname)));
}
}
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 6837f3b9225..779468ce21f 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.61 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.62 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
#include "utils/syscache.h"
-static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
- Oid *rettype);
+static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
+ Oid *rettype);
/*
@@ -79,7 +79,7 @@ AggregateCreate(const char *aggName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition datatype"),
errdetail("An aggregate using ANYARRAY or ANYELEMENT as "
- "trans type must have one of them as its base type.")));
+ "trans type must have one of them as its base type.")));
/* handle transfn */
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -99,8 +99,8 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic)
* must exactly match declared transtype.
*
- * In the non-polymorphic-transtype case, it might be okay to allow
- * a rettype that's binary-coercible to transtype, but I'm not quite
+ * In the non-polymorphic-transtype case, it might be okay to allow a
+ * rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
@@ -151,9 +151,9 @@ AggregateCreate(const char *aggName,
Assert(OidIsValid(finaltype));
/*
- * If finaltype (i.e. aggregate return type) is polymorphic,
- * basetype must be polymorphic also, else parser will fail to deduce
- * result type. (Note: given the previous test on transtype and basetype,
+ * If finaltype (i.e. aggregate return type) is polymorphic, basetype
+ * must be polymorphic also, else parser will fail to deduce result
+ * type. (Note: given the previous test on transtype and basetype,
* this cannot happen, unless someone has snuck a finalfn definition
* into the catalogs that itself violates the rule against polymorphic
* result with no polymorphic input.)
@@ -163,8 +163,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result datatype"),
- errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
- "must have one of them as its base type.")));
+ errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
+ "must have one of them as its base type.")));
/*
* Everything looks okay. Try to create the pg_proc entry for the
@@ -278,21 +278,21 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
if (retset)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s returns a set",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
/*
- * If the given type(s) are all polymorphic, there's nothing we
- * can check. Otherwise, enforce consistency, and possibly refine
- * the result type.
+ * If the given type(s) are all polymorphic, there's nothing we can
+ * check. Otherwise, enforce consistency, and possibly refine the
+ * result type.
*/
if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) &&
(nargs == 1 ||
- (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
+ (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
{
/* nothing to check here */
}
@@ -305,8 +305,8 @@ lookup_agg_function(List *fnName,
}
/*
- * func_get_detail will find functions requiring run-time argument type
- * coercion, but nodeAgg.c isn't prepared to deal with that
+ * func_get_detail will find functions requiring run-time argument
+ * type coercion, but nodeAgg.c isn't prepared to deal with that
*/
if (true_oid_array[0] != ANYARRAYOID &&
true_oid_array[0] != ANYELEMENTOID &&
@@ -314,7 +314,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
if (nargs == 2 &&
true_oid_array[1] != ANYARRAYOID &&
@@ -323,7 +323,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
return fnOid;
}
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 89adeb57b99..ae8b7dec038 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.14 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.15 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -195,7 +195,7 @@ CreateConstraintEntry(const char *constraintName,
/*
* Register auto dependency from constraint to owning domain
*/
- ObjectAddress domobject;
+ ObjectAddress domobject;
domobject.classId = RelOid_pg_type;
domobject.objectId = domainId;
@@ -234,8 +234,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(indexRelId))
{
/*
- * Register normal dependency on the unique index that supports
- * a foreign-key constraint.
+ * Register normal dependency on the unique index that supports a
+ * foreign-key constraint.
*/
ObjectAddress relobject;
@@ -438,8 +438,8 @@ RemoveConstraintById(Oid conId)
Relation rel;
/*
- * If the constraint is for a relation, open and exclusive-lock the
- * relation it's for.
+ * If the constraint is for a relation, open and exclusive-lock
+ * the relation it's for.
*/
rel = heap_open(con->conrelid, AccessExclusiveLock);
@@ -463,7 +463,7 @@ RemoveConstraintById(Oid conId)
con->conrelid);
classForm = (Form_pg_class) GETSTRUCT(relTup);
- if (classForm->relchecks == 0) /* should not happen */
+ if (classForm->relchecks == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has relchecks = 0",
RelationGetRelationName(rel));
classForm->relchecks--;
@@ -483,16 +483,15 @@ RemoveConstraintById(Oid conId)
else if (OidIsValid(con->contypid))
{
/*
- * XXX for now, do nothing special when dropping a domain constraint
+ * XXX for now, do nothing special when dropping a domain
+ * constraint
*
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else
- {
elog(ERROR, "constraint %u is not of a known type", conId);
- }
/* Fry the constraint itself */
simple_heap_delete(conDesc, &tup->t_self);
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 70bd294297d..5c10fa7b28c 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.13 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.14 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,9 +76,9 @@ ConversionCreate(const char *conname, Oid connamespace,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("default conversion for %s to %s already exists",
- pg_encoding_to_char(conforencoding),
- pg_encoding_to_char(contoencoding))));
+ errmsg("default conversion for %s to %s already exists",
+ pg_encoding_to_char(conforencoding),
+ pg_encoding_to_char(contoencoding))));
}
/* open pg_conversion */
@@ -147,7 +147,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior)
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
- NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index da3e2a46920..141d3a142a7 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.81 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.82 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -409,7 +409,7 @@ OperatorCreate(const char *operatorName,
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("at least one of leftarg or rightarg must be specified")));
+ errmsg("at least one of leftarg or rightarg must be specified")));
if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
@@ -417,11 +417,11 @@ OperatorCreate(const char *operatorName,
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have commutators")));
+ errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have join selectivity")));
+ errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index d8ff4a5225a..2c11a17db5e 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.102 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.103 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -94,7 +94,7 @@ ProcedureCreate(const char *procedureName,
*/
if (returnType == ANYARRAYOID || returnType == ANYELEMENTOID)
{
- bool genericParam = false;
+ bool genericParam = false;
for (i = 0; i < parameterCount; i++)
{
@@ -231,7 +231,7 @@ ProcedureCreate(const char *procedureName,
returnsSet != oldproc->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change return type of existing function"),
+ errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION first.")));
/* Can't change aggregate status, either */
@@ -339,8 +339,8 @@ ProcedureCreate(const char *procedureName,
*
* This is normally applied during function definition, but in the case
* of a function with polymorphic arguments, we instead apply it during
- * function execution startup. The rettype is then the actual resolved
- * output type of the function, rather than the declared type. (Therefore,
+ * function execution startup. The rettype is then the actual resolved
+ * output type of the function, rather than the declared type. (Therefore,
* we should never see ANYARRAY or ANYELEMENT as rettype.)
*/
void
@@ -366,7 +366,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errdetail("Function's final statement must be a SELECT.")));
return;
}
@@ -395,9 +395,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
@@ -421,7 +421,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT must return exactly one column.")));
+ errdetail("Final SELECT must return exactly one column.")));
restype = ((TargetEntry *) lfirst(tlist))->resdom->restype;
if (!IsBinaryCoercible(restype, rettype))
@@ -481,7 +481,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT returns too many columns.")));
+ errdetail("Final SELECT returns too many columns.")));
attr = reln->rd_att->attrs[colindex - 1];
} while (attr->attisdropped);
rellogcols++;
@@ -538,8 +538,8 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type %s is not supported for SQL functions",
- format_type_be(rettype))));
+ errmsg("return type %s is not supported for SQL functions",
+ format_type_be(rettype))));
}
@@ -684,8 +684,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL functions cannot have arguments of type %s",
- format_type_be(proc->proargtypes[i]))));
+ errmsg("SQL functions cannot have arguments of type %s",
+ format_type_be(proc->proargtypes[i]))));
}
}
@@ -696,13 +696,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
prosrc = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
- * We can't do full prechecking of the function definition if there are
- * any polymorphic input types, because actual datatypes of expression
- * results will be unresolvable. The check will be done at runtime
- * instead.
+ * We can't do full prechecking of the function definition if there
+ * are any polymorphic input types, because actual datatypes of
+ * expression results will be unresolvable. The check will be done at
+ * runtime instead.
*
- * We can run the text through the raw parser though; this will at
- * least catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at least
+ * catch silly syntactic errors.
*/
if (!haspolyarg)
{
@@ -712,9 +712,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
check_sql_fn_retval(proc->prorettype, functyptype, querytree_list);
}
else
- {
querytree_list = pg_parse_query(prosrc);
- }
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 7980e6afadf..d578644e681 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.88 2003/07/21 01:59:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.89 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -359,7 +359,8 @@ TypeCreate(const char *typeName,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for 'c'atalog types */
+ Oid relationOid, /* only for 'c'atalog
+ * types */
char relationKind, /* ditto */
Oid inputProcedure,
Oid outputProcedure,
@@ -426,13 +427,13 @@ GenerateTypeDependencies(Oid typeNamespace,
/*
* If the type is a rowtype for a relation, mark it as internally
- * dependent on the relation, *unless* it is a stand-alone
- * composite type relation. For the latter case, we have to
- * reverse the dependency.
+ * dependent on the relation, *unless* it is a stand-alone composite
+ * type relation. For the latter case, we have to reverse the
+ * dependency.
*
- * In the former case, this allows the type to be auto-dropped when
- * the relation is, and not otherwise. And in the latter, of
- * course we get the opposite effect.
+ * In the former case, this allows the type to be auto-dropped when the
+ * relation is, and not otherwise. And in the latter, of course we get
+ * the opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -447,11 +448,11 @@ GenerateTypeDependencies(Oid typeNamespace,
}
/*
- * If the type is an array type, mark it auto-dependent on the
- * base type. (This is a compromise between the typical case
- * where the array type is automatically generated and the case
- * where it is manually created: we'd prefer INTERNAL for the
- * former case and NORMAL for the latter.)
+ * If the type is an array type, mark it auto-dependent on the base
+ * type. (This is a compromise between the typical case where the
+ * array type is automatically generated and the case where it is
+ * manually created: we'd prefer INTERNAL for the former case and
+ * NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 1d9b25b5b0a..5a57d5c5c77 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.12 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.13 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -256,16 +256,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes),
+ procForm->proargtypes),
get_namespace_name(namespaceOid))));
}
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index b377635099e..4fd43871e97 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.4 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.5 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,52 +79,52 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TABLE:
case OBJECT_COLUMN:
case OBJECT_TRIGGER:
- {
- Oid relid;
+ {
+ Oid relid;
- CheckRelationOwnership(stmt->relation, true);
+ CheckRelationOwnership(stmt->relation, true);
- relid = RangeVarGetRelid(stmt->relation, false);
+ relid = RangeVarGetRelid(stmt->relation, false);
- switch (stmt->renameType)
- {
- case OBJECT_TABLE:
+ switch (stmt->renameType)
{
- /*
- * RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace, as
- * well as ownership of the table.
- */
- Oid namespaceId = get_rel_namespace(relid);
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId,
- GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
-
- renamerel(relid, stmt->newname);
- break;
- }
- case OBJECT_COLUMN:
- renameatt(relid,
- stmt->subname, /* old att name */
- stmt->newname, /* new att name */
+ case OBJECT_TABLE:
+ {
+ /*
+ * RENAME TABLE requires that we (still) hold
+ * CREATE rights on the containing namespace,
+ * as well as ownership of the table.
+ */
+ Oid namespaceId = get_rel_namespace(relid);
+ AclResult aclresult;
+
+ aclresult = pg_namespace_aclcheck(namespaceId,
+ GetUserId(),
+ ACL_CREATE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
+ get_namespace_name(namespaceId));
+
+ renamerel(relid, stmt->newname);
+ break;
+ }
+ case OBJECT_COLUMN:
+ renameatt(relid,
+ stmt->subname, /* old att name */
+ stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- false); /* recursing already? */
- break;
- case OBJECT_TRIGGER:
- renametrig(relid,
- stmt->subname, /* old att name */
- stmt->newname); /* new att name */
- break;
- default:
- /*can't happen*/;
+ false); /* recursing already? */
+ break;
+ case OBJECT_TRIGGER:
+ renametrig(relid,
+ stmt->subname, /* old att name */
+ stmt->newname); /* new att name */
+ break;
+ default:
+ /* can't happen */ ;
+ }
+ break;
}
- break;
- }
default:
elog(ERROR, "unrecognized rename stmt type: %d",
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 5277884f1f8..dac2d5d7bbd 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.56 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.57 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -220,9 +220,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their
- * contents are probably not up-to-date on disk. (We don't throw a
- * warning here; it would just lead to chatter during a database-wide
+ * trying to analyze these is rather pointless, since their contents
+ * are probably not up-to-date on disk. (We don't throw a warning
+ * here; it would just lead to chatter during a database-wide
* ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index dafea7c8695..69085740cc5 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.96 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.97 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -603,10 +603,10 @@ Async_NotifyHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it off
- * while messing with the NOTIFY state. (We would have to save
- * and restore it anyway, because PGSemaphore operations inside
- * ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it
+ * off while messing with the NOTIFY state. (We would have to
+ * save and restore it anyway, because PGSemaphore operations
+ * inside ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
@@ -639,7 +639,8 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if
+ * needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 18f6bfcf6b5..23e03443fc5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.112 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.113 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@ typedef struct
*/
typedef struct
{
- Oid tableOid;
- Oid indexOid;
-} RelToCluster;
+ Oid tableOid;
+ Oid indexOid;
+} RelToCluster;
-static void cluster_rel(RelToCluster *rv, bool recheck);
+static void cluster_rel(RelToCluster * rv, bool recheck);
static Oid make_new_heap(Oid OIDOldHeap, const char *NewName);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
static List *get_indexattr_list(Relation OldHeap, Oid OldIndex);
@@ -74,7 +74,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
/*---------------------------------------------------------------------------
- * This cluster code allows for clustering multiple tables at once. Because
+ * This cluster code allows for clustering multiple tables at once. Because
* of this, we cannot just run everything on a single transaction, or we
* would be forced to acquire exclusive locks on all the tables being
* clustered, simultaneously --- very likely leading to deadlock.
@@ -82,17 +82,17 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
* To solve this we follow a similar strategy to VACUUM code,
* clustering each relation in a separate transaction. For this to work,
* we need to:
- * - provide a separate memory context so that we can pass information in
- * a way that survives across transactions
- * - start a new transaction every time a new relation is clustered
- * - check for validity of the information on to-be-clustered relations,
- * as someone might have deleted a relation behind our back, or
- * clustered one on a different index
- * - end the transaction
+ * - provide a separate memory context so that we can pass information in
+ * a way that survives across transactions
+ * - start a new transaction every time a new relation is clustered
+ * - check for validity of the information on to-be-clustered relations,
+ * as someone might have deleted a relation behind our back, or
+ * clustered one on a different index
+ * - end the transaction
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation being specified without index. In that case,
+ * We also allow a relation being specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
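
The cluster.c comment above describes a two-phase shape: build the to-do list in storage that outlives any single transaction, then process each entry in its own transaction, rechecking that the entry is still valid before acting on it. The standalone sketch below shows only that control flow; begin_txn(), commit_txn(), still_valid(), and the work_item_t list are hypothetical stand-ins for StartTransactionCommand(), CommitTransactionCommand(), the catalog rechecks, and the cluster_context list.

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int         table_oid;
    int         index_oid;
} work_item_t;

/* hypothetical transaction stubs standing in for the real calls */
static void begin_txn(void)  { printf("BEGIN\n");  }
static void commit_txn(void) { printf("COMMIT\n"); }

/*
 * hypothetical recheck: the relation might have been dropped, or clustered
 * on a different index, since the work list was built
 */
static bool
still_valid(const work_item_t *item)
{
    return item->table_oid != 0 && item->index_oid != 0;
}

int
main(void)
{
    /* phase 1: the work list lives outside any one "transaction" */
    work_item_t todo[] = {{1001, 2001}, {0, 0}, {1003, 2003}};
    int         ntodo = (int) (sizeof(todo) / sizeof(todo[0]));

    /* phase 2: one transaction per entry, skipping entries that went stale */
    for (int i = 0; i < ntodo; i++)
    {
        begin_txn();
        if (still_valid(&todo[i]))
            printf("cluster table %d using index %d\n",
                   todo[i].table_oid, todo[i].index_oid);
        commit_txn();
    }
    return 0;
}

Committing after each entry is the point of the design: locks are held only for the relation currently being processed, at the cost of having to re-verify each entry when its turn comes.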
@@ -103,10 +103,10 @@ cluster(ClusterStmt *stmt)
if (stmt->relation != NULL)
{
/* This is the single-relation case. */
- Oid tableOid,
- indexOid = InvalidOid;
- Relation rel;
- RelToCluster rvtc;
+ Oid tableOid,
+ indexOid = InvalidOid;
+ Relation rel;
+ RelToCluster rvtc;
/* Find and lock the table */
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@@ -123,10 +123,10 @@ cluster(ClusterStmt *stmt)
List *index;
/* We need to find the index that has indisclustered set. */
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index indexForm;
+ HeapTuple idxtuple;
+ Form_pg_index indexForm;
indexOid = lfirsto(index);
idxtuple = SearchSysCache(INDEXRELID,
@@ -152,14 +152,17 @@ cluster(ClusterStmt *stmt)
}
else
{
- /* The index is expected to be in the same namespace as the relation. */
+ /*
+ * The index is expected to be in the same namespace as the
+ * relation.
+ */
indexOid = get_relname_relid(stmt->indexname,
rel->rd_rel->relnamespace);
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
@@ -175,16 +178,16 @@ cluster(ClusterStmt *stmt)
else
{
/*
- * This is the "multi relation" case. We need to cluster all tables
- * that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all
+ * tables that have some index with indisclustered set.
*/
- MemoryContext cluster_context;
- List *rv,
- *rvs;
+ MemoryContext cluster_context;
+ List *rv,
+ *rvs;
/*
- * We cannot run this form of CLUSTER inside a user transaction block;
- * we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction
+ * block; we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
@@ -201,8 +204,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
- * cluster_context.
+ * Build the list of relations to cluster. Note that this lives
+ * in cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -210,13 +213,14 @@ cluster(ClusterStmt *stmt)
CommitTransactionCommand();
/* Ok, now that we've got them all, cluster them one by one */
- foreach (rv, rvs)
+ foreach(rv, rvs)
{
- RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
+ RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
/* Start a new transaction for each relation. */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
cluster_rel(rvtc, true);
CommitTransactionCommand();
}
@@ -244,7 +248,7 @@ cluster(ClusterStmt *stmt)
* them incrementally while we load the table.
*/
static void
-cluster_rel(RelToCluster *rvtc, bool recheck)
+cluster_rel(RelToCluster * rvtc, bool recheck)
{
Relation OldHeap,
OldIndex;
@@ -256,14 +260,14 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to
* check that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests.
- * We *must* skip the one on indisclustered since it would reject an
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an
* attempt to cluster a not-previously-clustered index.
*/
if (recheck)
{
- HeapTuple tuple;
- Form_pg_index indexForm;
+ HeapTuple tuple;
+ Form_pg_index indexForm;
/*
* Check if the relation and index still exist before opening them
@@ -319,10 +323,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
- * seqscan pass over the table to copy the missing rows, but that seems
- * expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not
+ * index every row of the relation). We could relax this by making a
+ * separate seqscan pass over the table to copy the missing rows, but
+ * that seems expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
@@ -334,7 +338,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
+ * we can prove all the rows are non-null. Note we only need look
* at the first column; multicolumn-capable AMs are *required* to
* index nulls in columns after the first.
*/
@@ -347,7 +351,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster when index access method does not handle nulls"),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -382,7 +386,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temp tables of other processes")));
+ errmsg("cannot cluster temp tables of other processes")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -397,7 +401,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* rebuild_relation: rebuild an existing relation
*
* This is shared code between CLUSTER and TRUNCATE. In the TRUNCATE
- * case, the new relation is built and left empty. In the CLUSTER case,
+ * case, the new relation is built and left empty. In the CLUSTER case,
* it is filled with data read from the old relation in the order specified
* by the index.
*
@@ -432,6 +436,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName);
+
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
@@ -754,8 +759,8 @@ swap_relfilenodes(Oid r1, Oid r2)
/* swap size statistics too, since new rel has freshly-updated stats */
{
- int4 swap_pages;
- float4 swap_tuples;
+ int4 swap_pages;
+ float4 swap_tuples;
swap_pages = relform1->relpages;
relform1->relpages = relform2->relpages;
@@ -857,20 +862,20 @@ swap_relfilenodes(Oid r1, Oid r2)
static List *
get_tables_to_cluster(MemoryContext cluster_context)
{
- Relation indRelation;
- HeapScanDesc scan;
- ScanKeyData entry;
- HeapTuple indexTuple;
- Form_pg_index index;
- MemoryContext old_context;
- RelToCluster *rvtc;
- List *rvs = NIL;
+ Relation indRelation;
+ HeapScanDesc scan;
+ ScanKeyData entry;
+ HeapTuple indexTuple;
+ Form_pg_index index;
+ MemoryContext old_context;
+ RelToCluster *rvtc;
+ List *rvs = NIL;
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot ever
- * have indisclustered set, because CLUSTER will refuse to set it when
- * called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot
+ * ever have indisclustered set, because CLUSTER will refuse to set it
+ * when called with one of them as argument.
*/
indRelation = relation_openr(IndexRelationName, AccessShareLock);
ScanKeyEntryInitialize(&entry, 0,
@@ -886,8 +891,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;
/*
- * We have to build the list in a different memory context so
- * it will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it
+ * will survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index ecd50bdb367..e0ebba0df96 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.67 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.68 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
@@ -418,16 +418,17 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(lfirst(qualname));
/*
- * We cannot currently support cross-database comments (since other DBs
- * cannot see pg_description of this database). So, we reject attempts
- * to comment on a database other than the current one. Someday this
- * might be improved, but it would take a redesigned infrastructure.
+ * We cannot currently support cross-database comments (since other
+ * DBs cannot see pg_description of this database). So, we reject
+ * attempts to comment on a database other than the current one.
+ * Someday this might be improved, but it would take a redesigned
+ * infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from completing
- * (which is really pg_restore's fault, but for now we will work around
- * the problem here). Consensus is that the best fix is to treat wrong
- * database name as a WARNING not an ERROR.
+ * of the database. Erroring out would prevent pg_restore from
+ * completing (which is really pg_restore's fault, but for now we will
+ * work around the problem here). Consensus is that the best fix is
+ * to treat wrong database name as a WARNING not an ERROR.
*/
/* First get the database OID */
@@ -569,7 +570,7 @@ CommentRule(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@@ -811,8 +812,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
@@ -891,7 +892,7 @@ CommentConstraint(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@@ -902,8 +903,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for relation \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index b917c527aca..e9afb956246 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.9 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.10 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!superuser() &&
+ if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tup))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fa91439a579..5c7238de8dc 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.205 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.206 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ typedef enum CopyDest
COPY_FILE, /* to/from file */
COPY_OLD_FE, /* to/from frontend (old protocol) */
COPY_NEW_FE /* to/from frontend (new protocol) */
-} CopyDest;
+} CopyDest;
/*
* Represents the type of data returned by CopyReadAttribute()
@@ -82,17 +82,17 @@ typedef enum EolType
EOL_NL,
EOL_CR,
EOL_CRNL
-} EolType;
+} EolType;
/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static char *CopyReadAttribute(const char *delim, CopyReadResult *result);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
- Oid typelem, bool *isnull);
+ Oid typelem, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
@@ -136,6 +136,7 @@ static void CopySendChar(char c);
static void CopySendEndOfRow(bool binary);
static void CopyGetData(void *databuf, int datasize);
static int CopyGetChar(void);
+
#define CopyGetEof() (fe_eof)
static int CopyPeekChar(void);
static void CopyDonePeek(int c, bool pickup);
@@ -155,14 +156,14 @@ SendCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'H');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -200,14 +201,14 @@ ReceiveCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'G');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
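
The SendCopyBegin()/ReceiveCopyBegin() hunks above build the version-3 CopyOutResponse ('H') and CopyInResponse ('G') messages: one byte for the overall copy format (0 = text, 1 = binary), a 16-bit column count, then one 16-bit format code per column, with pq_beginmessage()/pq_endmessage() supplying the surrounding type byte and length word. The following is a self-contained sketch of that body layout in plain C rather than the backend's pq_* routines, for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Body of a v3 CopyOutResponse ('H') / CopyInResponse ('G') message:
	 * one byte for the overall format, an int16 column count, then one
	 * int16 format code per column, all big-endian as pq_sendint() sends
	 * them.  The type byte and length word are added around this body by
	 * pq_beginmessage()/pq_endmessage().
	 */
	static size_t
	copy_begin_body(uint8_t *buf, int format, int natts)
	{
		size_t		n = 0;
		int			i;

		buf[n++] = (uint8_t) format;			/* pq_sendbyte(&buf, format) */
		buf[n++] = (uint8_t) (natts >> 8);		/* pq_sendint(&buf, natts, 2) */
		buf[n++] = (uint8_t) natts;
		for (i = 0; i < natts; i++)
		{
			buf[n++] = (uint8_t) (format >> 8);	/* per-column format codes */
			buf[n++] = (uint8_t) format;
		}
		return n;
	}

	int
	main(void)
	{
		uint8_t		buf[64];
		size_t		len = copy_begin_body(buf, 0, 3);

		printf("body length = %zu bytes\n", len);	/* 1 + 2 + 3*2 = 9 */
		return 0;
	}

For this code, the per-column codes always repeat the overall format, which is why the loops in the hunks send the same value natts times.
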
@@ -289,7 +290,7 @@ CopySendData(void *databuf, int datasize)
/* no hope of recovering connection sync, so FATAL */
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("connection lost during COPY to stdout")));
+ errmsg("connection lost during COPY to stdout")));
}
break;
case COPY_NEW_FE:
@@ -378,7 +379,7 @@ CopyGetData(void *databuf, int datasize)
case COPY_NEW_FE:
while (datasize > 0 && !fe_eof)
{
- int avail;
+ int avail;
while (copy_msgbuf->cursor >= copy_msgbuf->len)
{
@@ -389,24 +390,24 @@ CopyGetData(void *databuf, int datasize)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(copy_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
- case 'd': /* CopyData */
+ case 'd': /* CopyData */
break;
- case 'c': /* CopyDone */
+ case 'c': /* CopyDone */
/* COPY IN correctly terminated by frontend */
fe_eof = true;
return;
- case 'f': /* CopyFail */
+ case 'f': /* CopyFail */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(copy_msgbuf))));
+ pq_getmsgstring(copy_msgbuf))));
break;
default:
ereport(ERROR,
@@ -421,7 +422,7 @@ CopyGetData(void *databuf, int datasize)
avail = datasize;
pq_copymsgbytes(copy_msgbuf, databuf, avail);
databuf = (void *) ((char *) databuf + avail);
- datasize =- avail;
+ datasize = -avail;
}
break;
}
@@ -430,7 +431,7 @@ CopyGetData(void *databuf, int datasize)
static int
CopyGetChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -448,16 +449,16 @@ CopyGetChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -479,7 +480,7 @@ CopyGetChar(void)
static int
CopyPeekChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -497,16 +498,16 @@ CopyPeekChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -524,7 +525,7 @@ CopyDonePeek(int c, bool pickup)
switch (copy_dest)
{
case COPY_FILE:
- if (!pickup)
+ if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, copy_file);
@@ -537,7 +538,11 @@ CopyDonePeek(int c, bool pickup)
/* We want to pick it up */
(void) pq_getbyte();
}
- /* If we didn't want to pick it up, just leave it where it sits */
+
+ /*
+ * If we didn't want to pick it up, just leave it where it
+ * sits
+ */
break;
case COPY_NEW_FE:
if (!pickup)
@@ -737,7 +742,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/*
* Presently, only single-character delimiter strings are supported.
@@ -791,8 +796,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -810,8 +815,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -841,8 +846,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -863,7 +868,7 @@ DoCopy(const CopyStmt *stmt)
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
copy_file = AllocateFile(filename, PG_BINARY_W);
@@ -872,8 +877,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -955,8 +960,8 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Create a temporary memory context that we can reset once per row
- * to recover palloc'd memory. This avoids any problems with leaks
+ * Create a temporary memory context that we can reset once per row to
+ * recover palloc'd memory. This avoids any problems with leaks
* inside datatype output routines, and should be faster than retail
* pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
*/
@@ -1040,9 +1045,9 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (isnull)
{
if (!binary)
- CopySendString(null_print); /* null indicator */
+ CopySendString(null_print); /* null indicator */
else
- CopySendInt32(-1); /* null marker */
+ CopySendInt32(-1); /* null marker */
}
else
{
@@ -1060,7 +1065,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(elements[attnum - 1])));
+ ObjectIdGetDatum(elements[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1199,7 +1204,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
/* attribute is NOT to be copied from input */
/* use default value if one exists */
- Node *defexpr = build_column_default(rel, i + 1);
+ Node *defexpr = build_column_default(rel, i + 1);
if (defexpr != NULL)
{
@@ -1219,10 +1224,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Easiest way to do this is to use parse_coerce.c to set up
* an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function call
- * and/or CoerceToDomain nodes.) The bottom of the expression
- * is a Param node so that we can fill in the actual datum during
- * the data input loop.
+ * the expression might contain a length-coercion-function
+ * call and/or CoerceToDomain nodes.) The bottom of the
+ * expression is a Param node so that we can fill in the
+ * actual datum during the data input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@@ -1241,11 +1246,11 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable
- * whether we should do this for COPY, since it's not really an
- * "INSERT" statement as such. However, executing these triggers
- * maintains consistency with the EACH ROW triggers that we already
- * fire on COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether
+ * we should do this for COPY, since it's not really an "INSERT"
+ * statement as such. However, executing these triggers maintains
+ * consistency with the EACH ROW triggers that we already fire on
+ * COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
@@ -1276,13 +1281,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
tmp = CopyGetInt32();
if (CopyGetEof() || tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
@@ -1290,7 +1295,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (CopyGetEof())
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
@@ -1418,9 +1423,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Complain if there are more fields on the input line.
*
- * Special case: if we're reading a zero-column table, we
- * won't yet have called CopyReadAttribute() at all; so do that
- * and check we have an empty line. Fortunately we can keep that
+ * Special case: if we're reading a zero-column table, we won't
+ * yet have called CopyReadAttribute() at all; so do that and
+ * check we have an empty line. Fortunately we can keep that
* silly corner case out of the main line of execution.
*/
if (result == NORMAL_ATTR)
@@ -1431,7 +1436,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR || *string != '\0')
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
if (result == END_OF_FILE)
{
/* EOF at start of line: all is well */
@@ -1442,7 +1447,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
else
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
/*
@@ -1475,8 +1480,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_in_element,
+ &oid_in_function,
+ oid_in_element,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -1531,9 +1536,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the expression
- * to replace the value (consider e.g. a timestamp precision
- * restriction).
+ * Execute the constraint expression. Allow the
+ * expression to replace the value (consider e.g. a
+ * timestamp precision restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@@ -1674,11 +1679,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
- /* Check for \r\n on first line, _and_ handle \r\n. */
+ errhint("Use \"\\r\" to represent carriage return.")));
+ /* Check for \r\n on first line, _and_ handle \r\n. */
if (copy_lineno == 1 || eol_type == EOL_CRNL)
{
- int c2 = CopyPeekChar();
+ int c2 = CopyPeekChar();
+
if (c2 == '\n')
{
CopyDonePeek(c2, true); /* eat newline */
@@ -1690,9 +1696,13 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
if (eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
- /* if we got here, it is the first line and we didn't get \n, so put it back */
+
+ /*
+ * if we got here, it is the first line and we didn't
+ * get \n, so put it back
+ */
CopyDonePeek(c2, false);
eol_type = EOL_CR;
}
@@ -1802,12 +1812,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
c = CopyGetChar();
if (c == '\n')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker does not match previous newline style")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker does not match previous newline style")));
if (c != '\r')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker corrupt")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker corrupt")));
}
c = CopyGetChar();
if (c != '\r' && c != '\n')
@@ -1816,21 +1826,20 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
errmsg("end-of-copy marker corrupt")));
if ((eol_type == EOL_NL && c != '\n') ||
(eol_type == EOL_CRNL && c != '\n') ||
- (eol_type == EOL_CR && c != '\r'))
+ (eol_type == EOL_CR && c != '\r'))
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("end-of-copy marker does not match previous newline style")));
+
/*
- * In protocol version 3, we should ignore anything after
- * \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything
+ * after \. up to the protocol end of copy data. (XXX
+ * maybe better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
while (c != EOF)
- {
c = CopyGetChar();
- }
}
*result = END_OF_FILE;
goto copy_eof;
@@ -2045,8 +2054,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
if (intMember(attnum, attnums))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" specified more than once",
- name)));
+ errmsg("attribute \"%s\" specified more than once",
+ name)));
attnums = lappendi(attnums, attnum);
}
}
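
One copy.c hunk above deserves a second look: pgindent rewrites "datasize =- avail;" as "datasize = -avail;". Both spellings mean the same thing to the compiler, assign the negation of avail, so only the layout changed; but since the surrounding loop is consuming avail bytes out of datasize, a decrement ("datasize -= avail;") looks like what was intended, and the new spacing at least makes the pre-existing slip easier to see. A small standalone example of the difference, not taken from the patch:

	#include <stdio.h>

	int
	main(void)
	{
		int			datasize = 100;
		int			avail = 30;

		datasize = -avail;			/* what "datasize =- avail;" also means */
		printf("assign-negate: %d\n", datasize);	/* prints -30 */

		datasize = 100;
		datasize -= avail;			/* the decrement the loop seems to want */
		printf("subtract:      %d\n", datasize);	/* prints 70 */

		return 0;
	}
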
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 70678b26b08..547f3fb2f3f 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.119 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.120 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,7 +200,7 @@ createdb(const CreatedbStmt *stmt)
if (dbpath != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use an alternate location on this platform")));
+ errmsg("cannot use an alternate location on this platform")));
#endif
/*
@@ -260,8 +260,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@@ -345,7 +345,7 @@ createdb(const CreatedbStmt *stmt)
/* Make the symlink, if needed */
if (alt_loc)
{
-#ifdef HAVE_SYMLINK /* already throws error above */
+#ifdef HAVE_SYMLINK /* already throws error above */
if (symlink(alt_loc, nominal_loc) != 0)
#endif
ereport(ERROR,
@@ -450,7 +450,7 @@ dropdb(const char *dbname)
char *nominal_loc;
char dbpath[MAXPGPATH];
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
@@ -503,8 +503,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
@@ -577,10 +577,13 @@ dropdb(const char *dbname)
void
RenameDatabase(const char *oldname, const char *newname)
{
- HeapTuple tup, newtup;
+ HeapTuple tup,
+ newtup;
Relation rel;
- SysScanDesc scan, scan2;
- ScanKeyData key, key2;
+ SysScanDesc scan,
+ scan2;
+ ScanKeyData key,
+ key2;
/*
* Obtain AccessExclusiveLock so that no new session gets started
@@ -610,15 +613,14 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. Might
- * not be necessary, but it's consistent with other database
- * operations.
+ * Make sure the database does not have active sessions. Might not be
+ * necessary, but it's consistent with other database operations.
*/
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyEntryInitialize(&key2, 0, Anum_pg_database_datname,
@@ -651,10 +653,10 @@ RenameDatabase(const char *oldname, const char *newname)
heap_close(rel, NoLock);
/*
- * Force dirty buffers out to disk, so that newly-connecting
- * backends will see the renamed database in pg_database right
- * away. (They'll see an uncommitted tuple, but they don't care;
- * see GetRawDatabaseInfo.)
+ * Force dirty buffers out to disk, so that newly-connecting backends
+ * will see the renamed database in pg_database right away. (They'll
+ * see an uncommitted tuple, but they don't care; see
+ * GetRawDatabaseInfo.)
*/
BufferSync();
}
@@ -671,7 +673,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
newtuple;
Relation rel;
ScanKeyData scankey;
- SysScanDesc scan;
+ SysScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
@@ -689,9 +691,9 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
errmsg("database \"%s\" does not exist", stmt->dbname)));
if (!(superuser()
- || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
+ || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
- stmt->dbname);
+ stmt->dbname);
MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
@@ -750,7 +752,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
Relation relation;
ScanKeyData scanKey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple tuple;
bool gottuple;
@@ -862,7 +864,7 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
#ifndef ALLOW_ABSOLUTE_DBPATHS
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("absolute paths are not allowed as database locations")));
+ errmsg("absolute paths are not allowed as database locations")));
#endif
prefix = dbpath;
}
@@ -874,8 +876,8 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
if (!var)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("postmaster environment variable \"%s\" not found",
- dbpath)));
+ errmsg("postmaster environment variable \"%s\" not found",
+ dbpath)));
if (!is_absolute_path(var))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
@@ -955,7 +957,7 @@ get_database_oid(const char *dbname)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
Oid oid;
@@ -993,7 +995,7 @@ get_database_name(Oid dbid)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
char *result;
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index bf0c95a75ef..c924dcc7b77 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.82 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.83 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -51,7 +51,8 @@ case_translate_language_name(const char *input, char *output)
{
int i;
- MemSet(output, 0, NAMEDATALEN); /* ensure result Name is zero-filled */
+ MemSet(output, 0, NAMEDATALEN); /* ensure result Name is
+ * zero-filled */
for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index bc137b0eaca..916c1ff772f 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.111 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.112 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,11 +45,11 @@ typedef struct ExplainState
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
-static double elapsed_time(struct timeval *starttime);
+static double elapsed_time(struct timeval * starttime);
static void explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
- Plan *outer_plan,
- int indent, ExplainState *es);
+ Plan *plan, PlanState * planstate,
+ Plan *outer_plan,
+ int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
@@ -58,8 +58,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
- const char *qlabel,
- StringInfo str, int indent, ExplainState *es);
+ const char *qlabel,
+ StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);
/*
@@ -255,8 +255,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime.
+ * Close down the query and free resources. Include time for this in
+ * the total runtime.
*/
gettimeofday(&starttime, NULL);
@@ -282,7 +282,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
/* Compute elapsed time in seconds since given gettimeofday() timestamp */
static double
-elapsed_time(struct timeval *starttime)
+elapsed_time(struct timeval * starttime)
{
struct timeval endtime;
@@ -313,7 +313,7 @@ elapsed_time(struct timeval *starttime)
*/
static void
explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
+ Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es)
{
@@ -542,8 +542,8 @@ explain_outNode(StringInfo str,
/*
* If the expression is still a function call, we can get
* the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the function
- * call, for example).
+ * can happen if the optimizer simplified away the
+ * function call, for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@@ -583,15 +583,13 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
else if (es->printAnalyze)
- {
appendStringInfo(str, " (never executed)");
- }
}
appendStringInfoChar(str, '\n');
@@ -709,7 +707,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->initPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -807,7 +805,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->subPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -865,7 +863,7 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
*/
if (outer_plan)
{
- Relids varnos = pull_varnos(node);
+ Relids varnos = pull_varnos(node);
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
@@ -1037,9 +1035,7 @@ make_ors_ands_explicit(List *orclauses)
FastListInit(&args);
foreach(orptr, orclauses)
- {
FastAppend(&args, make_ands_explicit(lfirst(orptr)));
- }
return (Node *) make_orclause(FastListValue(&args));
}
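
The explain.c hunks also show a quirk of the tool: pointer parameters whose type name is not on pgindent's typedef list come out with a space after the asterisk ("PlanState * planstate", "struct timeval * starttime"), while recognized typedefs keep the usual "Type *name" form. The usual explanation is that the underlying BSD indent only treats a name as a type if it is told so, and otherwise spaces the '*' as if it were a binary operator. The two spellings are identical to the compiler, as the short sketch below (with a made-up typedef and a POSIX struct timeval) shows:

	#include <stdio.h>
	#include <sys/time.h>

	typedef struct timeval MyTimeval;

	/* Known type name: the indenter keeps the conventional "Type *name". */
	static long
	usecs(struct timeval *tv)
	{
		return tv->tv_sec * 1000000L + tv->tv_usec;
	}

	/*
	 * Unrecognized type name: the same declaration comes out as
	 * "Type * name", spaced as if '*' were an operator; the compiler sees
	 * no difference.
	 */
	static long
	usecs_alias(MyTimeval * tv)
	{
		return usecs(tv);
	}

	int
	main(void)
	{
		struct timeval tv = {1, 500000};

		printf("%ld %ld\n", usecs(&tv), usecs_alias(&tv));
		return 0;
	}
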
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 7a6a3775d64..181f52e1143 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.31 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.32 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -80,8 +80,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -147,8 +147,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (parameterCount >= FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("functions cannot have more than %d arguments",
- FUNC_MAX_ARGS)));
+ errmsg("functions cannot have more than %d arguments",
+ FUNC_MAX_ARGS)));
toid = LookupTypeName(t);
if (OidIsValid(toid))
@@ -159,8 +159,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -330,8 +330,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
@@ -558,7 +558,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@@ -664,7 +664,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
@@ -728,7 +728,7 @@ SetFunctionReturnType(Oid funcOid, Oid newRetType)
elog(ERROR, "cache lookup failed for function %u", funcOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- if (procForm->prorettype != OPAQUEOID) /* caller messed up */
+ if (procForm->prorettype != OPAQUEOID) /* caller messed up */
elog(ERROR, "function %u doesn't return OPAQUE", funcOid);
/* okay to overwrite copied tuple */
@@ -815,7 +815,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* No shells, no pseudo-types allowed */
if (!get_typisdefined(sourcetypeid))
@@ -878,10 +878,11 @@ CreateCast(CreateCastStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
+
/*
* Restricting the volatility of a cast function may or may not be
* a good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -892,7 +893,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -902,12 +903,12 @@ CreateCast(CreateCastStmt *stmt)
}
else
{
- int16 typ1len;
- int16 typ2len;
- bool typ1byval;
- bool typ2byval;
- char typ1align;
- char typ2align;
+ int16 typ1len;
+ int16 typ2len;
+ bool typ1byval;
+ bool typ2byval;
+ char typ1align;
+ char typ2align;
/* indicates binary coercibility */
funcid = InvalidOid;
@@ -924,7 +925,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Also, insist that the types match as to size, alignment, and
* pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
+ * that they have similar representations. A pair of types that
* fail this test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
@@ -958,9 +959,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_openr(CastRelationName, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error message,
- * the unique index would catch it anyway (so no need to sweat about
- * race conditions).
+ * Check for duplicate. This is just to give a friendly error
+ * message, the unique index would catch it anyway (so no need to
+ * sweat about race conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 4cd66fd1b5d..5e3cec954d3 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.103 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.104 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,11 +44,11 @@
/* non-export function prototypes */
static void CheckPredicate(List *predList);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
/*
@@ -157,8 +157,8 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support UNIQUE indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support UNIQUE indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -192,16 +192,16 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Check that all of the attributes in a primary key are marked
- * as not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
if (primary)
{
- List *keys;
+ List *keys;
foreach(keys, attributeList)
{
- IndexElem *key = (IndexElem *) lfirst(keys);
+ IndexElem *key = (IndexElem *) lfirst(keys);
HeapTuple atttuple;
if (!key->name)
@@ -216,15 +216,16 @@ DefineIndex(RangeVar *heapRelation,
atttuple = SearchSysCacheAttName(relationId, key->name);
if (HeapTupleIsValid(atttuple))
{
- if (! ((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/*
* Try to make it NOT NULL.
*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the notnull
- * constraint either; but this is pretty debatable.
+ * itself doesn't cascade, we don't cascade the
+ * notnull constraint either; but this is pretty
+ * debatable.
*/
AlterTableAlterColumnSetNotNull(relationId, false,
key->name);
@@ -236,8 +237,8 @@ DefineIndex(RangeVar *heapRelation,
/* This shouldn't happen if parser did its job ... */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
}
@@ -248,7 +249,7 @@ DefineIndex(RangeVar *heapRelation,
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
- indexInfo->ii_Expressions = NIL; /* for now */
+ indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_Predicate = cnfPred;
indexInfo->ii_PredicateState = NIL;
@@ -308,7 +309,7 @@ CheckPredicate(List *predList)
if (contain_mutable_functions((Node *) predList))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
@@ -351,7 +352,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
else if (attribute->expr && IsA(attribute->expr, Var))
{
/* Tricky tricky, he wrote (column) ... treat as simple attr */
- Var *var = (Var *) attribute->expr;
+ Var *var = (Var *) attribute->expr;
indexInfo->ii_KeyAttrNumbers[attn] = var->varattno;
atttype = get_atttype(relId, var->varattno);
@@ -360,30 +361,30 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
{
/* Index expression */
Assert(attribute->expr != NULL);
- indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
attribute->expr);
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query plan
- * for an index expression, only simple scalar expressions;
- * hence these restrictions.
+ * We don't currently support generation of an actual query
+ * plan for an index expression, only simple scalar
+ * expressions; hence these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in index expression")));
+ errmsg("cannot use sub-select in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate in index expression")));
+ errmsg("cannot use aggregate in index expression")));
/*
* A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the same
- * data every time, it's not clear what the index entries mean at
- * all.
+ * since if you aren't going to get the same result for the
+ * same data every time, it's not clear what the index entries
+ * mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@@ -413,21 +414,20 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and
- * datetime_ops, so we ignore those opclass names
- * so the default *_ops is used. This can be
- * removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
+ * we ignore those opclass names so the default *_ops is used. This
+ * can be removed in some later release. bjm 2000/02/07
*
- * Release 7.1 removes lztext_ops, so suppress that too
- * for a while. tgl 2000/07/30
+ * Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
+ * 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops,
- * so suppress that too for awhile. I'm starting to
- * think we need a better approach. tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach.
+ * tgl 2000/10/01
*/
if (length(opclass) == 1)
{
- char *claname = strVal(lfirst(opclass));
+ char *claname = strVal(lfirst(opclass));
if (strcmp(claname, "network_ops") == 0 ||
strcmp(claname, "timespan_ops") == 0 ||
@@ -499,8 +499,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
@@ -607,7 +607,7 @@ ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(indOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", indOid);
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
@@ -785,7 +785,8 @@ ReindexDatabase(const char *dbname, bool force, bool all)
for (i = 0; i < relcnt; i++)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
if (reindex_relation(relids[i], force))
ereport(NOTICE,
(errmsg("relation %u was reindexed", relids[i])));
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 60b041466f1..52792bc31ab 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.15 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.16 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,13 +103,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Currently, we require superuser privileges to create an opclass.
* This seems necessary because we have no way to validate that the
* offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday,
- * if it can be done without solving the halting problem :-(
+ * expectations. It would be nice to provide such a check someday, if
+ * it can be done without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@@ -157,8 +157,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (operators[item->number - 1] != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- item->number)));
+ errmsg("operator number %d appears more than once",
+ item->number)));
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
@@ -211,7 +211,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@@ -532,7 +532,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
@@ -681,7 +681,7 @@ RenameOpClass(List *name, const char *access_method, const char *newname)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
namespaceOid = ((Form_pg_opclass) GETSTRUCT(tup))->opcnamespace;
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 6a4d479c121..ddc088fe2f5 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.10 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.11 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -103,7 +103,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
@@ -111,7 +111,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index cf4a0638717..aa5a5b9ea61 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,17 +4,17 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
- *
+ *
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.19 2003/08/01 13:53:36 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.20 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@
* Execute SQL DECLARE CURSOR command.
*/
void
-PerformCursorOpen(DeclareCursorStmt *stmt)
+PerformCursorOpen(DeclareCursorStmt * stmt)
{
List *rewritten;
Query *query;
@@ -64,7 +64,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* The query has been through parse analysis, but not rewriting or
* planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything strange.
+ * query, so we are not expecting rule rewriting to do anything
+ * strange.
*/
rewritten = QueryRewrite((Query *) stmt->query);
if (length(rewritten) != 1 || !IsA(lfirst(rewritten), Query))
@@ -86,8 +87,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
plan = planner(query, true, stmt->options);
/*
- * Create a portal and copy the query and plan into its memory context.
- * (If a duplicate cursor name already exists, warn and drop it.)
+ * Create a portal and copy the query and plan into its memory
+ * context. (If a duplicate cursor name already exists, warn and drop
+ * it.)
*/
portal = CreatePortal(stmt->portalname, true, false);
@@ -98,7 +100,7 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(query),
makeList1(plan),
PortalGetHeapMemory(portal));
@@ -108,9 +110,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* Set up options for portal.
*
- * If the user didn't specify a SCROLL type, allow or disallow
- * scrolling based on whether it would require any additional
- * runtime overhead to do so.
+ * If the user didn't specify a SCROLL type, allow or disallow scrolling
+ * based on whether it would require any additional runtime overhead
+ * to do so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -129,8 +131,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until PerformPortalFetch
- * is called.
+ * We're done; the query won't actually be run until
+ * PerformPortalFetch is called.
*/
}
@@ -169,7 +171,7 @@ PerformPortalFetch(FetchStmt *stmt,
/* FIXME: shouldn't this be an ERROR? */
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("portal \"%s\" does not exist", stmt->portalname)));
+ errmsg("portal \"%s\" does not exist", stmt->portalname)));
if (completionTag)
strcpy(completionTag, stmt->ismove ? "MOVE 0" : "FETCH 0");
return;
@@ -219,7 +221,7 @@ PerformPortalClose(const char *name)
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", name),
- errfunction("PerformPortalClose"))); /* for ecpg */
+ errfunction("PerformPortalClose"))); /* for ecpg */
return;
}
@@ -249,7 +251,8 @@ PortalCleanup(Portal portal, bool isError)
/*
* Shut down executor, if still running. We skip this during error
* abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't fail.
+ * resources, and we can't be sure that ExecutorEnd itself wouldn't
+ * fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@@ -271,14 +274,14 @@ PortalCleanup(Portal portal, bool isError)
void
PersistHoldablePortal(Portal portal)
{
- QueryDesc *queryDesc = PortalGetQueryDesc(portal);
+ QueryDesc *queryDesc = PortalGetQueryDesc(portal);
MemoryContext savePortalContext;
MemoryContext saveQueryContext;
MemoryContext oldcxt;
/*
- * If we're preserving a holdable portal, we had better be
- * inside the transaction that originally created it.
+ * If we're preserving a holdable portal, we had better be inside the
+ * transaction that originally created it.
*/
Assert(portal->createXact == GetCurrentTransactionId());
Assert(queryDesc != NULL);
@@ -321,9 +324,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
@@ -351,17 +353,17 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we
- * start at the beginning of the tuplestore and iterate through it
- * until we reach where we need to be. FIXME someday?
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (!portal->atEnd)
{
- long store_pos;
+ long store_pos;
- if (portal->posOverflow) /* oops, cannot trust portalPos */
+ if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
@@ -370,8 +372,8 @@ PersistHoldablePortal(Portal portal)
for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
{
- HeapTuple tup;
- bool should_free;
+ HeapTuple tup;
+ bool should_free;
tup = tuplestore_gettuple(portal->holdStore, true,
&should_free);
@@ -389,8 +391,8 @@ PersistHoldablePortal(Portal portal)
/*
* We can now release any subsidiary memory of the portal's heap
* context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto
- * the portal's heap via PortalContext.
+ * its context, but this will clean up anything that glommed onto the
+ * portal's heap via PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index cd58d7fc7b6..d0fabd1ad31 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.21 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.22 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(EState *estate,
- List *params, List *argtypes);
+ List *params, List *argtypes);
/*
* Implements the 'PREPARE' utility statement.
@@ -90,12 +90,12 @@ PrepareQuery(PrepareStmt *stmt)
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(stmt->query);
- /* Generate plans for queries. Snapshot is already set. */
+ /* Generate plans for queries. Snapshot is already set. */
plan_list = pg_plan_queries(query_list, false);
/* Save the results. */
StorePreparedStatement(stmt->name,
- NULL, /* text form not available */
+ NULL, /* text form not available */
commandTag,
query_list,
plan_list,
@@ -131,8 +131,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@@ -144,15 +144,15 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query
- * so that we can modify its destination (yech, but this has
- * always been ugly). For regular EXECUTE we can just use the
- * stored query where it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
+ * that we can modify its destination (yech, but this has always been
+ * ugly). For regular EXECUTE we can just use the stored query where
+ * it sits, since the executor is read-only.
*/
if (stmt->into)
{
MemoryContext oldContext;
- Query *query;
+ Query *query;
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -208,11 +208,11 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
static ParamListInfo
EvaluateParams(EState *estate, List *params, List *argtypes)
{
- int nargs = length(argtypes);
- ParamListInfo paramLI;
- List *exprstates;
- List *l;
- int i = 0;
+ int nargs = length(argtypes);
+ ParamListInfo paramLI;
+ List *exprstates;
+ List *l;
+ int i = 0;
/* Parser should have caught this error, but check for safety */
if (length(params) != nargs)
@@ -229,7 +229,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@@ -273,7 +273,7 @@ InitQueryHashTable(void)
* to the hash entry, so the caller can dispose of their copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -367,9 +367,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user: the
- * hash package is picky enough that it needs to be NULL-padded out to
- * the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user:
+ * the hash package is picky enough that it needs to be
+ * NULL-padded out to the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt_name, sizeof(key));
@@ -412,9 +412,9 @@ FetchPreparedStatementParams(const char *stmt_name)
* Note: the result is created or copied into current memory context.
*/
TupleDesc
-FetchPreparedStatementResultDesc(PreparedStatement *stmt)
+FetchPreparedStatementResultDesc(PreparedStatement * stmt)
{
- Query *query;
+ Query *query;
switch (ChoosePortalStrategy(stmt->query_list))
{
@@ -476,7 +476,7 @@ DropPreparedStatement(const char *stmt_name, bool showError)
void
ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
{
- ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
+ ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
PreparedStatement *entry;
List *l,
*query_list,
@@ -499,8 +499,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@@ -510,8 +510,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
/* Explain each query */
foreach(l, query_list)
{
- Query *query = (Query *) lfirst(l);
- Plan *plan = (Plan *) lfirst(plan_list);
+ Query *query = (Query *) lfirst(l);
+ Plan *plan = (Plan *) lfirst(plan_list);
bool is_last_query;
plan_list = lnext(plan_list);
@@ -533,7 +533,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 69000b29bc7..b0a4702a715 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.47 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.48 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
@@ -85,7 +85,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER.
*/
@@ -183,7 +183,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that this language exist and is
@@ -225,7 +225,7 @@ DropProceduralLanguageById(Oid langOid)
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(langOid),
0, 0, 0);
- if (!HeapTupleIsValid(langTup)) /* should not happen */
+ if (!HeapTupleIsValid(langTup)) /* should not happen */
elog(ERROR, "cache lookup failed for language %u", langOid);
simple_heap_delete(rel, &langTup->t_self);
@@ -266,7 +266,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5ad81634f41..4eb285daa33 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.14 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.15 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_userid);
@@ -215,7 +215,7 @@ RemoveSchemaById(Oid schemaOid)
tup = SearchSysCache(NAMESPACEOID,
ObjectIdGetDatum(schemaOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for namespace %u", schemaOid);
simple_heap_delete(relation, &tup->t_self);
@@ -248,9 +248,9 @@ RenameSchema(const char *oldname, const char *newname)
/* make sure the new name doesn't exist */
if (HeapTupleIsValid(
- SearchSysCache(NAMESPACENAME,
- CStringGetDatum(newname),
- 0, 0, 0)))
+ SearchSysCache(NAMESPACENAME,
+ CStringGetDatum(newname),
+ 0, 0, 0)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_SCHEMA),
errmsg("schema \"%s\" already exists", newname)));
@@ -270,7 +270,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 7ce7810fbca..01544a015b3 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.99 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.100 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
static void init_sequence(RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel);
+ SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@@ -97,10 +97,10 @@ DefineSequence(CreateSeqStmt *seq)
/* Values are NULL (or false) by default */
new.last_value = 0;
new.increment_by = 0;
- new.max_value = 0;
+ new.max_value = 0;
new.min_value = 0;
new.cache_value = 0;
- new.is_cycled = false;
+ new.is_cycled = false;
/* Check and set values */
init_params(seq->options, &new);
@@ -299,10 +299,10 @@ DefineSequence(CreateSeqStmt *seq)
/*
* AlterSequence
*
- * Modify the defition of a sequence relation
+ * Modify the defition of a sequence relation
*/
void
-AlterSequence(AlterSeqStmt *stmt)
+AlterSequence(AlterSeqStmt * stmt)
{
SeqTable elm;
Relation seqrel;
@@ -324,7 +324,7 @@ AlterSequence(AlterSeqStmt *stmt)
page = BufferGetPage(buf);
new.increment_by = seq->increment_by;
- new.max_value = seq->max_value;
+ new.max_value = seq->max_value;
new.min_value = seq->min_value;
new.cache_value = seq->cache_value;
new.is_cycled = seq->is_cycled;
@@ -346,9 +346,9 @@ AlterSequence(AlterSeqStmt *stmt)
}
/* save info in local cache */
- elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget cached
- * values) */
+ elm->last = new.last_value; /* last returned number */
+ elm->cached = new.last_value; /* last cached number (forget
+ * cached values) */
START_CRIT_SECTION();
@@ -494,9 +494,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MAXVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MAXVALUE (%s)",
+ sequence->relname, buf)));
}
next = minv;
}
@@ -517,9 +517,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MINVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MINVALUE (%s)",
+ sequence->relname, buf)));
}
next = maxv;
}
@@ -895,9 +895,9 @@ init_params(List *options, Form_pg_sequence new)
errmsg("conflicting or redundant options")));
increment_by = defel;
}
+
/*
- * start is for a new sequence
- * restart is for alter
+ * start is for a new sequence restart is for alter
*/
else if (strcmp(defel->defname, "start") == 0 ||
strcmp(defel->defname, "restart") == 0)
@@ -963,9 +963,9 @@ init_params(List *options, Form_pg_sequence new)
|| (max_value != (DefElem *) NULL && !max_value->arg))
{
if (new->increment_by > 0)
- new->max_value = SEQ_MAXVALUE; /* ascending seq */
+ new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
else if (max_value != (DefElem *) NULL)
new->max_value = defGetInt64(max_value);
@@ -975,9 +975,9 @@ init_params(List *options, Form_pg_sequence new)
|| (min_value != (DefElem *) NULL && !min_value->arg))
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
- new->min_value = SEQ_MINVALUE; /* descending seq */
+ new->min_value = SEQ_MINVALUE; /* descending seq */
}
else if (min_value != (DefElem *) NULL)
new->min_value = defGetInt64(min_value);
@@ -996,7 +996,7 @@ init_params(List *options, Form_pg_sequence new)
}
/* START WITH */
- if (new->last_value == 0 && last_value == (DefElem *) NULL)
+ if (new->last_value == 0 && last_value == (DefElem *) NULL)
{
if (new->increment_by > 0)
new->last_value = new->min_value; /* ascending seq */
@@ -1015,8 +1015,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
@@ -1027,8 +1027,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index b3108053d9d..6e503fdac54 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.76 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.77 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,18 +57,19 @@
*/
typedef struct OnCommitItem
{
- Oid relid; /* relid of relation */
- OnCommitAction oncommit; /* what to do at end of xact */
+ Oid relid; /* relid of relation */
+ OnCommitAction oncommit; /* what to do at end of xact */
/*
* If this entry was created during this xact, it should be deleted at
* xact abort. Conversely, if this entry was deleted during this
* xact, it should be removed at xact commit. We leave deleted
- * entries in the list until commit so that we can roll back if needed.
+ * entries in the list until commit so that we can roll back if
+ * needed.
*/
bool created_in_cur_xact;
bool deleted_in_cur_xact;
-} OnCommitItem;
+} OnCommitItem;
static List *on_commits = NIL;
@@ -82,14 +83,14 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterTableAddCheckConstraint(Relation rel, Constraint *constr);
static void AlterTableAddForeignKeyConstraint(Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static int transformColumnNameList(Oid relId, List *colList,
- int16 *attnums, Oid *atttypids);
+ int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids);
-static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids);
+static Oid transformFkeyCheckAttrs(Relation pkrel,
+ int numattrs, int16 *attnums);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@@ -206,8 +207,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (strcmp(check[i].ccname, cdef->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("duplicate CHECK constraint name \"%s\"",
- cdef->name)));
+ errmsg("duplicate CHECK constraint name \"%s\"",
+ cdef->name)));
}
check[ncheck].ccname = cdef->name;
}
@@ -399,7 +400,7 @@ TruncateRelation(const RangeVar *relation)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temp tables of other processes")));
+ errmsg("cannot truncate temp tables of other processes")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
@@ -435,8 +436,8 @@ TruncateRelation(const RangeVar *relation)
heap_close(fkeyRel, AccessShareLock);
/*
- * Do the real work using the same technique as cluster, but
- * without the data-copying portion
+ * Do the real work using the same technique as cluster, but without
+ * the data-copying portion
*/
rebuild_relation(rel, InvalidOid);
@@ -570,8 +571,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
@@ -652,7 +653,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
@@ -803,11 +804,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("attribute \"%s\" has a type conflict",
- attributeName),
+ errmsg("attribute \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
@@ -1230,8 +1231,8 @@ renameatt(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1257,7 +1258,7 @@ renameatt(Oid myrelid,
/*
* Scan through index columns to see if there's any simple index
- * entries for this attribute. We ignore expressional entries.
+ * entries for this attribute. We ignore expressional entries.
*/
indextup = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -1270,6 +1271,7 @@ renameatt(Oid myrelid,
{
if (attnum != indexform->indkey[i])
continue;
+
/*
* Found one, rename it.
*/
@@ -1279,6 +1281,7 @@ renameatt(Oid myrelid,
0, 0);
if (!HeapTupleIsValid(atttup))
continue; /* should we raise an error? */
+
/*
* Update the (copied) attribute tuple.
*/
@@ -1366,7 +1369,7 @@ renamerel(Oid myrelid, const char *newrelname)
reltup = SearchSysCacheCopy(RELOID,
PointerGetDatum(myrelid),
0, 0, 0);
- if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
+ if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
@@ -1743,7 +1746,7 @@ AlterTableAddColumn(Oid myrelid,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- get_rel_name(childrelid), colDef->colname)));
+ get_rel_name(childrelid), colDef->colname)));
/*
* XXX if we supported NOT NULL or defaults, would need to do
@@ -1782,7 +1785,7 @@ AlterTableAddColumn(Oid myrelid,
if (find_inheritance_children(myrelid) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute must be added to child tables too")));
+ errmsg("attribute must be added to child tables too")));
}
/*
@@ -1801,14 +1804,14 @@ AlterTableAddColumn(Oid myrelid,
if (colDef->raw_default || colDef->cooked_default)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("adding columns with defaults is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
+ errmsg("adding columns with defaults is not implemented"),
+ errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
if (colDef->is_not_null)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("adding NOT NULL columns is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
+ errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1829,8 +1832,8 @@ AlterTableAddColumn(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
@@ -2014,8 +2017,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2057,8 +2060,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute \"%s\" is in a primary key",
- colName)));
+ errmsg("attribute \"%s\" is in a primary key",
+ colName)));
}
}
@@ -2158,8 +2161,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2286,8 +2289,8 @@ AlterTableAlterColumnDefault(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2450,8 +2453,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum < 0)
@@ -2476,8 +2479,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column datatype %s can only have storage \"plain\"",
- format_type_be(attrtuple->atttypid))));
+ errmsg("column datatype %s can only have storage \"plain\"",
+ format_type_be(attrtuple->atttypid))));
}
simple_heap_update(attrelation, &tuple->t_self, tuple);
@@ -2573,7 +2576,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
(errmsg("table \"%s\" is already WITHOUT OIDS",
RelationGetRelationName(rel))));
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
return;
}
@@ -2601,8 +2604,8 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
attrel = heap_open(RelOid_pg_attribute, RowExclusiveLock);
/*
- * Oids are being removed from the relation, so we need
- * to remove the oid pg_attribute record relating.
+ * Oids are being removed from the relation, so we need to remove
+ * the oid pg_attribute record relating.
*/
atttup = SearchSysCache(ATTNUM,
ObjectIdGetDatum(myrelid),
@@ -2621,7 +2624,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
}
/*
@@ -2663,8 +2666,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Can't drop a system attribute */
/* XXX perhaps someday allow dropping OID? */
@@ -2712,7 +2715,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
childatt->attinhcount--;
@@ -2731,9 +2734,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
}
/*
- * Propagate to children if desired. Unlike most other ALTER routines,
- * we have to do this one level of recursion at a time; we can't use
- * find_all_inheritors to do it in one pass.
+ * Propagate to children if desired. Unlike most other ALTER
+ * routines, we have to do this one level of recursion at a time; we
+ * can't use find_all_inheritors to do it in one pass.
*/
if (recurse)
{
@@ -2763,7 +2766,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
@@ -2882,18 +2885,18 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetNamespace(rel),
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
constr->name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
&counter);
/*
@@ -2923,14 +2926,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (fkconstraint->constr_name)
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
+ RelationGetRelid(rel),
RelationGetNamespace(rel),
fkconstraint->constr_name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
fkconstraint->constr_name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
fkconstraint->constr_name = GenerateConstraintName(CONSTRAINT_RELATION,
@@ -2959,7 +2962,7 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
/*
* Add a check constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -2979,13 +2982,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
Node *expr;
/*
- * We need to make a parse state and range
- * table to allow us to do transformExpr()
+ * We need to make a parse state and range table to allow us to do
+ * transformExpr()
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, true, true);
@@ -3006,8 +3009,8 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (length(pstate->p_rtable) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("CHECK constraint may only reference relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("CHECK constraint may only reference relation \"%s\"",
+ RelationGetRelationName(rel))));
/*
* No subplans or aggregates, either...
@@ -3070,15 +3073,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (!successful)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("CHECK constraint \"%s\" is violated at some row(s)",
- constr->name)));
+ errmsg("CHECK constraint \"%s\" is violated at some row(s)",
+ constr->name)));
/*
- * Call AddRelationRawConstraints to do
- * the real adding -- It duplicates some
- * of the above, but does not check the
- * validity of the constraint against
- * tuples already in the table.
+ * Call AddRelationRawConstraints to do the real adding -- It
+ * duplicates some of the above, but does not check the validity of
+ * the constraint against tuples already in the table.
*/
AddRelationRawConstraints(rel, NIL, makeList1(constr));
}
@@ -3086,7 +3087,7 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -3106,12 +3107,11 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that
- * someone doesn't delete rows out from under us.
- * (Although a lesser lock would do for that purpose,
- * we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock
- * will just create a risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't
+ * delete rows out from under us. (Although a lesser lock would do for
+ * that purpose, we'll need exclusive lock anyway to add triggers to
+ * the pk table; trying to start with a lesser lock will just create a
+ * risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
@@ -3152,8 +3152,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
errmsg("cannot reference temporary table from permanent table constraint")));
/*
- * Look up the referencing attributes to make sure they
- * exist, and record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and
+ * record their attnums and type OIDs.
*/
for (i = 0; i < INDEX_MAX_KEYS; i++)
{
@@ -3166,10 +3166,10 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted,
- * lookup the definition of the primary key and use it. Otherwise,
- * validate the supplied attribute list. In either case, discover
- * the index OID and the attnums and type OIDs of the attributes.
+ * If the attribute list for the referenced table was omitted, lookup
+ * the definition of the primary key and use it. Otherwise, validate
+ * the supplied attribute list. In either case, discover the index
+ * OID and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
@@ -3208,8 +3208,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
}
/*
- * Check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Check that the constraint is satisfied by existing rows (we can
+ * skip this during table creation).
*/
if (!fkconstraint->skip_validation)
validateForeignKeyConstraint(fkconstraint, rel, pkrel);
@@ -3225,7 +3225,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
RelationGetRelid(rel),
fkattnum,
numfks,
- InvalidOid, /* not a domain constraint */
+ InvalidOid, /* not a domain
+ * constraint */
RelationGetRelid(pkrel),
pkattnum,
numpks,
@@ -3233,7 +3234,7 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkconstraint->fk_del_action,
fkconstraint->fk_matchtype,
indexOid,
- NULL, /* no check constraint */
+ NULL, /* no check constraint */
NULL,
NULL);
@@ -3276,8 +3277,8 @@ transformColumnNameList(Oid relId, List *colList,
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
@@ -3291,7 +3292,7 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Used when the column list in the REFERENCES specification
+ * for the pkrel. Used when the column list in the REFERENCES specification
* is omitted.
*/
static int
@@ -3339,12 +3340,12 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/*
- * Now build the list of PK attributes from the indkey definition
- * (we assume a primary key cannot have expressional elements)
+ * Now build the list of PK attributes from the indkey definition (we
+ * assume a primary key cannot have expressional elements)
*/
*attnamelist = NIL;
for (i = 0; i < indexStruct->indnatts; i++)
@@ -3389,7 +3390,8 @@ transformFkeyCheckAttrs(Relation pkrel,
{
HeapTuple indexTuple;
Form_pg_index indexStruct;
- int i, j;
+ int i,
+ j;
indexoid = lfirsto(indexoidscan);
indexTuple = SearchSysCache(INDEXRELID,
@@ -3453,7 +3455,7 @@ transformFkeyCheckAttrs(Relation pkrel,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
errmsg("there is no UNIQUE constraint matching given keys for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ RelationGetRelationName(pkrel))));
freeList(indexoidlist);
@@ -3969,17 +3971,17 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
void
AlterTableClusterOn(Oid relOid, const char *indexName)
{
- Relation rel,
- pg_index;
- List *index;
- Oid indexOid;
- HeapTuple indexTuple;
- Form_pg_index indexForm;
-
+ Relation rel,
+ pg_index;
+ List *index;
+ Oid indexOid;
+ HeapTuple indexTuple;
+ Form_pg_index indexForm;
+
rel = heap_open(relOid, AccessExclusiveLock);
indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace);
-
+
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -3994,36 +3996,37 @@ AlterTableClusterOn(Oid relOid, const char *indexName)
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * If this is the same index the relation was previously
- * clustered on, no need to do anything.
+ * If this is the same index the relation was previously clustered on,
+ * no need to do anything.
*/
if (indexForm->indisclustered)
{
ereport(NOTICE,
- (errmsg("table \"%s\" is already being clustered on index \"%s\"",
- NameStr(rel->rd_rel->relname), indexName)));
+ (errmsg("table \"%s\" is already being clustered on index \"%s\"",
+ NameStr(rel->rd_rel->relname), indexName)));
ReleaseSysCache(indexTuple);
heap_close(rel, NoLock);
return;
}
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
-
+
/*
* Now check each index in the relation and set the bit where needed.
*/
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index idxForm;
-
+ HeapTuple idxtuple;
+ Form_pg_index idxForm;
+
indexOid = lfirsto(index);
idxtuple = SearchSysCacheCopy(INDEXRELID,
- ObjectIdGetDatum(indexOid),
+ ObjectIdGetDatum(indexOid),
0, 0, 0);
if (!HeapTupleIsValid(idxtuple))
elog(ERROR, "cache lookup failed for index %u", indexOid);
idxForm = (Form_pg_index) GETSTRUCT(idxtuple);
+
/*
* Unset the bit if set. We know it's wrong because we checked
* this earlier.
@@ -4100,7 +4103,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared relations cannot be toasted after initdb")));
+ errmsg("shared relations cannot be toasted after initdb")));
/*
* Is it already toasted?
@@ -4331,12 +4334,12 @@ needs_toast_table(Relation rel)
void
register_on_commit_action(Oid relid, OnCommitAction action)
{
- OnCommitItem *oc;
+ OnCommitItem *oc;
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON COMMIT
- * action we need to take.
+ * We needn't bother registering the relation unless there is an ON
+ * COMMIT action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
@@ -4366,7 +4369,7 @@ remove_on_commit_action(Oid relid)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (oc->relid == relid)
{
@@ -4389,7 +4392,7 @@ PreCommit_on_commit_actions(void)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
/* Ignore entry if already dropped in this xact */
if (oc->deleted_in_cur_xact)
@@ -4403,23 +4406,25 @@ PreCommit_on_commit_actions(void)
break;
case ONCOMMIT_DELETE_ROWS:
heap_truncate(oc->relid);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
break;
case ONCOMMIT_DROP:
- {
- ObjectAddress object;
+ {
+ ObjectAddress object;
- object.classId = RelOid_pg_class;
- object.objectId = oc->relid;
- object.objectSubId = 0;
- performDeletion(&object, DROP_CASCADE);
- /*
- * Note that table deletion will call remove_on_commit_action,
- * so the entry should get marked as deleted.
- */
- Assert(oc->deleted_in_cur_xact);
- break;
- }
+ object.classId = RelOid_pg_class;
+ object.objectId = oc->relid;
+ object.objectSubId = 0;
+ performDeletion(&object, DROP_CASCADE);
+
+ /*
+ * Note that table deletion will call
+ * remove_on_commit_action, so the entry should get
+ * marked as deleted.
+ */
+ Assert(oc->deleted_in_cur_xact);
+ break;
+ }
}
}
}
@@ -4442,7 +4447,7 @@ AtEOXact_on_commit_actions(bool isCommit)
l = on_commits;
while (l != NIL)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (isCommit ? oc->deleted_in_cur_xact :
oc->created_in_cur_xact)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6e5b38804ff..d3e969c7e4f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.153 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.154 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,17 +41,17 @@
static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
static HeapTuple GetTupleForTrigger(EState *estate,
- ResultRelInfo *relinfo,
- ItemPointer tid,
- CommandId cid,
- TupleTableSlot **newSlot);
+ ResultRelInfo *relinfo,
+ ItemPointer tid,
+ CommandId cid,
+ TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
FmgrInfo *finfo,
MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context);
@@ -97,18 +97,19 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
else if (stmt->isconstraint)
{
/*
- * If this trigger is a constraint (and a foreign key one)
- * then we really need a constrrelid. Since we don't have one,
- * we'll try to generate one from the argument information.
+ * If this trigger is a constraint (and a foreign key one) then we
+ * really need a constrrelid. Since we don't have one, we'll try
+ * to generate one from the argument information.
*
- * This is really just a workaround for a long-ago pg_dump bug
- * that omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we can't
- * determine the correct relation, because that would prevent loading
- * the dump file. Instead, NOTICE here and ERROR in the trigger.
+ * This is really just a workaround for a long-ago pg_dump bug that
+ * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
+ * commands. We don't want to bomb out completely here if we
+ * can't determine the correct relation, because that would
+ * prevent loading the dump file. Instead, NOTICE here and ERROR
+ * in the trigger.
*/
- bool needconstrrelid = false;
- void *elem = NULL;
+ bool needconstrrelid = false;
+ void *elem = NULL;
if (strncmp(strVal(llast(stmt->funcname)), "RI_FKey_check_", 14) == 0)
{
@@ -265,8 +266,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
@@ -280,7 +281,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -480,8 +481,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@@ -576,7 +577,7 @@ RemoveTriggerById(Oid trigOid)
elog(ERROR, "cache lookup failed for relation %u", relid);
classForm = (Form_pg_class) GETSTRUCT(tuple);
- if (classForm->reltriggers == 0) /* should not happen */
+ if (classForm->reltriggers == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has reltriggers = 0",
RelationGetRelationName(rel));
classForm->reltriggers--;
@@ -650,8 +651,8 @@ renametrig(Oid relid,
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
@@ -693,8 +694,8 @@ renametrig(Oid relid,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
@@ -762,7 +763,7 @@ RelationBuildTriggers(Relation relation)
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
@@ -927,8 +928,8 @@ CopyTriggerDesc(TriggerDesc *trigdesc)
trigger->tgname = pstrdup(trigger->tgname);
if (trigger->tgnargs > 0)
{
- char **newargs;
- int16 j;
+ char **newargs;
+ int16 j;
newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
for (j = 0; j < trigger->tgnargs; j++)
@@ -1101,7 +1102,7 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
return false;
return true;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Call a trigger function.
@@ -1166,10 +1167,10 @@ ExecCallTriggerFunc(TriggerData *trigdata,
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1190,10 +1191,10 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1209,7 +1210,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1242,8 +1243,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1279,10 +1280,10 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1303,10 +1304,10 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1322,7 +1323,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1361,8 +1362,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1408,10 +1409,10 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1432,10 +1433,10 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1451,7 +1452,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1498,8 +1499,8 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
for (i = 0; i < ntrigs; i++)
{
@@ -1639,19 +1640,20 @@ ltrmark:;
* ----------
*/
-typedef struct DeferredTriggersData {
- /* Internal data is held in a per-transaction memory context */
- MemoryContext deftrig_cxt;
- /* ALL DEFERRED or ALL IMMEDIATE */
- bool deftrig_all_isset;
- bool deftrig_all_isdeferred;
- /* Per trigger state */
- List *deftrig_trigstates;
- /* List of pending deferred triggers. Previous comment below */
- DeferredTriggerEvent deftrig_events;
- DeferredTriggerEvent deftrig_events_imm;
- DeferredTriggerEvent deftrig_event_tail;
-} DeferredTriggersData;
+typedef struct DeferredTriggersData
+{
+ /* Internal data is held in a per-transaction memory context */
+ MemoryContext deftrig_cxt;
+ /* ALL DEFERRED or ALL IMMEDIATE */
+ bool deftrig_all_isset;
+ bool deftrig_all_isdeferred;
+ /* Per trigger state */
+ List *deftrig_trigstates;
+ /* List of pending deferred triggers. Previous comment below */
+ DeferredTriggerEvent deftrig_events;
+ DeferredTriggerEvent deftrig_events_imm;
+ DeferredTriggerEvent deftrig_event_tail;
+} DeferredTriggersData;
/* ----------
* deftrig_events, deftrig_event_tail:
@@ -1661,8 +1663,8 @@ typedef struct DeferredTriggersData {
* Because this can grow pretty large, we don't use separate List nodes,
* but instead thread the list through the dte_next fields of the member
* nodes. Saves just a few bytes per entry, but that adds up.
- *
- * deftrig_events_imm holds the tail pointer as of the last
+ *
+ * deftrig_events_imm holds the tail pointer as of the last
* deferredTriggerInvokeEvents call; we can use this to avoid rescanning
* entries unnecessarily. It is NULL if deferredTriggerInvokeEvents
* hasn't run since the last state change.
@@ -1674,7 +1676,7 @@ typedef struct DeferredTriggersData {
typedef DeferredTriggersData *DeferredTriggers;
-static DeferredTriggers deferredTriggers;
+static DeferredTriggers deferredTriggers;
/* ----------
* deferredTriggerCheckState()
@@ -1783,7 +1785,7 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
*/
static void
DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context)
{
Oid tgoid = event->dte_item[itemno].dti_tgoid;
@@ -1817,7 +1819,7 @@ DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
- (event->dte_event & TRIGGER_EVENT_ROW);
+ (event->dte_event & TRIGGER_EVENT_ROW);
LocTriggerData.tg_relation = rel;
LocTriggerData.tg_trigger = NULL;
@@ -1899,12 +1901,12 @@ deferredTriggerInvokeEvents(bool immediate_only)
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
- * If immediate_only is true, we need only scan from where the end of
- * the queue was at the previous deferredTriggerInvokeEvents call;
- * any non-deferred events before that point are already fired.
- * (But if the deferral state changes, we must reset the saved position
- * to the beginning of the queue, so as to process all events once with
- * the new states. See DeferredTriggerSetState.)
+ * If immediate_only is true, we need only scan from where the end of the
+ * queue was at the previous deferredTriggerInvokeEvents call; any
+ * non-deferred events before that point are already fired. (But if
+ * the deferral state changes, we must reset the saved position to the
+ * beginning of the queue, so as to process all events once with the
+ * new states. See DeferredTriggerSetState.)
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1916,9 +1918,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * If immediate_only is true, then the only events that could need firing
- * are those since deftrig_events_imm. (But if deftrig_events_imm is
- * NULL, we must scan the entire list.)
+ * If immediate_only is true, then the only events that could need
+ * firing are those since deftrig_events_imm. (But if
+ * deftrig_events_imm is NULL, we must scan the entire list.)
*/
if (immediate_only && deferredTriggers->deftrig_events_imm != NULL)
{
@@ -1984,17 +1986,18 @@ deferredTriggerInvokeEvents(bool immediate_only)
rel = heap_open(event->dte_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a stable
- * copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a
+ * stable copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
- if (trigdesc == NULL) /* should not happen */
+ if (trigdesc == NULL) /* should not happen */
elog(ERROR, "relation %u has no triggers",
event->dte_relid);
/*
- * Allocate space to cache fmgr lookup info for triggers.
+ * Allocate space to cache fmgr lookup info for
+ * triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -2089,21 +2092,23 @@ void
DeferredTriggerBeginXact(void)
{
/*
- * This will be changed to a special context when
- * the nested transactions project moves forward.
+ * This will be changed to a special context when the nested
+ * transactions project moves forward.
*/
MemoryContext cxt = TopTransactionContext;
+
deferredTriggers = (DeferredTriggers) MemoryContextAlloc(TopTransactionContext,
- sizeof(DeferredTriggersData));
+ sizeof(DeferredTriggersData));
/*
* Create the per transaction memory context
*/
deferredTriggers->deftrig_cxt = AllocSetContextCreate(cxt,
- "DeferredTriggerXact",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "DeferredTriggerXact",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -2174,7 +2179,7 @@ DeferredTriggerAbortXact(void)
* Ignore call if we aren't in a transaction.
*/
if (deferredTriggers == NULL)
- return;
+ return;
/*
* Forget everything we know about deferred triggers.
@@ -2255,7 +2260,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2304,7 +2309,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" does not exist", cname)));
+ errmsg("constraint \"%s\" does not exist", cname)));
}
heap_close(tgrel, AccessShareLock);
@@ -2349,9 +2354,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
- * finish_xact_command(). But we must reset deferredTriggerInvokeEvents'
- * tail pointer to make it rescan the entire list, in case some deferred
- * events are now immediately invokable.
+ * finish_xact_command(). But we must reset
+ * deferredTriggerInvokeEvents' tail pointer to make it rescan the
+ * entire list, in case some deferred events are now immediately
+ * invokable.
*/
deferredTriggers->deftrig_events_imm = NULL;
}
@@ -2416,7 +2422,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
*/
for (i = 0; i < ntriggers; i++)
{
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
+ Trigger *trigger = &trigdesc->triggers[tgindx[i]];
if (trigger->tgenabled)
n_enabled_triggers++;
@@ -2455,7 +2461,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
ev_item = &(new_event->dte_item[i]);
ev_item->dti_tgoid = trigger->tgoid;
- ev_item->dti_state =
+ ev_item->dti_state =
((trigger->tgdeferrable) ?
TRIGGER_DEFERRED_DEFERRABLE : 0) |
((trigger->tginitdeferred) ?
@@ -2464,9 +2470,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
if (row_trigger && (trigdesc->n_before_row[event] > 0))
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
else if (!row_trigger && (trigdesc->n_before_statement[event] > 0))
- {
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
- }
}
MemoryContextSwitchTo(oldcxt);
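
Illustrative sketch, not part of the patch above: the reflowed comments in deferredTriggerInvokeEvents() describe resuming the scan from the position saved at the previous call when immediate_only is true, and rescanning the whole queue when that saved position has been reset to NULL (as DeferredTriggerSetState does). The minimal C below shows that resume-or-rescan logic under simplified, assumed structures; Event, queue_head and saved_tail are hypothetical names, not PostgreSQL symbols.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for the deferred-trigger queue. */
typedef struct Event
{
	bool		deferred;		/* may only fire at end of transaction */
	bool		fired;			/* already invoked */
	struct Event *next;
} Event;

static Event *queue_head = NULL;	/* whole event queue */
static Event *saved_tail = NULL;	/* queue end at previous scan, or NULL */

static void
invoke_events(bool immediate_only)
{
	Event	   *ev;

	if (immediate_only && saved_tail != NULL)
		ev = saved_tail->next;	/* events before this are already fired */
	else
		ev = queue_head;		/* deferral state changed: rescan everything */

	for (; ev != NULL; ev = ev->next)
	{
		if (immediate_only && ev->deferred)
			continue;			/* leave deferred events for commit time */
		if (!ev->fired)
			ev->fired = true;	/* a real queue would call the trigger here */
		saved_tail = ev;		/* remember how far we have scanned */
	}
}
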
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 275143c1517..57bc7c5f71f 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.40 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.41 2003/08/04 00:43:17 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -66,11 +66,11 @@
/* result structure for get_rels_with_domain() */
typedef struct
{
- Relation rel; /* opened and locked relation */
- int natts; /* number of attributes of interest */
- int *atts; /* attribute numbers */
+ Relation rel; /* opened and locked relation */
+ int natts; /* number of attributes of interest */
+ int *atts; /* attribute numbers */
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
-} RelToCheck;
+} RelToCheck;
static Oid findTypeInputFunction(List *procname, Oid typeOid);
@@ -80,9 +80,9 @@ static Oid findTypeSendFunction(List *procname, Oid typeOid);
static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
static void domainOwnerCheck(HeapTuple tup, TypeName *typename);
static char *domainAddConstraint(Oid domainOid, Oid domainNamespace,
- Oid baseTypeOid,
- int typMod, Constraint *constr,
- int *counter, char *domainName);
+ Oid baseTypeOid,
+ int typMod, Constraint *constr,
+ int *counter, char *domainName);
/*
@@ -105,7 +105,7 @@ DefineType(List *names, List *parameters)
bool byValue = false;
char delimiter = DEFAULT_TYPDELIM;
char alignment = 'i'; /* default alignment */
- char storage = 'p'; /* default TOAST storage method */
+ char storage = 'p'; /* default TOAST storage method */
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
@@ -237,8 +237,8 @@ DefineType(List *names, List *parameters)
/*
* Look to see if type already exists (presumably as a shell; if not,
- * TypeCreate will complain). If it doesn't, create it as a shell,
- * so that the OID is known for use in the I/O function definitions.
+ * TypeCreate will complain). If it doesn't, create it as a shell, so
+ * that the OID is known for use in the I/O function definitions.
*/
typoid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(typeName),
@@ -492,7 +492,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
- Form_pg_type baseType;
+ Form_pg_type baseType;
int counter = 0;
/* Convert list of names to a name and namespace */
@@ -508,10 +508,11 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is presently
- * useless since the parser will have truncated the name to fit. But
- * leave it here since we may someday support arrays of domains, in
- * which case we'll be back to needing to enforce NAMEDATALEN-2.)
+ * prefix. So they can be one character longer. (This test is
+ * presently useless since the parser will have truncated the name to
+ * fit. But leave it here since we may someday support arrays of
+ * domains, in which case we'll be back to needing to enforce
+ * NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@@ -581,8 +582,8 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
- * Run through constraints manually to avoid the additional
- * processing conducted by DefineRelation() and friends.
+ * Run through constraints manually to avoid the additional processing
+ * conducted by DefineRelation() and friends.
*/
foreach(listptr, schema)
{
@@ -594,7 +595,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -606,6 +607,7 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
case CONSTR_DEFAULT:
+
/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
@@ -643,7 +645,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@@ -652,41 +654,42 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
- break;
+ break;
+
+ case CONSTR_CHECK:
- case CONSTR_CHECK:
/*
- * Check constraints are handled after domain creation, as they
- * require the Oid of the domain
+ * Check constraints are handled after domain creation, as
+ * they require the Oid of the domain
*/
- break;
+ break;
/*
* All else are error cases
*/
- case CONSTR_UNIQUE:
- ereport(ERROR,
+ case CONSTR_UNIQUE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
- break;
+ errmsg("UNIQUE constraints not supported for domains")));
+ break;
- case CONSTR_PRIMARY:
- ereport(ERROR,
+ case CONSTR_PRIMARY:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
- break;
+ break;
- case CONSTR_ATTR_DEFERRABLE:
- case CONSTR_ATTR_NOT_DEFERRABLE:
- case CONSTR_ATTR_DEFERRED:
- case CONSTR_ATTR_IMMEDIATE:
- ereport(ERROR,
+ case CONSTR_ATTR_DEFERRABLE:
+ case CONSTR_ATTR_NOT_DEFERRABLE:
+ case CONSTR_ATTR_DEFERRED:
+ case CONSTR_ATTR_IMMEDIATE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("deferrability constraints not supported for domains")));
- break;
+ break;
default:
elog(ERROR, "unrecognized constraint subtype: %d",
@@ -715,15 +718,16 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- stmt->typename->typmod, /* typeMod value */
- typNDims, /* Array dimensions for base type */
- typNotNull); /* Type NOT NULL */
+ byValue, /* passed by value */
+ alignment, /* required alignment */
+ storage, /* TOAST strategy */
+ stmt->typename->typmod, /* typeMod value */
+ typNDims, /* Array dimensions for base type */
+ typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by TypeCreate
+ * Process constraints which refer to the domain ID returned by
+ * TypeCreate
*/
foreach(listptr, schema)
{
@@ -733,16 +737,16 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
domainAddConstraint(domainoid, domainNamespace,
basetypeoid, stmt->typename->typmod,
constr, &counter, domainName);
- break;
+ break;
- /* Other constraint types were fully processed above */
+ /* Other constraint types were fully processed above */
default:
- break;
+ break;
}
}
@@ -834,8 +838,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING;
- * if we see this, we issue a NOTICE and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a NOTICE and fix up the pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -874,9 +878,10 @@ findTypeInputFunction(List *procname, Oid typeOid)
(errmsg("changing argument type of function %s from OPAQUE to CSTRING",
NameListToString(procname))));
SetFunctionArgType(procOid, 0, CSTRINGOID);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -905,8 +910,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a NOTICE and fix up the
- * pg_proc entry.
+ * type name; if we see this, we issue a NOTICE and fix up the pg_proc
+ * entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -940,12 +945,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(NOTICE,
- (errmsg("changing argument type of function %s from OPAQUE to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from OPAQUE to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -1050,7 +1056,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now create the parameters for keys/inheritance etc. All of them are
@@ -1072,7 +1078,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
/*
* AlterDomainDefault
*
- * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
+ * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
*/
void
AlterDomainDefault(List *names, Node *defaultRaw)
@@ -1083,12 +1089,12 @@ AlterDomainDefault(List *names, Node *defaultRaw)
ParseState *pstate;
Relation rel;
char *defaultValue;
- Node *defaultExpr = NULL; /* NULL if no default specified */
+ Node *defaultExpr = NULL; /* NULL if no default specified */
Datum new_record[Natts_pg_type];
char new_record_nulls[Natts_pg_type];
char new_record_repl[Natts_pg_type];
HeapTuple newtuple;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1113,7 +1119,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Setup new tuple */
@@ -1129,9 +1135,10 @@ AlterDomainDefault(List *names, Node *defaultRaw)
{
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
+
/*
- * Cook the colDef->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the colDef->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, defaultRaw,
typTup->typbasetype,
@@ -1139,27 +1146,29 @@ AlterDomainDefault(List *names, Node *defaultRaw)
NameStr(typTup->typname));
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we also
+ * require a valid textual representation (mainly to make life
+ * easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
+
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
- else /* Default is NULL, drop it */
+ else
+/* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@@ -1168,7 +1177,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
}
newtuple = heap_modifytuple(tup, rel,
- new_record, new_record_nulls, new_record_repl);
+ new_record, new_record_nulls, new_record_repl);
simple_heap_update(rel, &tup->t_self, newtuple);
@@ -1178,7 +1187,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
typTup->typrelid,
- 0, /* relation kind is n/a */
+ 0, /* relation kind is n/a */
typTup->typinput,
typTup->typoutput,
typTup->typreceive,
@@ -1186,7 +1195,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
typTup->typelem,
typTup->typbasetype,
defaultExpr,
- true); /* Rebuild is true */
+ true); /* Rebuild is true */
/* Clean up */
heap_close(rel, NoLock);
@@ -1196,7 +1205,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* AlterDomainNotNull
*
- * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
+ * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
*/
void
AlterDomainNotNull(List *names, bool notNull)
@@ -1205,7 +1214,7 @@ AlterDomainNotNull(List *names, bool notNull)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1231,7 +1240,7 @@ AlterDomainNotNull(List *names, bool notNull)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Is the domain already set to the desired constraint? */
@@ -1248,15 +1257,15 @@ AlterDomainNotNull(List *names, bool notNull)
/* Adding a NOT NULL constraint requires checking existing columns */
if (notNull)
{
- List *rels;
- List *rt;
+ List *rels;
+ List *rt;
/* Fetch relation list with attributes based on this domain */
/* ShareLock is sufficient to prevent concurrent data changes */
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1268,14 +1277,14 @@ AlterDomainNotNull(List *names, bool notNull)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1284,7 +1293,7 @@ AlterDomainNotNull(List *names, bool notNull)
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains NULL values",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
}
heap_endscan(scan);
@@ -1295,7 +1304,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
+ * Okay to update pg_type row. We can scribble on typTup because it's
* a copy.
*/
typTup->typnotnull = notNull;
@@ -1321,7 +1330,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
Oid domainoid;
HeapTuple tup;
Relation rel;
- Form_pg_type typTup;
+ Form_pg_type typTup;
Relation conrel;
SysScanDesc conscan;
ScanKeyData key[1];
@@ -1350,7 +1359,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Grab an appropriate lock on the pg_constraint relation */
@@ -1403,15 +1412,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
- List *rels;
- List *rt;
- EState *estate;
+ Form_pg_type typTup;
+ List *rels;
+ List *rt;
+ EState *estate;
ExprContext *econtext;
- char *ccbin;
- Expr *expr;
- ExprState *exprstate;
- int counter = 0;
+ char *ccbin;
+ Expr *expr;
+ ExprState *exprstate;
+ int counter = 0;
Constraint *constr;
/* Make a TypeName so we can use standard type lookup machinery */
@@ -1438,14 +1447,14 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Check for unsupported constraint types */
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1469,20 +1478,20 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
errmsg("use ALTER DOMAIN .. [ SET | DROP ] NOT NULL instead")));
break;
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
/* processed below */
- break;
+ break;
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
+ errmsg("UNIQUE constraints not supported for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("PRIMARY KEY constraints not supported for domains")));
+ errmsg("PRIMARY KEY constraints not supported for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1501,18 +1510,18 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
}
/*
- * Since all other constraint types throw errors, this must be
- * a check constraint. First, process the constraint expression
- * and add an entry to pg_constraint.
+ * Since all other constraint types throw errors, this must be a check
+ * constraint. First, process the constraint expression and add an
+ * entry to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
typTup->typbasetype, typTup->typtypmod,
- constr, &counter, NameStr(typTup->typname));
+ constr, &counter, NameStr(typTup->typname));
/*
- * Test all values stored in the attributes based on the domain
- * the constraint is being added to.
+ * Test all values stored in the attributes based on the domain the
+ * constraint is being added to.
*/
expr = (Expr *) stringToNode(ccbin);
@@ -1528,7 +1537,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1540,15 +1549,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
- Datum conResult;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
+ Datum conResult;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1564,7 +1573,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains values that violate the new constraint",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
ResetExprContext(econtext);
@@ -1610,7 +1619,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
static List *
get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
{
- List *result = NIL;
+ List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc depScan;
@@ -1634,10 +1643,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
{
- Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
RelToCheck *rtc = NULL;
List *rellist;
- Form_pg_attribute pg_att;
+ Form_pg_attribute pg_att;
int ptr;
/* Ignore dependees that aren't user columns of tables */
@@ -1675,10 +1684,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}
/*
- * Confirm column has not been dropped, and is of the expected type.
- * This defends against an ALTER DROP COLUMN occuring just before
- * we acquired lock ... but if the whole table were dropped, we'd
- * still have a problem.
+ * Confirm column has not been dropped, and is of the expected
+ * type. This defends against an ALTER DROP COLUMN occuring just
+ * before we acquired lock ... but if the whole table were
+ * dropped, we'd still have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@@ -1687,16 +1696,16 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
- * order; this is just a hack to improve predictability of regression
- * test output ...
+ * Okay, add column to result. We store the columns in
+ * column-number order; this is just a hack to improve
+ * predictability of regression test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
ptr = rtc->natts++;
- while (ptr > 0 && rtc->atts[ptr-1] > pg_depend->objsubid)
+ while (ptr > 0 && rtc->atts[ptr - 1] > pg_depend->objsubid)
{
- rtc->atts[ptr] = rtc->atts[ptr-1];
+ rtc->atts[ptr] = rtc->atts[ptr - 1];
ptr--;
}
rtc->atts[ptr] = pg_depend->objsubid;
@@ -1719,7 +1728,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
static void
domainOwnerCheck(HeapTuple tup, TypeName *typename)
{
- Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
+ Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
/* Check that this is actually a domain */
if (typTup->typtype != 'd')
@@ -1746,7 +1755,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
char *ccsrc;
char *ccbin;
ParseState *pstate;
- CoerceToDomainValue *domVal;
+ CoerceToDomainValue *domVal;
/*
* Assign or validate constraint name
@@ -1759,8 +1768,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_DOMAIN,
@@ -1775,10 +1784,10 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of the
- * base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be considered
- * a member of the domain.
+ * in the expression. Note that it will appear to have the type of
+ * the base type, not the domain. This seems correct since within the
+ * check expression, we should not assume the input value can be
+ * considered a member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@@ -1841,13 +1850,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Store the constraint in pg_constraint
*/
- CreateConstraintEntry(constr->name, /* Constraint Name */
- domainNamespace, /* namespace */
+ CreateConstraintEntry(constr->name, /* Constraint Name */
+ domainNamespace, /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- InvalidOid, /* not a relation constraint */
- NULL,
+ InvalidOid, /* not a relation constraint */
+ NULL,
0,
domainOid, /* domain constraint */
InvalidOid, /* Foreign key fields */
@@ -1857,13 +1866,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
' ',
' ',
InvalidOid,
- expr, /* Tree form check constraint */
+ expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
- ccsrc); /* Source form check constraint */
+ ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine can
- * perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine
+ * can perform any additional required tests.
*/
return ccbin;
}
@@ -1893,7 +1902,7 @@ GetDomainConstraints(Oid typeOid)
Form_pg_type typTup;
ScanKeyData key[1];
SysScanDesc scan;
-
+
tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeOid),
0, 0, 0);
@@ -1915,17 +1924,20 @@ GetDomainConstraints(Oid typeOid)
while (HeapTupleIsValid(conTup = systable_getnext(scan)))
{
- Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
- Datum val;
- bool isNull;
- Expr *check_expr;
+ Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
+ Datum val;
+ bool isNull;
+ Expr *check_expr;
DomainConstraintState *r;
/* Ignore non-CHECK constraints (presently, shouldn't be any) */
if (c->contype != CONSTRAINT_CHECK)
continue;
- /* Not expecting conbin to be NULL, but we'll test for it anyway */
+ /*
+ * Not expecting conbin to be NULL, but we'll test for it
+ * anyway
+ */
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
if (isNull)
@@ -1945,8 +1957,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains should
- * be applied earlier.
+ * use lcons() here because constraints of lower domains
+ * should be applied earlier.
*/
result = lcons(r, result);
}
@@ -2003,7 +2015,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
Oid typeOid;
Relation rel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
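
Illustrative sketch, not part of the patch above: the ordered-insert step reindented in get_rels_with_domain() keeps rtc->atts[] sorted by column number by shifting larger entries right before storing the new one. The stand-alone helper below shows the same step; the parameter names are hypothetical and this is not a PostgreSQL API.

#include <assert.h>

/* Hypothetical helper: keep atts[0..*natts) sorted while adding newatt. */
static void
add_att_sorted(int *atts, int *natts, int maxatts, int newatt)
{
	int			ptr;

	assert(*natts < maxatts);
	ptr = (*natts)++;
	while (ptr > 0 && atts[ptr - 1] > newatt)
	{
		atts[ptr] = atts[ptr - 1];	/* shift larger column numbers right */
		ptr--;
	}
	atts[ptr] = newatt;
}
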
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 36416a5232f..117eef1e750 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.122 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.123 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,12 +146,12 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_group and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_group and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -212,7 +212,7 @@ write_group_file(Relation grel)
if (usename[j] != '\0')
{
ereport(LOG,
- (errmsg("invalid user name \"%s\"", usename)));
+ (errmsg("invalid user name \"%s\"", usename)));
continue;
}
@@ -245,7 +245,7 @@ write_group_file(Relation grel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -294,12 +294,12 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_shadow and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_shadow and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -376,7 +376,7 @@ write_user_file(Relation urel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -430,10 +430,10 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
Relation urel = NULL;
Relation grel = NULL;
- if (! (user_file_update_needed || group_file_update_needed))
+ if (!(user_file_update_needed || group_file_update_needed))
return;
- if (! isCommit)
+ if (!isCommit)
{
user_file_update_needed = false;
group_file_update_needed = false;
@@ -441,12 +441,12 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
}
/*
- * We use ExclusiveLock to ensure that only one backend writes the flat
- * file(s) at a time. That's sufficient because it's okay to allow plain
- * reads of the tables in parallel. There is some chance of a deadlock
- * here (if we were triggered by a user update of pg_shadow or pg_group,
- * which likely won't have gotten a strong enough lock), so get the locks
- * we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the
+ * flat file(s) at a time. That's sufficient because it's okay to
+ * allow plain reads of the tables in parallel. There is some chance
+ * of a deadlock here (if we were triggered by a user update of
+ * pg_shadow or pg_group, which likely won't have gotten a strong
+ * enough lock), so get the locks we need before writing anything.
*/
if (user_file_update_needed)
urel = heap_openr(ShadowRelationName, ExclusiveLock);
@@ -1088,7 +1088,7 @@ DropUser(DropUserStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("user \"%s\" cannot be dropped", user),
- errdetail("The user owns database \"%s\".", dbname)));
+ errdetail("The user owns database \"%s\".", dbname)));
}
heap_endscan(scan);
@@ -1172,10 +1172,10 @@ RenameUser(const char *oldname, const char *newname)
errmsg("user \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the session user somewhere,
+ * so renaming it could cause confusion. On the other hand, there may
+ * not be an actual problem besides a little confusion, so think about
+ * this and decide.
*/
if (((Form_pg_shadow) GETSTRUCT(tup))->usesysid == GetSessionUserId())
ereport(ERROR,
@@ -1221,14 +1221,14 @@ CheckPgUserAclNotNull(void)
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelOid_pg_shadow),
0, 0, 0);
- if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
+ if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
elog(ERROR, "cache lookup failed for relation %u", RelOid_pg_shadow);
if (heap_attisnull(htup, Anum_pg_class_relacl))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("before using passwords you must revoke permissions on %s",
- ShadowRelationName),
+ errmsg("before using passwords you must revoke permissions on %s",
+ ShadowRelationName),
errdetail("This restriction is to prevent unprivileged users from reading the passwords."),
errhint("Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
ShadowRelationName)));
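
Illustrative sketch, not part of the patch above: write_group_file() and write_user_file() stream the flat file to a temporary name and test ferror() before closing, so that write failures are reported instead of silently leaving a truncated file. The fragment below shows that write-then-check pattern in isolation; the file names, the sample output line and the final rename() are assumptions of this example, not the actual routine.

#include <stdio.h>

/* Hypothetical routine: write a temp file, check for errors, install it. */
static int
write_flat_file(const char *tempname, const char *finalname)
{
	FILE	   *fp = fopen(tempname, "w");

	if (fp == NULL)
		return -1;				/* could not create the temp file */

	fprintf(fp, "example line\n");	/* real code emits one line per row */

	if (ferror(fp))				/* detect write errors before closing */
	{
		fclose(fp);
		return -1;
	}
	if (fclose(fp) != 0)
		return -1;

	return rename(tempname, finalname); /* swap the new file into place */
}
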
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index e73ace27c27..9dc0d9a8996 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.257 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.258 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
- if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If we vacuumed, use new transaction for analyze.
- * Otherwise, we can use the outer transaction, but we still
- * need to call analyze_rel in a memory context that will be
- * cleaned up on return (else we leak memory while processing
- * multiple tables).
+ * If we vacuumed, use new transaction for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (vacstmt->vacuum)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -734,7 +735,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
/*
* Check for user-requested abort. Note we want this to be inside a
@@ -812,7 +814,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp tables */
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
@@ -860,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*/
if (toast_relid != InvalidOid)
{
- if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+ if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
result = false; /* failed to vacuum the TOAST table? */
}
@@ -1087,8 +1090,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1314,7 +1317,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move destination.
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
*/
if (notup)
{
@@ -1382,9 +1386,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead tuples cannot be removed yet.\n"
- "Nonremovable tuples range from %lu to %lu bytes long.\n"
+ "Nonremovable tuples range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable tuples) is %.0f bytes.\n"
+ "Total free space (including removable tuples) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -2380,8 +2384,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount
- * of processing that occurs below.
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
@@ -2735,7 +2739,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -2752,7 +2756,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
@@ -2837,13 +2841,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and
- * (b) FSM uses the number of pages reported as a statistic for guiding
- * space management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
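
Illustrative sketch, not part of the patch above: the rewrapped comment in vac_update_fsm() explains that only pages with at least the average request size of free space are reported to the free space map, so the FSM is not cluttered with uselessly small bits of space. The helper below applies that filter to a plain array; the PageFree type and its fields are hypothetical.

#include <stddef.h>

/* Hypothetical per-page record; not a PostgreSQL structure. */
typedef struct PageFree
{
	unsigned	blkno;			/* block number */
	size_t		avail;			/* free bytes on the page */
} PageFree;

/* Keep only pages whose free space meets the threshold; return the count. */
static int
filter_fsm_pages(PageFree *pages, int npages, size_t threshold)
{
	int			i,
				nkept = 0;

	for (i = 0; i < npages; i++)
	{
		if (pages[i].avail >= threshold)
			pages[nkept++] = pages[i];
	}
	return nkept;
}
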
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index f0be98a23ed..65af960be8e 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.29 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.30 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ typedef struct LVRelStats
bool fs_is_heap; /* are we using heap organization? */
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
- PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
+ PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
} LVRelStats;
@@ -162,7 +162,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
@@ -659,7 +659,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -966,16 +966,18 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular reduces
- * the amount of time we spend holding the FSM lock when we finally call
- * RecordRelationFreeSpace. Since the FSM will probably drop pages with
- * little free space anyway, there's no point in making this really small.
+ * uselessly small entries early saves cycles, and in particular
+ * reduces the amount of time we spend holding the FSM lock when we
+ * finally call RecordRelationFreeSpace. Since the FSM will probably
+ * drop pages with little free space anyway, there's no point in
+ * making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that to
- * adjust the threshold? Would be worthwhile if FSM has no stats yet
- * for this relation. But changing the threshold as we scan the rel
- * might lead to bizarre behavior, too. Also, it's probably better if
- * vacuum.c has the same thresholding behavior as we do here.
+ * XXX Is it worth trying to measure average tuple size, and using that
+ * to adjust the threshold? Would be worthwhile if FSM has no stats
+ * yet for this relation. But changing the threshold as we scan the
+ * rel might lead to bizarre behavior, too. Also, it's probably
+ * better if vacuum.c has the same thresholding behavior as we do
+ * here.
*/
if (avail < vacrelstats->threshold)
return;
@@ -996,7 +998,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*----------
* The rest of this routine works with "heap" organization of the
* free space arrays, wherein we maintain the heap property
- * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
+ * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* In particular, the zero'th element always has the smallest available
* space and can be discarded to make room for a new page with more space.
* See Knuth's discussion of heap-based priority queues, sec 5.2.3;
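
Illustrative sketch, not part of the patch above: the comment restored here states the heap invariant avail[(j-1) div 2] <= avail[j], i.e. a min-heap keyed on available space whose root is always the page easiest to discard. The routine below replaces the root with a better page and sifts it down to restore that invariant; the PageSpace type and array handling are assumptions of this example.

/* Hypothetical page record; not a PostgreSQL structure. */
typedef struct PageSpace
{
	unsigned	blkno;			/* block number */
	unsigned	avail;			/* free bytes on the page */
} PageSpace;

/* Replace the root (smallest avail) and restore the min-heap property. */
static void
heap_replace_root(PageSpace *heap, int n, PageSpace newpage)
{
	int			j = 0;

	heap[0] = newpage;
	for (;;)
	{
		int			child = 2 * j + 1;

		if (child >= n)
			break;
		if (child + 1 < n && heap[child + 1].avail < heap[child].avail)
			child++;			/* pick the smaller child */
		if (heap[j].avail <= heap[child].avail)
			break;				/* invariant restored */
		{
			PageSpace	tmp = heap[j];

			heap[j] = heap[child];
			heap[child] = tmp;
		}
		j = child;
	}
}
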
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index e0b041636e6..07dfca13c84 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.85 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.86 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* to duplicate the test in AC_STRUCT_TIMEZONE.
*/
#ifdef HAVE_TZNAME
-#ifndef tzname /* For SGI. */
+#ifndef tzname /* For SGI. */
extern char *tzname[];
#endif
#endif
@@ -273,12 +273,11 @@ static void
clear_tz(void)
{
/*
- * unsetenv() works fine, but is BSD, not POSIX, and is not
- * available under Solaris, among others. Apparently putenv()
- * called as below clears the process-specific environment
- * variables. Other reasonable arguments to putenv() (e.g.
- * "TZ=", "TZ", "") result in a core dump (under Linux
- * anyway). - thomas 1998-01-26
+ * unsetenv() works fine, but is BSD, not POSIX, and is not available
+ * under Solaris, among others. Apparently putenv() called as below
+ * clears the process-specific environment variables. Other
+ * reasonable arguments to putenv() (e.g. "TZ=", "TZ", "") result in a
+ * core dump (under Linux anyway). - thomas 1998-01-26
*/
if (tzbuf[0] == 'T')
{
@@ -298,14 +297,14 @@ clear_tz(void)
*
* If tzname[1] is a nonempty string, *or* the global timezone variable is
* not zero, then tzset must have recognized the TZ value as something
- * different from UTC. Return true.
+ * different from UTC. Return true.
*
* Otherwise, check to see if the TZ name is a known spelling of "UTC"
* (ie, appears in our internal tables as a timezone equivalent to UTC).
* If so, accept it.
*
* This will reject nonstandard spellings of UTC unless tzset() chose to
- * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
+ * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* do so, but on other systems we may be tightening the spec a little.
*
* Another problem is that on some platforms (eg HPUX), if tzset thinks the
@@ -337,8 +336,8 @@ tzset_succeeded(const char *tz)
return true;
/*
- * Check for known spellings of "UTC". Note we must downcase the input
- * before passing it to DecodePosixTimezone().
+ * Check for known spellings of "UTC". Note we must downcase the
+ * input before passing it to DecodePosixTimezone().
*/
StrNCpy(tztmp, tz, sizeof(tztmp));
for (cp = tztmp; *cp; cp++)
@@ -368,7 +367,7 @@ tz_acceptable(void)
/*
* To detect leap-second timekeeping, compute the time_t value for
- * local midnight, 2000-01-01. Insist that this be a multiple of 60;
+ * local midnight, 2000-01-01. Insist that this be a multiple of 60;
* any partial-minute offset has to be due to leap seconds.
*/
MemSet(&tt, 0, sizeof(tt));
@@ -399,7 +398,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
*/
if (!have_saved_tz)
{
- char *orig_tz = getenv("TZ");
+ char *orig_tz = getenv("TZ");
if (orig_tz)
StrNCpy(orig_tzbuf, orig_tz, sizeof(orig_tzbuf));
@@ -434,9 +433,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could to
- * guard against this in flatten_set_variable_args, but a string
- * coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could
+ * to guard against this in flatten_set_variable_args, but a
+ * string coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
@@ -455,7 +454,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - interval->time;
+ CTimeZone = -interval->time;
HasCTZSet = true;
}
pfree(interval);
@@ -471,22 +470,22 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - hours * 3600;
+ CTimeZone = -hours * 3600;
HasCTZSet = true;
}
}
else if (strcasecmp(value, "UNKNOWN") == 0)
{
/*
- * UNKNOWN is the value shown as the "default" for TimeZone
- * in guc.c. We interpret it as meaning the original TZ
- * inherited from the environment. Note that if there is an
- * original TZ setting, we will return that rather than UNKNOWN
- * as the canonical spelling.
+ * UNKNOWN is the value shown as the "default" for TimeZone in
+ * guc.c. We interpret it as meaning the original TZ
+ * inherited from the environment. Note that if there is an
+ * original TZ setting, we will return that rather than
+ * UNKNOWN as the canonical spelling.
*/
if (doit)
{
- bool ok;
+ bool ok;
/* Revert to original setting of TZ, whatever it was */
if (orig_tzbuf[0])
@@ -516,14 +515,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. Note that it's possible the old setting is in
- * tzbuf, so we'd better copy it.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. Note that it's possible the old setting
+ * is in tzbuf, so we'd better copy it.
*/
- char save_tzbuf[TZBUF_LEN];
- char *save_tz;
- bool known,
- acceptable;
+ char save_tzbuf[TZBUF_LEN];
+ char *save_tz;
+ bool known,
+ acceptable;
save_tz = getenv("TZ");
if (save_tz)
@@ -563,8 +562,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
{
ereport(interactive ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timezone \"%s\" appears to use leap seconds",
- value),
+ errmsg("timezone \"%s\" appears to use leap seconds",
+ value),
errdetail("PostgreSQL does not support leap seconds")));
return NULL;
}
@@ -609,7 +608,7 @@ show_timezone(void)
Interval interval;
interval.month = 0;
- interval.time = - CTimeZone;
+ interval.time = -CTimeZone;
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
@@ -703,16 +702,16 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
/*
* Note: if we are in startup phase then SetClientEncoding may not be
* able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix things
- * once initialization is complete.
+ * the encoding is okay, and InitializeClientEncoding() will fix
+ * things once initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (interactive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@@ -758,12 +757,12 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
/* not a saved ID, so look it up */
HeapTuple userTup;
- if (! IsTransactionState())
+ if (!IsTransactionState())
{
/*
* Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in postgresql.conf,
- * which seems like a good thing anyway.
+ * that session_authorization cannot be set in
+ * postgresql.conf, which seems like a good thing anyway.
*/
return NULL;
}
@@ -782,7 +781,7 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
is_superuser = ((Form_pg_shadow) GETSTRUCT(userTup))->usesuper;
-
+
ReleaseSysCache(userTup);
}
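
Illustrative sketch, not part of the patch above: tz_acceptable(), whose comment is rewrapped in this file, detects leap-second timekeeping by computing the time_t for local midnight, 2000-01-01 and insisting that it be a multiple of 60, since any partial-minute offset must come from leap seconds. The stand-alone check below follows that description; the simplified error handling is an assumption of this example.

#include <stdbool.h>
#include <string.h>
#include <time.h>

/* Hypothetical re-statement of the leap-second test; not the real routine. */
static bool
tz_acceptable_sketch(void)
{
	struct tm	tt;
	time_t		time2000;

	memset(&tt, 0, sizeof(tt));
	tt.tm_year = 2000 - 1900;	/* struct tm counts years from 1900 */
	tt.tm_mon = 0;				/* January */
	tt.tm_mday = 1;
	tt.tm_isdst = -1;			/* let mktime determine DST */

	time2000 = mktime(&tt);
	if (time2000 == (time_t) -1)
		return false;			/* could not resolve the local time */

	return (time2000 % 60) == 0;	/* partial minutes imply leap seconds */
}
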
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index e12ae0af686..9c3b372b3f7 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.75 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.76 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -190,8 +190,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change datatype of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change datatype of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 61974827b3b..9267d362ddd 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.72 2003/07/21 17:05:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.73 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,7 +48,7 @@
* ----------------------------------------------------------------
*/
void
-ExecReScan(PlanState *node, ExprContext *exprCtxt)
+ExecReScan(PlanState * node, ExprContext *exprCtxt)
{
/* If collecting timing stats, update them */
if (node->instrument)
@@ -61,7 +61,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
foreach(lst, node->initPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL) /* don't care about child
@@ -72,7 +72,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
}
foreach(lst, node->subPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL)
@@ -177,7 +177,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
* Marks the current scan position.
*/
void
-ExecMarkPos(PlanState *node)
+ExecMarkPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -218,7 +218,7 @@ ExecMarkPos(PlanState *node)
* restores the scan position previously saved with ExecMarkPos()
*/
void
-ExecRestrPos(PlanState *node)
+ExecRestrPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -302,16 +302,16 @@ ExecSupportsBackwardScan(Plan *node)
return false;
case T_Append:
- {
- List *l;
-
- foreach(l, ((Append *) node)->appendplans)
{
- if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
- return false;
+ List *l;
+
+ foreach(l, ((Append *) node)->appendplans)
+ {
+ if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
+ return false;
+ }
+ return true;
}
- return true;
- }
case T_SeqScan:
case T_IndexScan:
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 3f9c6d0d47d..8b0962ba9be 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.4 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.5 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -249,7 +249,7 @@ execTuplesHashPrepare(TupleDesc tupdesc,
eq_function = oprfuncid(optup);
ReleaseSysCache(optup);
hash_function = get_op_hash_function(eq_opr);
- if (!OidIsValid(hash_function)) /* should not happen */
+ if (!OidIsValid(hash_function)) /* should not happen */
elog(ERROR, "could not find hash function for hash operator %u",
eq_opr);
fmgr_info(eq_function, &(*eqfunctions)[i]);
@@ -289,8 +289,8 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
int nbuckets, Size entrysize,
MemoryContext tablecxt, MemoryContext tempcxt)
{
- TupleHashTable hashtable;
- Size tabsize;
+ TupleHashTable hashtable;
+ Size tabsize;
Assert(nbuckets > 0);
Assert(entrysize >= sizeof(TupleHashEntryData));
@@ -411,9 +411,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
* Iterator state must be initialized with ResetTupleHashIterator() macro.
*/
TupleHashEntry
-ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator *state)
+ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator * state)
{
- TupleHashEntry entry;
+ TupleHashEntry entry;
entry = state->next_entry;
while (entry == NULL)
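The execGrouping.c hunk above reflows ScanTupleHashTable(), whose header comment says the iterator must first be primed with the ResetTupleHashIterator() macro. A hedged usage sketch follows; the header names and the one-argument ResetTupleHashIterator() form are assumptions about the 2003 tree rather than anything the patch itself shows.

/* Hedged sketch, not part of the patch: visit every group stored in a
 * TupleHashTable via the iterator API whose signature appears above.
 * ASSUMPTIONS: these headers declare the types and functions, and
 * ResetTupleHashIterator() takes only the iterator. */
#include "postgres.h"
#include "nodes/execnodes.h"
#include "executor/executor.h"

static void
walk_groups(TupleHashTable hashtable)
{
	TupleHashIterator iter;
	TupleHashEntry entry;

	ResetTupleHashIterator(&iter);
	while ((entry = ScanTupleHashTable(hashtable, &iter)) != NULL)
	{
		/* each entry represents one distinct grouping key */
	}
}
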
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ded748d5bf8..ae58bb130f7 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.212 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.213 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static void initResultRelInfo(ResultRelInfo *resultRelInfo,
Index resultRelationIndex,
List *rangeTable,
CmdType operation);
-static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+static TupleTableSlot *ExecutePlan(EState *estate, PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
@@ -87,7 +87,7 @@ static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
- evalPlanQual *priorepq);
+ evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
/* end of local decls */
@@ -100,7 +100,7 @@ static void EvalPlanQualStop(evalPlanQual *epq);
* query plan
*
* Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * clear why we bother to separate the two functions, but...). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
@@ -122,8 +122,8 @@ ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
Assert(queryDesc->estate == NULL);
/*
- * If the transaction is read-only, we need to check if any writes
- * are planned to non-temporary tables.
+ * If the transaction is read-only, we need to check if any writes are
+ * planned to non-temporary tables.
*/
if (!explainOnly)
ExecCheckXactReadOnly(queryDesc->parsetree, queryDesc->operation);
@@ -362,8 +362,8 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
/*
* Otherwise, only plain-relation RTEs need to be checked here.
- * Function RTEs are checked by init_fcache when the function is prepared
- * for execution. Join and special RTEs need no checks.
+ * Function RTEs are checked by init_fcache when the function is
+ * prepared for execution. Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -435,7 +435,7 @@ ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
if (operation == CMD_DELETE || operation == CMD_INSERT
|| operation == CMD_UPDATE)
{
- List *lp;
+ List *lp;
foreach(lp, parsetree->rtable)
{
@@ -474,9 +474,9 @@ static void
InitPlan(QueryDesc *queryDesc, bool explainOnly)
{
CmdType operation = queryDesc->operation;
- Query *parseTree = queryDesc->parsetree;
- Plan *plan = queryDesc->plantree;
- EState *estate = queryDesc->estate;
+ Query *parseTree = queryDesc->parsetree;
+ Plan *plan = queryDesc->plantree;
+ EState *estate = queryDesc->estate;
PlanState *planstate;
List *rangeTable;
Relation intoRelationDesc;
@@ -484,8 +484,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
TupleDesc tupType;
/*
- * Do permissions checks. It's sufficient to examine the query's
- * top rangetable here --- subplan RTEs will be checked during
+ * Do permissions checks. It's sufficient to examine the query's top
+ * rangetable here --- subplan RTEs will be checked during
* ExecInitSubPlan().
*/
ExecCheckRTPerms(parseTree->rtable, operation);
@@ -570,10 +570,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (operation == CMD_SELECT && parseTree->into != NULL)
{
do_select_into = true;
+
/*
- * For now, always create OIDs in SELECT INTO; this is for backwards
- * compatibility with pre-7.3 behavior. Eventually we might want
- * to allow the user to choose.
+ * For now, always create OIDs in SELECT INTO; this is for
+ * backwards compatibility with pre-7.3 behavior. Eventually we
+ * might want to allow the user to choose.
*/
estate->es_force_oids = true;
}
@@ -640,12 +641,12 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries need a
- * filter if there are any junk attrs in the tlist. INSERT and SELECT
- * INTO also need a filter if the top plan node is a scan node that's not
- * doing projection (else we'll be scribbling on the scan tuple!) UPDATE
- * and DELETE always need a filter, since there's always a junk 'ctid'
- * attribute present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries
+ * need a filter if there are any junk attrs in the tlist. INSERT and
+ * SELECT INTO also need a filter if the top plan node is a scan node
+ * that's not doing projection (else we'll be scribbling on the scan
+ * tuple!) UPDATE and DELETE always need a filter, since there's
+ * always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -752,8 +753,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* If doing SELECT INTO, initialize the "into" relation. We must wait
- * till now so we have the "clean" result tuple type to create the
- * new table from.
+ * till now so we have the "clean" result tuple type to create the new
+ * table from.
*
* If EXPLAIN, skip creating the "into" relation.
*/
@@ -795,16 +796,16 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
FreeTupleDesc(tupdesc);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into
- * relation. Note that AlterTableCreateToastTable ends
- * with CommandCounterIncrement(), so that the TOAST table
- * will be visible for insertion.
+ * If necessary, create a TOAST table for the into relation. Note
+ * that AlterTableCreateToastTable ends with
+ * CommandCounterIncrement(), so that the TOAST table will be
+ * visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
@@ -841,19 +842,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change toast relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
}
@@ -894,7 +895,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
* ----------------------------------------------------------------
*/
void
-ExecEndPlan(PlanState *planstate, EState *estate)
+ExecEndPlan(PlanState * planstate, EState *estate)
{
ResultRelInfo *resultRelInfo;
int i;
@@ -964,18 +965,18 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
static TupleTableSlot *
ExecutePlan(EState *estate,
- PlanState *planstate,
+ PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
DestReceiver *dest)
{
- JunkFilter *junkfilter;
- TupleTableSlot *slot;
- ItemPointer tupleid = NULL;
- ItemPointerData tuple_ctid;
- long current_tuple_count;
- TupleTableSlot *result;
+ JunkFilter *junkfilter;
+ TupleTableSlot *slot;
+ ItemPointer tupleid = NULL;
+ ItemPointerData tuple_ctid;
+ long current_tuple_count;
+ TupleTableSlot *result;
/*
* initialize local variables
@@ -1199,7 +1200,7 @@ lnext: ;
/*
* check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
+ * then quit, else loop again and process more tuples. Zero
* numberTuples means no limit.
*/
current_tuple_count++;
@@ -1309,7 +1310,7 @@ ExecInsert(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1686,13 +1687,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value for attribute \"%s\" violates NOT NULL constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
if (constr->num_check > 0)
{
- const char *failed;
+ const char *failed;
if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
ereport(ERROR,
@@ -1884,10 +1885,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
* integrated with the Param mechanism somehow, so that the upper plan
* nodes know that their children's outputs have changed.
*
- * Note that the stack of free evalPlanQual nodes is quite useless at
- * the moment, since it only saves us from pallocing/releasing the
+ * Note that the stack of free evalPlanQual nodes is quite useless at the
+ * moment, since it only saves us from pallocing/releasing the
* evalPlanQual nodes themselves. But it will be useful once we
- * implement ReScan instead of end/restart for re-using PlanQual nodes.
+ * implement ReScan instead of end/restart for re-using PlanQual
+ * nodes.
*/
if (endNode)
{
@@ -1898,10 +1900,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/*
* Initialize new recheck query.
*
- * Note: if we were re-using PlanQual plans via ExecReScan, we'd need
- * to instead copy down changeable state from the top plan (including
- * es_result_relation_info, es_junkFilter) and reset locally changeable
- * state in the epq (including es_param_exec_vals, es_evTupleNull).
+ * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
+ * instead copy down changeable state from the top plan (including
+ * es_result_relation_info, es_junkFilter) and reset locally
+ * changeable state in the epq (including es_param_exec_vals,
+ * es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
@@ -2016,9 +2019,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
/*
* The epqstates share the top query's copy of unchanging state such
- * as the snapshot, rangetable, result-rel info, and external Param info.
- * They need their own copies of local state, including a tuple table,
- * es_param_exec_vals, etc.
+ * as the snapshot, rangetable, result-rel info, and external Param
+ * info. They need their own copies of local state, including a tuple
+ * table, es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@@ -2036,11 +2039,11 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_instrument = estate->es_instrument;
epqstate->es_force_oids = estate->es_force_oids;
epqstate->es_topPlan = estate->es_topPlan;
+
/*
- * Each epqstate must have its own es_evTupleNull state, but
- * all the stack entries share es_evTuple state. This allows
- * sub-rechecks to inherit the value being examined by an
- * outer recheck.
+ * Each epqstate must have its own es_evTupleNull state, but all the
+ * stack entries share es_evTuple state. This allows sub-rechecks to
+ * inherit the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)
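Several execMain.c hunks merely re-align continuation lines inside ereport() calls. The calling convention they all share is easy to miss: the auxiliary error-data calls are wrapped in one extra pair of parentheses so the whole list becomes a single macro argument, which avoids needing variadic macros. A small compilable sketch of the same pattern; the function and its argument are illustrative, not taken from the patch.

#include "postgres.h"

/* Sketch of the ereport() idiom used throughout the hunks above:
 * errcode()/errmsg() sit inside an extra set of parentheses so they are
 * passed to the macro as one argument. */
static void
reject_view(const char *relname)
{
	ereport(ERROR,
			(errcode(ERRCODE_WRONG_OBJECT_TYPE),
			 errmsg("cannot change view relation \"%s\"", relname)));
}
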
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index f73f2d71852..1c34e1d1a4b 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.37 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.38 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -226,7 +226,7 @@ ExecInitNode(Plan *node, EState *estate)
subps = NIL;
foreach(subp, node->initPlan)
{
- SubPlan *subplan = (SubPlan *) lfirst(subp);
+ SubPlan *subplan = (SubPlan *) lfirst(subp);
SubPlanState *sstate;
Assert(IsA(subplan, SubPlan));
@@ -237,9 +237,9 @@ ExecInitNode(Plan *node, EState *estate)
result->initPlan = subps;
/*
- * Initialize any subPlans present in this node. These were found
- * by ExecInitExpr during initialization of the PlanState. Note we
- * must do this after initializing initPlans, in case their arguments
+ * Initialize any subPlans present in this node. These were found by
+ * ExecInitExpr during initialization of the PlanState. Note we must
+ * do this after initializing initPlans, in case their arguments
* contain subPlans (is that actually possible? perhaps not).
*/
subps = NIL;
@@ -268,7 +268,7 @@ ExecInitNode(Plan *node, EState *estate)
* ----------------------------------------------------------------
*/
TupleTableSlot *
-ExecProcNode(PlanState *node)
+ExecProcNode(PlanState * node)
{
TupleTableSlot *result;
@@ -280,7 +280,7 @@ ExecProcNode(PlanState *node)
if (node == NULL)
return NULL;
- if (node->chgParam != NULL) /* something changed */
+ if (node->chgParam != NULL) /* something changed */
ExecReScan(node, NULL); /* let ReScan handle this */
if (node->instrument)
@@ -484,7 +484,7 @@ ExecCountSlotsNode(Plan *node)
* ----------------------------------------------------------------
*/
void
-ExecEndNode(PlanState *node)
+ExecEndNode(PlanState * node)
{
List *subp;
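The execProcnode.c hunks re-indent the list-walking idiom used all over the executor: foreach() steps through the cells and lfirst() yields the payload, which the caller casts to the node type it expects (SubPlan * above). A self-contained sketch of that idiom as it exists in this era, where the loop variable is itself a List *; the helper below is illustrative only.

#include "postgres.h"
#include "nodes/pg_list.h"

/* Sketch, not part of the patch: count non-NULL payloads in a List by
 * walking it cell by cell with the foreach() macro. */
static int
count_items(List *items)
{
	List	   *cell;
	int			n = 0;

	foreach(cell, items)
	{
		void	   *item = lfirst(cell);	/* normally cast, e.g. (SubPlan *) */

		if (item != NULL)
			n++;
	}
	return n;
}
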
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 891019f0ae7..d509122f29b 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.138 2003/08/01 00:15:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.139 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,55 +50,55 @@
/* static function decls */
-static Datum ExecEvalAggref(AggrefExprState *aggref,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalAggref(AggrefExprState * aggref,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalArrayRef(ArrayRefExprState * astate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalParam(Param *expression, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalFunc(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalOper(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalDistinct(FuncExprState * fcache, ExprContext *econtext,
bool *isNull);
-static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
- ExprContext *econtext, bool *isNull);
+static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
+ ExprContext *econtext, bool *isNull);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
-static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
+static Datum ExecEvalNot(BoolExprState * notclause, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalArray(ArrayExprState *astate,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullTest(GenericExprState *nstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalBooleanTest(GenericExprState *bstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
+static Datum ExecEvalArray(ArrayExprState * astate,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCoalesce(CoalesceExprState * coalesceExpr,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullIf(FuncExprState * nullIfExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullTest(GenericExprState * nstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalBooleanTest(GenericExprState * bstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalCoerceToDomain(CoerceToDomainState * cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
- ExprContext *econtext, bool *isNull);
-static Datum ExecEvalFieldSelect(GenericExprState *fstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
+ ExprContext *econtext, bool *isNull);
+static Datum ExecEvalFieldSelect(GenericExprState * fstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
/*----------
@@ -127,7 +127,7 @@ static Datum ExecEvalFieldSelect(GenericExprState *fstate,
*----------
*/
static Datum
-ExecEvalArrayRef(ArrayRefExprState *astate,
+ExecEvalArrayRef(ArrayRefExprState * astate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -301,7 +301,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalAggref(AggrefExprState *aggref, ExprContext *econtext, bool *isNull)
+ExecEvalAggref(AggrefExprState * aggref, ExprContext *econtext, bool *isNull)
{
if (econtext->ecxt_aggvalues == NULL) /* safety check */
elog(ERROR, "no aggregates in this expression context");
@@ -382,8 +382,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
*
* XXX this is a horrid crock: since the pointer to the slot might live
* longer than the current evaluation context, we are forced to copy
- * the tuple and slot into a long-lived context --- we use
- * the econtext's per-query memory which should be safe enough. This
+ * the tuple and slot into a long-lived context --- we use the
+ * econtext's per-query memory which should be safe enough. This
* represents a serious memory leak if many such tuples are processed
* in one command, however. We ought to redesign the representation
* of whole-tuple datums so that this is not necessary.
@@ -439,7 +439,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
{
/*
* PARAM_EXEC params (internal executor parameters) are stored in
- * the ecxt_param_exec_vals array, and can be accessed by array index.
+ * the ecxt_param_exec_vals array, and can be accessed by array
+ * index.
*/
ParamExecData *prm;
@@ -457,9 +458,9 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
else
{
/*
- * All other parameter types must be sought in ecxt_param_list_info.
- * NOTE: The last entry in the param array is always an
- * entry with kind == PARAM_INVALID.
+ * All other parameter types must be sought in
+ * ecxt_param_list_info. NOTE: The last entry in the param array
+ * is always an entry with kind == PARAM_INVALID.
*/
ParamListInfo paramList = econtext->ecxt_param_list_info;
char *thisParamName = expression->paramname;
@@ -488,8 +489,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
}
if (!matchFound)
paramList++;
- } /* while */
- } /* if */
+ } /* while */
+ } /* if */
if (!matchFound)
{
@@ -605,7 +606,7 @@ GetAttributeByName(TupleTableSlot *slot, char *attname, bool *isNull)
* init_fcache - initialize a FuncExprState node during first use
*/
void
-init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt)
+init_fcache(Oid foid, FuncExprState * fcache, MemoryContext fcacheCxt)
{
AclResult aclresult;
@@ -678,7 +679,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* Evaluate the arguments to a function and then the function itself.
*/
Datum
-ExecMakeFunctionResult(FuncExprState *fcache,
+ExecMakeFunctionResult(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -881,7 +882,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
* object. (If function returns an empty set, we just return NULL instead.)
*/
Tuplestorestate *
-ExecMakeTableFunctionResult(ExprState *funcexpr,
+ExecMakeTableFunctionResult(ExprState * funcexpr,
ExprContext *econtext,
TupleDesc expectedDesc,
TupleDesc *returnDesc)
@@ -899,14 +900,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
bool returnsTuple = false;
/*
- * Normally the passed expression tree will be a FuncExprState, since the
- * grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
- * the planner might have replaced the function call via constant-folding
- * or inlining. So if we see any other kind of expression node, execute
- * it via the general ExecEvalExpr() code; the only difference is that
- * we don't get a chance to pass a special ReturnSetInfo to any functions
- * buried in the expression.
+ * Normally the passed expression tree will be a FuncExprState, since
+ * the grammar only allows a function call at the top level of a table
+ * function reference. However, if the function doesn't return set
+ * then the planner might have replaced the function call via
+ * constant-folding or inlining. So if we see any other kind of
+ * expression node, execute it via the general ExecEvalExpr() code;
+ * the only difference is that we don't get a chance to pass a special
+ * ReturnSetInfo to any functions buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
@@ -924,7 +925,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
+ FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@@ -933,9 +934,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
- * argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
- * separate context just to hold the evaluated arguments?
+ * argument values would disappear when we reset the context in
+ * the inner loop. So do it in caller context. Perhaps we should
+ * make a separate context just to hold the evaluated arguments?
*/
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &(fcache->func);
@@ -990,7 +991,8 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
rsinfo.setDesc = NULL;
/*
- * Switch to short-lived context for calling the function or expression.
+ * Switch to short-lived context for calling the function or
+ * expression.
*/
callerContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@@ -1004,9 +1006,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
HeapTuple tuple;
/*
- * reset per-tuple memory context before each call of the
- * function or expression. This cleans up any local memory the
- * function may leak when called.
+ * reset per-tuple memory context before each call of the function
+ * or expression. This cleans up any local memory the function may
+ * leak when called.
*/
ResetExprContext(econtext);
@@ -1157,7 +1159,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalFunc(FuncExprState *fcache,
+ExecEvalFunc(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1167,7 +1169,7 @@ ExecEvalFunc(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
+ FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@@ -1180,7 +1182,7 @@ ExecEvalFunc(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalOper(FuncExprState *fcache,
+ExecEvalOper(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1190,7 +1192,7 @@ ExecEvalOper(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- OpExpr *op = (OpExpr *) fcache->xprstate.expr;
+ OpExpr *op = (OpExpr *) fcache->xprstate.expr;
init_fcache(op->opfuncid, fcache, econtext->ecxt_per_query_memory);
}
@@ -1210,7 +1212,7 @@ ExecEvalOper(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalDistinct(FuncExprState *fcache,
+ExecEvalDistinct(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull)
{
@@ -1242,7 +1244,7 @@ ExecEvalDistinct(FuncExprState *fcache,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM does not support set arguments")));
+ errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);
if (fcinfo.argnull[0] && fcinfo.argnull[1])
@@ -1272,11 +1274,11 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
-ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
+ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
ExprContext *econtext, bool *isNull)
{
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) sstate->fxprstate.xprstate.expr;
@@ -1310,12 +1312,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("op ANY/ALL (array) does not support set arguments")));
+ errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);
/*
- * If the array is NULL then we return NULL --- it's not very meaningful
- * to do anything else, even if the operator isn't strict.
+ * If the array is NULL then we return NULL --- it's not very
+ * meaningful to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
@@ -1334,6 +1336,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);
+
/*
* If the scalar is NULL, and the function is strict, return NULL.
* This is just to avoid having to test for strictness inside the
@@ -1347,8 +1350,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
}
/*
- * We arrange to look up info about the element type only
- * once per series of calls, assuming the element type doesn't change
+ * We arrange to look up info about the element type only once per
+ * series of calls, assuming the element type doesn't change
* underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
@@ -1370,8 +1373,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
s = (char *) ARR_DATA_PTR(arr);
for (i = 0; i < nitems; i++)
{
- Datum elt;
- Datum thisresult;
+ Datum elt;
+ Datum thisresult;
/* Get array element */
elt = fetch_att(s, typbyval, typlen);
@@ -1394,7 +1397,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(true);
resultnull = false;
- break; /* needn't look at any more elements */
+ break; /* needn't look at any more elements */
}
}
else
@@ -1403,7 +1406,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(false);
resultnull = false;
- break; /* needn't look at any more elements */
+ break; /* needn't look at any more elements */
}
}
}
@@ -1428,7 +1431,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
+ExecEvalNot(BoolExprState * notclause, ExprContext *econtext, bool *isNull)
{
ExprState *clause;
Datum expr_value;
@@ -1456,7 +1459,7 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
+ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@@ -1504,7 +1507,7 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
+ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@@ -1552,7 +1555,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
+ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
List *clauses;
@@ -1610,22 +1613,22 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
+ExecEvalArray(ArrayExprState * astate, ExprContext *econtext,
bool *isNull)
{
- ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
+ ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
ArrayType *result;
- List *element;
- Oid element_type = arrayExpr->element_typeid;
- int ndims = arrayExpr->ndims;
- int dims[MAXDIM];
- int lbs[MAXDIM];
+ List *element;
+ Oid element_type = arrayExpr->element_typeid;
+ int ndims = arrayExpr->ndims;
+ int dims[MAXDIM];
+ int lbs[MAXDIM];
if (ndims == 1)
{
- int nelems;
- Datum *dvalues;
- int i = 0;
+ int nelems;
+ Datum *dvalues;
+ int i = 0;
nelems = length(astate->elements);
@@ -1683,7 +1686,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
/* loop through and get data area from each element */
foreach(element, astate->elements)
{
- ExprState *e = (ExprState *) lfirst(element);
+ ExprState *e = (ExprState *) lfirst(element);
bool eisnull;
Datum arraydatum;
ArrayType *array;
@@ -1718,8 +1721,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("multidimensional arrays must have array "
- "expressions with matching dimensions")));
+ errmsg("multidimensional arrays must have array "
+ "expressions with matching dimensions")));
}
elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
@@ -1767,16 +1770,16 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
+ExecEvalCoalesce(CoalesceExprState * coalesceExpr, ExprContext *econtext,
bool *isNull)
{
- List *arg;
+ List *arg;
/* Simply loop through until something NOT NULL is found */
foreach(arg, coalesceExpr->args)
{
- ExprState *e = (ExprState *) lfirst(arg);
- Datum value;
+ ExprState *e = (ExprState *) lfirst(arg);
+ Datum value;
value = ExecEvalExpr(e, econtext, isNull, NULL);
if (!*isNull)
@@ -1787,7 +1790,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
*isNull = true;
return (Datum) 0;
}
-
+
/* ----------------------------------------------------------------
* ExecEvalNullIf
*
@@ -1797,7 +1800,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
+ExecEvalNullIf(FuncExprState * fcache, ExprContext *econtext,
bool *isNull)
{
Datum result;
@@ -1856,7 +1859,7 @@ ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNullTest(GenericExprState *nstate,
+ExecEvalNullTest(GenericExprState * nstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1901,7 +1904,7 @@ ExecEvalNullTest(GenericExprState *nstate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalBooleanTest(GenericExprState *bstate,
+ExecEvalBooleanTest(GenericExprState * bstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1987,7 +1990,7 @@ ExecEvalBooleanTest(GenericExprState *bstate,
* datum) otherwise throw an error.
*/
static Datum
-ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
+ExecEvalCoerceToDomain(CoerceToDomainState * cstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
CoerceToDomain *ctest = (CoerceToDomain *) cstate->xprstate.expr;
@@ -2009,43 +2012,44 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("domain %s does not allow NULL values",
- format_type_be(ctest->resulttype))));
+ errmsg("domain %s does not allow NULL values",
+ format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
- {
- Datum conResult;
- bool conIsNull;
- Datum save_datum;
- bool save_isNull;
-
- /*
- * Set up value to be returned by CoerceToDomainValue nodes.
- * We must save and restore prior setting of econtext's
- * domainValue fields, in case this node is itself within
- * a check expression for another domain.
- */
- save_datum = econtext->domainValue_datum;
- save_isNull = econtext->domainValue_isNull;
+ {
+ Datum conResult;
+ bool conIsNull;
+ Datum save_datum;
+ bool save_isNull;
- econtext->domainValue_datum = result;
- econtext->domainValue_isNull = *isNull;
+ /*
+ * Set up value to be returned by CoerceToDomainValue
+ * nodes. We must save and restore prior setting of
+ * econtext's domainValue fields, in case this node is
+ * itself within a check expression for another
+ * domain.
+ */
+ save_datum = econtext->domainValue_datum;
+ save_isNull = econtext->domainValue_isNull;
- conResult = ExecEvalExpr(con->check_expr,
- econtext, &conIsNull, NULL);
+ econtext->domainValue_datum = result;
+ econtext->domainValue_isNull = *isNull;
- if (!conIsNull &&
- !DatumGetBool(conResult))
- ereport(ERROR,
- (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("value for domain %s violates CHECK constraint \"%s\"",
- format_type_be(ctest->resulttype),
- con->name)));
- econtext->domainValue_datum = save_datum;
- econtext->domainValue_isNull = save_isNull;
+ conResult = ExecEvalExpr(con->check_expr,
+ econtext, &conIsNull, NULL);
- break;
- }
+ if (!conIsNull &&
+ !DatumGetBool(conResult))
+ ereport(ERROR,
+ (errcode(ERRCODE_CHECK_VIOLATION),
+ errmsg("value for domain %s violates CHECK constraint \"%s\"",
+ format_type_be(ctest->resulttype),
+ con->name)));
+ econtext->domainValue_datum = save_datum;
+ econtext->domainValue_isNull = save_isNull;
+
+ break;
+ }
default:
elog(ERROR, "unrecognized constraint type: %d",
(int) con->constrainttype);
@@ -2063,7 +2067,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
* Return the value stored by CoerceToDomain.
*/
static Datum
-ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
+ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
ExprContext *econtext, bool *isNull)
{
*isNull = econtext->domainValue_isNull;
@@ -2077,7 +2081,7 @@ ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalFieldSelect(GenericExprState *fstate,
+ExecEvalFieldSelect(GenericExprState * fstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2141,7 +2145,7 @@ ExecEvalFieldSelect(GenericExprState *fstate,
* ----------------------------------------------------------------
*/
Datum
-ExecEvalExpr(ExprState *expression,
+ExecEvalExpr(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2308,7 +2312,7 @@ ExecEvalExpr(ExprState *expression,
* Same as above, but get into the right allocation context explicitly.
*/
Datum
-ExecEvalExprSwitchContext(ExprState *expression,
+ExecEvalExprSwitchContext(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2327,7 +2331,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -2337,7 +2341,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* the same as the per-query context of the associated ExprContext.
*
* Any Aggref and SubPlan nodes found in the tree are added to the lists
- * of such nodes held by the parent PlanState. Otherwise, we do very little
+ * of such nodes held by the parent PlanState. Otherwise, we do very little
* initialization here other than building the state-node tree. Any nontrivial
* work associated with initializing runtime info for a node should happen
* during the first actual evaluation of that node. (This policy lets us
@@ -2356,7 +2360,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* This case should usually come through ExecPrepareExpr, not directly here.
*/
ExprState *
-ExecInitExpr(Expr *node, PlanState *parent)
+ExecInitExpr(Expr *node, PlanState * parent)
{
ExprState *state;
@@ -2373,7 +2377,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_Aggref:
{
- Aggref *aggref = (Aggref *) node;
+ Aggref *aggref = (Aggref *) node;
AggrefExprState *astate = makeNode(AggrefExprState);
if (parent && IsA(parent, AggState))
@@ -2389,8 +2393,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
- * nonsensical. (This should have been caught earlier,
- * but we defend against it here anyway.)
+ * nonsensical. (This should have been caught
+ * earlier, but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
@@ -2433,41 +2437,41 @@ ExecInitExpr(Expr *node, PlanState *parent)
fstate->args = (List *)
ExecInitExpr((Expr *) funcexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_OpExpr:
{
- OpExpr *opexpr = (OpExpr *) node;
+ OpExpr *opexpr = (OpExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);
fstate->args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_DistinctExpr:
{
- DistinctExpr *distinctexpr = (DistinctExpr *) node;
+ DistinctExpr *distinctexpr = (DistinctExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);
fstate->args = (List *)
ExecInitExpr((Expr *) distinctexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_ScalarArrayOpExpr:
{
- ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExprState *sstate = makeNode(ScalarArrayOpExprState);
sstate->fxprstate.args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
- sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
- sstate->element_type = InvalidOid; /* ditto */
+ sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
+ sstate->element_type = InvalidOid; /* ditto */
state = (ExprState *) sstate;
}
break;
@@ -2484,7 +2488,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
case T_SubPlan:
{
/* Keep this in sync with ExecInitExprInitPlan, below */
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
SubPlanState *sstate = makeNode(SubPlanState);
if (!parent)
@@ -2492,7 +2496,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Here we just add the SubPlanState nodes to
- * parent->subPlan. The subplans will be initialized later.
+ * parent->subPlan. The subplans will be initialized
+ * later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
@@ -2508,7 +2513,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_FieldSelect:
{
- FieldSelect *fselect = (FieldSelect *) node;
+ FieldSelect *fselect = (FieldSelect *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(fselect->arg, parent);
@@ -2517,7 +2522,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_RelabelType:
{
- RelabelType *relabel = (RelabelType *) node;
+ RelabelType *relabel = (RelabelType *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(relabel->arg, parent);
@@ -2552,10 +2557,10 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_ArrayExpr:
{
- ArrayExpr *arrayexpr = (ArrayExpr *) node;
+ ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExprState *astate = makeNode(ArrayExprState);
- FastList outlist;
- List *inlist;
+ FastList outlist;
+ List *inlist;
FastListInit(&outlist);
foreach(inlist, arrayexpr->elements)
@@ -2585,8 +2590,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
FastListInit(&outlist);
foreach(inlist, coalesceexpr->args)
{
- Expr *e = (Expr *) lfirst(inlist);
- ExprState *estate;
+ Expr *e = (Expr *) lfirst(inlist);
+ ExprState *estate;
estate = ExecInitExpr(e, parent);
FastAppend(&outlist, estate);
@@ -2602,7 +2607,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
fstate->args = (List *)
ExecInitExpr((Expr *) nullifexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
@@ -2617,7 +2622,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_BooleanTest:
{
- BooleanTest *btest = (BooleanTest *) node;
+ BooleanTest *btest = (BooleanTest *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(btest->arg, parent);
@@ -2626,7 +2631,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_CoerceToDomain:
{
- CoerceToDomain *ctest = (CoerceToDomain *) node;
+ CoerceToDomain *ctest = (CoerceToDomain *) node;
CoerceToDomainState *cstate = makeNode(CoerceToDomainState);
cstate->arg = ExecInitExpr(ctest->arg, parent);
@@ -2636,7 +2641,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_TargetEntry:
{
- TargetEntry *tle = (TargetEntry *) node;
+ TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(tle->expr, parent);
@@ -2673,12 +2678,12 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* ExecInitExprInitPlan --- initialize a subplan expr that's being handled
- * as an InitPlan. This is identical to ExecInitExpr's handling of a regular
+ * as an InitPlan. This is identical to ExecInitExpr's handling of a regular
* subplan expr, except we do NOT want to add the node to the parent's
* subplan list.
*/
SubPlanState *
-ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
+ExecInitExprInitPlan(SubPlan *node, PlanState * parent)
{
SubPlanState *sstate = makeNode(SubPlanState);
@@ -2704,7 +2709,7 @@ ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
* This differs from ExecInitExpr in that we don't assume the caller is
* already running in the EState's per-query context. Also, we apply
* fix_opfuncids() to the passed expression tree to be sure it is ready
- * to run. (In ordinary Plan trees the planner will have fixed opfuncids,
+ * to run. (In ordinary Plan trees the planner will have fixed opfuncids,
* but callers outside the executor will not have done this.)
*/
ExprState *
@@ -2988,8 +2993,8 @@ ExecTargetList(List *targetlist,
if (itemIsDone[resind] == ExprEndResult)
{
/*
- * Oh dear, this item is returning an empty
- * set. Guess we can't make a tuple after all.
+ * Oh dear, this item is returning an empty set.
+ * Guess we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;
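The ExecEvalScalarArrayOp() comment reflowed above describes the combining rule for "scalar op ANY/ALL (array)": per-element operator results are ORed together for ANY and ANDed for ALL, with an early exit once the outcome is settled. A standalone sketch of just that rule in plain C, with no backend headers; it deliberately omits the SQL NULL handling the real code also performs.

#include <stdbool.h>
#include <stddef.h>

/* Sketch of the ANY/ALL combining rule described above: OR for ANY,
 * AND for ALL, short-circuiting as soon as the result is known.
 * NULL semantics are intentionally left out. */
static bool
combine_any_all(bool useOr, const bool *elem_results, size_t nitems)
{
	bool		result = !useOr;	/* identity: false for OR, true for AND */
	size_t		i;

	for (i = 0; i < nitems; i++)
	{
		result = useOr ? (result || elem_results[i])
					   : (result && elem_results[i]);
		if (result == useOr)		/* true for ANY, false for ALL: decided */
			break;
	}
	return result;
}
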
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 9352c79d81e..35007cf0cc0 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.23 2003/02/03 15:07:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.24 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ static bool tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc);
* ----------------------------------------------------------------
*/
TupleTableSlot *
-ExecScan(ScanState *node,
+ExecScan(ScanState * node,
ExecScanAccessMtd accessMtd) /* function returning a tuple */
{
EState *estate;
@@ -134,9 +134,10 @@ ExecScan(ScanState *node,
if (projInfo)
{
/*
- * Form a projection tuple, store it in the result tuple slot
- * and return it --- unless we find we can project no tuples
- * from this scan tuple, in which case continue scan.
+ * Form a projection tuple, store it in the result tuple
+ * slot and return it --- unless we find we can project no
+ * tuples from this scan tuple, in which case continue
+ * scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
@@ -175,13 +176,13 @@ ExecScan(ScanState *node,
* ExecAssignScanType must have been called already.
*/
void
-ExecAssignScanProjectionInfo(ScanState *node)
+ExecAssignScanProjectionInfo(ScanState * node)
{
- Scan *scan = (Scan *) node->ps.plan;
+ Scan *scan = (Scan *) node->ps.plan;
if (tlist_matches_tupdesc(scan->plan.targetlist,
scan->scanrelid,
- node->ss_ScanTupleSlot->ttc_tupleDescriptor))
+ node->ss_ScanTupleSlot->ttc_tupleDescriptor))
node->ps.ps_ProjInfo = NULL;
else
ExecAssignProjectionInfo(&node->ps);
@@ -190,13 +191,13 @@ ExecAssignScanProjectionInfo(ScanState *node)
static bool
tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc)
{
- int numattrs = tupdesc->natts;
- int attrno;
+ int numattrs = tupdesc->natts;
+ int attrno;
for (attrno = 1; attrno <= numattrs; attrno++)
{
Form_pg_attribute att_tup = tupdesc->attrs[attrno - 1];
- Var *var;
+ Var *var;
if (tlist == NIL)
return false; /* tlist too short */
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 976c152236c..c2145b7eca8 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.67 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.68 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -482,7 +482,7 @@ ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, /* slot to change */
* ----------------
*/
void
-ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
+ExecInitResultTupleSlot(EState *estate, PlanState * planstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@@ -494,7 +494,7 @@ ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
* ----------------
*/
void
-ExecInitScanTupleSlot(EState *estate, ScanState *scanstate)
+ExecInitScanTupleSlot(EState *estate, ScanState * scanstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@@ -807,7 +807,7 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
if (eol)
*eol++ = '\0';
else
- eol = text + strlen(text);
+ eol = text +strlen(text);
do_tup_output(tstate, &text);
text = eol;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index ca4ff192f44..f87708d3927 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.100 2003/05/28 16:03:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.101 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,8 +166,8 @@ CreateExecutorState(void)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the EState node within the per-query context. This way,
- * we don't need a separate pfree() operation for it at shutdown.
+ * Make the EState node within the per-query context. This way, we
+ * don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);
@@ -248,6 +248,7 @@ FreeExecutorState(EState *estate)
FreeExprContext((ExprContext *) lfirst(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
}
+
/*
* Free the per-query memory context, thereby releasing all working
* memory, including the EState node itself.
@@ -310,10 +311,10 @@ CreateExprContext(EState *estate)
econtext->ecxt_callbacks = NULL;
/*
- * Link the ExprContext into the EState to ensure it is shut down
- * when the EState is freed. Because we use lcons(), shutdowns will
- * occur in reverse order of creation, which may not be essential
- * but can't hurt.
+ * Link the ExprContext into the EState to ensure it is shut down when
+ * the EState is freed. Because we use lcons(), shutdowns will occur
+ * in reverse order of creation, which may not be essential but can't
+ * hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
@@ -377,14 +378,14 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
- * because those routines require an econtext. Other nodes that
+ * because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
* ----------------
*/
void
-ExecAssignExprContext(EState *estate, PlanState *planstate)
+ExecAssignExprContext(EState *estate, PlanState * planstate)
{
planstate->ps_ExprContext = CreateExprContext(estate);
}
@@ -394,7 +395,7 @@ ExecAssignExprContext(EState *estate, PlanState *planstate)
* ----------------
*/
void
-ExecAssignResultType(PlanState *planstate,
+ExecAssignResultType(PlanState * planstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;
@@ -407,7 +408,7 @@ ExecAssignResultType(PlanState *planstate,
* ----------------
*/
void
-ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
+ExecAssignResultTypeFromOuterPlan(PlanState * planstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@@ -423,7 +424,7 @@ ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
* ----------------
*/
void
-ExecAssignResultTypeFromTL(PlanState *planstate)
+ExecAssignResultTypeFromTL(PlanState * planstate)
{
bool hasoid = false;
TupleDesc tupDesc;
@@ -445,9 +446,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* each of the child plans of the topmost Append plan. So, this is
* ugly but it works, for now ...
*
- * SELECT INTO is also pretty grotty, because we don't yet have the
- * INTO relation's descriptor at this point; we have to look aside
- * at a flag set by InitPlan().
+ * SELECT INTO is also pretty grotty, because we don't yet have the INTO
+ * relation's descriptor at this point; we have to look aside at a
+ * flag set by InitPlan().
*/
if (planstate->state->es_force_oids)
hasoid = true;
@@ -465,9 +466,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
}
/*
- * ExecTypeFromTL needs the parse-time representation of the tlist, not
- * a list of ExprStates. This is good because some plan nodes don't
- * bother to set up planstate->targetlist ...
+ * ExecTypeFromTL needs the parse-time representation of the tlist,
+ * not a list of ExprStates. This is good because some plan nodes
+ * don't bother to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
@@ -478,7 +479,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* ----------------
*/
TupleDesc
-ExecGetResultType(PlanState *planstate)
+ExecGetResultType(PlanState * planstate)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;
@@ -524,7 +525,7 @@ ExecBuildProjectionInfo(List *targetList,
* ----------------
*/
void
-ExecAssignProjectionInfo(PlanState *planstate)
+ExecAssignProjectionInfo(PlanState * planstate)
{
planstate->ps_ProjInfo =
ExecBuildProjectionInfo(planstate->targetlist,
@@ -543,7 +544,7 @@ ExecAssignProjectionInfo(PlanState *planstate)
* ----------------
*/
void
-ExecFreeExprContext(PlanState *planstate)
+ExecFreeExprContext(PlanState * planstate)
{
ExprContext *econtext;
@@ -575,7 +576,7 @@ ExecFreeExprContext(PlanState *planstate)
* ----------------
*/
TupleDesc
-ExecGetScanType(ScanState *scanstate)
+ExecGetScanType(ScanState * scanstate)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;
@@ -587,7 +588,7 @@ ExecGetScanType(ScanState *scanstate)
* ----------------
*/
void
-ExecAssignScanType(ScanState *scanstate,
+ExecAssignScanType(ScanState * scanstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;
@@ -600,7 +601,7 @@ ExecAssignScanType(ScanState *scanstate,
* ----------------
*/
void
-ExecAssignScanTypeFromOuterPlan(ScanState *scanstate)
+ExecAssignScanTypeFromOuterPlan(ScanState * scanstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@@ -795,8 +796,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* We will use the EState's per-tuple context for evaluating
- * predicates and index expressions (creating it if it's not
- * already there).
+ * predicates and index expressions (creating it if it's not already
+ * there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -841,8 +842,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* FormIndexDatum fills in its datum and null parameters with
- * attribute information taken from the given heap tuple.
- * It also computes any expressions needed.
+ * attribute information taken from the given heap tuple. It also
+ * computes any expressions needed.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -878,7 +879,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* Add changed parameters to a plan node's chgParam set
*/
void
-UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
+UpdateChangedParamSet(PlanState * node, Bitmapset * newchg)
{
Bitmapset *parmset;
@@ -887,6 +888,7 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
* Don't include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);
+
/*
* Keep node->chgParam == NULL if there's not actually any members;
* this allows the simplest possible tests in executor node files.
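The UpdateChangedParamSet hunk above restricts a node's chgParam to the parameters that node actually uses, keeping the set NULL when nothing relevant changed. A minimal standalone sketch of that idea, using a plain uint32 bitmask instead of the real Bitmapset type (all names below are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Toy stand-in for UpdateChangedParamSet: a node only records the changed
     * parameters it actually depends on, and keeps the set "empty" (0 here,
     * NULL in the real code) when nothing relevant changed, so callers can
     * test cheaply whether a rescan is needed.
     */
    static uint32_t
    update_changed_params(uint32_t all_params, uint32_t changed)
    {
        return all_params & changed;    /* intersection of the two sets */
    }

    int
    main(void)
    {
        uint32_t node_params = 0x0006;  /* node uses params 1 and 2 */
        uint32_t changed = 0x0004;      /* param 2 changed upstream */
        uint32_t chg = update_changed_params(node_params, changed);

        if (chg == 0)
            printf("no relevant change, skip rescan\n");
        else
            printf("params 0x%x changed, rescan the node\n", chg);
        return 0;
    }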
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index c8df7ccb83c..ebc3cbcac37 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.69 2003/07/28 18:33:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.70 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
/*
- * We have an execution_state record for each query in a function. Each
- * record contains a querytree and plantree for its query. If the query
+ * We have an execution_state record for each query in a function. Each
+ * record contains a querytree and plantree for its query. If the query
* is currently in F_EXEC_RUN state then there's a QueryDesc too.
*/
typedef enum
@@ -83,7 +83,7 @@ static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
static void postquel_end(execution_state *es);
static void postquel_sub_params(SQLFunctionCachePtr fcache,
- FunctionCallInfo fcinfo);
+ FunctionCallInfo fcinfo);
static Datum postquel_execute(execution_state *es,
FunctionCallInfo fcinfo,
SQLFunctionCachePtr fcache);
@@ -177,11 +177,11 @@ init_sql_fcache(FmgrInfo *finfo)
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
rettype = get_fn_expr_rettype(finfo);
- if (rettype == InvalidOid) /* this probably should not happen */
+ if (rettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared %s",
- format_type_be(procedureStruct->prorettype))));
+ format_type_be(procedureStruct->prorettype))));
}
/* Now look up the actual result type */
@@ -226,7 +226,7 @@ init_sql_fcache(FmgrInfo *finfo)
fcache->funcSlot = NULL;
/*
- * Parse and plan the queries. We need the argument type info to pass
+ * Parse and plan the queries. We need the argument type info to pass
* to the parser.
*/
nargs = procedureStruct->pronargs;
@@ -234,7 +234,7 @@ init_sql_fcache(FmgrInfo *finfo)
if (nargs > 0)
{
- int argnum;
+ int argnum;
argOidVect = (Oid *) palloc(nargs * sizeof(Oid));
memcpy(argOidVect,
@@ -243,7 +243,7 @@ init_sql_fcache(FmgrInfo *finfo)
/* Resolve any polymorphic argument types */
for (argnum = 0; argnum < nargs; argnum++)
{
- Oid argtype = argOidVect[argnum];
+ Oid argtype = argOidVect[argnum];
if (argtype == ANYARRAYOID || argtype == ANYELEMENTOID)
{
@@ -309,7 +309,7 @@ postquel_getnext(execution_state *es)
/*
* If it's the function's last command, and it's a SELECT, fetch one
- * row at a time so we can return the results. Otherwise just run it
+ * row at a time so we can return the results. Otherwise just run it
* to completion.
*/
if (LAST_POSTQUEL_COMMAND(es) && es->qd->operation == CMD_SELECT)
@@ -655,14 +655,14 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a
* query with non-null QueryDesc, finger it. (We check this rather
- * than looking for F_EXEC_RUN state, so that errors during ExecutorStart
- * or ExecutorEnd are blamed on the appropriate query; see postquel_start
- * and postquel_end.)
+ * than looking for F_EXEC_RUN state, so that errors during
+ * ExecutorStart or ExecutorEnd are blamed on the appropriate query;
+ * see postquel_start and postquel_end.)
*/
if (fcache)
{
execution_state *es;
- int query_num;
+ int query_num;
es = fcache->func_state;
query_num = 1;
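The functions.c hunks above describe one execution_state per query in a SQL function, with the error callback blaming whichever query currently has a non-null QueryDesc. A small standalone sketch of that lookup, with toy types in place of the executor's (nothing below is the actual fcache code):

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Sketch of the error-callback idea: one state record per query in the
     * function; the query to blame is the one whose descriptor is currently
     * set (i.e. between "start" and "end").  The types here are invented
     * for illustration only.
     */
    typedef struct exec_state
    {
        const char *query_text;
        void       *qd;             /* non-NULL while the query is executing */
    } exec_state;

    static int
    failing_query_number(const exec_state *states, int nstates)
    {
        for (int i = 0; i < nstates; i++)
            if (states[i].qd != NULL)
                return i + 1;       /* 1-based, like the real callback */
        return 0;                   /* not currently inside any query */
    }

    int
    main(void)
    {
        int dummy;
        exec_state states[] = {
            {"INSERT ...", NULL},
            {"UPDATE ...", &dummy}, /* pretend this one failed mid-run */
            {"SELECT ...", NULL},
        };

        printf("error happened in query %d\n",
               failing_query_number(states, 3));
        return 0;
    }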
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index f0537cf5d90..5cf448e772a 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.112 2003/08/01 00:15:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.113 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -173,12 +173,12 @@ typedef struct AggStatePerGroupData
* later input value. Only the first non-NULL input will be
* auto-substituted.
*/
-} AggStatePerGroupData;
+} AggStatePerGroupData;
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -188,27 +188,27 @@ typedef struct AggHashEntryData
TupleHashEntryData shared; /* common header for hash table entries */
/* per-aggregate transition status array - must be last! */
AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */
-} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
+} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
static void initialize_aggregates(AggState *aggstate,
- AggStatePerAgg peragg,
- AggStatePerGroup pergroup);
+ AggStatePerAgg peragg,
+ AggStatePerGroup pergroup);
static void advance_transition_function(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate,
- Datum newVal, bool isNull);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate,
+ Datum newVal, bool isNull);
static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup);
static void process_sorted_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate);
static void finalize_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate,
- Datum *resultVal, bool *resultIsNull);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate,
+ Datum *resultVal, bool *resultIsNull);
static void build_hash_table(AggState *aggstate);
static AggHashEntry lookup_hash_entry(AggState *aggstate,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
static void agg_fill_hash_table(AggState *aggstate);
static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
@@ -231,7 +231,7 @@ initialize_aggregates(AggState *aggstate,
{
AggStatePerAgg peraggstate = &peragg[aggno];
AggStatePerGroup pergroupstate = &pergroup[aggno];
- Aggref *aggref = peraggstate->aggref;
+ Aggref *aggref = peraggstate->aggref;
/*
* Start a fresh sort operation for each DISTINCT aggregate.
@@ -265,18 +265,18 @@ initialize_aggregates(AggState *aggstate,
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
- * If the initial value for the transition state doesn't exist in the
- * pg_aggregate table then we will let the first non-NULL value
- * returned from the outer procNode become the initial value. (This is
- * useful for aggregates like max() and min().) The noTransValue flag
- * signals that we still need to do this.
+ * If the initial value for the transition state doesn't exist in
+ * the pg_aggregate table then we will let the first non-NULL
+ * value returned from the outer procNode become the initial
+ * value. (This is useful for aggregates like max() and min().)
+ * The noTransValue flag signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@@ -299,8 +299,8 @@ advance_transition_function(AggState *aggstate,
if (peraggstate->transfn.fn_strict)
{
/*
- * For a strict transfn, nothing happens at a NULL input
- * tuple; we just keep the prior transValue.
+ * For a strict transfn, nothing happens at a NULL input tuple; we
+ * just keep the prior transValue.
*/
if (isNull)
return;
@@ -314,12 +314,13 @@ advance_transition_function(AggState *aggstate,
* here is OK.)
*
* We must copy the datum into aggcontext if it is pass-by-ref.
- * We do not need to pfree the old transValue, since it's NULL.
+ * We do not need to pfree the old transValue, since it's
+ * NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
@@ -363,12 +364,12 @@ advance_transition_function(AggState *aggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
- * first input, we don't need to do anything.
+ * If pass-by-ref datatype, must copy the new value into aggcontext
+ * and pfree the prior transValue. But if transfn returned a pointer
+ * to its first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
- DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
+ DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
@@ -388,7 +389,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_scantuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -467,8 +468,8 @@ process_sorted_aggregate(AggState *aggstate,
continue;
/*
- * Clear and select the working context for evaluation of
- * the equality function and transition function.
+ * Clear and select the working context for evaluation of the
+ * equality function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
@@ -570,9 +571,9 @@ finalize_aggregate(AggState *aggstate,
static void
build_hash_table(AggState *aggstate)
{
- Agg *node = (Agg *) aggstate->ss.ps.plan;
- MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size entrysize;
+ Agg *node = (Agg *) aggstate->ss.ps.plan;
+ MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
+ Size entrysize;
Assert(node->aggstrategy == AGG_HASHED);
Assert(node->numGroups > 0);
@@ -622,9 +623,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *slot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -641,9 +642,7 @@ ExecAgg(AggState *node)
return agg_retrieve_hash_table(node);
}
else
- {
return agg_retrieve_direct(node);
- }
}
/*
@@ -736,7 +735,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
- aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
+ aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
/* set up for first advance_aggregates call */
tmpcontext->ecxt_scantuple = firstSlot;
@@ -773,7 +772,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot->ttc_tupleDescriptor,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
@@ -806,15 +805,15 @@ agg_retrieve_direct(AggState *aggstate)
* anything), create a dummy all-nulls input tuple for use by
* ExecProject. 99.44% of the time this is a waste of cycles,
* because ordinarily the projected output tuple's targetlist
- * cannot contain any direct (non-aggregated) references to
- * input columns, so the dummy tuple will not be referenced.
- * However there are special cases where this isn't so --- in
- * particular an UPDATE involving an aggregate will have a
- * targetlist reference to ctid. We need to return a null for
- * ctid in that situation, not coredump.
+ * cannot contain any direct (non-aggregated) references to input
+ * columns, so the dummy tuple will not be referenced. However
+ * there are special cases where this isn't so --- in particular
+ * an UPDATE involving an aggregate will have a targetlist
+ * reference to ctid. We need to return a null for ctid in that
+ * situation, not coredump.
*
- * The values returned for the aggregates will be the initial
- * values of the transition functions.
+ * The values returned for the aggregates will be the initial values
+ * of the transition functions.
*/
if (TupIsNull(firstSlot))
{
@@ -872,7 +871,7 @@ agg_fill_hash_table(AggState *aggstate)
{
PlanState *outerPlan;
ExprContext *tmpcontext;
- AggHashEntry entry;
+ AggHashEntry entry;
TupleTableSlot *outerslot;
/*
@@ -883,8 +882,8 @@ agg_fill_hash_table(AggState *aggstate)
tmpcontext = aggstate->tmpcontext;
/*
- * Process each outer-plan tuple, and then fetch the next one,
- * until we exhaust the outer plan.
+ * Process each outer-plan tuple, and then fetch the next one, until
+ * we exhaust the outer plan.
*/
for (;;)
{
@@ -921,8 +920,8 @@ agg_retrieve_hash_table(AggState *aggstate)
bool *aggnulls;
AggStatePerAgg peragg;
AggStatePerGroup pergroup;
- TupleHashTable hashtable;
- AggHashEntry entry;
+ TupleHashTable hashtable;
+ AggHashEntry entry;
TupleTableSlot *firstSlot;
TupleTableSlot *resultSlot;
int aggno;
@@ -1045,20 +1044,20 @@ ExecInitAgg(Agg *node, EState *estate)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
- * by using ExecAssignExprContext() to build both.
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a
+ * little by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
ExecAssignExprContext(estate, &aggstate->ss.ps);
/*
- * We also need a long-lived memory context for holding hashtable
- * data structures and transition values. NOTE: the details of what
- * is stored in aggcontext and what is stored in the regular per-query
- * memory context are driven by a simple decision: we want to reset the
- * aggcontext in ExecReScanAgg to recover no-longer-wanted space.
+ * We also need a long-lived memory context for holding hashtable data
+ * structures and transition values. NOTE: the details of what is
+ * stored in aggcontext and what is stored in the regular per-query
+ * memory context are driven by a simple decision: we want to reset
+ * the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -1079,10 +1078,10 @@ ExecInitAgg(Agg *node, EState *estate)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
- * under SQL semantics anyway (and it's forbidden by the spec). Because
- * that is true, we don't need to worry about evaluating the aggs in any
- * particular order.
+ * contain other agg calls in their arguments. This would make no
+ * sense under SQL semantics anyway (and it's forbidden by the spec).
+ * Because that is true, we don't need to worry about evaluating the
+ * aggs in any particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
@@ -1116,19 +1115,20 @@ ExecInitAgg(Agg *node, EState *estate)
if (numaggs <= 0)
{
/*
- * This is not an error condition: we might be using the Agg node just
- * to do hash-based grouping. Even in the regular case,
- * constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
- * copy of numaggs positive so that palloc()s below don't choke.
+ * This is not an error condition: we might be using the Agg node
+ * just to do hash-based grouping. Even in the regular case,
+ * constant-expression simplification could optimize away all of
+ * the Aggrefs in the targetlist and qual. So keep going, but
+ * force local copy of numaggs positive so that palloc()s below
+ * don't choke.
*/
numaggs = 1;
}
/*
- * If we are grouping, precompute fmgr lookup data for inner loop.
- * We need both equality and hashing functions to do it by hashing,
- * but only equality if not hashing.
+ * If we are grouping, precompute fmgr lookup data for inner loop. We
+ * need both equality and hashing functions to do it by hashing, but
+ * only equality if not hashing.
*/
if (node->numCols > 0)
{
@@ -1146,8 +1146,8 @@ ExecInitAgg(Agg *node, EState *estate)
}
/*
- * Set up aggregate-result storage in the output expr context, and also
- * allocate my private per-agg working storage
+ * Set up aggregate-result storage in the output expr context, and
+ * also allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
@@ -1174,8 +1174,8 @@ ExecInitAgg(Agg *node, EState *estate)
* unchanging fields of the per-agg data. We also detect duplicate
* aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
* When duplicates are detected, we only make an AggStatePerAgg struct
- * for the first one. The clones are simply pointed at the same result
- * entry by giving them duplicate aggno values.
+ * for the first one. The clones are simply pointed at the same
+ * result entry by giving them duplicate aggno values.
*/
aggno = -1;
foreach(alist, aggstate->aggs)
@@ -1425,9 +1425,9 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
- * In the hashed case, if we haven't yet built the hash table
- * then we can just return; nothing done yet, so nothing to undo.
- * If subnode's chgParam is not NULL then it will be re-scanned by
+ * In the hashed case, if we haven't yet built the hash table then
+ * we can just return; nothing done yet, so nothing to undo. If
+ * subnode's chgParam is not NULL then it will be re-scanned by
* ExecProcNode, else no reason to re-scan it at all.
*/
if (!node->table_filled)
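The nodeAgg.c comments above cover the transition-value rules: a strict transition function ignores NULL inputs, and when the aggregate has no initial value the first non-NULL input becomes the running value (as for max() and min()). A standalone toy version of that loop, with plain ints standing in for Datums:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Sketch of the transition-value rule for a max()-style aggregate with
     * no initial value: NULL inputs are skipped (strict transition function),
     * and the first non-NULL input simply becomes the running value.
     */
    typedef struct agg_state
    {
        int     trans_value;
        bool    trans_is_null;      /* still have no value at all */
    } agg_state;

    static void
    advance_max(agg_state *state, int input, bool input_is_null)
    {
        if (input_is_null)
            return;                 /* strict: keep the prior state */
        if (state->trans_is_null)
        {
            state->trans_value = input;     /* first non-NULL input */
            state->trans_is_null = false;
            return;
        }
        if (input > state->trans_value)
            state->trans_value = input;
    }

    int
    main(void)
    {
        agg_state   state = {0, true};
        int         values[] = {0, 7, 3, 9};
        bool        nulls[] = {true, false, false, false};

        for (int i = 0; i < 4; i++)
            advance_max(&state, values[i], nulls[i]);

        if (state.trans_is_null)
            printf("max = NULL\n");
        else
            printf("max = %d\n", state.trans_value);    /* prints 9 */
        return 0;
    }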
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index e79d37fd857..7fc8caac2a9 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.52 2003/02/09 00:30:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.53 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -355,7 +355,7 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
for (i = node->as_firstplan; i <= node->as_lastplan; i++)
{
- PlanState *subnode = node->appendplans[i];
+ PlanState *subnode = node->appendplans[i];
/*
* ExecReScan doesn't know about my subplans, so I have to do
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 69053708cda..a4eb9065c06 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.77 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.78 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -244,7 +244,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
i = 0;
foreach(ho, hashOperators)
{
- Oid hashfn;
+ Oid hashfn;
hashfn = get_op_hash_function(lfirsto(ho));
if (!OidIsValid(hashfn))
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index bc3ecdfeeda..a45e007aff2 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.53 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.54 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,8 +22,8 @@
#include "utils/memutils.h"
-static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *node,
- HashJoinState *hjstate);
+static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState * node,
+ HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
TupleTableSlot *tupleSlot);
@@ -94,10 +94,10 @@ ExecHashJoin(HashJoinState *node)
/*
* If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched on
- * the previous try.
+ * outer tuple; so we can stop scanning the inner scan if we matched
+ * on the previous try.
*/
- if (node->js.jointype == JOIN_IN &&
+ if (node->js.jointype == JOIN_IN &&
node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;
@@ -244,7 +244,10 @@ ExecHashJoin(HashJoinState *node)
}
}
- /* If we didn't return a tuple, may need to set NeedNewOuter */
+ /*
+ * If we didn't return a tuple, may need to set
+ * NeedNewOuter
+ */
if (node->js.jointype == JOIN_IN)
{
node->hj_NeedNewOuter = true;
@@ -365,7 +368,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -407,10 +410,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
hjstate->hj_CurTuple = (HashJoinTuple) NULL;
/*
- * The planner already made a list of the inner hashkeys for us,
- * but we also need a list of the outer hashkeys, as well as a list
- * of the hash operator OIDs. Both lists of exprs must then be prepared
- * for execution.
+ * The planner already made a list of the inner hashkeys for us, but
+ * we also need a list of the outer hashkeys, as well as a list of the
+ * hash operator OIDs. Both lists of exprs must then be prepared for
+ * execution.
*/
hjstate->hj_InnerHashKeys = (List *)
ExecInitExpr((Expr *) hashNode->hashkeys,
@@ -496,7 +499,7 @@ ExecEndHashJoin(HashJoinState *node)
*/
static TupleTableSlot *
-ExecHashJoinOuterGetTuple(PlanState *node, HashJoinState *hjstate)
+ExecHashJoinOuterGetTuple(PlanState * node, HashJoinState *hjstate)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
@@ -701,11 +704,11 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
Assert(node->hj_HashTable != NULL);
/*
- * In a multi-batch join, we currently have to do rescans the hard way,
- * primarily because batch temp files may have already been released.
- * But if it's a single-batch join, and there is no parameter change
- * for the inner subnode, then we can just re-use the existing hash
- * table without rebuilding it.
+ * In a multi-batch join, we currently have to do rescans the hard
+ * way, primarily because batch temp files may have already been
+ * released. But if it's a single-batch join, and there is no
+ * parameter change for the inner subnode, then we can just re-use the
+ * existing hash table without rebuilding it.
*/
if (node->hj_HashTable->nbatch == 0 &&
((PlanState *) node)->righttree->chgParam == NULL)
@@ -718,6 +721,7 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_hashdone = false;
ExecHashTableDestroy(node->hj_HashTable);
node->hj_HashTable = NULL;
+
/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
@@ -736,8 +740,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_MatchedOuter = false;
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
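The hash-join hunks above note that an IN join returns at most one row per outer tuple, so the inner scan can stop as soon as a match is found. A self-contained sketch of that behaviour, with a tiny open-addressing set of ints standing in for the real hash table:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Sketch of the IN-join short cut: the inner side is loaded into a hash
     * table once, and each outer row is emitted at most once -- as soon as
     * a match is found we stop probing and move on to the next outer row.
     */
    #define TABSIZE 31

    typedef struct int_set
    {
        int     slot[TABSIZE];
        bool    used[TABSIZE];
    } int_set;

    static void
    set_add(int_set *s, int key)
    {
        int     i = (unsigned int) key % TABSIZE;

        while (s->used[i] && s->slot[i] != key)
            i = (i + 1) % TABSIZE;
        s->slot[i] = key;
        s->used[i] = true;
    }

    static bool
    set_contains(const int_set *s, int key)
    {
        int     i = (unsigned int) key % TABSIZE;

        while (s->used[i])
        {
            if (s->slot[i] == key)
                return true;
            i = (i + 1) % TABSIZE;
        }
        return false;
    }

    int
    main(void)
    {
        int_set inner = {{0}, {false}};
        int     inner_keys[] = {2, 4, 4, 6};    /* duplicates don't matter */
        int     outer_keys[] = {1, 4, 6, 4};

        for (int i = 0; i < 4; i++)
            set_add(&inner, inner_keys[i]);

        for (int i = 0; i < 4; i++)
            if (set_contains(&inner, outer_keys[i]))
                printf("outer %d joins (exactly once)\n", outer_keys[i]);
        return 0;
    }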
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 017a378f9da..d01d4cfa7c7 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.80 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.81 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -290,7 +290,8 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
int j;
estate = node->ss.ps.state;
- econtext = node->iss_RuntimeContext; /* context for runtime keys */
+ econtext = node->iss_RuntimeContext; /* context for runtime
+ * keys */
numIndices = node->iss_NumIndices;
scanDescs = node->iss_ScanDescs;
scanKeys = node->iss_ScanKeys;
@@ -882,7 +883,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
reloid)));
indexstate->ss.ss_currentRelation = currentRelation;
- indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
+ indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
/*
* get the scan type from the relation descriptor.
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 4b1145e258c..7477bd43042 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.15 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.16 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,17 +55,21 @@ ExecLimit(LimitState *node)
switch (node->lstate)
{
case LIMIT_INITIAL:
+
/*
* If backwards scan, just return NULL without changing state.
*/
if (!ScanDirectionIsForward(direction))
return NULL;
+
/*
- * First call for this scan, so compute limit/offset. (We can't do
- * this any earlier, because parameters from upper nodes may not
- * be set until now.) This also sets position = 0.
+ * First call for this scan, so compute limit/offset. (We
+ * can't do this any earlier, because parameters from upper
+ * nodes may not be set until now.) This also sets position =
+ * 0.
*/
recompute_limits(node);
+
/*
* Check for empty window; if so, treat like empty subplan.
*/
@@ -74,6 +78,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_EMPTY;
return NULL;
}
+
/*
* Fetch rows from subplan until we reach position > offset.
*/
@@ -83,8 +88,8 @@ ExecLimit(LimitState *node)
if (TupIsNull(slot))
{
/*
- * The subplan returns too few tuples for us to produce
- * any output at all.
+ * The subplan returns too few tuples for us to
+ * produce any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
@@ -93,6 +98,7 @@ ExecLimit(LimitState *node)
if (++node->position > node->offset)
break;
}
+
/*
* Okay, we have the first tuple of the window.
*/
@@ -100,9 +106,10 @@ ExecLimit(LimitState *node)
break;
case LIMIT_EMPTY:
+
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
@@ -113,7 +120,8 @@ ExecLimit(LimitState *node)
* Forwards scan, so check for stepping off end of window.
* If we are at the end of the window, return NULL without
* advancing the subplan or the position variable; but
- * change the state machine state to record having done so.
+ * change the state machine state to record having done
+ * so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
@@ -121,6 +129,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_WINDOWEND;
return NULL;
}
+
/*
* Get next tuple from subplan, if any.
*/
@@ -136,14 +145,16 @@ ExecLimit(LimitState *node)
else
{
/*
- * Backwards scan, so check for stepping off start of window.
- * As above, change only state-machine status if so.
+ * Backwards scan, so check for stepping off start of
+ * window. As above, change only state-machine status if
+ * so.
*/
if (node->position <= node->offset + 1)
{
node->lstate = LIMIT_WINDOWSTART;
return NULL;
}
+
/*
* Get previous tuple from subplan; there should be one!
*/
@@ -158,9 +169,11 @@ ExecLimit(LimitState *node)
case LIMIT_SUBPLANEOF:
if (ScanDirectionIsForward(direction))
return NULL;
+
/*
* Backing up from subplan EOF, so re-fetch previous tuple;
- * there should be one! Note previous tuple must be in window.
+ * there should be one! Note previous tuple must be in
+ * window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
@@ -173,9 +186,10 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWEND:
if (ScanDirectionIsForward(direction))
return NULL;
+
/*
- * Backing up from window end: simply re-return the last
- * tuple fetched from the subplan.
+ * Backing up from window end: simply re-return the last tuple
+ * fetched from the subplan.
*/
slot = node->subSlot;
node->lstate = LIMIT_INWINDOW;
@@ -185,6 +199,7 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWSTART:
if (!ScanDirectionIsForward(direction))
return NULL;
+
/*
* Advancing after having backed off window start: simply
* re-return the last tuple fetched from the subplan.
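The nodeLimit.c hunks above walk a small state machine that skips OFFSET rows and stops after COUNT rows. A forward-only standalone sketch of that window logic (backward scans and the LIMIT ALL case are omitted):

    #include <stdio.h>

    /*
     * Forward-only sketch of the OFFSET/LIMIT window: skip the first
     * `offset` rows, emit at most `count` rows, then report end-of-window.
     */
    static void
    run_limit(const int *rows, int nrows, int offset, int count)
    {
        int     position = 0;
        int     emitted = 0;

        for (int i = 0; i < nrows; i++)
        {
            position++;
            if (position <= offset)
                continue;           /* still inside the OFFSET part */
            if (emitted >= count)
            {
                printf("window end reached\n");
                return;
            }
            printf("emit %d\n", rows[i]);
            emitted++;
        }
        printf("subplan exhausted\n");
    }

    int
    main(void)
    {
        int     rows[] = {10, 20, 30, 40, 50, 60};

        run_limit(rows, 6, 2, 3);   /* OFFSET 2 LIMIT 3 -> 30, 40, 50 */
        return 0;
    }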
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 39968c65e0c..afd08e80945 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.42 2003/03/27 16:51:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.43 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,15 +79,15 @@ ExecMaterial(MaterialState *node)
{
/*
* When reversing direction at tuplestore EOF, the first
- * getheaptuple call will fetch the last-added tuple; but
- * we want to return the one before that, if possible.
- * So do an extra fetch.
+ * getheaptuple call will fetch the last-added tuple; but we
+ * want to return the one before that, if possible. So do an
+ * extra fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
&should_free);
if (heapTuple == NULL)
- return NULL; /* the tuplestore must be empty */
+ return NULL; /* the tuplestore must be empty */
if (should_free)
heap_freetuple(heapTuple);
}
@@ -129,10 +129,11 @@ ExecMaterial(MaterialState *node)
}
heapTuple = outerslot->val;
should_free = false;
+
/*
* Append returned tuple to tuplestore, too. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will move
- * forward over the added tuple. This is what we want.
+ * tuplestore is certainly in EOF state, its read position will
+ * move forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
@@ -293,8 +294,8 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
* If subnode is to be rescanned then we forget previous stored
* results; we have to re-read the subplan and re-store.
*
- * Otherwise we can just rewind and rescan the stored output.
- * The state of the subnode does not change.
+ * Otherwise we can just rewind and rescan the stored output. The state
+ * of the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
@@ -303,7 +304,5 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
node->eof_underlying = false;
}
else
- {
tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
- }
}
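The nodeMaterial.c comments above describe storing subplan output in a tuplestore so that a rescan can simply rewind instead of re-running the subplan. A standalone sketch of that caching pattern, with an int array in place of the tuplestore:

    #include <stdio.h>

    /*
     * Sketch of materialization: the first scan pulls rows from the
     * (expensive) source and appends them to a store; a rescan just rewinds
     * the read position instead of re-running the source.
     */
    #define MAXROWS 100

    typedef struct material
    {
        int     store[MAXROWS];
        int     nstored;
        int     readpos;
        int     next_from_source;   /* fake "subplan": yields 1,2,3,... */
        int     source_limit;
    } material;

    static int
    material_next(material *m, int *row)
    {
        if (m->readpos < m->nstored)
        {
            *row = m->store[m->readpos++];  /* already materialized */
            return 1;
        }
        if (m->next_from_source > m->source_limit)
            return 0;                       /* source exhausted */
        *row = m->next_from_source++;
        m->store[m->nstored++] = *row;      /* remember it for rescans */
        m->readpos = m->nstored;
        return 1;
    }

    static void
    material_rescan(material *m)
    {
        m->readpos = 0;             /* cheap: no need to re-run the source */
    }

    int
    main(void)
    {
        material    m = {{0}, 0, 0, 1, 3};
        int         row;

        while (material_next(&m, &row))
            printf("first scan: %d\n", row);
        material_rescan(&m);
        while (material_next(&m, &row))
            printf("rescan: %d\n", row);
        return 0;
    }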
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 57249d7d970..081ce24cb19 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.58 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.59 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
*/
static void
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
- PlanState *parent)
+ PlanState * parent)
{
List *ltexprs,
*gtexprs,
@@ -358,9 +358,9 @@ ExecMergeJoin(MergeJoinState *node)
List *otherqual;
bool qualResult;
bool compareResult;
- PlanState *innerPlan;
+ PlanState *innerPlan;
TupleTableSlot *innerTupleSlot;
- PlanState *outerPlan;
+ PlanState *outerPlan;
TupleTableSlot *outerTupleSlot;
ExprContext *econtext;
bool doFillOuter;
@@ -644,7 +644,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -720,7 +720,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1004,7 +1004,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1181,7 +1181,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1266,7 +1266,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1333,7 +1333,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1462,12 +1462,12 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_LEFT:
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
@@ -1481,10 +1481,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_FULL:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
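The merge-join hunks above set up null tuple slots for LEFT, RIGHT and FULL joins, so unmatched rows can still be emitted with the other side null-filled. A toy left merge join over two sorted int arrays shows that null-filling; duplicate keys and the real node's mark/restore logic are ignored:

    #include <stdio.h>

    /*
     * Sketch of why outer joins need a "null tuple slot": in a LEFT JOIN an
     * outer row with no inner match is still emitted, paired with an
     * all-nulls inner row.  Both inputs must be sorted on the join key.
     */
    static void
    left_merge_join(const int *outer, int nouter, const int *inner, int ninner)
    {
        int     i = 0,
                j = 0;

        while (i < nouter)
        {
            while (j < ninner && inner[j] < outer[i])
                j++;
            if (j < ninner && inner[j] == outer[i])
                printf("(%d, %d)\n", outer[i], inner[j]);
            else
                printf("(%d, NULL)\n", outer[i]);   /* null-filled inner side */
            i++;
        }
    }

    int
    main(void)
    {
        int     outer[] = {1, 3, 5, 7};
        int     inner[] = {3, 4, 7};

        left_merge_join(outer, 4, inner, 3);    /* 1,NULL 3,3 5,NULL 7,7 */
        return 0;
    }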
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 2b69da95e82..66dbd8c063d 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.33 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.34 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,8 +103,8 @@ ExecNestLoop(NestLoopState *node)
/*
* If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched on
- * the previous try.
+ * outer tuple; so we can stop scanning the inner scan if we matched
+ * on the previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
@@ -330,7 +330,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(nlstate)));
+ ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -404,7 +404,7 @@ ExecEndNestLoop(NestLoopState *node)
void
ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
/*
* If outerPlan->chgParam is not null then plan will be automatically
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 9ea75eb3ce7..194ed192169 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.24 2002/12/15 16:17:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.25 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ ExecResult(ResultState *node)
{
TupleTableSlot *outerTupleSlot;
TupleTableSlot *resultSlot;
- PlanState *outerPlan;
+ PlanState *outerPlan;
ExprContext *econtext;
ExprDoneCond isDone;
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 47d2e4eb497..4721fc5bf6a 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.43 2003/02/03 15:07:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.44 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
#include "executor/nodeSeqscan.h"
#include "parser/parsetree.h"
-static void InitScanRelation(SeqScanState *node, EState *estate);
-static TupleTableSlot *SeqNext(SeqScanState *node);
+static void InitScanRelation(SeqScanState * node, EState *estate);
+static TupleTableSlot *SeqNext(SeqScanState * node);
/* ----------------------------------------------------------------
* Scan Support
@@ -43,7 +43,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
* ----------------------------------------------------------------
*/
static TupleTableSlot *
-SeqNext(SeqScanState *node)
+SeqNext(SeqScanState * node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
@@ -123,7 +123,7 @@ SeqNext(SeqScanState *node)
*/
TupleTableSlot *
-ExecSeqScan(SeqScanState *node)
+ExecSeqScan(SeqScanState * node)
{
/*
* use SeqNext as access method
@@ -139,7 +139,7 @@ ExecSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
static void
-InitScanRelation(SeqScanState *node, EState *estate)
+InitScanRelation(SeqScanState * node, EState *estate)
{
Index relid;
List *rangeTable;
@@ -252,7 +252,7 @@ ExecCountSlotsSeqScan(SeqScan *node)
* ----------------------------------------------------------------
*/
void
-ExecEndSeqScan(SeqScanState *node)
+ExecEndSeqScan(SeqScanState * node)
{
Relation relation;
HeapScanDesc scanDesc;
@@ -302,7 +302,7 @@ ExecEndSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
-ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
+ExecSeqReScan(SeqScanState * node, ExprContext *exprCtxt)
{
EState *estate;
Index scanrelid;
@@ -332,7 +332,7 @@ ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
* ----------------------------------------------------------------
*/
void
-ExecSeqMarkPos(SeqScanState *node)
+ExecSeqMarkPos(SeqScanState * node)
{
HeapScanDesc scan;
@@ -347,7 +347,7 @@ ExecSeqMarkPos(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
-ExecSeqRestrPos(SeqScanState *node)
+ExecSeqRestrPos(SeqScanState * node)
{
HeapScanDesc scan;
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index a42e8f18e26..0fe888c803c 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.51 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.52 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,13 +29,13 @@
#include "utils/lsyscache.h"
-static Datum ExecHashSubPlan(SubPlanState *node,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecScanSubPlan(SubPlanState *node,
- ExprContext *econtext,
- bool *isNull);
-static void buildSubPlanHash(SubPlanState *node);
+static Datum ExecHashSubPlan(SubPlanState * node,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecScanSubPlan(SubPlanState * node,
+ ExprContext *econtext,
+ bool *isNull);
+static void buildSubPlanHash(SubPlanState * node);
static bool findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot);
static bool tupleAllNulls(HeapTuple tuple);
@@ -45,11 +45,11 @@ static bool tupleAllNulls(HeapTuple tuple);
* ----------------------------------------------------------------
*/
Datum
-ExecSubPlan(SubPlanState *node,
+ExecSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
if (subplan->setParam != NIL)
elog(ERROR, "cannot set parent params from subquery");
@@ -64,11 +64,11 @@ ExecSubPlan(SubPlanState *node,
* ExecHashSubPlan: store subselect result in an in-memory hash table
*/
static Datum
-ExecHashSubPlan(SubPlanState *node,
+ExecHashSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
ExprContext *innerecontext = node->innerecontext;
TupleTableSlot *slot;
@@ -79,8 +79,8 @@ ExecHashSubPlan(SubPlanState *node,
elog(ERROR, "hashed subplan with direct correlation not supported");
/*
- * If first time through or we need to rescan the subplan, build
- * the hash table.
+ * If first time through or we need to rescan the subplan, build the
+ * hash table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
@@ -94,19 +94,19 @@ ExecHashSubPlan(SubPlanState *node,
return BoolGetDatum(false);
/*
- * Evaluate lefthand expressions and form a projection tuple.
- * First we have to set the econtext to use (hack alert!).
+ * Evaluate lefthand expressions and form a projection tuple. First we
+ * have to set the econtext to use (hack alert!).
*/
node->projLeft->pi_exprContext = econtext;
slot = ExecProject(node->projLeft, NULL);
tup = slot->val;
/*
- * Note: because we are typically called in a per-tuple context,
- * we have to explicitly clear the projected tuple before returning.
- * Otherwise, we'll have a double-free situation: the per-tuple context
- * will probably be reset before we're called again, and then the tuple
- * slot will think it still needs to free the tuple.
+ * Note: because we are typically called in a per-tuple context, we
+ * have to explicitly clear the projected tuple before returning.
+ * Otherwise, we'll have a double-free situation: the per-tuple
+ * context will probably be reset before we're called again, and then
+ * the tuple slot will think it still needs to free the tuple.
*/
/*
@@ -116,20 +116,20 @@ ExecHashSubPlan(SubPlanState *node,
ResetExprContext(innerecontext);
/*
- * If the LHS is all non-null, probe for an exact match in the
- * main hash table. If we find one, the result is TRUE.
- * Otherwise, scan the partly-null table to see if there are any
- * rows that aren't provably unequal to the LHS; if so, the result
- * is UNKNOWN. (We skip that part if we don't care about UNKNOWN.)
- * Otherwise, the result is FALSE.
+ * If the LHS is all non-null, probe for an exact match in the main
+ * hash table. If we find one, the result is TRUE. Otherwise, scan
+ * the partly-null table to see if there are any rows that aren't
+ * provably unequal to the LHS; if so, the result is UNKNOWN. (We
+ * skip that part if we don't care about UNKNOWN.) Otherwise, the
+ * result is FALSE.
*
- * Note: the reason we can avoid a full scan of the main hash table
- * is that the combining operators are assumed never to yield NULL
- * when both inputs are non-null. If they were to do so, we might
- * need to produce UNKNOWN instead of FALSE because of an UNKNOWN
- * result in comparing the LHS to some main-table entry --- which
- * is a comparison we will not even make, unless there's a chance
- * match of hash keys.
+ * Note: the reason we can avoid a full scan of the main hash table is
+ * that the combining operators are assumed never to yield NULL when
+ * both inputs are non-null. If they were to do so, we might need to
+ * produce UNKNOWN instead of FALSE because of an UNKNOWN result in
+ * comparing the LHS to some main-table entry --- which is a
+ * comparison we will not even make, unless there's a chance match of
+ * hash keys.
*/
if (HeapTupleNoNulls(tup))
{
@@ -151,14 +151,14 @@ ExecHashSubPlan(SubPlanState *node,
}
/*
- * When the LHS is partly or wholly NULL, we can never return TRUE.
- * If we don't care about UNKNOWN, just return FALSE. Otherwise,
- * if the LHS is wholly NULL, immediately return UNKNOWN. (Since the
- * combining operators are strict, the result could only be FALSE if the
- * sub-select were empty, but we already handled that case.) Otherwise,
- * we must scan both the main and partly-null tables to see if there are
- * any rows that aren't provably unequal to the LHS; if so, the result is
- * UNKNOWN. Otherwise, the result is FALSE.
+ * When the LHS is partly or wholly NULL, we can never return TRUE. If
+ * we don't care about UNKNOWN, just return FALSE. Otherwise, if the
+ * LHS is wholly NULL, immediately return UNKNOWN. (Since the
+ * combining operators are strict, the result could only be FALSE if
+ * the sub-select were empty, but we already handled that case.)
+ * Otherwise, we must scan both the main and partly-null tables to see
+ * if there are any rows that aren't provably unequal to the LHS; if
+ * so, the result is UNKNOWN. Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
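The comment in the hunk above spells out when a hashed IN returns FALSE versus UNKNOWN once no exact match is found. A three-valued standalone sketch of that rule for a one-column IN list, with a plain array scan in place of the hash tables:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Three-valued sketch of the FALSE-vs-UNKNOWN rule for "x IN (list)":
     * a definite match yields TRUE; with no match, any NULL on either side
     * makes the result UNKNOWN rather than FALSE.  An empty list is FALSE
     * even for a NULL left-hand side.
     */
    typedef enum
    {
        TVL_FALSE,
        TVL_TRUE,
        TVL_UNKNOWN
    } tvl;

    static tvl
    scalar_in(int lhs, bool lhs_is_null,
              const int *vals, const bool *nulls, int n)
    {
        bool    saw_null = false;

        if (lhs_is_null)
            return n > 0 ? TVL_UNKNOWN : TVL_FALSE;

        for (int i = 0; i < n; i++)
        {
            if (nulls[i])
                saw_null = true;
            else if (vals[i] == lhs)
                return TVL_TRUE;
        }
        return saw_null ? TVL_UNKNOWN : TVL_FALSE;
    }

    int
    main(void)
    {
        int     vals[] = {1, 2, 3};
        bool    nonulls[] = {false, false, false};
        bool    onenull[] = {false, true, false};

        printf("%d\n", scalar_in(2, false, vals, nonulls, 3)); /* 1 = TRUE */
        printf("%d\n", scalar_in(9, false, vals, onenull, 3)); /* 2 = UNKNOWN */
        printf("%d\n", scalar_in(9, false, vals, nonulls, 3)); /* 0 = FALSE */
        return 0;
    }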
@@ -194,11 +194,11 @@ ExecHashSubPlan(SubPlanState *node,
* ExecScanSubPlan: default case where we have to rescan subplan each time
*/
static Datum
-ExecScanSubPlan(SubPlanState *node,
+ExecScanSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
SubLinkType subLinkType = subplan->subLinkType;
bool useOr = subplan->useOr;
@@ -218,14 +218,14 @@ ExecScanSubPlan(SubPlanState *node,
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
/*
- * Set Params of this plan from parent plan correlation values.
- * (Any calculation we have to do is done in the parent econtext,
- * since the Param values don't need to have per-query lifetime.)
+ * Set Params of this plan from parent plan correlation values. (Any
+ * calculation we have to do is done in the parent econtext, since the
+ * Param values don't need to have per-query lifetime.)
*/
pvar = node->args;
foreach(lst, subplan->parParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(pvar != NIL);
@@ -241,23 +241,24 @@ ExecScanSubPlan(SubPlanState *node,
ExecReScan(planstate, NULL);
/*
- * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
- * is boolean as are the results of the combining operators. We combine
- * results within a tuple (if there are multiple columns) using OR
- * semantics if "useOr" is true, AND semantics if not. We then combine
- * results across tuples (if the subplan produces more than one) using OR
- * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
- * (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
- * NULL results from the combining operators are handled according to
- * the usual SQL semantics for OR and AND. The result for no input
- * tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
- * MULTIEXPR_SUBLINK.
+ * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
+ * result is boolean as are the results of the combining operators. We
+ * combine results within a tuple (if there are multiple columns)
+ * using OR semantics if "useOr" is true, AND semantics if not. We
+ * then combine results across tuples (if the subplan produces more
+ * than one) using OR semantics for ANY_SUBLINK or AND semantics for
+ * ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
+ * the subplan.) NULL results from the combining operators are handled
+ * according to the usual SQL semantics for OR and AND. The result
+ * for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
+ * NULL for MULTIEXPR_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan
- * to produce more than one tuple. In either case, if zero tuples are
- * produced, we return NULL. Assuming we get a tuple, we just use its
- * first column (there can be only one non-junk column in this case).
+ * tuple, else an error is raised. For ARRAY_SUBLINK we allow the
+ * subplan to produce more than one tuple. In either case, if zero
+ * tuples are produced, we return NULL. Assuming we get a tuple, we
+ * just use its first column (there can be only one non-junk column in
+ * this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
@@ -311,8 +312,8 @@ ExecScanSubPlan(SubPlanState *node,
if (subLinkType == ARRAY_SUBLINK)
{
- Datum dvalue;
- bool disnull;
+ Datum dvalue;
+ bool disnull;
found = true;
/* stash away current value */
@@ -346,7 +347,8 @@ ExecScanSubPlan(SubPlanState *node,
bool expnull;
/*
- * Load up the Param representing this column of the sub-select.
+ * Load up the Param representing this column of the
+ * sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
@@ -432,8 +434,8 @@ ExecScanSubPlan(SubPlanState *node,
{
/*
* deal with empty subplan result. result/isNull were previously
- * initialized correctly for all sublink types except EXPR, ARRAY, and
- * MULTIEXPR; for those, return NULL.
+ * initialized correctly for all sublink types except EXPR, ARRAY,
+ * and MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
@@ -459,9 +461,9 @@ ExecScanSubPlan(SubPlanState *node,
* buildSubPlanHash: load hash table by scanning subplan output.
*/
static void
-buildSubPlanHash(SubPlanState *node)
+buildSubPlanHash(SubPlanState * node)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
int ncols = length(node->exprs);
ExprContext *innerecontext = node->innerecontext;
@@ -474,19 +476,19 @@ buildSubPlanHash(SubPlanState *node)
Assert(!subplan->useOr);
/*
- * If we already had any hash tables, destroy 'em; then create
- * empty hash table(s).
+ * If we already had any hash tables, destroy 'em; then create empty
+ * hash table(s).
*
- * If we need to distinguish accurately between FALSE and UNKNOWN
- * (i.e., NULL) results of the IN operation, then we have to store
- * subplan output rows that are partly or wholly NULL. We store such
- * rows in a separate hash table that we expect will be much smaller
- * than the main table. (We can use hashing to eliminate partly-null
- * rows that are not distinct. We keep them separate to minimize the
- * cost of the inevitable full-table searches; see findPartialMatch.)
+ * If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
+ * NULL) results of the IN operation, then we have to store subplan
+ * output rows that are partly or wholly NULL. We store such rows in
+ * a separate hash table that we expect will be much smaller than the
+ * main table. (We can use hashing to eliminate partly-null rows that
+ * are not distinct. We keep them separate to minimize the cost of
+ * the inevitable full-table searches; see findPartialMatch.)
*
- * If it's not necessary to distinguish FALSE and UNKNOWN, then we
- * don't need to store subplan output rows that contain NULL.
+ * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
+ * need to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@@ -529,7 +531,8 @@ buildSubPlanHash(SubPlanState *node)
/*
* We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for calling ExecProcNode.
+ * Switch to the child plan's per-query context for calling
+ * ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@@ -539,8 +542,9 @@ buildSubPlanHash(SubPlanState *node)
ExecReScan(planstate, NULL);
/*
- * Scan the subplan and load the hash table(s). Note that when there are
- * duplicate rows coming out of the sub-select, only one copy is stored.
+ * Scan the subplan and load the hash table(s). Note that when there
+ * are duplicate rows coming out of the sub-select, only one copy is
+ * stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
@@ -572,9 +576,9 @@ buildSubPlanHash(SubPlanState *node)
/*
* If result contains any nulls, store separately or not at all.
- * (Since we know the projection tuple has no junk columns, we
- * can just look at the overall hasnull info bit, instead of
- * groveling through the columns.)
+ * (Since we know the projection tuple has no junk columns, we can
+ * just look at the overall hasnull info bit, instead of groveling
+ * through the columns.)
*/
if (HeapTupleNoNulls(tup))
{
@@ -621,7 +625,7 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
HeapTuple tuple = slot->val;
TupleDesc tupdesc = slot->ttc_tupleDescriptor;
TupleHashIterator hashiter;
- TupleHashEntry entry;
+ TupleHashEntry entry;
ResetTupleHashIterator(&hashiter);
while ((entry = ScanTupleHashTable(hashtable, &hashiter)) != NULL)
@@ -643,8 +647,8 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
static bool
tupleAllNulls(HeapTuple tuple)
{
- int ncols = tuple->t_data->t_natts;
- int i;
+ int ncols = tuple->t_data->t_natts;
+ int i;
for (i = 1; i <= ncols; i++)
{
@@ -659,15 +663,15 @@ tupleAllNulls(HeapTuple tuple)
* ----------------------------------------------------------------
*/
void
-ExecInitSubPlan(SubPlanState *node, EState *estate)
+ExecInitSubPlan(SubPlanState * node, EState *estate)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *sp_estate;
MemoryContext oldcontext;
/*
- * Do access checking on the rangetable entries in the subquery.
- * Here, we assume the subquery is a SELECT.
+ * Do access checking on the rangetable entries in the subquery. Here,
+ * we assume the subquery is a SELECT.
*/
ExecCheckRTPerms(subplan->rtable, CMD_SELECT);
@@ -690,9 +694,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* create an EState for the subplan
*
* The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access were
- * done differently, the subquery could share our EState, which would
- * eliminate some thrashing about in this module...
+ * It shares our Param ID space, however. XXX if rangetable access
+ * were done differently, the subquery could share our EState, which
+ * would eliminate some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
@@ -721,9 +725,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* to set params for parent plan then mark parameters as needing
* evaluation.
*
- * Note that in the case of un-correlated subqueries we don't care
- * about setting parent->chgParam here: indices take care about
- * it, for others - it doesn't matter...
+ * Note that in the case of un-correlated subqueries we don't care about
+ * setting parent->chgParam here: indices take care about it, for
+ * others - it doesn't matter...
*/
if (subplan->setParam != NIL)
{
@@ -731,7 +735,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
prm->execPlan = node;
@@ -744,8 +748,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
*/
if (subplan->useHashTable)
{
- int ncols,
- i;
+ int ncols,
+ i;
TupleDesc tupDesc;
TupleTable tupTable;
TupleTableSlot *slot;
@@ -768,15 +772,16 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
ncols = length(node->exprs);
node->keyColIdx = (AttrNumber *) palloc(ncols * sizeof(AttrNumber));
for (i = 0; i < ncols; i++)
- node->keyColIdx[i] = i+1;
+ node->keyColIdx[i] = i + 1;
+
/*
* We use ExecProject to evaluate the lefthand and righthand
* expression lists and form tuples. (You might think that we
* could use the sub-select's output tuples directly, but that is
* not the case if we had to insert any run-time coercions of the
* sub-select's output datatypes; anyway this avoids storing any
- * resjunk columns that might be in the sub-select's output.)
- * Run through the combining expressions to build tlists for the
+ * resjunk columns that might be in the sub-select's output.) Run
+ * through the combining expressions to build tlists for the
* lefthand and righthand sides. We need both the ExprState list
* (for ExecProject) and the underlying parse Exprs (for
* ExecTypeFromTL).
@@ -791,7 +796,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
i = 1;
foreach(lexpr, node->exprs)
{
- FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
+ FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
OpExpr *opexpr = (OpExpr *) fstate->xprstate.expr;
ExprState *exstate;
Expr *expr;
@@ -834,34 +839,34 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
rightptlist = lappend(rightptlist, tle);
/* Lookup the combining function */
- fmgr_info(opexpr->opfuncid, &node->eqfunctions[i-1]);
- node->eqfunctions[i-1].fn_expr = (Node *) opexpr;
+ fmgr_info(opexpr->opfuncid, &node->eqfunctions[i - 1]);
+ node->eqfunctions[i - 1].fn_expr = (Node *) opexpr;
/* Lookup the associated hash function */
hashfn = get_op_hash_function(opexpr->opno);
if (!OidIsValid(hashfn))
elog(ERROR, "could not find hash function for hash operator %u",
opexpr->opno);
- fmgr_info(hashfn, &node->hashfunctions[i-1]);
+ fmgr_info(hashfn, &node->hashfunctions[i - 1]);
i++;
}
/*
- * Create a tupletable to hold these tuples. (Note: we never bother
- * to free the tupletable explicitly; that's okay because it will
- * never store raw disk tuples that might have associated buffer
- * pins. The only resource involved is memory, which will be
- * cleaned up by freeing the query context.)
+ * Create a tupletable to hold these tuples. (Note: we never
+ * bother to free the tupletable explicitly; that's okay because
+ * it will never store raw disk tuples that might have associated
+ * buffer pins. The only resource involved is memory, which will
+ * be cleaned up by freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);
/*
* Construct tupdescs, slots and projection nodes for left and
- * right sides. The lefthand expressions will be evaluated in
- * the parent plan node's exprcontext, which we don't have access
- * to here. Fortunately we can just pass NULL for now and fill it
- * in later (hack alert!). The righthand expressions will be
+ * right sides. The lefthand expressions will be evaluated in the
+ * parent plan node's exprcontext, which we don't have access to
+ * here. Fortunately we can just pass NULL for now and fill it in
+ * later (hack alert!). The righthand expressions will be
* evaluated in our own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
@@ -894,11 +899,11 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* ----------------------------------------------------------------
*/
void
-ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
+ExecSetParamPlan(SubPlanState * node, ExprContext *econtext)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
- SubLinkType subLinkType = subplan->subLinkType;
+ SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
List *lst;
@@ -928,7 +933,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -940,8 +945,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == ARRAY_SUBLINK)
{
- Datum dvalue;
- bool disnull;
+ Datum dvalue;
+ bool disnull;
found = true;
/* stash away current value */
@@ -963,8 +968,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
found = true;
/*
- * We need to copy the subplan's tuple into our own context,
- * in case any of the params are pass-by-ref type --- the pointers
+ * We need to copy the subplan's tuple into our own context, in
+ * case any of the params are pass-by-ref type --- the pointers
* stored in the param structs will point at this copied tuple!
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
@@ -981,7 +986,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
*/
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -995,7 +1000,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -1006,7 +1011,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
{
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -1018,7 +1023,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
else if (subLinkType == ARRAY_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(astate != NULL);
@@ -1036,7 +1041,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
* ----------------------------------------------------------------
*/
void
-ExecEndSubPlan(SubPlanState *node)
+ExecEndSubPlan(SubPlanState * node)
{
if (node->needShutdown)
{
@@ -1056,10 +1061,10 @@ ExecEndSubPlan(SubPlanState *node)
* Mark an initplan as needing recalculation
*/
void
-ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
+ExecReScanSetParamPlan(SubPlanState * node, PlanState * parent)
{
PlanState *planstate = node->planstate;
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *estate = parent->state;
List *lst;
@@ -1080,7 +1085,7 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
*/
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
prm->execPlan = node;
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index ba4804fcebb..deec07ae6bb 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.18 2003/02/09 00:30:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.19 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -160,10 +160,11 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate)
Assert(rte->rtekind == RTE_SUBQUERY);
/*
- * The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access were
- * done differently, the subquery could share our EState, which would
- * eliminate some thrashing about in this module...
+ * The subquery needs its own EState because it has its own
+ * rangetable. It shares our Param ID space, however. XXX if
+ * rangetable access were done differently, the subquery could share
+ * our EState, which would eliminate some thrashing about in this
+ * module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
@@ -259,9 +260,9 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well,
- * because the subplan has its own memory context in which its
- * chgParam state lives.
+ * changed-parameter signaling myself. This is just as well, because
+ * the subplan has its own memory context in which its chgParam state
+ * lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index fb012a5cc40..18e172209d1 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.38 2003/02/02 19:08:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.39 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,9 +57,9 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
- * We return the first tuple from each group of duplicates (or the
- * last tuple of each group, when moving backwards). At either end
- * of the subplan, clear priorTuple so that we correctly return the
+ * We return the first tuple from each group of duplicates (or the last
+ * tuple of each group, when moving backwards). At either end of the
+ * subplan, clear priorTuple so that we correctly return the
* first/last tuple when reversing direction.
*/
for (;;)
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 4d554aef1ed..b25f0a79ffa 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.99 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.100 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,8 +95,8 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context,
- * but we may not be inside a portal (consider deferred-trigger
+ * XXX it would be better to use PortalContext as the parent context, but
+ * we may not be inside a portal (consider deferred-trigger
* execution).
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
@@ -799,7 +799,7 @@ SPI_cursor_open(const char *name, void *plan, Datum *Values, const char *Nulls)
*/
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(queryTree),
makeList1(planTree),
PortalGetHeapMemory(portal));
@@ -1007,9 +1007,9 @@ _SPI_execute(const char *src, int tcount, _SPI_plan *plan)
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
- * We save the querytrees from each raw parsetree as a separate
- * sublist. This allows _SPI_execute_plan() to know where the
- * boundaries between original queries fall.
+ * We save the querytrees from each raw parsetree as a separate sublist.
+ * This allows _SPI_execute_plan() to know where the boundaries
+ * between original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
@@ -1136,8 +1136,8 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
foreach(query_list_list_item, query_list_list)
{
- List *query_list = lfirst(query_list_list_item);
- List *query_list_item;
+ List *query_list = lfirst(query_list_list_item);
+ List *query_list_item;
/* Reset state for each original parsetree */
/* (at most one of its querytrees will be marked canSetTag) */
@@ -1148,7 +1148,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
foreach(query_list_item, query_list)
{
- Query *queryTree = (Query *) lfirst(query_list_item);
+ Query *queryTree = (Query *) lfirst(query_list_item);
Plan *planTree;
QueryDesc *qdesc;
DestReceiver *dest;
@@ -1190,10 +1190,10 @@ _SPI_pquery(QueryDesc *queryDesc, bool runit, int tcount)
{
case CMD_SELECT:
res = SPI_OK_SELECT;
- if (queryDesc->parsetree->into != NULL) /* select into table */
+ if (queryDesc->parsetree->into != NULL) /* select into table */
{
res = SPI_OK_SELINTO;
- queryDesc->dest = None_Receiver; /* don't output results */
+ queryDesc->dest = None_Receiver; /* don't output results */
}
break;
case CMD_INSERT:
@@ -1351,7 +1351,7 @@ _SPI_checktuples(void)
SPITupleTable *tuptable = _SPI_current->tuptable;
bool failed = false;
- if (tuptable == NULL) /* spi_dest_startup was not called */
+ if (tuptable == NULL) /* spi_dest_startup was not called */
failed = true;
else if (processed != (tuptable->alloced - tuptable->free))
failed = true;
@@ -1372,7 +1372,8 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+/* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 3d8479faee2..0989eb3e270 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.6 2003/05/08 18:16:36 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.7 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,10 +21,10 @@
typedef struct
{
- DestReceiver pub;
- Tuplestorestate *tstore;
- MemoryContext cxt;
-} TStoreState;
+ DestReceiver pub;
+ Tuplestorestate *tstore;
+ MemoryContext cxt;
+} TStoreState;
/*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 03251beed90..9424070e506 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: stringinfo.c,v 1.34 2003/04/24 21:16:43 tgl Exp $
+ * $Id: stringinfo.c,v 1.35 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ initStringInfo(StringInfo str)
* strcat.
*/
void
-appendStringInfo(StringInfo str, const char *fmt, ...)
+appendStringInfo(StringInfo str, const char *fmt,...)
{
for (;;)
{
@@ -86,7 +86,7 @@ appendStringInfo(StringInfo str, const char *fmt, ...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
@@ -113,9 +113,9 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
return false;
/*
- * Assert check here is to catch buggy vsnprintf that overruns
- * the specified buffer length. Solaris 7 in 64-bit mode is
- * an example of a platform with such a bug.
+ * Assert check here is to catch buggy vsnprintf that overruns the
+ * specified buffer length. Solaris 7 in 64-bit mode is an example of
+ * a platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen - 1] = '\0';
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 9c80651d5fb..9b9ffecbca8 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.108 2003/07/28 06:27:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.109 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -221,8 +221,8 @@ pg_krb5_init(void)
if (retval)
{
ereport(LOG,
- (errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
- PG_KRB_SRVNAM, retval)));
+ (errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
+ PG_KRB_SRVNAM, retval)));
com_err("postgres", retval,
"while getting server principal for service \"%s\"",
PG_KRB_SRVNAM);
@@ -432,7 +432,7 @@ ClientAuthentication(Port *port)
* out the less clueful good guys.
*/
{
- char hostinfo[NI_MAXHOST];
+ char hostinfo[NI_MAXHOST];
getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo),
@@ -441,15 +441,15 @@ ClientAuthentication(Port *port)
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name, port->database_name,
- port->ssl ? gettext("SSL on") : gettext("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name, port->database_name,
+ port->ssl ? gettext("SSL on") : gettext("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
- hostinfo, port->user_name, port->database_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
+ hostinfo, port->user_name, port->database_name)));
#endif
break;
}
@@ -460,7 +460,7 @@ ClientAuthentication(Port *port)
|| port->laddr.addr.ss_family != AF_INET)
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("kerberos 4 only supports IPv4 connections")));
+ errmsg("kerberos 4 only supports IPv4 connections")));
sendAuthRequest(port, AUTH_REQ_KRB4);
status = pg_krb4_recvauth(port);
break;
@@ -492,7 +492,7 @@ ClientAuthentication(Port *port)
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- errmsg("failed to enable credential receipt: %m")));
+ errmsg("failed to enable credential receipt: %m")));
}
#endif
if (port->raddr.addr.ss_family == AF_UNIX)
@@ -755,22 +755,22 @@ recv_password_packet(Port *port)
if (PG_PROTOCOL_MAJOR(port->proto) >= 3)
{
/* Expect 'p' message type */
- int mtype;
+ int mtype;
mtype = pq_getbyte();
if (mtype != 'p')
{
/*
* If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec and
- * in fact commonly done by psql, so complaining just clutters
- * the log.
+ * don't make a log entry. This is legal per protocol spec
+ * and in fact commonly done by psql, so complaining just
+ * clutters the log.
*/
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("expected password response, got msg type %d",
- mtype)));
+ errmsg("expected password response, got msg type %d",
+ mtype)));
return NULL; /* EOF or bad message type */
}
}
@@ -782,7 +782,7 @@ recv_password_packet(Port *port)
}
initStringInfo(&buf);
- if (pq_getmessage(&buf, 1000)) /* receive password */
+ if (pq_getmessage(&buf, 1000)) /* receive password */
{
/* EOF - pq_getmessage already logged a suitable message */
pfree(buf.data);
@@ -804,7 +804,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the
* client's encoding, there wouldn't be much point.
*/
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 00bd01b6aef..5a33712243c 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.66 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.67 2003/08/04 00:43:18 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -372,7 +372,7 @@ lo_import(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_import()"),
+ errmsg("must be superuser to use server-side lo_import()"),
errhint("Anyone can use the client-side lo_import() provided by libpq.")));
#endif
@@ -439,7 +439,7 @@ lo_export(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_export()"),
+ errmsg("must be superuser to use server-side lo_export()"),
errhint("Anyone can use the client-side lo_export() provided by libpq.")));
#endif
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 0bd4f787414..7089b5077a9 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.37 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.38 2003/08/04 00:43:18 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -187,7 +187,6 @@ OvOzKGtwcTqO/1wV5gKkzu1ZVswVUQd5Gg8lJicwqRWyyNRczDDoG9jVDxmogKTH\n\
AaqLulO7R8Ifa1SwF2DteSGVtgWEN8gDpN3RBmmPTDngyF2DHb5qmpnznwtFKdTL\n\
KWbuHn491xNO25CQWMtem80uKw+pTnisBRF/454n1Jnhub144YRBoN8CAQI=\n\
-----END DH PARAMETERS-----\n";
-
#endif
/* ------------------------------------------------------------ */
@@ -258,7 +257,7 @@ secure_read(Port *port, void *ptr, size_t len)
#ifdef USE_SSL
if (port->ssl)
{
- rloop:
+rloop:
n = SSL_read(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@@ -328,7 +327,7 @@ secure_write(Port *port, void *ptr, size_t len)
if (port->ssl->state != SSL_ST_OK)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("SSL failed to send renegotiation request")));
+ errmsg("SSL failed to send renegotiation request")));
port->ssl->state |= SSL_ST_ACCEPT;
SSL_do_handshake(port->ssl);
if (port->ssl->state != SSL_ST_OK)
@@ -338,7 +337,7 @@ secure_write(Port *port, void *ptr, size_t len)
port->count = 0;
}
- wloop:
+wloop:
n = SSL_write(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@@ -436,7 +435,7 @@ load_dh_file(int keylength)
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
- "DH error (%s): neither suitable generator or safe prime",
+ "DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
@@ -620,21 +619,21 @@ initialize_SSL(void)
if (!SSL_CTX_use_certificate_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not load server certificate file \"%s\": %s",
- fnbuf, SSLerrmessage())));
+ errmsg("could not load server certificate file \"%s\": %s",
+ fnbuf, SSLerrmessage())));
snprintf(fnbuf, sizeof(fnbuf), "%s/server.key", DataDir);
if (stat(fnbuf, &buf) == -1)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not access private key file \"%s\": %m",
- fnbuf)));
+ errmsg("could not access private key file \"%s\": %m",
+ fnbuf)));
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
buf.st_uid != getuid())
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsafe permissions on private key file \"%s\"",
- fnbuf),
+ errmsg("unsafe permissions on private key file \"%s\"",
+ fnbuf),
errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\".")));
if (!SSL_CTX_use_PrivateKey_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index c1443e56744..9629a47aa9e 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.54 2003/07/22 19:00:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.55 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,10 @@ md5_crypt_verify(const Port *port, const char *user, char *client_pass)
default:
if (isMD5(shadow_pass))
{
- /* Encrypt user-supplied password to match MD5 in pg_shadow */
+ /*
+ * Encrypt user-supplied password to match MD5 in
+ * pg_shadow
+ */
crypt_client_pass = palloc(MD5_PASSWD_LEN + 1);
if (!EncryptMD5(client_pass,
port->user_name,
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index f9f77cda574..1c42d1a7d43 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.109 2003/08/01 23:24:28 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.110 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -392,7 +392,7 @@ get_group_line(const char *group)
/*
* Lookup a user name in the pg_shadow file
*/
-List **
+List **
get_user_line(const char *user)
{
return (List **) bsearch((void *) user,
@@ -416,7 +416,7 @@ check_group(char *group, char *user)
{
foreach(l, lnext(lnext(*line)))
if (strcmp(lfirst(l), user) == 0)
- return true;
+ return true;
}
return false;
@@ -547,13 +547,14 @@ static void
parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
{
int line_number;
- char *token;
- char *db;
- char *user;
- struct addrinfo *file_ip_addr = NULL, *file_ip_mask = NULL;
- struct addrinfo hints;
- struct sockaddr_storage *mask;
- char *cidr_slash;
+ char *token;
+ char *db;
+ char *user;
+ struct addrinfo *file_ip_addr = NULL,
+ *file_ip_mask = NULL;
+ struct addrinfo hints;
+ struct sockaddr_storage *mask;
+ char *cidr_slash;
int ret;
Assert(line != NIL);
@@ -595,11 +596,11 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
return;
}
else if (strcmp(token, "host") == 0
- || strcmp(token, "hostssl") == 0
- || strcmp(token, "hostnossl") == 0)
+ || strcmp(token, "hostssl") == 0
+ || strcmp(token, "hostnossl") == 0)
{
- if (token[4] == 's') /* "hostssl" */
+ if (token[4] == 's') /* "hostssl" */
{
#ifdef USE_SSL
/* Record does not match if we are not on an SSL connection */
@@ -616,7 +617,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
#endif
}
#ifdef USE_SSL
- else if (token[4] == 'n') /* "hostnossl" */
+ else if (token[4] == 'n') /* "hostnossl" */
{
/* Record does not match if we are on an SSL connection */
if (port->ssl)
@@ -643,7 +644,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
token = lfirst(line);
/* Check if it has a CIDR suffix and if so isolate it */
- cidr_slash = strchr(token,'/');
+ cidr_slash = strchr(token, '/');
if (cidr_slash)
*cidr_slash = '\0';
@@ -698,7 +699,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
if (ret || !file_ip_mask)
goto hba_syntax;
- mask = (struct sockaddr_storage *)file_ip_mask->ai_addr;
+ mask = (struct sockaddr_storage *) file_ip_mask->ai_addr;
if (file_ip_addr->ai_family != mask->ss_family)
goto hba_syntax;
@@ -714,7 +715,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
/* Must meet network restrictions */
if (!rangeSockAddr(&port->raddr.addr,
- (struct sockaddr_storage *)file_ip_addr->ai_addr,
+ (struct sockaddr_storage *) file_ip_addr->ai_addr,
mask))
goto hba_freeaddr;
@@ -743,8 +744,8 @@ hba_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing field in pg_hba.conf file at end of line %d",
- line_number)));
+ errmsg("missing field in pg_hba.conf file at end of line %d",
+ line_number)));
*error_p = true;
@@ -1012,8 +1013,8 @@ ident_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing entry in pg_ident.conf file at end of line %d",
- line_number)));
+ errmsg("missing entry in pg_ident.conf file at end of line %d",
+ line_number)));
*error_p = true;
}
@@ -1044,7 +1045,7 @@ check_ident_usermap(const char *usermap_name,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("cannot use IDENT authentication without usermap field")));
+ errmsg("cannot use IDENT authentication without usermap field")));
found_entry = false;
}
else if (strcmp(usermap_name, "sameuser") == 0)
@@ -1215,11 +1216,13 @@ ident_inet(const SockAddr remote_addr,
char ident_port[NI_MAXSERV];
char ident_query[80];
char ident_response[80 + IDENT_USERNAME_MAX];
- struct addrinfo *ident_serv = NULL, *la = NULL, hints;
+ struct addrinfo *ident_serv = NULL,
+ *la = NULL,
+ hints;
/*
- * Might look a little weird to first convert it to text and
- * then back to sockaddr, but it's protocol independent.
+ * Might look a little weird to first convert it to text and then back
+ * to sockaddr, but it's protocol independent.
*/
getnameinfo_all(&remote_addr.addr, remote_addr.salen,
remote_addr_s, sizeof(remote_addr_s),
@@ -1254,22 +1257,23 @@ ident_inet(const SockAddr remote_addr,
rc = getaddrinfo_all(local_addr_s, NULL, &hints, &la);
if (rc || !la)
return false; /* we don't expect this to happen */
-
+
sock_fd = socket(ident_serv->ai_family, ident_serv->ai_socktype,
ident_serv->ai_protocol);
if (sock_fd < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for IDENT connection: %m")));
+ errmsg("could not create socket for IDENT connection: %m")));
ident_return = false;
goto ident_inet_done;
}
+
/*
* Bind to the address which the client originally contacted,
* otherwise the ident server won't be able to match up the right
- * connection. This is necessary if the PostgreSQL server is
- * running on an IP alias.
+ * connection. This is necessary if the PostgreSQL server is running
+ * on an IP alias.
*/
rc = bind(sock_fd, la->ai_addr, la->ai_addrlen);
if (rc != 0)
@@ -1282,7 +1286,7 @@ ident_inet(const SockAddr remote_addr,
goto ident_inet_done;
}
- rc = connect(sock_fd, ident_serv->ai_addr,
+ rc = connect(sock_fd, ident_serv->ai_addr,
ident_serv->ai_addrlen);
if (rc != 0)
{
@@ -1354,12 +1358,12 @@ ident_unix(int sock, char *ident_user)
{
#if defined(HAVE_GETPEEREID)
/* OpenBSD style: */
- uid_t uid;
- gid_t gid;
+ uid_t uid;
+ gid_t gid;
struct passwd *pass;
errno = 0;
- if (getpeereid(sock,&uid,&gid) != 0)
+ if (getpeereid(sock, &uid, &gid) != 0)
{
/* We didn't get a valid credentials struct. */
ereport(LOG,
@@ -1491,8 +1495,7 @@ ident_unix(int sock, char *ident_user)
return false;
#endif
}
-
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
/*
diff --git a/src/backend/libpq/ip.c b/src/backend/libpq/ip.c
index 8fd3941266c..a17c817c65c 100644
--- a/src/backend/libpq/ip.c
+++ b/src/backend/libpq/ip.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.17 2003/08/01 17:53:41 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.18 2003/08/04 00:43:18 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -34,30 +34,30 @@
#endif
#include <arpa/inet.h>
#include <sys/file.h>
-
#endif
#include "libpq/ip.h"
-static int rangeSockAddrAF_INET(const struct sockaddr_in *addr,
- const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask);
+static int rangeSockAddrAF_INET(const struct sockaddr_in * addr,
+ const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask);
+
#ifdef HAVE_IPV6
-static int rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask);
+static int rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask);
#endif
#ifdef HAVE_UNIX_SOCKETS
-static int getaddrinfo_unix(const char *path,
- const struct addrinfo *hintsp,
- struct addrinfo **result);
-
-static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+static int getaddrinfo_unix(const char *path,
+ const struct addrinfo * hintsp,
+ struct addrinfo ** result);
+
+static int getnameinfo_unix(const struct sockaddr_un * sa, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
#endif
@@ -66,7 +66,7 @@ static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
*/
int
getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp, struct addrinfo **result)
+ const struct addrinfo * hintp, struct addrinfo ** result)
{
#ifdef HAVE_UNIX_SOCKETS
if (hintp != NULL && hintp->ai_family == AF_UNIX)
@@ -89,7 +89,7 @@ getaddrinfo_all(const char *hostname, const char *servname,
* not safe to look at ai_family in the addrinfo itself.
*/
void
-freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
+freeaddrinfo_all(int hint_ai_family, struct addrinfo * ai)
{
#ifdef HAVE_UNIX_SOCKETS
if (hint_ai_family == AF_UNIX)
@@ -123,12 +123,12 @@ freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
* guaranteed to be filled with something even on failure return.
*/
int
-getnameinfo_all(const struct sockaddr_storage *addr, int salen,
+getnameinfo_all(const struct sockaddr_storage * addr, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
- int rc;
+ int rc;
#ifdef HAVE_UNIX_SOCKETS
if (addr && addr->ss_family == AF_UNIX)
@@ -166,8 +166,8 @@ getnameinfo_all(const struct sockaddr_storage *addr, int salen,
* -------
*/
static int
-getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
- struct addrinfo **result)
+getaddrinfo_unix(const char *path, const struct addrinfo * hintsp,
+ struct addrinfo ** result)
{
struct addrinfo hints;
struct addrinfo *aip;
@@ -178,9 +178,7 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
MemSet(&hints, 0, sizeof(hints));
if (strlen(path) >= sizeof(unp->sun_path))
- {
return EAI_FAIL;
- }
if (hintsp == NULL)
{
@@ -234,139 +232,123 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
* Convert an address to a hostname.
*/
static int
-getnameinfo_unix(const struct sockaddr_un *sa, int salen,
+getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
- int ret = -1;
+ int ret = -1;
/* Invalid arguments. */
if (sa == NULL || sa->sun_family != AF_UNIX ||
(node == NULL && service == NULL))
- {
return EAI_FAIL;
- }
/* We don't support those. */
if ((node && !(flags & NI_NUMERICHOST))
|| (service && !(flags & NI_NUMERICSERV)))
- {
return EAI_FAIL;
- }
if (node)
{
ret = snprintf(node, nodelen, "%s", "localhost");
if (ret == -1 || ret > nodelen)
- {
return EAI_MEMORY;
- }
}
if (service)
{
ret = snprintf(service, servicelen, "%s", sa->sun_path);
if (ret == -1 || ret > servicelen)
- {
return EAI_MEMORY;
- }
}
return 0;
}
-
#endif /* HAVE_UNIX_SOCKETS */
int
-rangeSockAddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask)
+rangeSockAddr(const struct sockaddr_storage * addr,
+ const struct sockaddr_storage * netaddr,
+ const struct sockaddr_storage * netmask)
{
if (addr->ss_family == AF_INET)
- return rangeSockAddrAF_INET((struct sockaddr_in *)addr,
- (struct sockaddr_in *)netaddr,
- (struct sockaddr_in *)netmask);
+ return rangeSockAddrAF_INET((struct sockaddr_in *) addr,
+ (struct sockaddr_in *) netaddr,
+ (struct sockaddr_in *) netmask);
#ifdef HAVE_IPV6
else if (addr->ss_family == AF_INET6)
- return rangeSockAddrAF_INET6((struct sockaddr_in6 *)addr,
- (struct sockaddr_in6 *)netaddr,
- (struct sockaddr_in6 *)netmask);
+ return rangeSockAddrAF_INET6((struct sockaddr_in6 *) addr,
+ (struct sockaddr_in6 *) netaddr,
+ (struct sockaddr_in6 *) netmask);
#endif
else
return 0;
}
/*
- * SockAddr_cidr_mask - make a network mask of the appropriate family
- * and required number of significant bits
+ * SockAddr_cidr_mask - make a network mask of the appropriate family
+ * and required number of significant bits
*
* Note: Returns a static pointer for the mask, so it's not thread safe,
- * and a second call will overwrite the data.
+ * and a second call will overwrite the data.
*/
int
-SockAddr_cidr_mask(struct sockaddr_storage **mask, char *numbits, int family)
+SockAddr_cidr_mask(struct sockaddr_storage ** mask, char *numbits, int family)
{
- long bits;
- char *endptr;
-static struct sockaddr_storage sock;
- struct sockaddr_in mask4;
+ long bits;
+ char *endptr;
+ static struct sockaddr_storage sock;
+ struct sockaddr_in mask4;
+
#ifdef HAVE_IPV6
- struct sockaddr_in6 mask6;
+ struct sockaddr_in6 mask6;
#endif
bits = strtol(numbits, &endptr, 10);
if (*numbits == '\0' || *endptr != '\0')
- {
return -1;
- }
if ((bits < 0) || (family == AF_INET && bits > 32)
#ifdef HAVE_IPV6
|| (family == AF_INET6 && bits > 128)
#endif
)
- {
return -1;
- }
*mask = &sock;
switch (family)
{
case AF_INET:
- mask4.sin_addr.s_addr =
+ mask4.sin_addr.s_addr =
htonl((0xffffffffUL << (32 - bits))
- & 0xffffffffUL);
- memcpy(&sock, &mask4, sizeof(mask4));
+ & 0xffffffffUL);
+ memcpy(&sock, &mask4, sizeof(mask4));
break;
#ifdef HAVE_IPV6
case AF_INET6:
- {
- int i;
-
- for (i = 0; i < 16; i++)
{
- if (bits <= 0)
- {
- mask6.sin6_addr.s6_addr[i] = 0;
- }
- else if (bits >= 8)
- {
- mask6.sin6_addr.s6_addr[i] = 0xff;
- }
- else
+ int i;
+
+ for (i = 0; i < 16; i++)
{
- mask6.sin6_addr.s6_addr[i] =
- (0xff << (8 - bits)) & 0xff;
+ if (bits <= 0)
+ mask6.sin6_addr.s6_addr[i] = 0;
+ else if (bits >= 8)
+ mask6.sin6_addr.s6_addr[i] = 0xff;
+ else
+ {
+ mask6.sin6_addr.s6_addr[i] =
+ (0xff << (8 - bits)) & 0xff;
+ }
+ bits -= 8;
}
- bits -= 8;
+ memcpy(&sock, &mask6, sizeof(mask6));
+ break;
}
- memcpy(&sock, &mask6, sizeof(mask6));
- break;
- }
#endif
default:
return -1;
@@ -377,8 +359,8 @@ static struct sockaddr_storage sock;
}
static int
-rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask)
+rangeSockAddrAF_INET(const struct sockaddr_in * addr, const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask)
{
if (((addr->sin_addr.s_addr ^ netaddr->sin_addr.s_addr) &
netmask->sin_addr.s_addr) == 0)
@@ -390,9 +372,9 @@ rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *n
#ifdef HAVE_IPV6
static int
-rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask)
+rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask)
{
int i;
@@ -405,6 +387,5 @@ rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
return 1;
}
-#endif
-
+#endif
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index dbf639fc74f..05adff56a60 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.19 2002/10/03 17:09:41 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.20 2003/08/04 00:43:18 momjian Exp $
*/
@@ -35,8 +35,8 @@
#include "postgres_fe.h"
#ifndef WIN32
#include "libpq/crypt.h"
-#endif /* WIN32 */
-#endif /* FRONTEND */
+#endif /* WIN32 */
+#endif /* FRONTEND */
#ifdef MD5_ODBC
#include "md5.h"
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 2f6d0245bf5..5ae7e1ae2c9 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.161 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.162 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -151,6 +151,7 @@ pq_close(void)
{
/* Cleanly shut down SSL layer */
secure_close(MyProcPort);
+
/*
* Formerly we did an explicit close() here, but it seems better
* to leave the socket open until the process dies. This allows
@@ -208,10 +209,11 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
int maxconn;
int one = 1;
int ret;
- char portNumberStr[64];
- char *service;
- struct addrinfo *addrs = NULL, *addr;
- struct addrinfo hint;
+ char portNumberStr[64];
+ char *service;
+ struct addrinfo *addrs = NULL,
+ *addr;
+ struct addrinfo hint;
int listen_index = 0;
int added = 0;
@@ -245,8 +247,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
hostName, service, gai_strerror(ret))));
else
ereport(LOG,
- (errmsg("could not translate service \"%s\" to address: %s",
- service, gai_strerror(ret))));
+ (errmsg("could not translate service \"%s\" to address: %s",
+ service, gai_strerror(ret))));
freeaddrinfo_all(hint.ai_family, addrs);
return STATUS_ERROR;
}
@@ -255,9 +257,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family))
{
- /* Only set up a unix domain socket when
- * they really asked for it. The service/port
- * is different in that case.
+ /*
+ * Only set up a unix domain socket when they really asked for
+ * it. The service/port is different in that case.
*/
continue;
}
@@ -285,7 +287,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (!IS_AF_UNIX(addr->ai_family))
{
if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
- (char *) &one, sizeof(one))) == -1)
+ (char *) &one, sizeof(one))) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -299,7 +301,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (addr->ai_family == AF_INET6)
{
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
- (char *)&one, sizeof(one)) == -1)
+ (char *) &one, sizeof(one)) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -311,10 +313,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif
/*
- * Note: This might fail on some OS's, like Linux
- * older than 2.4.21-pre3, that don't have the IPV6_V6ONLY
- * socket option, and map ipv4 addresses to ipv6. It will
- * show ::ffff:ipv4 for all ipv4 connections.
+ * Note: This might fail on some OS's, like Linux older than
+ * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and
+ * map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all
+ * ipv4 connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
if (err < 0)
@@ -323,12 +325,12 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
(errcode_for_socket_access(),
errmsg("failed to bind server socket: %m"),
(IS_AF_UNIX(addr->ai_family)) ?
- errhint("Is another postmaster already running on port %d?"
- " If not, remove socket node \"%s\" and retry.",
- (int) portNumber, sock_path) :
- errhint("Is another postmaster already running on port %d?"
- " If not, wait a few seconds and retry.",
- (int) portNumber)));
+ errhint("Is another postmaster already running on port %d?"
+ " If not, remove socket node \"%s\" and retry.",
+ (int) portNumber, sock_path) :
+ errhint("Is another postmaster already running on port %d?"
+ " If not, wait a few seconds and retry.",
+ (int) portNumber)));
closesocket(fd);
continue;
}
@@ -345,10 +347,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif
/*
- * Select appropriate accept-queue length limit. PG_SOMAXCONN
- * is only intended to provide a clamp on the request on
- * platforms where an overly large request provokes a kernel
- * error (are there any?).
+ * Select appropriate accept-queue length limit. PG_SOMAXCONN is
+ * only intended to provide a clamp on the request on platforms
+ * where an overly large request provokes a kernel error (are
+ * there any?).
*/
maxconn = MaxBackends * 2;
if (maxconn > PG_SOMAXCONN)
@@ -465,7 +467,6 @@ Setup_AF_UNIX(void)
}
return STATUS_OK;
}
-
#endif /* HAVE_UNIX_SOCKETS */
@@ -485,8 +486,8 @@ StreamConnection(int server_fd, Port *port)
/* accept connection and fill in the client (remote) address */
port->raddr.salen = sizeof(port->raddr.addr);
if ((port->sock = accept(server_fd,
- (struct sockaddr *) &port->raddr.addr,
- &port->raddr.salen)) < 0)
+ (struct sockaddr *) & port->raddr.addr,
+ &port->raddr.salen)) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -495,6 +496,7 @@ StreamConnection(int server_fd, Port *port)
}
#ifdef SCO_ACCEPT_BUG
+
/*
* UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
* shouldn't hurt to catch it for all versions of those platforms.
@@ -571,19 +573,19 @@ TouchSocketFile(void)
if (sock_path[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative.
- * If we have neither, there's no way to affect the mod or access
+ * utime() is POSIX standard, utimes() is a common alternative. If
+ * we have neither, there's no way to affect the mod or access
* time of the socket :-(
*
* In either path, we ignore errors; there's no point in complaining.
*/
#ifdef HAVE_UTIME
utime(sock_path, NULL);
-#else /* !HAVE_UTIME */
+#else /* !HAVE_UTIME */
#ifdef HAVE_UTIMES
utimes(sock_path, NULL);
-#endif /* HAVE_UTIMES */
-#endif /* HAVE_UTIME */
+#endif /* HAVE_UTIMES */
+#endif /* HAVE_UTIME */
}
}
@@ -634,9 +636,10 @@ pq_recvbuf(void)
continue; /* Ok if interrupted */
/*
- * Careful: an ereport() that tries to write to the client would
- * cause recursion to here, leading to stack overflow and core
- * dump! This message must go *only* to the postmaster log.
+ * Careful: an ereport() that tries to write to the client
+ * would cause recursion to here, leading to stack overflow
+ * and core dump! This message must go *only* to the
+ * postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
@@ -646,8 +649,8 @@ pq_recvbuf(void)
if (r == 0)
{
/*
- * EOF detected. We used to write a log message here, but it's
- * better to expect the ultimate caller to do that.
+ * EOF detected. We used to write a log message here, but
+ * it's better to expect the ultimate caller to do that.
*/
return EOF;
}
@@ -894,9 +897,10 @@ pq_flush(void)
continue; /* Ok if we were interrupted */
/*
- * Careful: an ereport() that tries to write to the client would
- * cause recursion to here, leading to stack overflow and core
- * dump! This message must go *only* to the postmaster log.
+ * Careful: an ereport() that tries to write to the client
+ * would cause recursion to here, leading to stack overflow
+ * and core dump! This message must go *only* to the
+ * postmaster log.
*
* If a client disconnects while we're in the midst of output, we
* might write quite a bit of data before we get to a safe
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 8b04dbe7055..d5b1baded29 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.32 2003/07/22 19:00:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.33 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@
* pq_getmsgbyte - get a raw byte from a message buffer
* pq_getmsgint - get a binary integer from a message buffer
* pq_getmsgint64 - get a binary 8-byte int from a message buffer
- * pq_getmsgfloat4 - get a float4 from a message buffer
- * pq_getmsgfloat8 - get a float8 from a message buffer
+ * pq_getmsgfloat4 - get a float4 from a message buffer
+ * pq_getmsgfloat8 - get a float8 from a message buffer
* pq_getmsgbytes - get raw data from a message buffer
- * pq_copymsgbytes - copy raw data from a message buffer
+ * pq_copymsgbytes - copy raw data from a message buffer
* pq_getmsgtext - get a counted text string (with conversion)
- * pq_getmsgstring - get a null-terminated text string (with conversion)
+ * pq_getmsgstring - get a null-terminated text string (with conversion)
* pq_getmsgend - verify message fully consumed
*/
@@ -90,10 +90,12 @@ void
pq_beginmessage(StringInfo buf, char msgtype)
{
initStringInfo(buf);
+
/*
* We stash the message type into the buffer's cursor field, expecting
- * that the pq_sendXXX routines won't touch it. We could alternatively
- * make it the first byte of the buffer contents, but this seems easier.
+ * that the pq_sendXXX routines won't touch it. We could
+ * alternatively make it the first byte of the buffer contents, but
+ * this seems easier.
*/
buf->cursor = msgtype;
}
@@ -122,7 +124,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
@@ -173,9 +175,7 @@ pq_sendtext(StringInfo buf, const char *str, int slen)
pfree(p);
}
else
- {
appendBinaryStringInfo(buf, str, slen);
- }
}
/* --------------------------------
@@ -200,9 +200,7 @@ pq_sendstring(StringInfo buf, const char *str)
pfree(p);
}
else
- {
appendBinaryStringInfo(buf, str, slen + 1);
- }
}
/* --------------------------------
@@ -281,9 +279,9 @@ pq_sendfloat4(StringInfo buf, float4 f)
{
union
{
- float4 f;
- uint32 i;
- } swap;
+ float4 f;
+ uint32 i;
+ } swap;
swap.f = f;
swap.i = htonl(swap.i);
@@ -308,9 +306,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#ifdef INT64_IS_BUSTED
union
{
- float8 f;
- uint32 h[2];
- } swap;
+ float8 f;
+ uint32 h[2];
+ } swap;
swap.f = f;
swap.h[0] = htonl(swap.h[0]);
@@ -332,9 +330,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#else
union
{
- float8 f;
- int64 i;
- } swap;
+ float8 f;
+ int64 i;
+ } swap;
swap.f = f;
pq_sendint64(buf, swap.i);
@@ -515,7 +513,7 @@ pq_getmsgint64(StringInfo msg)
}
/* --------------------------------
- * pq_getmsgfloat4 - get a float4 from a message buffer
+ * pq_getmsgfloat4 - get a float4 from a message buffer
*
* See notes for pq_sendfloat4.
* --------------------------------
@@ -525,16 +523,16 @@ pq_getmsgfloat4(StringInfo msg)
{
union
{
- float4 f;
- uint32 i;
- } swap;
+ float4 f;
+ uint32 i;
+ } swap;
swap.i = pq_getmsgint(msg, 4);
return swap.f;
}
/* --------------------------------
- * pq_getmsgfloat8 - get a float8 from a message buffer
+ * pq_getmsgfloat8 - get a float8 from a message buffer
*
* See notes for pq_sendfloat8.
* --------------------------------
@@ -545,9 +543,9 @@ pq_getmsgfloat8(StringInfo msg)
#ifdef INT64_IS_BUSTED
union
{
- float8 f;
- uint32 h[2];
- } swap;
+ float8 f;
+ uint32 h[2];
+ } swap;
/* Have to figure out endianness by testing... */
if (((uint32) 1) == htonl((uint32) 1))
@@ -566,9 +564,9 @@ pq_getmsgfloat8(StringInfo msg)
#else
union
{
- float8 f;
- int64 i;
- } swap;
+ float8 f;
+ int64 i;
+ } swap;
swap.i = pq_getmsgint64(msg);
return swap.f;
@@ -597,7 +595,7 @@ pq_getmsgbytes(StringInfo msg, int datalen)
}
/* --------------------------------
- * pq_copymsgbytes - copy raw data from a message buffer
+ * pq_copymsgbytes - copy raw data from a message buffer
*
* Same as above, except data is copied to caller's buffer.
* --------------------------------
@@ -623,8 +621,8 @@ pq_copymsgbytes(StringInfo msg, char *buf, int datalen)
char *
pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
{
- char *str;
- char *p;
+ char *str;
+ char *p;
if (rawbytes < 0 || rawbytes > (msg->len - msg->cursor))
ereport(ERROR,
@@ -635,9 +633,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
p = (char *) pg_client_to_server((unsigned char *) str, rawbytes);
if (p != str) /* actual conversion has been done? */
- {
*nbytes = strlen(p);
- }
else
{
p = (char *) palloc(rawbytes + 1);
@@ -649,7 +645,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
}
/* --------------------------------
- * pq_getmsgstring - get a null-terminated text string (with conversion)
+ * pq_getmsgstring - get a null-terminated text string (with conversion)
*
* May return a pointer directly into the message buffer, or a pointer
* to a palloc'd conversion result.
@@ -658,14 +654,15 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
const char *
pq_getmsgstring(StringInfo msg)
{
- char *str;
- int slen;
+ char *str;
+ int slen;
str = &msg->data[msg->cursor];
+
/*
* It's safe to use strlen() here because a StringInfo is guaranteed
- * to have a trailing null byte. But check we found a null inside
- * the message.
+ * to have a trailing null byte. But check we found a null inside the
+ * message.
*/
slen = strlen(str);
if (msg->cursor + slen >= msg->len)
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 862194b17d5..fcc6ea5a7e5 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.59 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.60 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,6 +163,7 @@ main(int argc, char *argv[])
{
#ifndef WIN32
#ifndef __BEOS__
+
/*
* Make sure we are not running as root.
*
@@ -175,8 +176,8 @@ main(int argc, char *argv[])
gettext("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user id to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n"
- ));
+ "more information on how to properly start the server.\n"
+ ));
exit(1);
}
#endif /* !__BEOS__ */
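This hunk only rewraps the message printed when the server is started as root; the guarding test itself (not shown here) is an effective-uid check. A hedged sketch of that kind of guard, with a hypothetical helper name and a simplified message:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
refuse_root_execution(const char *progname)
{
	if (geteuid() == 0)			/* effective uid 0 means root privileges */
	{
		fprintf(stderr, "%s: refusing to run with root privileges\n", progname);
		exit(1);
	}
}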
@@ -193,16 +194,16 @@ main(int argc, char *argv[])
if (getuid() != geteuid())
{
fprintf(stderr,
- gettext("%s: real and effective user ids must match\n"),
+ gettext("%s: real and effective user ids must match\n"),
argv[0]);
exit(1);
}
-#endif /* !WIN32 */
+#endif /* !WIN32 */
}
/*
- * Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain, or
- * BootstrapMain depending on the program name (and possibly first
+ * Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain,
+ * or BootstrapMain depending on the program name (and possibly first
* argument) we were called with. The lack of consistency here is
* historical.
*/
@@ -223,8 +224,8 @@ main(int argc, char *argv[])
/*
* If the first argument is "--help-config", then invoke runtime
- * configuration option display mode.
- * We remove "--help-config" from the arguments passed on to GucInfoMain.
+ * configuration option display mode. We remove "--help-config" from
+ * the arguments passed on to GucInfoMain.
*/
if (argc > 1 && strcmp(new_argv[1], "--help-config") == 0)
exit(GucInfoMain(argc - 1, new_argv + 1));
@@ -246,7 +247,7 @@ main(int argc, char *argv[])
pw_name_persist = strdup(pw->pw_name);
#else
{
- long namesize = 256 /* UNLEN */ + 1;
+ long namesize = 256 /* UNLEN */ + 1;
pw_name_persist = malloc(namesize);
if (!GetUserName(pw_name_persist, &namesize))
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index c4576cf3b3d..e444f449e19 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -14,7 +14,7 @@
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.3 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.4 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
@@ -65,41 +65,41 @@
*/
static const uint8 rightmost_one_pos[256] = {
- 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
+ 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
static const uint8 number_of_ones[256] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
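The block comment near the top of this file's diff explains why x & (-x) isolates the rightmost set bit; RIGHTMOST_ONE applies that identity and rightmost_one_pos[] then maps the isolated bit to a position one byte at a time. A small self-checking illustration of the identity (example values only):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t	x = 0xb0;					/* binary ...10110000 */
	uint32_t	rightmost = x & (~x + 1);	/* same value as x & (-x) */

	assert(rightmost == 0x10);				/* bit 4 is the rightmost one set */
	return 0;
}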
@@ -107,7 +107,7 @@ static const uint8 number_of_ones[256] = {
* bms_copy - make a palloc'd copy of a bitmapset
*/
Bitmapset *
-bms_copy(const Bitmapset *a)
+bms_copy(const Bitmapset * a)
{
Bitmapset *result;
size_t size;
@@ -127,7 +127,7 @@ bms_copy(const Bitmapset *a)
* be reported as equal to a palloc'd value containing no members.
*/
bool
-bms_equal(const Bitmapset *a, const Bitmapset *b)
+bms_equal(const Bitmapset * a, const Bitmapset * b)
{
const Bitmapset *shorter;
const Bitmapset *longer;
@@ -143,9 +143,7 @@ bms_equal(const Bitmapset *a, const Bitmapset *b)
return bms_is_empty(b);
}
else if (b == NULL)
- {
return bms_is_empty(a);
- }
/* Identify shorter and longer input */
if (a->nwords <= b->nwords)
{
@@ -199,7 +197,7 @@ bms_make_singleton(int x)
* Same as pfree except for allowing NULL input
*/
void
-bms_free(Bitmapset *a)
+bms_free(Bitmapset * a)
{
if (a)
pfree(a);
@@ -216,7 +214,7 @@ bms_free(Bitmapset *a)
* bms_union - set union
*/
Bitmapset *
-bms_union(const Bitmapset *a, const Bitmapset *b)
+bms_union(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -242,9 +240,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
return result;
}
@@ -252,7 +248,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
* bms_intersect - set intersection
*/
Bitmapset *
-bms_intersect(const Bitmapset *a, const Bitmapset *b)
+bms_intersect(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -276,9 +272,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
/* And intersect the longer input with the result */
resultlen = result->nwords;
for (i = 0; i < resultlen; i++)
- {
result->words[i] &= other->words[i];
- }
return result;
}
@@ -286,7 +280,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
* bms_difference - set difference (ie, A without members of B)
*/
Bitmapset *
-bms_difference(const Bitmapset *a, const Bitmapset *b)
+bms_difference(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
int shortlen;
@@ -302,9 +296,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
/* And remove b's bits from result */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
- result->words[i] &= ~ b->words[i];
- }
+ result->words[i] &= ~b->words[i];
return result;
}
@@ -312,7 +304,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
* bms_is_subset - is A a subset of B?
*/
bool
-bms_is_subset(const Bitmapset *a, const Bitmapset *b)
+bms_is_subset(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int longlen;
@@ -327,7 +319,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
- if ((a->words[i] & ~ b->words[i]) != 0)
+ if ((a->words[i] & ~b->words[i]) != 0)
return false;
}
/* Check extra words */
@@ -347,7 +339,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
* bms_is_member - is X a member of A?
*/
bool
-bms_is_member(int x, const Bitmapset *a)
+bms_is_member(int x, const Bitmapset * a)
{
int wordnum,
bitnum;
@@ -370,7 +362,7 @@ bms_is_member(int x, const Bitmapset *a)
* bms_overlap - do sets overlap (ie, have a nonempty intersection)?
*/
bool
-bms_overlap(const Bitmapset *a, const Bitmapset *b)
+bms_overlap(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -392,7 +384,7 @@ bms_overlap(const Bitmapset *a, const Bitmapset *b)
* bms_nonempty_difference - do sets have a nonempty difference?
*/
bool
-bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
+bms_nonempty_difference(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -406,7 +398,7 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
- if ((a->words[i] & ~ b->words[i]) != 0)
+ if ((a->words[i] & ~b->words[i]) != 0)
return true;
}
/* Check extra words in a */
@@ -424,11 +416,11 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
* Raises error if |a| is not 1.
*/
int
-bms_singleton_member(const Bitmapset *a)
+bms_singleton_member(const Bitmapset * a)
{
- int result = -1;
- int nwords;
- int wordnum;
+ int result = -1;
+ int nwords;
+ int wordnum;
if (a == NULL)
elog(ERROR, "bitmapset is empty");
@@ -459,11 +451,11 @@ bms_singleton_member(const Bitmapset *a)
* bms_num_members - count members of set
*/
int
-bms_num_members(const Bitmapset *a)
+bms_num_members(const Bitmapset * a)
{
- int result = 0;
- int nwords;
- int wordnum;
+ int result = 0;
+ int nwords;
+ int wordnum;
if (a == NULL)
return 0;
@@ -488,11 +480,11 @@ bms_num_members(const Bitmapset *a)
* This is faster than making an exact count with bms_num_members().
*/
BMS_Membership
-bms_membership(const Bitmapset *a)
+bms_membership(const Bitmapset * a)
{
BMS_Membership result = BMS_EMPTY_SET;
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return BMS_EMPTY_SET;
@@ -517,10 +509,10 @@ bms_membership(const Bitmapset *a)
* This is even faster than bms_membership().
*/
bool
-bms_is_empty(const Bitmapset *a)
+bms_is_empty(const Bitmapset * a)
{
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return true;
@@ -552,7 +544,7 @@ bms_is_empty(const Bitmapset *a)
* Input set is modified or recycled!
*/
Bitmapset *
-bms_add_member(Bitmapset *a, int x)
+bms_add_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@@ -573,9 +565,7 @@ bms_add_member(Bitmapset *a, int x)
result = bms_make_singleton(x);
nwords = a->nwords;
for (i = 0; i < nwords; i++)
- {
result->words[i] |= a->words[i];
- }
pfree(a);
return result;
}
@@ -592,7 +582,7 @@ bms_add_member(Bitmapset *a, int x)
* Input set is modified in-place!
*/
Bitmapset *
-bms_del_member(Bitmapset *a, int x)
+bms_del_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@@ -604,9 +594,7 @@ bms_del_member(Bitmapset *a, int x)
wordnum = WORDNUM(x);
bitnum = BITNUM(x);
if (wordnum < a->nwords)
- {
- a->words[wordnum] &= ~ ((bitmapword) 1 << bitnum);
- }
+ a->words[wordnum] &= ~((bitmapword) 1 << bitnum);
return a;
}
@@ -614,7 +602,7 @@ bms_del_member(Bitmapset *a, int x)
* bms_add_members - like bms_union, but left input is recycled
*/
Bitmapset *
-bms_add_members(Bitmapset *a, const Bitmapset *b)
+bms_add_members(Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -640,9 +628,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
if (result != a)
pfree(a);
return result;
@@ -652,7 +638,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
* bms_int_members - like bms_intersect, but left input is recycled
*/
Bitmapset *
-bms_int_members(Bitmapset *a, const Bitmapset *b)
+bms_int_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -668,13 +654,9 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
/* Intersect b into a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
a->words[i] &= b->words[i];
- }
for (; i < a->nwords; i++)
- {
a->words[i] = 0;
- }
return a;
}
@@ -682,7 +664,7 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
* bms_del_members - like bms_difference, but left input is recycled
*/
Bitmapset *
-bms_del_members(Bitmapset *a, const Bitmapset *b)
+bms_del_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -695,9 +677,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
/* Remove b's bits from a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
- a->words[i] &= ~ b->words[i];
- }
+ a->words[i] &= ~b->words[i];
return a;
}
@@ -705,7 +685,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
* bms_join - like bms_union, but *both* inputs are recycled
*/
Bitmapset *
-bms_join(Bitmapset *a, Bitmapset *b)
+bms_join(Bitmapset * a, Bitmapset * b)
{
Bitmapset *result;
Bitmapset *other;
@@ -731,9 +711,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
if (other != result) /* pure paranoia */
pfree(other);
return result;
@@ -742,24 +720,22 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
*
* tmpset = bms_copy(inputset);
* while ((x = bms_first_member(tmpset)) >= 0)
- * {
* process member x;
- * }
* bms_free(tmpset);
*----------
*/
int
-bms_first_member(Bitmapset *a)
+bms_first_member(Bitmapset * a)
{
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return -1;
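The block comment above spells out the intended calling pattern for bms_first_member(); written out as a complete fragment (assuming the bms_* functions declared in nodes/bitmapset.h), it looks like this:

#include "postgres.h"
#include "nodes/bitmapset.h"

static void
print_members(const Bitmapset *inputset)
{
	Bitmapset  *tmpset = bms_copy(inputset);	/* copy first: iteration destroys the set */
	int			x;

	while ((x = bms_first_member(tmpset)) >= 0)
		printf(" %d", x);		/* "process member x" */
	bms_free(tmpset);
}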
@@ -770,10 +746,10 @@ bms_first_member(Bitmapset *a)
if (w != 0)
{
- int result;
+ int result;
w = RIGHTMOST_ONE(w);
- a->words[wordnum] &= ~ w;
+ a->words[wordnum] &= ~w;
result = wordnum * BITS_PER_BITMAPWORD;
while ((w & 255) == 0)
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 6945e98d5d7..03349efdc74 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.260 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.261 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
@@ -639,7 +639,7 @@ _copyRangeVar(RangeVar *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
@@ -755,9 +755,9 @@ _copyArrayRef(ArrayRef *from)
* _copyFuncExpr
*/
static FuncExpr *
-_copyFuncExpr(FuncExpr *from)
+_copyFuncExpr(FuncExpr * from)
{
- FuncExpr *newnode = makeNode(FuncExpr);
+ FuncExpr *newnode = makeNode(FuncExpr);
COPY_SCALAR_FIELD(funcid);
COPY_SCALAR_FIELD(funcresulttype);
@@ -772,7 +772,7 @@ _copyFuncExpr(FuncExpr *from)
* _copyOpExpr
*/
static OpExpr *
-_copyOpExpr(OpExpr *from)
+_copyOpExpr(OpExpr * from)
{
OpExpr *newnode = makeNode(OpExpr);
@@ -789,9 +789,9 @@ _copyOpExpr(OpExpr *from)
* _copyDistinctExpr (same as OpExpr)
*/
static DistinctExpr *
-_copyDistinctExpr(DistinctExpr *from)
+_copyDistinctExpr(DistinctExpr * from)
{
- DistinctExpr *newnode = makeNode(DistinctExpr);
+ DistinctExpr *newnode = makeNode(DistinctExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -806,9 +806,9 @@ _copyDistinctExpr(DistinctExpr *from)
* _copyScalarArrayOpExpr
*/
static ScalarArrayOpExpr *
-_copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
+_copyScalarArrayOpExpr(ScalarArrayOpExpr * from)
{
- ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);
+ ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -822,9 +822,9 @@ _copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
* _copyBoolExpr
*/
static BoolExpr *
-_copyBoolExpr(BoolExpr *from)
+_copyBoolExpr(BoolExpr * from)
{
- BoolExpr *newnode = makeNode(BoolExpr);
+ BoolExpr *newnode = makeNode(BoolExpr);
COPY_SCALAR_FIELD(boolop);
COPY_NODE_FIELD(args);
@@ -940,9 +940,9 @@ _copyCaseWhen(CaseWhen *from)
* _copyArrayExpr
*/
static ArrayExpr *
-_copyArrayExpr(ArrayExpr *from)
+_copyArrayExpr(ArrayExpr * from)
{
- ArrayExpr *newnode = makeNode(ArrayExpr);
+ ArrayExpr *newnode = makeNode(ArrayExpr);
COPY_SCALAR_FIELD(array_typeid);
COPY_SCALAR_FIELD(element_typeid);
@@ -956,7 +956,7 @@ _copyArrayExpr(ArrayExpr *from)
* _copyCoalesceExpr
*/
static CoalesceExpr *
-_copyCoalesceExpr(CoalesceExpr *from)
+_copyCoalesceExpr(CoalesceExpr * from)
{
CoalesceExpr *newnode = makeNode(CoalesceExpr);
@@ -970,9 +970,9 @@ _copyCoalesceExpr(CoalesceExpr *from)
* _copyNullIfExpr (same as OpExpr)
*/
static NullIfExpr *
-_copyNullIfExpr(NullIfExpr *from)
+_copyNullIfExpr(NullIfExpr * from)
{
- NullIfExpr *newnode = makeNode(NullIfExpr);
+ NullIfExpr *newnode = makeNode(NullIfExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -1015,7 +1015,7 @@ _copyBooleanTest(BooleanTest *from)
* _copyCoerceToDomain
*/
static CoerceToDomain *
-_copyCoerceToDomain(CoerceToDomain *from)
+_copyCoerceToDomain(CoerceToDomain * from)
{
CoerceToDomain *newnode = makeNode(CoerceToDomain);
@@ -1031,7 +1031,7 @@ _copyCoerceToDomain(CoerceToDomain *from)
* _copyCoerceToDomainValue
*/
static CoerceToDomainValue *
-_copyCoerceToDomainValue(CoerceToDomainValue *from)
+_copyCoerceToDomainValue(CoerceToDomainValue * from)
{
CoerceToDomainValue *newnode = makeNode(CoerceToDomainValue);
@@ -1045,7 +1045,7 @@ _copyCoerceToDomainValue(CoerceToDomainValue *from)
* _copySetToDefault
*/
static SetToDefault *
-_copySetToDefault(SetToDefault *from)
+_copySetToDefault(SetToDefault * from)
{
SetToDefault *newnode = makeNode(SetToDefault);
@@ -1148,7 +1148,7 @@ _copyRestrictInfo(RestrictInfo *from)
COPY_NODE_FIELD(clause);
COPY_SCALAR_FIELD(ispusheddown);
- COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
+ COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
COPY_SCALAR_FIELD(eval_cost);
COPY_SCALAR_FIELD(this_selec);
COPY_BITMAPSET_FIELD(left_relids);
@@ -1191,7 +1191,7 @@ _copyJoinInfo(JoinInfo *from)
* _copyInClauseInfo
*/
static InClauseInfo *
-_copyInClauseInfo(InClauseInfo *from)
+_copyInClauseInfo(InClauseInfo * from)
{
InClauseInfo *newnode = makeNode(InClauseInfo);
@@ -1532,9 +1532,9 @@ _copyQuery(Query *from)
/*
* We do not copy the other planner internal fields: base_rel_list,
- * other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
- * That would get us into copying RelOptInfo/Path trees, which we don't
- * want to do. It is necessary to copy in_info_list and hasJoinRTEs
+ * other_rel_list, join_rel_list, equi_key_list, query_pathkeys. That
+ * would get us into copying RelOptInfo/Path trees, which we don't
+ * want to do. It is necessary to copy in_info_list and hasJoinRTEs
* for the benefit of inheritance_planner(), which may try to copy a
* Query in which these are already set.
*/
@@ -1633,7 +1633,7 @@ _copyAlterTableStmt(AlterTableStmt *from)
}
static AlterDomainStmt *
-_copyAlterDomainStmt(AlterDomainStmt *from)
+_copyAlterDomainStmt(AlterDomainStmt * from)
{
AlterDomainStmt *newnode = makeNode(AlterDomainStmt);
@@ -1644,7 +1644,7 @@ _copyAlterDomainStmt(AlterDomainStmt *from)
COPY_SCALAR_FIELD(behavior);
return newnode;
-}
+}
static GrantStmt *
_copyGrantStmt(GrantStmt *from)
@@ -1685,7 +1685,7 @@ _copyFuncWithArgs(FuncWithArgs *from)
}
static DeclareCursorStmt *
-_copyDeclareCursorStmt(DeclareCursorStmt *from)
+_copyDeclareCursorStmt(DeclareCursorStmt * from)
{
DeclareCursorStmt *newnode = makeNode(DeclareCursorStmt);
@@ -1747,7 +1747,7 @@ _copyCreateStmt(CreateStmt *from)
}
static InhRelation *
-_copyInhRelation(InhRelation *from)
+_copyInhRelation(InhRelation * from)
{
InhRelation *newnode = makeNode(InhRelation);
@@ -2118,7 +2118,7 @@ _copyCreateSeqStmt(CreateSeqStmt *from)
}
static AlterSeqStmt *
-_copyAlterSeqStmt(AlterSeqStmt *from)
+_copyAlterSeqStmt(AlterSeqStmt * from)
{
AlterSeqStmt *newnode = makeNode(AlterSeqStmt);
@@ -2171,7 +2171,7 @@ _copyCreateTrigStmt(CreateTrigStmt *from)
COPY_NODE_FIELD(args);
COPY_SCALAR_FIELD(before);
COPY_SCALAR_FIELD(row);
- strcpy(newnode->actions, from->actions); /* in-line string field */
+ strcpy(newnode->actions, from->actions); /* in-line string field */
COPY_SCALAR_FIELD(isconstraint);
COPY_SCALAR_FIELD(deferrable);
COPY_SCALAR_FIELD(initdeferred);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 513c17b048c..924793d07ec 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,14 +11,14 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either. This might need to be fixed someday.
*
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.204 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.205 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,8 +32,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
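The comparison side mirrors the copy side: each _equalXxx routine is a list of COMPARE_*_FIELD lines over 'a' and 'b', and any mismatch returns false out of the routine. A sketch of the shape of the scalar macro (again illustrative rather than quoted):

#define COMPARE_SCALAR_FIELD(fldname) \
	do { \
		if (a->fldname != b->fldname) \
			return false; \
	} while (0)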
@@ -135,7 +135,7 @@ _equalRangeVar(RangeVar *a, RangeVar *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -224,11 +224,12 @@ _equalArrayRef(ArrayRef *a, ArrayRef *b)
}
static bool
-_equalFuncExpr(FuncExpr *a, FuncExpr *b)
+_equalFuncExpr(FuncExpr * a, FuncExpr * b)
{
COMPARE_SCALAR_FIELD(funcid);
COMPARE_SCALAR_FIELD(funcresulttype);
COMPARE_SCALAR_FIELD(funcretset);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -244,14 +245,15 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b)
}
static bool
-_equalOpExpr(OpExpr *a, OpExpr *b)
+_equalOpExpr(OpExpr * a, OpExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -266,14 +268,15 @@ _equalOpExpr(OpExpr *a, OpExpr *b)
}
static bool
-_equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
+_equalDistinctExpr(DistinctExpr * a, DistinctExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -288,14 +291,15 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
}
static bool
-_equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
+_equalScalarArrayOpExpr(ScalarArrayOpExpr * a, ScalarArrayOpExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -309,7 +313,7 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
}
static bool
-_equalBoolExpr(BoolExpr *a, BoolExpr *b)
+_equalBoolExpr(BoolExpr * a, BoolExpr * b)
{
COMPARE_SCALAR_FIELD(boolop);
COMPARE_NODE_FIELD(args);
@@ -366,6 +370,7 @@ _equalRelabelType(RelabelType *a, RelabelType *b)
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(resulttype);
COMPARE_SCALAR_FIELD(resulttypmod);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -399,7 +404,7 @@ _equalCaseWhen(CaseWhen *a, CaseWhen *b)
}
static bool
-_equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
+_equalArrayExpr(ArrayExpr * a, ArrayExpr * b)
{
COMPARE_SCALAR_FIELD(array_typeid);
COMPARE_SCALAR_FIELD(element_typeid);
@@ -410,7 +415,7 @@ _equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
}
static bool
-_equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
+_equalCoalesceExpr(CoalesceExpr * a, CoalesceExpr * b)
{
COMPARE_SCALAR_FIELD(coalescetype);
COMPARE_NODE_FIELD(args);
@@ -419,14 +424,15 @@ _equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
}
static bool
-_equalNullIfExpr(NullIfExpr *a, NullIfExpr *b)
+_equalNullIfExpr(NullIfExpr * a, NullIfExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -459,11 +465,12 @@ _equalBooleanTest(BooleanTest *a, BooleanTest *b)
}
static bool
-_equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
+_equalCoerceToDomain(CoerceToDomain * a, CoerceToDomain * b)
{
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(resulttype);
COMPARE_SCALAR_FIELD(resulttypmod);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -477,7 +484,7 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
}
static bool
-_equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
+_equalCoerceToDomainValue(CoerceToDomainValue * a, CoerceToDomainValue * b)
{
COMPARE_SCALAR_FIELD(typeId);
COMPARE_SCALAR_FIELD(typeMod);
@@ -486,7 +493,7 @@ _equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
}
static bool
-_equalSetToDefault(SetToDefault *a, SetToDefault *b)
+_equalSetToDefault(SetToDefault * a, SetToDefault * b)
{
COMPARE_SCALAR_FIELD(typeId);
COMPARE_SCALAR_FIELD(typeMod);
@@ -554,11 +561,13 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
{
COMPARE_NODE_FIELD(clause);
COMPARE_SCALAR_FIELD(ispusheddown);
+
/*
- * We ignore subclauseindices, eval_cost, this_selec, left/right_relids,
- * left/right_pathkey, and left/right_bucketsize, since they may not be
- * set yet, and should be derivable from the clause anyway. Probably it's
- * not really necessary to compare any of these remaining fields ...
+ * We ignore subclauseindices, eval_cost, this_selec,
+ * left/right_relids, left/right_pathkey, and left/right_bucketsize,
+ * since they may not be set yet, and should be derivable from the
+ * clause anyway. Probably it's not really necessary to compare any
+ * of these remaining fields ...
*/
COMPARE_SCALAR_FIELD(mergejoinoperator);
COMPARE_SCALAR_FIELD(left_sortop);
@@ -578,7 +587,7 @@ _equalJoinInfo(JoinInfo *a, JoinInfo *b)
}
static bool
-_equalInClauseInfo(InClauseInfo *a, InClauseInfo *b)
+_equalInClauseInfo(InClauseInfo * a, InClauseInfo * b)
{
COMPARE_BITMAPSET_FIELD(lefthand);
COMPARE_BITMAPSET_FIELD(righthand);
@@ -620,9 +629,9 @@ _equalQuery(Query *a, Query *b)
/*
* We do not check the other planner internal fields: base_rel_list,
- * other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
- * They might not be set yet, and in any case they should be derivable
- * from the other fields.
+ * other_rel_list, join_rel_list, equi_key_list, query_pathkeys. They
+ * might not be set yet, and in any case they should be derivable from
+ * the other fields.
*/
return true;
}
@@ -706,7 +715,7 @@ _equalAlterTableStmt(AlterTableStmt *a, AlterTableStmt *b)
}
static bool
-_equalAlterDomainStmt(AlterDomainStmt *a, AlterDomainStmt *b)
+_equalAlterDomainStmt(AlterDomainStmt * a, AlterDomainStmt * b)
{
COMPARE_SCALAR_FIELD(subtype);
COMPARE_NODE_FIELD(typename);
@@ -750,7 +759,7 @@ _equalFuncWithArgs(FuncWithArgs *a, FuncWithArgs *b)
}
static bool
-_equalDeclareCursorStmt(DeclareCursorStmt *a, DeclareCursorStmt *b)
+_equalDeclareCursorStmt(DeclareCursorStmt * a, DeclareCursorStmt * b)
{
COMPARE_STRING_FIELD(portalname);
COMPARE_SCALAR_FIELD(options);
@@ -802,7 +811,7 @@ _equalCreateStmt(CreateStmt *a, CreateStmt *b)
}
static bool
-_equalInhRelation(InhRelation *a, InhRelation *b)
+_equalInhRelation(InhRelation * a, InhRelation * b)
{
COMPARE_NODE_FIELD(relation);
COMPARE_SCALAR_FIELD(including_defaults);
@@ -1113,7 +1122,7 @@ _equalCreateSeqStmt(CreateSeqStmt *a, CreateSeqStmt *b)
}
static bool
-_equalAlterSeqStmt(AlterSeqStmt *a, AlterSeqStmt *b)
+_equalAlterSeqStmt(AlterSeqStmt * a, AlterSeqStmt * b)
{
COMPARE_NODE_FIELD(sequence);
COMPARE_NODE_FIELD(options);
@@ -1156,7 +1165,7 @@ _equalCreateTrigStmt(CreateTrigStmt *a, CreateTrigStmt *b)
COMPARE_NODE_FIELD(args);
COMPARE_SCALAR_FIELD(before);
COMPARE_SCALAR_FIELD(row);
- if (strcmp(a->actions, b->actions) != 0) /* in-line string field */
+ if (strcmp(a->actions, b->actions) != 0) /* in-line string field */
return false;
COMPARE_SCALAR_FIELD(isconstraint);
COMPARE_SCALAR_FIELD(deferrable);
@@ -1400,7 +1409,7 @@ _equalParamRef(ParamRef *a, ParamRef *b)
static bool
_equalAConst(A_Const *a, A_Const *b)
{
- if (!equal(&a->val, &b->val)) /* hack for in-line Value field */
+ if (!equal(&a->val, &b->val)) /* hack for in-line Value field */
return false;
COMPARE_NODE_FIELD(typename);
@@ -1649,9 +1658,9 @@ equal(void *a, void *b)
switch (nodeTag(a))
{
- /*
- * PRIMITIVE NODES
- */
+ /*
+ * PRIMITIVE NODES
+ */
case T_Resdom:
retval = _equalResdom(a, b);
break;
@@ -1841,7 +1850,7 @@ equal(void *a, void *b)
retval = _equalCreateStmt(a, b);
break;
case T_InhRelation:
- retval = _equalInhRelation(a,b);
+ retval = _equalInhRelation(a, b);
break;
case T_DefineStmt:
retval = _equalDefineStmt(a, b);
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index b0f6821b8c4..354134caeaf 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.51 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.52 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@@ -202,7 +202,7 @@ nconc(List *l1, List *l2)
* since we avoid having to chase down the list again each time.
*/
void
-FastAppend(FastList *fl, void *datum)
+FastAppend(FastList * fl, void *datum)
{
List *cell = makeList1(datum);
@@ -223,7 +223,7 @@ FastAppend(FastList *fl, void *datum)
* FastAppendi - same for integers
*/
void
-FastAppendi(FastList *fl, int datum)
+FastAppendi(FastList * fl, int datum)
{
List *cell = makeListi1(datum);
@@ -244,7 +244,7 @@ FastAppendi(FastList *fl, int datum)
* FastAppendo - same for Oids
*/
void
-FastAppendo(FastList *fl, Oid datum)
+FastAppendo(FastList * fl, Oid datum)
{
List *cell = makeListo1(datum);
@@ -267,14 +267,12 @@ FastAppendo(FastList *fl, Oid datum)
* Note that the cells of the second argument are absorbed into the FastList.
*/
void
-FastConc(FastList *fl, List *cells)
+FastConc(FastList * fl, List *cells)
{
if (cells == NIL)
return; /* nothing to do */
if (fl->tail)
- {
lnext(fl->tail) = cells;
- }
else
{
/* First cell of list */
@@ -292,14 +290,12 @@ FastConc(FastList *fl, List *cells)
* Note that the cells of the second argument are absorbed into the first.
*/
void
-FastConcFast(FastList *fl, FastList *fl2)
+FastConcFast(FastList * fl, FastList * fl2)
{
if (fl2->head == NIL)
return; /* nothing to do */
if (fl->tail)
- {
lnext(fl->tail) = fl2->head;
- }
else
{
/* First cell of list */
@@ -319,9 +315,7 @@ nth(int n, List *l)
{
/* XXX assume list is long enough */
while (n-- > 0)
- {
l = lnext(l);
- }
return lfirst(l);
}
@@ -781,4 +775,5 @@ lreverse(List *l)
result = lcons(lfirst(i), result);
return result;
}
+
#endif
diff --git a/src/backend/nodes/nodes.c b/src/backend/nodes/nodes.c
index f71bd020ce9..4a4e0c98f57 100644
--- a/src/backend/nodes/nodes.c
+++ b/src/backend/nodes/nodes.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.19 2002/12/16 16:22:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.20 2003/08/04 00:43:19 momjian Exp $
*
* HISTORY
* Andrew Yu Oct 20, 1994 file creation
@@ -24,4 +24,4 @@
* Support for newNode() macro
*/
-Node *newNodeMacroHolder;
+Node *newNodeMacroHolder;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index fba59553793..9247bb00d2a 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -8,12 +8,12 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.214 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.215 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c). For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -31,8 +31,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
@@ -184,7 +184,7 @@ _outOidList(StringInfo str, List *list)
* an integer List would be.
*/
static void
-_outBitmapset(StringInfo str, Bitmapset *bms)
+_outBitmapset(StringInfo str, Bitmapset * bms)
{
Bitmapset *tmpset;
int x;
@@ -192,9 +192,7 @@ _outBitmapset(StringInfo str, Bitmapset *bms)
appendStringInfoChar(str, '(');
tmpset = bms_copy(bms);
while ((x = bms_first_member(tmpset)) >= 0)
- {
appendStringInfo(str, " %d", x);
- }
bms_free(tmpset);
appendStringInfoChar(str, ')');
}
@@ -633,7 +631,7 @@ _outArrayRef(StringInfo str, ArrayRef *node)
}
static void
-_outFuncExpr(StringInfo str, FuncExpr *node)
+_outFuncExpr(StringInfo str, FuncExpr * node)
{
WRITE_NODE_TYPE("FUNCEXPR");
@@ -645,7 +643,7 @@ _outFuncExpr(StringInfo str, FuncExpr *node)
}
static void
-_outOpExpr(StringInfo str, OpExpr *node)
+_outOpExpr(StringInfo str, OpExpr * node)
{
WRITE_NODE_TYPE("OPEXPR");
@@ -657,7 +655,7 @@ _outOpExpr(StringInfo str, OpExpr *node)
}
static void
-_outDistinctExpr(StringInfo str, DistinctExpr *node)
+_outDistinctExpr(StringInfo str, DistinctExpr * node)
{
WRITE_NODE_TYPE("DISTINCTEXPR");
@@ -669,7 +667,7 @@ _outDistinctExpr(StringInfo str, DistinctExpr *node)
}
static void
-_outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr *node)
+_outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr * node)
{
WRITE_NODE_TYPE("SCALARARRAYOPEXPR");
@@ -680,7 +678,7 @@ _outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr *node)
}
static void
-_outBoolExpr(StringInfo str, BoolExpr *node)
+_outBoolExpr(StringInfo str, BoolExpr * node)
{
char *opstr = NULL;
@@ -780,7 +778,7 @@ _outCaseWhen(StringInfo str, CaseWhen *node)
}
static void
-_outArrayExpr(StringInfo str, ArrayExpr *node)
+_outArrayExpr(StringInfo str, ArrayExpr * node)
{
WRITE_NODE_TYPE("ARRAY");
@@ -791,7 +789,7 @@ _outArrayExpr(StringInfo str, ArrayExpr *node)
}
static void
-_outCoalesceExpr(StringInfo str, CoalesceExpr *node)
+_outCoalesceExpr(StringInfo str, CoalesceExpr * node)
{
WRITE_NODE_TYPE("COALESCE");
@@ -800,7 +798,7 @@ _outCoalesceExpr(StringInfo str, CoalesceExpr *node)
}
static void
-_outNullIfExpr(StringInfo str, NullIfExpr *node)
+_outNullIfExpr(StringInfo str, NullIfExpr * node)
{
WRITE_NODE_TYPE("NULLIFEXPR");
@@ -830,7 +828,7 @@ _outBooleanTest(StringInfo str, BooleanTest *node)
}
static void
-_outCoerceToDomain(StringInfo str, CoerceToDomain *node)
+_outCoerceToDomain(StringInfo str, CoerceToDomain * node)
{
WRITE_NODE_TYPE("COERCETODOMAIN");
@@ -841,7 +839,7 @@ _outCoerceToDomain(StringInfo str, CoerceToDomain *node)
}
static void
-_outCoerceToDomainValue(StringInfo str, CoerceToDomainValue *node)
+_outCoerceToDomainValue(StringInfo str, CoerceToDomainValue * node)
{
WRITE_NODE_TYPE("COERCETODOMAINVALUE");
@@ -850,7 +848,7 @@ _outCoerceToDomainValue(StringInfo str, CoerceToDomainValue *node)
}
static void
-_outSetToDefault(StringInfo str, SetToDefault *node)
+_outSetToDefault(StringInfo str, SetToDefault * node)
{
WRITE_NODE_TYPE("SETTODEFAULT");
@@ -979,7 +977,7 @@ _outAppendPath(StringInfo str, AppendPath *node)
}
static void
-_outResultPath(StringInfo str, ResultPath *node)
+_outResultPath(StringInfo str, ResultPath * node)
{
WRITE_NODE_TYPE("RESULTPATH");
@@ -990,7 +988,7 @@ _outResultPath(StringInfo str, ResultPath *node)
}
static void
-_outMaterialPath(StringInfo str, MaterialPath *node)
+_outMaterialPath(StringInfo str, MaterialPath * node)
{
WRITE_NODE_TYPE("MATERIALPATH");
@@ -1000,7 +998,7 @@ _outMaterialPath(StringInfo str, MaterialPath *node)
}
static void
-_outUniquePath(StringInfo str, UniquePath *node)
+_outUniquePath(StringInfo str, UniquePath * node)
{
WRITE_NODE_TYPE("UNIQUEPATH");
@@ -1079,7 +1077,7 @@ _outJoinInfo(StringInfo str, JoinInfo *node)
}
static void
-_outInClauseInfo(StringInfo str, InClauseInfo *node)
+_outInClauseInfo(StringInfo str, InClauseInfo * node)
{
WRITE_NODE_TYPE("INCLAUSEINFO");
@@ -1132,7 +1130,7 @@ _outNotifyStmt(StringInfo str, NotifyStmt *node)
}
static void
-_outDeclareCursorStmt(StringInfo str, DeclareCursorStmt *node)
+_outDeclareCursorStmt(StringInfo str, DeclareCursorStmt * node)
{
WRITE_NODE_TYPE("DECLARECURSOR");
@@ -1820,9 +1818,11 @@ _outNode(StringInfo str, void *obj)
break;
default:
+
/*
* This should be an ERROR, but it's too useful to be able
- * to dump structures that _outNode only understands part of.
+ * to dump structures that _outNode only understands part
+ * of.
*/
elog(WARNING, "could not dump unrecognized node type: %d",
(int) nodeTag(obj));
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index abc3a1b0105..32af1d92923 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -8,11 +8,11 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.159 2003/07/22 23:30:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.160 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in. (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -28,7 +28,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -466,13 +466,14 @@ _readOpExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -493,13 +494,14 @@ _readDistinctExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -520,13 +522,14 @@ _readScalarArrayOpExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -685,13 +688,14 @@ _readNullIfExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -955,6 +959,7 @@ Node *
parseNodeString(void)
{
void *return_value;
+
READ_TEMP_LOCALS();
token = pg_strtok(&length);
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index ee06560ca23..b1ff994f061 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.63 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.64 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,9 +48,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
/*
* Because gimme_tree considers both left- and right-sided trees,
* there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered.
- * To avoid redundant cost calculations, we simply reject tours where
- * tour[0] > tour[1], assigning them an artificially bad fitness.
+ * (b,a,c,d,...) --- the same join orders will be considered. To avoid
+ * redundant cost calculations, we simply reject tours where tour[0] >
+ * tour[1], assigning them an artificially bad fitness.
*
* (It would be better to tweak the GEQO logic to not generate such tours
* in the first place, but I'm not sure of all the implications in the
@@ -65,8 +65,8 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
*
* Since geqo_eval() will be called many times, we can't afford to let
* all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so that
- * it will be freed even if we abort via ereport(ERROR).
+ * make the temp context a child of the planner's normal context, so
+ * that it will be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
@@ -76,9 +76,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
oldcxt = MemoryContextSwitchTo(mycontext);
/*
- * preserve root->join_rel_list, which gimme_tree changes; without this,
- * it'll be pointing at recycled storage after the MemoryContextDelete
- * below.
+ * preserve root->join_rel_list, which gimme_tree changes; without
+ * this, it'll be pointing at recycled storage after the
+ * MemoryContextDelete below.
*/
savelist = root->join_rel_list;
@@ -151,9 +151,10 @@ gimme_tree(Query *root, List *initial_rels,
/*
* Construct a RelOptInfo representing the previous joinrel joined
- * to inner_rel. These are always inner joins. Note that we expect
- * the joinrel not to exist in root->join_rel_list yet, and so the
- * paths constructed for it will only include the ones we want.
+ * to inner_rel. These are always inner joins. Note that we
+ * expect the joinrel not to exist in root->join_rel_list yet, and
+ * so the paths constructed for it will only include the ones we
+ * want.
*/
new_rel = make_join_rel(root, joinrel, inner_rel, JOIN_INNER);
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index f60fd7d4667..c91f16fd0e4 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.36 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.37 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -229,8 +229,8 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
/*
- * got the cheapest query tree processed by geqo;
- * first element of the population indicates the best query tree
+ * got the cheapest query tree processed by geqo; first element of the
+ * population indicates the best query tree
*/
best_tour = (Gene *) pool->data[0].string;
diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c
index acc9285d005..329e19c4016 100644
--- a/src/backend/optimizer/geqo/geqo_misc.c
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.37 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.38 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ avg_pool(Pool *pool)
/*
* Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
+ * by pool->size before summing, not after, to avoid overflow. This
* loses a little in speed and accuracy, but this routine is only used
* for debug printouts, so we don't care that much.
*/
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 486dede0fb9..494f624d4cd 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.104 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.105 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,13 +50,13 @@ static void set_function_pathlist(Query *root, RelOptInfo *rel,
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
List *initial_rels);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
- bool *differentTypes);
+ bool *differentTypes);
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
- bool *differentTypes);
+ bool *differentTypes);
static void subquery_push_qual(Query *subquery, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
Index rti, Node *qual);
@@ -290,14 +290,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
rel->rows += childrel->rows;
if (childrel->width > rel->width)
rel->width = childrel->width;
-
+
childvars = FastListValue(&childrel->reltargetlist);
foreach(parentvars, FastListValue(&rel->reltargetlist))
{
- Var *parentvar = (Var *) lfirst(parentvars);
- Var *childvar = (Var *) lfirst(childvars);
- int parentndx = parentvar->varattno - rel->min_attr;
- int childndx = childvar->varattno - childrel->min_attr;
+ Var *parentvar = (Var *) lfirst(parentvars);
+ Var *childvar = (Var *) lfirst(childvars);
+ int parentndx = parentvar->varattno - rel->min_attr;
+ int childndx = childvar->varattno - childrel->min_attr;
if (childrel->attr_widths[childndx] > rel->attr_widths[parentndx])
rel->attr_widths[parentndx] = childrel->attr_widths[childndx];
@@ -343,8 +343,8 @@ set_subquery_pathlist(Query *root, RelOptInfo *rel,
*
* There are several cases where we cannot push down clauses.
* Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses are
- * checked by qual_is_pushdown_safe().
+ * subquery_is_pushdown_safe(). Restrictions on individual clauses
+ * are checked by qual_is_pushdown_safe().
*
* Non-pushed-down clauses will get evaluated as qpquals of the
* SubqueryScan node.
@@ -725,15 +725,16 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
vars = pull_var_clause(qual, false);
foreach(vl, vars)
{
- Var *var = (Var *) lfirst(vl);
+ Var *var = (Var *) lfirst(vl);
List *tl;
TargetEntry *tle = NULL;
Assert(var->varno == rti);
+
/*
* We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't
- * have negative attnos.)
+ * once. (NB: this only works because subquery outputs can't have
+ * negative attnos.)
*/
if (bms_is_member(var->varattno, tested))
continue;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e1754a7a694..1a0e2da82fd 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.111 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.112 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,10 +102,10 @@ bool enable_hashjoin = true;
static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
- int nbuckets);
-static bool cost_qual_eval_walker(Node *node, QualCost *total);
+ int nbuckets);
+static bool cost_qual_eval_walker(Node *node, QualCost * total);
static Selectivity approx_selectivity(Query *root, List *quals,
- JoinType jointype);
+ JoinType jointype);
static void set_rel_width(Query *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
@@ -358,13 +358,13 @@ cost_index(Path *path, Query *root,
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be subtracted.
- * Rather than work out exactly how much to subtract, we don't subtract
- * anything.
+ * some of the indexquals are join clauses and shouldn't be
+ * subtracted. Rather than work out exactly how much to subtract, we
+ * don't subtract anything.
*
* XXX For a lossy index, not all the quals will be removed and so we
- * really shouldn't subtract their costs; but detecting that seems more
- * expensive than it's worth.
+ * really shouldn't subtract their costs; but detecting that seems
+ * more expensive than it's worth.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -433,8 +433,8 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
/*
* Cost of path is cost of evaluating the subplan, plus cost of
* evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection
- * and projection overhead.
+ * SubqueryScan node, plus cpu_tuple_cost to account for selection and
+ * projection overhead.
*/
path->startup_cost = baserel->subplan->startup_cost;
path->total_cost = baserel->subplan->total_cost;
@@ -597,8 +597,9 @@ cost_material(Path *path,
}
/*
- * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
- * so that it doesn't appear worthwhile to materialize a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use
+ * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
+ * a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
@@ -631,17 +632,17 @@ cost_agg(Path *path, Query *root,
* additional cpu_operator_cost per grouping column per input tuple
* for grouping comparisons.
*
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will happen
- * as long as the computed total costs are indeed exactly equal --- but
- * if there's roundoff error we might do the wrong thing. So be sure
- * that the computations below form the same intermediate values in the
- * same order.
+ * preferred (since it has no risk of memory overflow). This will
+ * happen as long as the computed total costs are indeed exactly equal
+ * --- but if there's roundoff error we might do the wrong thing. So
+ * be sure that the computations below form the same intermediate
+ * values in the same order.
*/
if (aggstrategy == AGG_PLAIN)
{
@@ -724,26 +725,26 @@ cost_nestloop(NestPath *path, Query *root)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double ntuples;
- Selectivity joininfactor;
+ Selectivity joininfactor;
if (!enable_nestloop)
startup_cost += disable_cost;
/*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for an
- * outer tuple as soon as we have one match. Account for the effects of
- * this by scaling down the cost estimates in proportion to the expected
- * output size. (This assumes that all the quals attached to the join are
- * IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
*/
if (path->jointype == JOIN_IN)
{
- Selectivity qual_selec = approx_selectivity(root, restrictlist,
+ Selectivity qual_selec = approx_selectivity(root, restrictlist,
path->jointype);
- double qptuples;
+ double qptuples;
qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
if (qptuples > path->path.parent->rows)
@@ -761,8 +762,8 @@ cost_nestloop(NestPath *path, Query *root)
* before we can start returning tuples, so the join's startup cost is
* their sum. What's not so clear is whether the inner path's
* startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a hashjoin,
- * but probably is true otherwise.
+ * This is not true if the inner path is materialized or is a
+ * hashjoin, but probably is true otherwise.
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -783,14 +784,15 @@ cost_nestloop(NestPath *path, Query *root)
(inner_path->total_cost - inner_path->startup_cost) * joininfactor;
/*
- * Compute number of tuples processed (not number emitted!).
- * If inner path is an indexscan, be sure to use its estimated output row
- * count, which may be lower than the restriction-clause-only row count of
- * its parent. (We don't include this case in the PATH_ROWS macro because
- * it applies *only* to a nestloop's inner relation.) Note: it is correct
- * to use the unadjusted inner_path_rows in the above calculation for
- * joininfactor, since otherwise we'd be double-counting the selectivity
- * of the join clause being used for the index.
+ * Compute number of tuples processed (not number emitted!). If inner
+ * path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row
+ * count of its parent. (We don't include this case in the PATH_ROWS
+ * macro because it applies *only* to a nestloop's inner relation.)
+ * Note: it is correct to use the unadjusted inner_path_rows in the
+ * above calculation for joininfactor, since otherwise we'd be
+ * double-counting the selectivity of the join clause being used for
+ * the index.
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -831,8 +833,8 @@ cost_mergejoin(MergePath *path, Query *root)
Cost startup_cost = 0;
Cost run_cost = 0;
Cost cpu_per_tuple;
- Selectivity merge_selec;
- Selectivity qp_selec;
+ Selectivity merge_selec;
+ Selectivity qp_selec;
QualCost merge_qual_cost;
QualCost qp_qual_cost;
RestrictInfo *firstclause;
@@ -847,7 +849,7 @@ cost_mergejoin(MergePath *path, Query *root)
double rescanratio;
Selectivity outerscansel,
innerscansel;
- Selectivity joininfactor;
+ Selectivity joininfactor;
Path sort_path; /* dummy for result of cost_sort */
if (!enable_mergejoin)
@@ -856,7 +858,8 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* Compute cost and selectivity of the mergequals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
@@ -876,29 +879,30 @@ cost_mergejoin(MergePath *path, Query *root)
qptuples = ceil(mergejointuples * qp_selec);
/*
- * When there are equal merge keys in the outer relation, the mergejoin
- * must rescan any matching tuples in the inner relation. This means
- * re-fetching inner tuples. Our cost model for this is that a re-fetch
- * costs the same as an original fetch, which is probably an overestimate;
- * but on the other hand we ignore the bookkeeping costs of mark/restore.
- * Not clear if it's worth developing a more refined model.
+ * When there are equal merge keys in the outer relation, the
+ * mergejoin must rescan any matching tuples in the inner relation.
+ * This means re-fetching inner tuples. Our cost model for this is
+ * that a re-fetch costs the same as an original fetch, which is
+ * probably an overestimate; but on the other hand we ignore the
+ * bookkeeping costs of mark/restore. Not clear if it's worth
+ * developing a more refined model.
*
* The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values of
- * each key in the outer relation as m1, m2, ...; in the inner relation,
- * n1, n2, ... Then we have
+ * merge join output minus size of inner relation. Assume that the
+ * distinct key values are 1, 2, ..., and denote the number of values
+ * of each key in the outer relation as m1, m2, ...; in the inner
+ * relation, n1, n2, ... Then we have
*
- * size of join = m1 * n1 + m2 * n2 + ...
+ * size of join = m1 * n1 + m2 * n2 + ...
*
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
- * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
- * = size of join - size of inner relation
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * relation
*
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0);
* we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
+ * tuples, when we should not. Can we do better without expensive
* selectivity computations?
*/
if (IsA(outer_path, UniquePath))
@@ -953,8 +957,9 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few rows
- * in the inputs, failing to do this makes for a large percentage error.
+ * normally an insignificant effect, but when there are only a few
+ * rows in the inputs, failing to do this makes for a large percentage
+ * error.
*/
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
@@ -1002,11 +1007,11 @@ cost_mergejoin(MergePath *path, Query *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop outputting inner
- * tuples for an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples
+ * for an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*/
if (path->jpath.jointype == JOIN_IN &&
qptuples > path->jpath.path.parent->rows)
@@ -1017,9 +1022,9 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* The number of tuple comparisons needed is approximately number of
* outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate
- * the mergejoin quals. NOTE: JOIN_IN mode does not save any work
- * here, so do NOT include joininfactor.
+ * tuples (can we refine this?). At each one, we need to evaluate the
+ * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
+ * so do NOT include joininfactor.
*/
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
@@ -1028,7 +1033,7 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.) This
* work is skipped in JOIN_IN mode, so apply the factor.
*/
@@ -1059,8 +1064,8 @@ cost_hashjoin(HashPath *path, Query *root)
Cost startup_cost = 0;
Cost run_cost = 0;
Cost cpu_per_tuple;
- Selectivity hash_selec;
- Selectivity qp_selec;
+ Selectivity hash_selec;
+ Selectivity qp_selec;
QualCost hash_qual_cost;
QualCost qp_qual_cost;
double hashjointuples;
@@ -1076,7 +1081,7 @@ cost_hashjoin(HashPath *path, Query *root)
int physicalbuckets;
int numbatches;
Selectivity innerbucketsize;
- Selectivity joininfactor;
+ Selectivity joininfactor;
List *hcl;
List *qpquals;
@@ -1086,7 +1091,8 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* Compute cost and selectivity of the hashquals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
@@ -1114,9 +1120,9 @@ cost_hashjoin(HashPath *path, Query *root)
* Cost of computing hash function: must do it once per input tuple.
* We charge one cpu_operator_cost for each column's hash function.
*
- * XXX when a hashclause is more complex than a single operator,
- * we really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
+ * XXX when a hashclause is more complex than a single operator, we
+ * really should charge the extra eval costs of the left or right
+ * side, as appropriate, here. This seems more work than it's worth
* at the moment.
*/
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
@@ -1131,13 +1137,13 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause;
- * this is undoubtedly conservative.
+ * smallest bucketsize estimated for any individual hashclause; this
+ * is undoubtedly conservative.
*
- * BUT: if inner relation has been unique-ified, we can assume it's
- * good for hashing. This is important both because it's the right
- * answer, and because we avoid contaminating the cache with a value
- * that's wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer,
+ * and because we avoid contaminating the cache with a value that's
+ * wrong for non-unique-ified paths.
*/
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
@@ -1152,12 +1158,13 @@ cost_hashjoin(HashPath *path, Query *root)
Assert(IsA(restrictinfo, RestrictInfo));
/*
- * First we have to figure out which side of the hashjoin clause
- * is the inner side.
+ * First we have to figure out which side of the hashjoin
+ * clause is the inner side.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the bucketsize estimate in
+ * the RestrictInfo node to avoid repeated lookups of
+ * statistics.
*/
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
@@ -1169,7 +1176,7 @@ cost_hashjoin(HashPath *path, Query *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- (Var *) get_rightop(restrictinfo->clause),
+ (Var *) get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
@@ -1185,7 +1192,7 @@ cost_hashjoin(HashPath *path, Query *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- (Var *) get_leftop(restrictinfo->clause),
+ (Var *) get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
@@ -1217,11 +1224,11 @@ cost_hashjoin(HashPath *path, Query *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop comparing inner
- * tuples to an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*/
if (path->jpath.jointype == JOIN_IN &&
qptuples > path->jpath.path.parent->rows)
@@ -1243,7 +1250,7 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -1254,14 +1261,14 @@ cost_hashjoin(HashPath *path, Query *root)
* Bias against putting larger relation on inside. We don't want an
* absolute prohibition, though, since larger relation might have
* better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the
- * square root of the size ratio. (Why square root? No real good
- * reason, but it seems reasonable...)
+ * unreservedly, anyway. Instead, inflate the run cost by the square
+ * root of the size ratio. (Why square root? No real good reason,
+ * but it seems reasonable...)
*
- * Note: before 7.4 we implemented this by inflating startup cost;
- * but if there's a disable_cost component in the input paths'
- * startup cost, that unfairly penalizes the hash. Probably it'd
- * be better to keep track of disable penalty separately from cost.
+ * Note: before 7.4 we implemented this by inflating startup cost; but if
+ * there's a disable_cost component in the input paths' startup cost,
+ * that unfairly penalizes the hash. Probably it'd be better to keep
+ * track of disable penalty separately from cost.
*/
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
@@ -1442,7 +1449,7 @@ estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
* and a per-evaluation component.
*/
void
-cost_qual_eval(QualCost *cost, List *quals)
+cost_qual_eval(QualCost * cost, List *quals)
{
List *l;
@@ -1484,7 +1491,7 @@ cost_qual_eval(QualCost *cost, List *quals)
}
static bool
-cost_qual_eval_walker(Node *node, QualCost *total)
+cost_qual_eval_walker(Node *node, QualCost * total)
{
if (node == NULL)
return false;
@@ -1502,9 +1509,7 @@ cost_qual_eval_walker(Node *node, QualCost *total)
IsA(node, OpExpr) ||
IsA(node, DistinctExpr) ||
IsA(node, NullIfExpr))
- {
total->per_tuple += cpu_operator_cost;
- }
else if (IsA(node, ScalarArrayOpExpr))
{
/* should charge more than 1 op cost, but how many? */
@@ -1519,47 +1524,48 @@ cost_qual_eval_walker(Node *node, QualCost *total)
{
/*
* A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge accordingly.
- * (Sub-selects that can be executed as InitPlans have already been
- * removed from the expression.)
+ * subplan will be executed on each evaluation, so charge
+ * accordingly. (Sub-selects that can be executed as InitPlans
+ * have already been removed from the expression.)
*
* An exception occurs when we have decided we can implement the
* subplan by hashing.
*
*/
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
Plan *plan = subplan->plan;
if (subplan->useHashTable)
{
/*
* If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost.
- * We charge one cpu_operator_cost per tuple for the work of
+ * the cost of evaluating the query is a one-time cost. We
+ * charge one cpu_operator_cost per tuple for the work of
* loading the hashtable, too.
*/
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;
+
/*
* The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the hashtable.
- * Recursion into the exprs list will handle the lefthand
- * expressions properly, and will count one cpu_operator_cost
- * for each comparison operator. That is probably too low for
- * the probing cost, but it's hard to make a better estimate,
- * so live with it for now.
+ * lefthand expressions, plus the cost of probing the
+ * hashtable. Recursion into the exprs list will handle the
+ * lefthand expressions properly, and will count one
+ * cpu_operator_cost for each comparison operator. That is
+ * probably too low for the probing cost, but it's hard to
+ * make a better estimate, so live with it for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output
- * we will actually need to scan. NOTE: this logic should
- * agree with the estimates used by make_subplan() in
+ * evaluation. We need to estimate how much of the output we
+ * will actually need to scan. NOTE: this logic should agree
+ * with the estimates used by make_subplan() in
* plan/subselect.c.
*/
- Cost plan_run_cost = plan->total_cost - plan->startup_cost;
+ Cost plan_run_cost = plan->total_cost - plan->startup_cost;
if (subplan->subLinkType == EXISTS_SUBLINK)
{
@@ -1579,23 +1585,20 @@ cost_qual_eval_walker(Node *node, QualCost *total)
/* assume we need all tuples */
total->per_tuple += plan_run_cost;
}
+
/*
- * Also account for subplan's startup cost.
- * If the subplan is uncorrelated or undirect correlated,
- * AND its topmost node is a Sort or Material node, assume
- * that we'll only need to pay its startup cost once;
- * otherwise assume we pay the startup cost every time.
+ * Also account for subplan's startup cost. If the subplan is
+ * uncorrelated or undirect correlated, AND its topmost node
+ * is a Sort or Material node, assume that we'll only need to
+ * pay its startup cost once; otherwise assume we pay the
+ * startup cost every time.
*/
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
IsA(plan, Material)))
- {
total->startup += plan->startup_cost;
- }
else
- {
total->per_tuple += plan->startup_cost;
- }
}
}
@@ -1745,7 +1748,7 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
UniquePath *upath;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are
* not double-counting them because they were not considered in
* estimating the sizes of the component rels.
@@ -1758,8 +1761,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
- * If we are doing an outer join, take that into account: the output
- * must be at least as large as the non-nullable input. (Is there any
+ * If we are doing an outer join, take that into account: the output must
+ * be at least as large as the non-nullable input. (Is there any
* chance of being even smarter?)
*
* For JOIN_IN and variants, the Cartesian product is figured with
@@ -1823,8 +1826,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
rel->rows = temp;
/*
- * We need not compute the output width here, because build_joinrel_tlist
- * already did.
+ * We need not compute the output width here, because
+ * build_joinrel_tlist already did.
*/
}
@@ -1911,11 +1914,14 @@ set_rel_width(Query *root, RelOptInfo *rel)
Assert(IsA(var, Var));
- /* The width probably hasn't been cached yet, but may as well check */
+ /*
+ * The width probably hasn't been cached yet, but may as well
+ * check
+ */
if (rel->attr_widths[ndx] > 0)
{
- tuple_width += rel->attr_widths[ndx];
- continue;
+ tuple_width += rel->attr_widths[ndx];
+ continue;
}
relid = getrelid(var->varno, root->rtable);
@@ -1931,8 +1937,8 @@ set_rel_width(Query *root, RelOptInfo *rel)
}
/*
- * Not a plain relation, or can't find statistics for it.
- * Estimate using just the type info.
+ * Not a plain relation, or can't find statistics for it. Estimate
+ * using just the type info.
*/
item_width = get_typavgwidth(var->vartype, var->vartypmod);
Assert(item_width > 0);
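
For illustration (not part of the patch): the cost_mergejoin() comments reflowed above derive the re-fetch count for duplicate merge keys as (m1 - 1)*n1 + (m2 - 1)*n2 + ... = join output size minus inner relation size. For example, outer key counts {2, 3} against inner counts {1, 4} give a join of 2*1 + 3*4 = 14 rows and an inner relation of 5 rows, hence about 9 re-fetched tuples. A standalone sketch of that estimate (the function name is invented, not taken from the patch):

    /*
     * Rescanned-tuple estimate from the cost_mergejoin() derivation:
     *   (m1-1)*n1 + (m2-1)*n2 + ...
     *     = (m1*n1 + m2*n2 + ...) - (n1 + n2 + ...)
     *     = join output size - inner relation size,
     * clamped at zero for inner tuples that have no outer match.
     */
    static double
    estimate_rescanned_tuples(double mergejointuples, double inner_path_rows)
    {
        double rescanned = mergejointuples - inner_path_rows;

        return (rescanned > 0) ? rescanned : 0;
    }
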
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index fa19abe4717..67238b5361c 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.145 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.146 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,9 +64,9 @@ static List *group_clauses_by_indexkey_for_join(Query *root,
Relids outer_relids,
JoinType jointype, bool isouterjoin);
static bool match_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass, Expr *clause);
-static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
int indexcol, Oid opclass, Expr *clause);
+static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
+ int indexcol, Oid opclass, Expr *clause);
static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static bool pred_test(List *predicate_list, List *restrictinfo_list,
@@ -77,8 +77,8 @@ static bool pred_test_recurse_pred(Expr *predicate, Node *clause);
static bool pred_test_simple_clause(Expr *predicate, Node *clause);
static Relids indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index);
static Path *make_innerjoin_index_path(Query *root,
- RelOptInfo *rel, IndexOptInfo *index,
- List *clausegroups);
+ RelOptInfo *rel, IndexOptInfo *index,
+ List *clausegroups);
static bool match_index_to_operand(Node *operand, int indexcol,
RelOptInfo *rel, IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
@@ -87,7 +87,7 @@ static List *expand_indexqual_condition(Expr *clause, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
static List *network_prefix_quals(Node *leftop, Oid expr_op, Oid opclass,
- Datum rightop);
+ Datum rightop);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
@@ -114,7 +114,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* scan this routine deems potentially interesting for the current query.
*
* We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
* path will be generated for each outer relation later on, but knowing the
* set of potential otherrels allows us to identify equivalent outer relations
* and avoid repeated computation.
@@ -219,10 +219,11 @@ create_index_paths(Query *root, RelOptInfo *rel)
/*
* 6. Examine join clauses to see which ones are potentially
- * usable with this index, and generate the set of all other relids
- * that participate in such join clauses. We'll use this set later
- * to recognize outer rels that are equivalent for joining purposes.
- * We compute both per-index and overall-for-relation sets.
+ * usable with this index, and generate the set of all other
+ * relids that participate in such join clauses. We'll use this
+ * set later to recognize outer rels that are equivalent for
+ * joining purposes. We compute both per-index and
+ * overall-for-relation sets.
*/
join_outerrelids = indexable_outerrelids(rel, index);
index->outer_relids = join_outerrelids;
@@ -274,7 +275,7 @@ match_index_orclauses(RelOptInfo *rel,
*/
restrictinfo->subclauseindices =
match_index_orclause(rel, index,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices);
}
}
@@ -422,6 +423,7 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
Oid *classes = index->classlist;
FastListInit(&quals);
+
/*
* Extract relevant indexclauses in indexkey order. This is
* essentially just like group_clauses_by_indexkey() except that the
@@ -576,7 +578,7 @@ group_clauses_by_indexkey(RelOptInfo *rel, IndexOptInfo *index)
*
* This is much like group_clauses_by_indexkey(), but we consider both
* join and restriction clauses. Any joinclause that uses only otherrels
- * in the specified outer_relids is fair game. But there must be at least
+ * in the specified outer_relids is fair game. But there must be at least
* one such joinclause in the final list, otherwise we return NIL indicating
* that this index isn't interesting as an inner indexscan. (A scan using
* only restriction clauses shouldn't be created here, because a regular Path
@@ -641,10 +643,10 @@ group_clauses_by_indexkey_for_join(Query *root,
*/
if (FastListValue(&clausegroup) != NIL)
{
- List *nl;
+ List *nl;
nl = remove_redundant_join_clauses(root,
- FastListValue(&clausegroup),
+ FastListValue(&clausegroup),
jointype);
FastListFromList(&clausegroup, nl);
}
@@ -736,9 +738,9 @@ match_clause_to_indexcol(RelOptInfo *rel,
return false;
/*
- * Check for clauses of the form:
- * (indexkey operator constant) or (constant operator indexkey).
- * Anything that is a "pseudo constant" expression will do.
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey). Anything that is a "pseudo constant"
+ * expression will do.
*/
if (match_index_to_operand(leftop, indexcol, rel, index) &&
is_pseudo_constant_clause(rightop))
@@ -747,8 +749,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
return true;
/*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
@@ -762,8 +764,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
return true;
/*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
@@ -824,10 +826,10 @@ match_join_clause_to_indexcol(RelOptInfo *rel,
return false;
/*
- * Check for an indexqual that could be handled by a nestloop
- * join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars and
- * contains no volatile functions.
+ * Check for an indexqual that could be handled by a nestloop join. We
+ * need the index key to be compared against an expression that uses
+ * none of the indexed relation's vars and contains no volatile
+ * functions.
*/
if (match_index_to_operand(leftop, indexcol, rel, index))
{
@@ -1174,10 +1176,11 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
* 1. Find "btree" strategy numbers for the pred_op and clause_op.
*
* We must find a btree opclass that contains both operators, else the
- * implication can't be determined. If there are multiple such opclasses,
- * assume we can use any one to determine the logical relationship of the
- * two operators and the correct corresponding test operator. This should
- * work for any logically consistent opclasses.
+ * implication can't be determined. If there are multiple such
+ * opclasses, assume we can use any one to determine the logical
+ * relationship of the two operators and the correct corresponding
+ * test operator. This should work for any logically consistent
+ * opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
@@ -1269,7 +1272,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */
@@ -1295,7 +1298,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified index. Returns a set of relids.
+ * for the specified index. Returns a set of relids.
*
* 'rel' is the relation for which 'index' is defined
*/
@@ -1314,16 +1317,16 @@ indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index)
/*
* Examine each joinclause in the JoinInfo node's list to see if
* it matches any key of the index. If so, add the JoinInfo's
- * otherrels to the result. We can skip examining other joinclauses
- * in the same list as soon as we find a match (since by definition
- * they all have the same otherrels).
+ * otherrels to the result. We can skip examining other
+ * joinclauses in the same list as soon as we find a match (since
+ * by definition they all have the same otherrels).
*/
foreach(j, joininfo->jinfo_restrictinfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(j);
- Expr *clause = rinfo->clause;
- int indexcol = 0;
- Oid *classes = index->classlist;
+ Expr *clause = rinfo->clause;
+ int indexcol = 0;
+ Oid *classes = index->classlist;
do
{
@@ -1398,11 +1401,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
default:
return NULL;
}
+
/*
* If there are no indexable joinclauses for this rel, exit quickly.
*/
if (bms_is_empty(rel->index_outer_relids))
return NULL;
+
/*
* Otherwise, we have to do path selection in the memory context of
* the given rel, so that any created path can be safely attached to
@@ -1410,10 +1415,11 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
* issue for normal planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
/*
- * Intersect the given outer_relids with index_outer_relids
- * to find the set of outer relids actually relevant for this index.
- * If there are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find
+ * the set of outer relids actually relevant for this index. If there
+ * are none, again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@@ -1422,11 +1428,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
MemoryContextSwitchTo(oldcontext);
return NULL;
}
+
/*
* Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
+ * relevant outerrels. (We include the isouterjoin status in the
* cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given innerrel.)
+ * necessary because it should always be the same for a given
+ * innerrel.)
*/
foreach(jlist, rel->index_inner_paths)
{
@@ -1441,15 +1449,15 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
}
/*
- * For each index of the rel, find the best path; then choose the
- * best overall. We cache the per-index results as well as the overall
- * result. (This is useful because different indexes may have different
- * relevant outerrel sets, so different overall outerrel sets might still
- * map to the same computation for a given index.)
+ * For each index of the rel, find the best path; then choose the best
+ * overall. We cache the per-index results as well as the overall
+ * result. (This is useful because different indexes may have
+ * different relevant outerrel sets, so different overall outerrel
+ * sets might still map to the same computation for a given index.)
*/
foreach(ilist, rel->indexlist)
{
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
Relids index_outer_relids;
Path *path = NULL;
@@ -1461,6 +1469,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
bms_free(index_outer_relids);
continue;
}
+
/*
* Look to see if we already computed the result for this index.
*/
@@ -1471,7 +1480,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
info->isouterjoin == isouterjoin)
{
path = info->best_innerpath;
- bms_free(index_outer_relids); /* not needed anymore */
+ bms_free(index_outer_relids); /* not needed anymore */
break;
}
}
@@ -1484,9 +1493,9 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
clausegroups = group_clauses_by_indexkey_for_join(root,
rel,
index,
- index_outer_relids,
+ index_outer_relids,
jointype,
- isouterjoin);
+ isouterjoin);
if (clausegroups)
{
/* make the path */
@@ -1548,9 +1557,9 @@ make_innerjoin_index_path(Query *root,
pathnode->path.parent = rel;
/*
- * There's no point in marking the path with any pathkeys, since
- * it will only ever be used as the inner path of a nestloop, and
- * so its ordering does not matter.
+ * There's no point in marking the path with any pathkeys, since it
+ * will only ever be used as the inner path of a nestloop, and so its
+ * ordering does not matter.
*/
pathnode->path.pathkeys = NIL;
@@ -1582,19 +1591,19 @@ make_innerjoin_index_path(Query *root,
/*
* We must compute the estimated number of output rows for the
- * indexscan. This is less than rel->rows because of the
- * additional selectivity of the join clauses. Since clausegroups
- * may contain both restriction and join clauses, we have to do a
- * set union to get the full set of clauses that must be
- * considered to compute the correct selectivity. (Without the union
- * operation, we might have some restriction clauses appearing twice,
- * which'd mislead restrictlist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * indexscan. This is less than rel->rows because of the additional
+ * selectivity of the join clauses. Since clausegroups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * restrictlist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
*
- * Always assume the join type is JOIN_INNER; even if some of the
- * join clauses come from other contexts, that's not our problem.
+ * Always assume the join type is JOIN_INNER; even if some of the join
+ * clauses come from other contexts, that's not our problem.
*/
allclauses = set_ptrUnion(rel->baserestrictinfo, allclauses);
pathnode->rows = rel->tuples *
@@ -1656,9 +1665,9 @@ match_index_to_operand(Node *operand,
else
{
/*
- * Index expression; find the correct expression. (This search could
- * be avoided, at the cost of complicating all the callers of this
- * routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search
+ * could be avoided, at the cost of complicating all the callers
+ * of this routine; doesn't seem worth it.)
*/
List *indexprs;
int i;
@@ -1677,6 +1686,7 @@ match_index_to_operand(Node *operand,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
indexkey = (Node *) lfirst(indexprs);
+
/*
* Does it match the operand? Again, strip any relabeling.
*/
@@ -1776,12 +1786,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@@ -1789,7 +1799,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
@@ -1797,7 +1807,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
@@ -1805,7 +1815,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
@@ -1831,9 +1841,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * We insist on the opclass being the specific one we expect,
- * else we'd do the wrong thing if someone were to make a reverse-sort
- * opclass with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd
+ * do the wrong thing if someone were to make a reverse-sort opclass
+ * with the same operators.
*/
switch (expr_op)
{
@@ -1896,7 +1906,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
* The input list is ordered by index key, and so the output list is too.
* (The latter is not depended on by any part of the planner, so far as I can
* tell; but some parts of the executor do assume that the indxqual list
- * ultimately delivered to the executor is so ordered. One such place is
+ * ultimately delivered to the executor is so ordered. One such place is
* _bt_orderkeys() in the btree support. Perhaps that ought to be fixed
* someday --- tgl 7/00)
*/
@@ -1930,7 +1940,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
} while (clausegroups != NIL && !DoneMatchingIndexKeys(classes));
- Assert(clausegroups == NIL); /* else more groups than indexkeys... */
+ Assert(clausegroups == NIL); /* else more groups than indexkeys... */
return FastListValue(&resultquals);
}
@@ -1953,11 +1963,12 @@ expand_indexqual_condition(Expr *clause, Oid opclass)
switch (expr_op)
{
- /*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by match_special_index_operator().
- */
+ /*
+ * LIKE and regex operators are not members of any index
+ * opclass, so if we find one in an indexqual list we can
+ * assume that it was accepted by
+ * match_special_index_operator().
+ */
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
case OID_NAME_LIKE_OP:
@@ -2061,22 +2072,22 @@ prefix_quals(Node *leftop, Oid opclass,
}
/*
- * If necessary, coerce the prefix constant to the right type.
- * The given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The
+ * given prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
- char *prefix;
+ char *prefix;
switch (prefix_const->consttype)
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
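
For illustration (not part of the patch): the prefix_quals() hunk above coerces a pattern's fixed prefix constant so it can be turned into ordinary btree range quals; conceptually a prefix 'abc' becomes key >= 'abc' AND key < 'abd'. The toy program below shows only that idea on plain C strings; the real planner code must also handle type coercion (as in the hunk above), multibyte encodings, and last-byte overflow, none of which this sketch attempts:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Toy prefix-to-range transformation: bump the last byte of the
     * prefix to get an exclusive upper bound ("abc" -> "abd").
     */
    static char *
    greater_prefix(const char *prefix)
    {
        size_t  len = strlen(prefix);
        char   *upper = strdup(prefix);

        if (len > 0)
            upper[len - 1]++;
        return upper;
    }

    int
    main(void)
    {
        const char *prefix = "abc";
        char       *upper = greater_prefix(prefix);

        printf("key >= '%s' AND key < '%s'\n", prefix, upper);
        free(upper);
        return 0;
    }
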
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index cf7c4ee4331..695b8c98411 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.79 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -300,7 +300,7 @@ sort_inner_and_outer(Query *root,
* We always generate a nestloop path for each available outer path.
* In fact we may generate as many as four: one on the cheapest-total-cost
* inner path, one on the same with materialization, one on the
- * cheapest-startup-cost inner path (if different),
+ * cheapest-startup-cost inner path (if different),
* and one on the best inner-indexscan path (if any).
*
* We also consider mergejoins if mergejoin clauses are available. We have
@@ -342,10 +342,10 @@ match_unsorted_outer(Query *root,
/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
- * doing a right or full join, we must use *all* the mergeclauses as join
- * clauses, else we will not have a valid plan. (Although these two
- * flags are currently inverses, keep them separate for clarity and
- * possible future changes.)
+ * doing a right or full join, we must use *all* the mergeclauses as
+ * join clauses, else we will not have a valid plan. (Although these
+ * two flags are currently inverses, keep them separate for clarity
+ * and possible future changes.)
*/
switch (jointype)
{
@@ -371,8 +371,8 @@ match_unsorted_outer(Query *root,
}
/*
- * If we need to unique-ify the inner path, we will consider only
- * the cheapest inner.
+ * If we need to unique-ify the inner path, we will consider only the
+ * cheapest inner.
*/
if (jointype == JOIN_UNIQUE_INNER)
{
@@ -384,9 +384,10 @@ match_unsorted_outer(Query *root,
else if (nestjoinOK)
{
/*
- * If the cheapest inner path is a join or seqscan, we should consider
- * materializing it. (This is a heuristic: we could consider it
- * always, but for inner indexscans it's probably a waste of time.)
+ * If the cheapest inner path is a join or seqscan, we should
+ * consider materializing it. (This is a heuristic: we could
+ * consider it always, but for inner indexscans it's probably a
+ * waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, TidPath)))
@@ -394,8 +395,8 @@ match_unsorted_outer(Query *root,
create_material_path(innerrel, inner_cheapest_total);
/*
- * Get the best innerjoin indexpath (if any) for this outer rel. It's
- * the same for all outer paths.
+ * Get the best innerjoin indexpath (if any) for this outer rel.
+ * It's the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@@ -414,8 +415,8 @@ match_unsorted_outer(Query *root,
int sortkeycnt;
/*
- * If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer.
+ * If we need to unique-ify the outer path, it's pointless to
+ * consider any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -709,7 +710,7 @@ hash_inner_and_outer(Query *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@@ -727,9 +728,9 @@ hash_inner_and_outer(Query *root,
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
*/
- Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
- Path *cheapest_total_outer = outerrel->cheapest_total_path;
- Path *cheapest_total_inner = innerrel->cheapest_total_path;
+ Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
+ Path *cheapest_total_outer = outerrel->cheapest_total_path;
+ Path *cheapest_total_inner = innerrel->cheapest_total_path;
/* Unique-ify if need be */
if (jointype == JOIN_UNIQUE_OUTER)
@@ -840,7 +841,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
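
For illustration (not part of the patch): both hunks above decide which side of a join clause is the inner one by testing relid subsets. Pulled out of context, the test amounts to the helper below; the enum and function name are invented, and only the bms_is_subset() checks mirror the surrounding code:

    typedef enum
    {
        CLAUSE_OUTER_LEFT,      /* lefthand side is outer, righthand inner */
        CLAUSE_OUTER_RIGHT,     /* righthand side is outer, lefthand inner */
        CLAUSE_UNUSABLE         /* clause mixes rels from both sides */
    } ClauseSide;

    static ClauseSide
    classify_join_clause(RestrictInfo *restrictinfo,
                         RelOptInfo *outerrel, RelOptInfo *innerrel)
    {
        if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
            bms_is_subset(restrictinfo->right_relids, innerrel->relids))
            return CLAUSE_OUTER_LEFT;
        if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
            bms_is_subset(restrictinfo->right_relids, outerrel->relids))
            return CLAUSE_OUTER_RIGHT;
        return CLAUSE_UNUSABLE;
    }
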
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 023bc397840..81e5080e4b7 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.61 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.62 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,11 +19,11 @@
static List *make_rels_by_clause_joins(Query *root,
- RelOptInfo *old_rel,
- List *other_rels);
+ RelOptInfo *old_rel,
+ List *other_rels);
static List *make_rels_by_clauseless_joins(Query *root,
- RelOptInfo *old_rel,
- List *other_rels);
+ RelOptInfo *old_rel,
+ List *other_rels);
/*
@@ -417,8 +417,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we are implementing IN clauses as joins, there are some joins
- * that are illegal. Check to see if the proposed join is trouble.
- * We can skip the work if looking at an outer join, however, because
+ * that are illegal. Check to see if the proposed join is trouble. We
+ * can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
*/
if (jointype == JOIN_INNER)
@@ -430,8 +430,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
/*
- * Cannot join if proposed join contains part, but only
- * part, of the RHS, *and* it contains rels not in the RHS.
+ * Cannot join if proposed join contains part, but only part,
+ * of the RHS, *and* it contains rels not in the RHS.
*/
if (bms_overlap(ininfo->righthand, joinrelids) &&
!bms_is_subset(ininfo->righthand, joinrelids) &&
@@ -442,16 +442,17 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
- * No issue unless we are looking at a join of the IN's RHS
- * to other stuff.
+ * No issue unless we are looking at a join of the IN's RHS to
+ * other stuff.
*/
- if (! (bms_is_subset(ininfo->righthand, joinrelids) &&
- !bms_equal(ininfo->righthand, joinrelids)))
+ if (!(bms_is_subset(ininfo->righthand, joinrelids) &&
+ !bms_equal(ininfo->righthand, joinrelids)))
continue;
+
/*
- * If we already joined IN's RHS to any part of its LHS in either
- * input path, then this join is not constrained (the necessary
- * work was done at a lower level).
+ * If we already joined IN's RHS to any part of its LHS in
+ * either input path, then this join is not constrained (the
+ * necessary work was done at a lower level).
*/
if (bms_overlap(ininfo->lefthand, rel1->relids) &&
bms_is_subset(ininfo->righthand, rel1->relids))
@@ -459,6 +460,7 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
if (bms_overlap(ininfo->lefthand, rel2->relids) &&
bms_is_subset(ininfo->righthand, rel2->relids))
continue;
+
/*
* JOIN_IN technique will work if outerrel includes LHS and
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
@@ -478,22 +480,14 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
if (bms_is_subset(ininfo->lefthand, rel1->relids) &&
bms_equal(ininfo->righthand, rel2->relids))
- {
jointype = JOIN_IN;
- }
else if (bms_is_subset(ininfo->lefthand, rel2->relids) &&
bms_equal(ininfo->righthand, rel1->relids))
- {
jointype = JOIN_REVERSE_IN;
- }
else if (bms_equal(ininfo->righthand, rel1->relids))
- {
jointype = JOIN_UNIQUE_OUTER;
- }
else if (bms_equal(ininfo->righthand, rel2->relids))
- {
jointype = JOIN_UNIQUE_INNER;
- }
else
{
/* invalid join path */
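
For illustration (not part of the patch): the jointype selection just above reads as a small decision function over the IN clause's lefthand/righthand relid sets. A condensed sketch (the helper name and the JOIN_INNER fallback are invented; the real code marks the path invalid instead):

    static JoinType
    choose_in_jointype(InClauseInfo *ininfo,
                       RelOptInfo *rel1, RelOptInfo *rel2)
    {
        if (bms_is_subset(ininfo->lefthand, rel1->relids) &&
            bms_equal(ininfo->righthand, rel2->relids))
            return JOIN_IN;         /* outer covers LHS, inner is exactly RHS */
        if (bms_is_subset(ininfo->lefthand, rel2->relids) &&
            bms_equal(ininfo->righthand, rel1->relids))
            return JOIN_REVERSE_IN;
        if (bms_equal(ininfo->righthand, rel1->relids))
            return JOIN_UNIQUE_OUTER;   /* unique-ify the RHS input */
        if (bms_equal(ininfo->righthand, rel2->relids))
            return JOIN_UNIQUE_INNER;
        return JOIN_INNER;          /* caller would treat this as invalid */
    }
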
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index a078b3f5a93..40d2de41417 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.51 2003/06/15 22:51:45 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,7 +99,7 @@ create_or_index_paths(Query *root, RelOptInfo *rel)
best_or_subclause_indices(root,
rel,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices,
pathnode);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 9fec73e2603..beb51a69966 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.51 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,8 +198,8 @@ generate_implied_equalities(Query *root)
/*
* Collect info about relids mentioned in each item. For this
* routine we only really care whether there are any at all in
- * each item, but process_implied_equality() needs the exact
- * sets, so we may as well pull them here.
+ * each item, but process_implied_equality() needs the exact sets,
+ * so we may as well pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@@ -233,8 +233,8 @@ generate_implied_equalities(Query *root)
/*
* If it's "const = const" then just ignore it altogether.
- * There is no place in the restrictinfo structure to store
- * it. (If the two consts are in fact unequal, then
+ * There is no place in the restrictinfo structure to
+ * store it. (If the two consts are in fact unequal, then
* propagating the comparison to Vars will cause us to
* produce zero rows out, as expected.)
*/
@@ -242,12 +242,12 @@ generate_implied_equalities(Query *root)
{
/*
* Tell process_implied_equality to delete the clause,
- * not add it, if it's "var = var" and we have constants
- * present in the list.
+ * not add it, if it's "var = var" and we have
+ * constants present in the list.
*/
- bool delete_it = (have_consts &&
- i1_is_variable &&
- i2_is_variable);
+ bool delete_it = (have_consts &&
+ i1_is_variable &&
+ i2_is_variable);
process_implied_equality(root,
item1->key, item2->key,
@@ -751,20 +751,21 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
* element might match none, one, or more of the output columns
* that are visible to the outer query. This means we may have
* multiple possible representations of the sub_pathkey in the
- * context of the outer query. Ideally we would generate them all
- * and put them all into a pathkey list of the outer query, thereby
- * propagating equality knowledge up to the outer query. Right now
- * we cannot do so, because the outer query's canonical pathkey
- * sets are already frozen when this is called. Instead we prefer
- * the one that has the highest "score" (number of canonical pathkey
- * peers, plus one if it matches the outer query_pathkeys).
- * This is the most likely to be useful in the outer query.
+ * context of the outer query. Ideally we would generate them all
+ * and put them all into a pathkey list of the outer query,
+ * thereby propagating equality knowledge up to the outer query.
+ * Right now we cannot do so, because the outer query's canonical
+ * pathkey sets are already frozen when this is called. Instead
+ * we prefer the one that has the highest "score" (number of
+ * canonical pathkey peers, plus one if it matches the outer
+ * query_pathkeys). This is the most likely to be useful in the
+ * outer query.
*/
foreach(j, sub_pathkey)
{
PathKeyItem *sub_item = (PathKeyItem *) lfirst(j);
- Node *sub_key = sub_item->key;
- List *k;
+ Node *sub_key = sub_item->key;
+ List *k;
foreach(k, subquery->targetList)
{
@@ -774,9 +775,9 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
equal(tle->expr, sub_key))
{
/* Found a representation for this sub_key */
- Var *outer_var;
+ Var *outer_var;
PathKeyItem *outer_item;
- int score;
+ int score;
outer_var = makeVar(rel->relid,
tle->resdom->resno,
@@ -802,8 +803,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
}
/*
- * If we couldn't find a representation of this sub_pathkey,
- * we're done (we can't use the ones to its right, either).
+ * If we couldn't find a representation of this sub_pathkey, we're
+ * done (we can't use the ones to its right, either).
*/
if (!best_item)
break;
@@ -812,8 +813,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
cpathkey = make_canonical_pathkey(root, best_item);
/*
- * Eliminate redundant ordering info; could happen if outer
- * query equijoins subquery keys...
+ * Eliminate redundant ordering info; could happen if outer query
+ * equijoins subquery keys...
*/
if (!ptrMember(cpathkey, retval))
{
@@ -920,7 +921,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
* many times when dealing with a many-relation query.
*
* We have to be careful that the cached values are palloc'd in the same
- * context the RestrictInfo node itself is in. This is not currently a
+ * context the RestrictInfo node itself is in. This is not currently a
* problem for normal planning, but it is an issue for GEQO planning.
*/
void
@@ -1090,7 +1091,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
elog(ERROR, "could not identify which side of mergeclause to use");
- pathkey = NIL; /* keep compiler quiet */
+ pathkey = NIL; /* keep compiler quiet */
}
/*
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 761f03b967c..60093ec5e3d 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.14 2003/02/08 20:20:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.15 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,7 +27,7 @@
static List *TidqualFromRestrictinfo(Relids relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
-static Node *TidequalClause(int varno, OpExpr *node);
+static Node *TidequalClause(int varno, OpExpr * node);
static List *TidqualFromExpr(int varno, Expr *expr);
static bool
@@ -66,7 +66,7 @@ isEvaluable(int varno, Node *node)
* or the left node if the opclause is ....=CTID
*/
static Node *
-TidequalClause(int varno, OpExpr *node)
+TidequalClause(int varno, OpExpr * node)
{
Node *rnode = NULL,
*arg1,
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 9ac746d34a9..e4e7490d82a 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.149 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.150 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,9 +40,9 @@ static bool use_physical_tlist(RelOptInfo *rel);
static void disuse_physical_tlist(Plan *plan, Path *path);
static Join *create_join_plan(Query *root, JoinPath *best_path);
static Append *create_append_plan(Query *root, AppendPath *best_path);
-static Result *create_result_plan(Query *root, ResultPath *best_path);
-static Material *create_material_plan(Query *root, MaterialPath *best_path);
-static Plan *create_unique_plan(Query *root, UniquePath *best_path);
+static Result *create_result_plan(Query *root, ResultPath * best_path);
+static Material *create_material_plan(Query *root, MaterialPath * best_path);
+static Plan *create_unique_plan(Query *root, UniquePath * best_path);
static SeqScan *create_seqscan_plan(Path *best_path, List *tlist,
List *scan_clauses);
static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
@@ -63,9 +63,9 @@ static void fix_indxqual_references(List *indexquals, IndexPath *index_path,
List **fixed_indexquals,
List **recheck_indexquals);
static void fix_indxqual_sublist(List *indexqual,
- Relids baserelids, int baserelid,
- IndexOptInfo *index,
- List **fixed_quals, List **recheck_quals);
+ Relids baserelids, int baserelid,
+ IndexOptInfo *index,
+ List **fixed_quals, List **recheck_quals);
static Node *fix_indxqual_operand(Node *node, int baserelid,
IndexOptInfo *index,
Oid *opclass);
@@ -98,9 +98,9 @@ static MergeJoin *make_mergejoin(List *tlist,
Plan *lefttree, Plan *righttree,
JoinType jointype);
static Sort *make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
- AttrNumber *sortColIdx, Oid *sortOperators);
+ AttrNumber *sortColIdx, Oid *sortOperators);
static Sort *make_sort_from_pathkeys(Query *root, Plan *lefttree,
- Relids relids, List *pathkeys);
+ Relids relids, List *pathkeys);
/*
@@ -148,7 +148,7 @@ create_plan(Query *root, Path *best_path)
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
- (MaterialPath *) best_path);
+ (MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
@@ -192,12 +192,12 @@ create_scan_plan(Query *root, Path *best_path)
Scan *plan;
/*
- * For table scans, rather than using the relation targetlist (which is
- * only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
- * optimize away projection of the table tuples, if possible. (Note that
- * planner.c may replace the tlist we generate here, forcing projection to
- * occur.)
+ * For table scans, rather than using the relation targetlist (which
+ * is only those Vars actually needed by the query), we prefer to
+ * generate a tlist containing all Vars in order. This will allow the
+ * executor to optimize away projection of the table tuples, if
+ * possible. (Note that planner.c may replace the tlist we generate
+ * here, forcing projection to occur.)
*/
if (use_physical_tlist(rel))
{
@@ -274,8 +274,8 @@ build_relation_tlist(RelOptInfo *rel)
FastListInit(&tlist);
foreach(v, FastListValue(&rel->reltargetlist))
{
- /* Do we really need to copy here? Not sure */
- Var *var = (Var *) copyObject(lfirst(v));
+ /* Do we really need to copy here? Not sure */
+ Var *var = (Var *) copyObject(lfirst(v));
FastAppend(&tlist, create_tl_element(var, resdomno));
resdomno++;
@@ -294,22 +294,24 @@ use_physical_tlist(RelOptInfo *rel)
int i;
/*
- * Currently, can't do this for subquery or function scans. (This
- * is mainly because we don't have an equivalent of build_physical_tlist
+ * Currently, can't do this for subquery or function scans. (This is
+ * mainly because we don't have an equivalent of build_physical_tlist
* for them; worth adding?)
*/
if (rel->rtekind != RTE_RELATION)
return false;
+
/*
* Can't do it with inheritance cases either (mainly because Append
* doesn't project).
*/
if (rel->reloptkind != RELOPT_BASEREL)
return false;
+
/*
- * Can't do it if any system columns are requested, either. (This could
- * possibly be fixed but would take some fragile assumptions in setrefs.c,
- * I think.)
+ * Can't do it if any system columns are requested, either. (This
+ * could possibly be fixed but would take some fragile assumptions in
+ * setrefs.c, I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
@@ -325,7 +327,7 @@ use_physical_tlist(RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -441,7 +443,7 @@ create_append_plan(Query *root, AppendPath *best_path)
* Returns a Plan node.
*/
static Result *
-create_result_plan(Query *root, ResultPath *best_path)
+create_result_plan(Query *root, ResultPath * best_path)
{
Result *plan;
List *tlist;
@@ -473,7 +475,7 @@ create_result_plan(Query *root, ResultPath *best_path)
* Returns a Plan node.
*/
static Material *
-create_material_plan(Query *root, MaterialPath *best_path)
+create_material_plan(Query *root, MaterialPath * best_path)
{
Material *plan;
Plan *subplan;
@@ -498,7 +500,7 @@ create_material_plan(Query *root, MaterialPath *best_path)
* Returns a Plan node.
*/
static Plan *
-create_unique_plan(Query *root, UniquePath *best_path)
+create_unique_plan(Query *root, UniquePath * best_path)
{
Plan *plan;
Plan *subplan;
@@ -509,9 +511,9 @@ create_unique_plan(Query *root, UniquePath *best_path)
subplan = create_plan(root, best_path->subpath);
/*
- * If the subplan came from an IN subselect (currently always the case),
- * we need to instantiate the correct output targetlist for the subselect,
- * rather than using the flattened tlist.
+ * If the subplan came from an IN subselect (currently always the
+ * case), we need to instantiate the correct output targetlist for the
+ * subselect, rather than using the flattened tlist.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@@ -530,8 +532,8 @@ create_unique_plan(Query *root, UniquePath *best_path)
/*
* Transform list of plain Vars into targetlist
*/
- List *newtlist = NIL;
- int resno = 1;
+ List *newtlist = NIL;
+ int resno = 1;
foreach(l, sub_targetlist)
{
@@ -547,12 +549,13 @@ create_unique_plan(Query *root, UniquePath *best_path)
newtlist = lappend(newtlist, tle);
resno++;
}
+
/*
* If the top plan node can't do projections, we need to add a
* Result node to help it along.
*
- * Currently, the only non-projection-capable plan type
- * we can see here is Append.
+ * Currently, the only non-projection-capable plan type we can see
+ * here is Append.
*/
if (IsA(subplan, Append))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
@@ -564,16 +567,16 @@ create_unique_plan(Query *root, UniquePath *best_path)
if (best_path->use_hash)
{
- int numGroupCols = length(my_tlist);
- long numGroups;
+ int numGroupCols = length(my_tlist);
+ long numGroups;
AttrNumber *groupColIdx;
- int i;
+ int i;
numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
for (i = 0; i < numGroupCols; i++)
- groupColIdx[i] = i+1;
+ groupColIdx[i] = i + 1;
plan = (Plan *) make_agg(root,
my_tlist,
@@ -700,9 +703,7 @@ create_indexscan_plan(Query *root,
FastListInit(&orclauses);
foreach(orclause, indxqual)
- {
FastAppend(&orclauses, make_ands_explicit(lfirst(orclause)));
- }
indxqual_or_expr = make_orclause(FastListValue(&orclauses));
qpqual = set_difference(scan_clauses, makeList1(indxqual_or_expr));
@@ -861,9 +862,9 @@ create_nestloop_plan(Query *root,
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
- * with the index, we may remove those join clauses from the list of
- * clauses that have to be checked as qpquals at the join node ---
- * but only if there's just one indexscan in the inner path
+ * with the index, we may remove those join clauses from the list
+ * of clauses that have to be checked as qpquals at the join node
+ * --- but only if there's just one indexscan in the inner path
* (otherwise, several different sets of clauses are being ORed
* together).
*
@@ -873,13 +874,14 @@ create_nestloop_plan(Query *root,
* been put in the same joininfo list.
*
* This would be a waste of time if the indexpath was an ordinary
- * indexpath and not a special innerjoin path. We will skip it in
- * that case since indexjoinclauses is NIL in an ordinary indexpath.
+ * indexpath and not a special innerjoin path. We will skip it in
+ * that case since indexjoinclauses is NIL in an ordinary
+ * indexpath.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
List *indexjoinclauses = innerpath->indexjoinclauses;
- if (length(indexjoinclauses) == 1) /* single indexscan? */
+ if (length(indexjoinclauses) == 1) /* single indexscan? */
{
joinrestrictclauses =
select_nonredundant_join_clauses(root,
@@ -947,11 +949,11 @@ create_mergejoin_plan(Query *root,
joinclauses = set_difference(joinclauses, mergeclauses);
/*
- * Rearrange mergeclauses, if needed, so that the outer variable
- * is always on the left.
+ * Rearrange mergeclauses, if needed, so that the outer variable is
+ * always on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/*
* Create explicit sort nodes for the outer and inner join paths if
@@ -964,7 +966,7 @@ create_mergejoin_plan(Query *root,
outer_plan = (Plan *)
make_sort_from_pathkeys(root,
outer_plan,
- best_path->jpath.outerjoinpath->parent->relids,
+ best_path->jpath.outerjoinpath->parent->relids,
best_path->outersortkeys);
}
@@ -974,7 +976,7 @@ create_mergejoin_plan(Query *root,
inner_plan = (Plan *)
make_sort_from_pathkeys(root,
inner_plan,
- best_path->jpath.innerjoinpath->parent->relids,
+ best_path->jpath.innerjoinpath->parent->relids,
best_path->innersortkeys);
}
@@ -1030,21 +1032,19 @@ create_hashjoin_plan(Query *root,
joinclauses = set_difference(joinclauses, hashclauses);
/*
- * Rearrange hashclauses, if needed, so that the outer variable
- * is always on the left.
+ * Rearrange hashclauses, if needed, so that the outer variable is
+ * always on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/*
- * Extract the inner hash keys (right-hand operands of the hashclauses)
- * to put in the Hash node.
+ * Extract the inner hash keys (right-hand operands of the
+ * hashclauses) to put in the Hash node.
*/
innerhashkeys = NIL;
foreach(hcl, hashclauses)
- {
innerhashkeys = lappend(innerhashkeys, get_rightop(lfirst(hcl)));
- }
/* We don't want any excess columns in the hashed tuples */
disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
@@ -1362,7 +1362,7 @@ order_qual_clauses(Query *root, List *clauses)
FastListInit(&withsubplans);
foreach(l, clauses)
{
- Node *clause = lfirst(l);
+ Node *clause = lfirst(l);
if (contain_subplans(clause))
FastAppend(&withsubplans, clause);
@@ -1507,8 +1507,8 @@ make_subqueryscan(List *qptlist,
/*
* Cost is figured here for the convenience of prepunion.c. Note this
- * is only correct for the case where qpqual is empty; otherwise caller
- * should overwrite cost with a better estimate.
+ * is only correct for the case where qpqual is empty; otherwise
+ * caller should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
@@ -1709,7 +1709,7 @@ make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
* once as a sort key column; if so, the extra mentions are redundant.
*
* Caller is assumed to have allocated the arrays large enough for the
- * max possible number of columns. Return value is the new column count.
+ * max possible number of columns. Return value is the new column count.
*/
static int
add_sort_column(AttrNumber colIdx, Oid sortOp,
@@ -1777,8 +1777,8 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
- * available Var in the tlist. If there isn't any, use the
- * first one that is an expression in the input's vars.
+ * available Var in the tlist. If there isn't any, use the first
+ * one that is an expression in the input's vars.
*
* XXX if we have a choice, is there any way of figuring out which
* might be cheapest to execute? (For example, int4lt is likely
@@ -1805,17 +1805,19 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
}
if (!j)
elog(ERROR, "could not find pathkey item to sort");
+
/*
* Do we need to insert a Result node?
*
- * Currently, the only non-projection-capable plan type
- * we can see here is Append.
+ * Currently, the only non-projection-capable plan type we can
+ * see here is Append.
*/
if (IsA(lefttree, Append))
{
tlist = copyObject(tlist);
lefttree = (Plan *) make_result(tlist, NULL, lefttree);
}
+
/*
* Add resjunk entry to input's tlist
*/
@@ -1827,8 +1829,9 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
tlist = lappend(tlist,
makeTargetEntry(resdom,
(Expr *) pathkey->key));
- lefttree->targetlist = tlist; /* just in case NIL before */
+ lefttree->targetlist = tlist; /* just in case NIL before */
}
+
/*
* The column might already be selected as a sort key, if the
* pathkeys contain duplicate entries. (This can happen in
@@ -1836,7 +1839,7 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
* var, for example.) So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(resdom->resno, pathkey->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
@@ -1881,10 +1884,11 @@ make_sort_from_sortclauses(Query *root, List *tlist,
/*
* Check for the possibility of duplicate order-by clauses --- the
- * parser should have removed 'em, but no point in sorting redundantly.
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, sortcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
@@ -1938,10 +1942,11 @@ make_sort_from_groupcols(Query *root,
/*
* Check for the possibility of duplicate group-by clauses --- the
- * parser should have removed 'em, but no point in sorting redundantly.
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, grpcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
grpno++;
}
@@ -1973,7 +1978,7 @@ make_material(List *tlist, Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -2032,8 +2037,8 @@ make_agg(Query *root, List *tlist, List *qual,
plan->total_cost = agg_path.total_cost;
/*
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple
+ * per group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
@@ -2041,10 +2046,10 @@ make_agg(Query *root, List *tlist, List *qual,
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual
- * (ie, the HAVING clause) and the tlist. Note that cost_qual_eval
- * doesn't charge anything for Aggref nodes; this is okay since
- * they are really comparable to Vars.
+ * We also need to account for the cost of evaluation of the qual (ie,
+ * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
+ * charge anything for Aggref nodes; this is okay since they are
+ * really comparable to Vars.
*
* See notes in grouping_planner about why this routine and make_group
* are the only ones in this file that worry about tlist eval cost.
@@ -2100,13 +2105,13 @@ make_group(Query *root,
/*
* We also need to account for the cost of evaluation of the tlist.
*
- * XXX this double-counts the cost of evaluation of any expressions
- * used for grouping, since in reality those will have been evaluated
- * at a lower plan level and will only be copied by the Group node.
- * Worth fixing?
+ * XXX this double-counts the cost of evaluation of any expressions used
+ * for grouping, since in reality those will have been evaluated at a
+ * lower plan level and will only be copied by the Group node. Worth
+ * fixing?
*
- * See notes in grouping_planner about why this routine and make_agg
- * are the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_agg are
+ * the only ones in this file that worry about tlist eval cost.
*/
cost_qual_eval(&qual_cost, tlist);
plan->startup_cost += qual_cost.startup;
@@ -2139,15 +2144,15 @@ make_unique(List *tlist, Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX probably
- * this is an overestimate.)
+ * assume all columns get compared at most of the tuples. (XXX
+ * probably this is an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
* plan->plan_rows is left as a copy of the input subplan's plan_rows;
- * ie, we assume the filter removes nothing. The caller must alter this
- * if he has a better idea.
+ * ie, we assume the filter removes nothing. The caller must alter
+ * this if he has a better idea.
*/
plan->targetlist = tlist;
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index afcb4528326..61edf1c22d4 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.88 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.89 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,12 +36,12 @@
static void mark_baserels_for_outer_join(Query *root, Relids rels,
Relids outerrels);
static void distribute_qual_to_rels(Query *root, Node *clause,
- bool ispusheddown,
- bool isdeduced,
- Relids outerjoin_nonnullable,
- Relids qualscope);
+ bool ispusheddown,
+ bool isdeduced,
+ Relids outerjoin_nonnullable,
+ Relids qualscope);
static void add_vars_to_targetlist(Query *root, List *vars,
- Relids where_needed);
+ Relids where_needed);
static bool qual_is_redundant(Query *root, RestrictInfo *restrictinfo,
List *restrictlist);
static void check_mergejoinable(RestrictInfo *restrictinfo);
@@ -83,9 +83,7 @@ add_base_rels_to_query(Query *root, Node *jtnode)
List *l;
foreach(l, f->fromlist)
- {
add_base_rels_to_query(root, lfirst(l));
- }
}
else if (IsA(jtnode, JoinExpr))
{
@@ -93,13 +91,14 @@ add_base_rels_to_query(Query *root, Node *jtnode)
add_base_rels_to_query(root, j->larg);
add_base_rels_to_query(root, j->rarg);
+
/*
* Safety check: join RTEs should not be SELECT FOR UPDATE targets
*/
if (intMember(j->rtindex, root->rowMarks))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE cannot be applied to a join")));
+ errmsg("SELECT FOR UPDATE cannot be applied to a join")));
}
else
elog(ERROR, "unrecognized node type: %d",
@@ -247,14 +246,14 @@ distribute_quals_to_rels(Query *root, Node *jtnode)
* Order of operations here is subtle and critical. First we
* recurse to handle sub-JOINs. Their join quals will be placed
* without regard for whether this level is an outer join, which
- * is correct. Then we place our own join quals, which are restricted
- * by lower outer joins in any case, and are forced to this level if
- * this is an outer join and they mention the outer side. Finally, if
- * this is an outer join, we mark baserels contained within the inner
- * side(s) with our own rel set; this will prevent quals above us in
- * the join tree that use those rels from being pushed down below this
- * level. (It's okay for upper quals to be pushed down to the outer
- * side, however.)
+ * is correct. Then we place our own join quals, which are
+ * restricted by lower outer joins in any case, and are forced to
+ * this level if this is an outer join and they mention the outer
+ * side. Finally, if this is an outer join, we mark baserels
+ * contained within the inner side(s) with our own rel set; this
+ * will prevent quals above us in the join tree that use those
+ * rels from being pushed down below this level. (It's okay for
+ * upper quals to be pushed down to the outer side, however.)
*/
leftids = distribute_quals_to_rels(root, j->larg);
rightids = distribute_quals_to_rels(root, j->rarg);
@@ -390,9 +389,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
restrictinfo->clause = (Expr *) clause;
restrictinfo->subclauseindices = NIL;
- restrictinfo->eval_cost.startup = -1; /* not computed until needed */
+ restrictinfo->eval_cost.startup = -1; /* not computed until
+ * needed */
restrictinfo->this_selec = -1; /* not computed until needed */
- restrictinfo->left_relids = NULL; /* set below, if join clause */
+ restrictinfo->left_relids = NULL; /* set below, if join clause */
restrictinfo->right_relids = NULL;
restrictinfo->mergejoinoperator = InvalidOid;
restrictinfo->left_sortop = InvalidOid;
@@ -435,10 +435,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
if (isdeduced)
{
/*
- * If the qual came from implied-equality deduction, we can evaluate
- * the qual at its natural semantic level. It is not affected by
- * any outer-join rules (else we'd not have decided the vars were
- * equal).
+ * If the qual came from implied-equality deduction, we can
+ * evaluate the qual at its natural semantic level. It is not
+ * affected by any outer-join rules (else we'd not have decided
+ * the vars were equal).
*/
Assert(bms_equal(relids, qualscope));
can_be_equijoin = true;
@@ -446,12 +446,13 @@ distribute_qual_to_rels(Query *root, Node *clause,
else if (bms_overlap(relids, outerjoin_nonnullable))
{
/*
- * The qual is attached to an outer join and mentions (some of the)
- * rels on the nonnullable side. Force the qual to be evaluated
- * exactly at the level of joining corresponding to the outer join.
- * We cannot let it get pushed down into the nonnullable side, since
- * then we'd produce no output rows, rather than the intended single
- * null-extended row, for any nonnullable-side rows failing the qual.
+ * The qual is attached to an outer join and mentions (some of
+ * the) rels on the nonnullable side. Force the qual to be
+ * evaluated exactly at the level of joining corresponding to the
+ * outer join. We cannot let it get pushed down into the
+ * nonnullable side, since then we'd produce no output rows,
+ * rather than the intended single null-extended row, for any
+ * nonnullable-side rows failing the qual.
*
* Note: an outer-join qual that mentions only nullable-side rels can
* be pushed down into the nullable side without changing the join
@@ -464,13 +465,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
{
/*
* For a non-outer-join qual, we can evaluate the qual as soon as
- * (1) we have all the rels it mentions, and (2) we are at or above
- * any outer joins that can null any of these rels and are below the
- * syntactic location of the given qual. To enforce the latter, scan
- * the base rels listed in relids, and merge their outer-join sets
- * into the clause's own reference list. At the time we are called,
- * the outerjoinset of each baserel will show exactly those outer
- * joins that are below the qual in the join tree.
+ * (1) we have all the rels it mentions, and (2) we are at or
+ * above any outer joins that can null any of these rels and are
+ * below the syntactic location of the given qual. To enforce the
+ * latter, scan the base rels listed in relids, and merge their
+ * outer-join sets into the clause's own reference list. At the
+ * time we are called, the outerjoinset of each baserel will show
+ * exactly those outer joins that are below the qual in the join
+ * tree.
*/
Relids addrelids = NULL;
Relids tmprelids;
@@ -496,9 +498,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
relids = bms_union(relids, addrelids);
/* Should still be a subset of current scope ... */
Assert(bms_is_subset(relids, qualscope));
+
/*
- * Because application of the qual will be delayed by outer join,
- * we mustn't assume its vars are equal everywhere.
+ * Because application of the qual will be delayed by outer
+ * join, we mustn't assume its vars are equal everywhere.
*/
can_be_equijoin = false;
}
@@ -518,6 +521,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
switch (bms_membership(relids))
{
case BMS_SINGLETON:
+
/*
* There is only one relation participating in 'clause', so
* 'clause' is a restriction clause for that relation.
@@ -525,28 +529,29 @@ distribute_qual_to_rels(Query *root, Node *clause,
rel = find_base_rel(root, bms_singleton_member(relids));
/*
- * Check for a "mergejoinable" clause even though it's not a join
- * clause. This is so that we can recognize that "a.x = a.y"
- * makes x and y eligible to be considered equal, even when they
- * belong to the same rel. Without this, we would not recognize
- * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
- * consider z and q equal after their rels are joined.
+ * Check for a "mergejoinable" clause even though it's not a
+ * join clause. This is so that we can recognize that "a.x =
+ * a.y" makes x and y eligible to be considered equal, even
+ * when they belong to the same rel. Without this, we would
+ * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
+ * allows us to consider z and q equal after their rels are
+ * joined.
*/
if (can_be_equijoin)
check_mergejoinable(restrictinfo);
/*
- * If the clause was deduced from implied equality, check to see
- * whether it is redundant with restriction clauses we already
- * have for this rel. Note we cannot apply this check to
- * user-written clauses, since we haven't found the canonical
- * pathkey sets yet while processing user clauses. (NB: no
- * comparable check is done in the join-clause case; redundancy
- * will be detected when the join clause is moved into a join
- * rel's restriction list.)
+ * If the clause was deduced from implied equality, check to
+ * see whether it is redundant with restriction clauses we
+ * already have for this rel. Note we cannot apply this check
+ * to user-written clauses, since we haven't found the
+ * canonical pathkey sets yet while processing user clauses.
+ * (NB: no comparable check is done in the join-clause case;
+ * redundancy will be detected when the join clause is moved
+ * into a join rel's restriction list.)
*/
if (!isdeduced ||
- !qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
+ !qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
{
/* Add clause to rel's restriction list */
rel->baserestrictinfo = lappend(rel->baserestrictinfo,
@@ -554,13 +559,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
}
break;
case BMS_MULTIPLE:
+
/*
- * 'clause' is a join clause, since there is more than one rel in
- * the relid set. Set additional RestrictInfo fields for
- * joining. First, does it look like a normal join clause, i.e.,
- * a binary operator relating expressions that come from distinct
- * relations? If so we might be able to use it in a join
- * algorithm.
+ * 'clause' is a join clause, since there is more than one rel
+ * in the relid set. Set additional RestrictInfo fields for
+ * joining. First, does it look like a normal join clause,
+ * i.e., a binary operator relating expressions that come from
+ * distinct relations? If so we might be able to use it in a
+ * join algorithm.
*/
if (is_opclause(clause) && length(((OpExpr *) clause)->args) == 2)
{
@@ -582,9 +588,9 @@ distribute_qual_to_rels(Query *root, Node *clause,
* Now check for hash or mergejoinable operators.
*
* We don't bother setting the hashjoin info if we're not going
- * to need it. We do want to know about mergejoinable ops in all
- * cases, however, because we use mergejoinable ops for other
- * purposes such as detecting redundant clauses.
+ * to need it. We do want to know about mergejoinable ops in
+ * all cases, however, because we use mergejoinable ops for
+ * other purposes such as detecting redundant clauses.
*/
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
@@ -597,16 +603,18 @@ distribute_qual_to_rels(Query *root, Node *clause,
/*
* Add vars used in the join clause to targetlists of their
- * relations, so that they will be emitted by the plan nodes that
- * scan those relations (else they won't be available at the join
- * node!).
+ * relations, so that they will be emitted by the plan nodes
+ * that scan those relations (else they won't be available at
+ * the join node!).
*/
add_vars_to_targetlist(root, vars, relids);
break;
default:
+
/*
- * 'clause' references no rels, and therefore we have no place to
- * attach it. Shouldn't get here if callers are working properly.
+ * 'clause' references no rels, and therefore we have no place
+ * to attach it. Shouldn't get here if callers are working
+ * properly.
*/
elog(ERROR, "cannot cope with variable-free clause");
break;
@@ -634,7 +642,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
*
* This processing is a consequence of transitivity of mergejoin equality:
* if we have mergejoinable clauses A = B and B = C, we can deduce A = C
- * (where = is an appropriate mergejoinable operator). See path/pathkeys.c
+ * (where = is an appropriate mergejoinable operator). See path/pathkeys.c
* for more details.
*/
void
@@ -695,8 +703,8 @@ process_implied_equality(Query *root,
}
/*
- * Scan to see if equality is already known. If so, we're done in
- * the add case, and done after removing it in the delete case.
+ * Scan to see if equality is already known. If so, we're done in the
+ * add case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
@@ -719,7 +727,7 @@ process_implied_equality(Query *root,
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = lremove(restrictinfo,
- rel1->baserestrictinfo);
+ rel1->baserestrictinfo);
}
else
{
@@ -768,9 +776,9 @@ process_implied_equality(Query *root,
errmsg("equality operator for types %s and %s should be mergejoinable, but isn't",
format_type_be(ltype), format_type_be(rtype))));
- clause = make_opclause(oprid(eq_operator), /* opno */
- BOOLOID, /* opresulttype */
- false, /* opretset */
+ clause = make_opclause(oprid(eq_operator), /* opno */
+ BOOLOID, /* opresulttype */
+ false, /* opretset */
(Expr *) item1,
(Expr *) item2);
@@ -797,9 +805,9 @@ process_implied_equality(Query *root,
* too-small selectivity, not to mention wasting time at execution.
*
* Note: quals of the form "var = const" are never considered redundant,
- * only those of the form "var = var". This is needed because when we
+ * only those of the form "var = var". This is needed because when we
* have constants in an implied-equality set, we use a different strategy
- * that suppresses all "var = var" deductions. We must therefore keep
+ * that suppresses all "var = var" deductions. We must therefore keep
* all the "var = const" quals.
*/
static bool
@@ -858,7 +866,8 @@ qual_is_redundant(Query *root,
* left side of the new qual. We traverse the old-quals list
* repeatedly to transitively expand the exprs list. If at any point
* we find we can reach the right-side expr of the new qual, we are
- * done. We give up when we can't expand the equalexprs list any more.
+ * done. We give up when we can't expand the equalexprs list any
+ * more.
*/
equalexprs = makeList1(newleft);
do
@@ -945,7 +954,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 8883310f66d..4f0ede34520 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.76 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.77 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. It selects
+ * sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@@ -84,7 +84,7 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
if (root->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
- (List *) root->jointree->quals);
+ (List *) root->jointree->quals);
*sorted_path = NULL;
return;
}
@@ -125,9 +125,9 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
* relations. We also build lists of equijoined keys for pathkey
* construction.
*
- * Note: all subplan nodes will have "flat" (var-only) tlists.
- * This implies that all expression evaluations are done at the root of
- * the plan tree. Once upon a time there was code to try to push
+ * Note: all subplan nodes will have "flat" (var-only) tlists. This
+ * implies that all expression evaluations are done at the root of the
+ * plan tree. Once upon a time there was code to try to push
* expensive function calls down to lower plan nodes, but that's dead
* code and has been for a long time...
*/
@@ -223,7 +223,8 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
}
/*
- * If we have constant quals, add a toplevel Result step to process them.
+ * If we have constant quals, add a toplevel Result step to process
+ * them.
*/
if (constant_quals)
{
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 1896982f02e..c2aec37470a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.157 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.158 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,10 +45,10 @@
/* Expression kind codes for preprocess_expression */
#define EXPRKIND_QUAL 0
-#define EXPRKIND_TARGET 1
-#define EXPRKIND_RTFUNC 2
+#define EXPRKIND_TARGET 1
+#define EXPRKIND_RTFUNC 2
#define EXPRKIND_LIMIT 3
-#define EXPRKIND_ININFO 4
+#define EXPRKIND_ININFO 4
static Node *preprocess_expression(Query *parse, Node *expr, int kind);
@@ -59,9 +59,9 @@ static bool hash_safe_grouping(Query *parse);
static List *make_subplanTargetList(Query *parse, List *tlist,
AttrNumber **groupColIdx, bool *need_tlist_eval);
static void locate_grouping_columns(Query *parse,
- List *tlist,
- List *sub_tlist,
- AttrNumber *groupColIdx);
+ List *tlist,
+ List *sub_tlist,
+ AttrNumber *groupColIdx);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
@@ -103,9 +103,9 @@ planner(Query *parse, bool isCursor, int cursorOptions)
{
/*
* We have no real idea how many tuples the user will ultimately
- * FETCH from a cursor, but it seems a good bet that he
- * doesn't want 'em all. Optimize for 10% retrieval (you
- * gotta better number? Should this be a SETtable parameter?)
+ * FETCH from a cursor, but it seems a good bet that he doesn't
+ * want 'em all. Optimize for 10% retrieval (you gotta better
+ * number? Should this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
@@ -121,8 +121,8 @@ planner(Query *parse, bool isCursor, int cursorOptions)
Assert(PlannerQueryLevel == 0);
/*
- * If creating a plan for a scrollable cursor, make sure it can
- * run backwards on demand. Add a Material node at the top at need.
+ * If creating a plan for a scrollable cursor, make sure it can run
+ * backwards on demand. Add a Material node at the top at need.
*/
if (isCursor && (cursorOptions & CURSOR_OPT_SCROLL))
{
@@ -181,14 +181,14 @@ subquery_planner(Query *parse, double tuple_fraction)
/*
* Look for IN clauses at the top level of WHERE, and transform them
- * into joins. Note that this step only handles IN clauses originally
- * at top level of WHERE; if we pull up any subqueries in the next step,
- * their INs are processed just before pulling them up.
+ * into joins. Note that this step only handles IN clauses originally
+ * at top level of WHERE; if we pull up any subqueries in the next
+ * step, their INs are processed just before pulling them up.
*/
parse->in_info_list = NIL;
if (parse->hasSubLinks)
parse->jointree->quals = pull_up_IN_clauses(parse,
- parse->jointree->quals);
+ parse->jointree->quals);
/*
* Check to see if any subqueries in the rangetable can be merged into
@@ -198,10 +198,11 @@ subquery_planner(Query *parse, double tuple_fraction)
pull_up_subqueries(parse, (Node *) parse->jointree, false);
/*
- * Detect whether any rangetable entries are RTE_JOIN kind; if not,
- * we can avoid the expense of doing flatten_join_alias_vars(). Also
- * check for outer joins --- if none, we can skip reduce_outer_joins().
- * This must be done after we have done pull_up_subqueries, of course.
+ * Detect whether any rangetable entries are RTE_JOIN kind; if not, we
+ * can avoid the expense of doing flatten_join_alias_vars(). Also
+ * check for outer joins --- if none, we can skip
+ * reduce_outer_joins(). This must be done after we have done
+ * pull_up_subqueries, of course.
*/
parse->hasJoinRTEs = false;
hasOuterJoins = false;
@@ -283,19 +284,20 @@ subquery_planner(Query *parse, double tuple_fraction)
parse->havingQual = (Node *) newHaving;
/*
- * If we have any outer joins, try to reduce them to plain inner joins.
- * This step is most easily done after we've done expression preprocessing.
+ * If we have any outer joins, try to reduce them to plain inner
+ * joins. This step is most easily done after we've done expression
+ * preprocessing.
*/
if (hasOuterJoins)
reduce_outer_joins(parse);
/*
- * See if we can simplify the jointree; opportunities for this may come
- * from having pulled up subqueries, or from flattening explicit JOIN
- * syntax. We must do this after flattening JOIN alias variables, since
- * eliminating explicit JOIN nodes from the jointree will cause
- * get_relids_for_join() to fail. But it should happen after
- * reduce_outer_joins, anyway.
+ * See if we can simplify the jointree; opportunities for this may
+ * come from having pulled up subqueries, or from flattening explicit
+ * JOIN syntax. We must do this after flattening JOIN alias
+ * variables, since eliminating explicit JOIN nodes from the jointree
+ * will cause get_relids_for_join() to fail. But it should happen
+ * after reduce_outer_joins, anyway.
*/
parse->jointree = (FromExpr *)
simplify_jointree(parse, (Node *) parse->jointree);
@@ -318,26 +320,26 @@ subquery_planner(Query *parse, double tuple_fraction)
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
{
- Cost initplan_cost = 0;
+ Cost initplan_cost = 0;
/* Prepare extParam/allParam sets for all nodes in tree */
SS_finalize_plan(plan, parse->rtable);
/*
- * SS_finalize_plan doesn't handle initPlans, so we have to manually
- * attach them to the topmost plan node, and add their extParams to
- * the topmost node's, too.
+ * SS_finalize_plan doesn't handle initPlans, so we have to
+ * manually attach them to the topmost plan node, and add their
+ * extParams to the topmost node's, too.
*
- * We also add the total_cost of each initPlan to the startup cost
- * of the top node. This is a conservative overestimate, since in
- * fact each initPlan might be executed later than plan startup, or
- * even not at all.
+ * We also add the total_cost of each initPlan to the startup cost of
+ * the top node. This is a conservative overestimate, since in
+ * fact each initPlan might be executed later than plan startup,
+ * or even not at all.
*/
plan->initPlan = PlannerInitPlan;
foreach(lst, plan->initPlan)
{
- SubPlan *initplan = (SubPlan *) lfirst(lst);
+ SubPlan *initplan = (SubPlan *) lfirst(lst);
plan->extParam = bms_add_members(plan->extParam,
initplan->plan->extParam);
@@ -368,7 +370,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get processed.
+ * else sublinks expanded out from join aliases wouldn't get
+ * processed.
*/
if (parse->hasJoinRTEs)
expr = flatten_join_alias_vars(parse, expr);
@@ -403,8 +406,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));
/*
- * XXX do not insert anything here unless you have grokked the comments
- * in SS_replace_correlation_vars ...
+ * XXX do not insert anything here unless you have grokked the
+ * comments in SS_replace_correlation_vars ...
*/
/* Replace uplevel vars with Param nodes */
@@ -498,20 +501,21 @@ inheritance_planner(Query *parse, List *inheritlist)
/* Generate plan */
subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */ );
subplans = lappend(subplans, subplan);
+
/*
* It's possible that additional RTEs got added to the rangetable
* due to expansion of inherited source tables (see allpaths.c).
* If so, we must copy 'em back to the main parse tree's rtable.
*
- * XXX my goodness this is ugly. Really need to think about ways
- * to rein in planner's habit of scribbling on its input.
+ * XXX my goodness this is ugly. Really need to think about ways to
+ * rein in planner's habit of scribbling on its input.
*/
subrtlength = length(subquery->rtable);
if (subrtlength > mainrtlength)
{
- List *subrt = subquery->rtable;
+ List *subrt = subquery->rtable;
- while (mainrtlength-- > 0) /* wish we had nthcdr() */
+ while (mainrtlength-- > 0) /* wish we had nthcdr() */
subrt = lnext(subrt);
parse->rtable = nconc(parse->rtable, subrt);
mainrtlength = subrtlength;
@@ -684,7 +688,7 @@ grouping_planner(Query *parse, double tuple_fraction)
* from tlist if grouping or aggregation is needed.
*/
sub_tlist = make_subplanTargetList(parse, tlist,
- &groupColIdx, &need_tlist_eval);
+ &groupColIdx, &need_tlist_eval);
/*
* Calculate pathkeys that represent grouping/ordering
@@ -700,8 +704,8 @@ grouping_planner(Query *parse, double tuple_fraction)
* Also, it's possible that optimization has eliminated all
* aggregates, and we may as well check for that here.
*
- * Note: we do not attempt to detect duplicate aggregates here;
- * a somewhat-overestimated count is okay for our present purposes.
+ * Note: we do not attempt to detect duplicate aggregates here; a
+ * somewhat-overestimated count is okay for our present purposes.
*/
if (parse->hasAggs)
{
@@ -892,8 +896,8 @@ grouping_planner(Query *parse, double tuple_fraction)
&cheapest_path, &sorted_path);
/*
- * We couldn't canonicalize group_pathkeys and sort_pathkeys before
- * running query_planner(), so do it now.
+ * We couldn't canonicalize group_pathkeys and sort_pathkeys
+ * before running query_planner(), so do it now.
*/
group_pathkeys = canonicalize_pathkeys(parse, group_pathkeys);
sort_pathkeys = canonicalize_pathkeys(parse, sort_pathkeys);
@@ -903,9 +907,9 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
if (parse->groupClause)
{
- List *groupExprs;
- double cheapest_path_rows;
- int cheapest_path_width;
+ List *groupExprs;
+ double cheapest_path_rows;
+ int cheapest_path_width;
/*
* Beware in this section of the possibility that
@@ -919,13 +923,13 @@ grouping_planner(Query *parse, double tuple_fraction)
}
else
{
- cheapest_path_rows = 1; /* assume non-set result */
- cheapest_path_width = 100; /* arbitrary */
+ cheapest_path_rows = 1; /* assume non-set result */
+ cheapest_path_width = 100; /* arbitrary */
}
/*
- * Always estimate the number of groups. We can't do this until
- * after running query_planner(), either.
+ * Always estimate the number of groups. We can't do this
+ * until after running query_planner(), either.
*/
groupExprs = get_sortgrouplist_exprs(parse->groupClause,
parse->targetList);
@@ -936,12 +940,13 @@ grouping_planner(Query *parse, double tuple_fraction)
numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
/*
- * Check can't-do-it conditions, including whether the grouping
- * operators are hashjoinable.
+ * Check can't-do-it conditions, including whether the
+ * grouping operators are hashjoinable.
*
* Executor doesn't support hashed aggregation with DISTINCT
- * aggregates. (Doing so would imply storing *all* the input
- * values in the hash table, which seems like a certain loser.)
+ * aggregates. (Doing so would imply storing *all* the input
+ * values in the hash table, which seems like a certain
+ * loser.)
*/
if (!enable_hashagg || !hash_safe_grouping(parse))
use_hashed_grouping = false;
@@ -953,32 +958,30 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Use hashed grouping if (a) we think we can fit the
- * hashtable into SortMem, *and* (b) the estimated cost
- * is no more than doing it the other way. While avoiding
+ * hashtable into SortMem, *and* (b) the estimated cost is
+ * no more than doing it the other way. While avoiding
* the need for sorted input is usually a win, the fact
* that the output won't be sorted may be a loss; so we
* need to do an actual cost comparison.
*
* In most cases we have no good way to estimate the size of
- * the transition value needed by an aggregate; arbitrarily
- * assume it is 100 bytes. Also set the overhead per hashtable
- * entry at 64 bytes.
+ * the transition value needed by an aggregate;
+ * arbitrarily assume it is 100 bytes. Also set the
+ * overhead per hashtable entry at 64 bytes.
*/
- int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
+ int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
if (hashentrysize * dNumGroups <= SortMem * 1024L)
{
/*
* Okay, do the cost comparison. We need to consider
- * cheapest_path + hashagg [+ final sort]
- * versus either
- * cheapest_path [+ sort] + group or agg [+ final sort]
- * or
- * presorted_path + group or agg [+ final sort]
- * where brackets indicate a step that may not be needed.
- * We assume query_planner() will have returned a
- * presorted path only if it's a winner compared to
- * cheapest_path for this purpose.
+ * cheapest_path + hashagg [+ final sort] versus
+ * either cheapest_path [+ sort] + group or agg [+
+ * final sort] or presorted_path + group or agg [+
+ * final sort] where brackets indicate a step that may
+ * not be needed. We assume query_planner() will have
+ * returned a presorted path only if it's a winner
+ * compared to cheapest_path for this purpose.
*
* These path variables are dummies that just hold cost
* fields; we don't make actual Paths for these steps.
@@ -1065,9 +1068,9 @@ grouping_planner(Query *parse, double tuple_fraction)
/*
* Select the best path and create a plan to execute it.
*
- * If we are doing hashed grouping, we will always read all the
- * input tuples, so use the cheapest-total path. Otherwise,
- * trust query_planner's decision about which to use.
+ * If we are doing hashed grouping, we will always read all the input
+ * tuples, so use the cheapest-total path. Otherwise, trust
+ * query_planner's decision about which to use.
*/
if (sorted_path && !use_hashed_grouping)
{
@@ -1081,19 +1084,19 @@ grouping_planner(Query *parse, double tuple_fraction)
}
/*
- * create_plan() returns a plan with just a "flat" tlist of required
- * Vars. Usually we need to insert the sub_tlist as the tlist of the
- * top plan node. However, we can skip that if we determined that
- * whatever query_planner chose to return will be good enough.
+ * create_plan() returns a plan with just a "flat" tlist of
+ * required Vars. Usually we need to insert the sub_tlist as the
+ * tlist of the top plan node. However, we can skip that if we
+ * determined that whatever query_planner chose to return will be
+ * good enough.
*/
if (need_tlist_eval)
{
/*
* If the top-level plan node is one that cannot do expression
- * evaluation, we must insert a Result node to project the desired
- * tlist.
- * Currently, the only plan node we might see here that falls into
- * that category is Append.
+ * evaluation, we must insert a Result node to project the
+ * desired tlist. Currently, the only plan node we might see
+ * here that falls into that category is Append.
*/
if (IsA(result_plan, Append))
{
@@ -1108,23 +1111,25 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
result_plan->targetlist = sub_tlist;
}
+
/*
* Also, account for the cost of evaluation of the sub_tlist.
*
* Up to now, we have only been dealing with "flat" tlists,
* containing just Vars. So their evaluation cost is zero
* according to the model used by cost_qual_eval() (or if you
- * prefer, the cost is factored into cpu_tuple_cost). Thus we can
- * avoid accounting for tlist cost throughout query_planner() and
- * subroutines. But now we've inserted a tlist that might contain
- * actual operators, sub-selects, etc --- so we'd better account
- * for its cost.
+ * prefer, the cost is factored into cpu_tuple_cost). Thus we
+ * can avoid accounting for tlist cost throughout
+ * query_planner() and subroutines. But now we've inserted a
+ * tlist that might contain actual operators, sub-selects, etc
+ * --- so we'd better account for its cost.
*
- * Below this point, any tlist eval cost for added-on nodes should
- * be accounted for as we create those nodes. Presently, of the
- * node types we can add on, only Agg and Group project new tlists
- * (the rest just copy their input tuples) --- so make_agg() and
- * make_group() are responsible for computing the added cost.
+ * Below this point, any tlist eval cost for added-on nodes
+ * should be accounted for as we create those nodes.
+ * Presently, of the node types we can add on, only Agg and
+ * Group project new tlists (the rest just copy their input
+ * tuples) --- so make_agg() and make_group() are responsible
+ * for computing the added cost.
*/
cost_qual_eval(&tlist_cost, sub_tlist);
result_plan->startup_cost += tlist_cost.startup;
@@ -1135,8 +1140,8 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Since we're using query_planner's tlist and not the one
- * make_subplanTargetList calculated, we have to refigure
- * any groupi