diff options
Diffstat (limited to 'src/backend/commands')
-rw-r--r-- | src/backend/commands/command.c    |   4 |
-rw-r--r-- | src/backend/commands/indexcmds.c  |  44 |
-rw-r--r-- | src/backend/commands/vacuum.c     | 109 |
-rw-r--r-- | src/backend/commands/vacuumlazy.c |  79 |
4 files changed, 97 insertions, 139 deletions
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c index 6a2bd7dc932..4fcbeeceb6c 100644 --- a/src/backend/commands/command.c +++ b/src/backend/commands/command.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.134 2001/06/14 01:09:22 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.135 2001/07/15 22:48:17 tgl Exp $ * * NOTES * The PerformAddAttribute() code, like most of the relation @@ -269,7 +269,7 @@ PerformPortalClose(char *name, CommandDest dest) * Initial idea of ordering the tuple attributes so that all * the variable length domains occured last was scratched. Doing * so would not speed access too much (in general) and would create - * many complications in formtuple, amgetattr, and addattribute. + * many complications in formtuple, heap_getattr, and addattribute. * * scan attribute catalog for name conflict (within rel) * scan type catalog for absence of data type (if not arg) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 108c4ea3780..7398b0b0ce5 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.50 2001/06/13 21:44:40 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.51 2001/07/15 22:48:17 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -83,6 +83,8 @@ DefineIndex(char *heapRelationName, Oid *classObjectId; Oid accessMethodId; Oid relationId; + HeapTuple tuple; + Form_pg_am accessMethodForm; IndexInfo *indexInfo; int numberOfAttributes; List *cnfPred = NIL; @@ -107,27 +109,25 @@ DefineIndex(char *heapRelationName, heapRelationName); /* - * compute access method id + * look up the access method, verify it can handle the requested features */ - accessMethodId = GetSysCacheOid(AMNAME, - 
PointerGetDatum(accessMethodName), - 0, 0, 0); - if (!OidIsValid(accessMethodId)) + tuple = SearchSysCache(AMNAME, + PointerGetDatum(accessMethodName), + 0, 0, 0); + if (!HeapTupleIsValid(tuple)) elog(ERROR, "DefineIndex: access method \"%s\" not found", accessMethodName); + accessMethodId = tuple->t_data->t_oid; + accessMethodForm = (Form_pg_am) GETSTRUCT(tuple); - /* - * XXX Hardwired hacks to check for limitations on supported index - * types. We really ought to be learning this info from entries in the - * pg_am table, instead of having it wired-in here! - */ - if (unique && accessMethodId != BTREE_AM_OID) - elog(ERROR, "DefineIndex: unique indices are only available with the btree access method"); + if (unique && ! accessMethodForm->amcanunique) + elog(ERROR, "DefineIndex: access method \"%s\" does not support UNIQUE indexes", + accessMethodName); + if (numberOfAttributes > 1 && ! accessMethodForm->amcanmulticol) + elog(ERROR, "DefineIndex: access method \"%s\" does not support multi-column indexes", + accessMethodName); - if (numberOfAttributes > 1 && - !( accessMethodId == BTREE_AM_OID || - accessMethodId == GIST_AM_OID)) - elog(ERROR, "DefineIndex: multi-column indices are only available with the btree or GiST access methods"); + ReleaseSysCache(tuple); /* * WITH clause reinstated to handle lossy indices. -- JMH, 7/22/96 @@ -298,7 +298,15 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable) InitIndexStrategy(indexInfo->ii_NumIndexAttrs, indexRelation, accessMethodId); - index_build(heapRelation, indexRelation, indexInfo, oldPred); + /* + * XXX currently BROKEN: if we want to support EXTEND INDEX, oldPred + * needs to be passed through to IndexBuildHeapScan. We could do this + * without help from the index AMs if we added an oldPred field to the + * IndexInfo struct. 
Currently I'm expecting that EXTEND INDEX will + * get removed, so I'm not going to do that --- tgl 7/14/01 + */ + + index_build(heapRelation, indexRelation, indexInfo); /* heap and index rels are closed as a side-effect of index_build */ } diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index f41bb664a2f..c53fa05812e 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.204 2001/07/13 22:55:59 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.205 2001/07/15 22:48:17 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -128,7 +128,7 @@ static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage); static void vacuum_index(VacPageList vacpagelist, Relation indrel, double num_tuples, int keep_tuples); static void scan_index(Relation indrel, double num_tuples); -static VacPage tid_reaped(ItemPointer itemptr, VacPageList vacpagelist); +static bool tid_reaped(ItemPointer itemptr, void *state); static void vac_update_fsm(Relation onerel, VacPageList fraged_pages, BlockNumber rel_pages); static VacPage copy_vac_page(VacPage vacpage); @@ -542,17 +542,11 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt) /* * Do the actual work --- either FULL or "lazy" vacuum - * - * XXX for the moment, lazy vac not supported unless CONCURRENT_VACUUM */ -#ifdef CONCURRENT_VACUUM if (vacstmt->full) full_vacuum_rel(onerel); else lazy_vacuum_rel(onerel, vacstmt); -#else - full_vacuum_rel(onerel); -#endif /* all done with this class, but hold lock until commit */ heap_close(onerel, NoLock); @@ -1049,7 +1043,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, elog(MESSAGE_LEVEL, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \ Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \ -Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. 
Pages %u/%u. %s", +Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s", nblocks, changed_pages, vacuum_pages->num_pages, empty_pages, new_pages, num_tuples, tups_vacuumed, nkeep, vacrelstats->num_vtlinks, @@ -1965,7 +1959,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, } Assert(num_moved == checked_moved); - elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u. %s", + elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s", RelationGetRelationName(onerel), nblocks, blkno, num_moved, vac_show_rusage(&ru0)); @@ -2213,7 +2207,7 @@ scan_index(Relation indrel, double num_tuples) nipages = RelationGetNumberOfBlocks(indrel); vac_update_relstats(RelationGetRelid(indrel), nipages, nitups, false); - elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f. %s", + elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f.\n\t%s", RelationGetRelationName(indrel), nipages, nitups, vac_show_rusage(&ru0)); @@ -2247,85 +2241,55 @@ static void vacuum_index(VacPageList vacpagelist, Relation indrel, double num_tuples, int keep_tuples) { - RetrieveIndexResult res; - IndexScanDesc iscan; - ItemPointer heapptr; - int tups_vacuumed; - BlockNumber num_pages; - double num_index_tuples; - VacPage vp; + IndexBulkDeleteResult *stats; VacRUsage ru0; vac_init_rusage(&ru0); - /* walk through the entire index */ - iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL); - tups_vacuumed = 0; - num_index_tuples = 0; - - while ((res = index_getnext(iscan, ForwardScanDirection)) - != (RetrieveIndexResult) NULL) - { - heapptr = &res->heap_iptr; - - if ((vp = tid_reaped(heapptr, vacpagelist)) != (VacPage) NULL) - { -#ifdef NOT_USED - elog(DEBUG, "<%x,%x> -> <%x,%x>", - ItemPointerGetBlockNumber(&(res->index_iptr)), - ItemPointerGetOffsetNumber(&(res->index_iptr)), - ItemPointerGetBlockNumber(&(res->heap_iptr)), - ItemPointerGetOffsetNumber(&(res->heap_iptr))); -#endif - if (vp->offsets_free == 0) - { - elog(NOTICE, "Index %s: pointer to EmptyPage 
(blk %u off %u) - fixing", - RelationGetRelationName(indrel), - vp->blkno, ItemPointerGetOffsetNumber(heapptr)); - } - ++tups_vacuumed; - index_delete(indrel, &res->index_iptr); - } - else - num_index_tuples += 1; - - pfree(res); - } + /* Do bulk deletion */ + stats = index_bulk_delete(indrel, tid_reaped, (void *) vacpagelist); - index_endscan(iscan); + if (!stats) + return; /* now update statistics in pg_class */ - num_pages = RelationGetNumberOfBlocks(indrel); vac_update_relstats(RelationGetRelid(indrel), - num_pages, num_index_tuples, false); + stats->num_pages, stats->num_index_tuples, + false); - elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %u. %s", - RelationGetRelationName(indrel), num_pages, - num_index_tuples - keep_tuples, tups_vacuumed, + elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s", + RelationGetRelationName(indrel), stats->num_pages, + stats->num_index_tuples - keep_tuples, stats->tuples_removed, vac_show_rusage(&ru0)); /* * Check for tuple count mismatch. If the index is partial, then * it's OK for it to have fewer tuples than the heap; else we got trouble. */ - if (num_index_tuples != num_tuples + keep_tuples) + if (stats->num_index_tuples != num_tuples + keep_tuples) { - if (num_index_tuples > num_tuples + keep_tuples || + if (stats->num_index_tuples > num_tuples + keep_tuples || ! is_partial_index(indrel)) elog(NOTICE, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\ \n\tRecreate the index.", - RelationGetRelationName(indrel), num_index_tuples, num_tuples); + RelationGetRelationName(indrel), + stats->num_index_tuples, num_tuples); } + + pfree(stats); } /* * tid_reaped() -- is a particular tid reaped? * + * This has the right signature to be an IndexBulkDeleteCallback. + * * vacpagelist->VacPage_array is sorted in right order. 
*/ -static VacPage -tid_reaped(ItemPointer itemptr, VacPageList vacpagelist) +static bool +tid_reaped(ItemPointer itemptr, void *state) { + VacPageList vacpagelist = (VacPageList) state; OffsetNumber ioffno; OffsetNumber *voff; VacPage vp, @@ -2342,8 +2306,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist) sizeof(VacPage), vac_cmp_blk); - if (vpp == (VacPage *) NULL) - return (VacPage) NULL; + if (vpp == NULL) + return false; /* ok - we are on a partially or fully reaped page */ vp = *vpp; @@ -2351,7 +2315,7 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist) if (vp->offsets_free == 0) { /* this is EmptyPage, so claim all tuples on it are reaped!!! */ - return vp; + return true; } voff = (OffsetNumber *) vac_bsearch((void *) &ioffno, @@ -2360,11 +2324,11 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist) sizeof(OffsetNumber), vac_cmp_offno); - if (voff == (OffsetNumber *) NULL) - return (VacPage) NULL; + if (voff == NULL) + return false; /* tid is reaped */ - return vp; + return true; } /* @@ -2595,6 +2559,13 @@ is_partial_index(Relation indrel) HeapTuple cachetuple; Form_pg_index indexStruct; + /* + * If the index's AM doesn't support nulls, it's partial for our purposes + */ + if (! 
indrel->rd_am->amindexnulls) + return true; + + /* Otherwise, look to see if there's a partial-index predicate */ cachetuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(RelationGetRelid(indrel)), 0, 0, 0); @@ -2603,7 +2574,7 @@ is_partial_index(Relation indrel) RelationGetRelid(indrel)); indexStruct = (Form_pg_index) GETSTRUCT(cachetuple); - result = (VARSIZE(&indexStruct->indpred) != 0); + result = (VARSIZE(&indexStruct->indpred) > VARHDRSZ); ReleaseSysCache(cachetuple); return result; diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 07529fe265a..b78f933f0c3 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -31,7 +31,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.1 2001/07/13 22:55:59 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.2 2001/07/15 22:48:17 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -112,7 +112,7 @@ static void lazy_record_dead_tuple(LVRelStats *vacrelstats, ItemPointer itemptr); static void lazy_record_free_space(LVRelStats *vacrelstats, BlockNumber page, Size avail); -static bool lazy_tid_reaped(ItemPointer itemptr, LVRelStats *vacrelstats); +static bool lazy_tid_reaped(ItemPointer itemptr, void *state); static void lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats); static int vac_cmp_itemptr(const void *left, const void *right); @@ -371,11 +371,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, if (pgchanged) { - WriteBuffer(buf); + SetBufferCommitInfoNeedsSave(buf); changed_pages++; } - else - ReleaseBuffer(buf); + + ReleaseBuffer(buf); } /* If any tuples need to be deleted, perform final vacuum cycle */ @@ -507,64 +507,40 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, static void lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats) { - RetrieveIndexResult res; - IndexScanDesc iscan; - int 
tups_vacuumed; - BlockNumber num_pages; - double num_index_tuples; + IndexBulkDeleteResult *stats; VacRUsage ru0; vac_init_rusage(&ru0); /* - * Only btree and hash indexes are currently safe for concurrent access; - * see notes in ExecOpenIndices(). XXX should rely on index AM for this + * If index is unsafe for concurrent access, must lock it. */ - if (indrel->rd_rel->relam != BTREE_AM_OID && - indrel->rd_rel->relam != HASH_AM_OID) + if (! indrel->rd_am->amconcurrent) LockRelation(indrel, AccessExclusiveLock); - /* XXX should use a bulk-delete call here */ - - /* walk through the entire index */ - iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL); - tups_vacuumed = 0; - num_index_tuples = 0; - - while ((res = index_getnext(iscan, ForwardScanDirection)) - != (RetrieveIndexResult) NULL) - { - ItemPointer heapptr = &res->heap_iptr; - - if (lazy_tid_reaped(heapptr, vacrelstats)) - { - index_delete(indrel, &res->index_iptr); - ++tups_vacuumed; - } - else - num_index_tuples += 1; - - pfree(res); - } - - index_endscan(iscan); - - /* now update statistics in pg_class */ - num_pages = RelationGetNumberOfBlocks(indrel); - vac_update_relstats(RelationGetRelid(indrel), - num_pages, num_index_tuples, false); + /* Do bulk deletion */ + stats = index_bulk_delete(indrel, lazy_tid_reaped, (void *) vacrelstats); /* * Release lock acquired above. */ - if (indrel->rd_rel->relam != BTREE_AM_OID && - indrel->rd_rel->relam != HASH_AM_OID) + if (! 
indrel->rd_am->amconcurrent) UnlockRelation(indrel, AccessExclusiveLock); - elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %u.\n\t%s", - RelationGetRelationName(indrel), num_pages, - num_index_tuples, tups_vacuumed, - vac_show_rusage(&ru0)); + /* now update statistics in pg_class */ + if (stats) + { + vac_update_relstats(RelationGetRelid(indrel), + stats->num_pages, stats->num_index_tuples, + false); + + elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s", + RelationGetRelationName(indrel), stats->num_pages, + stats->num_index_tuples, stats->tuples_removed, + vac_show_rusage(&ru0)); + + pfree(stats); + } } /* @@ -960,11 +936,14 @@ lazy_record_free_space(LVRelStats *vacrelstats, /* * lazy_tid_reaped() -- is a particular tid deletable? * + * This has the right signature to be an IndexBulkDeleteCallback. + * * Assumes dead_tuples array is in sorted order. */ static bool -lazy_tid_reaped(ItemPointer itemptr, LVRelStats *vacrelstats) +lazy_tid_reaped(ItemPointer itemptr, void *state) { + LVRelStats *vacrelstats = (LVRelStats *) state; ItemPointer res; res = (ItemPointer) bsearch((void *) itemptr, |