Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 8b9bc23

Browse files
committed
Remove the limit on the number of entries allowed in catcaches, and
remove the infrastructure needed to enforce the limit, i.e., the global LRU list of cache entries. On small-to-middling databases this wins because maintaining the LRU list is a waste of time. On large databases this wins because it's better to keep more cache entries (we assume such users can afford to use some more per-backend memory than was contemplated in the Berkeley-era catcache design). This provides a noticeable improvement in the speed of psql \d on a 10000-table database, though it doesn't make it instantaneous. While at it, use per-catcache settings for the number of hash buckets per catcache, rather than the former one-size-fits-all value. It's a bit silly to be using the same number of hash buckets for, e.g., pg_am and pg_attribute. The specific values I used might need some tuning, but they seem to be in the right ballpark based on CATCACHE_STATS results from the standard regression tests.
1 parent e1e133f commit 8b9bc23

File tree

3 files changed

+181
-206
lines changed

3 files changed

+181
-206
lines changed

src/backend/utils/cache/catcache.c

Lines changed: 51 additions & 146 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
*
99
*
1010
* IDENTIFICATION
11-
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.128 2006/03/05 15:58:45 momjian Exp $
11+
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.129 2006/06/15 02:08:09 tgl Exp $
1212
*
1313
*-------------------------------------------------------------------------
1414
*/
@@ -37,20 +37,6 @@
3737

3838
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
3939

40-
/*
41-
* Constants related to size of the catcache.
42-
*
43-
* NCCBUCKETS must be a power of two and must be less than 64K (because
44-
* SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
45-
* practice it should be a lot less, anyway, to avoid chewing up too much
46-
* space on hash bucket headers.
47-
*
48-
* MAXCCTUPLES could be as small as a few hundred, if per-backend memory
49-
* consumption is at a premium.
50-
*/
51-
#define NCCBUCKETS 256 /* Hash buckets per CatCache */
52-
#define MAXCCTUPLES 5000 /* Maximum # of tuples in all caches */
53-
5440
/*
5541
* Given a hash value and the size of the hash table, find the bucket
5642
* in which the hash value belongs. Since the hash table must contain
@@ -89,15 +75,14 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
8975
HeapTuple tuple);
9076

9177
#ifdef CATCACHE_STATS
92-
static void CatCachePrintStats(void);
78+
static void CatCachePrintStats(int code, Datum arg);
9379
#endif
9480
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
9581
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
9682
static void CatalogCacheInitializeCache(CatCache *cache);
9783
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
9884
uint32 hashValue, Index hashIndex,
9985
bool negative);
100-
static void CatalogCacheCleanup(CatCTup *savect);
10186
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
10287

10388

@@ -281,26 +266,22 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
281266
#ifdef CATCACHE_STATS
282267

283268
static void
284-
CatCachePrintStats(void)
269+
CatCachePrintStats(int code, Datum arg)
285270
{
286271
CatCache *cache;
287272
long cc_searches = 0;
288273
long cc_hits = 0;
289274
long cc_neg_hits = 0;
290275
long cc_newloads = 0;
291276
long cc_invals = 0;
292-
long cc_discards = 0;
293277
long cc_lsearches = 0;
294278
long cc_lhits = 0;
295279

296-
elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
297-
CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
298-
299280
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
300281
{
301282
if (cache->cc_ntup == 0 && cache->cc_searches == 0)
302283
continue; /* don't print unused caches */
303-
elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
284+
elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
304285
cache->cc_relname,
305286
cache->cc_indexoid,
306287
cache->cc_ntup,
@@ -312,19 +293,17 @@ CatCachePrintStats(void)
312293
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
313294
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
314295
cache->cc_invals,
315-
cache->cc_discards,
316296
cache->cc_lsearches,
317297
cache->cc_lhits);
318298
cc_searches += cache->cc_searches;
319299
cc_hits += cache->cc_hits;
320300
cc_neg_hits += cache->cc_neg_hits;
321301
cc_newloads += cache->cc_newloads;
322302
cc_invals += cache->cc_invals;
323-
cc_discards += cache->cc_discards;
324303
cc_lsearches += cache->cc_lsearches;
325304
cc_lhits += cache->cc_lhits;
326305
}
327-
elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
306+
elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
328307
CacheHdr->ch_ntup,
329308
cc_searches,
330309
cc_hits,
@@ -334,7 +313,6 @@ CatCachePrintStats(void)
334313
cc_searches - cc_hits - cc_neg_hits - cc_newloads,
335314
cc_searches - cc_hits - cc_neg_hits,
336315
cc_invals,
337-
cc_discards,
338316
cc_lsearches,
339317
cc_lhits);
340318
}
@@ -367,8 +345,7 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
367345
return; /* nothing left to do */
368346
}
369347

370-
/* delink from linked lists */
371-
DLRemove(&ct->lrulist_elem);
348+
/* delink from linked list */
372349
DLRemove(&ct->cache_elem);
373350

374351
/* free associated tuple data */
@@ -568,11 +545,13 @@ AtEOXact_CatCache(bool isCommit)
568545
if (assert_enabled)
569546
{
570547
CatCache *ccp;
571-
Dlelem *elt;
572548

573-
/* Check CatCLists */
574549
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
575550
{
551+
Dlelem *elt;
552+
int i;
553+
554+
/* Check CatCLists */
576555
for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
577556
{
578557
CatCList *cl = (CatCList *) DLE_VAL(elt);
@@ -581,16 +560,21 @@ AtEOXact_CatCache(bool isCommit)
581560
Assert(cl->refcount == 0);
582561
Assert(!cl->dead);
583562
}
584-
}
585563

586-
/* Check individual tuples */
587-
for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
588-
{
589-
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
564+
/* Check individual tuples */
565+
for (i = 0; i < ccp->cc_nbuckets; i++)
566+
{
567+
for (elt = DLGetHead(&ccp->cc_bucket[i]);
568+
elt;
569+
elt = DLGetSucc(elt))
570+
{
571+
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
590572

591-
Assert(ct->ct_magic == CT_MAGIC);
592-
Assert(ct->refcount == 0);
593-
Assert(!ct->dead);
573+
Assert(ct->ct_magic == CT_MAGIC);
574+
Assert(ct->refcount == 0);
575+
Assert(!ct->dead);
576+
}
577+
}
594578
}
595579
}
596580
#endif
@@ -796,12 +780,27 @@ InitCatCache(int id,
796780
Oid indexoid,
797781
int reloidattr,
798782
int nkeys,
799-
const int *key)
783+
const int *key,
784+
int nbuckets)
800785
{
801786
CatCache *cp;
802787
MemoryContext oldcxt;
803788
int i;
804789

790+
/*
791+
* nbuckets is the number of hash buckets to use in this catcache.
792+
* Currently we just use a hard-wired estimate of an appropriate size
793+
* for each cache; maybe later make them dynamically resizable?
794+
*
795+
* nbuckets must be a power of two. We check this via Assert rather than
796+
* a full runtime check because the values will be coming from constant
797+
* tables.
798+
*
799+
* If you're confused by the power-of-two check, see comments in
800+
* bitmapset.c for an explanation.
801+
*/
802+
Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
803+
805804
/*
806805
* first switch to the cache context so our allocations do not vanish at
807806
* the end of a transaction
@@ -812,17 +811,15 @@ InitCatCache(int id,
812811
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
813812

814813
/*
815-
* if first time through, initialize the cache group header, including
816-
* global LRU list header
814+
* if first time through, initialize the cache group header
817815
*/
818816
if (CacheHdr == NULL)
819817
{
820818
CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
821819
CacheHdr->ch_caches = NULL;
822820
CacheHdr->ch_ntup = 0;
823-
CacheHdr->ch_maxtup = MAXCCTUPLES;
824-
DLInitList(&CacheHdr->ch_lrulist);
825821
#ifdef CATCACHE_STATS
822+
/* set up to dump stats at backend exit */
826823
on_proc_exit(CatCachePrintStats, 0);
827824
#endif
828825
}
@@ -832,7 +829,7 @@ InitCatCache(int id,
832829
*
833830
* Note: we assume zeroing initializes the Dllist headers correctly
834831
*/
835-
cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
832+
cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
836833

837834
/*
838835
* initialize the cache's relation information for the relation
@@ -847,7 +844,7 @@ InitCatCache(int id,
847844
cp->cc_tupdesc = (TupleDesc) NULL;
848845
cp->cc_reloidattr = reloidattr;
849846
cp->cc_ntup = 0;
850-
cp->cc_nbuckets = NCCBUCKETS;
847+
cp->cc_nbuckets = nbuckets;
851848
cp->cc_nkeys = nkeys;
852849
for (i = 0; i < nkeys; ++i)
853850
cp->cc_key[i] = key[i];
@@ -1162,13 +1159,11 @@ SearchCatCache(CatCache *cache,
11621159
continue;
11631160

11641161
/*
1165-
* we found a match in the cache: move it to the front of the global
1166-
* LRU list. We also move it to the front of the list for its
1167-
* hashbucket, in order to speed subsequent searches. (The most
1168-
* frequently accessed elements in any hashbucket will tend to be near
1169-
* the front of the hashbucket's list.)
1162+
* We found a match in the cache. Move it to the front of the list
1163+
* for its hashbucket, in order to speed subsequent searches. (The
1164+
* most frequently accessed elements in any hashbucket will tend to be
1165+
* near the front of the hashbucket's list.)
11701166
*/
1171-
DLMoveToFront(&ct->lrulist_elem);
11721167
DLMoveToFront(&ct->cache_elem);
11731168

11741169
/*
@@ -1414,14 +1409,12 @@ SearchCatCacheList(CatCache *cache,
14141409
continue;
14151410

14161411
/*
1417-
* We found a matching list: mark it as touched since the last
1418-
* CatalogCacheCleanup() sweep. Also move the list to the front of
1419-
* the cache's list-of-lists, to speed subsequent searches. (We do not
1412+
* We found a matching list. Move the list to the front of the
1413+
* cache's list-of-lists, to speed subsequent searches. (We do not
14201414
* move the members to the fronts of their hashbucket lists, however,
14211415
* since there's no point in that unless they are searched for
14221416
* individually.)
14231417
*/
1424-
cl->touched = true;
14251418
DLMoveToFront(&cl->cache_elem);
14261419

14271420
/* Bump the list's refcount and return it */
@@ -1504,10 +1497,7 @@ SearchCatCacheList(CatCache *cache,
15041497
if (ct->c_list)
15051498
continue;
15061499

1507-
/* Found a match, so move it to front */
1508-
DLMoveToFront(&ct->lrulist_elem);
1509-
1510-
break;
1500+
break; /* A-OK */
15111501
}
15121502

15131503
if (elt == NULL)
@@ -1577,7 +1567,6 @@ SearchCatCacheList(CatCache *cache,
15771567
cl->refcount = 0; /* for the moment */
15781568
cl->dead = false;
15791569
cl->ordered = ordered;
1580-
cl->touched = false; /* we already moved members to front */
15811570
cl->nkeys = nkeys;
15821571
cl->hash_value = lHashValue;
15831572
cl->n_members = nmembers;
@@ -1654,109 +1643,25 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
16541643

16551644
/*
16561645
* Finish initializing the CatCTup header, and add it to the cache's
1657-
* linked lists and counts.
1646+
* linked list and counts.
16581647
*/
16591648
ct->ct_magic = CT_MAGIC;
16601649
ct->my_cache = cache;
1661-
DLInitElem(&ct->lrulist_elem, (void *) ct);
16621650
DLInitElem(&ct->cache_elem, (void *) ct);
16631651
ct->c_list = NULL;
16641652
ct->refcount = 0; /* for the moment */
16651653
ct->dead = false;
16661654
ct->negative = negative;
16671655
ct->hash_value = hashValue;
16681656

1669-
DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
16701657
DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
16711658

16721659
cache->cc_ntup++;
16731660
CacheHdr->ch_ntup++;
16741661

1675-
/*
1676-
* If we've exceeded the desired size of the caches, try to throw away the
1677-
* least recently used entry(s). NB: be careful not to throw away the
1678-
* newly-built entry...
1679-
*/
1680-
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
1681-
CatalogCacheCleanup(ct);
1682-
16831662
return ct;
16841663
}
16851664

1686-
/*
1687-
* CatalogCacheCleanup
1688-
* Try to reduce the size of the catcaches when they get too big
1689-
*
1690-
* savect can be NULL, or a specific CatCTup not to remove even if it
1691-
* has zero refcount.
1692-
*/
1693-
static void
1694-
CatalogCacheCleanup(CatCTup *savect)
1695-
{
1696-
int tup_target;
1697-
CatCache *ccp;
1698-
Dlelem *elt,
1699-
*prevelt;
1700-
1701-
/*
1702-
* Each time we have to do this, try to cut the cache size down to about
1703-
* 90% of the maximum.
1704-
*/
1705-
tup_target = (CacheHdr->ch_maxtup * 9) / 10;
1706-
1707-
/*
1708-
* Our strategy for managing CatCLists is that, each time we have to throw
1709-
* away some cache entries, we first move-to-front all the members of
1710-
* CatCLists that have been touched since the last cleanup sweep. Then we
1711-
* do strict LRU elimination by individual tuples, zapping a list if any
1712-
* of its members gets zapped. Before PostgreSQL 8.1, we moved members to
1713-
* front each time their owning list was touched, which was arguably more
1714-
* fair in balancing list members against standalone tuples --- but the
1715-
* overhead for large lists was horrendous. This scheme is more heavily
1716-
* biased towards preserving lists, but that is not necessarily bad
1717-
* either.
1718-
*/
1719-
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1720-
{
1721-
for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
1722-
{
1723-
CatCList *cl = (CatCList *) DLE_VAL(elt);
1724-
1725-
Assert(cl->cl_magic == CL_MAGIC);
1726-
if (cl->touched && !cl->dead)
1727-
{
1728-
int i;
1729-
1730-
for (i = 0; i < cl->n_members; i++)
1731-
DLMoveToFront(&cl->members[i]->lrulist_elem);
1732-
}
1733-
cl->touched = false;
1734-
}
1735-
}
1736-
1737-
/* Now get rid of unreferenced tuples in reverse global LRU order */
1738-
for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
1739-
{
1740-
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
1741-
1742-
prevelt = DLGetPred(elt);
1743-
1744-
if (ct->refcount == 0 &&
1745-
(ct->c_list == NULL || ct->c_list->refcount == 0) &&
1746-
ct != savect)
1747-
{
1748-
#ifdef CATCACHE_STATS
1749-
ct->my_cache->cc_discards++;
1750-
#endif
1751-
CatCacheRemoveCTup(ct->my_cache, ct);
1752-
1753-
/* Quit when we've removed enough tuples */
1754-
if (CacheHdr->ch_ntup <= tup_target)
1755-
break;
1756-
}
1757-
}
1758-
}
1759-
17601665
/*
17611666
* build_dummy_tuple
17621667
* Generate a palloc'd HeapTuple that contains the specified key

0 commit comments

Comments
 (0)