
Commit d29a4fb

committed
1 parent 3b4d85c commit d29a4fb

File tree

1 file changed (+95, -95 lines)


src/backend/utils/cache/catcache.c

Lines changed: 95 additions & 95 deletions
@@ -1366,34 +1366,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;				/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;				/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
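
Both hunks in this commit are whitespace-only: every removed line reappears with identical content one indentation level deeper, which is why the addition and deletion counts match at 95 each. The parent commit wrapped the scan logic of SearchCatCacheMiss and SearchCatCacheList in a do { ... } while (stale) retry loop without reindenting the enclosed body, and this commit supplies the mechanical reindent. The retry-on-stale idiom being reindented here is worth spelling out on its own. Below is a minimal, self-contained sketch of it; scan_begin/scan_next/scan_end and build_entry are illustrative stand-ins for this sketch, not PostgreSQL functions.

#include <stdbool.h>
#include <stddef.h>

/*
 * Retry-on-stale scan: if building a cache entry from a scanned tuple
 * fails because the tuple went stale mid-build (build_entry returns
 * NULL), abandon the scan and start it over from the top.
 */
extern void *scan_begin(void);
extern void *scan_next(void *scan);	/* NULL when the scan is exhausted */
extern void scan_end(void *scan);
extern void *build_entry(void *tuple);	/* NULL if the tuple went stale */

void *
lookup_with_retry(void)
{
	void	   *entry;
	bool		stale;

	do
	{
		void	   *scan = scan_begin();
		void	   *tuple;

		entry = NULL;
		stale = false;

		while ((tuple = scan_next(scan)) != NULL)
		{
			entry = build_entry(tuple);
			/* upon failure, restart the whole scan */
			if (entry == NULL)
			{
				stale = true;
				break;
			}
			break;				/* assume only one match */
		}

		scan_end(scan);
	} while (stale);

	return entry;
}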
@@ -1653,95 +1653,95 @@ SearchCatCacheList(CatCache *cache,
 			cur_skey[2].sk_argument = v3;
 			cur_skey[3].sk_argument = v4;
 
-		scandesc = systable_beginscan(relation,
-									  cache->cc_indexoid,
-									  IndexScanOK(cache, cur_skey),
-									  NULL,
-									  nkeys,
-									  cur_skey);
-
-		/* The list will be ordered iff we are doing an index scan */
-		ordered = (scandesc->irel != NULL);
-
-		stale = false;
+			scandesc = systable_beginscan(relation,
+										  cache->cc_indexoid,
+										  IndexScanOK(cache, cur_skey),
+										  NULL,
+										  nkeys,
+										  cur_skey);
 
-		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-		{
-			uint32		hashValue;
-			Index		hashIndex;
-			bool		found = false;
-			dlist_head *bucket;
+			/* The list will be ordered iff we are doing an index scan */
+			ordered = (scandesc->irel != NULL);
 
-			/*
-			 * See if there's an entry for this tuple already.
-			 */
-			ct = NULL;
-			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+			stale = false;
 
-			bucket = &cache->cc_bucket[hashIndex];
-			dlist_foreach(iter, bucket)
+			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 			{
-				ct = dlist_container(CatCTup, cache_elem, iter.cur);
+				uint32		hashValue;
+				Index		hashIndex;
+				bool		found = false;
+				dlist_head *bucket;
 
-				if (ct->dead || ct->negative)
-					continue;	/* ignore dead and negative entries */
+				/*
+				 * See if there's an entry for this tuple already.
+				 */
+				ct = NULL;
+				hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+				hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-				if (ct->hash_value != hashValue)
-					continue;	/* quickly skip entry if wrong hash val */
+				bucket = &cache->cc_bucket[hashIndex];
+				dlist_foreach(iter, bucket)
+				{
+					ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-					continue;	/* not same tuple */
+					if (ct->dead || ct->negative)
+						continue;	/* ignore dead and negative entries */
 
-				/*
-				 * Found a match, but can't use it if it belongs to another
-				 * list already
-				 */
-				if (ct->c_list)
-					continue;
+					if (ct->hash_value != hashValue)
+						continue;	/* quickly skip entry if wrong hash val */
 
-				found = true;
-				break;			/* A-OK */
-			}
+					if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+						continue;	/* not same tuple */
 
-			if (!found)
-			{
-				/* We didn't find a usable entry, so make a new one */
-				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-											 hashValue, hashIndex);
-				/* upon failure, we must start the scan over */
-				if (ct == NULL)
-				{
 					/*
-					 * Release refcounts on any items we already had.  We dare
-					 * not try to free them if they're now unreferenced, since
-					 * an error while doing that would result in the PG_CATCH
-					 * below doing extra refcount decrements.  Besides, we'll
-					 * likely re-adopt those items in the next iteration, so
-					 * it's not worth complicating matters to try to get rid
-					 * of them.
+					 * Found a match, but can't use it if it belongs to
+					 * another list already
 					 */
-					foreach(ctlist_item, ctlist)
+					if (ct->c_list)
+						continue;
+
+					found = true;
+					break;		/* A-OK */
+				}
+
+				if (!found)
+				{
+					/* We didn't find a usable entry, so make a new one */
+					ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+												 hashValue, hashIndex);
+					/* upon failure, we must start the scan over */
+					if (ct == NULL)
 					{
-						ct = (CatCTup *) lfirst(ctlist_item);
-						Assert(ct->c_list == NULL);
-						Assert(ct->refcount > 0);
-						ct->refcount--;
+						/*
+						 * Release refcounts on any items we already had.  We
+						 * dare not try to free them if they're now
+						 * unreferenced, since an error while doing that would
+						 * result in the PG_CATCH below doing extra refcount
+						 * decrements.  Besides, we'll likely re-adopt those
+						 * items in the next iteration, so it's not worth
+						 * complicating matters to try to get rid of them.
+						 */
+						foreach(ctlist_item, ctlist)
+						{
+							ct = (CatCTup *) lfirst(ctlist_item);
+							Assert(ct->c_list == NULL);
+							Assert(ct->refcount > 0);
+							ct->refcount--;
+						}
+						/* Reset ctlist in preparation for new try */
+						ctlist = NIL;
+						stale = true;
+						break;
 					}
-					/* Reset ctlist in preparation for new try */
-					ctlist = NIL;
-					stale = true;
-					break;
 				}
-			}
 
-			/* Careful here: add entry to ctlist, then bump its refcount */
-			/* This way leaves state correct if lappend runs out of memory */
-			ctlist = lappend(ctlist, ct);
-			ct->refcount++;
-		}
+				/* Careful here: add entry to ctlist, then bump its refcount */
+				/* This way leaves state correct if lappend runs out of memory */
+				ctlist = lappend(ctlist, ct);
+				ct->refcount++;
+			}
 
-		systable_endscan(scandesc);
+			systable_endscan(scandesc);
 		} while (stale);
 
 		table_close(relation, AccessShareLock);
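
The SearchCatCacheList hunk reindents the same retry shape with one extra obligation, visible in the "Release refcounts" comment above: before restarting the scan, the loop must drop the refcounts already taken on entries collected into ctlist during the failed pass. A hedged sketch of that cleanup-then-retry variant, again using illustrative names rather than the real catcache API:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins, not the real catcache types or API. */
typedef struct Entry
{
	int			refcount;
} Entry;

#define MAX_ENTRIES 64

extern void *scan_begin(void);
extern void *scan_next(void *scan);	/* NULL when the scan is exhausted */
extern void scan_end(void *scan);
extern Entry *find_or_build_entry(void *tuple);	/* NULL => tuple went stale */

size_t
collect_with_retry(Entry *list[MAX_ENTRIES])
{
	size_t		n;
	bool		stale;

	do
	{
		void	   *scan = scan_begin();
		void	   *tuple;

		n = 0;
		stale = false;

		while ((tuple = scan_next(scan)) != NULL)
		{
			Entry	   *e = find_or_build_entry(tuple);

			if (e == NULL)
			{
				/*
				 * Unpin everything collected during this pass, then restart
				 * the scan from scratch; the next pass will likely re-adopt
				 * most of these entries anyway.
				 */
				for (size_t i = 0; i < n; i++)
					list[i]->refcount--;
				n = 0;
				stale = true;
				break;
			}

			if (n == MAX_ENTRIES)
				break;			/* sketch: cap the list for simplicity */

			/* record the entry first, then pin it */
			list[n++] = e;
			e->refcount++;
		}

		scan_end(scan);
	} while (stale);

	return n;
}

As in the hunk above, each entry is recorded in the list before its refcount is bumped; in the real code that ordering keeps state consistent if lappend runs out of memory.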
