Commit 56dcd71

1 parent 7e2561e commit 56dcd71
File tree: 1 file changed, +95 -95 lines

src/backend/utils/cache/catcache.c

Lines changed: 95 additions & 95 deletions
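Note: both hunks below appear to be whitespace-only changes. The bodies of the do { ... } while (stale) retry loops in SearchCatCacheMiss and SearchCatCacheList gain one indentation level, with two comments re-wrapped to fit the narrower width (pgindent-style); no statements are added or removed. A reconstructed view of the retry loop follows the diff.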
@@ -1362,34 +1362,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;				/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;				/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
@@ -1649,95 +1649,95 @@ SearchCatCacheList(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
-
-	/* The list will be ordered iff we are doing an index scan */
-	ordered = (scandesc->irel != NULL);
-
-	stale = false;
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		uint32		hashValue;
-		Index		hashIndex;
-		bool		found = false;
-		dlist_head *bucket;
+		/* The list will be ordered iff we are doing an index scan */
+		ordered = (scandesc->irel != NULL);
 
-		/*
-		 * See if there's an entry for this tuple already.
-		 */
-		ct = NULL;
-		hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-		hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+		stale = false;
 
-		bucket = &cache->cc_bucket[hashIndex];
-		dlist_foreach(iter, bucket)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			ct = dlist_container(CatCTup, cache_elem, iter.cur);
+			uint32		hashValue;
+			Index		hashIndex;
+			bool		found = false;
+			dlist_head *bucket;
 
-			if (ct->dead || ct->negative)
-				continue;		/* ignore dead and negative entries */
+			/*
+			 * See if there's an entry for this tuple already.
+			 */
+			ct = NULL;
+			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-			if (ct->hash_value != hashValue)
-				continue;		/* quickly skip entry if wrong hash val */
+			bucket = &cache->cc_bucket[hashIndex];
+			dlist_foreach(iter, bucket)
+			{
+				ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-			if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-				continue;		/* not same tuple */
+				if (ct->dead || ct->negative)
+					continue;	/* ignore dead and negative entries */
 
-			/*
-			 * Found a match, but can't use it if it belongs to another
-			 * list already
-			 */
-			if (ct->c_list)
-				continue;
+				if (ct->hash_value != hashValue)
+					continue;	/* quickly skip entry if wrong hash val */
 
-			found = true;
-			break;				/* A-OK */
-		}
+				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+					continue;	/* not same tuple */
 
-		if (!found)
-		{
-			/* We didn't find a usable entry, so make a new one */
-			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-										 hashValue, hashIndex);
-			/* upon failure, we must start the scan over */
-			if (ct == NULL)
-			{
 				/*
-				 * Release refcounts on any items we already had. We dare
-				 * not try to free them if they're now unreferenced, since
-				 * an error while doing that would result in the PG_CATCH
-				 * below doing extra refcount decrements. Besides, we'll
-				 * likely re-adopt those items in the next iteration, so
-				 * it's not worth complicating matters to try to get rid
-				 * of them.
+				 * Found a match, but can't use it if it belongs to
+				 * another list already
 				 */
-				foreach(ctlist_item, ctlist)
+				if (ct->c_list)
+					continue;
+
+				found = true;
+				break;			/* A-OK */
+			}
+
+			if (!found)
+			{
+				/* We didn't find a usable entry, so make a new one */
+				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+											 hashValue, hashIndex);
+				/* upon failure, we must start the scan over */
+				if (ct == NULL)
 				{
-					ct = (CatCTup *) lfirst(ctlist_item);
-					Assert(ct->c_list == NULL);
-					Assert(ct->refcount > 0);
-					ct->refcount--;
+					/*
+					 * Release refcounts on any items we already had. We
+					 * dare not try to free them if they're now
+					 * unreferenced, since an error while doing that would
+					 * result in the PG_CATCH below doing extra refcount
+					 * decrements. Besides, we'll likely re-adopt those
+					 * items in the next iteration, so it's not worth
+					 * complicating matters to try to get rid of them.
+					 */
+					foreach(ctlist_item, ctlist)
+					{
+						ct = (CatCTup *) lfirst(ctlist_item);
+						Assert(ct->c_list == NULL);
+						Assert(ct->refcount > 0);
+						ct->refcount--;
+					}
+					/* Reset ctlist in preparation for new try */
+					ctlist = NIL;
+					stale = true;
+					break;
 				}
-				/* Reset ctlist in preparation for new try */
-				ctlist = NIL;
-				stale = true;
-				break;
 			}
-		}
 
-		/* Careful here: add entry to ctlist, then bump its refcount */
-		/* This way leaves state correct if lappend runs out of memory */
-		ctlist = lappend(ctlist, ct);
-		ct->refcount++;
-	}
+			/* Careful here: add entry to ctlist, then bump its refcount */
+			/* This way leaves state correct if lappend runs out of memory */
+			ctlist = lappend(ctlist, ct);
+			ct->refcount++;
+		}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
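For readability, here is the SearchCatCacheMiss retry loop as it stands after this commit, assembled from the new side of the first hunk with its context lines; the enclosing do sits just above the hunk's context window, which the unchanged "} while (stale);" line implies. If CatalogCacheCreateEntry returns NULL, the tuple that was just read went stale mid-scan, so stale is set and the do/while restarts the whole systable scan. This is a reconstruction for illustration, not a verbatim excerpt of the surrounding file.

	do
	{
		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

SearchCatCacheList follows the same do/while (stale) shape, with the extra bookkeeping shown in the second hunk to release and rebuild its ctlist on a stale retry.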
