@@ -1362,34 +1362,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;				/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;					/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
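A minimal sketch of the retry pattern that the reindented loop above implements, assuming nothing beyond standard C: when building the cache entry fails because the tuple went stale while being read, the scan is torn down and restarted from the top. The helpers begin_scan(), next_tuple(), create_entry() and end_scan() below are hypothetical stand-ins for illustration, not the systable_*/catcache APIs used in the patch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Entry
{
	int			refcount;
	int			value;
} Entry;

static int	scan_pos;
static int	fail_once = 1;		/* force one stale round for the demo */

static void begin_scan(void) { scan_pos = 0; }
static bool next_tuple(int *out) { *out = 42; return scan_pos++ == 0; }
static void end_scan(void) { }

static Entry *
create_entry(int value)
{
	Entry	   *e;

	if (fail_once)				/* simulate a concurrent invalidation */
	{
		fail_once = 0;
		return NULL;			/* caller must restart the scan */
	}
	e = malloc(sizeof(Entry));
	e->refcount = 0;
	e->value = value;
	return e;
}

int
main(void)
{
	Entry	   *ct = NULL;
	bool		stale;
	int			rounds = 0;

	/* same shape as the do { ... } while (stale) loop in the hunk above */
	do
	{
		int			v;

		begin_scan();
		ct = NULL;
		stale = false;
		rounds++;

		while (next_tuple(&v))
		{
			ct = create_entry(v);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			ct->refcount++;		/* pin the entry before leaving the loop */
			break;				/* assume only one match */
		}

		end_scan();
	} while (stale);

	printf("got entry %d after %d scan round(s)\n", ct->value, rounds);
	free(ct);
	return 0;
}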
@@ -1649,95 +1649,95 @@ SearchCatCacheList(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
-
-	/* The list will be ordered iff we are doing an index scan */
-	ordered = (scandesc->irel != NULL);
-
-	stale = false;
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		uint32		hashValue;
-		Index		hashIndex;
-		bool		found = false;
-		dlist_head *bucket;
+		/* The list will be ordered iff we are doing an index scan */
+		ordered = (scandesc->irel != NULL);
 
-		/*
-		 * See if there's an entry for this tuple already.
-		 */
-		ct = NULL;
-		hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-		hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+		stale = false;
 
-		bucket = &cache->cc_bucket[hashIndex];
-		dlist_foreach(iter, bucket)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			ct = dlist_container(CatCTup, cache_elem, iter.cur);
+			uint32		hashValue;
+			Index		hashIndex;
+			bool		found = false;
+			dlist_head *bucket;
 
-			if (ct->dead || ct->negative)
-				continue;		/* ignore dead and negative entries */
+			/*
+			 * See if there's an entry for this tuple already.
+			 */
+			ct = NULL;
+			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-			if (ct->hash_value != hashValue)
-				continue;		/* quickly skip entry if wrong hash val */
+			bucket = &cache->cc_bucket[hashIndex];
+			dlist_foreach(iter, bucket)
+			{
+				ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-			if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-				continue;		/* not same tuple */
+				if (ct->dead || ct->negative)
+					continue;	/* ignore dead and negative entries */
 
-			/*
-			 * Found a match, but can't use it if it belongs to another
-			 * list already
-			 */
-			if (ct->c_list)
-				continue;
+				if (ct->hash_value != hashValue)
+					continue;	/* quickly skip entry if wrong hash val */
 
-			found = true;
-			break;				/* A-OK */
-		}
+				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+					continue;	/* not same tuple */
 
-		if (!found)
-		{
-			/* We didn't find a usable entry, so make a new one */
-			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-										 hashValue, hashIndex);
-			/* upon failure, we must start the scan over */
-			if (ct == NULL)
-			{
 				/*
-				 * Release refcounts on any items we already had. We dare
-				 * not try to free them if they're now unreferenced, since
-				 * an error while doing that would result in the PG_CATCH
-				 * below doing extra refcount decrements. Besides, we'll
-				 * likely re-adopt those items in the next iteration, so
-				 * it's not worth complicating matters to try to get rid
-				 * of them.
+				 * Found a match, but can't use it if it belongs to
+				 * another list already
 				 */
-				foreach(ctlist_item, ctlist)
+				if (ct->c_list)
+					continue;
+
+				found = true;
+				break;			/* A-OK */
+			}
+
+			if (!found)
+			{
+				/* We didn't find a usable entry, so make a new one */
+				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+											 hashValue, hashIndex);
+				/* upon failure, we must start the scan over */
+				if (ct == NULL)
 				{
-					ct = (CatCTup *) lfirst(ctlist_item);
-					Assert(ct->c_list == NULL);
-					Assert(ct->refcount > 0);
-					ct->refcount--;
+					/*
+					 * Release refcounts on any items we already had. We
+					 * dare not try to free them if they're now
+					 * unreferenced, since an error while doing that would
+					 * result in the PG_CATCH below doing extra refcount
+					 * decrements. Besides, we'll likely re-adopt those
+					 * items in the next iteration, so it's not worth
+					 * complicating matters to try to get rid of them.
+					 */
+					foreach(ctlist_item, ctlist)
+					{
+						ct = (CatCTup *) lfirst(ctlist_item);
+						Assert(ct->c_list == NULL);
+						Assert(ct->refcount > 0);
+						ct->refcount--;
+					}
+					/* Reset ctlist in preparation for new try */
+					ctlist = NIL;
+					stale = true;
+					break;
 				}
-				/* Reset ctlist in preparation for new try */
-				ctlist = NIL;
-				stale = true;
-				break;
 			}
-		}
 
-		/* Careful here: add entry to ctlist, then bump its refcount */
-		/* This way leaves state correct if lappend runs out of memory */
-		ctlist = lappend(ctlist, ct);
-		ct->refcount++;
-	}
+			/* Careful here: add entry to ctlist, then bump its refcount */
+			/* This way leaves state correct if lappend runs out of memory */
+			ctlist = lappend(ctlist, ct);
+			ct->refcount++;
+		}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
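The list-building variant in the second hunk follows the same idea with one extra obligation on retry: entries already collected must have their refcounts released and the working list reset before the scan starts over. Below is a self-contained sketch of that bookkeeping, again using a hypothetical make_entry() and a plain array in place of the real catcache machinery; entries dropped on a retry are intentionally not freed, mirroring the caution in the original comment.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Entry
{
	int			refcount;
	int			key;
} Entry;

#define NTUPLES 3

static int	fail_at = 1;		/* make the second tuple stale once */

static Entry *
make_entry(int key, int pos)
{
	Entry	   *e;

	if (pos == fail_at)			/* simulate a concurrent invalidation */
	{
		fail_at = -1;
		return NULL;
	}
	e = malloc(sizeof(Entry));
	e->refcount = 0;
	e->key = key;
	return e;
}

int
main(void)
{
	Entry	   *list[NTUPLES];
	int			nlist;
	bool		stale;

	do
	{
		nlist = 0;
		stale = false;

		for (int pos = 0; pos < NTUPLES; pos++)
		{
			Entry	   *ct = make_entry(100 + pos, pos);

			/* upon failure, release what we hold and start the scan over */
			if (ct == NULL)
			{
				/* matches the foreach() cleanup in the patch */
				for (int i = 0; i < nlist; i++)
					list[i]->refcount--;
				nlist = 0;		/* the "ctlist = NIL" equivalent */
				stale = true;
				break;
			}

			/* add entry to the list first, then bump its refcount */
			list[nlist++] = ct;
			ct->refcount++;
		}
	} while (stale);

	for (int i = 0; i < nlist; i++)
		printf("entry %d refcount %d\n", list[i]->key, list[i]->refcount);
	return 0;
}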