@@ -1366,34 +1366,34 @@ SearchCatCacheMiss(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;

-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);

-    ct = NULL;
-    stale = false;
+        ct = NULL;
+        stale = false;

-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                     hashValue, hashIndex);
-        /* upon failure, we must start the scan over */
-        if (ct == NULL)
         {
-            stale = true;
-            break;
+            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                         hashValue, hashIndex);
+            /* upon failure, we must start the scan over */
+            if (ct == NULL)
+            {
+                stale = true;
+                break;
+            }
+            /* immediately set the refcount to 1 */
+            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+            ct->refcount++;
+            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+            break;              /* assume only one match */
         }
-        /* immediately set the refcount to 1 */
-        ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-        ct->refcount++;
-        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-        break;                  /* assume only one match */
-    }

-    systable_endscan(scandesc);
+        systable_endscan(scandesc);

     } while (stale);

     table_close(relation, AccessShareLock);
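Note on the hunk above: it is a pure re-indentation. The scan body of SearchCatCacheMiss already lives inside a do { ... } while (stale); retry loop (the unchanged "} while (stale);" context line), and when CatalogCacheCreateEntry() returns NULL the entry under construction is treated as stale and the whole index scan is restarted. The following is a minimal, self-contained sketch of that retry shape; build_entry, Entry, and the simulated scan are hypothetical stand-ins, not the real catcache or systable APIs.

/*
 * Sketch of the retry loop the hunk above re-indents: run a scan, try to
 * build one cache entry, and if the build reports failure (NULL), restart
 * the scan from the top.  All names here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Entry
{
    int         value;
} Entry;

/* Simulate a build that fails once (as if the row went stale mid-build). */
static Entry *
build_entry(int raw)
{
    static bool first_try = true;

    if (first_try)
    {
        first_try = false;
        return NULL;            /* NULL means: caller must restart its scan */
    }

    Entry      *e = malloc(sizeof(Entry));

    e->value = raw;
    return e;
}

int
main(void)
{
    Entry      *entry = NULL;
    bool        stale;

    do
    {
        int         pos = 0;    /* stand-in for systable_beginscan() */

        entry = NULL;
        stale = false;

        while (pos < 1)         /* stand-in for systable_getnext() */
        {
            entry = build_entry(pos);
            /* upon failure, we must start the scan over */
            if (entry == NULL)
            {
                stale = true;
                break;
            }
            break;              /* assume only one match */
        }
        /* a stand-in for systable_endscan() would go here */
    } while (stale);

    printf("built entry with value %d\n", entry->value);
    free(entry);
    return 0;
}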
@@ -1653,95 +1653,95 @@ SearchCatCacheList(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;

-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
-
-    /* The list will be ordered iff we are doing an index scan */
-    ordered = (scandesc->irel != NULL);
-
-    stale = false;
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);

-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        uint32      hashValue;
-        Index       hashIndex;
-        bool        found = false;
-        dlist_head *bucket;
+        /* The list will be ordered iff we are doing an index scan */
+        ordered = (scandesc->irel != NULL);

-        /*
-         * See if there's an entry for this tuple already.
-         */
-        ct = NULL;
-        hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-        hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+        stale = false;

-        bucket = &cache->cc_bucket[hashIndex];
-        dlist_foreach(iter, bucket)
+        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
         {
-            ct = dlist_container(CatCTup, cache_elem, iter.cur);
+            uint32      hashValue;
+            Index       hashIndex;
+            bool        found = false;
+            dlist_head *bucket;

-            if (ct->dead || ct->negative)
-                continue;       /* ignore dead and negative entries */
+            /*
+             * See if there's an entry for this tuple already.
+             */
+            ct = NULL;
+            hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+            hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

-            if (ct->hash_value != hashValue)
-                continue;       /* quickly skip entry if wrong hash val */
+            bucket = &cache->cc_bucket[hashIndex];
+            dlist_foreach(iter, bucket)
+            {
+                ct = dlist_container(CatCTup, cache_elem, iter.cur);

-            if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-                continue;       /* not same tuple */
+                if (ct->dead || ct->negative)
+                    continue;   /* ignore dead and negative entries */

-            /*
-             * Found a match, but can't use it if it belongs to another
-             * list already
-             */
-            if (ct->c_list)
-                continue;
+                if (ct->hash_value != hashValue)
+                    continue;   /* quickly skip entry if wrong hash val */

-            found = true;
-            break;              /* A-OK */
-        }
+                if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+                    continue;   /* not same tuple */

-        if (!found)
-        {
-            /* We didn't find a usable entry, so make a new one */
-            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                         hashValue, hashIndex);
-            /* upon failure, we must start the scan over */
-            if (ct == NULL)
-            {
                 /*
-                 * Release refcounts on any items we already had.  We dare
-                 * not try to free them if they're now unreferenced, since
-                 * an error while doing that would result in the PG_CATCH
-                 * below doing extra refcount decrements.  Besides, we'll
-                 * likely re-adopt those items in the next iteration, so
-                 * it's not worth complicating matters to try to get rid
-                 * of them.
+                 * Found a match, but can't use it if it belongs to
+                 * another list already
                  */
-                foreach(ctlist_item, ctlist)
+                if (ct->c_list)
+                    continue;
+
+                found = true;
+                break;          /* A-OK */
+            }
+
+            if (!found)
+            {
+                /* We didn't find a usable entry, so make a new one */
+                ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                             hashValue, hashIndex);
+                /* upon failure, we must start the scan over */
+                if (ct == NULL)
                 {
-                    ct = (CatCTup *) lfirst(ctlist_item);
-                    Assert(ct->c_list == NULL);
-                    Assert(ct->refcount > 0);
-                    ct->refcount--;
+                    /*
+                     * Release refcounts on any items we already had.  We
+                     * dare not try to free them if they're now
+                     * unreferenced, since an error while doing that would
+                     * result in the PG_CATCH below doing extra refcount
+                     * decrements.  Besides, we'll likely re-adopt those
+                     * items in the next iteration, so it's not worth
+                     * complicating matters to try to get rid of them.
+                     */
+                    foreach(ctlist_item, ctlist)
+                    {
+                        ct = (CatCTup *) lfirst(ctlist_item);
+                        Assert(ct->c_list == NULL);
+                        Assert(ct->refcount > 0);
+                        ct->refcount--;
+                    }
+                    /* Reset ctlist in preparation for new try */
+                    ctlist = NIL;
+                    stale = true;
+                    break;
                 }
-                /* Reset ctlist in preparation for new try */
-                ctlist = NIL;
-                stale = true;
-                break;
             }
-        }

-        /* Careful here: add entry to ctlist, then bump its refcount */
-        /* This way leaves state correct if lappend runs out of memory */
-        ctlist = lappend(ctlist, ct);
-        ct->refcount++;
-    }
+            /* Careful here: add entry to ctlist, then bump its refcount */
+            /* This way leaves state correct if lappend runs out of memory */
+            ctlist = lappend(ctlist, ct);
+            ct->refcount++;
+        }

-    systable_endscan(scandesc);
+        systable_endscan(scandesc);

     } while (stale);

     table_close(relation, AccessShareLock);
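The SearchCatCacheList hunk is the same re-indentation, but this path accumulates several entries in ctlist, so a stale result first drops the refcounts taken on the entries collected so far and resets the list to NIL before the scan is retried. A rough stand-alone sketch of that "undo partial work, then rescan" pattern follows; Entry, build_entry, MAX_ITEMS, and the refcount handling are hypothetical stand-ins, not the actual catcache structures.

/*
 * Sketch of the list-building variant: entries are pinned (refcount++) as
 * they are added to the list; if a build fails mid-scan, every refcount
 * taken so far is released and the list is reset before the scan retries.
 * Entry, build_entry, MAX_ITEMS etc. are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ITEMS 4

typedef struct Entry
{
    int         value;
    int         refcount;
} Entry;

static Entry pool[MAX_ITEMS];   /* pretend these are catalog rows */

/* Fail once, on the third row of the first pass, to force a retry. */
static Entry *
build_entry(int i)
{
    static bool failed_once = false;

    if (i == 2 && !failed_once)
    {
        failed_once = true;
        return NULL;            /* NULL means: restart the scan */
    }
    pool[i].value = i * 10;
    return &pool[i];
}

int
main(void)
{
    Entry      *list[MAX_ITEMS];
    int         nitems;
    bool        stale;

    do
    {
        nitems = 0;
        stale = false;

        for (int i = 0; i < MAX_ITEMS; i++) /* stand-in for the scan */
        {
            Entry      *e = build_entry(i);

            if (e == NULL)
            {
                /*
                 * Release the pins we already took; we will likely re-adopt
                 * these entries on the next pass anyway.
                 */
                for (int j = 0; j < nitems; j++)
                    list[j]->refcount--;
                nitems = 0;     /* reset the list in preparation for new try */
                stale = true;
                break;
            }
            /* add to the list first, then bump the refcount */
            list[nitems++] = e;
            e->refcount++;
        }
    } while (stale);

    for (int i = 0; i < nitems; i++)
        printf("entry %d: value=%d refcount=%d\n",
               i, list[i]->value, list[i]->refcount);
    return 0;
}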