@@ -1418,34 +1418,34 @@ SearchCatCacheMiss(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;

-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);

-    ct = NULL;
-    stale = false;
+        ct = NULL;
+        stale = false;

-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                     hashValue, hashIndex);
-        /* upon failure, we must start the scan over */
-        if (ct == NULL)
+        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
         {
-            stale = true;
-            break;
+            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                         hashValue, hashIndex);
+            /* upon failure, we must start the scan over */
+            if (ct == NULL)
+            {
+                stale = true;
+                break;
+            }
+            /* immediately set the refcount to 1 */
+            ResourceOwnerEnlarge(CurrentResourceOwner);
+            ct->refcount++;
+            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+            break;              /* assume only one match */
         }
-        /* immediately set the refcount to 1 */
-        ResourceOwnerEnlarge(CurrentResourceOwner);
-        ct->refcount++;
-        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-        break;                  /* assume only one match */
-    }

-    systable_endscan(scandesc);
+        systable_endscan(scandesc);
     } while (stale);

     table_close(relation, AccessShareLock);
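
The removed and re-added lines in this hunk are token-identical, so it appears to be a whitespace-only re-indent: the scan body moves one level deeper, inside the `do { ... } while (stale);` loop that restarts the systable scan whenever `CatalogCacheCreateEntry` returns NULL to signal it detected a concurrently invalidated (stale) tuple. As a standalone illustration of that retry-on-stale pattern, here is a minimal C sketch; `begin_scan`, `next_tuple`, and `create_entry` are hypothetical stand-ins for the PostgreSQL calls, not real APIs.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for a catalog tuple and a cache entry. */
typedef struct { int id; } Tuple;
typedef struct { Tuple tuple; int refcount; } Entry;

static int scan_pos;                    /* crude cursor standing in for a scan descriptor */

static void begin_scan(void) { scan_pos = 0; }   /* analogue of systable_beginscan */

static Tuple *next_tuple(void)                   /* analogue of systable_getnext */
{
    static Tuple t = {42};
    return (scan_pos++ == 0) ? &t : NULL;        /* one matching tuple, then end of scan */
}

/* Analogue of CatalogCacheCreateEntry: returns NULL the first time,
 * as if an invalidation arrived while the entry was being built. */
static Entry *create_entry(const Tuple *t)
{
    static int calls;
    if (calls++ == 0)
        return NULL;
    Entry *e = malloc(sizeof(Entry));
    e->tuple = *t;
    e->refcount = 0;
    return e;
}

int main(void)
{
    Entry *entry;
    bool   stale;

    do
    {
        begin_scan();                   /* (re)start the scan from the top */
        entry = NULL;
        stale = false;

        Tuple *t;
        while ((t = next_tuple()) != NULL)
        {
            entry = create_entry(t);
            if (entry == NULL)          /* went stale mid-build: rescan */
            {
                stale = true;
                break;
            }
            entry->refcount++;          /* pin immediately, as the real code does */
            break;                      /* assume only one match */
        }
        /* a real scan would be closed here (systable_endscan) */
    } while (stale);

    printf("built entry %d with refcount %d after a retry\n",
           entry->tuple.id, entry->refcount);
    return 0;
}
```

Compiled as C99, the sketch takes exactly one retry: the first pass sees the simulated invalidation, the second pass builds and pins the entry.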
@@ -1710,95 +1710,95 @@ SearchCatCacheList(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;

-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);

-    /* The list will be ordered iff we are doing an index scan */
-    ordered = (scandesc->irel != NULL);
+        /* The list will be ordered iff we are doing an index scan */
+        ordered = (scandesc->irel != NULL);

-    stale = false;
-
-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        uint32      hashValue;
-        Index       hashIndex;
-        bool        found = false;
-        dlist_head *bucket;
-
-        /*
-         * See if there's an entry for this tuple already.
-         */
-        ct = NULL;
-        hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-        hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+        stale = false;

-        bucket = &cache->cc_bucket[hashIndex];
-        dlist_foreach(iter, bucket)
+        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
         {
-            ct = dlist_container(CatCTup, cache_elem, iter.cur);
+            uint32      hashValue;
+            Index       hashIndex;
+            bool        found = false;
+            dlist_head *bucket;

-            if (ct->dead || ct->negative)
-                continue;       /* ignore dead and negative entries */
+            /*
+             * See if there's an entry for this tuple already.
+             */
+            ct = NULL;
+            hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+            hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

-            if (ct->hash_value != hashValue)
-                continue;       /* quickly skip entry if wrong hash val */
+            bucket = &cache->cc_bucket[hashIndex];
+            dlist_foreach(iter, bucket)
+            {
+                ct = dlist_container(CatCTup, cache_elem, iter.cur);

-            if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-                continue;       /* not same tuple */
+                if (ct->dead || ct->negative)
+                    continue;   /* ignore dead and negative entries */

-            /*
-             * Found a match, but can't use it if it belongs to another
-             * list already
-             */
-            if (ct->c_list)
-                continue;
+                if (ct->hash_value != hashValue)
+                    continue;   /* quickly skip entry if wrong hash val */

-            found = true;
-            break;              /* A-OK */
-        }
+                if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+                    continue;   /* not same tuple */

-        if (!found)
-        {
-            /* We didn't find a usable entry, so make a new one */
-            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                         hashValue, hashIndex);
-            /* upon failure, we must start the scan over */
-            if (ct == NULL)
-            {
                 /*
-                 * Release refcounts on any items we already had. We dare
-                 * not try to free them if they're now unreferenced, since
-                 * an error while doing that would result in the PG_CATCH
-                 * below doing extra refcount decrements. Besides, we'll
-                 * likely re-adopt those items in the next iteration, so
-                 * it's not worth complicating matters to try to get rid
-                 * of them.
+                 * Found a match, but can't use it if it belongs to
+                 * another list already
                  */
-                foreach(ctlist_item, ctlist)
+                if (ct->c_list)
+                    continue;
+
+                found = true;
+                break;          /* A-OK */
+            }
+
+            if (!found)
+            {
+                /* We didn't find a usable entry, so make a new one */
+                ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                             hashValue, hashIndex);
+                /* upon failure, we must start the scan over */
+                if (ct == NULL)
                 {
-                    ct = (CatCTup *) lfirst(ctlist_item);
-                    Assert(ct->c_list == NULL);
-                    Assert(ct->refcount > 0);
-                    ct->refcount--;
+                    /*
+                     * Release refcounts on any items we already had. We
+                     * dare not try to free them if they're now
+                     * unreferenced, since an error while doing that would
+                     * result in the PG_CATCH below doing extra refcount
+                     * decrements. Besides, we'll likely re-adopt those
+                     * items in the next iteration, so it's not worth
+                     * complicating matters to try to get rid of them.
+                     */
+                    foreach(ctlist_item, ctlist)
+                    {
+                        ct = (CatCTup *) lfirst(ctlist_item);
+                        Assert(ct->c_list == NULL);
+                        Assert(ct->refcount > 0);
+                        ct->refcount--;
+                    }
+                    /* Reset ctlist in preparation for new try */
+                    ctlist = NIL;
+                    stale = true;
+                    break;
                 }
-                /* Reset ctlist in preparation for new try */
-                ctlist = NIL;
-                stale = true;
-                break;
             }
-        }

-        /* Careful here: add entry to ctlist, then bump its refcount */
-        /* This way leaves state correct if lappend runs out of memory */
-        ctlist = lappend(ctlist, ct);
-        ct->refcount++;
-    }
+            /* Careful here: add entry to ctlist, then bump its refcount */
+            /* This way leaves state correct if lappend runs out of memory */
+            ctlist = lappend(ctlist, ct);
+            ct->refcount++;
+        }

-    systable_endscan(scandesc);
+        systable_endscan(scandesc);
     } while (stale);

     table_close(relation, AccessShareLock);
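
The `SearchCatCacheList` hunk is the same kind of re-indent, and its retry path carries extra bookkeeping visible in the comments above: on a stale detection, the entries collected so far are unpinned but not freed (they will likely be re-adopted on the next pass) and `ctlist` is reset before the rescan; and each entry is appended to the list *before* its refcount is bumped, so an out-of-memory failure in `lappend` leaves the counts consistent either way. A minimal C sketch of that ordering follows; `build_entry` and the fixed-size list are hypothetical stand-ins, with the stub failing once partway through the first pass and re-serving already-built entries on the retry.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; int refcount; } Entry;

enum { NTUPLES = 3 };

/* Hypothetical stand-in for CatalogCacheCreateEntry plus the bucket
 * lookup: fails once on the second tuple of the first pass, as if an
 * invalidation arrived mid-scan, and re-serves cached entries later. */
static Entry *build_entry(int id)
{
    static Entry *cache[NTUPLES];
    static int calls;

    if (calls++ == 1)
        return NULL;                    /* simulate stale detection */
    if (cache[id] == NULL)
    {
        cache[id] = calloc(1, sizeof(Entry));
        cache[id]->id = id;
    }
    return cache[id];                   /* re-adopt existing entry on retry */
}

int main(void)
{
    Entry *list[NTUPLES];
    int    n;
    bool   stale;

    do
    {
        n = 0;                          /* reset the list for a fresh scan */
        stale = false;
        for (int id = 0; id < NTUPLES; id++)    /* analogue of the index scan */
        {
            Entry *e = build_entry(id);
            if (e == NULL)
            {
                /* Unpin entries gathered so far, keep them around for
                 * re-adoption, and restart the whole scan. */
                for (int i = 0; i < n; i++)
                    list[i]->refcount--;
                stale = true;
                break;
            }
            /* Append first, then pin: if appending could fail (as
             * lappend can on OOM), the counts stay consistent. */
            list[n++] = e;
            e->refcount++;
        }
    } while (stale);

    for (int i = 0; i < n; i++)
        printf("entry %d refcount=%d\n", list[i]->id, list[i]->refcount);
    return 0;
}
```

Run, the sketch aborts its first pass after one entry, unpins it, and the second pass rebuilds the full list with every refcount at exactly 1, mirroring the invariant the real code preserves across retries.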