 #include "storage/predicate.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
+#include "utils/spccache.h"
 #include "utils/snapmgr.h"
 #include "utils/tqual.h"
@@ -95,9 +96,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
	 * prefetching. node->prefetch_pages tracks exactly how many pages ahead
	 * the prefetch iterator is.  Also, node->prefetch_target tracks the
	 * desired prefetch distance, which starts small and increases up to the
-	 * GUC-controlled maximum, target_prefetch_pages.  This is to avoid doing
-	 * a lot of prefetching in a scan that stops after a few tuples because of
-	 * a LIMIT.
+	 * node->prefetch_maximum.  This is to avoid doing a lot of prefetching in
+	 * a scan that stops after a few tuples because of a LIMIT.
	 */
	if (tbm == NULL)
	{
@@ -111,7 +111,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
		node->tbmres = tbmres = NULL;

 #ifdef USE_PREFETCH
-		if (target_prefetch_pages > 0)
+		if (node->prefetch_maximum > 0)
		{
			node->prefetch_iterator = prefetch_iterator = tbm_begin_iterate(tbm);
			node->prefetch_pages = 0;
@@ -188,10 +188,10 @@ BitmapHeapNext(BitmapHeapScanState *node)
			 * page/tuple, then to one after the second tuple is fetched, then
			 * it doubles as later pages are fetched.
			 */
-			if (node->prefetch_target >= target_prefetch_pages)
+			if (node->prefetch_target >= node->prefetch_maximum)
				 /* don't increase any further */ ;
-			else if (node->prefetch_target >= target_prefetch_pages / 2)
-				node->prefetch_target = target_prefetch_pages;
+			else if (node->prefetch_target >= node->prefetch_maximum / 2)
+				node->prefetch_target = node->prefetch_maximum;
			else if (node->prefetch_target > 0)
				node->prefetch_target *= 2;
			else
@@ -211,7 +211,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
			 * Try to prefetch at least a few pages even before we get to the
			 * second page if we don't stop reading after the first tuple.
			 */
-			if (node->prefetch_target < target_prefetch_pages)
+			if (node->prefetch_target < node->prefetch_maximum)
				node->prefetch_target++;
 #endif   /* USE_PREFETCH */
		}
@@ -539,6 +539,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 {
	BitmapHeapScanState *scanstate;
	Relation	currentRelation;
+	int			io_concurrency;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
@@ -564,6 +565,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
	scanstate->prefetch_iterator = NULL;
	scanstate->prefetch_pages = 0;
	scanstate->prefetch_target = 0;
+	/* may be updated below */
+	scanstate->prefetch_maximum = target_prefetch_pages;

	/*
	 * Miscellaneous initialization
@@ -598,6 +601,22 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
	 */
	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);

+	/*
+	 * Determine the maximum for prefetch_target.  If the tablespace has a
+	 * specific IO concurrency set, use that to compute the corresponding
+	 * maximum value; otherwise, we already initialized to the value computed
+	 * by the GUC machinery.
+	 */
+	io_concurrency =
+		get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);
+	if (io_concurrency != effective_io_concurrency)
+	{
+		double		maximum;
+
+		if (ComputeIoConcurrency(io_concurrency, &maximum))
+			scanstate->prefetch_maximum = rint(maximum);
+	}
+
	scanstate->ss.ss_currentRelation = currentRelation;

	/*
0 commit comments