
Commit da08a65

Refactor bitmap heap scan estimation of heap pages fetched.

Currently, we only need this logic in order to cost a Bitmap Heap Scan.
But a pending patch for Parallel Bitmap Heap Scan also uses it to help
figure out how many workers to use for the scan, which has to be
determined prior to costing.  So, move the logic to a separate function
to make that easier.

Dilip Kumar.  The patch series of which this is a part has been reviewed
by Andres Freund, Amit Khandekar, Tushar Ahuja, Rafia Sabih, Haribabu
Kommi, and me; it is not clear from the email discussion which of those
people have looked specifically at this part.

Discussion: http://postgr.es/m/CAFiTN-v3QYNJEZnnmKCeATuLbN-h9tMVfeEF0+BrouYDqjXgwg@mail.gmail.com
1 parent 350cb92 commit da08a65
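
The refactoring matters because the heap-page estimate must exist before costing, so the pending parallel patch can use it to size its worker pool. Below is a minimal standalone sketch of that call pattern in plain C: stub_bitmap_pages, choose_bitmap_workers, and the page threshold are all hypothetical illustrations, not code from this commit or from the parallel patch.

#include <math.h>
#include <stdio.h>

/*
 * Stand-in for compute_bitmap_pages(): the real function added by this
 * commit also derives the estimate from the index path's cost and
 * selectivity; here we simply pass the estimated page count through.
 */
static double
stub_bitmap_pages(double heap_pages_estimate)
{
    return heap_pages_estimate;
}

/*
 * Hypothetical heuristic: one worker per chunk of pages, capped.  The
 * actual policy belongs to the pending parallel patch and is not shown
 * in this commit; this only illustrates why the page estimate must be
 * available before any cost is computed.
 */
static int
choose_bitmap_workers(double pages_fetched, double pages_per_worker,
                      int max_workers)
{
    int workers = (int) floor(pages_fetched / pages_per_worker);

    if (workers < 0)
        workers = 0;
    if (workers > max_workers)
        workers = max_workers;
    return workers;
}

int
main(void)
{
    /* estimate pages first, decide on workers, and only then cost the scan */
    double pages = stub_bitmap_pages(4000.0);
    int    workers = choose_bitmap_workers(pages, 1000.0, 8);

    printf("estimated pages = %.0f, workers = %d\n", pages, workers);
    return 0;
}

With a 4000-page estimate and one worker per 1000 pages, the sketch picks 4 workers; the point is only the ordering, with the estimate computed up front and costing able to follow.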

File tree

2 files changed: 72 additions, 41 deletions

src/backend/optimizer/path/costsize.c

Lines changed: 70 additions & 41 deletions
@@ -813,7 +813,6 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	Cost		startup_cost = 0;
 	Cost		run_cost = 0;
 	Cost		indexTotalCost;
-	Selectivity indexSelectivity;
 	QualCost	qpqual_cost;
 	Cost		cpu_per_tuple;
 	Cost		cost_per_page;
@@ -837,54 +836,18 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	if (!enable_bitmapscan)
 		startup_cost += disable_cost;
 
-	/*
-	 * Fetch total cost of obtaining the bitmap, as well as its total
-	 * selectivity.
-	 */
-	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+	pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
+										 loop_count, &indexTotalCost,
+										 &tuples_fetched);
 
 	startup_cost += indexTotalCost;
+	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
 
 	/* Fetch estimated page costs for tablespace containing table. */
 	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  &spc_seq_page_cost);
 
-	/*
-	 * Estimate number of main-table pages fetched.
-	 */
-	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
-
-	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
-
-	if (loop_count > 1)
-	{
-		/*
-		 * For repeated bitmap scans, scale up the number of tuples fetched in
-		 * the Mackert and Lohman formula by the number of scans, so that we
-		 * estimate the number of pages fetched by all the scans. Then
-		 * pro-rate for one scan.
-		 */
-		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
-											baserel->pages,
-											get_indexpath_pages(bitmapqual),
-											root);
-		pages_fetched /= loop_count;
-	}
-	else
-	{
-		/*
-		 * For a single scan, the number of heap pages that need to be fetched
-		 * is the same as the Mackert and Lohman formula for the case T <= b
-		 * (ie, no re-reads needed).
-		 */
-		pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
-	}
-	if (pages_fetched >= T)
-		pages_fetched = T;
-	else
-		pages_fetched = ceil(pages_fetched);
-
 	/*
 	 * For small numbers of pages we should charge spc_random_page_cost
 	 * apiece, while if nearly all the table's pages are being read, it's more
@@ -4820,3 +4783,69 @@ get_parallel_divisor(Path *path)
 
 	return parallel_divisor;
 }
+
+/*
+ * compute_bitmap_pages
+ *
+ * compute number of pages fetched from heap in bitmap heap scan.
+ */
+double
+compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
+					 int loop_count, Cost *cost, double *tuple)
+{
+	Cost		indexTotalCost;
+	Selectivity indexSelectivity;
+	double		T;
+	double		pages_fetched;
+	double		tuples_fetched;
+
+	/*
+	 * Fetch total cost of obtaining the bitmap, as well as its total
+	 * selectivity.
+	 */
+	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+
+	/*
+	 * Estimate number of main-table pages fetched.
+	 */
+	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
+
+	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
+
+	if (loop_count > 1)
+	{
+		/*
+		 * For repeated bitmap scans, scale up the number of tuples fetched in
+		 * the Mackert and Lohman formula by the number of scans, so that we
+		 * estimate the number of pages fetched by all the scans. Then
+		 * pro-rate for one scan.
+		 */
+		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
+											baserel->pages,
+											get_indexpath_pages(bitmapqual),
+											root);
+		pages_fetched /= loop_count;
+	}
+	else
+	{
+		/*
+		 * For a single scan, the number of heap pages that need to be fetched
+		 * is the same as the Mackert and Lohman formula for the case T <= b
+		 * (ie, no re-reads needed).
+		 */
+		pages_fetched =
+			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
+	}
+
+	if (pages_fetched >= T)
+		pages_fetched = T;
+	else
+		pages_fetched = ceil(pages_fetched);
+
+	if (cost)
+		*cost = indexTotalCost;
+	if (tuple)
+		*tuple = tuples_fetched;
+
+	return pages_fetched;
+}
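
For orientation, the single-scan branch above (loop_count <= 1) is the Mackert-Lohman approximation for the T <= b case, pages_fetched = (2 * T * t) / (2 * T + t), clamped to the relation size and rounded up. A self-contained sketch of just that arithmetic, using plain doubles and made-up numbers rather than PostgreSQL types:

#include <math.h>
#include <stdio.h>

/*
 * The single-scan arithmetic from compute_bitmap_pages, reproduced with
 * plain doubles: Mackert-Lohman for the T <= b case, then clamped to the
 * relation size and rounded up to whole pages.
 */
static double
single_scan_bitmap_pages(double rel_pages, double tuples_fetched)
{
    double T = (rel_pages > 1) ? rel_pages : 1.0;
    double pages_fetched;

    pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);

    if (pages_fetched >= T)
        pages_fetched = T;
    else
        pages_fetched = ceil(pages_fetched);
    return pages_fetched;
}

int
main(void)
{
    /* 1000-page heap, 500 tuples: 2*1000*500 / (2*1000 + 500) = 400 pages */
    printf("%.0f\n", single_scan_bitmap_pages(1000.0, 500.0));

    /* extreme tuple count: the estimate saturates at the relation size */
    printf("%.0f\n", single_scan_bitmap_pages(1000.0, 1e9));
    return 0;
}

Compiled with something like cc sketch.c -lm, the first call yields 1,000,000 / 2,500 = 400 pages out of 1000, while the second saturates at T = 1000.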

src/include/optimizer/cost.h

Lines changed: 2 additions & 0 deletions
@@ -183,6 +183,8 @@ extern void set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel,
 					   double cte_rows);
 extern void set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel);
 extern PathTarget *set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target);
+extern double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
+					Path *bitmapqual, int loop_count, Cost *cost, double *tuple);
 
 /*
  * prototypes for clausesel.c