
Commit 2157a61

Back-port fix for accumulation of parallel worker instrumentation.

When a Gather or Gather Merge node is started and stopped multiple times,
accumulate instrumentation data only once, at the end, instead of after each
execution, to avoid recording inflated totals. This is a back-port of commit
8526bcb by Amit Kapila.

Discussion: http://postgr.es/m/20171127175631.GA405@depesz.com
Discussion: http://postgr.es/m/CAA4eK1KT3BYj50qWhK5qBF=LDzQCoUVSFZjcK3mHoJJeWA+fNA@mail.gmail.com
1 parent b64d6c9 commit 2157a61
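The gist of the bug: each worker's instrumentation counters live in dynamic
shared memory and keep growing across rescans of the Gather node, so folding
them into the leader's per-node totals after every execution counts earlier
passes repeatedly. Below is a minimal standalone C sketch of that arithmetic;
the names (worker_instr, execute_gather_once, and so on) and the row counts
are invented for illustration and are not PostgreSQL code.

/*
 * Sketch of the accumulation bug: a worker's counters are cumulative, so
 * folding them into the leader after each pass double-counts on rescan.
 */
#include <stdio.h>

typedef struct Instrumentation
{
	long		ntuples;		/* rows processed */
} Instrumentation;

static Instrumentation worker_instr;	/* running totals, as in the DSM */
static Instrumentation leader_instr;	/* what EXPLAIN ANALYZE reports */

/* one execution of the parallel node: the worker adds to its totals */
static void
execute_gather_once(void)
{
	worker_instr.ntuples += 100;	/* pretend each pass returns 100 rows */
}

/* fold the worker's (cumulative) totals into the leader's counters */
static void
retrieve_instrumentation(void)
{
	leader_instr.ntuples += worker_instr.ntuples;
}

int
main(void)
{
	int			nrescans = 10;

	/* Buggy pattern: retrieve after every execution (pre-fix behavior). */
	worker_instr.ntuples = leader_instr.ntuples = 0;
	for (int i = 0; i < nrescans; i++)
	{
		execute_gather_once();
		retrieve_instrumentation();
	}
	printf("per-execution accumulation: %ld rows (inflated)\n",
		   leader_instr.ntuples);

	/* Fixed pattern: retrieve once, at cleanup. */
	worker_instr.ntuples = leader_instr.ntuples = 0;
	for (int i = 0; i < nrescans; i++)
		execute_gather_once();
	retrieve_instrumentation();
	printf("accumulate-at-cleanup: %ld rows (correct)\n",
		   leader_instr.ntuples);

	return 0;
}

With 10 rescans of 100 rows each, the per-execution pattern reports 5500
rows instead of the correct 1000, which is the inflation the commit message
describes.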

File tree

3 files changed: +82 -10 lines changed

src/backend/executor/execParallel.c

Lines changed: 10 additions & 10 deletions
@@ -536,7 +536,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
 
 /*
  * Finish parallel execution.  We wait for parallel workers to finish, and
- * accumulate their buffer usage and instrumentation.
+ * accumulate their buffer usage.
  */
 void
 ExecParallelFinish(ParallelExecutorInfo *pei)
@@ -553,23 +553,23 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
 	for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
 		InstrAccumParallelQuery(&pei->buffer_usage[i]);
 
-	/* Finally, accumulate instrumentation, if any. */
-	if (pei->instrumentation)
-		ExecParallelRetrieveInstrumentation(pei->planstate,
-											pei->instrumentation);
-
 	pei->finished = true;
 }
 
 /*
- * Clean up whatever ParallelExecutorInfo resources still exist after
- * ExecParallelFinish.  We separate these routines because someone might
- * want to examine the contents of the DSM after ExecParallelFinish and
- * before calling this routine.
+ * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
+ * resources still exist after ExecParallelFinish.  We separate these
+ * routines because someone might want to examine the contents of the DSM
+ * after ExecParallelFinish and before calling this routine.
  */
 void
 ExecParallelCleanup(ParallelExecutorInfo *pei)
 {
+	/* Accumulate instrumentation, if any. */
+	if (pei->instrumentation)
+		ExecParallelRetrieveInstrumentation(pei->planstate,
+											pei->instrumentation);
+
 	if (pei->pcxt != NULL)
 	{
 		DestroyParallelContext(pei->pcxt);
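The mechanical change is small: the call to ExecParallelRetrieveInstrumentation()
moves out of ExecParallelFinish(), which runs after every execution of the
parallel node, and into ExecParallelCleanup(), which runs exactly once when the
plan shuts down. Buffer-usage accumulation stays in ExecParallelFinish(); only
the per-node instrumentation retrieval is deferred.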

src/test/regress/expected/select_parallel.out

Lines changed: 44 additions & 0 deletions
@@ -123,6 +123,50 @@ select avg(aa::int8) from a_star;
  13.6538461538461538
 (1 row)
 
+-- test accumulation of stats for parallel nodes
+set enable_indexscan to off;
+set enable_bitmapscan to off;
+set enable_material to off;
+alter table tenk2 set (parallel_workers = 0);
+create function explain_parallel_stats() returns setof text
+language plpgsql as
+$$
+declare ln text;
+begin
+    for ln in
+        explain (analyze, timing off, costs off)
+          select count(*) from tenk1, tenk2 where
+            tenk1.hundred > 1 and tenk2.thousand=0
+    loop
+        ln := regexp_replace(ln, 'Planning time: \S*', 'Planning time: xxx');
+        ln := regexp_replace(ln, 'Execution time: \S*', 'Execution time: xxx');
+        return next ln;
+    end loop;
+end;
+$$;
+select * from explain_parallel_stats();
+                          explain_parallel_stats
+--------------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Nested Loop (actual rows=98000 loops=1)
+         ->  Seq Scan on tenk2 (actual rows=10 loops=1)
+               Filter: (thousand = 0)
+               Rows Removed by Filter: 9990
+         ->  Gather (actual rows=9800 loops=10)
+               Workers Planned: 4
+               Workers Launched: 4
+               ->  Parallel Seq Scan on tenk1 (actual rows=1960 loops=50)
+                     Filter: (hundred > 1)
+                     Rows Removed by Filter: 40
+ Planning time: xxx ms
+ Execution time: xxx ms
+(13 rows)
+
+reset enable_indexscan;
+reset enable_bitmapscan;
+reset enable_material;
+alter table tenk2 reset (parallel_workers);
+drop function explain_parallel_stats();
 -- test the sanity of parallel query after the active role is dropped.
 set force_parallel_mode=1;
 drop role if exists regress_parallel_worker;
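The plan shape is the point of the test: with parallel_workers = 0 on tenk2,
the Gather over tenk1 ends up on the inner side of the nested loop, so it is
started and stopped once per qualifying outer row (loops=10). The Parallel Seq
Scan's loops=50 reflects 10 rescans times 5 participating processes (4 workers
plus the leader); before this fix, repeated accumulation would have inflated
these row counts.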

src/test/regress/sql/select_parallel.sql

Lines changed: 28 additions & 0 deletions
@@ -45,6 +45,34 @@ select avg(aa::int8) from a_star;
 
 select avg(aa::int8) from a_star;
 
+-- test accumulation of stats for parallel nodes
+set enable_indexscan to off;
+set enable_bitmapscan to off;
+set enable_material to off;
+alter table tenk2 set (parallel_workers = 0);
+create function explain_parallel_stats() returns setof text
+language plpgsql as
+$$
+declare ln text;
+begin
+    for ln in
+        explain (analyze, timing off, costs off)
+          select count(*) from tenk1, tenk2 where
+            tenk1.hundred > 1 and tenk2.thousand=0
+    loop
+        ln := regexp_replace(ln, 'Planning time: \S*', 'Planning time: xxx');
+        ln := regexp_replace(ln, 'Execution time: \S*', 'Execution time: xxx');
+        return next ln;
+    end loop;
+end;
+$$;
+select * from explain_parallel_stats();
+reset enable_indexscan;
+reset enable_bitmapscan;
+reset enable_material;
+alter table tenk2 reset (parallel_workers);
+drop function explain_parallel_stats();
+
 -- test the sanity of parallel query after the active role is dropped.
 set force_parallel_mode=1;
 drop role if exists regress_parallel_worker;
