Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 3a5e221

Browse files
author
Amit Kapila
committed
Allow parallel vacuum to accumulate buffer usage.
Commit 40d964e allowed the vacuum command to process indexes in parallel, but forgot to accumulate the buffer usage stats of the parallel workers. This change allows the leader backend to accumulate the buffer usage stats of all the parallel workers. Reported-by: Julien Rouhaud. Author: Sawada Masahiko. Reviewed-by: Dilip Kumar, Amit Kapila and Julien Rouhaud. Discussion: https://postgr.es/m/20200328151721.GB12854@nol
1 parent 17e0328 commit 3a5e221

File tree

1 file changed

+45
-2
lines changed

1 file changed

+45
-2
lines changed

src/backend/access/heap/vacuumlazy.c

+45-2
Original file line number | Diff line number | Diff line change
@@ -65,6 +65,7 @@
6565
#include "commands/dbcommands.h"
6666
#include "commands/progress.h"
6767
#include "commands/vacuum.h"
68+
#include "executor/instrument.h"
6869
#include "miscadmin.h"
6970
#include "optimizer/paths.h"
7071
#include "pgstat.h"
@@ -137,6 +138,7 @@
137138
#define PARALLEL_VACUUM_KEY_SHARED 1
138139
#define PARALLEL_VACUUM_KEY_DEAD_TUPLES 2
139140
#define PARALLEL_VACUUM_KEY_QUERY_TEXT 3
141+
#define PARALLEL_VACUUM_KEY_BUFFER_USAGE 4
140142

141143
/*
142144
* Macro to check if we are in a parallel vacuum. If true, we are in the
@@ -270,6 +272,9 @@ typedef struct LVParallelState
270272
/* Shared information among parallel vacuum workers */
271273
LVShared *lvshared;
272274

275+
/* Points to buffer usage area in DSM */
276+
BufferUsage *buffer_usage;
277+
273278
/*
274279
* The number of indexes that support parallel index bulk-deletion and
275280
* parallel index cleanup respectively.
@@ -2137,8 +2142,20 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
21372142
parallel_vacuum_index(Irel, stats, lps->lvshared,
21382143
vacrelstats->dead_tuples, nindexes, vacrelstats);
21392144

2140-
/* Wait for all vacuum workers to finish */
2141-
WaitForParallelWorkersToFinish(lps->pcxt);
2145+
/*
2146+
* Next, accumulate buffer usage. (This must wait for the workers to
2147+
* finish, or we might get incomplete data.)
2148+
*/
2149+
if (nworkers > 0)
2150+
{
2151+
int i;
2152+
2153+
/* Wait for all vacuum workers to finish */
2154+
WaitForParallelWorkersToFinish(lps->pcxt);
2155+
2156+
for (i = 0; i < lps->pcxt->nworkers_launched; i++)
2157+
InstrAccumParallelQuery(&lps->buffer_usage[i]);
2158+
}
21422159

21432160
/*
21442161
* Carry the shared balance value to heap scan and disable shared costing
@@ -3153,6 +3170,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
31533170
ParallelContext *pcxt;
31543171
LVShared *shared;
31553172
LVDeadTuples *dead_tuples;
3173+
BufferUsage *buffer_usage;
31563174
bool *can_parallel_vacuum;
31573175
long maxtuples;
31583176
char *sharedquery;
@@ -3236,6 +3254,17 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
32363254
shm_toc_estimate_chunk(&pcxt->estimator, est_deadtuples);
32373255
shm_toc_estimate_keys(&pcxt->estimator, 1);
32383256

3257+
/*
3258+
* Estimate space for BufferUsage -- PARALLEL_VACUUM_KEY_BUFFER_USAGE.
3259+
*
3260+
* If there are no extensions loaded that care, we could skip this. We
3261+
* have no way of knowing whether anyone's looking at pgBufferUsage, so do
3262+
* it unconditionally.
3263+
*/
3264+
shm_toc_estimate_chunk(&pcxt->estimator,
3265+
mul_size(sizeof(BufferUsage), pcxt->nworkers));
3266+
shm_toc_estimate_keys(&pcxt->estimator, 1);
3267+
32393268
/* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */
32403269
querylen = strlen(debug_query_string);
32413270
shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
@@ -3270,6 +3299,12 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
32703299
shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
32713300
vacrelstats->dead_tuples = dead_tuples;
32723301

3302+
/* Allocate space for each worker's BufferUsage; no need to initialize */
3303+
buffer_usage = shm_toc_allocate(pcxt->toc,
3304+
mul_size(sizeof(BufferUsage), pcxt->nworkers));
3305+
shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, buffer_usage);
3306+
lps->buffer_usage = buffer_usage;
3307+
32733308
/* Store query string for workers */
32743309
sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
32753310
memcpy(sharedquery, debug_query_string, querylen + 1);
@@ -3399,6 +3434,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
33993434
Relation *indrels;
34003435
LVShared *lvshared;
34013436
LVDeadTuples *dead_tuples;
3437+
BufferUsage *buffer_usage;
34023438
int nindexes;
34033439
char *sharedquery;
34043440
IndexBulkDeleteResult **stats;
@@ -3468,10 +3504,17 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
34683504
errcallback.previous = error_context_stack;
34693505
error_context_stack = &errcallback;
34703506

3507+
/* Prepare to track buffer usage during parallel execution */
3508+
InstrStartParallelQuery();
3509+
34713510
/* Process indexes to perform vacuum/cleanup */
34723511
parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
34733512
&vacrelstats);
34743513

3514+
/* Report buffer usage during parallel execution */
3515+
buffer_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, false);
3516+
InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);
3517+
34753518
/* Pop the error context stack */
34763519
error_context_stack = errcallback.previous;
34773520

0 commit comments

Comments
 (0)