@@ -208,7 +208,7 @@ typedef struct LVShared
208
208
* live tuples in the index vacuum case or the new live tuples in the
209
209
* index cleanup case.
210
210
*
211
- * estimated_count is true if the reltuples is an estimated value.
211
+ * estimated_count is true if reltuples is an estimated value.
212
212
*/
213
213
double reltuples ;
214
214
bool estimated_count ;
@@ -232,8 +232,8 @@ typedef struct LVShared
232
232
233
233
/*
234
234
* Number of active parallel workers. This is used for computing the
235
- * minimum threshold of the vacuum cost balance for a worker to go for the
236
- * delay.
235
+ * minimum threshold of the vacuum cost balance before a worker sleeps for
236
+ * cost-based delay.
237
237
*/
238
238
pg_atomic_uint32 active_nworkers ;
239
239
@@ -732,7 +732,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
732
732
* to reclaim dead line pointers.
733
733
*
734
734
* If the table has at least two indexes, we execute both index vacuum
735
- * and index cleanup with parallel workers unless the parallel vacuum is
735
+ * and index cleanup with parallel workers unless parallel vacuum is
736
736
* disabled. In a parallel vacuum, we enter parallel mode and then
737
737
* create both the parallel context and the DSM segment before starting
738
738
* heap scan so that we can record dead tuples to the DSM segment. All
@@ -809,8 +809,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
809
809
vacrelstats -> latestRemovedXid = InvalidTransactionId ;
810
810
811
811
/*
812
- * Initialize the state for a parallel vacuum. As of now, only one worker
813
- * can be used for an index, so we invoke parallelism only if there are at
812
+ * Initialize state for a parallel vacuum. As of now, only one worker can
813
+ * be used for an index, so we invoke parallelism only if there are at
814
814
* least two indexes on a table.
815
815
*/
816
816
if (params -> nworkers >= 0 && vacrelstats -> useindex && nindexes > 1 )
@@ -837,7 +837,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
837
837
}
838
838
839
839
/*
840
- * Allocate the space for dead tuples in case the parallel vacuum is not
840
+ * Allocate the space for dead tuples in case parallel vacuum is not
841
841
* initialized.
842
842
*/
843
843
if (!ParallelVacuumIsActive (lps ))
@@ -2215,7 +2215,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
2215
2215
shared_indstats = get_indstats (lvshared , idx );
2216
2216
2217
2217
/*
2218
- * Skip processing indexes that doesn't participate in parallel
2218
+ * Skip processing indexes that don't participate in parallel
2219
2219
* operation
2220
2220
*/
2221
2221
if (shared_indstats == NULL ||
@@ -2312,12 +2312,12 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
2312
2312
2313
2313
/*
2314
2314
* Copy the index bulk-deletion result returned from ambulkdelete and
2315
- * amvacuumcleanup to the DSM segment if it's the first time to get it
2316
- * from them, because they allocate it locally and it's possible that an
2317
- * index will be vacuumed by the different vacuum process at the next
2318
- * time. The copying of the result normally happens only after the first
2319
- * time of index vacuuming. From the second time, we pass the result on
2320
- * the DSM segment so that they then update it directly.
2315
+ * amvacuumcleanup to the DSM segment if it's the first cycle because they
2316
+ * allocate locally and it's possible that an index will be vacuumed by a
2317
+ * different vacuum process the next cycle. Copying the result normally
2318
+ * happens only the first time an index is vacuumed. For any additional
2319
+ * vacuum pass, we directly point to the result on the DSM segment and
2320
+ * pass it to vacuum index APIs so that workers can update it directly.
2321
2321
*
2322
2322
* Since all vacuum workers write the bulk-deletion result at different
2323
2323
* slots we can write them without locking.
@@ -2328,8 +2328,8 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
2328
2328
shared_indstats -> updated = true;
2329
2329
2330
2330
/*
2331
- * Now that the stats[idx] points to the DSM segment, we don't need
2332
- * the locally allocated results.
2331
+ * Now that stats[idx] points to the DSM segment, we don't need the
2332
+ * locally allocated results.
2333
2333
*/
2334
2334
pfree (* stats );
2335
2335
* stats = bulkdelete_res ;
@@ -2449,7 +2449,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
2449
2449
* lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
2450
2450
*
2451
2451
* reltuples is the number of heap tuples and estimated_count is true
2452
- * if the reltuples is an estimated value.
2452
+ * if reltuples is an estimated value.
2453
2453
*/
2454
2454
static void
2455
2455
lazy_cleanup_index (Relation indrel ,
@@ -3050,9 +3050,9 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
3050
3050
/*
3051
3051
* Compute the number of parallel worker processes to request. Both index
3052
3052
* vacuum and index cleanup can be executed with parallel workers. The index
3053
- * is eligible for parallel vacuum iff it's size is greater than
3053
+ * is eligible for parallel vacuum iff its size is greater than
3054
3054
* min_parallel_index_scan_size as invoking workers for very small indexes
3055
- * can hurt the performance.
3055
+ * can hurt performance.
3056
3056
*
3057
3057
* nrequested is the number of parallel workers that user requested. If
3058
3058
* nrequested is 0, we compute the parallel degree based on nindexes, that is
@@ -3071,7 +3071,7 @@ compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
3071
3071
int i ;
3072
3072
3073
3073
/*
3074
- * We don't allow to perform parallel operation in standalone backend or
3074
+ * We don't allow performing parallel operation in standalone backend or
3075
3075
* when parallelism is disabled.
3076
3076
*/
3077
3077
if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0 )
@@ -3138,13 +3138,13 @@ prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacuum,
3138
3138
if (!can_parallel_vacuum [i ])
3139
3139
continue ;
3140
3140
3141
- /* Set NOT NULL as this index do support parallelism */
3141
+ /* Set NOT NULL as this index does support parallelism */
3142
3142
lvshared -> bitmap [i >> 3 ] |= 1 << (i & 0x07 );
3143
3143
}
3144
3144
}
3145
3145
3146
3146
/*
3147
- * Update index statistics in pg_class if the statistics is accurate.
3147
+ * Update index statistics in pg_class if the statistics are accurate.
3148
3148
*/
3149
3149
static void
3150
3150
update_index_statistics (Relation * Irel , IndexBulkDeleteResult * * stats ,
@@ -3174,7 +3174,7 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
3174
3174
3175
3175
/*
3176
3176
* This function prepares and returns parallel vacuum state if we can launch
3177
- * even one worker. This function is responsible to enter parallel mode,
3177
+ * even one worker. This function is responsible for entering parallel mode,
3178
3178
* creating a parallel context, and then initializing the DSM segment.
3179
3179
*/
3180
3180
static LVParallelState *
@@ -3345,8 +3345,8 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
3345
3345
/*
3346
3346
* Destroy the parallel context, and end parallel mode.
3347
3347
*
3348
- * Since writes are not allowed during the parallel mode, so we copy the
3349
- * updated index statistics from DSM in local memory and then later use that
3348
+ * Since writes are not allowed during parallel mode, copy the
3349
+ * updated index statistics from DSM into local memory and then later use that
3350
3350
* to update the index statistics. One might think that we can exit from
3351
3351
* parallel mode, update the index statistics and then destroy parallel
3352
3352
* context, but that won't be safe (see ExitParallelMode).
@@ -3452,7 +3452,7 @@ skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
3452
3452
* Perform work within a launched parallel process.
3453
3453
*
3454
3454
* Since parallel vacuum workers perform only index vacuum or index cleanup,
3455
- * we don't need to report the progress information.
3455
+ * we don't need to report progress information.
3456
3456
*/
3457
3457
void
3458
3458
parallel_vacuum_main (dsm_segment * seg , shm_toc * toc )
0 commit comments