Fixes for Disk-based Hash Aggregation.
author      Jeff Davis <jdavis@postgresql.org>
            Mon, 23 Mar 2020 20:56:28 +0000 (13:56 -0700)
committer   Jeff Davis <jdavis@postgresql.org>
            Mon, 23 Mar 2020 22:43:07 +0000 (15:43 -0700)
Justin Pryzby raised a couple of issues with commit 1f39bce0. Fixed.

Also, tweak the way the size of a hash entry is estimated, and the way the
number of buckets is estimated when calling BuildTupleHashTableExt().

Discussion: https://www.postgresql.org/message-id/20200319064222.GR26184@telsasoft.com

src/backend/commands/explain.c
src/backend/executor/nodeAgg.c

index 58141d8393c4b0728f40925e911dd82a0dc8d7dc..ff2f45cfb2583e751c48954d8c104c6891ec0789 100644 (file)
@@ -2778,7 +2778,7 @@ static void
 show_hashagg_info(AggState *aggstate, ExplainState *es)
 {
    Agg     *agg       = (Agg *)aggstate->ss.ps.plan;
-   long     memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
+   int64    memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
 
    Assert(IsA(aggstate, AggState));
 
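The hunk above widens the peak-memory figure from long to int64. On LLP64
platforms such as 64-bit Windows, long is only 32 bits, so a sufficiently
large peak could overflow there; int64 has the same width everywhere. A
minimal standalone sketch of the same rounding arithmetic (variable names
here are illustrative, not taken from the PostgreSQL source):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* peak hash-table memory in bytes (example value) */
        int64_t peak_bytes = (int64_t) 3 * 1024 * 1024 * 1024;  /* 3 GB */

        /* round up to kilobytes, as show_hashagg_info() does */
        int64_t peak_kb = (peak_bytes + 1023) / 1024;

        /* a 32-bit long would overflow once the peak exceeds about 2 TB;
         * int64 keeps the arithmetic safe on every platform */
        printf("peak memory: %lld kB\n", (long long) peak_kb);
        return 0;
    }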
index 44c159ab2a3b77a714a5cf7c4022497c1758c2e8..fbc0480fc649a4d11006c597392f7c394ef2a746 100644 (file)
@@ -1873,17 +1873,12 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
            aggstate->hash_disk_used = disk_used;
    }
 
-   /*
-    * Update hashentrysize estimate based on contents. Don't include meta_mem
-    * in the memory used, because empty buckets would inflate the per-entry
-    * cost. An underestimate of the per-entry size is better than an
-    * overestimate, because an overestimate could compound with each level of
-    * recursion.
-    */
+   /* update hashentrysize estimate based on contents */
    if (aggstate->hash_ngroups_current > 0)
    {
        aggstate->hashentrysize =
-           hash_mem / (double)aggstate->hash_ngroups_current;
+           sizeof(TupleHashEntryData) +
+           (hash_mem / (double)aggstate->hash_ngroups_current);
    }
 }
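The revised estimate charges each group for the fixed hash-entry header
(sizeof(TupleHashEntryData)) on top of the average per-group memory observed
so far. A standalone sketch of the same calculation, using a hypothetical
ENTRY_HEADER_SIZE constant as a stand-in for the real struct size:

    #include <stdio.h>

    #define ENTRY_HEADER_SIZE 24   /* hypothetical stand-in for sizeof(TupleHashEntryData) */

    /* simplified analogue of the estimate in hash_agg_update_metrics() */
    static double
    estimate_hashentrysize(double hash_mem, double ngroups)
    {
        if (ngroups <= 0)           /* mirrors the hash_ngroups_current > 0 guard */
            return ENTRY_HEADER_SIZE;
        return ENTRY_HEADER_SIZE + hash_mem / ngroups;
    }

    int main(void)
    {
        /* e.g. 4 MB of group data spread across 10000 groups */
        printf("%.1f bytes per entry\n",
               estimate_hashentrysize(4.0 * 1024 * 1024, 10000.0));
        return 0;
    }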
 
@@ -1899,10 +1894,10 @@ hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
    max_nbuckets = memory / hashentrysize;
 
    /*
-    * Leave room for slop to avoid a case where the initial hash table size
-    * exceeds the memory limit (though that may still happen in edge cases).
+    * Underestimating is better than overestimating. Too many buckets crowd
+    * out space for group keys and transition state values.
     */
-   max_nbuckets *= 0.75;
+   max_nbuckets >>= 1;
 
    if (nbuckets > max_nbuckets)
        nbuckets = max_nbuckets;
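With this change, the initial bucket count is capped at half the number of
entries that would nominally fit in the memory budget (a right shift by one)
rather than 75% of it, leaving more room for the group keys and transition
values themselves. A simplified standalone sketch of the capping logic; the
surrounding details of the real function are omitted:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified analogue of hash_choose_num_buckets() */
    static long
    choose_num_buckets(double hashentrysize, long ngroups, size_t memory)
    {
        long    nbuckets = ngroups;
        long    max_nbuckets = (long) (memory / hashentrysize);

        /* underestimate rather than overestimate: too many buckets crowd
         * out space for group keys and transition state values */
        max_nbuckets >>= 1;

        if (nbuckets > max_nbuckets)
            nbuckets = max_nbuckets;
        return nbuckets > 1 ? nbuckets : 1;
    }

    int main(void)
    {
        /* 4 MB budget, ~440-byte entries, 100000 expected groups */
        printf("nbuckets = %ld\n",
               choose_num_buckets(440.0, 100000, 4 * 1024 * 1024));
        return 0;
    }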
@@ -3548,7 +3543,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
         * reasonable.
         */
        for (i = 0; i < aggstate->num_hashes; i++)
-           totalGroups = aggstate->perhash[i].aggnode->numGroups;
+           totalGroups += aggstate->perhash[i].aggnode->numGroups;
 
        hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
                            &aggstate->hash_mem_limit,
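The last hunk fixes a plain bug: with '=' instead of '+=', only the final
hashed grouping set's row estimate reached hash_agg_set_limits(). A minimal
illustration of the accumulation, with hypothetical per-grouping-set
estimates:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical numGroups estimates for three hashed grouping sets */
        double  numGroups[] = {100.0, 250.0, 50.0};
        double  totalGroups = 0;
        int     i;

        for (i = 0; i < 3; i++)
            totalGroups += numGroups[i];    /* '=' here would keep only 50 */

        printf("totalGroups = %.0f\n", totalGroups);    /* prints 400 */
        return 0;
    }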