Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit e969f9a

Browse files
committed
Save a few cycles while creating "sticky" entries in pg_stat_statements.
There's no need to sit there and increment the stats when we know all the increments would be zero anyway. The actual additions might not be very expensive, but skipping acquisition of the spinlock seems like a good thing. Pushing the logic about initialization of the usage count down into entry_alloc() allows us to do that while making the code actually simpler, not more complex. Expansion on a suggestion by Peter Geoghegan.
1 parent 140a4fb commit e969f9a

File tree

1 file changed

+28
-53
lines changed

1 file changed

+28
-53
lines changed

contrib/pg_stat_statements/pg_stat_statements.c

+28-53
Original file line number | Diff line number | Diff line change
@@ -250,7 +250,8 @@ static void pgss_store(const char *query, uint32 queryId,
250250
const BufferUsage *bufusage,
251251
pgssJumbleState * jstate);
252252
static Size pgss_memsize(void);
253-
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query, int query_len);
253+
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
254+
int query_len, bool sticky);
254255
static void entry_dealloc(void);
255256
static void entry_reset(void);
256257
static void AppendJumble(pgssJumbleState * jstate,
@@ -502,7 +503,7 @@ pgss_shmem_startup(void)
502503
query_size - 1);
503504

504505
/* make the hashtable entry (discards old entries if too many) */
505-
entry = entry_alloc(&temp.key, buffer, temp.query_len);
506+
entry = entry_alloc(&temp.key, buffer, temp.query_len, false);
506507

507508
/* copy in the actual stats */
508509
entry->counters = temp.counters;
@@ -596,7 +597,6 @@ static void
596597
pgss_post_parse_analyze(ParseState *pstate, Query *query)
597598
{
598599
pgssJumbleState jstate;
599-
BufferUsage bufusage;
600600

601601
/* Assert we didn't do this already */
602602
Assert(query->queryId == 0);
@@ -646,16 +646,12 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
646646
* there's no need for an early entry.
647647
*/
648648
if (jstate.clocations_count > 0)
649-
{
650-
memset(&bufusage, 0, sizeof(bufusage));
651-
652649
pgss_store(pstate->p_sourcetext,
653650
query->queryId,
654651
0,
655652
0,
656-
&bufusage,
653+
NULL,
657654
&jstate);
658-
}
659655
}
660656

661657
/*
@@ -924,7 +920,7 @@ pgss_hash_string(const char *str)
924920
*
925921
* If jstate is not NULL then we're trying to create an entry for which
926922
* we have no statistics as yet; we just want to record the normalized
927-
* query string while we can.
923+
* query string. total_time, rows, bufusage are ignored in this case.
928924
*/
929925
static void
930926
pgss_store(const char *query, uint32 queryId,
@@ -933,7 +929,6 @@ pgss_store(const char *query, uint32 queryId,
933929
pgssJumbleState * jstate)
934930
{
935931
pgssHashKey key;
936-
double usage;
937932
pgssEntry *entry;
938933
char *norm_query = NULL;
939934

@@ -954,29 +949,7 @@ pgss_store(const char *query, uint32 queryId,
954949

955950
entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
956951

957-
if (jstate)
958-
{
959-
/*
960-
* When creating an entry just to store the normalized string, make it
961-
* artificially sticky so that it will probably still be there when
962-
* the query gets executed. We do this by giving it a median usage
963-
* value rather than the normal value. (Strictly speaking, query
964-
* strings are normalized on a best effort basis, though it would be
965-
* difficult to demonstrate this even under artificial conditions.)
966-
* But if we found the entry already present, don't let this call
967-
* increment its usage.
968-
*/
969-
if (!entry)
970-
usage = pgss->cur_median_usage;
971-
else
972-
usage = 0;
973-
}
974-
else
975-
{
976-
/* normal case, increment usage by normal amount */
977-
usage = USAGE_EXEC(duration);
978-
}
979-
952+
/* Create new entry, if not present */
980953
if (!entry)
981954
{
982955
int query_len;
@@ -999,7 +972,7 @@ pgss_store(const char *query, uint32 queryId,
999972
/* Acquire exclusive lock as required by entry_alloc() */
1000973
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
1001974

1002-
entry = entry_alloc(&key, norm_query, query_len);
975+
entry = entry_alloc(&key, norm_query, query_len, true);
1003976
}
1004977
else
1005978
{
@@ -1016,31 +989,26 @@ pgss_store(const char *query, uint32 queryId,
1016989
/* Acquire exclusive lock as required by entry_alloc() */
1017990
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
1018991

1019-
entry = entry_alloc(&key, query, query_len);
992+
entry = entry_alloc(&key, query, query_len, false);
1020993
}
1021994
}
1022995

1023-
/*
1024-
* Grab the spinlock while updating the counters (see comment about
1025-
* locking rules at the head of the file)
1026-
*/
996+
/* Increment the counts, except when jstate is not NULL */
997+
if (!jstate)
1027998
{
999+
/*
1000+
* Grab the spinlock while updating the counters (see comment about
1001+
* locking rules at the head of the file)
1002+
*/
10281003
volatile pgssEntry *e = (volatile pgssEntry *) entry;
10291004

10301005
SpinLockAcquire(&e->mutex);
10311006

1032-
/*
1033-
* If we're entering real data, "unstick" entry if it was previously
1034-
* sticky, and then increment calls.
1035-
*/
1036-
if (!jstate)
1037-
{
1038-
if (e->counters.calls == 0)
1039-
e->counters.usage = USAGE_INIT;
1040-
1041-
e->counters.calls += 1;
1042-
}
1007+
/* "Unstick" entry if it was previously sticky */
1008+
if (e->counters.calls == 0)
1009+
e->counters.usage = USAGE_INIT;
10431010

1011+
e->counters.calls += 1;
10441012
e->counters.total_time += total_time;
10451013
e->counters.rows += rows;
10461014
e->counters.shared_blks_hit += bufusage->shared_blks_hit;
@@ -1055,7 +1023,7 @@ pgss_store(const char *query, uint32 queryId,
10551023
e->counters.temp_blks_written += bufusage->temp_blks_written;
10561024
e->counters.time_read += INSTR_TIME_GET_DOUBLE(bufusage->time_read);
10571025
e->counters.time_write += INSTR_TIME_GET_DOUBLE(bufusage->time_write);
1058-
e->counters.usage += usage;
1026+
e->counters.usage += USAGE_EXEC(duration);
10591027

10601028
SpinLockRelease(&e->mutex);
10611029
}
@@ -1235,13 +1203,19 @@ pgss_memsize(void)
12351203
*
12361204
* "query" need not be null-terminated; we rely on query_len instead
12371205
*
1206+
* If "sticky" is true, make the new entry artificially sticky so that it will
1207+
* probably still be there when the query finishes execution. We do this by
1208+
* giving it a median usage value rather than the normal value. (Strictly
1209+
* speaking, query strings are normalized on a best effort basis, though it
1210+
* would be difficult to demonstrate this even under artificial conditions.)
1211+
*
12381212
* Note: despite needing exclusive lock, it's not an error for the target
12391213
* entry to already exist. This is because pgss_store releases and
12401214
* reacquires lock after failing to find a match; so someone else could
12411215
* have made the entry while we waited to get exclusive lock.
12421216
*/
12431217
static pgssEntry *
1244-
entry_alloc(pgssHashKey *key, const char *query, int query_len)
1218+
entry_alloc(pgssHashKey *key, const char *query, int query_len, bool sticky)
12451219
{
12461220
pgssEntry *entry;
12471221
bool found;
@@ -1259,7 +1233,8 @@ entry_alloc(pgssHashKey *key, const char *query, int query_len)
12591233

12601234
/* reset the statistics */
12611235
memset(&entry->counters, 0, sizeof(Counters));
1262-
entry->counters.usage = USAGE_INIT;
1236+
/* set the appropriate initial usage count */
1237+
entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
12631238
/* re-initialize the mutex each time ... we assume no one using it */
12641239
SpinLockInit(&entry->mutex);
12651240
/* ... and don't forget the query text */

0 commit comments

Comments (0)