@@ -250,7 +250,8 @@ static void pgss_store(const char *query, uint32 queryId,
                 const BufferUsage *bufusage,
                 pgssJumbleState *jstate);
 static Size pgss_memsize(void);
-static pgssEntry *entry_alloc(pgssHashKey *key, const char *query, int query_len);
+static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
+            int query_len, bool sticky);
 static void entry_dealloc(void);
 static void entry_reset(void);
 static void AppendJumble(pgssJumbleState *jstate,
@@ -502,7 +503,7 @@ pgss_shmem_startup(void)
                                               query_size - 1);
 
         /* make the hashtable entry (discards old entries if too many) */
-        entry = entry_alloc(&temp.key, buffer, temp.query_len);
+        entry = entry_alloc(&temp.key, buffer, temp.query_len, false);
 
         /* copy in the actual stats */
         entry->counters = temp.counters;
@@ -596,7 +597,6 @@ static void
 pgss_post_parse_analyze(ParseState *pstate, Query *query)
 {
     pgssJumbleState jstate;
-    BufferUsage bufusage;
 
     /* Assert we didn't do this already */
     Assert(query->queryId == 0);
@@ -646,16 +646,12 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
      * there's no need for an early entry.
      */
     if (jstate.clocations_count > 0)
-    {
-        memset(&bufusage, 0, sizeof(bufusage));
-
         pgss_store(pstate->p_sourcetext,
                    query->queryId,
                    0,
                    0,
-                   &bufusage,
+                   NULL,
                    &jstate);
-    }
 }
 
 /*
@@ -924,7 +920,7 @@ pgss_hash_string(const char *str)
  *
  * If jstate is not NULL then we're trying to create an entry for which
  * we have no statistics as yet; we just want to record the normalized
- * query string while we can.
+ * query string. total_time, rows, bufusage are ignored in this case.
  */
 static void
 pgss_store(const char *query, uint32 queryId,
@@ -933,7 +929,6 @@ pgss_store(const char *query, uint32 queryId,
            pgssJumbleState *jstate)
 {
     pgssHashKey key;
-    double      usage;
     pgssEntry  *entry;
     char       *norm_query = NULL;
 
@@ -954,29 +949,7 @@ pgss_store(const char *query, uint32 queryId,
 
     entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
 
-    if (jstate)
-    {
-        /*
-         * When creating an entry just to store the normalized string, make it
-         * artificially sticky so that it will probably still be there when
-         * the query gets executed. We do this by giving it a median usage
-         * value rather than the normal value. (Strictly speaking, query
-         * strings are normalized on a best effort basis, though it would be
-         * difficult to demonstrate this even under artificial conditions.)
-         * But if we found the entry already present, don't let this call
-         * increment its usage.
-         */
-        if (!entry)
-            usage = pgss->cur_median_usage;
-        else
-            usage = 0;
-    }
-    else
-    {
-        /* normal case, increment usage by normal amount */
-        usage = USAGE_EXEC(duration);
-    }
-
+    /* Create new entry, if not present */
     if (!entry)
     {
         int         query_len;
@@ -999,7 +972,7 @@ pgss_store(const char *query, uint32 queryId,
             /* Acquire exclusive lock as required by entry_alloc() */
             LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
 
-            entry = entry_alloc(&key, norm_query, query_len);
+            entry = entry_alloc(&key, norm_query, query_len, true);
         }
         else
         {
@@ -1016,31 +989,26 @@ pgss_store(const char *query, uint32 queryId,
             /* Acquire exclusive lock as required by entry_alloc() */
             LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
 
-            entry = entry_alloc(&key, query, query_len);
+            entry = entry_alloc(&key, query, query_len, false);
         }
     }
 
-    /*
-     * Grab the spinlock while updating the counters (see comment about
-     * locking rules at the head of the file)
-     */
+    /* Increment the counts, except when jstate is not NULL */
+    if (!jstate)
     {
+        /*
+         * Grab the spinlock while updating the counters (see comment about
+         * locking rules at the head of the file)
+         */
         volatile pgssEntry *e = (volatile pgssEntry *) entry;
 
         SpinLockAcquire(&e->mutex);
 
-        /*
-         * If we're entering real data, "unstick" entry if it was previously
-         * sticky, and then increment calls.
-         */
-        if (!jstate)
-        {
-            if (e->counters.calls == 0)
-                e->counters.usage = USAGE_INIT;
-
-            e->counters.calls += 1;
-        }
+        /* "Unstick" entry if it was previously sticky */
+        if (e->counters.calls == 0)
+            e->counters.usage = USAGE_INIT;
 
+        e->counters.calls += 1;
         e->counters.total_time += total_time;
         e->counters.rows += rows;
         e->counters.shared_blks_hit += bufusage->shared_blks_hit;
@@ -1055,7 +1023,7 @@ pgss_store(const char *query, uint32 queryId,
         e->counters.temp_blks_written += bufusage->temp_blks_written;
         e->counters.time_read += INSTR_TIME_GET_DOUBLE(bufusage->time_read);
         e->counters.time_write += INSTR_TIME_GET_DOUBLE(bufusage->time_write);
-        e->counters.usage += usage;
+        e->counters.usage += USAGE_EXEC(duration);
 
         SpinLockRelease(&e->mutex);
     }
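As a side note on the restructured pgss_store() logic above: the counters are now touched only when no jumble state is passed in, and the first real execution "unsticks" a placeholder entry created at parse-analysis time. The following standalone sketch models just that update path with simplified stand-in types; USAGE_INIT, the 1.0-per-execution usage increment, and the 10.0 starting usage are assumptions of the model, not the module's actual definitions.

#include <stdbool.h>
#include <stdio.h>

#define USAGE_INIT 1.0          /* assumed baseline usage weight */

/* Simplified stand-in for the relevant fields of Counters/pgssEntry. */
typedef struct ModelCounters
{
    long    calls;
    double  usage;
    double  total_time;
} ModelCounters;

/*
 * Mirrors the tail of pgss_store() after this patch: counters are updated
 * only for a real execution (no jumble state), and the first such call
 * resets a sticky entry's usage to the normal baseline.
 */
static void
model_store(ModelCounters *c, double total_time, bool have_jstate)
{
    if (!have_jstate)
    {
        if (c->calls == 0)      /* "unstick" entry if it was sticky */
            c->usage = USAGE_INIT;

        c->calls += 1;
        c->total_time += total_time;
        c->usage += 1.0;        /* stands in for USAGE_EXEC(duration) */
    }
}

int
main(void)
{
    /* sticky placeholder: median-style usage, no calls recorded yet */
    ModelCounters c = {0, 10.0, 0.0};

    model_store(&c, 0.0, true);     /* parse-analysis call: nothing changes */
    model_store(&c, 1.5, false);    /* first execution: unstick and count */
    printf("calls=%ld usage=%g total_time=%g\n",
           c.calls, c.usage, c.total_time);
    return 0;
}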
@@ -1235,13 +1203,19 @@ pgss_memsize(void)
  *
  * "query" need not be null-terminated; we rely on query_len instead
  *
+ * If "sticky" is true, make the new entry artificially sticky so that it will
+ * probably still be there when the query finishes execution. We do this by
+ * giving it a median usage value rather than the normal value. (Strictly
+ * speaking, query strings are normalized on a best effort basis, though it
+ * would be difficult to demonstrate this even under artificial conditions.)
+ *
  * Note: despite needing exclusive lock, it's not an error for the target
  * entry to already exist. This is because pgss_store releases and
  * reacquires lock after failing to find a match; so someone else could
  * have made the entry while we waited to get exclusive lock.
  */
 static pgssEntry *
-entry_alloc(pgssHashKey *key, const char *query, int query_len)
+entry_alloc(pgssHashKey *key, const char *query, int query_len, bool sticky)
 {
     pgssEntry  *entry;
     bool        found;
@@ -1259,7 +1233,8 @@ entry_alloc(pgssHashKey *key, const char *query, int query_len)
 
         /* reset the statistics */
         memset(&entry->counters, 0, sizeof(Counters));
-        entry->counters.usage = USAGE_INIT;
+        /* set the appropriate initial usage count */
+        entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
         /* re-initialize the mutex each time ... we assume no one using it */
         SpinLockInit(&entry->mutex);
         /* ... and don't forget the query text */
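A correspondingly small standalone sketch of the initial-usage choice that entry_alloc() now makes. Here cur_median_usage is only a stand-in value for pgss->cur_median_usage, and the usage-based eviction that sticky entries are meant to survive (entry_dealloc) is not part of this diff.

#include <stdbool.h>
#include <stdio.h>

#define USAGE_INIT 1.0              /* assumed baseline usage weight */

/* Stand-in for pgss->cur_median_usage; illustrative value only. */
static double cur_median_usage = 10.0;

/*
 * Mirrors the initialization added to entry_alloc(): a sticky entry starts
 * at the current median usage, so low-usage eviction is unlikely to discard
 * it before the query actually runs; a normal entry starts at USAGE_INIT.
 */
static double
model_initial_usage(bool sticky)
{
    return sticky ? cur_median_usage : USAGE_INIT;
}

int
main(void)
{
    printf("normal entry usage=%g\n", model_initial_usage(false));
    printf("sticky entry usage=%g\n", model_initial_usage(true));
    return 0;
}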