  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.85 2008/03/17 11:50:26 alvherre Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.86 2008/06/19 21:32:56 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -17,19 +17,17 @@
 #include "access/xact.h"
 #include "commands/async.h"
 #include "miscadmin.h"
-#include "storage/backendid.h"
 #include "storage/ipc.h"
-#include "storage/proc.h"
 #include "storage/sinvaladt.h"
 #include "utils/inval.h"
 
 
 /*
  * Because backends sitting idle will not be reading sinval events, we
  * need a way to give an idle backend a swift kick in the rear and make
- * it catch up before the sinval queue overflows and forces everyone
- * through a cache reset exercise.  This is done by broadcasting SIGUSR1
- * to all backends when the queue is threatening to become full.
+ * it catch up before the sinval queue overflows and forces it to go
+ * through a cache reset exercise.  This is done by sending SIGUSR1
+ * to any backend that gets too far behind.
  *
  * State for catchup events consists of two flags: one saying whether
  * the signal handler is currently allowed to call ProcessCatchupEvent
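The two-flag scheme described in that comment is a deferred-signal pattern: the SIGUSR1 handler does the catchup work itself only while that is known to be safe, and otherwise just records that a catchup was requested so it can be serviced later. A minimal sketch of the pattern follows; only ProcessCatchupEvent and catchupInterruptOccurred are names taken from the patch, everything else here is illustrative and is not the actual sinval.c code:

#include <signal.h>

/* is it currently safe for the handler to do the work itself? */
static volatile sig_atomic_t catchupInterruptEnabled = 0;
/* did a catchup signal arrive while it was not safe? */
static volatile sig_atomic_t catchupInterruptOccurred = 0;

static void
ProcessCatchupEvent(void)
{
	/* read and apply all pending sinval messages (stubbed out here) */
}

static void
CatchupSignalHandler(int signo)
{
	(void) signo;
	if (catchupInterruptEnabled)
		ProcessCatchupEvent();			/* backend is idle: catch up right away */
	else
		catchupInterruptOccurred = 1;	/* busy: remember the request for later */
}

static void
EnableCatchupProcessing(void)
{
	catchupInterruptEnabled = 1;
	if (catchupInterruptOccurred)
	{
		/* service a request that arrived while processing was disabled */
		catchupInterruptOccurred = 0;
		ProcessCatchupEvent();
	}
}

The real handler and enable/disable routines are more careful about race windows; this only shows how the two flags divide the work.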
@@ -47,67 +45,101 @@ static void ProcessCatchupEvent(void);
 
 
 /*
- * SendSharedInvalidMessage
- *		Add a shared-cache-invalidation message to the global SI message queue.
+ * SendSharedInvalidMessages
+ *		Add shared-cache-invalidation message(s) to the global SI message queue.
  */
 void
-SendSharedInvalidMessage(SharedInvalidationMessage *msg)
+SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
 {
-	bool		insertOK;
-
-	insertOK = SIInsertDataEntry(msg);
-	if (!insertOK)
-		elog(DEBUG4, "SI buffer overflow");
+	SIInsertDataEntries(msgs, n);
 }
 
 /*
  * ReceiveSharedInvalidMessages
  *		Process shared-cache-invalidation messages waiting for this backend
  *
+ * We guarantee to process all messages that had been queued before the
+ * routine was entered.  It is of course possible for more messages to get
+ * queued right after our last SIGetDataEntries call.
+ *
  * NOTE: it is entirely possible for this routine to be invoked recursively
  * as a consequence of processing inside the invalFunction or resetFunction.
- * Hence, we must be holding no SI resources when we call them.  The only
- * bad side-effect is that SIDelExpiredDataEntries might be called extra
- * times on the way out of a nested call.
+ * Furthermore, such a recursive call must guarantee that all outstanding
+ * inval messages have been processed before it exits.  This is the reason
+ * for the strange-looking choice to use a statically allocated buffer array
+ * and counters; it's so that a recursive call can process messages already
+ * sucked out of sinvaladt.c.
  */
 void
 ReceiveSharedInvalidMessages(
 					  void (*invalFunction) (SharedInvalidationMessage *msg),
 							 void (*resetFunction) (void))
 {
-	SharedInvalidationMessage data;
-	int			getResult;
-	bool		gotMessage = false;
+#define MAXINVALMSGS 32
+	static SharedInvalidationMessage messages[MAXINVALMSGS];
+	/*
+	 * We use volatile here to prevent bugs if a compiler doesn't realize
+	 * that recursion is a possibility ...
+	 */
+	static volatile int nextmsg = 0;
+	static volatile int nummsgs = 0;
 
-	for (;;)
+	/* Deal with any messages still pending from an outer recursion */
+	while (nextmsg < nummsgs)
 	{
-		/*
-		 * We can discard any pending catchup event, since we will not exit
-		 * this loop until we're fully caught up.
-		 */
-		catchupInterruptOccurred = 0;
+		SharedInvalidationMessage *msg = &messages[nextmsg++];
 
-		getResult = SIGetDataEntry(MyBackendId, &data);
+		invalFunction(msg);
+	}
+
+	do
+	{
+		int			getResult;
+
+		nextmsg = nummsgs = 0;
+
+		/* Try to get some more messages */
+		getResult = SIGetDataEntries(messages, MAXINVALMSGS);
 
-		if (getResult == 0)
-			break;				/* nothing more to do */
 		if (getResult < 0)
 		{
 			/* got a reset message */
 			elog(DEBUG4, "cache state reset");
 			resetFunction();
+			break;				/* nothing more to do */
 		}
-		else
+
+		/* Process them, being wary that a recursive call might eat some */
+		nextmsg = 0;
+		nummsgs = getResult;
+
+		while (nextmsg < nummsgs)
 		{
-			/* got a normal data message */
-			invalFunction(&data);
+			SharedInvalidationMessage *msg = &messages[nextmsg++];
+
+			invalFunction(msg);
 		}
-		gotMessage = true;
-	}
 
-	/* If we got any messages, try to release dead messages */
-	if (gotMessage)
-		SIDelExpiredDataEntries(false);
+		/*
+		 * We only need to loop if the last SIGetDataEntries call (which
+		 * might have been within a recursive call) returned a full buffer.
+		 */
+	} while (nummsgs == MAXINVALMSGS);
+
+	/*
+	 * We are now caught up.  If we received a catchup signal, reset that
+	 * flag, and call SICleanupQueue().  This is not so much because we
+	 * need to flush dead messages right now, as that we want to pass on
+	 * the catchup signal to the next slowest backend.  "Daisy chaining" the
+	 * catchup signal this way avoids creating spikes in system load for
+	 * what should be just a background maintenance activity.
+	 */
+	if (catchupInterruptOccurred)
+	{
+		catchupInterruptOccurred = 0;
+		elog(DEBUG4, "sinval catchup complete, cleaning queue");
+		SICleanupQueue(false, 0);
+	}
 }
 
 
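Why the patched ReceiveSharedInvalidMessages keeps its buffer and counters in static variables is easiest to see with a toy model: if the per-message callback re-enters the receive routine, the inner call can see and finish the batch the outer call had already pulled out of the queue, so no message is processed twice or dropped. The following standalone sketch mirrors that structure; it is plain C with a made-up in-memory queue and names, not PostgreSQL code:

#include <stdio.h>

#define MAXMSGS 4
#define QUEUELEN 10

/* stand-in for the shared sinval queue */
static int	queue[QUEUELEN] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
static int	queue_pos = 0;

/* static buffer and counters, as in the patched ReceiveSharedInvalidMessages */
static int	messages[MAXMSGS];
static volatile int nextmsg = 0;
static volatile int nummsgs = 0;

static void receive(void (*invalFunction) (int));

/* stand-in for SIGetDataEntries(): copy up to max queued items into buf */
static int
fetch(int *buf, int max)
{
	int			n = 0;

	while (n < max && queue_pos < QUEUELEN)
		buf[n++] = queue[queue_pos++];
	return n;
}

/* per-message callback; deliberately re-enters receive() on message 3 */
static void
handler(int msg)
{
	printf("got %d\n", msg);
	if (msg == 3)
		receive(handler);
}

static void
receive(void (*invalFunction) (int))
{
	/* finish anything an outer invocation already fetched */
	while (nextmsg < nummsgs)
		invalFunction(messages[nextmsg++]);

	do
	{
		nextmsg = nummsgs = 0;
		nummsgs = fetch(messages, MAXMSGS);
		while (nextmsg < nummsgs)
			invalFunction(messages[nextmsg++]);
	} while (nummsgs == MAXMSGS);	/* a full buffer may mean more is queued */
}

int
main(void)
{
	receive(handler);
	return 0;
}

Running it prints 1 through 10 exactly once even though the handler recurses on message 3, because the recursive call drains messages[] through the shared nextmsg/nummsgs counters before fetching more from the queue.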