#include "storage/pmsignal.h"
#include "storage/shmem.h"

+ /*
+ * Select the fd readiness primitive to use. Normally the "most modern"
+ * primitive supported by the OS will be used, but for testing it can be
+ * useful to manually specify the used primitive. If desired, just add a
+ * define somewhere before this block.
+ */
+ #if defined(LATCH_USE_POLL) || defined(LATCH_USE_SELECT)
+ /* don't overwrite manual choice */
+ #elif defined(HAVE_POLL)
+ #define LATCH_USE_POLL
+ #elif HAVE_SYS_SELECT_H
+ #define LATCH_USE_SELECT
+ #else
+ #error "no latch implementation available"
+ #endif
+
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;
@@ -215,10 +231,10 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
cur_time;
long cur_timeout;

- #ifdef HAVE_POLL
+ #if defined(LATCH_USE_POLL)
struct pollfd pfds[3];
int nfds;
- #else
+ #elif defined(LATCH_USE_SELECT)
struct timeval tv,
*tvp;
fd_set input_mask;
@@ -248,7 +264,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
Assert(timeout >= 0 && timeout <= INT_MAX);
cur_timeout = timeout;

- #ifndef HAVE_POLL
+ #ifdef LATCH_USE_SELECT
tv.tv_sec = cur_timeout / 1000L;
tv.tv_usec = (cur_timeout % 1000L) * 1000L;
tvp = &tv;
@@ -258,7 +274,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
{
cur_timeout = -1;

- #ifndef HAVE_POLL
+ #ifdef LATCH_USE_SELECT
tvp = NULL;
#endif
}
@@ -292,16 +308,10 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
}

/*
- * Must wait ... we use poll(2) if available, otherwise select(2).
- *
- * On at least older linux kernels select(), in violation of POSIX,
- * doesn't reliably return a socket as writable if closed - but we
- * rely on that. So far all the known cases of this problem are on
- * platforms that also provide a poll() implementation without that
- * bug. If we find one where that's not the case, we'll need to add a
- * workaround.
+ * Must wait ... we use the polling interface determined at the top of
+ * this file to do so.
 */
- #ifdef HAVE_POLL
+ #if defined(LATCH_USE_POLL)
nfds = 0;
if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
@@ -397,8 +407,16 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
result |= WL_POSTMASTER_DEATH;
}
}
- #else /* !HAVE_POLL */
+ #elif defined(LATCH_USE_SELECT)

+ /*
+ * On at least older linux kernels select(), in violation of POSIX,
+ * doesn't reliably return a socket as writable if closed - but we
+ * rely on that. So far all the known cases of this problem are on
+ * platforms that also provide a poll() implementation without that
+ * bug. If we find one where that's not the case, we'll need to add a
+ * workaround.
+ */
FD_ZERO(&input_mask);
FD_ZERO(&output_mask);
@@ -478,7 +496,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
result |= WL_POSTMASTER_DEATH;
}
}
- #endif /* HAVE_POLL */
+ #endif /* LATCH_USE_SELECT */

/* If we're not done, update cur_timeout for next iteration */
if (result == 0 && (wakeEvents & WL_TIMEOUT))
@@ -491,7 +509,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
/* Timeout has expired, no need to continue looping */
result |= WL_TIMEOUT;
}
- #ifndef HAVE_POLL
+ #ifdef LATCH_USE_SELECT
else
{
tv.tv_sec = cur_timeout / 1000L;
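Note: as the added comment says, the readiness primitive can be pinned for testing by defining one of the LATCH_USE_* macros before the selection block is reached, since the first branch deliberately leaves a manual choice alone. A minimal illustrative sketch (not part of the commit; the same effect could also be had by passing the define via the compiler command line, e.g. -DLATCH_USE_SELECT):

/* Hypothetical testing override: force the select(2)-based code path
 * even on a platform where poll(2) is available. This define must be
 * visible before the LATCH_USE_* selection block in this file. */
#define LATCH_USE_SELECT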