PostgreSQL Source Code  git master
latch.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * latch.c
4  * Routines for inter-process latches
5  *
6  * The poll() implementation uses the so-called self-pipe trick to overcome the
7  * race condition involved with poll() and setting a global flag in the signal
8  * handler. When a latch is set and the current process is waiting for it, the
9  * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
10  * A signal by itself doesn't interrupt poll() on all platforms, and even on
11  * platforms where it does, a signal that arrives just before the poll() call
12  * does not prevent poll() from entering sleep. An incoming byte on a pipe
13  * however reliably interrupts the sleep, and causes poll() to return
14  * immediately even if the signal arrives before poll() begins.
15  *
16  * The epoll() implementation overcomes the race with a different technique: it
17  * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
18  * don't need to register a signal handler or create our own self-pipe. We
19  * assume that any system that has Linux epoll() also has Linux signalfd().
20  *
21  * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
22  *
23  * The Windows implementation uses Windows events that are inherited by all
24  * postmaster child processes. There's no need for the self-pipe trick there.
25  *
26  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
27  * Portions Copyright (c) 1994, Regents of the University of California
28  *
29  * IDENTIFICATION
30  * src/backend/storage/ipc/latch.c
31  *
32  *-------------------------------------------------------------------------
33  */
34 #include "postgres.h"
35 
36 #include <fcntl.h>
37 #include <limits.h>
38 #include <signal.h>
39 #include <unistd.h>
40 #ifdef HAVE_SYS_EPOLL_H
41 #include <sys/epoll.h>
42 #endif
43 #ifdef HAVE_SYS_EVENT_H
44 #include <sys/event.h>
45 #endif
46 #ifdef HAVE_SYS_SIGNALFD_H
47 #include <sys/signalfd.h>
48 #endif
49 #ifdef HAVE_POLL_H
50 #include <poll.h>
51 #endif
52 
53 #include "libpq/pqsignal.h"
54 #include "miscadmin.h"
55 #include "pgstat.h"
56 #include "port/atomics.h"
57 #include "portability/instr_time.h"
58 #include "postmaster/postmaster.h"
59 #include "storage/fd.h"
60 #include "storage/ipc.h"
61 #include "storage/latch.h"
62 #include "storage/pmsignal.h"
63 #include "storage/shmem.h"
64 #include "utils/memutils.h"
65 
66 /*
67  * Select the fd readiness primitive to use. Normally the "most modern"
68  * primitive supported by the OS will be used, but for testing it can be
69  * useful to manually specify the used primitive. If desired, just add a
70  * define somewhere before this block.
71  */
72 #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
73  defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
74 /* don't overwrite manual choice */
75 #elif defined(HAVE_SYS_EPOLL_H)
76 #define WAIT_USE_EPOLL
77 #elif defined(HAVE_KQUEUE)
78 #define WAIT_USE_KQUEUE
79 #elif defined(HAVE_POLL)
80 #define WAIT_USE_POLL
81 #elif WIN32
82 #define WAIT_USE_WIN32
83 #else
84 #error "no wait set implementation available"
85 #endif
86 
87 /*
88  * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
89  * available. We avoid signalfd on illumos for now based on problem reports.
90  * For testing the choice can also be manually specified.
91  */
92 #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
93 #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
94 /* don't overwrite manual choice */
95 #elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H) && \
96  !defined(__illumos__)
97 #define WAIT_USE_SIGNALFD
98 #else
99 #define WAIT_USE_SELF_PIPE
100 #endif
101 #endif
102 
103 /* typedef in latch.h */
105 {
106  int nevents; /* number of registered events */
107  int nevents_space; /* maximum number of events in this set */
108 
109  /*
110  * Array, of nevents_space length, storing the definition of events this
111  * set is waiting for.
112  */
114 
115  /*
116  * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
117  * said latch, and latch_pos the offset in the ->events array. This is
118  * useful because we check the state of the latch before performing doing
119  * syscalls related to waiting.
120  */
123 
124  /*
125  * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
126  * is set so that we'll exit immediately if postmaster death is detected,
127  * instead of returning.
128  */
130 
131 #if defined(WAIT_USE_EPOLL)
132  int epoll_fd;
133  /* epoll_wait returns events in a user provided arrays, allocate once */
134  struct epoll_event *epoll_ret_events;
135 #elif defined(WAIT_USE_KQUEUE)
136  int kqueue_fd;
137  /* kevent returns events in a user provided arrays, allocate once */
138  struct kevent *kqueue_ret_events;
139  bool report_postmaster_not_running;
140 #elif defined(WAIT_USE_POLL)
141  /* poll expects events to be waited on every poll() call, prepare once */
142  struct pollfd *pollfds;
143 #elif defined(WAIT_USE_WIN32)
144 
145  /*
146  * Array of windows events. The first element always contains
147  * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
148  * event->pos + 1).
149  */
150  HANDLE *handles;
151 #endif
152 };
153 
154 /* A common WaitEventSet used to implement WatchLatch() */
156 
157 /* The position of the latch in LatchWaitSet. */
158 #define LatchWaitSetLatchPos 0
159 
160 #ifndef WIN32
161 /* Are we currently in WaitLatch? The signal handler would like to know. */
162 static volatile sig_atomic_t waiting = false;
163 #endif
164 
165 #ifdef WAIT_USE_SIGNALFD
166 /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
167 static int signal_fd = -1;
168 #endif
169 
170 #ifdef WAIT_USE_SELF_PIPE
171 /* Read and write ends of the self-pipe */
172 static int selfpipe_readfd = -1;
173 static int selfpipe_writefd = -1;
174 
175 /* Process owning the self-pipe --- needed for checking purposes */
176 static int selfpipe_owner_pid = 0;
177 
178 /* Private function prototypes */
179 static void latch_sigurg_handler(SIGNAL_ARGS);
180 static void sendSelfPipeByte(void);
181 #endif
182 
183 #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
184 static void drain(void);
185 #endif
186 
187 #if defined(WAIT_USE_EPOLL)
188 static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
189 #elif defined(WAIT_USE_KQUEUE)
190 static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
191 #elif defined(WAIT_USE_POLL)
192 static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
193 #elif defined(WAIT_USE_WIN32)
194 static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
195 #endif
196 
197 static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
198  WaitEvent *occurred_events, int nevents);
199 
200 /*
201  * Initialize the process-local latch infrastructure.
202  *
203  * This must be called once during startup of any process that can wait on
204  * latches, before it issues any InitLatch() or OwnLatch() calls.
205  */
206 void
208 {
209 #if defined(WAIT_USE_SELF_PIPE)
210  int pipefd[2];
211 
212  if (IsUnderPostmaster)
213  {
214  /*
215  * We might have inherited connections to a self-pipe created by the
216  * postmaster. It's critical that child processes create their own
217  * self-pipes, of course, and we really want them to close the
218  * inherited FDs for safety's sake.
219  */
220  if (selfpipe_owner_pid != 0)
221  {
222  /* Assert we go through here but once in a child process */
223  Assert(selfpipe_owner_pid != MyProcPid);
224  /* Release postmaster's pipe FDs; ignore any error */
225  (void) close(selfpipe_readfd);
226  (void) close(selfpipe_writefd);
227  /* Clean up, just for safety's sake; we'll set these below */
228  selfpipe_readfd = selfpipe_writefd = -1;
229  selfpipe_owner_pid = 0;
230  /* Keep fd.c's accounting straight */
233  }
234  else
235  {
236  /*
237  * Postmaster didn't create a self-pipe ... or else we're in an
238  * EXEC_BACKEND build, in which case it doesn't matter since the
239  * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
240  * fd.c won't have state to clean up, either.
241  */
242  Assert(selfpipe_readfd == -1);
243  }
244  }
245  else
246  {
247  /* In postmaster or standalone backend, assert we do this but once */
248  Assert(selfpipe_readfd == -1);
249  Assert(selfpipe_owner_pid == 0);
250  }
251 
252  /*
253  * Set up the self-pipe that allows a signal handler to wake up the
254  * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
255  * that SetLatch won't block if the event has already been set many times
256  * filling the kernel buffer. Make the read-end non-blocking too, so that
257  * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
258  * Also, make both FDs close-on-exec, since we surely do not want any
259  * child processes messing with them.
260  */
261  if (pipe(pipefd) < 0)
262  elog(FATAL, "pipe() failed: %m");
263  if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
264  elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
265  if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
266  elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
267  if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
268  elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
269  if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
270  elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
271 
272  selfpipe_readfd = pipefd[0];
273  selfpipe_writefd = pipefd[1];
274  selfpipe_owner_pid = MyProcPid;
275 
276  /* Tell fd.c about these two long-lived FDs */
279 
280  pqsignal(SIGURG, latch_sigurg_handler);
281 #endif
282 
283 #ifdef WAIT_USE_SIGNALFD
284  sigset_t signalfd_mask;
285 
286  /* Block SIGURG, because we'll receive it through a signalfd. */
287  sigaddset(&UnBlockSig, SIGURG);
288 
289  /* Set up the signalfd to receive SIGURG notifications. */
290  sigemptyset(&signalfd_mask);
291  sigaddset(&signalfd_mask, SIGURG);
292  signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
293  if (signal_fd < 0)
294  elog(FATAL, "signalfd() failed");
296 #endif
297 
298 #ifdef WAIT_USE_KQUEUE
299  /* Ignore SIGURG, because we'll receive it via kqueue. */
300  pqsignal(SIGURG, SIG_IGN);
301 #endif
302 }
303 
304 void
306 {
307  int latch_pos PG_USED_FOR_ASSERTS_ONLY;
308 
309  Assert(LatchWaitSet == NULL);
310 
311  /* Set up the WaitEventSet used by WaitLatch(). */
314  MyLatch, NULL);
315  if (IsUnderPostmaster)
317  PGINVALID_SOCKET, NULL, NULL);
318 
319  Assert(latch_pos == LatchWaitSetLatchPos);
320 }
321 
322 void
324 {
325 #if defined(WAIT_USE_POLL)
326  pqsignal(SIGURG, SIG_IGN);
327 #endif
328 
329  if (LatchWaitSet)
330  {
332  LatchWaitSet = NULL;
333  }
334 
335 #if defined(WAIT_USE_SELF_PIPE)
336  close(selfpipe_readfd);
337  close(selfpipe_writefd);
338  selfpipe_readfd = -1;
339  selfpipe_writefd = -1;
340  selfpipe_owner_pid = InvalidPid;
341 #endif
342 
343 #if defined(WAIT_USE_SIGNALFD)
344  close(signal_fd);
345  signal_fd = -1;
346 #endif
347 }
348 
349 /*
350  * Initialize a process-local latch.
351  */
352 void
354 {
355  latch->is_set = false;
356  latch->maybe_sleeping = false;
357  latch->owner_pid = MyProcPid;
358  latch->is_shared = false;
359 
360 #if defined(WAIT_USE_SELF_PIPE)
361  /* Assert InitializeLatchSupport has been called in this process */
362  Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
363 #elif defined(WAIT_USE_SIGNALFD)
364  /* Assert InitializeLatchSupport has been called in this process */
365  Assert(signal_fd >= 0);
366 #elif defined(WAIT_USE_WIN32)
367  latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
368  if (latch->event == NULL)
369  elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
370 #endif /* WIN32 */
371 }
372 
373 /*
374  * Initialize a shared latch that can be set from other processes. The latch
375  * is initially owned by no-one; use OwnLatch to associate it with the
376  * current process.
377  *
378  * InitSharedLatch needs to be called in postmaster before forking child
379  * processes, usually right after allocating the shared memory block
380  * containing the latch with ShmemInitStruct. (The Unix implementation
381  * doesn't actually require that, but the Windows one does.) Because of
382  * this restriction, we have no concurrency issues to worry about here.
383  *
384  * Note that other handles created in this module are never marked as
385  * inheritable. Thus we do not need to worry about cleaning up child
386  * process references to postmaster-private latches or WaitEventSets.
387  */
388 void
390 {
391 #ifdef WIN32
392  SECURITY_ATTRIBUTES sa;
393 
394  /*
395  * Set up security attributes to specify that the events are inherited.
396  */
397  ZeroMemory(&sa, sizeof(sa));
398  sa.nLength = sizeof(sa);
399  sa.bInheritHandle = TRUE;
400 
401  latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
402  if (latch->event == NULL)
403  elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
404 #endif
405 
406  latch->is_set = false;
407  latch->maybe_sleeping = false;
408  latch->owner_pid = 0;
409  latch->is_shared = true;
410 }
411 
412 /*
413  * Associate a shared latch with the current process, allowing it to
414  * wait on the latch.
415  *
416  * Although there is a sanity check for latch-already-owned, we don't do
417  * any sort of locking here, meaning that we could fail to detect the error
418  * if two processes try to own the same latch at about the same time. If
419  * there is any risk of that, caller must provide an interlock to prevent it.
420  */
421 void
423 {
424  int owner_pid;
425 
426  /* Sanity checks */
427  Assert(latch->is_shared);
428 
429 #if defined(WAIT_USE_SELF_PIPE)
430  /* Assert InitializeLatchSupport has been called in this process */
431  Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
432 #elif defined(WAIT_USE_SIGNALFD)
433  /* Assert InitializeLatchSupport has been called in this process */
434  Assert(signal_fd >= 0);
435 #endif
436 
437  owner_pid = latch->owner_pid;
438  if (owner_pid != 0)
439  elog(PANIC, "latch already owned by PID %d", owner_pid);
440 
441  latch->owner_pid = MyProcPid;
442 }
443 
444 /*
445  * Disown a shared latch currently owned by the current process.
446  */
447 void
449 {
450  Assert(latch->is_shared);
451  Assert(latch->owner_pid == MyProcPid);
452 
453  latch->owner_pid = 0;
454 }
455 
456 /*
457  * Wait for a given latch to be set, or for postmaster death, or until timeout
458  * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
459  * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
460  * function returns immediately.
461  *
462  * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
463  * is given. Although it is declared as "long", we don't actually support
464  * timeouts longer than INT_MAX milliseconds. Note that some extra overhead
465  * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
466  *
467  * The latch must be owned by the current process, ie. it must be a
468  * process-local latch initialized with InitLatch, or a shared latch
469  * associated with the current process by calling OwnLatch.
470  *
471  * Returns bit mask indicating which condition(s) caused the wake-up. Note
472  * that if multiple wake-up conditions are true, there is no guarantee that
473  * we return all of them in one call, but we will return at least one.
474  */
475 int
476 WaitLatch(Latch *latch, int wakeEvents, long timeout,
477  uint32 wait_event_info)
478 {
479  WaitEvent event;
480 
481  /* Postmaster-managed callers must handle postmaster death somehow. */
483  (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
484  (wakeEvents & WL_POSTMASTER_DEATH));
485 
486  /*
487  * Some callers may have a latch other than MyLatch, or no latch at all,
488  * or want to handle postmaster death differently. It's cheap to assign
489  * those, so just do it every time.
490  */
491  if (!(wakeEvents & WL_LATCH_SET))
492  latch = NULL;
495  ((wakeEvents & WL_EXIT_ON_PM_DEATH) != 0);
496 
498  (wakeEvents & WL_TIMEOUT) ? timeout : -1,
499  &event, 1,
500  wait_event_info) == 0)
501  return WL_TIMEOUT;
502  else
503  return event.events;
504 }
505 
506 /*
507  * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
508  * conditions.
509  *
510  * When waiting on a socket, EOF and error conditions always cause the socket
511  * to be reported as readable/writable/connected, so that the caller can deal
512  * with the condition.
513  *
514  * wakeEvents must include either WL_EXIT_ON_PM_DEATH for automatic exit
515  * if the postmaster dies or WL_POSTMASTER_DEATH for a flag set in the
516  * return value if the postmaster dies. The latter is useful for rare cases
517  * where some behavior other than immediate exit is needed.
518  *
519  * NB: These days this is just a wrapper around the WaitEventSet API. When
520  * using a latch very frequently, consider creating a longer living
521  * WaitEventSet instead; that's more efficient.
522  */
523 int
524 WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
525  long timeout, uint32 wait_event_info)
526 {
527  int ret = 0;
528  int rc;
529  WaitEvent event;
531 
532  if (wakeEvents & WL_TIMEOUT)
533  Assert(timeout >= 0);
534  else
535  timeout = -1;
536 
537  if (wakeEvents & WL_LATCH_SET)
539  latch, NULL);
540 
541  /* Postmaster-managed callers must handle postmaster death somehow. */
543  (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
544  (wakeEvents & WL_POSTMASTER_DEATH));
545 
546  if ((wakeEvents & WL_POSTMASTER_DEATH) && IsUnderPostmaster)
548  NULL, NULL);
549 
550  if ((wakeEvents & WL_EXIT_ON_PM_DEATH) && IsUnderPostmaster)
552  NULL, NULL);
553 
554  if (wakeEvents & WL_SOCKET_MASK)
555  {
556  int ev;
557 
558  ev = wakeEvents & WL_SOCKET_MASK;
559  AddWaitEventToSet(set, ev, sock, NULL, NULL);
560  }
561 
562  rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);
563 
564  if (rc == 0)
565  ret |= WL_TIMEOUT;
566  else
567  {
568  ret |= event.events & (WL_LATCH_SET |
571  }
572 
573  FreeWaitEventSet(set);
574 
575  return ret;
576 }
577 
578 /*
579  * Sets a latch and wakes up anyone waiting on it.
580  *
581  * This is cheap if the latch is already set, otherwise not so much.
582  *
583  * NB: when calling this in a signal handler, be sure to save and restore
584  * errno around it. (That's standard practice in most signal handlers, of
585  * course, but we used to omit it in handlers that only set a flag.)
586  *
587  * NB: this function is called from critical sections and signal handlers so
588  * throwing an error is not a good idea.
589  */
590 void
592 {
593 #ifndef WIN32
594  pid_t owner_pid;
595 #else
596  HANDLE handle;
597 #endif
598 
599  /*
600  * The memory barrier has to be placed here to ensure that any flag
601  * variables possibly changed by this process have been flushed to main
602  * memory, before we check/set is_set.
603  */
605 
606  /* Quick exit if already set */
607  if (latch->is_set)
608  return;
609 
610  latch->is_set = true;
611 
613  if (!latch->maybe_sleeping)
614  return;
615 
616 #ifndef WIN32
617 
618  /*
619  * See if anyone's waiting for the latch. It can be the current process if
620  * we're in a signal handler. We use the self-pipe or SIGURG to ourselves
621  * to wake up WaitEventSetWaitBlock() without races in that case. If it's
622  * another process, send a signal.
623  *
624  * Fetch owner_pid only once, in case the latch is concurrently getting
625  * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
626  * guaranteed to be true! In practice, the effective range of pid_t fits
627  * in a 32 bit integer, and so should be atomic. In the worst case, we
628  * might end up signaling the wrong process. Even then, you're very
629  * unlucky if a process with that bogus pid exists and belongs to
630  * Postgres; and PG database processes should handle excess SIGUSR1
631  * interrupts without a problem anyhow.
632  *
633  * Another sort of race condition that's possible here is for a new
634  * process to own the latch immediately after we look, so we don't signal
635  * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
636  * the standard coding convention of waiting at the bottom of their loops,
637  * not the top, so that they'll correctly process latch-setting events
638  * that happen before they enter the loop.
639  */
640  owner_pid = latch->owner_pid;
641  if (owner_pid == 0)
642  return;
643  else if (owner_pid == MyProcPid)
644  {
645 #if defined(WAIT_USE_SELF_PIPE)
646  if (waiting)
647  sendSelfPipeByte();
648 #else
649  if (waiting)
650  kill(MyProcPid, SIGURG);
651 #endif
652  }
653  else
654  kill(owner_pid, SIGURG);
655 
656 #else
657 
658  /*
659  * See if anyone's waiting for the latch. It can be the current process if
660  * we're in a signal handler.
661  *
662  * Use a local variable here just in case somebody changes the event field
663  * concurrently (which really should not happen).
664  */
665  handle = latch->event;
666  if (handle)
667  {
668  SetEvent(handle);
669 
670  /*
671  * Note that we silently ignore any errors. We might be in a signal
672  * handler or other critical path where it's not safe to call elog().
673  */
674  }
675 #endif
676 }
677 
678 /*
679  * Clear the latch. Calling WaitLatch after this will sleep, unless
680  * the latch is set again before the WaitLatch call.
681  */
682 void
684 {
685  /* Only the owner should reset the latch */
686  Assert(latch->owner_pid == MyProcPid);
687  Assert(latch->maybe_sleeping == false);
688 
689  latch->is_set = false;
690 
691  /*
692  * Ensure that the write to is_set gets flushed to main memory before we
693  * examine any flag variables. Otherwise a concurrent SetLatch might
694  * falsely conclude that it needn't signal us, even though we have missed
695  * seeing some flag updates that SetLatch was supposed to inform us of.
696  */
698 }
699 
700 /*
701  * Create a WaitEventSet with space for nevents different events to wait for.
702  *
703  * These events can then be efficiently waited upon together, using
704  * WaitEventSetWait().
705  */
706 WaitEventSet *
707 CreateWaitEventSet(MemoryContext context, int nevents)
708 {
709  WaitEventSet *set;
710  char *data;
711  Size sz = 0;
712 
713  /*
714  * Use MAXALIGN size/alignment to guarantee that later uses of memory are
715  * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
716  * platforms, but earlier allocations like WaitEventSet and WaitEvent
717  * might not be sized to guarantee that when purely using sizeof().
718  */
719  sz += MAXALIGN(sizeof(WaitEventSet));
720  sz += MAXALIGN(sizeof(WaitEvent) * nevents);
721 
722 #if defined(WAIT_USE_EPOLL)
723  sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
724 #elif defined(WAIT_USE_KQUEUE)
725  sz += MAXALIGN(sizeof(struct kevent) * nevents);
726 #elif defined(WAIT_USE_POLL)
727  sz += MAXALIGN(sizeof(struct pollfd) * nevents);
728 #elif defined(WAIT_USE_WIN32)
729  /* need space for the pgwin32_signal_event */
730  sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
731 #endif
732 
733  data = (char *) MemoryContextAllocZero(context, sz);
734 
735  set = (WaitEventSet *) data;
736  data += MAXALIGN(sizeof(WaitEventSet));
737 
738  set->events = (WaitEvent *) data;
739  data += MAXALIGN(sizeof(WaitEvent) * nevents);
740 
741 #if defined(WAIT_USE_EPOLL)
742  set->epoll_ret_events = (struct epoll_event *) data;
743  data += MAXALIGN(sizeof(struct epoll_event) * nevents);
744 #elif defined(WAIT_USE_KQUEUE)
745  set->kqueue_ret_events = (struct kevent *) data;
746  data += MAXALIGN(sizeof(struct kevent) * nevents);
747 #elif defined(WAIT_USE_POLL)
748  set->pollfds = (struct pollfd *) data;
749  data += MAXALIGN(sizeof(struct pollfd) * nevents);
750 #elif defined(WAIT_USE_WIN32)
751  set->handles = (HANDLE) data;
752  data += MAXALIGN(sizeof(HANDLE) * nevents);
753 #endif
754 
755  set->latch = NULL;
756  set->nevents_space = nevents;
757  set->exit_on_postmaster_death = false;
758 
759 #if defined(WAIT_USE_EPOLL)
760  if (!AcquireExternalFD())
761  {
762  /* treat this as though epoll_create1 itself returned EMFILE */
763  elog(ERROR, "epoll_create1 failed: %m");
764  }
765  set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
766  if (set->epoll_fd < 0)
767  {
769  elog(ERROR, "epoll_create1 failed: %m");
770  }
771 #elif defined(WAIT_USE_KQUEUE)
772  if (!AcquireExternalFD())
773  {
774  /* treat this as though kqueue itself returned EMFILE */
775  elog(ERROR, "kqueue failed: %m");
776  }
777  set->kqueue_fd = kqueue();
778  if (set->kqueue_fd < 0)
779  {
781  elog(ERROR, "kqueue failed: %m");
782  }
783  if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
784  {
785  int save_errno = errno;
786 
787  close(set->kqueue_fd);
789  errno = save_errno;
790  elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
791  }
792  set->report_postmaster_not_running = false;
793 #elif defined(WAIT_USE_WIN32)
794 
795  /*
796  * To handle signals while waiting, we need to add a win32 specific event.
797  * We accounted for the additional event at the top of this routine. See
798  * port/win32/signal.c for more details.
799  *
800  * Note: pgwin32_signal_event should be first to ensure that it will be
801  * reported when multiple events are set. We want to guarantee that
802  * pending signals are serviced.
803  */
804  set->handles[0] = pgwin32_signal_event;
805  StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
806 #endif
807 
808  return set;
809 }
810 
811 /*
812  * Free a previously created WaitEventSet.
813  *
814  * Note: preferably, this shouldn't have to free any resources that could be
815  * inherited across an exec(). If it did, we'd likely leak those resources in
816  * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
817  * when the FD is created. For the Windows case, we assume that the handles
818  * involved are non-inheritable.
819  */
820 void
822 {
823 #if defined(WAIT_USE_EPOLL)
824  close(set->epoll_fd);
826 #elif defined(WAIT_USE_KQUEUE)
827  close(set->kqueue_fd);
829 #elif defined(WAIT_USE_WIN32)
830  WaitEvent *cur_event;
831 
832  for (cur_event = set->events;
833  cur_event < (set->events + set->nevents);
834  cur_event++)
835  {
836  if (cur_event->events & WL_LATCH_SET)
837  {
838  /* uses the latch's HANDLE */
839  }
840  else if (cur_event->events & WL_POSTMASTER_DEATH)
841  {
842  /* uses PostmasterHandle */
843  }
844  else
845  {
846  /* Clean up the event object we created for the socket */
847  WSAEventSelect(cur_event->fd, NULL, 0);
848  WSACloseEvent(set->handles[cur_event->pos + 1]);
849  }
850  }
851 #endif
852 
853  pfree(set);
854 }
855 
856 /* ---
857  * Add an event to the set. Possible events are:
858  * - WL_LATCH_SET: Wait for the latch to be set
859  * - WL_POSTMASTER_DEATH: Wait for postmaster to die
860  * - WL_SOCKET_READABLE: Wait for socket to become readable,
861  * can be combined in one event with other WL_SOCKET_* events
862  * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
863  * can be combined with other WL_SOCKET_* events
864  * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
865  * can be combined with other WL_SOCKET_* events (on non-Windows
866  * platforms, this is the same as WL_SOCKET_WRITEABLE)
867  * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
868  * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
869  *
870  * Returns the offset in WaitEventSet->events (starting from 0), which can be
871  * used to modify previously added wait events using ModifyWaitEvent().
872  *
873  * In the WL_LATCH_SET case the latch must be owned by the current process,
874  * i.e. it must be a process-local latch initialized with InitLatch, or a
875  * shared latch associated with the current process by calling OwnLatch.
876  *
877  * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED cases, EOF and error
878  * conditions cause the socket to be reported as readable/writable/connected,
879  * so that the caller can deal with the condition.
880  *
881  * The user_data pointer specified here will be set for the events returned
882  * by WaitEventSetWait(), allowing to easily associate additional data with
883  * events.
884  */
885 int
887  void *user_data)
888 {
889  WaitEvent *event;
890 
891  /* not enough space */
892  Assert(set->nevents < set->nevents_space);
893 
894  if (events == WL_EXIT_ON_PM_DEATH)
895  {
896  events = WL_POSTMASTER_DEATH;
897  set->exit_on_postmaster_death = true;
898  }
899 
900  if (latch)
901  {
902  if (latch->owner_pid != MyProcPid)
903  elog(ERROR, "cannot wait on a latch owned by another process");
904  if (set->latch)
905  elog(ERROR, "cannot wait on more than one latch");
906  if ((events & WL_LATCH_SET) != WL_LATCH_SET)
907  elog(ERROR, "latch events only support being set");
908  }
909  else
910  {
911  if (events & WL_LATCH_SET)
912  elog(ERROR, "cannot wait on latch without a specified latch");
913  }
914 
915  /* waiting for socket readiness without a socket indicates a bug */
916  if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
917  elog(ERROR, "cannot wait on socket event without a socket");
918 
919  event = &set->events[set->nevents];
920  event->pos = set->nevents++;
921  event->fd = fd;
922  event->events = events;
923  event->user_data = user_data;
924 #ifdef WIN32
925  event->reset = false;
926 #endif
927 
928  if (events == WL_LATCH_SET)
929  {
930  set->latch = latch;
931  set->latch_pos = event->pos;
932 #if defined(WAIT_USE_SELF_PIPE)
933  event->fd = selfpipe_readfd;
934 #elif defined(WAIT_USE_SIGNALFD)
935  event->fd = signal_fd;
936 #else
937  event->fd = PGINVALID_SOCKET;
938 #ifdef WAIT_USE_EPOLL
939  return event->pos;
940 #endif
941 #endif
942  }
943  else if (events == WL_POSTMASTER_DEATH)
944  {
945 #ifndef WIN32
947 #endif
948  }
949 
950  /* perform wait primitive specific initialization, if needed */
951 #if defined(WAIT_USE_EPOLL)
952  WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
953 #elif defined(WAIT_USE_KQUEUE)
954  WaitEventAdjustKqueue(set, event, 0);
955 #elif defined(WAIT_USE_POLL)
956  WaitEventAdjustPoll(set, event);
957 #elif defined(WAIT_USE_WIN32)
958  WaitEventAdjustWin32(set, event);
959 #endif
960 
961  return event->pos;
962 }
963 
964 /*
965  * Change the event mask and, in the WL_LATCH_SET case, the latch associated
966  * with the WaitEvent. The latch may be changed to NULL to disable the latch
967  * temporarily, and then set back to a latch later.
968  *
969  * 'pos' is the id returned by AddWaitEventToSet.
970  */
971 void
972 ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
973 {
974  WaitEvent *event;
975 #if defined(WAIT_USE_KQUEUE)
976  int old_events;
977 #endif
978 
979  Assert(pos < set->nevents);
980 
981  event = &set->events[pos];
982 #if defined(WAIT_USE_KQUEUE)
983  old_events = event->events;
984 #endif
985 
986  /*
987  * If neither the event mask nor the associated latch changes, return
988  * early. That's an important optimization for some sockets, where
989  * ModifyWaitEvent is frequently used to switch from waiting for reads to
990  * waiting on writes.
991  */
992  if (events == event->events &&
993  (!(event->events & WL_LATCH_SET) || set->latch == latch))
994  return;
995 
996  if (event->events & WL_LATCH_SET &&
997  events != event->events)
998  {
999  elog(ERROR, "cannot modify latch event");
1000  }
1001 
1002  if (event->events & WL_POSTMASTER_DEATH)
1003  {
1004  elog(ERROR, "cannot modify postmaster death event");
1005  }
1006 
1007  /* FIXME: validate event mask */
1008  event->events = events;
1009 
1010  if (events == WL_LATCH_SET)
1011  {
1012  if (latch && latch->owner_pid != MyProcPid)
1013  elog(ERROR, "cannot wait on a latch owned by another process");
1014  set->latch = latch;
1015 
1016  /*
1017  * On Unix, we don't need to modify the kernel object because the
1018  * underlying pipe (if there is one) is the same for all latches so we
1019  * can return immediately. On Windows, we need to update our array of
1020  * handles, but we leave the old one in place and tolerate spurious
1021  * wakeups if the latch is disabled.
1022  */
1023 #if defined(WAIT_USE_WIN32)
1024  if (!latch)
1025  return;
1026 #else
1027  return;
1028 #endif
1029  }
1030 
1031 #if defined(WAIT_USE_EPOLL)
1032  WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
1033 #elif defined(WAIT_USE_KQUEUE)
1034  WaitEventAdjustKqueue(set, event, old_events);
1035 #elif defined(WAIT_USE_POLL)
1036  WaitEventAdjustPoll(set, event);
1037 #elif defined(WAIT_USE_WIN32)
1038  WaitEventAdjustWin32(set, event);
1039 #endif
1040 }
1041 
1042 #if defined(WAIT_USE_EPOLL)
1043 /*
1044  * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
1045  */
1046 static void
1047 WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
1048 {
1049  struct epoll_event epoll_ev;
1050  int rc;
1051 
1052  /* pointer to our event, returned by epoll_wait */
1053  epoll_ev.data.ptr = event;
1054  /* always wait for errors */
1055  epoll_ev.events = EPOLLERR | EPOLLHUP;
1056 
1057  /* prepare pollfd entry once */
1058  if (event->events == WL_LATCH_SET)
1059  {
1060  Assert(set->latch != NULL);
1061  epoll_ev.events |= EPOLLIN;
1062  }
1063  else if (event->events == WL_POSTMASTER_DEATH)
1064  {
1065  epoll_ev.events |= EPOLLIN;
1066  }
1067  else
1068  {
1069  Assert(event->fd != PGINVALID_SOCKET);
1070  Assert(event->events & (WL_SOCKET_READABLE |
1072  WL_SOCKET_CLOSED));
1073 
1074  if (event->events & WL_SOCKET_READABLE)
1075  epoll_ev.events |= EPOLLIN;
1076  if (event->events & WL_SOCKET_WRITEABLE)
1077  epoll_ev.events |= EPOLLOUT;
1078  if (event->events & WL_SOCKET_CLOSED)
1079  epoll_ev.events |= EPOLLRDHUP;
1080  }
1081 
1082  /*
1083  * Even though unused, we also pass epoll_ev as the data argument if
1084  * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
1085  * requiring that, and actually it makes the code simpler...
1086  */
1087  rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
1088 
1089  if (rc < 0)
1090  ereport(ERROR,
1092  errmsg("%s() failed: %m",
1093  "epoll_ctl")));
1094 }
1095 #endif
1096 
1097 #if defined(WAIT_USE_POLL)
1098 static void
1099 WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
1100 {
1101  struct pollfd *pollfd = &set->pollfds[event->pos];
1102 
1103  pollfd->revents = 0;
1104  pollfd->fd = event->fd;
1105 
1106  /* prepare pollfd entry once */
1107  if (event->events == WL_LATCH_SET)
1108  {
1109  Assert(set->latch != NULL);
1110  pollfd->events = POLLIN;
1111  }
1112  else if (event->events == WL_POSTMASTER_DEATH)
1113  {
1114  pollfd->events = POLLIN;
1115  }
1116  else
1117  {
1118  Assert(event->events & (WL_SOCKET_READABLE |
1120  WL_SOCKET_CLOSED));
1121  pollfd->events = 0;
1122  if (event->events & WL_SOCKET_READABLE)
1123  pollfd->events |= POLLIN;
1124  if (event->events & WL_SOCKET_WRITEABLE)
1125  pollfd->events |= POLLOUT;
1126 #ifdef POLLRDHUP
1127  if (event->events & WL_SOCKET_CLOSED)
1128  pollfd->events |= POLLRDHUP;
1129 #endif
1130  }
1131 
1132  Assert(event->fd != PGINVALID_SOCKET);
1133 }
1134 #endif
1135 
1136 #if defined(WAIT_USE_KQUEUE)
1137 
1138 /*
1139  * On most BSD family systems, the udata member of struct kevent is of type
1140  * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
1141  * NetBSD has it as intptr_t, so here we wallpaper over that difference with
1142  * an lvalue cast.
1143  */
1144 #define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
1145 
1146 static inline void
1147 WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
1148  WaitEvent *event)
1149 {
1150  k_ev->ident = event->fd;
1151  k_ev->filter = filter;
1152  k_ev->flags = action;
1153  k_ev->fflags = 0;
1154  k_ev->data = 0;
1155  AccessWaitEvent(k_ev) = event;
1156 }
1157 
1158 static inline void
1159 WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
1160 {
1161  /* For now postmaster death can only be added, not removed. */
1162  k_ev->ident = PostmasterPid;
1163  k_ev->filter = EVFILT_PROC;
1164  k_ev->flags = EV_ADD;
1165  k_ev->fflags = NOTE_EXIT;
1166  k_ev->data = 0;
1167  AccessWaitEvent(k_ev) = event;
1168 }
1169 
1170 static inline void
1171 WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
1172 {
1173  /* For now latch can only be added, not removed. */
1174  k_ev->ident = SIGURG;
1175  k_ev->filter = EVFILT_SIGNAL;
1176  k_ev->flags = EV_ADD;
1177  k_ev->fflags = 0;
1178  k_ev->data = 0;
1179  AccessWaitEvent(k_ev) = event;
1180 }
1181 
1182 /*
1183  * old_events is the previous event mask, used to compute what has changed.
1184  */
1185 static void
1186 WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
1187 {
1188  int rc;
1189  struct kevent k_ev[2];
1190  int count = 0;
1191  bool new_filt_read = false;
1192  bool old_filt_read = false;
1193  bool new_filt_write = false;
1194  bool old_filt_write = false;
1195 
1196  if (old_events == event->events)
1197  return;
1198 
1199  Assert(event->events != WL_LATCH_SET || set->latch != NULL);
1200  Assert(event->events == WL_LATCH_SET ||
1201  event->events == WL_POSTMASTER_DEATH ||
1202  (event->events & (WL_SOCKET_READABLE |
1204  WL_SOCKET_CLOSED)));
1205 
1206  if (event->events == WL_POSTMASTER_DEATH)
1207  {
1208  /*
1209  * Unlike all the other implementations, we detect postmaster death
1210  * using process notification instead of waiting on the postmaster
1211  * alive pipe.
1212  */
1213  WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
1214  }
1215  else if (event->events == WL_LATCH_SET)
1216  {
1217  /* We detect latch wakeup using a signal event. */
1218  WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
1219  }
1220  else
1221  {
1222  /*
1223  * We need to compute the adds and deletes required to get from the
1224  * old event mask to the new event mask, since kevent treats readable
1225  * and writable as separate events.
1226  */
1227  if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
1228  old_filt_read = true;
1229  if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
1230  new_filt_read = true;
1231  if (old_events & WL_SOCKET_WRITEABLE)
1232  old_filt_write = true;
1233  if (event->events & WL_SOCKET_WRITEABLE)
1234  new_filt_write = true;
1235  if (old_filt_read && !new_filt_read)
1236  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
1237  event);
1238  else if (!old_filt_read && new_filt_read)
1239  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
1240  event);
1241  if (old_filt_write && !new_filt_write)
1242  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
1243  event);
1244  else if (!old_filt_write && new_filt_write)
1245  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
1246  event);
1247  }
1248 
1249  /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
1250  if (count == 0)
1251  return;
1252 
1253  Assert(count <= 2);
1254 
1255  rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
1256 
1257  /*
1258  * When adding the postmaster's pid, we have to consider that it might
1259  * already have exited and perhaps even been replaced by another process
1260  * with the same pid. If so, we have to defer reporting this as an event
1261  * until the next call to WaitEventSetWaitBlock().
1262  */
1263 
1264  if (rc < 0)
1265  {
1266  if (event->events == WL_POSTMASTER_DEATH &&
1267  (errno == ESRCH || errno == EACCES))
1268  set->report_postmaster_not_running = true;
1269  else
1270  ereport(ERROR,
1272  errmsg("%s() failed: %m",
1273  "kevent")));
1274  }
1275  else if (event->events == WL_POSTMASTER_DEATH &&
1276  PostmasterPid != getppid() &&
1277  !PostmasterIsAlive())
1278  {
1279  /*
1280  * The extra PostmasterIsAliveInternal() check prevents false alarms
1281  * on systems that give a different value for getppid() while being
1282  * traced by a debugger.
1283  */
1284  set->report_postmaster_not_running = true;
1285  }
1286 }
1287 
1288 #endif
1289 
1290 #if defined(WAIT_USE_WIN32)
1291 static void
1292 WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
1293 {
1294  HANDLE *handle = &set->handles[event->pos + 1];
1295 
1296  if (event->events == WL_LATCH_SET)
1297  {
1298  Assert(set->latch != NULL);
1299  *handle = set->latch->event;
1300  }
1301  else if (event->events == WL_POSTMASTER_DEATH)
1302  {
1303  *handle = PostmasterHandle;
1304  }
1305  else
1306  {
1307  int flags = FD_CLOSE; /* always check for errors/EOF */
1308 
1309  if (event->events & WL_SOCKET_READABLE)
1310  flags |= FD_READ;
1311  if (event->events & WL_SOCKET_WRITEABLE)
1312  flags |= FD_WRITE;
1313  if (event->events & WL_SOCKET_CONNECTED)
1314  flags |= FD_CONNECT;
1315 
1316  if (*handle == WSA_INVALID_EVENT)
1317  {
1318  *handle = WSACreateEvent();
1319  if (*handle == WSA_INVALID_EVENT)
1320  elog(ERROR, "failed to create event for socket: error code %d",
1321  WSAGetLastError());
1322  }
1323  if (WSAEventSelect(event->fd, *handle, flags) != 0)
1324  elog(ERROR, "failed to set up event for socket: error code %d",
1325  WSAGetLastError());
1326 
1327  Assert(event->fd != PGINVALID_SOCKET);
1328  }
1329 }
1330 #endif
1331 
1332 /*
1333  * Wait for events added to the set to happen, or until the timeout is
1334  * reached. At most nevents occurred events are returned.
1335  *
1336  * If timeout = -1, block until an event occurs; if 0, check sockets for
1337  * readiness, but don't block; if > 0, block for at most timeout milliseconds.
1338  *
1339  * Returns the number of events occurred, or 0 if the timeout was reached.
1340  *
1341  * Returned events will have the fd, pos, user_data fields set to the
1342  * values associated with the registered event.
1343  */
1344 int
1345 WaitEventSetWait(WaitEventSet *set, long timeout,
1346  WaitEvent *occurred_events, int nevents,
1347  uint32 wait_event_info)
1348 {
1349  int returned_events = 0;
1351  instr_time cur_time;
1352  long cur_timeout = -1;
1353 
1354  Assert(nevents > 0);
1355 
1356  /*
1357  * Initialize timeout if requested. We must record the current time so
1358  * that we can determine the remaining timeout if interrupted.
1359  */
1360  if (timeout >= 0)
1361  {
1363  Assert(timeout >= 0 && timeout <= INT_MAX);
1364  cur_timeout = timeout;
1365  }
1366 
1367  pgstat_report_wait_start(wait_event_info);
1368 
1369 #ifndef WIN32
1370  waiting = true;
1371 #else
1372  /* Ensure that signals are serviced even if latch is already set */
1374 #endif
1375  while (returned_events == 0)
1376  {
1377  int rc;
1378 
1379  /*
1380  * Check if the latch is set already. If so, leave the loop
1381  * immediately, avoid blocking again. We don't attempt to report any
1382  * other events that might also be satisfied.
1383  *
1384  * If someone sets the latch between this and the
1385  * WaitEventSetWaitBlock() below, the setter will write a byte to the
1386  * pipe (or signal us and the signal handler will do that), and the
1387  * readiness routine will return immediately.
1388  *
1389  * On unix, If there's a pending byte in the self pipe, we'll notice
1390  * whenever blocking. Only clearing the pipe in that case avoids
1391  * having to drain it every time WaitLatchOrSocket() is used. Should
1392  * the pipe-buffer fill up we're still ok, because the pipe is in
1393  * nonblocking mode. It's unlikely for that to happen, because the
1394  * self pipe isn't filled unless we're blocking (waiting = true), or
1395  * from inside a signal handler in latch_sigurg_handler().
1396  *
1397  * On windows, we'll also notice if there's a pending event for the
1398  * latch when blocking, but there's no danger of anything filling up,
1399  * as "Setting an event that is already set has no effect.".
1400  *
1401  * Note: we assume that the kernel calls involved in latch management
1402  * will provide adequate synchronization on machines with weak memory
1403  * ordering, so that we cannot miss seeing is_set if a notification
1404  * has already been queued.
1405  */
1406  if (set->latch && !set->latch->is_set)
1407  {
1408  /* about to sleep on a latch */
1409  set->latch->maybe_sleeping = true;
1411  /* and recheck */
1412  }
1413 
1414  if (set->latch && set->latch->is_set)
1415  {
1416  occurred_events->fd = PGINVALID_SOCKET;
1417  occurred_events->pos = set->latch_pos;
1418  occurred_events->user_data =
1419  set->events[set->latch_pos].user_data;
1420  occurred_events->events = WL_LATCH_SET;
1421  occurred_events++;
1422  returned_events++;
1423 
1424  /* could have been set above */
1425  set->latch->maybe_sleeping = false;
1426 
1427  break;
1428  }
1429 
1430  /*
1431  * Wait for events using the readiness primitive chosen at the top of
1432  * this file. If -1 is returned, a timeout has occurred, if 0 we have
1433  * to retry, everything >= 1 is the number of returned events.
1434  */
1435  rc = WaitEventSetWaitBlock(set, cur_timeout,
1436  occurred_events, nevents);
1437 
1438  if (set->latch)
1439  {
1440  Assert(set->latch->maybe_sleeping);
1441  set->latch->maybe_sleeping = false;
1442  }
1443 
1444  if (rc == -1)
1445  break; /* timeout occurred */
1446  else
1447  returned_events = rc;
1448 
1449  /* If we're not done, update cur_timeout for next iteration */
1450  if (returned_events == 0 && timeout >= 0)
1451  {
1452  INSTR_TIME_SET_CURRENT(cur_time);
1453  INSTR_TIME_SUBTRACT(cur_time, start_time);
1454  cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
1455  if (cur_timeout <= 0)
1456  break;
1457  }
1458  }
1459 #ifndef WIN32
1460  waiting = false;
1461 #endif
1462 
1464 
1465  return returned_events;
1466 }
1467 
1468 
1469 #if defined(WAIT_USE_EPOLL)
1470 
1471 /*
1472  * Wait using linux's epoll_wait(2).
1473  *
1474  * This is the preferable wait method, as several readiness notifications are
1475  * delivered, without having to iterate through all of set->events. The return
1476  * epoll_event struct contain a pointer to our events, making association
1477  * easy.
1478  */
1479 static inline int
1480 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1481  WaitEvent *occurred_events, int nevents)
1482 {
1483  int returned_events = 0;
1484  int rc;
1485  WaitEvent *cur_event;
1486  struct epoll_event *cur_epoll_event;
1487 
1488  /* Sleep */
1489  rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
1490  nevents, cur_timeout);
1491 
1492  /* Check return code */
1493  if (rc < 0)
1494  {
1495  /* EINTR is okay, otherwise complain */
1496  if (errno != EINTR)
1497  {
1498  waiting = false;
1499  ereport(ERROR,
1501  errmsg("%s() failed: %m",
1502  "epoll_wait")));
1503  }
1504  return 0;
1505  }
1506  else if (rc == 0)
1507  {
1508  /* timeout exceeded */
1509  return -1;
1510  }
1511 
1512  /*
1513  * At least one event occurred, iterate over the returned epoll events
1514  * until they're either all processed, or we've returned all the events
1515  * the caller desired.
1516  */
1517  for (cur_epoll_event = set->epoll_ret_events;
1518  cur_epoll_event < (set->epoll_ret_events + rc) &&
1519  returned_events < nevents;
1520  cur_epoll_event++)
1521  {
1522  /* epoll's data pointer is set to the associated WaitEvent */
1523  cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
1524 
1525  occurred_events->pos = cur_event->pos;
1526  occurred_events->user_data = cur_event->user_data;
1527  occurred_events->events = 0;
1528 
1529  if (cur_event->events == WL_LATCH_SET &&
1530  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1531  {
1532  /* Drain the signalfd. */
1533  drain();
1534 
1535  if (set->latch && set->latch->is_set)
1536  {
1537  occurred_events->fd = PGINVALID_SOCKET;
1538  occurred_events->events = WL_LATCH_SET;
1539  occurred_events++;
1540  returned_events++;
1541  }
1542  }
1543  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1544  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1545  {
1546  /*
1547  * We expect an EPOLLHUP when the remote end is closed, but
1548  * because we don't expect the pipe to become readable or to have
1549  * any errors either, treat those cases as postmaster death, too.
1550  *
1551  * Be paranoid about a spurious event signaling the postmaster as
1552  * being dead. There have been reports about that happening with
1553  * older primitives (select(2) to be specific), and a spurious
1554  * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1555  * cost much.
1556  */
1558  {
1559  if (set->exit_on_postmaster_death)
1560  proc_exit(1);
1561  occurred_events->fd = PGINVALID_SOCKET;
1562  occurred_events->events = WL_POSTMASTER_DEATH;
1563  occurred_events++;
1564  returned_events++;
1565  }
1566  }
1567  else if (cur_event->events & (WL_SOCKET_READABLE |
1570  {
1571  Assert(cur_event->fd != PGINVALID_SOCKET);
1572 
1573  if ((cur_event->events & WL_SOCKET_READABLE) &&
1574  (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
1575  {
1576  /* data available in socket, or EOF */
1577  occurred_events->events |= WL_SOCKET_READABLE;
1578  }
1579 
1580  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1581  (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
1582  {
1583  /* writable, or EOF */
1584  occurred_events->events |= WL_SOCKET_WRITEABLE;
1585  }
1586 
1587  if ((cur_event->events & WL_SOCKET_CLOSED) &&
1588  (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
1589  {
1590  /* remote peer shut down, or error */
1591  occurred_events->events |= WL_SOCKET_CLOSED;
1592  }
1593 
1594  if (occurred_events->events != 0)
1595  {
1596  occurred_events->fd = cur_event->fd;
1597  occurred_events++;
1598  returned_events++;
1599  }
1600  }
1601  }
1602 
1603  return returned_events;
1604 }
1605 
1606 #elif defined(WAIT_USE_KQUEUE)
1607 
1608 /*
1609  * Wait using kevent(2) on BSD-family systems and macOS.
1610  *
1611  * For now this mirrors the epoll code, but in future it could modify the fd
1612  * set in the same call to kevent as it uses for waiting instead of doing that
1613  * with separate system calls.
1614  */
1615 static int
1616 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1617  WaitEvent *occurred_events, int nevents)
1618 {
1619  int returned_events = 0;
1620  int rc;
1621  WaitEvent *cur_event;
1622  struct kevent *cur_kqueue_event;
1623  struct timespec timeout;
1624  struct timespec *timeout_p;
1625 
1626  if (cur_timeout < 0)
1627  timeout_p = NULL;
1628  else
1629  {
1630  timeout.tv_sec = cur_timeout / 1000;
1631  timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
1632  timeout_p = &timeout;
1633  }
1634 
1635  /*
1636  * Report postmaster events discovered by WaitEventAdjustKqueue() or an
1637  * earlier call to WaitEventSetWait().
1638  */
1639  if (unlikely(set->report_postmaster_not_running))
1640  {
1641  if (set->exit_on_postmaster_death)
1642  proc_exit(1);
1643  occurred_events->fd = PGINVALID_SOCKET;
1644  occurred_events->events = WL_POSTMASTER_DEATH;
1645  return 1;
1646  }
1647 
1648  /* Sleep */
1649  rc = kevent(set->kqueue_fd, NULL, 0,
1650  set->kqueue_ret_events, nevents,
1651  timeout_p);
1652 
1653  /* Check return code */
1654  if (rc < 0)
1655  {
1656  /* EINTR is okay, otherwise complain */
1657  if (errno != EINTR)
1658  {
1659  waiting = false;
1660  ereport(ERROR,
1662  errmsg("%s() failed: %m",
1663  "kevent")));
1664  }
1665  return 0;
1666  }
1667  else if (rc == 0)
1668  {
1669  /* timeout exceeded */
1670  return -1;
1671  }
1672 
1673  /*
1674  * At least one event occurred, iterate over the returned kqueue events
1675  * until they're either all processed, or we've returned all the events
1676  * the caller desired.
1677  */
1678  for (cur_kqueue_event = set->kqueue_ret_events;
1679  cur_kqueue_event < (set->kqueue_ret_events + rc) &&
1680  returned_events < nevents;
1681  cur_kqueue_event++)
1682  {
1683  /* kevent's udata points to the associated WaitEvent */
1684  cur_event = AccessWaitEvent(cur_kqueue_event);
1685 
1686  occurred_events->pos = cur_event->pos;
1687  occurred_events->user_data = cur_event->user_data;
1688  occurred_events->events = 0;
1689 
1690  if (cur_event->events == WL_LATCH_SET &&
1691  cur_kqueue_event->filter == EVFILT_SIGNAL)
1692  {
1693  if (set->latch && set->latch->is_set)
1694  {
1695  occurred_events->fd = PGINVALID_SOCKET;
1696  occurred_events->events = WL_LATCH_SET;
1697  occurred_events++;
1698  returned_events++;
1699  }
1700  }
1701  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1702  cur_kqueue_event->filter == EVFILT_PROC &&
1703  (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
1704  {
1705  /*
1706  * The kernel will tell this kqueue object only once about the
1707  * exit of the postmaster, so let's remember that for next time so
1708  * that we provide level-triggered semantics.
1709  */
1710  set->report_postmaster_not_running = true;
1711 
1712  if (set->exit_on_postmaster_death)
1713  proc_exit(1);
1714  occurred_events->fd = PGINVALID_SOCKET;
1715  occurred_events->events = WL_POSTMASTER_DEATH;
1716  occurred_events++;
1717  returned_events++;
1718  }
1719  else if (cur_event->events & (WL_SOCKET_READABLE |
1722  {
1723  Assert(cur_event->fd >= 0);
1724 
1725  if ((cur_event->events & WL_SOCKET_READABLE) &&
1726  (cur_kqueue_event->filter == EVFILT_READ))
1727  {
1728  /* readable, or EOF */
1729  occurred_events->events |= WL_SOCKET_READABLE;
1730  }
1731 
1732  if ((cur_event->events & WL_SOCKET_CLOSED) &&
1733  (cur_kqueue_event->filter == EVFILT_READ) &&
1734  (cur_kqueue_event->flags & EV_EOF))
1735  {
1736  /* the remote peer has shut down */
1737  occurred_events->events |= WL_SOCKET_CLOSED;
1738  }
1739 
1740  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1741  (cur_kqueue_event->filter == EVFILT_WRITE))
1742  {
1743  /* writable, or EOF */
1744  occurred_events->events |= WL_SOCKET_WRITEABLE;
1745  }
1746 
1747  if (occurred_events->events != 0)
1748  {
1749  occurred_events->fd = cur_event->fd;
1750  occurred_events++;
1751  returned_events++;
1752  }
1753  }
1754  }
1755 
1756  return returned_events;
1757 }
1758 
1759 #elif defined(WAIT_USE_POLL)
1760 
1761 /*
1762  * Wait using poll(2).
1763  *
1764  * This allows to receive readiness notifications for several events at once,
1765  * but requires iterating through all of set->pollfds.
1766  */
1767 static inline int
1768 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1769  WaitEvent *occurred_events, int nevents)
1770 {
1771  int returned_events = 0;
1772  int rc;
1773  WaitEvent *cur_event;
1774  struct pollfd *cur_pollfd;
1775 
1776  /* Sleep */
1777  rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
1778 
1779  /* Check return code */
1780  if (rc < 0)
1781  {
1782  /* EINTR is okay, otherwise complain */
1783  if (errno != EINTR)
1784  {
1785  waiting = false;
1786  ereport(ERROR,
1788  errmsg("%s() failed: %m",
1789  "poll")));
1790  }
1791  return 0;
1792  }
1793  else if (rc == 0)
1794  {
1795  /* timeout exceeded */
1796  return -1;
1797  }
1798 
1799  for (cur_event = set->events, cur_pollfd = set->pollfds;
1800  cur_event < (set->events + set->nevents) &&
1801  returned_events < nevents;
1802  cur_event++, cur_pollfd++)
1803  {
1804  /* no activity on this FD, skip */
1805  if (cur_pollfd->revents == 0)
1806  continue;
1807 
1808  occurred_events->pos = cur_event->pos;
1809  occurred_events->user_data = cur_event->user_data;
1810  occurred_events->events = 0;
1811 
1812  if (cur_event->events == WL_LATCH_SET &&
1813  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1814  {
1815  /* There's data in the self-pipe, clear it. */
1816  drain();
1817 
1818  if (set->latch && set->latch->is_set)
1819  {
1820  occurred_events->fd = PGINVALID_SOCKET;
1821  occurred_events->events = WL_LATCH_SET;
1822  occurred_events++;
1823  returned_events++;
1824  }
1825  }
1826  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1827  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1828  {
1829  /*
1830  * We expect an POLLHUP when the remote end is closed, but because
1831  * we don't expect the pipe to become readable or to have any
1832  * errors either, treat those cases as postmaster death, too.
1833  *
1834  * Be paranoid about a spurious event signaling the postmaster as
1835  * being dead. There have been reports about that happening with
1836  * older primitives (select(2) to be specific), and a spurious
1837  * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1838  * cost much.
1839  */
1841  {
1842  if (set->exit_on_postmaster_death)
1843  proc_exit(1);
1844  occurred_events->fd = PGINVALID_SOCKET;
1845  occurred_events->events = WL_POSTMASTER_DEATH;
1846  occurred_events++;
1847  returned_events++;
1848  }
1849  }
1850  else if (cur_event->events & (WL_SOCKET_READABLE |
1853  {
1854  int errflags = POLLHUP | POLLERR | POLLNVAL;
1855 
1856  Assert(cur_event->fd >= PGINVALID_SOCKET);
1857 
1858  if ((cur_event->events & WL_SOCKET_READABLE) &&
1859  (cur_pollfd->revents & (POLLIN | errflags)))
1860  {
1861  /* data available in socket, or EOF */
1862  occurred_events->events |= WL_SOCKET_READABLE;
1863  }
1864 
1865  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1866  (cur_pollfd->revents & (POLLOUT | errflags)))
1867  {
1868  /* writeable, or EOF */
1869  occurred_events->events |= WL_SOCKET_WRITEABLE;
1870  }
1871 
1872 #ifdef POLLRDHUP
1873  if ((cur_event->events & WL_SOCKET_CLOSED) &&
1874  (cur_pollfd->revents & (POLLRDHUP | errflags)))
1875  {
1876  /* remote peer closed, or error */
1877  occurred_events->events |= WL_SOCKET_CLOSED;
1878  }
1879 #endif
1880 
1881  if (occurred_events->events != 0)
1882  {
1883  occurred_events->fd = cur_event->fd;
1884  occurred_events++;
1885  returned_events++;
1886  }
1887  }
1888  }
1889  return returned_events;
1890 }
1891 
1892 #elif defined(WAIT_USE_WIN32)
1893 
1894 /*
1895  * Wait using Windows' WaitForMultipleObjects().
1896  *
1897  * Unfortunately this will only ever return a single readiness notification at
1898  * a time. Note that while the official documentation for
1899  * WaitForMultipleObjects is ambiguous about multiple events being "consumed"
1900  * with a single bWaitAll = FALSE call,
1901  * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273 confirms
1902  * that only one event is "consumed".
1903  */
1904 static inline int
1905 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1906  WaitEvent *occurred_events, int nevents)
1907 {
1908  int returned_events = 0;
1909  DWORD rc;
1910  WaitEvent *cur_event;
1911 
1912  /* Reset any wait events that need it */
1913  for (cur_event = set->events;
1914  cur_event < (set->events + set->nevents);
1915  cur_event++)
1916  {
1917  if (cur_event->reset)
1918  {
1919  WaitEventAdjustWin32(set, cur_event);
1920  cur_event->reset = false;
1921  }
1922 
1923  /*
1924  * Windows does not guarantee to log an FD_WRITE network event
1925  * indicating that more data can be sent unless the previous send()
1926  * failed with WSAEWOULDBLOCK. While our caller might well have made
1927  * such a call, we cannot assume that here. Therefore, if waiting for
1928  * write-ready, force the issue by doing a dummy send(). If the dummy
1929  * send() succeeds, assume that the socket is in fact write-ready, and
1930  * return immediately. Also, if it fails with something other than
1931  * WSAEWOULDBLOCK, return a write-ready indication to let our caller
1932  * deal with the error condition.
1933  */
1934  if (cur_event->events & WL_SOCKET_WRITEABLE)
1935  {
1936  char c;
1937  WSABUF buf;
1938  DWORD sent;
1939  int r;
1940 
1941  buf.buf = &c;
1942  buf.len = 0;
1943 
1944  r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
1945  if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
1946  {
1947  occurred_events->pos = cur_event->pos;
1948  occurred_events->user_data = cur_event->user_data;
1949  occurred_events->events = WL_SOCKET_WRITEABLE;
1950  occurred_events->fd = cur_event->fd;
1951  return 1;
1952  }
1953  }
1954  }
1955 
1956  /*
1957  * Sleep.
1958  *
1959  * Need to wait for ->nevents + 1, because signal handle is in [0].
1960  */
1961  rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
1962  cur_timeout);
1963 
1964  /* Check return code */
1965  if (rc == WAIT_FAILED)
1966  elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
1967  GetLastError());
1968  else if (rc == WAIT_TIMEOUT)
1969  {
1970  /* timeout exceeded */
1971  return -1;
1972  }
1973 
1974  if (rc == WAIT_OBJECT_0)
1975  {
1976  /* Service newly-arrived signals */
1978  return 0; /* retry */
1979  }
1980 
1981  /*
1982  * With an offset of one, due to the always present pgwin32_signal_event,
1983  * the handle offset directly corresponds to a wait event.
1984  */
1985  cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
1986 
1987  occurred_events->pos = cur_event->pos;
1988  occurred_events->user_data = cur_event->user_data;
1989  occurred_events->events = 0;
1990 
1991  if (cur_event->events == WL_LATCH_SET)
1992  {
1993  /*
1994  * We cannot use set->latch->event to reset the fired event if we
1995  * aren't waiting on this latch now.
1996  */
1997  if (!ResetEvent(set->handles[cur_event->pos + 1]))
1998  elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
1999 
2000  if (set->latch && set->latch->is_set)
2001  {
2002  occurred_events->fd = PGINVALID_SOCKET;
2003  occurred_events->events = WL_LATCH_SET;
2004  occurred_events++;
2005  returned_events++;
2006  }
2007  }
2008  else if (cur_event->events == WL_POSTMASTER_DEATH)
2009  {
2010  /*
2011  * Postmaster apparently died. Since the consequences of falsely
2012  * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we take
2013  * the trouble to positively verify this with PostmasterIsAlive(),
2014  * even though there is no known reason to think that the event could
2015  * be falsely set on Windows.
2016  */
2018  {
2019  if (set->exit_on_postmaster_death)
2020  proc_exit(1);
2021  occurred_events->fd = PGINVALID_SOCKET;
2022  occurred_events->events = WL_POSTMASTER_DEATH;
2023  occurred_events++;
2024  returned_events++;
2025  }
2026  }
2027  else if (cur_event->events & WL_SOCKET_MASK)
2028  {
2029  WSANETWORKEVENTS resEvents;
2030  HANDLE handle = set->handles[cur_event->pos + 1];
2031 
2032  Assert(cur_event->fd);
2033 
2034  occurred_events->fd = cur_event->fd;
2035 
2036  ZeroMemory(&resEvents, sizeof(resEvents));
2037  if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
2038  elog(ERROR, "failed to enumerate network events: error code %d",
2039  WSAGetLastError());
2040  if ((cur_event->events & WL_SOCKET_READABLE) &&
2041  (resEvents.lNetworkEvents & FD_READ))
2042  {
2043  /* data available in socket */
2044  occurred_events->events |= WL_SOCKET_READABLE;
2045 
2046  /*------
2047  * WaitForMultipleObjects doesn't guarantee that a read event will
2048  * be returned if the latch is set at the same time. Even if it
2049  * did, the caller might drop that event expecting it to reoccur
2050  * on next call. So, we must force the event to be reset if this
2051  * WaitEventSet is used again in order to avoid an indefinite
2052  * hang. Refer https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
2053  * for the behavior of socket events.
2054  *------
2055  */
2056  cur_event->reset = true;
2057  }
2058  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
2059  (resEvents.lNetworkEvents & FD_WRITE))
2060  {
2061  /* writeable */
2062  occurred_events->events |= WL_SOCKET_WRITEABLE;
2063  }
2064  if ((cur_event->events & WL_SOCKET_CONNECTED) &&
2065  (resEvents.lNetworkEvents & FD_CONNECT))
2066  {
2067  /* connected */
2068  occurred_events->events |= WL_SOCKET_CONNECTED;
2069  }
2070  if (resEvents.lNetworkEvents & FD_CLOSE)
2071  {
2072  /* EOF/error, so signal all caller-requested socket flags */
2073  occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
2074  }
2075 
2076  if (occurred_events->events != 0)
2077  {
2078  occurred_events++;
2079  returned_events++;
2080  }
2081  }
2082 
2083  return returned_events;
2084 }
2085 #endif
2086 
2087 /*
2088  * Return whether the current build options can report WL_SOCKET_CLOSED.
2089  */
2090 bool
2092 {
2093 #if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
2094  defined(WAIT_USE_EPOLL) || \
2095  defined(WAIT_USE_KQUEUE)
2096  return true;
2097 #else
2098  return false;
2099 #endif
2100 }
2101 
2102 /*
2103  * Get the number of wait events registered in a given WaitEventSet.
2104  */
2105 int
2107 {
2108  return set->nevents;
2109 }
2110 
2111 #if defined(WAIT_USE_SELF_PIPE)
2112 
2113 /*
2114  * SetLatch uses SIGURG to wake up the process waiting on the latch.
2115  *
2116  * Wake up WaitLatch, if we're waiting.
2117  */
2118 static void
2119 latch_sigurg_handler(SIGNAL_ARGS)
2120 {
2121  int save_errno = errno;
2122 
2123  if (waiting)
2124  sendSelfPipeByte();
2125 
2126  errno = save_errno;
2127 }
2128 
2129 /* Send one byte to the self-pipe, to wake up WaitLatch */
2130 static void
2131 sendSelfPipeByte(void)
2132 {
2133  int rc;
2134  char dummy = 0;
2135 
2136 retry:
2137  rc = write(selfpipe_writefd, &dummy, 1);
2138  if (rc < 0)
2139  {
2140  /* If interrupted by signal, just retry */
2141  if (errno == EINTR)
2142  goto retry;
2143 
2144  /*
2145  * If the pipe is full, we don't need to retry, the data that's there
2146  * already is enough to wake up WaitLatch.
2147  */
2148  if (errno == EAGAIN || errno == EWOULDBLOCK)
2149  return;
2150 
2151  /*
2152  * Oops, the write() failed for some other reason. We might be in a
2153  * signal handler, so it's not safe to elog(). We have no choice but
2154  * silently ignore the error.
2155  */
2156  return;
2157  }
2158 }
2159 
2160 #endif
2161 
2162 #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
2163 
2164 /*
2165  * Read all available data from self-pipe or signalfd.
2166  *
2167  * Note: this is only called when waiting = true. If it fails and doesn't
2168  * return, it must reset that flag first (though ideally, this will never
2169  * happen).
2170  */
2171 static void
2172 drain(void)
2173 {
2174  char buf[1024];
2175  int rc;
2176  int fd;
2177 
2178 #ifdef WAIT_USE_SELF_PIPE
2179  fd = selfpipe_readfd;
2180 #else
2181  fd = signal_fd;
2182 #endif
2183 
2184  for (;;)
2185  {
2186  rc = read(fd, buf, sizeof(buf));
2187  if (rc < 0)
2188  {
2189  if (errno == EAGAIN || errno == EWOULDBLOCK)
2190  break; /* the descriptor is empty */
2191  else if (errno == EINTR)
2192  continue; /* retry */
2193  else
2194  {
2195  waiting = false;
2196 #ifdef WAIT_USE_SELF_PIPE
2197  elog(ERROR, "read() on self-pipe failed: %m");
2198 #else
2199  elog(ERROR, "read() on signalfd failed: %m");
2200 #endif
2201  }
2202  }
2203  else if (rc == 0)
2204  {
2205  waiting = false;
2206 #ifdef WAIT_USE_SELF_PIPE
2207  elog(ERROR, "unexpected EOF on self-pipe");
2208 #else
2209  elog(ERROR, "unexpected EOF on signalfd");
2210 #endif
2211  }
2212  else if (rc < sizeof(buf))
2213  {
2214  /* we successfully drained the pipe; no need to read() again */
2215  break;
2216  }
2217  /* else buffer wasn't big enough, so read again */
2218  }
2219 }
2220 
2221 #endif
#define pg_memory_barrier()
Definition: atomics.h:145
sigset_t UnBlockSig
Definition: pqsignal.c:22
unsigned int uint32
Definition: c.h:441
#define MAXALIGN(LEN)
Definition: c.h:757
#define SIGNAL_ARGS
Definition: c.h:1355
#define unlikely(x)
Definition: c.h:273
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:155
size_t Size
Definition: c.h:540
int errcode_for_socket_access(void)
Definition: elog.c:787
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define FATAL
Definition: elog.h:35
#define PANIC
Definition: elog.h:36
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define ereport(elevel,...)
Definition: elog.h:143
void ReleaseExternalFD(void)
Definition: fd.c:1230
bool AcquireExternalFD(void)
Definition: fd.c:1177
void ReserveExternalFD(void)
Definition: fd.c:1212
pid_t PostmasterPid
Definition: globals.c:99
int MyProcPid
Definition: globals.c:44
bool IsUnderPostmaster
Definition: globals.c:113
struct Latch * MyLatch
Definition: globals.c:58
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:156
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:170
struct timeval instr_time
Definition: instr_time.h:150
#define INSTR_TIME_GET_MILLISEC(t)
Definition: instr_time.h:202
#define close(a)
Definition: win32.h:12
#define write(a, b, c)
Definition: win32.h:14
#define read(a, b, c)
Definition: win32.h:13
void proc_exit(int code)
Definition: ipc.c:104
void InitializeLatchWaitSet(void)
Definition: latch.c:305
int WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock, long timeout, uint32 wait_event_info)
Definition: latch.c:524
#define LatchWaitSetLatchPos
Definition: latch.c:158
void OwnLatch(Latch *latch)
Definition: latch.c:422
void DisownLatch(Latch *latch)
Definition: latch.c:448
int GetNumRegisteredWaitEvents(WaitEventSet *set)
Definition: latch.c:2106
WaitEventSet * CreateWaitEventSet(MemoryContext context, int nevents)
Definition: latch.c:707
void InitSharedLatch(Latch *latch)
Definition: latch.c:389
void InitializeLatchSupport(void)
Definition: latch.c:207
static WaitEventSet * LatchWaitSet
Definition: latch.c:155
void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
Definition: latch.c:972
static int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, WaitEvent *occurred_events, int nevents)
void SetLatch(Latch *latch)
Definition: latch.c:591
void ShutdownLatchSupport(void)
Definition: latch.c:323
bool WaitEventSetCanReportClosed(void)
Definition: latch.c:2091
void InitLatch(Latch *latch)
Definition: latch.c:353
int AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch, void *user_data)
Definition: latch.c:886
int WaitEventSetWait(WaitEventSet *set, long timeout, WaitEvent *occurred_events, int nevents, uint32 wait_event_info)
Definition: latch.c:1345
static volatile sig_atomic_t waiting
Definition: latch.c:162
void FreeWaitEventSet(WaitEventSet *set)
Definition: latch.c:821
void ResetLatch(Latch *latch)
Definition: latch.c:683
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:476
#define WL_SOCKET_READABLE
Definition: latch.h:126
#define WL_TIMEOUT
Definition: latch.h:128
#define WL_SOCKET_CLOSED
Definition: latch.h:137
#define WL_EXIT_ON_PM_DEATH
Definition: latch.h:130
#define WL_LATCH_SET
Definition: latch.h:125
#define WL_SOCKET_CONNECTED
Definition: latch.h:135
#define WL_POSTMASTER_DEATH
Definition: latch.h:129
#define WL_SOCKET_WRITEABLE
Definition: latch.h:127
#define WL_SOCKET_MASK
Definition: latch.h:138
Assert(fmt[strlen(fmt) - 1] !='\n')
void pfree(void *pointer)
Definition: mcxt.c:1175
MemoryContext TopMemoryContext
Definition: mcxt.c:48
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:906
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
#define InvalidPid
Definition: miscadmin.h:32
const void * data
static time_t start_time
Definition: pg_ctl.c:99
static char * buf
Definition: pg_test_fsync.c:67
bool PostmasterIsAliveInternal(void)
Definition: pmsignal.c:344
#define PostmasterIsAlive()
Definition: pmsignal.h:102
int pgsocket
Definition: port.h:29
#define PGINVALID_SOCKET
Definition: port.h:31
int postmaster_alive_fds[2]
Definition: postmaster.c:564
#define POSTMASTER_FD_WATCH
Definition: postmaster.h:43
char * c
static int fd(const char *x, int i)
Definition: preproc-init.c:105
void pgwin32_dispatch_queued_signals(void)
Definition: signal.c:118
HANDLE pgwin32_signal_event
Definition: signal.c:27
pqsigfunc pqsignal(int signum, pqsigfunc handler)
Definition: signal.c:180
Definition: latch.h:111
sig_atomic_t is_set
Definition: latch.h:112
sig_atomic_t maybe_sleeping
Definition: latch.h:113
bool is_shared
Definition: latch.h:114
int owner_pid
Definition: latch.h:115
Latch * latch
Definition: latch.c:121
bool exit_on_postmaster_death
Definition: latch.c:129
int nevents
Definition: latch.c:106
int latch_pos
Definition: latch.c:122
int nevents_space
Definition: latch.c:107
WaitEvent * events
Definition: latch.c:113
pgsocket fd
Definition: latch.h:147
int pos
Definition: latch.h:145
void * user_data
Definition: latch.h:148
uint32 events
Definition: latch.h:146
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: wait_event.h:266
static void pgstat_report_wait_end(void)
Definition: wait_event.h:282
#define EINTR
Definition: win32_port.h:351
#define EWOULDBLOCK
Definition: win32_port.h:357
#define kill(pid, sig)
Definition: win32_port.h:464
#define SIG_IGN
Definition: win32_port.h:164
#define EAGAIN
Definition: win32_port.h:349