/*-------------------------------------------------------------------------
 *
 * latch.c
 *	  Routines for inter-process latches
 *
 * The poll() implementation uses the so-called self-pipe trick to overcome the
 * race condition involved with poll() and setting a global flag in the signal
 * handler. When a latch is set and the current process is waiting for it, the
 * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
 * A signal by itself doesn't interrupt poll() on all platforms, and even on
 * platforms where it does, a signal that arrives just before the poll() call
 * does not prevent poll() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes poll() to return
 * immediately even if the signal arrives before poll() begins.
 *
 * The epoll() implementation overcomes the race with a different technique: it
 * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
 * don't need to register a signal handler or create our own self-pipe. We
 * assume that any system that has Linux epoll() also has Linux signalfd().
 *
 * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
 *
 * The Windows implementation uses Windows events that are inherited by all
 * postmaster child processes. There's no need for the self-pipe trick there.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/storage/ipc/latch.c
 *
 *-------------------------------------------------------------------------
 */
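/*
 * A minimal usage sketch (illustrative, not part of this file; have_work()
 * and do_work() are hypothetical) of the wait-loop protocol that the wakeup
 * techniques above make race-free:
 *
 *		for (;;)
 *		{
 *			ResetLatch(MyLatch);
 *			if (have_work())
 *				do_work();
 *			else
 *				(void) WaitLatch(MyLatch,
 *								 WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
 *								 -1, WAIT_EVENT_PG_SLEEP);
 *			CHECK_FOR_INTERRUPTS();
 *		}
 *
 * If SetLatch() fires between the have_work() check and the kernel sleep
 * inside WaitLatch(), the pipe byte / signalfd / kqueue / Windows event
 * described above ensures the wait returns promptly rather than hanging.
 */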
#include "postgres.h"

#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#endif
#ifdef HAVE_SYS_SIGNALFD_H
#include <sys/signalfd.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif

#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "portability/instr_time.h"
#include "postmaster/postmaster.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/shmem.h"
#include "utils/memutils.h"

/*
 * Select the fd readiness primitive to use. Normally the "most modern"
 * primitive supported by the OS will be used, but for testing it can be
 * useful to manually specify the used primitive. If desired, just add a
 * define somewhere before this block.
 */
#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
	defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
/* don't overwrite manual choice */
#elif defined(HAVE_SYS_EPOLL_H)
#define WAIT_USE_EPOLL
#elif defined(HAVE_KQUEUE)
#define WAIT_USE_KQUEUE
#elif defined(HAVE_POLL)
#define WAIT_USE_POLL
#elif WIN32
#define WAIT_USE_WIN32
#else
#error "no wait set implementation available"
#endif
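/*
 * For instance, to force the poll() implementation for testing, one could
 * add, above this block:
 *
 *		#define WAIT_USE_POLL
 */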
/*
 * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
 * available. We avoid signalfd on illumos for now based on problem reports.
 * For testing the choice can also be manually specified.
 */
#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
/* don't overwrite manual choice */
#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H) && \
	!defined(__illumos__)
#define WAIT_USE_SIGNALFD
#else
#define WAIT_USE_SELF_PIPE
#endif
#endif
/* typedef in latch.h */
struct WaitEventSet
{
	int			nevents;		/* number of registered events */
	int			nevents_space;	/* maximum number of events in this set */

	/*
	 * Array, of nevents_space length, storing the definition of events this
	 * set is waiting for.
	 */
	WaitEvent  *events;

	/*
	 * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
	 * said latch, and latch_pos the offset in the ->events array. This is
	 * useful because we check the state of the latch before performing
	 * syscalls related to waiting.
	 */
	Latch	   *latch;
	int			latch_pos;

	/*
	 * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
	 * is set so that we'll exit immediately if postmaster death is detected,
	 * instead of returning.
	 */
	bool		exit_on_postmaster_death;

#if defined(WAIT_USE_EPOLL)
	int			epoll_fd;
	/* epoll_wait returns events in a user provided array, allocate once */
	struct epoll_event *epoll_ret_events;
#elif defined(WAIT_USE_KQUEUE)
	int			kqueue_fd;
	/* kevent returns events in a user provided array, allocate once */
	struct kevent *kqueue_ret_events;
	bool		report_postmaster_not_running;
#elif defined(WAIT_USE_POLL)
	/* poll expects events to be waited on every poll() call, prepare once */
	struct pollfd *pollfds;
#elif defined(WAIT_USE_WIN32)

	/*
	 * Array of windows events. The first element always contains
	 * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
	 * event->pos + 1).
	 */
	HANDLE	   *handles;
#endif
};
/* A common WaitEventSet used to implement WaitLatch() */
static WaitEventSet *LatchWaitSet;

/* The position of the latch in LatchWaitSet. */
#define LatchWaitSetLatchPos 0

#ifndef WIN32
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;
#endif
#ifdef WAIT_USE_SIGNALFD
/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
static int	signal_fd = -1;
#endif

#ifdef WAIT_USE_SELF_PIPE
/* Read and write ends of the self-pipe */
static int	selfpipe_readfd = -1;
static int	selfpipe_writefd = -1;

/* Process owning the self-pipe --- needed for checking purposes */
static int	selfpipe_owner_pid = 0;

/* Private function prototypes */
static void latch_sigurg_handler(SIGNAL_ARGS);
static void sendSelfPipeByte(void);
#endif

#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
static void drain(void);
#endif

#if defined(WAIT_USE_EPOLL)
static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
#elif defined(WAIT_USE_KQUEUE)
static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
#elif defined(WAIT_USE_POLL)
static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
#elif defined(WAIT_USE_WIN32)
static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
#endif

static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
										WaitEvent *occurred_events, int nevents);
/*
 * Initialize the process-local latch infrastructure.
 *
 * This must be called once during startup of any process that can wait on
 * latches, before it issues any InitLatch() or OwnLatch() calls.
 */
void
InitializeLatchSupport(void)
{
#if defined(WAIT_USE_SELF_PIPE)
	int			pipefd[2];

	if (IsUnderPostmaster)
	{
		/*
		 * We might have inherited connections to a self-pipe created by the
		 * postmaster. It's critical that child processes create their own
		 * self-pipes, of course, and we really want them to close the
		 * inherited FDs for safety's sake.
		 */
		if (selfpipe_owner_pid != 0)
		{
			/* Assert we go through here but once in a child process */
			Assert(selfpipe_owner_pid != MyProcPid);
			/* Release postmaster's pipe FDs; ignore any error */
			(void) close(selfpipe_readfd);
			(void) close(selfpipe_writefd);
			/* Clean up, just for safety's sake; we'll set these below */
			selfpipe_readfd = selfpipe_writefd = -1;
			selfpipe_owner_pid = 0;
			/* Keep fd.c's accounting straight */
			ReleaseExternalFD();
			ReleaseExternalFD();
		}
		else
		{
			/*
			 * Postmaster didn't create a self-pipe ... or else we're in an
			 * EXEC_BACKEND build, in which case it doesn't matter since the
			 * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
			 * fd.c won't have state to clean up, either.
			 */
			Assert(selfpipe_readfd == -1);
		}
	}
	else
	{
		/* In postmaster or standalone backend, assert we do this but once */
		Assert(selfpipe_readfd == -1);
		Assert(selfpipe_owner_pid == 0);
	}

	/*
	 * Set up the self-pipe that allows a signal handler to wake up the
	 * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
	 * that SetLatch won't block if the event has already been set many times
	 * filling the kernel buffer. Make the read-end non-blocking too, so that
	 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
	 * Also, make both FDs close-on-exec, since we surely do not want any
	 * child processes messing with them.
	 */
	if (pipe(pipefd) < 0)
		elog(FATAL, "pipe() failed: %m");
	if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
	if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");

	selfpipe_readfd = pipefd[0];
	selfpipe_writefd = pipefd[1];
	selfpipe_owner_pid = MyProcPid;

	/* Tell fd.c about these two long-lived FDs */
	ReserveExternalFD();
	ReserveExternalFD();

	pqsignal(SIGURG, latch_sigurg_handler);
#endif

#ifdef WAIT_USE_SIGNALFD
	sigset_t	signalfd_mask;

	if (IsUnderPostmaster)
	{
		/*
		 * It would probably be safe to re-use the inherited signalfd since
		 * signalfds only see the current process's pending signals, but it
		 * seems less surprising to close it and create our own.
		 */
		if (signal_fd != -1)
		{
			/* Release postmaster's signal FD; ignore any error */
			(void) close(signal_fd);
			signal_fd = -1;
			ReleaseExternalFD();
		}
	}

	/* Block SIGURG, because we'll receive it through a signalfd. */
	sigaddset(&UnBlockSig, SIGURG);

	/* Set up the signalfd to receive SIGURG notifications. */
	sigemptyset(&signalfd_mask);
	sigaddset(&signalfd_mask, SIGURG);
	signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
	if (signal_fd < 0)
		elog(FATAL, "signalfd() failed");
	ReserveExternalFD();
#endif

#ifdef WAIT_USE_KQUEUE
	/* Ignore SIGURG, because we'll receive it via kqueue. */
	pqsignal(SIGURG, SIG_IGN);
#endif
}
void
InitializeLatchWaitSet(void)
{
	int			latch_pos PG_USED_FOR_ASSERTS_ONLY;

	Assert(LatchWaitSet == NULL);

	/* Set up the WaitEventSet used by WaitLatch(). */
	LatchWaitSet = CreateWaitEventSet(TopMemoryContext, 2);
	latch_pos = AddWaitEventToSet(LatchWaitSet, WL_LATCH_SET, PGINVALID_SOCKET,
								  MyLatch, NULL);
	if (IsUnderPostmaster)
		AddWaitEventToSet(LatchWaitSet, WL_EXIT_ON_PM_DEATH,
						  PGINVALID_SOCKET, NULL, NULL);

	Assert(latch_pos == LatchWaitSetLatchPos);
}
void
ShutdownLatchSupport(void)
{
#if defined(WAIT_USE_POLL)
	pqsignal(SIGURG, SIG_IGN);
#endif

	if (LatchWaitSet)
	{
		FreeWaitEventSet(LatchWaitSet);
		LatchWaitSet = NULL;
	}

#if defined(WAIT_USE_SELF_PIPE)
	close(selfpipe_readfd);
	close(selfpipe_writefd);
	selfpipe_readfd = -1;
	selfpipe_writefd = -1;
	selfpipe_owner_pid = InvalidPid;
#endif

#if defined(WAIT_USE_SIGNALFD)
	close(signal_fd);
	signal_fd = -1;
#endif
}
/*
 * Initialize a process-local latch.
 */
void
InitLatch(Latch *latch)
{
	latch->is_set = false;
	latch->maybe_sleeping = false;
	latch->owner_pid = MyProcPid;
	latch->is_shared = false;

#if defined(WAIT_USE_SELF_PIPE)
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
#elif defined(WAIT_USE_SIGNALFD)
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(signal_fd >= 0);
#elif defined(WAIT_USE_WIN32)
	latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
	if (latch->event == NULL)
		elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
#endif							/* WIN32 */
}
/*
 * Initialize a shared latch that can be set from other processes. The latch
 * is initially owned by no-one; use OwnLatch to associate it with the
 * current process.
 *
 * InitSharedLatch needs to be called in postmaster before forking child
 * processes, usually right after allocating the shared memory block
 * containing the latch with ShmemInitStruct. (The Unix implementation
 * doesn't actually require that, but the Windows one does.) Because of
 * this restriction, we have no concurrency issues to worry about here.
 *
 * Note that other handles created in this module are never marked as
 * inheritable. Thus we do not need to worry about cleaning up child
 * process references to postmaster-private latches or WaitEventSets.
 */
void
InitSharedLatch(Latch *latch)
{
#ifdef WIN32
	SECURITY_ATTRIBUTES sa;

	/*
	 * Set up security attributes to specify that the events are inherited.
	 */
	ZeroMemory(&sa, sizeof(sa));
	sa.nLength = sizeof(sa);
	sa.bInheritHandle = TRUE;

	latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
	if (latch->event == NULL)
		elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
#endif

	latch->is_set = false;
	latch->maybe_sleeping = false;
	latch->owner_pid = 0;
	latch->is_shared = true;
}
/*
 * Associate a shared latch with the current process, allowing it to
 * wait on the latch.
 *
 * Although there is a sanity check for latch-already-owned, we don't do
 * any sort of locking here, meaning that we could fail to detect the error
 * if two processes try to own the same latch at about the same time. If
 * there is any risk of that, caller must provide an interlock to prevent it.
 */
void
OwnLatch(Latch *latch)
{
	int			owner_pid;

	/* Sanity checks */
	Assert(latch->is_shared);

#if defined(WAIT_USE_SELF_PIPE)
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
#elif defined(WAIT_USE_SIGNALFD)
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(signal_fd >= 0);
#endif

	owner_pid = latch->owner_pid;
	if (owner_pid != 0)
		elog(PANIC, "latch already owned by PID %d", owner_pid);

	latch->owner_pid = MyProcPid;
}

/*
 * Disown a shared latch currently owned by the current process.
 */
void
DisownLatch(Latch *latch)
{
	Assert(latch->is_shared);
	Assert(latch->owner_pid == MyProcPid);

	latch->owner_pid = 0;
}
/*
 * Wait for a given latch to be set, or for postmaster death, or until timeout
 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
 * function returns immediately.
 *
 * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
 * is given. Although it is declared as "long", we don't actually support
 * timeouts longer than INT_MAX milliseconds. Note that some extra overhead
 * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
 *
 * The latch must be owned by the current process, ie. it must be a
 * process-local latch initialized with InitLatch, or a shared latch
 * associated with the current process by calling OwnLatch.
 *
 * Returns bit mask indicating which condition(s) caused the wake-up. Note
 * that if multiple wake-up conditions are true, there is no guarantee that
 * we return all of them in one call, but we will return at least one.
 */
int
WaitLatch(Latch *latch, int wakeEvents, long timeout,
		  uint32 wait_event_info)
{
	WaitEvent	event;

	/* Postmaster-managed callers must handle postmaster death somehow. */
	Assert(!IsUnderPostmaster ||
		   (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
		   (wakeEvents & WL_POSTMASTER_DEATH));

	/*
	 * Some callers may have a latch other than MyLatch, or no latch at all,
	 * or want to handle postmaster death differently. It's cheap to assign
	 * those, so just do it every time.
	 */
	if (!(wakeEvents & WL_LATCH_SET))
		latch = NULL;
	ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
	LatchWaitSet->exit_on_postmaster_death =
		((wakeEvents & WL_EXIT_ON_PM_DEATH) != 0);

	if (WaitEventSetWait(LatchWaitSet,
						 (wakeEvents & WL_TIMEOUT) ? timeout : -1,
						 &event, 1,
						 wait_event_info) == 0)
		return WL_TIMEOUT;
	else
		return event.events;
}
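/*
 * Illustrative call (hypothetical caller, arbitrary wait_event_info): wait
 * up to 10 seconds, exiting automatically if the postmaster dies:
 *
 *		int		rc;
 *
 *		rc = WaitLatch(MyLatch,
 *					   WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
 *					   10 * 1000L, WAIT_EVENT_PG_SLEEP);
 *		if (rc & WL_LATCH_SET)
 *			ResetLatch(MyLatch);
 *		else if (rc & WL_TIMEOUT)
 *			;					// handle timeout
 */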
/*
 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
 * conditions.
 *
 * When waiting on a socket, EOF and error conditions always cause the socket
 * to be reported as readable/writable/connected, so that the caller can deal
 * with the condition.
 *
 * wakeEvents must include either WL_EXIT_ON_PM_DEATH for automatic exit
 * if the postmaster dies or WL_POSTMASTER_DEATH for a flag set in the
 * return value if the postmaster dies. The latter is useful for rare cases
 * where some behavior other than immediate exit is needed.
 *
 * NB: These days this is just a wrapper around the WaitEventSet API. When
 * using a latch very frequently, consider creating a longer living
 * WaitEventSet instead; that's more efficient.
 */
int
WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
				  long timeout, uint32 wait_event_info)
{
	int			ret = 0;
	int			rc;
	WaitEvent	event;
	WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);

	if (wakeEvents & WL_TIMEOUT)
		Assert(timeout >= 0);
	else
		timeout = -1;

	if (wakeEvents & WL_LATCH_SET)
		AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
						  latch, NULL);

	/* Postmaster-managed callers must handle postmaster death somehow. */
	Assert(!IsUnderPostmaster ||
		   (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
		   (wakeEvents & WL_POSTMASTER_DEATH));

	if ((wakeEvents & WL_POSTMASTER_DEATH) && IsUnderPostmaster)
		AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
						  NULL, NULL);

	if ((wakeEvents & WL_EXIT_ON_PM_DEATH) && IsUnderPostmaster)
		AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
						  NULL, NULL);

	if (wakeEvents & WL_SOCKET_MASK)
	{
		int			ev;

		ev = wakeEvents & WL_SOCKET_MASK;
		AddWaitEventToSet(set, ev, sock, NULL, NULL);
	}

	rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);

	if (rc == 0)
		ret |= WL_TIMEOUT;
	else
	{
		ret |= event.events & (WL_LATCH_SET |
							   WL_POSTMASTER_DEATH |
							   WL_SOCKET_MASK);
	}

	FreeWaitEventSet(set);

	return ret;
}
/*
 * Sets a latch and wakes up anyone waiting on it.
 *
 * This is cheap if the latch is already set, otherwise not so much.
 *
 * NB: when calling this in a signal handler, be sure to save and restore
 * errno around it. (That's standard practice in most signal handlers, of
 * course, but we used to omit it in handlers that only set a flag.)
 *
 * NB: this function is called from critical sections and signal handlers so
 * throwing an error is not a good idea.
 */
void
SetLatch(Latch *latch)
{
#ifndef WIN32
	pid_t		owner_pid;
#else
	HANDLE		handle;
#endif

	/*
	 * The memory barrier has to be placed here to ensure that any flag
	 * variables possibly changed by this process have been flushed to main
	 * memory, before we check/set is_set.
	 */
	pg_memory_barrier();

	/* Quick exit if already set */
	if (latch->is_set)
		return;

	latch->is_set = true;

	pg_memory_barrier();
	if (!latch->maybe_sleeping)
		return;

#ifndef WIN32

	/*
	 * See if anyone's waiting for the latch. It can be the current process if
	 * we're in a signal handler. We use the self-pipe or SIGURG to ourselves
	 * to wake up WaitEventSetWaitBlock() without races in that case. If it's
	 * another process, send a signal.
	 *
	 * Fetch owner_pid only once, in case the latch is concurrently getting
	 * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
	 * guaranteed to be true! In practice, the effective range of pid_t fits
	 * in a 32 bit integer, and so should be atomic. In the worst case, we
	 * might end up signaling the wrong process. Even then, you're very
	 * unlucky if a process with that bogus pid exists and belongs to
	 * Postgres; and PG database processes should handle excess SIGUSR1
	 * interrupts without a problem anyhow.
	 *
	 * Another sort of race condition that's possible here is for a new
	 * process to own the latch immediately after we look, so we don't signal
	 * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
	 * the standard coding convention of waiting at the bottom of their loops,
	 * not the top, so that they'll correctly process latch-setting events
	 * that happen before they enter the loop.
	 */
	owner_pid = latch->owner_pid;
	if (owner_pid == 0)
		return;
	else if (owner_pid == MyProcPid)
	{
#if defined(WAIT_USE_SELF_PIPE)
		if (waiting)
			sendSelfPipeByte();
#else
		if (waiting)
			kill(MyProcPid, SIGURG);
#endif
	}
	else
		kill(owner_pid, SIGURG);

#else

	/*
	 * See if anyone's waiting for the latch. It can be the current process if
	 * we're in a signal handler.
	 *
	 * Use a local variable here just in case somebody changes the event field
	 * concurrently (which really should not happen).
	 */
	handle = latch->event;
	if (handle)
	{
		SetEvent(handle);

		/*
		 * Note that we silently ignore any errors. We might be in a signal
		 * handler or other critical path where it's not safe to call elog().
		 */
	}
#endif
}
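/*
 * The errno-saving pattern the first note above calls for, as it would
 * appear in a typical signal handler (sketch; the handler name is
 * arbitrary):
 *
 *		static void
 *		handle_wakeup(SIGNAL_ARGS)
 *		{
 *			int			save_errno = errno;
 *
 *			SetLatch(MyLatch);
 *			errno = save_errno;
 *		}
 */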
/*
 * Clear the latch. Calling WaitLatch after this will sleep, unless
 * the latch is set again before the WaitLatch call.
 */
void
ResetLatch(Latch *latch)
{
	/* Only the owner should reset the latch */
	Assert(latch->owner_pid == MyProcPid);
	Assert(latch->maybe_sleeping == false);

	latch->is_set = false;

	/*
	 * Ensure that the write to is_set gets flushed to main memory before we
	 * examine any flag variables. Otherwise a concurrent SetLatch might
	 * falsely conclude that it needn't signal us, even though we have missed
	 * seeing some flag updates that SetLatch was supposed to inform us of.
	 */
	pg_memory_barrier();
}
/*
 * Create a WaitEventSet with space for nevents different events to wait for.
 *
 * These events can then be efficiently waited upon together, using
 * WaitEventSetWait().
 */
WaitEventSet *
CreateWaitEventSet(MemoryContext context, int nevents)
{
	WaitEventSet *set;
	char	   *data;
	Size		sz = 0;

	/*
	 * Use MAXALIGN size/alignment to guarantee that later uses of memory are
	 * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
	 * platforms, but earlier allocations like WaitEventSet and WaitEvent
	 * might not be sized to guarantee that when purely using sizeof().
	 */
	sz += MAXALIGN(sizeof(WaitEventSet));
	sz += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
	sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
	sz += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
	sz += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
	/* need space for the pgwin32_signal_event */
	sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
#endif

	data = (char *) MemoryContextAllocZero(context, sz);

	set = (WaitEventSet *) data;
	data += MAXALIGN(sizeof(WaitEventSet));

	set->events = (WaitEvent *) data;
	data += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
	set->epoll_ret_events = (struct epoll_event *) data;
	data += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
	set->kqueue_ret_events = (struct kevent *) data;
	data += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
	set->pollfds = (struct pollfd *) data;
	data += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
	set->handles = (HANDLE *) data;
	data += MAXALIGN(sizeof(HANDLE) * nevents);
#endif

	set->latch = NULL;
	set->nevents_space = nevents;
	set->exit_on_postmaster_death = false;

#if defined(WAIT_USE_EPOLL)
	if (!AcquireExternalFD())
	{
		/* treat this as though epoll_create1 itself returned EMFILE */
		elog(ERROR, "epoll_create1 failed: %m");
	}
	set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (set->epoll_fd < 0)
	{
		ReleaseExternalFD();
		elog(ERROR, "epoll_create1 failed: %m");
	}
#elif defined(WAIT_USE_KQUEUE)
	if (!AcquireExternalFD())
	{
		/* treat this as though kqueue itself returned EMFILE */
		elog(ERROR, "kqueue failed: %m");
	}
	set->kqueue_fd = kqueue();
	if (set->kqueue_fd < 0)
	{
		ReleaseExternalFD();
		elog(ERROR, "kqueue failed: %m");
	}
	if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
	{
		int			save_errno = errno;

		close(set->kqueue_fd);
		ReleaseExternalFD();
		errno = save_errno;
		elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
	}
	set->report_postmaster_not_running = false;
#elif defined(WAIT_USE_WIN32)

	/*
	 * To handle signals while waiting, we need to add a win32 specific event.
	 * We accounted for the additional event at the top of this routine. See
	 * port/win32/signal.c for more details.
	 *
	 * Note: pgwin32_signal_event should be first to ensure that it will be
	 * reported when multiple events are set. We want to guarantee that
	 * pending signals are serviced.
	 */
	set->handles[0] = pgwin32_signal_event;
	StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
#endif

	return set;
}
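/*
 * Sketch of the longer-lived WaitEventSet usage recommended in
 * WaitLatchOrSocket()'s comment above (assumes a long-lived socket "sock";
 * the wait_event_info value is arbitrary):
 *
 *		WaitEventSet *wes = CreateWaitEventSet(TopMemoryContext, 3);
 *		WaitEvent	event;
 *
 *		AddWaitEventToSet(wes, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
 *		AddWaitEventToSet(wes, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
 *						  NULL, NULL);
 *		AddWaitEventToSet(wes, WL_SOCKET_READABLE, sock, NULL, NULL);
 *
 *		for (;;)
 *		{
 *			(void) WaitEventSetWait(wes, -1, &event, 1,
 *									WAIT_EVENT_CLIENT_READ);
 *			... dispatch on event.events ...
 *		}
 */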
/*
 * Free a previously created WaitEventSet.
 *
 * Note: preferably, this shouldn't have to free any resources that could be
 * inherited across an exec(). If it did, we'd likely leak those resources in
 * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
 * when the FD is created. For the Windows case, we assume that the handles
 * involved are non-inheritable.
 */
void
FreeWaitEventSet(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	close(set->kqueue_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_WIN32)
	WaitEvent  *cur_event;

	for (cur_event = set->events;
		 cur_event < (set->events + set->nevents);
		 cur_event++)
	{
		if (cur_event->events & WL_LATCH_SET)
		{
			/* uses the latch's HANDLE */
		}
		else if (cur_event->events & WL_POSTMASTER_DEATH)
		{
			/* uses PostmasterHandle */
		}
		else
		{
			/* Clean up the event object we created for the socket */
			WSAEventSelect(cur_event->fd, NULL, 0);
			WSACloseEvent(set->handles[cur_event->pos + 1]);
		}
	}
#endif

	pfree(set);
}
/*
 * Free a previously created WaitEventSet in a child process after a fork().
 */
void
FreeWaitEventSetAfterFork(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	/* kqueues are not normally inherited by child processes */
	ReleaseExternalFD();
#endif

	pfree(set);
}
/* ---
 * Add an event to the set. Possible events are:
 * - WL_LATCH_SET: Wait for the latch to be set
 * - WL_POSTMASTER_DEATH: Wait for postmaster to die
 * - WL_SOCKET_READABLE: Wait for socket to become readable,
 *	 can be combined in one event with other WL_SOCKET_* events
 * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
 *	 can be combined with other WL_SOCKET_* events
 * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
 *	 can be combined with other WL_SOCKET_* events (on non-Windows
 *	 platforms, this is the same as WL_SOCKET_WRITEABLE)
 * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
 *	 can be combined with other WL_SOCKET_* events (on non-Windows
 *	 platforms, this is the same as WL_SOCKET_READABLE)
 * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
 * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
 *
 * Returns the offset in WaitEventSet->events (starting from 0), which can be
 * used to modify previously added wait events using ModifyWaitEvent().
 *
 * In the WL_LATCH_SET case the latch must be owned by the current process,
 * i.e. it must be a process-local latch initialized with InitLatch, or a
 * shared latch associated with the current process by calling OwnLatch.
 *
 * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
 * conditions cause the socket to be reported as readable/writable/connected,
 * so that the caller can deal with the condition.
 *
 * The user_data pointer specified here will be set for the events returned
 * by WaitEventSetWait(), allowing the caller to easily associate additional
 * data with events.
 */
int
AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
				  void *user_data)
{
	WaitEvent  *event;

	/* not enough space */
	Assert(set->nevents < set->nevents_space);

	if (events == WL_EXIT_ON_PM_DEATH)
	{
		events = WL_POSTMASTER_DEATH;
		set->exit_on_postmaster_death = true;
	}

	if (latch)
	{
		if (latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		if (set->latch)
			elog(ERROR, "cannot wait on more than one latch");
		if ((events & WL_LATCH_SET) != WL_LATCH_SET)
			elog(ERROR, "latch events only support being set");
	}
	else
	{
		if (events & WL_LATCH_SET)
			elog(ERROR, "cannot wait on latch without a specified latch");
	}

	/* waiting for socket readiness without a socket indicates a bug */
	if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
		elog(ERROR, "cannot wait on socket event without a socket");

	event = &set->events[set->nevents];
	event->pos = set->nevents++;
	event->fd = fd;
	event->events = events;
	event->user_data = user_data;
#ifdef WIN32
	event->reset = false;
#endif

	if (events == WL_LATCH_SET)
	{
		set->latch = latch;
		set->latch_pos = event->pos;
#if defined(WAIT_USE_SELF_PIPE)
		event->fd = selfpipe_readfd;
#elif defined(WAIT_USE_SIGNALFD)
		event->fd = signal_fd;
#else
		event->fd = PGINVALID_SOCKET;
#ifdef WAIT_USE_EPOLL
		return event->pos;
#endif
#endif
	}
	else if (events == WL_POSTMASTER_DEATH)
	{
#ifndef WIN32
		event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
#endif
	}

	/* perform wait primitive specific initialization, if needed */
#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, 0);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif

	return event->pos;
}
/*
 * Change the event mask and, in the WL_LATCH_SET case, the latch associated
 * with the WaitEvent. The latch may be changed to NULL to disable the latch
 * temporarily, and then set back to a latch later.
 *
 * 'pos' is the id returned by AddWaitEventToSet.
 */
void
ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
	WaitEvent  *event;
#if defined(WAIT_USE_KQUEUE)
	int			old_events;
#endif

	Assert(pos < set->nevents);

	event = &set->events[pos];
#if defined(WAIT_USE_KQUEUE)
	old_events = event->events;
#endif

	/*
	 * If neither the event mask nor the associated latch changes, return
	 * early. That's an important optimization for some sockets, where
	 * ModifyWaitEvent is frequently used to switch from waiting for reads to
	 * waiting on writes.
	 */
	if (events == event->events &&
		(!(event->events & WL_LATCH_SET) || set->latch == latch))
		return;

	if (event->events & WL_LATCH_SET &&
		events != event->events)
	{
		elog(ERROR, "cannot modify latch event");
	}

	if (event->events & WL_POSTMASTER_DEATH)
	{
		elog(ERROR, "cannot modify postmaster death event");
	}

	/* FIXME: validate event mask */
	event->events = events;

	if (events == WL_LATCH_SET)
	{
		if (latch && latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		set->latch = latch;

		/*
		 * On Unix, we don't need to modify the kernel object because the
		 * underlying pipe (if there is one) is the same for all latches so we
		 * can return immediately. On Windows, we need to update our array of
		 * handles, but we leave the old one in place and tolerate spurious
		 * wakeups if the latch is disabled.
		 */
#if defined(WAIT_USE_WIN32)
		if (!latch)
			return;
#else
		return;
#endif
	}

#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, old_events);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif
}
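/*
 * The read-to-write switch mentioned above, in sketch form (assumes "pos"
 * was returned by AddWaitEventToSet() for a socket event in set "wes"):
 *
 *		ModifyWaitEvent(wes, pos, WL_SOCKET_WRITEABLE, NULL);
 *		...
 *		ModifyWaitEvent(wes, pos, WL_SOCKET_READABLE, NULL);
 */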
#if defined(WAIT_USE_EPOLL)
/*
 * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
 */
static void
WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
{
	struct epoll_event epoll_ev;
	int			rc;

	/* pointer to our event, returned by epoll_wait */
	epoll_ev.data.ptr = event;
	/* always wait for errors */
	epoll_ev.events = EPOLLERR | EPOLLHUP;

	/* prepare pollfd entry once */
	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		epoll_ev.events |= EPOLLIN;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		epoll_ev.events |= EPOLLIN;
	}
	else
	{
		Assert(event->fd != PGINVALID_SOCKET);
		Assert(event->events & (WL_SOCKET_READABLE |
								WL_SOCKET_WRITEABLE |
								WL_SOCKET_CLOSED));

		if (event->events & WL_SOCKET_READABLE)
			epoll_ev.events |= EPOLLIN;
		if (event->events & WL_SOCKET_WRITEABLE)
			epoll_ev.events |= EPOLLOUT;
		if (event->events & WL_SOCKET_CLOSED)
			epoll_ev.events |= EPOLLRDHUP;
	}

	/*
	 * Even though unused, we also pass epoll_ev as the data argument if
	 * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
	 * requiring that, and actually it makes the code simpler...
	 */
	rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);

	if (rc < 0)
		ereport(ERROR,
				(errcode_for_socket_access(),
				 errmsg("%s() failed: %m",
						"epoll_ctl")));
}
#endif
#if defined(WAIT_USE_POLL)
static void
WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
{
	struct pollfd *pollfd = &set->pollfds[event->pos];

	pollfd->revents = 0;
	pollfd->fd = event->fd;

	/* prepare pollfd entry once */
	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		pollfd->events = POLLIN;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		pollfd->events = POLLIN;
	}
	else
	{
		Assert(event->events & (WL_SOCKET_READABLE |
								WL_SOCKET_WRITEABLE |
								WL_SOCKET_CLOSED));
		pollfd->events = 0;
		if (event->events & WL_SOCKET_READABLE)
			pollfd->events |= POLLIN;
		if (event->events & WL_SOCKET_WRITEABLE)
			pollfd->events |= POLLOUT;
#ifdef POLLRDHUP
		if (event->events & WL_SOCKET_CLOSED)
			pollfd->events |= POLLRDHUP;
#endif
	}

	Assert(event->fd != PGINVALID_SOCKET);
}
#endif
#if defined(WAIT_USE_KQUEUE)

/*
 * On most BSD family systems, the udata member of struct kevent is of type
 * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
 * NetBSD has it as intptr_t, so here we wallpaper over that difference with
 * an lvalue cast.
 */
#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))

static inline void
WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
						 WaitEvent *event)
{
	k_ev->ident = event->fd;
	k_ev->filter = filter;
	k_ev->flags = action;
	k_ev->fflags = 0;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
{
	/* For now postmaster death can only be added, not removed. */
	k_ev->ident = PostmasterPid;
	k_ev->filter = EVFILT_PROC;
	k_ev->flags = EV_ADD;
	k_ev->fflags = NOTE_EXIT;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
{
	/* For now latch can only be added, not removed. */
	k_ev->ident = SIGURG;
	k_ev->filter = EVFILT_SIGNAL;
	k_ev->flags = EV_ADD;
	k_ev->fflags = 0;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

/*
 * old_events is the previous event mask, used to compute what has changed.
 */
static void
WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
{
	int			rc;
	struct kevent k_ev[2];
	int			count = 0;
	bool		new_filt_read = false;
	bool		old_filt_read = false;
	bool		new_filt_write = false;
	bool		old_filt_write = false;

	if (old_events == event->events)
		return;

	Assert(event->events != WL_LATCH_SET || set->latch != NULL);
	Assert(event->events == WL_LATCH_SET ||
		   event->events == WL_POSTMASTER_DEATH ||
		   (event->events & (WL_SOCKET_READABLE |
							 WL_SOCKET_WRITEABLE |
							 WL_SOCKET_CLOSED)));

	if (event->events == WL_POSTMASTER_DEATH)
	{
		/*
		 * Unlike all the other implementations, we detect postmaster death
		 * using process notification instead of waiting on the postmaster
		 * alive pipe.
		 */
		WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
	}
	else if (event->events == WL_LATCH_SET)
	{
		/* We detect latch wakeup using a signal event. */
		WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
	}
	else
	{
		/*
		 * We need to compute the adds and deletes required to get from the
		 * old event mask to the new event mask, since kevent treats readable
		 * and writable as separate events.
		 */
		if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
			old_filt_read = true;
		if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
			new_filt_read = true;
		if (old_events & WL_SOCKET_WRITEABLE)
			old_filt_write = true;
		if (event->events & WL_SOCKET_WRITEABLE)
			new_filt_write = true;
		if (old_filt_read && !new_filt_read)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
									 event);
		else if (!old_filt_read && new_filt_read)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
									 event);
		if (old_filt_write && !new_filt_write)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
									 event);
		else if (!old_filt_write && new_filt_write)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
									 event);
	}

	/* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
	if (count == 0)
		return;

	Assert(count <= 2);

	rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);

	/*
	 * When adding the postmaster's pid, we have to consider that it might
	 * already have exited and perhaps even been replaced by another process
	 * with the same pid. If so, we have to defer reporting this as an event
	 * until the next call to WaitEventSetWaitBlock().
	 */

	if (rc < 0)
	{
		if (event->events == WL_POSTMASTER_DEATH &&
			(errno == ESRCH || errno == EACCES))
			set->report_postmaster_not_running = true;
		else
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"kevent")));
	}
	else if (event->events == WL_POSTMASTER_DEATH &&
			 PostmasterPid != getppid() &&
			 !PostmasterIsAlive())
	{
		/*
		 * The extra PostmasterIsAliveInternal() check prevents false alarms
		 * on systems that give a different value for getppid() while being
		 * traced by a debugger.
		 */
		set->report_postmaster_not_running = true;
	}
}

#endif
#if defined(WAIT_USE_WIN32)
static void
WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
{
	HANDLE	   *handle = &set->handles[event->pos + 1];

	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		*handle = set->latch->event;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		*handle = PostmasterHandle;
	}
	else
	{
		int			flags = FD_CLOSE;	/* always check for errors/EOF */

		if (event->events & WL_SOCKET_READABLE)
			flags |= FD_READ;
		if (event->events & WL_SOCKET_WRITEABLE)
			flags |= FD_WRITE;
		if (event->events & WL_SOCKET_CONNECTED)
			flags |= FD_CONNECT;
		if (event->events & WL_SOCKET_ACCEPT)
			flags |= FD_ACCEPT;

		if (*handle == WSA_INVALID_EVENT)
		{
			*handle = WSACreateEvent();
			if (*handle == WSA_INVALID_EVENT)
				elog(ERROR, "failed to create event for socket: error code %d",
					 WSAGetLastError());
		}
		if (WSAEventSelect(event->fd, *handle, flags) != 0)
			elog(ERROR, "failed to set up event for socket: error code %d",
				 WSAGetLastError());

		Assert(event->fd != PGINVALID_SOCKET);
	}
}
#endif
/*
 * Wait for events added to the set to happen, or until the timeout is
 * reached. At most nevents occurred events are returned.
 *
 * If timeout = -1, block until an event occurs; if 0, check sockets for
 * readiness, but don't block; if > 0, block for at most timeout milliseconds.
 *
 * Returns the number of events that occurred, or 0 if the timeout was
 * reached.
 *
 * Returned events will have the fd, pos, user_data fields set to the
 * values associated with the registered event.
 */
int
WaitEventSetWait(WaitEventSet *set, long timeout,
				 WaitEvent *occurred_events, int nevents,
				 uint32 wait_event_info)
{
	int			returned_events = 0;
	instr_time	start_time;
	instr_time	cur_time;
	long		cur_timeout = -1;

	Assert(nevents > 0);

	/*
	 * Initialize timeout if requested. We must record the current time so
	 * that we can determine the remaining timeout if interrupted.
	 */
	if (timeout >= 0)
	{
		INSTR_TIME_SET_CURRENT(start_time);
		Assert(timeout >= 0 && timeout <= INT_MAX);
		cur_timeout = timeout;
	}
	else
		INSTR_TIME_SET_ZERO(start_time);

	pgstat_report_wait_start(wait_event_info);

#ifndef WIN32
	waiting = true;
#else
	/* Ensure that signals are serviced even if latch is already set */
	pgwin32_dispatch_queued_signals();
#endif
	while (returned_events == 0)
	{
		int			rc;

		/*
		 * Check if the latch is set already. If so, leave the loop
		 * immediately, avoid blocking again. We don't attempt to report any
		 * other events that might also be satisfied.
		 *
		 * If someone sets the latch between this and the
		 * WaitEventSetWaitBlock() below, the setter will write a byte to the
		 * pipe (or signal us and the signal handler will do that), and the
		 * readiness routine will return immediately.
		 *
		 * On unix, if there's a pending byte in the self pipe, we'll notice
		 * whenever blocking. Only clearing the pipe in that case avoids
		 * having to drain it every time WaitLatchOrSocket() is used. Should
		 * the pipe-buffer fill up we're still ok, because the pipe is in
		 * nonblocking mode. It's unlikely for that to happen, because the
		 * self pipe isn't filled unless we're blocking (waiting = true), or
		 * from inside a signal handler in latch_sigurg_handler().
		 *
		 * On windows, we'll also notice if there's a pending event for the
		 * latch when blocking, but there's no danger of anything filling up,
		 * as "Setting an event that is already set has no effect.".
		 *
		 * Note: we assume that the kernel calls involved in latch management
		 * will provide adequate synchronization on machines with weak memory
		 * ordering, so that we cannot miss seeing is_set if a notification
		 * has already been queued.
		 */
		if (set->latch && !set->latch->is_set)
		{
			/* about to sleep on a latch */
			set->latch->maybe_sleeping = true;
			pg_memory_barrier();
			/* and recheck */
		}

		if (set->latch && set->latch->is_set)
		{
			occurred_events->fd = PGINVALID_SOCKET;
			occurred_events->pos = set->latch_pos;
			occurred_events->user_data =
				set->events[set->latch_pos].user_data;
			occurred_events->events = WL_LATCH_SET;
			occurred_events++;
			returned_events++;

			/* could have been set above */
			set->latch->maybe_sleeping = false;

			break;
		}

		/*
		 * Wait for events using the readiness primitive chosen at the top of
		 * this file. If -1 is returned, a timeout has occurred, if 0 we have
		 * to retry, everything >= 1 is the number of returned events.
		 */
		rc = WaitEventSetWaitBlock(set, cur_timeout,
								   occurred_events, nevents);

		if (set->latch)
		{
			Assert(set->latch->maybe_sleeping);
			set->latch->maybe_sleeping = false;
		}

		if (rc == -1)
			break;				/* timeout occurred */
		else
			returned_events = rc;

		/* If we're not done, update cur_timeout for next iteration */
		if (returned_events == 0 && timeout >= 0)
		{
			INSTR_TIME_SET_CURRENT(cur_time);
			INSTR_TIME_SUBTRACT(cur_time, start_time);
			cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
			if (cur_timeout <= 0)
				break;
		}
	}
#ifndef WIN32
	waiting = false;
#endif

	pgstat_report_wait_end();

	return returned_events;
}
#if defined(WAIT_USE_EPOLL)

/*
 * Wait using linux's epoll_wait(2).
 *
 * This is the preferable wait method, as several readiness notifications are
 * delivered, without having to iterate through all of set->events. The
 * returned epoll_event structs contain a pointer to our events, making
 * association easy.
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	int			rc;
	WaitEvent  *cur_event;
	struct epoll_event *cur_epoll_event;

	/* Sleep */
	rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
					Min(nevents, set->nevents_space), cur_timeout);

	/* Check return code */
	if (rc < 0)
	{
		/* EINTR is okay, otherwise complain */
		if (errno != EINTR)
		{
			waiting = false;
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"epoll_wait")));
		}
		return 0;
	}
	else if (rc == 0)
	{
		/* timeout exceeded */
		return -1;
	}

	/*
	 * At least one event occurred, iterate over the returned epoll events
	 * until they're either all processed, or we've returned all the events
	 * the caller desired.
	 */
	for (cur_epoll_event = set->epoll_ret_events;
		 cur_epoll_event < (set->epoll_ret_events + rc) &&
		 returned_events < nevents;
		 cur_epoll_event++)
	{
		/* epoll's data pointer is set to the associated WaitEvent */
		cur_event = (WaitEvent *) cur_epoll_event->data.ptr;

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET &&
			cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
		{
			/* Drain the signalfd. */
			drain();

			if (set->latch && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH &&
				 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
		{
			/*
			 * We expect an EPOLLHUP when the remote end is closed, but
			 * because we don't expect the pipe to become readable or to have
			 * any errors either, treat those cases as postmaster death, too.
			 *
			 * Be paranoid about a spurious event signaling the postmaster as
			 * being dead. There have been reports about that happening with
			 * older primitives (select(2) to be specific), and a spurious
			 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
			 * cost much.
			 */
			if (!PostmasterIsAliveInternal())
			{
				if (set->exit_on_postmaster_death)
					proc_exit(1);
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_POSTMASTER_DEATH;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events & (WL_SOCKET_READABLE |
									  WL_SOCKET_WRITEABLE |
									  WL_SOCKET_CLOSED))
		{
			Assert(cur_event->fd != PGINVALID_SOCKET);

			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
			{
				/* data available in socket, or EOF */
				occurred_events->events |= WL_SOCKET_READABLE;
			}

			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
			{
				/* writable, or EOF */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}

			if ((cur_event->events & WL_SOCKET_CLOSED) &&
				(cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
			{
				/* remote peer shut down, or error */
				occurred_events->events |= WL_SOCKET_CLOSED;
			}

			if (occurred_events->events != 0)
			{
				occurred_events->fd = cur_event->fd;
				occurred_events++;
				returned_events++;
			}
		}
	}

	return returned_events;
}
#elif defined(WAIT_USE_KQUEUE)

/*
 * Wait using kevent(2) on BSD-family systems and macOS.
 *
 * For now this mirrors the epoll code, but in future it could modify the fd
 * set in the same call to kevent as it uses for waiting instead of doing that
 * with separate system calls.
 */
static int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	int			rc;
	WaitEvent  *cur_event;
	struct kevent *cur_kqueue_event;
	struct timespec timeout;
	struct timespec *timeout_p;

	if (cur_timeout < 0)
		timeout_p = NULL;
	else
	{
		timeout.tv_sec = cur_timeout / 1000;
		timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
		timeout_p = &timeout;
	}

	/*
	 * Report postmaster events discovered by WaitEventAdjustKqueue() or an
	 * earlier call to WaitEventSetWait().
	 */
	if (unlikely(set->report_postmaster_not_running))
	{
		if (set->exit_on_postmaster_death)
			proc_exit(1);
		occurred_events->fd = PGINVALID_SOCKET;
		occurred_events->events = WL_POSTMASTER_DEATH;
		return 1;
	}

	/* Sleep */
	rc = kevent(set->kqueue_fd, NULL, 0,
				set->kqueue_ret_events,
				Min(nevents, set->nevents_space),
				timeout_p);

	/* Check return code */
	if (rc < 0)
	{
		/* EINTR is okay, otherwise complain */
		if (errno != EINTR)
		{
			waiting = false;
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"kevent")));
		}
		return 0;
	}
	else if (rc == 0)
	{
		/* timeout exceeded */
		return -1;
	}

	/*
	 * At least one event occurred, iterate over the returned kqueue events
	 * until they're either all processed, or we've returned all the events
	 * the caller desired.
	 */
	for (cur_kqueue_event = set->kqueue_ret_events;
		 cur_kqueue_event < (set->kqueue_ret_events + rc) &&
		 returned_events < nevents;
		 cur_kqueue_event++)
	{
		/* kevent's udata points to the associated WaitEvent */
		cur_event = AccessWaitEvent(cur_kqueue_event);

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET &&
			cur_kqueue_event->filter == EVFILT_SIGNAL)
		{
			if (set->latch && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH &&
				 cur_kqueue_event->filter == EVFILT_PROC &&
				 (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
		{
			/*
			 * The kernel will tell this kqueue object only once about the
			 * exit of the postmaster, so let's remember that for next time so
			 * that we provide level-triggered semantics.
			 */
			set->report_postmaster_not_running = true;

			if (set->exit_on_postmaster_death)
				proc_exit(1);
			occurred_events->fd = PGINVALID_SOCKET;
			occurred_events->events = WL_POSTMASTER_DEATH;
			occurred_events++;
			returned_events++;
		}
		else if (cur_event->events & (WL_SOCKET_READABLE |
									  WL_SOCKET_WRITEABLE |
									  WL_SOCKET_CLOSED))
		{
			Assert(cur_event->fd >= 0);

			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(cur_kqueue_event->filter == EVFILT_READ))
			{
				/* readable, or EOF */
				occurred_events->events |= WL_SOCKET_READABLE;
			}

			if ((cur_event->events & WL_SOCKET_CLOSED) &&
				(cur_kqueue_event->filter == EVFILT_READ) &&
				(cur_kqueue_event->flags & EV_EOF))
			{
				/* the remote peer has shut down */
				occurred_events->events |= WL_SOCKET_CLOSED;
			}

			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(cur_kqueue_event->filter == EVFILT_WRITE))
			{
				/* writable, or EOF */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}

			if (occurred_events->events != 0)
			{
				occurred_events->fd = cur_event->fd;
				occurred_events++;
				returned_events++;
			}
		}
	}

	return returned_events;
}
1799 
1800 #elif defined(WAIT_USE_POLL)
1801 
1802 /*
1803  * Wait using poll(2).
1804  *
1805  * This allows to receive readiness notifications for several events at once,
1806  * but requires iterating through all of set->pollfds.
1807  */
1808 static inline int
1809 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1810  WaitEvent *occurred_events, int nevents)
1811 {
1812  int returned_events = 0;
1813  int rc;
1814  WaitEvent *cur_event;
1815  struct pollfd *cur_pollfd;
1816 
1817  /* Sleep */
1818  rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
1819 
1820  /* Check return code */
1821  if (rc < 0)
1822  {
1823  /* EINTR is okay, otherwise complain */
1824  if (errno != EINTR)
1825  {
1826  waiting = false;
1827  ereport(ERROR,
1829  errmsg("%s() failed: %m",
1830  "poll")));
1831  }
1832  return 0;
1833  }
1834  else if (rc == 0)
1835  {
1836  /* timeout exceeded */
1837  return -1;
1838  }
1839 
1840  for (cur_event = set->events, cur_pollfd = set->pollfds;
1841  cur_event < (set->events + set->nevents) &&
1842  returned_events < nevents;
1843  cur_event++, cur_pollfd++)
1844  {
1845  /* no activity on this FD, skip */
1846  if (cur_pollfd->revents == 0)
1847  continue;
1848 
1849  occurred_events->pos = cur_event->pos;
1850  occurred_events->user_data = cur_event->user_data;
1851  occurred_events->events = 0;
1852 
1853  if (cur_event->events == WL_LATCH_SET &&
1854  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1855  {
1856  /* There's data in the self-pipe, clear it. */
1857  drain();
1858 
1859  if (set->latch && set->latch->is_set)
1860  {
1861  occurred_events->fd = PGINVALID_SOCKET;
1862  occurred_events->events = WL_LATCH_SET;
1863  occurred_events++;
1864  returned_events++;
1865  }
1866  }
1867  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1868  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1869  {
1870  /*
1871  * We expect a POLLHUP when the remote end is closed, but because
1872  * we don't expect the pipe to become readable or to have any
1873  * errors either, treat those cases as postmaster death, too.
1874  *
1875  * Be paranoid about a spurious event signaling the postmaster as
1876  * being dead. There have been reports about that happening with
1877  * older primitives (select(2) to be specific), and a spurious
1878  * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1879  * cost much.
1880  */
1881  if (!PostmasterIsAlive())
1882  {
1883  if (set->exit_on_postmaster_death)
1884  proc_exit(1);
1885  occurred_events->fd = PGINVALID_SOCKET;
1886  occurred_events->events = WL_POSTMASTER_DEATH;
1887  occurred_events++;
1888  returned_events++;
1889  }
1890  }
1891  else if (cur_event->events & (WL_SOCKET_READABLE |
1892  WL_SOCKET_WRITEABLE |
1893  WL_SOCKET_CLOSED))
1894  {
1895  int errflags = POLLHUP | POLLERR | POLLNVAL;
1896 
1897  Assert(cur_event->fd >= PGINVALID_SOCKET);
1898 
1899  if ((cur_event->events & WL_SOCKET_READABLE) &&
1900  (cur_pollfd->revents & (POLLIN | errflags)))
1901  {
1902  /* data available in socket, or EOF */
1903  occurred_events->events |= WL_SOCKET_READABLE;
1904  }
1905 
1906  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1907  (cur_pollfd->revents & (POLLOUT | errflags)))
1908  {
1909  /* writeable, or EOF */
1910  occurred_events->events |= WL_SOCKET_WRITEABLE;
1911  }
1912 
1913 #ifdef POLLRDHUP
1914  if ((cur_event->events & WL_SOCKET_CLOSED) &&
1915  (cur_pollfd->revents & (POLLRDHUP | errflags)))
1916  {
1917  /* remote peer closed, or error */
1918  occurred_events->events |= WL_SOCKET_CLOSED;
1919  }
1920 #endif
1921 
1922  if (occurred_events->events != 0)
1923  {
1924  occurred_events->fd = cur_event->fd;
1925  occurred_events++;
1926  returned_events++;
1927  }
1928  }
1929  }
1930  return returned_events;
1931 }
1932 
1933 #elif defined(WAIT_USE_WIN32)
1934 
1935 /*
1936  * Wait using Windows' WaitForMultipleObjects().
1937  *
1938  * Unfortunately this will only ever return a single readiness notification at
1939  * a time. Note that while the official documentation for
1940  * WaitForMultipleObjects is ambiguous about multiple events being "consumed"
1941  * with a single bWaitAll = FALSE call,
1942  * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273 confirms
1943  * that only one event is "consumed".
1944  */
1945 static inline int
1946 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1947  WaitEvent *occurred_events, int nevents)
1948 {
1949  int returned_events = 0;
1950  DWORD rc;
1951  WaitEvent *cur_event;
1952 
1953  /* Reset any wait events that need it */
1954  for (cur_event = set->events;
1955  cur_event < (set->events + set->nevents);
1956  cur_event++)
1957  {
1958  if (cur_event->reset)
1959  {
1960  WaitEventAdjustWin32(set, cur_event);
1961  cur_event->reset = false;
1962  }
1963 
1964  /*
1965  * Windows does not guarantee to log an FD_WRITE network event
1966  * indicating that more data can be sent unless the previous send()
1967  * failed with WSAEWOULDBLOCK. While our caller might well have made
1968  * such a call, we cannot assume that here. Therefore, if waiting for
1969  * write-ready, force the issue by doing a dummy send(). If the dummy
1970  * send() succeeds, assume that the socket is in fact write-ready, and
1971  * return immediately. Also, if it fails with something other than
1972  * WSAEWOULDBLOCK, return a write-ready indication to let our caller
1973  * deal with the error condition.
1974  */
1975  if (cur_event->events & WL_SOCKET_WRITEABLE)
1976  {
1977  char c;
1978  WSABUF buf;
1979  DWORD sent;
1980  int r;
1981 
1982  buf.buf = &c;
1983  buf.len = 0;
1984 
1985  r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
1986  if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
1987  {
1988  occurred_events->pos = cur_event->pos;
1989  occurred_events->user_data = cur_event->user_data;
1990  occurred_events->events = WL_SOCKET_WRITEABLE;
1991  occurred_events->fd = cur_event->fd;
1992  return 1;
1993  }
1994  }
1995  }
1996 
1997  /*
1998  * Sleep.
1999  *
2000  * Need to wait on ->nevents + 1 handles, because the signal handle is in [0].
2001  */
2002  rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
2003  cur_timeout);
2004 
2005  /* Check return code */
2006  if (rc == WAIT_FAILED)
2007  elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
2008  GetLastError());
2009  else if (rc == WAIT_TIMEOUT)
2010  {
2011  /* timeout exceeded */
2012  return -1;
2013  }
2014 
2015  if (rc == WAIT_OBJECT_0)
2016  {
2017  /* Service newly-arrived signals */
2018  pgwin32_dispatch_queued_signals();
2019  return 0; /* retry */
2020  }
2021 
2022  /*
2023  * With an offset of one, due to the always-present pgwin32_signal_event,
2024  * the handle offset directly corresponds to a wait event.
2025  */
2026  cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
2027 
2028  occurred_events->pos = cur_event->pos;
2029  occurred_events->user_data = cur_event->user_data;
2030  occurred_events->events = 0;
2031 
2032  if (cur_event->events == WL_LATCH_SET)
2033  {
2034  /*
2035  * We cannot use set->latch->event to reset the fired event if we
2036  * aren't waiting on this latch now.
2037  */
2038  if (!ResetEvent(set->handles[cur_event->pos + 1]))
2039  elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
2040 
2041  if (set->latch && set->latch->is_set)
2042  {
2043  occurred_events->fd = PGINVALID_SOCKET;
2044  occurred_events->events = WL_LATCH_SET;
2045  occurred_events++;
2046  returned_events++;
2047  }
2048  }
2049  else if (cur_event->events == WL_POSTMASTER_DEATH)
2050  {
2051  /*
2052  * Postmaster apparently died. Since the consequences of falsely
2053  * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we take
2054  * the trouble to positively verify this with PostmasterIsAlive(),
2055  * even though there is no known reason to think that the event could
2056  * be falsely set on Windows.
2057  */
2058  if (!PostmasterIsAlive())
2059  {
2060  if (set->exit_on_postmaster_death)
2061  proc_exit(1);
2062  occurred_events->fd = PGINVALID_SOCKET;
2063  occurred_events->events = WL_POSTMASTER_DEATH;
2064  occurred_events++;
2065  returned_events++;
2066  }
2067  }
2068  else if (cur_event->events & WL_SOCKET_MASK)
2069  {
2070  WSANETWORKEVENTS resEvents;
2071  HANDLE handle = set->handles[cur_event->pos + 1];
2072 
2073  Assert(cur_event->fd);
2074 
2075  occurred_events->fd = cur_event->fd;
2076 
2077  ZeroMemory(&resEvents, sizeof(resEvents));
2078  if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
2079  elog(ERROR, "failed to enumerate network events: error code %d",
2080  WSAGetLastError());
2081  if ((cur_event->events & WL_SOCKET_READABLE) &&
2082  (resEvents.lNetworkEvents & FD_READ))
2083  {
2084  /* data available in socket */
2085  occurred_events->events |= WL_SOCKET_READABLE;
2086 
2087  /*------
2088  * WaitForMultipleObjects doesn't guarantee that a read event will
2089  * be returned if the latch is set at the same time. Even if it
2090  * did, the caller might drop that event expecting it to reoccur
2091  * on the next call. So, we must force the event to be reset if this
2092  * WaitEventSet is used again in order to avoid an indefinite
2093  * hang. Refer to https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
2094  * for the behavior of socket events.
2095  *------
2096  */
2097  cur_event->reset = true;
2098  }
2099  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
2100  (resEvents.lNetworkEvents & FD_WRITE))
2101  {
2102  /* writeable */
2103  occurred_events->events |= WL_SOCKET_WRITEABLE;
2104  }
2105  if ((cur_event->events & WL_SOCKET_CONNECTED) &&
2106  (resEvents.lNetworkEvents & FD_CONNECT))
2107  {
2108  /* connected */
2109  occurred_events->events |= WL_SOCKET_CONNECTED;
2110  }
2111  if ((cur_event->events & WL_SOCKET_ACCEPT) &&
2112  (resEvents.lNetworkEvents & FD_ACCEPT))
2113  {
2114  /* incoming connection could be accepted */
2115  occurred_events->events |= WL_SOCKET_ACCEPT;
2116  }
2117  if (resEvents.lNetworkEvents & FD_CLOSE)
2118  {
2119  /* EOF/error, so signal all caller-requested socket flags */
2120  occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
2121  }
2122 
2123  if (occurred_events->events != 0)
2124  {
2125  occurred_events++;
2126  returned_events++;
2127  }
2128  }
2129 
2130  return returned_events;
2131 }
2132 #endif
2133 
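/*
 * Editorial note, not part of the original file: to wait on several events
 * at once, callers build a reusable WaitEventSet rather than calling
 * WaitLatch(). A minimal sketch, assuming an already-connected socket sock
 * and the hypothetical WAIT_EVENT_EXAMPLE:
 *
 *     WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 2);
 *     WaitEvent event;
 *
 *     AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
 *     AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);
 *
 *     if (WaitEventSetWait(set, 1000L, &event, 1, WAIT_EVENT_EXAMPLE) == 1)
 *     {
 *         if (event.events & WL_LATCH_SET)
 *             ResetLatch(MyLatch);
 *         else if (event.events & WL_SOCKET_READABLE)
 *             handle_readable_socket();
 *     }
 *
 *     FreeWaitEventSet(set);
 *
 * handle_readable_socket() is a placeholder for caller-specific work.
 */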
2134 /*
2135  * Return whether the current build options can report WL_SOCKET_CLOSED.
2136  */
2137 bool
2138 WaitEventSetCanReportClosed(void)
2139 {
2140 #if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
2141  defined(WAIT_USE_EPOLL) || \
2142  defined(WAIT_USE_KQUEUE)
2143  return true;
2144 #else
2145  return false;
2146 #endif
2147 }
2148 
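/*
 * Editorial note, not part of the original file: callers that want
 * connection-loss detection should gate WL_SOCKET_CLOSED on the capability
 * check above, since for example the poll() build supports it only where
 * POLLRDHUP exists. A minimal sketch:
 *
 *     if (WaitEventSetCanReportClosed())
 *         AddWaitEventToSet(set, WL_SOCKET_CLOSED, sock, NULL, NULL);
 */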
2149 /*
2150  * Get the number of wait events registered in a given WaitEventSet.
2151  */
2152 int
2153 GetNumRegisteredWaitEvents(WaitEventSet *set)
2154 {
2155  return set->nevents;
2156 }
2157 
2158 #if defined(WAIT_USE_SELF_PIPE)
2159 
2160 /*
2161  * SetLatch uses SIGURG to wake up the process waiting on the latch.
2162  *
2163  * Wake up WaitLatch, if we're waiting.
2164  */
2165 static void
2166 latch_sigurg_handler(SIGNAL_ARGS)
2167 {
2168  int save_errno = errno;
2169 
2170  if (waiting)
2171  sendSelfPipeByte();
2172 
2173  errno = save_errno;
2174 }
2175 
2176 /* Send one byte to the self-pipe, to wake up WaitLatch */
2177 static void
2178 sendSelfPipeByte(void)
2179 {
2180  int rc;
2181  char dummy = 0;
2182 
2183 retry:
2184  rc = write(selfpipe_writefd, &dummy, 1);
2185  if (rc < 0)
2186  {
2187  /* If interrupted by signal, just retry */
2188  if (errno == EINTR)
2189  goto retry;
2190 
2191  /*
2192  * If the pipe is full, we don't need to retry, the data that's there
2193  * already is enough to wake up WaitLatch.
2194  */
2195  if (errno == EAGAIN || errno == EWOULDBLOCK)
2196  return;
2197 
2198  /*
2199  * Oops, the write() failed for some other reason. We might be in a
2200  * signal handler, so it's not safe to elog(). We have no choice but to
2201  * silently ignore the error.
2202  */
2203  return;
2204  }
2205 }
2206 
2207 #endif
2208 
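/*
 * Editorial note, not part of the original file: in the self-pipe build,
 * the routines above and below combine into this wakeup chain (a rough
 * summary; SetLatch() itself appears earlier in this file):
 *
 *     SetLatch() sets latch->is_set and delivers SIGURG to the owner;
 *     latch_sigurg_handler() runs there and, if waiting is true, calls
 *     sendSelfPipeByte() to make selfpipe_readfd readable;
 *     the owner's poll() then returns, and the poll() implementation of
 *     WaitEventSetWaitBlock() calls drain() to empty the pipe before
 *     checking latch->is_set.
 */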
2209 #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
2210 
2211 /*
2212  * Read all available data from self-pipe or signalfd.
2213  *
2214  * Note: this is only called when waiting = true. If it fails and doesn't
2215  * return, it must reset that flag first (though ideally, this will never
2216  * happen).
2217  */
2218 static void
2219 drain(void)
2220 {
2221  char buf[1024];
2222  int rc;
2223  int fd;
2224 
2225 #ifdef WAIT_USE_SELF_PIPE
2226  fd = selfpipe_readfd;
2227 #else
2228  fd = signal_fd;
2229 #endif
2230 
2231  for (;;)
2232  {
2233  rc = read(fd, buf, sizeof(buf));
2234  if (rc < 0)
2235  {
2236  if (errno == EAGAIN || errno == EWOULDBLOCK)
2237  break; /* the descriptor is empty */
2238  else if (errno == EINTR)
2239  continue; /* retry */
2240  else
2241  {
2242  waiting = false;
2243 #ifdef WAIT_USE_SELF_PIPE
2244  elog(ERROR, "read() on self-pipe failed: %m");
2245 #else
2246  elog(ERROR, "read() on signalfd failed: %m");
2247 #endif
2248  }
2249  }
2250  else if (rc == 0)
2251  {
2252  waiting = false;
2253 #ifdef WAIT_USE_SELF_PIPE
2254  elog(ERROR, "unexpected EOF on self-pipe");
2255 #else
2256  elog(ERROR, "unexpected EOF on signalfd");
2257 #endif
2258  }
2259  else if (rc < sizeof(buf))
2260  {
2261  /* we successfully drained the pipe; no need to read() again */
2262  break;
2263  }
2264  /* else buffer wasn't big enough, so read again */
2265  }
2266 }
2267 
2268 #endif