1 /*-------------------------------------------------------------------------
2  *
3  * latch.c
4  * Routines for inter-process latches
5  *
6  * The poll() implementation uses the so-called self-pipe trick to overcome the
7  * race condition involved with poll() and setting a global flag in the signal
8  * handler. When a latch is set and the current process is waiting for it, the
9  * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
10  * A signal by itself doesn't interrupt poll() on all platforms, and even on
11  * platforms where it does, a signal that arrives just before the poll() call
12  * does not prevent poll() from entering sleep. An incoming byte on a pipe
13  * however reliably interrupts the sleep, and causes poll() to return
14  * immediately even if the signal arrives before poll() begins.
15  *
16  * The epoll() implementation overcomes the race with a different technique: it
17  * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
18  * don't need to register a signal handler or create our own self-pipe. We
19  * assume that any system that has Linux epoll() also has Linux signalfd().
20  *
21  * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
22  *
23  * The Windows implementation uses Windows events that are inherited by all
24  * postmaster child processes. There's no need for the self-pipe trick there.
25  *
26  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
27  * Portions Copyright (c) 1994, Regents of the University of California
28  *
29  * IDENTIFICATION
30  * src/backend/storage/ipc/latch.c
31  *
32  *-------------------------------------------------------------------------
33  */
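
/*
 * Canonical usage sketch (illustrative only; WAIT_EVENT_SOMETHING stands in
 * for a caller-specific wait-event value): following the convention noted in
 * SetLatch() below, callers reset the latch and then wait at the bottom of
 * their loop, so a latch set between the check for work and the sleep is
 * never lost.
 *
 *		for (;;)
 *		{
 *			ResetLatch(MyLatch);
 *			if (there is work to do)
 *				handle the work;
 *			(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
 *							 -1, WAIT_EVENT_SOMETHING);
 *		}
 */
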
34 #include "postgres.h"
35 
36 #include <fcntl.h>
37 #include <limits.h>
38 #include <signal.h>
39 #include <unistd.h>
40 #ifdef HAVE_SYS_EPOLL_H
41 #include <sys/epoll.h>
42 #endif
43 #ifdef HAVE_SYS_EVENT_H
44 #include <sys/event.h>
45 #endif
46 #ifdef HAVE_POLL_H
47 #include <poll.h>
48 #endif
49 
50 #include "libpq/pqsignal.h"
51 #include "miscadmin.h"
52 #include "pgstat.h"
53 #include "port/atomics.h"
54 #include "portability/instr_time.h"
55 #include "postmaster/postmaster.h"
56 #include "storage/fd.h"
57 #include "storage/ipc.h"
58 #include "storage/latch.h"
59 #include "storage/pmsignal.h"
60 #include "storage/shmem.h"
61 #include "utils/memutils.h"
62 
63 /*
64  * Select the fd readiness primitive to use. Normally the "most modern"
65  * primitive supported by the OS will be used, but for testing it can be
66  * useful to manually specify the used primitive. If desired, just add a
67  * define somewhere before this block.
68  */
69 #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
70  defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
71 /* don't overwrite manual choice */
72 #elif defined(HAVE_SYS_EPOLL_H)
73 #define WAIT_USE_EPOLL
74 #elif defined(HAVE_KQUEUE)
75 #define WAIT_USE_KQUEUE
76 #elif defined(HAVE_POLL)
77 #define WAIT_USE_POLL
78 #elif defined(WIN32)
79 #define WAIT_USE_WIN32
80 #else
81 #error "no wait set implementation available"
82 #endif
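
/*
 * For example, inserting "#define WAIT_USE_POLL" ahead of this block would
 * force the poll() path even where epoll would normally be chosen (a testing
 * sketch of the manual selection described above).
 */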
83 
84 #ifdef WAIT_USE_EPOLL
85 #include <sys/signalfd.h>
86 #endif
87 
88 /* typedef in latch.h */
89 struct WaitEventSet
90 {
91  int nevents; /* number of registered events */
92  int nevents_space; /* maximum number of events in this set */
93 
94  /*
95  * Array, of nevents_space length, storing the definition of events this
96  * set is waiting for.
97  */
98  WaitEvent *events;
99 
100  /*
101  * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
102  * said latch, and latch_pos the offset in the ->events array. This is
103  * useful because we check the state of the latch before performing
104  * syscalls related to waiting.
105  */
106  Latch *latch;
107  int latch_pos;
108 
109  /*
110  * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
111  * is set so that we'll exit immediately if postmaster death is detected,
112  * instead of returning.
113  */
114  bool exit_on_postmaster_death;
115 
116 #if defined(WAIT_USE_EPOLL)
117  int epoll_fd;
118  /* epoll_wait returns events in a user-provided array, allocate once */
119  struct epoll_event *epoll_ret_events;
120 #elif defined(WAIT_USE_KQUEUE)
121  int kqueue_fd;
122  /* kevent returns events in a user-provided array, allocate once */
123  struct kevent *kqueue_ret_events;
124  bool report_postmaster_not_running;
125 #elif defined(WAIT_USE_POLL)
126  /* poll expects events to be waited on every poll() call, prepare once */
127  struct pollfd *pollfds;
128 #elif defined(WAIT_USE_WIN32)
129 
130  /*
131  * Array of Windows events. The first element always contains
132  * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
133  * event->pos + 1).
134  */
135  HANDLE *handles;
136 #endif
137 };
138 
139 /* A common WaitEventSet used to implement WaitLatch() */
140 static WaitEventSet *LatchWaitSet;
141 
142 /* The position of the latch in LatchWaitSet. */
143 #define LatchWaitSetLatchPos 0
144 
145 #ifndef WIN32
146 /* Are we currently in WaitLatch? The signal handler would like to know. */
147 static volatile sig_atomic_t waiting = false;
148 #endif
149 
150 #ifdef WAIT_USE_EPOLL
151 /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
152 static int signal_fd = -1;
153 #endif
154 
155 #if defined(WAIT_USE_POLL)
156 /* Read and write ends of the self-pipe */
157 static int selfpipe_readfd = -1;
158 static int selfpipe_writefd = -1;
159 
160 /* Process owning the self-pipe --- needed for checking purposes */
161 static int selfpipe_owner_pid = 0;
162 
163 /* Private function prototypes */
164 static void latch_sigurg_handler(SIGNAL_ARGS);
165 static void sendSelfPipeByte(void);
166 #endif
167 
168 #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
169 static void drain(void);
170 #endif
171 
172 #if defined(WAIT_USE_EPOLL)
173 static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
174 #elif defined(WAIT_USE_KQUEUE)
175 static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
176 #elif defined(WAIT_USE_POLL)
177 static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
178 #elif defined(WAIT_USE_WIN32)
179 static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
180 #endif
181 
182 static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
183  WaitEvent *occurred_events, int nevents);
184 
185 /*
186  * Initialize the process-local latch infrastructure.
187  *
188  * This must be called once during startup of any process that can wait on
189  * latches, before it issues any InitLatch() or OwnLatch() calls.
190  */
191 void
192 InitializeLatchSupport(void)
193 {
194 #if defined(WAIT_USE_POLL)
195  int pipefd[2];
196 
197  if (IsUnderPostmaster)
198  {
199  /*
200  * We might have inherited connections to a self-pipe created by the
201  * postmaster. It's critical that child processes create their own
202  * self-pipes, of course, and we really want them to close the
203  * inherited FDs for safety's sake.
204  */
205  if (selfpipe_owner_pid != 0)
206  {
207  /* Assert we go through here but once in a child process */
208  Assert(selfpipe_owner_pid != MyProcPid);
209  /* Release postmaster's pipe FDs; ignore any error */
210  (void) close(selfpipe_readfd);
211  (void) close(selfpipe_writefd);
212  /* Clean up, just for safety's sake; we'll set these below */
213  selfpipe_readfd = selfpipe_writefd = -1;
214  selfpipe_owner_pid = 0;
215  /* Keep fd.c's accounting straight */
216  ReleaseExternalFD();
217  ReleaseExternalFD();
218  }
219  else
220  {
221  /*
222  * Postmaster didn't create a self-pipe ... or else we're in an
223  * EXEC_BACKEND build, in which case it doesn't matter since the
224  * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
225  * fd.c won't have state to clean up, either.
226  */
227  Assert(selfpipe_readfd == -1);
228  }
229  }
230  else
231  {
232  /* In postmaster or standalone backend, assert we do this but once */
233  Assert(selfpipe_readfd == -1);
234  Assert(selfpipe_owner_pid == 0);
235  }
236 
237  /*
238  * Set up the self-pipe that allows a signal handler to wake up the
239  * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
240  * that SetLatch won't block if the event has already been set many times
241  * filling the kernel buffer. Make the read-end non-blocking too, so that
242  * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
243  * Also, make both FDs close-on-exec, since we surely do not want any
244  * child processes messing with them.
245  */
246  if (pipe(pipefd) < 0)
247  elog(FATAL, "pipe() failed: %m");
248  if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
249  elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
250  if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
251  elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
252  if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
253  elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
254  if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
255  elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
256 
257  selfpipe_readfd = pipefd[0];
258  selfpipe_writefd = pipefd[1];
259  selfpipe_owner_pid = MyProcPid;
260 
261  /* Tell fd.c about these two long-lived FDs */
262  ReserveExternalFD();
263  ReserveExternalFD();
264 
265  pqsignal(SIGURG, latch_sigurg_handler);
266 #endif
267 
268 #ifdef WAIT_USE_EPOLL
269  sigset_t signalfd_mask;
270 
271  /* Block SIGURG, because we'll receive it through a signalfd. */
272  sigaddset(&UnBlockSig, SIGURG);
273 
274  /* Set up the signalfd to receive SIGURG notifications. */
275  sigemptyset(&signalfd_mask);
276  sigaddset(&signalfd_mask, SIGURG);
277  signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
278  if (signal_fd < 0)
279  elog(FATAL, "signalfd() failed");
280  ReserveExternalFD();
281 #endif
282 
283 #ifdef WAIT_USE_KQUEUE
284  /* Ignore SIGURG, because we'll receive it via kqueue. */
285  pqsignal(SIGURG, SIG_IGN);
286 #endif
287 }
288 
289 void
290 InitializeLatchWaitSet(void)
291 {
292  int latch_pos PG_USED_FOR_ASSERTS_ONLY;
293 
294  Assert(LatchWaitSet == NULL);
295 
296  /* Set up the WaitEventSet used by WaitLatch(). */
297  LatchWaitSet = CreateWaitEventSet(TopMemoryContext, 2);
298  latch_pos = AddWaitEventToSet(LatchWaitSet, WL_LATCH_SET, PGINVALID_SOCKET,
299  MyLatch, NULL);
300  if (IsUnderPostmaster)
301  AddWaitEventToSet(LatchWaitSet, WL_EXIT_ON_PM_DEATH,
302  PGINVALID_SOCKET, NULL, NULL);
303 
304  Assert(latch_pos == LatchWaitSetLatchPos);
305 }
306 
307 void
308 ShutdownLatchSupport(void)
309 {
310 #if defined(WAIT_USE_POLL)
311  pqsignal(SIGURG, SIG_IGN);
312 #endif
313 
314  if (LatchWaitSet)
315  {
316  FreeWaitEventSet(LatchWaitSet);
317  LatchWaitSet = NULL;
318  }
319 
320 #if defined(WAIT_USE_POLL)
321  close(selfpipe_readfd);
322  close(selfpipe_writefd);
323  selfpipe_readfd = -1;
324  selfpipe_writefd = -1;
325  selfpipe_owner_pid = InvalidPid;
326 #endif
327 
328 #if defined(WAIT_USE_EPOLL)
329  close(signal_fd);
330  signal_fd = -1;
331 #endif
332 }
333 
334 /*
335  * Initialize a process-local latch.
336  */
337 void
338 InitLatch(Latch *latch)
339 {
340  latch->is_set = false;
341  latch->maybe_sleeping = false;
342  latch->owner_pid = MyProcPid;
343  latch->is_shared = false;
344 
345 #if defined(WAIT_USE_POLL)
346  /* Assert InitializeLatchSupport has been called in this process */
347  Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
348 #elif defined(WAIT_USE_WIN32)
349  latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
350  if (latch->event == NULL)
351  elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
352 #endif /* WIN32 */
353 }
354 
355 /*
356  * Initialize a shared latch that can be set from other processes. The latch
357  * is initially owned by no-one; use OwnLatch to associate it with the
358  * current process.
359  *
360  * InitSharedLatch needs to be called in postmaster before forking child
361  * processes, usually right after allocating the shared memory block
362  * containing the latch with ShmemInitStruct. (The Unix implementation
363  * doesn't actually require that, but the Windows one does.) Because of
364  * this restriction, we have no concurrency issues to worry about here.
365  *
366  * Note that other handles created in this module are never marked as
367  * inheritable. Thus we do not need to worry about cleaning up child
368  * process references to postmaster-private latches or WaitEventSets.
369  */
370 void
371 InitSharedLatch(Latch *latch)
372 {
373 #ifdef WIN32
374  SECURITY_ATTRIBUTES sa;
375 
376  /*
377  * Set up security attributes to specify that the events are inherited.
378  */
379  ZeroMemory(&sa, sizeof(sa));
380  sa.nLength = sizeof(sa);
381  sa.bInheritHandle = TRUE;
382 
383  latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
384  if (latch->event == NULL)
385  elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
386 #endif
387 
388  latch->is_set = false;
389  latch->maybe_sleeping = false;
390  latch->owner_pid = 0;
391  latch->is_shared = true;
392 }
393 
394 /*
395  * Associate a shared latch with the current process, allowing it to
396  * wait on the latch.
397  *
398  * Although there is a sanity check for latch-already-owned, we don't do
399  * any sort of locking here, meaning that we could fail to detect the error
400  * if two processes try to own the same latch at about the same time. If
401  * there is any risk of that, caller must provide an interlock to prevent it.
402  */
403 void
404 OwnLatch(Latch *latch)
405 {
406  /* Sanity checks */
407  Assert(latch->is_shared);
408 
409 #if defined(WAIT_USE_POLL)
410  /* Assert InitializeLatchSupport has been called in this process */
411  Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
412 #endif
413 
414  if (latch->owner_pid != 0)
415  elog(ERROR, "latch already owned");
416 
417  latch->owner_pid = MyProcPid;
418 }
419 
420 /*
421  * Disown a shared latch currently owned by the current process.
422  */
423 void
424 DisownLatch(Latch *latch)
425 {
426  Assert(latch->is_shared);
427  Assert(latch->owner_pid == MyProcPid);
428 
429  latch->owner_pid = 0;
430 }
431 
432 /*
433  * Wait for a given latch to be set, or for postmaster death, or until timeout
434  * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
435  * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
436  * function returns immediately.
437  *
438  * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
439  * is given. Although it is declared as "long", we don't actually support
440  * timeouts longer than INT_MAX milliseconds. Note that some extra overhead
441  * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
442  *
443  * The latch must be owned by the current process, ie. it must be a
444  * process-local latch initialized with InitLatch, or a shared latch
445  * associated with the current process by calling OwnLatch.
446  *
447  * Returns bit mask indicating which condition(s) caused the wake-up. Note
448  * that if multiple wake-up conditions are true, there is no guarantee that
449  * we return all of them in one call, but we will return at least one.
450  */
451 int
452 WaitLatch(Latch *latch, int wakeEvents, long timeout,
453  uint32 wait_event_info)
454 {
455  WaitEvent event;
456 
457  /* Postmaster-managed callers must handle postmaster death somehow. */
458  Assert(!IsUnderPostmaster ||
459  (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
460  (wakeEvents & WL_POSTMASTER_DEATH));
461 
462  /*
463  * Some callers may have a latch other than MyLatch, or no latch at all,
464  * or want to handle postmaster death differently. It's cheap to assign
465  * those, so just do it every time.
466  */
467  if (!(wakeEvents & WL_LATCH_SET))
468  latch = NULL;
469  ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
470  LatchWaitSet->exit_on_postmaster_death =
471  ((wakeEvents & WL_EXIT_ON_PM_DEATH) != 0);
472 
473  if (WaitEventSetWait(LatchWaitSet,
474  (wakeEvents & WL_TIMEOUT) ? timeout : -1,
475  &event, 1,
476  wait_event_info) == 0)
477  return WL_TIMEOUT;
478  else
479  return event.events;
480 }
481 
482 /*
483  * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
484  * conditions.
485  *
486  * When waiting on a socket, EOF and error conditions always cause the socket
487  * to be reported as readable/writable/connected, so that the caller can deal
488  * with the condition.
489  *
490  * wakeEvents must include either WL_EXIT_ON_PM_DEATH for automatic exit
491  * if the postmaster dies or WL_POSTMASTER_DEATH for a flag set in the
492  * return value if the postmaster dies. The latter is useful for rare cases
493  * where some behavior other than immediate exit is needed.
494  *
495  * NB: These days this is just a wrapper around the WaitEventSet API. When
496  * using a latch very frequently, consider creating a longer living
497  * WaitEventSet instead; that's more efficient.
498  */
499 int
500 WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
501  long timeout, uint32 wait_event_info)
502 {
503  int ret = 0;
504  int rc;
505  WaitEvent event;
506  WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
507 
508  if (wakeEvents & WL_TIMEOUT)
509  Assert(timeout >= 0);
510  else
511  timeout = -1;
512 
513  if (wakeEvents & WL_LATCH_SET)
514  AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
515  latch, NULL);
516 
517  /* Postmaster-managed callers must handle postmaster death somehow. */
518  Assert(!IsUnderPostmaster ||
519  (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
520  (wakeEvents & WL_POSTMASTER_DEATH));
521 
522  if ((wakeEvents & WL_POSTMASTER_DEATH) && IsUnderPostmaster)
523  AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
524  NULL, NULL);
525 
526  if ((wakeEvents & WL_EXIT_ON_PM_DEATH) && IsUnderPostmaster)
527  AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
528  NULL, NULL);
529 
530  if (wakeEvents & WL_SOCKET_MASK)
531  {
532  int ev;
533 
534  ev = wakeEvents & WL_SOCKET_MASK;
535  AddWaitEventToSet(set, ev, sock, NULL, NULL);
536  }
537 
538  rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);
539 
540  if (rc == 0)
541  ret |= WL_TIMEOUT;
542  else
543  {
544  ret |= event.events & (WL_LATCH_SET |
545  WL_POSTMASTER_DEATH |
546  WL_SOCKET_MASK);
547  }
548 
549  FreeWaitEventSet(set);
550 
551  return ret;
552 }
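
/*
 * A sketch of the longer-lived WaitEventSet recommended in the NB above
 * (the socket, the WAIT_EVENT_SOMETHING value and the handling are
 * illustrative assumptions, not a fixed recipe):
 *
 *		WaitEventSet *set = CreateWaitEventSet(TopMemoryContext, 3);
 *		WaitEvent	event;
 *
 *		AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
 *		AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
 *						  NULL, NULL);
 *		AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);
 *
 *		for (;;)
 *		{
 *			if (WaitEventSetWait(set, -1, &event, 1,
 *								 WAIT_EVENT_SOMETHING) != 1)
 *				continue;
 *			if (event.events & WL_LATCH_SET)
 *				ResetLatch(MyLatch);
 *			if (event.events & WL_SOCKET_READABLE)
 *				read from the socket;
 *		}
 *
 * This pays the AddWaitEventToSet() setup cost once rather than on every
 * wait, unlike this function, which builds and frees a set each call.
 */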
553 
554 /*
555  * Sets a latch and wakes up anyone waiting on it.
556  *
557  * This is cheap if the latch is already set, otherwise not so much.
558  *
559  * NB: when calling this in a signal handler, be sure to save and restore
560  * errno around it. (That's standard practice in most signal handlers, of
561  * course, but we used to omit it in handlers that only set a flag.)
562  *
563  * NB: this function is called from critical sections and signal handlers so
564  * throwing an error is not a good idea.
565  */
566 void
567 SetLatch(Latch *latch)
568 {
569 #ifndef WIN32
570  pid_t owner_pid;
571 #else
572  HANDLE handle;
573 #endif
574 
575  /*
576  * The memory barrier has to be placed here to ensure that any flag
577  * variables possibly changed by this process have been flushed to main
578  * memory, before we check/set is_set.
579  */
580  pg_memory_barrier();
581 
582  /* Quick exit if already set */
583  if (latch->is_set)
584  return;
585 
586  latch->is_set = true;
587 
588  pg_memory_barrier();
589  if (!latch->maybe_sleeping)
590  return;
591 
592 #ifndef WIN32
593 
594  /*
595  * See if anyone's waiting for the latch. It can be the current process if
596  * we're in a signal handler. We write a byte to the self-pipe, or send
597  * SIGURG to ourselves, to wake up WaitEventSetWaitBlock() without races
598  * in that case. If it's another process, send a signal.
599  *
600  * Fetch owner_pid only once, in case the latch is concurrently getting
601  * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
602  * guaranteed to be true! In practice, the effective range of pid_t fits
603  * in a 32 bit integer, and so should be atomic. In the worst case, we
604  * might end up signaling the wrong process. Even then, you're very
605  * unlucky if a process with that bogus pid exists and belongs to
606  * Postgres; and PG database processes should handle excess SIGURG
607  * interrupts without a problem anyhow.
608  *
609  * Another sort of race condition that's possible here is for a new
610  * process to own the latch immediately after we look, so we don't signal
611  * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
612  * the standard coding convention of waiting at the bottom of their loops,
613  * not the top, so that they'll correctly process latch-setting events
614  * that happen before they enter the loop.
615  */
616  owner_pid = latch->owner_pid;
617  if (owner_pid == 0)
618  return;
619  else if (owner_pid == MyProcPid)
620  {
621 #if defined(WAIT_USE_POLL)
622  if (waiting)
623  sendSelfPipeByte();
624 #else
625  if (waiting)
626  kill(MyProcPid, SIGURG);
627 #endif
628  }
629  else
630  kill(owner_pid, SIGURG);
631 
632 #else
633 
634  /*
635  * See if anyone's waiting for the latch. It can be the current process if
636  * we're in a signal handler.
637  *
638  * Use a local variable here just in case somebody changes the event field
639  * concurrently (which really should not happen).
640  */
641  handle = latch->event;
642  if (handle)
643  {
644  SetEvent(handle);
645 
646  /*
647  * Note that we silently ignore any errors. We might be in a signal
648  * handler or other critical path where it's not safe to call elog().
649  */
650  }
651 #endif
652 
653 }
654 
655 /*
656  * Clear the latch. Calling WaitLatch after this will sleep, unless
657  * the latch is set again before the WaitLatch call.
658  */
659 void
661 {
662  /* Only the owner should reset the latch */
663  Assert(latch->owner_pid == MyProcPid);
664  Assert(latch->maybe_sleeping == false);
665 
666  latch->is_set = false;
667 
668  /*
669  * Ensure that the write to is_set gets flushed to main memory before we
670  * examine any flag variables. Otherwise a concurrent SetLatch might
671  * falsely conclude that it needn't signal us, even though we have missed
672  * seeing some flag updates that SetLatch was supposed to inform us of.
673  */
674  pg_memory_barrier();
675 }
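
/*
 * Reading SetLatch(), ResetLatch() and WaitEventSetWait() together, the
 * barrier protocol above can be summarized as follows (a paraphrase of the
 * existing code, not additional machinery):
 *
 *		setter:  is_set = true;  barrier;  if (maybe_sleeping) send wakeup
 *		waiter:  maybe_sleeping = true;  barrier;  if (is_set) don't sleep
 *
 * Whichever write becomes visible first, either the setter observes
 * maybe_sleeping and sends a wakeup, or the waiter observes is_set and never
 * sleeps, so a SetLatch() cannot be lost between the check and the sleep.
 */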
676 
677 /*
678  * Create a WaitEventSet with space for nevents different events to wait for.
679  *
680  * These events can then be efficiently waited upon together, using
681  * WaitEventSetWait().
682  */
683 WaitEventSet *
684 CreateWaitEventSet(MemoryContext context, int nevents)
685 {
686  WaitEventSet *set;
687  char *data;
688  Size sz = 0;
689 
690  /*
691  * Use MAXALIGN size/alignment to guarantee that later uses of memory are
692  * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
693  * platforms, but earlier allocations like WaitEventSet and WaitEvent
694  * might not be sized to guarantee that when purely using sizeof().
695  */
696  sz += MAXALIGN(sizeof(WaitEventSet));
697  sz += MAXALIGN(sizeof(WaitEvent) * nevents);
698 
699 #if defined(WAIT_USE_EPOLL)
700  sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
701 #elif defined(WAIT_USE_KQUEUE)
702  sz += MAXALIGN(sizeof(struct kevent) * nevents);
703 #elif defined(WAIT_USE_POLL)
704  sz += MAXALIGN(sizeof(struct pollfd) * nevents);
705 #elif defined(WAIT_USE_WIN32)
706  /* need space for the pgwin32_signal_event */
707  sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
708 #endif
709 
710  data = (char *) MemoryContextAllocZero(context, sz);
711 
712  set = (WaitEventSet *) data;
713  data += MAXALIGN(sizeof(WaitEventSet));
714 
715  set->events = (WaitEvent *) data;
716  data += MAXALIGN(sizeof(WaitEvent) * nevents);
717 
718 #if defined(WAIT_USE_EPOLL)
719  set->epoll_ret_events = (struct epoll_event *) data;
720  data += MAXALIGN(sizeof(struct epoll_event) * nevents);
721 #elif defined(WAIT_USE_KQUEUE)
722  set->kqueue_ret_events = (struct kevent *) data;
723  data += MAXALIGN(sizeof(struct kevent) * nevents);
724 #elif defined(WAIT_USE_POLL)
725  set->pollfds = (struct pollfd *) data;
726  data += MAXALIGN(sizeof(struct pollfd) * nevents);
727 #elif defined(WAIT_USE_WIN32)
728  set->handles = (HANDLE) data;
729  data += MAXALIGN(sizeof(HANDLE) * nevents);
730 #endif
731 
732  set->latch = NULL;
733  set->nevents_space = nevents;
734  set->exit_on_postmaster_death = false;
735 
736 #if defined(WAIT_USE_EPOLL)
737  if (!AcquireExternalFD())
738  {
739  /* treat this as though epoll_create1 itself returned EMFILE */
740  elog(ERROR, "epoll_create1 failed: %m");
741  }
742  set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
743  if (set->epoll_fd < 0)
744  {
745  ReleaseExternalFD();
746  elog(ERROR, "epoll_create1 failed: %m");
747  }
748 #elif defined(WAIT_USE_KQUEUE)
749  if (!AcquireExternalFD())
750  {
751  /* treat this as though kqueue itself returned EMFILE */
752  elog(ERROR, "kqueue failed: %m");
753  }
754  set->kqueue_fd = kqueue();
755  if (set->kqueue_fd < 0)
756  {
757  ReleaseExternalFD();
758  elog(ERROR, "kqueue failed: %m");
759  }
760  if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
761  {
762  int save_errno = errno;
763 
764  close(set->kqueue_fd);
765  ReleaseExternalFD();
766  errno = save_errno;
767  elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
768  }
769  set->report_postmaster_not_running = false;
770 #elif defined(WAIT_USE_WIN32)
771 
772  /*
773  * To handle signals while waiting, we need to add a win32 specific event.
774  * We accounted for the additional event at the top of this routine. See
775  * port/win32/signal.c for more details.
776  *
777  * Note: pgwin32_signal_event should be first to ensure that it will be
778  * reported when multiple events are set. We want to guarantee that
779  * pending signals are serviced.
780  */
781  set->handles[0] = pgwin32_signal_event;
782  StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
783 #endif
784 
785  return set;
786 }
787 
788 /*
789  * Free a previously created WaitEventSet.
790  *
791  * Note: preferably, this shouldn't have to free any resources that could be
792  * inherited across an exec(). If it did, we'd likely leak those resources in
793  * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
794  * when the FD is created. For the Windows case, we assume that the handles
795  * involved are non-inheritable.
796  */
797 void
798 FreeWaitEventSet(WaitEventSet *set)
799 {
800 #if defined(WAIT_USE_EPOLL)
801  close(set->epoll_fd);
802  ReleaseExternalFD();
803 #elif defined(WAIT_USE_KQUEUE)
804  close(set->kqueue_fd);
805  ReleaseExternalFD();
806 #elif defined(WAIT_USE_WIN32)
807  WaitEvent *cur_event;
808 
809  for (cur_event = set->events;
810  cur_event < (set->events + set->nevents);
811  cur_event++)
812  {
813  if (cur_event->events & WL_LATCH_SET)
814  {
815  /* uses the latch's HANDLE */
816  }
817  else if (cur_event->events & WL_POSTMASTER_DEATH)
818  {
819  /* uses PostmasterHandle */
820  }
821  else
822  {
823  /* Clean up the event object we created for the socket */
824  WSAEventSelect(cur_event->fd, NULL, 0);
825  WSACloseEvent(set->handles[cur_event->pos + 1]);
826  }
827  }
828 #endif
829 
830  pfree(set);
831 }
832 
833 /* ---
834  * Add an event to the set. Possible events are:
835  * - WL_LATCH_SET: Wait for the latch to be set
836  * - WL_POSTMASTER_DEATH: Wait for postmaster to die
837  * - WL_SOCKET_READABLE: Wait for socket to become readable,
838  * can be combined in one event with other WL_SOCKET_* events
839  * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
840  * can be combined with other WL_SOCKET_* events
841  * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
842  * can be combined with other WL_SOCKET_* events (on non-Windows
843  * platforms, this is the same as WL_SOCKET_WRITEABLE)
844  * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
845  *
846  * Returns the offset in WaitEventSet->events (starting from 0), which can be
847  * used to modify previously added wait events using ModifyWaitEvent().
848  *
849  * In the WL_LATCH_SET case the latch must be owned by the current process,
850  * i.e. it must be a process-local latch initialized with InitLatch, or a
851  * shared latch associated with the current process by calling OwnLatch.
852  *
853  * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED cases, EOF and error
854  * conditions cause the socket to be reported as readable/writable/connected,
855  * so that the caller can deal with the condition.
856  *
857  * The user_data pointer specified here will be set for the events returned
858  * by WaitEventSetWait(), allowing the caller to easily associate additional
859  * data with events.
860  */
861 int
862 AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
863  void *user_data)
864 {
865  WaitEvent *event;
866 
867  /* not enough space */
868  Assert(set->nevents < set->nevents_space);
869 
870  if (events == WL_EXIT_ON_PM_DEATH)
871  {
872  events = WL_POSTMASTER_DEATH;
873  set->exit_on_postmaster_death = true;
874  }
875 
876  if (latch)
877  {
878  if (latch->owner_pid != MyProcPid)
879  elog(ERROR, "cannot wait on a latch owned by another process");
880  if (set->latch)
881  elog(ERROR, "cannot wait on more than one latch");
882  if ((events & WL_LATCH_SET) != WL_LATCH_SET)
883  elog(ERROR, "latch events only support being set");
884  }
885  else
886  {
887  if (events & WL_LATCH_SET)
888  elog(ERROR, "cannot wait on latch without a specified latch");
889  }
890 
891  /* waiting for socket readiness without a socket indicates a bug */
892  if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
893  elog(ERROR, "cannot wait on socket event without a socket");
894 
895  event = &set->events[set->nevents];
896  event->pos = set->nevents++;
897  event->fd = fd;
898  event->events = events;
899  event->user_data = user_data;
900 #ifdef WIN32
901  event->reset = false;
902 #endif
903 
904  if (events == WL_LATCH_SET)
905  {
906  set->latch = latch;
907  set->latch_pos = event->pos;
908 #if defined(WAIT_USE_POLL)
909  event->fd = selfpipe_readfd;
910 #elif defined(WAIT_USE_EPOLL)
911  event->fd = signal_fd;
912 #else
913  event->fd = PGINVALID_SOCKET;
914 #ifdef WAIT_USE_EPOLL
915  return event->pos;
916 #endif
917 #endif
918  }
919  else if (events == WL_POSTMASTER_DEATH)
920  {
921 #ifndef WIN32
922  event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
923 #endif
924  }
925 
926  /* perform wait primitive specific initialization, if needed */
927 #if defined(WAIT_USE_EPOLL)
928  WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
929 #elif defined(WAIT_USE_KQUEUE)
930  WaitEventAdjustKqueue(set, event, 0);
931 #elif defined(WAIT_USE_POLL)
932  WaitEventAdjustPoll(set, event);
933 #elif defined(WAIT_USE_WIN32)
934  WaitEventAdjustWin32(set, event);
935 #endif
936 
937  return event->pos;
938 }
939 
940 /*
941  * Change the event mask and, in the WL_LATCH_SET case, the latch associated
942  * with the WaitEvent. The latch may be changed to NULL to disable the latch
943  * temporarily, and then set back to a latch later.
944  *
945  * 'pos' is the id returned by AddWaitEventToSet.
946  */
947 void
948 ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
949 {
950  WaitEvent *event;
951 #if defined(WAIT_USE_KQUEUE)
952  int old_events;
953 #endif
954 
955  Assert(pos < set->nevents);
956 
957  event = &set->events[pos];
958 #if defined(WAIT_USE_KQUEUE)
959  old_events = event->events;
960 #endif
961 
962  /*
963  * If neither the event mask nor the associated latch changes, return
964  * early. That's an important optimization for some sockets, where
965  * ModifyWaitEvent is frequently used to switch from waiting for reads to
966  * waiting on writes.
967  */
968  if (events == event->events &&
969  (!(event->events & WL_LATCH_SET) || set->latch == latch))
970  return;
971 
972  if (event->events & WL_LATCH_SET &&
973  events != event->events)
974  {
975  elog(ERROR, "cannot modify latch event");
976  }
977 
978  if (event->events & WL_POSTMASTER_DEATH)
979  {
980  elog(ERROR, "cannot modify postmaster death event");
981  }
982 
983  /* FIXME: validate event mask */
984  event->events = events;
985 
986  if (events == WL_LATCH_SET)
987  {
988  if (latch && latch->owner_pid != MyProcPid)
989  elog(ERROR, "cannot wait on a latch owned by another process");
990  set->latch = latch;
991 
992  /*
993  * On Unix, we don't need to modify the kernel object because the
994  * underlying pipe (if there is one) is the same for all latches so we
995  * can return immediately. On Windows, we need to update our array of
996  * handles, but we leave the old one in place and tolerate spurious
997  * wakeups if the latch is disabled.
998  */
999 #if defined(WAIT_USE_WIN32)
1000  if (!latch)
1001  return;
1002 #else
1003  return;
1004 #endif
1005  }
1006 
1007 #if defined(WAIT_USE_EPOLL)
1008  WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
1009 #elif defined(WAIT_USE_KQUEUE)
1010  WaitEventAdjustKqueue(set, event, old_events);
1011 #elif defined(WAIT_USE_POLL)
1012  WaitEventAdjustPoll(set, event);
1013 #elif defined(WAIT_USE_WIN32)
1014  WaitEventAdjustWin32(set, event);
1015 #endif
1016 }
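
/*
 * For example, a caller that registered a socket for WL_SOCKET_READABLE can
 * cheaply flip it to wait for writes and back, the frequent case mentioned
 * above (sock_pos being the position returned by AddWaitEventToSet; an
 * illustrative sketch):
 *
 *		ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);
 *		... wait, then flush the outgoing data ...
 *		ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);
 */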
1017 
1018 #if defined(WAIT_USE_EPOLL)
1019 /*
1020  * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
1021  */
1022 static void
1023 WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
1024 {
1025  struct epoll_event epoll_ev;
1026  int rc;
1027 
1028  /* pointer to our event, returned by epoll_wait */
1029  epoll_ev.data.ptr = event;
1030  /* always wait for errors */
1031  epoll_ev.events = EPOLLERR | EPOLLHUP;
1032 
1033  /* prepare pollfd entry once */
1034  if (event->events == WL_LATCH_SET)
1035  {
1036  Assert(set->latch != NULL);
1037  epoll_ev.events |= EPOLLIN;
1038  }
1039  else if (event->events == WL_POSTMASTER_DEATH)
1040  {
1041  epoll_ev.events |= EPOLLIN;
1042  }
1043  else
1044  {
1045  Assert(event->fd != PGINVALID_SOCKET);
1046  Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
1047 
1048  if (event->events & WL_SOCKET_READABLE)
1049  epoll_ev.events |= EPOLLIN;
1050  if (event->events & WL_SOCKET_WRITEABLE)
1051  epoll_ev.events |= EPOLLOUT;
1052  }
1053 
1054  /*
1055  * Even though unused, we also pass epoll_ev as the data argument if
1056  * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
1057  * requiring that, and actually it makes the code simpler...
1058  */
1059  rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
1060 
1061  if (rc < 0)
1062  ereport(ERROR,
1063  (errcode_for_socket_access(),
1064  /* translator: %s is a syscall name, such as "poll()" */
1065  errmsg("%s failed: %m",
1066  "epoll_ctl()")));
1067 }
1068 #endif
1069 
1070 #if defined(WAIT_USE_POLL)
1071 static void
1072 WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
1073 {
1074  struct pollfd *pollfd = &set->pollfds[event->pos];
1075 
1076  pollfd->revents = 0;
1077  pollfd->fd = event->fd;
1078 
1079  /* prepare pollfd entry once */
1080  if (event->events == WL_LATCH_SET)
1081  {
1082  Assert(set->latch != NULL);
1083  pollfd->events = POLLIN;
1084  }
1085  else if (event->events == WL_POSTMASTER_DEATH)
1086  {
1087  pollfd->events = POLLIN;
1088  }
1089  else
1090  {
1091  Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
1092  pollfd->events = 0;
1093  if (event->events & WL_SOCKET_READABLE)
1094  pollfd->events |= POLLIN;
1095  if (event->events & WL_SOCKET_WRITEABLE)
1096  pollfd->events |= POLLOUT;
1097  }
1098 
1099  Assert(event->fd != PGINVALID_SOCKET);
1100 }
1101 #endif
1102 
1103 #if defined(WAIT_USE_KQUEUE)
1104 
1105 /*
1106  * On most BSD family systems, the udata member of struct kevent is of type
1107  * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
1108  * NetBSD has it as intptr_t, so here we wallpaper over that difference with
1109  * an lvalue cast.
1110  */
1111 #define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
1112 
1113 static inline void
1114 WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
1115  WaitEvent *event)
1116 {
1117  k_ev->ident = event->fd;
1118  k_ev->filter = filter;
1119  k_ev->flags = action;
1120  k_ev->fflags = 0;
1121  k_ev->data = 0;
1122  AccessWaitEvent(k_ev) = event;
1123 }
1124 
1125 static inline void
1126 WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
1127 {
1128  /* For now postmaster death can only be added, not removed. */
1129  k_ev->ident = PostmasterPid;
1130  k_ev->filter = EVFILT_PROC;
1131  k_ev->flags = EV_ADD;
1132  k_ev->fflags = NOTE_EXIT;
1133  k_ev->data = 0;
1134  AccessWaitEvent(k_ev) = event;
1135 }
1136 
1137 static inline void
1138 WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
1139 {
1140  /* For now latch can only be added, not removed. */
1141  k_ev->ident = SIGURG;
1142  k_ev->filter = EVFILT_SIGNAL;
1143  k_ev->flags = EV_ADD;
1144  k_ev->fflags = 0;
1145  k_ev->data = 0;
1146  AccessWaitEvent(k_ev) = event;
1147 }
1148 
1149 /*
1150  * old_events is the previous event mask, used to compute what has changed.
1151  */
1152 static void
1153 WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
1154 {
1155  int rc;
1156  struct kevent k_ev[2];
1157  int count = 0;
1158  bool new_filt_read = false;
1159  bool old_filt_read = false;
1160  bool new_filt_write = false;
1161  bool old_filt_write = false;
1162 
1163  if (old_events == event->events)
1164  return;
1165 
1166  Assert(event->events != WL_LATCH_SET || set->latch != NULL);
1167  Assert(event->events == WL_LATCH_SET ||
1168  event->events == WL_POSTMASTER_DEATH ||
1169  (event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)));
1170 
1171  if (event->events == WL_POSTMASTER_DEATH)
1172  {
1173  /*
1174  * Unlike all the other implementations, we detect postmaster death
1175  * using process notification instead of waiting on the postmaster
1176  * alive pipe.
1177  */
1178  WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
1179  }
1180  else if (event->events == WL_LATCH_SET)
1181  {
1182  /* We detect latch wakeup using a signal event. */
1183  WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
1184  }
1185  else
1186  {
1187  /*
1188  * We need to compute the adds and deletes required to get from the
1189  * old event mask to the new event mask, since kevent treats readable
1190  * and writable as separate events.
1191  */
1192  if (old_events & WL_SOCKET_READABLE)
1193  old_filt_read = true;
1194  if (event->events & WL_SOCKET_READABLE)
1195  new_filt_read = true;
1196  if (old_events & WL_SOCKET_WRITEABLE)
1197  old_filt_write = true;
1198  if (event->events & WL_SOCKET_WRITEABLE)
1199  new_filt_write = true;
1200  if (old_filt_read && !new_filt_read)
1201  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
1202  event);
1203  else if (!old_filt_read && new_filt_read)
1204  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
1205  event);
1206  if (old_filt_write && !new_filt_write)
1207  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
1208  event);
1209  else if (!old_filt_write && new_filt_write)
1210  WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
1211  event);
1212  }
1213 
1214  Assert(count > 0);
1215  Assert(count <= 2);
1216 
1217  rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
1218 
1219  /*
1220  * When adding the postmaster's pid, we have to consider that it might
1221  * already have exited and perhaps even been replaced by another process
1222  * with the same pid. If so, we have to defer reporting this as an event
1223  * until the next call to WaitEventSetWaitBlock().
1224  */
1225 
1226  if (rc < 0)
1227  {
1228  if (event->events == WL_POSTMASTER_DEATH &&
1229  (errno == ESRCH || errno == EACCES))
1230  set->report_postmaster_not_running = true;
1231  else
1232  ereport(ERROR,
1233  (errcode_for_socket_access(),
1234  /* translator: %s is a syscall name, such as "poll()" */
1235  errmsg("%s failed: %m",
1236  "kevent()")));
1237  }
1238  else if (event->events == WL_POSTMASTER_DEATH &&
1239  PostmasterPid != getppid() &&
1240  !PostmasterIsAlive())
1241  {
1242  /*
1243  * The extra PostmasterIsAliveInternal() check prevents false alarms
1244  * on systems that give a different value for getppid() while being
1245  * traced by a debugger.
1246  */
1247  set->report_postmaster_not_running = true;
1248  }
1249 }
1250 
1251 #endif
1252 
1253 #if defined(WAIT_USE_WIN32)
1254 static void
1255 WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
1256 {
1257  HANDLE *handle = &set->handles[event->pos + 1];
1258 
1259  if (event->events == WL_LATCH_SET)
1260  {
1261  Assert(set->latch != NULL);
1262  *handle = set->latch->event;
1263  }
1264  else if (event->events == WL_POSTMASTER_DEATH)
1265  {
1266  *handle = PostmasterHandle;
1267  }
1268  else
1269  {
1270  int flags = FD_CLOSE; /* always check for errors/EOF */
1271 
1272  if (event->events & WL_SOCKET_READABLE)
1273  flags |= FD_READ;
1274  if (event->events & WL_SOCKET_WRITEABLE)
1275  flags |= FD_WRITE;
1276  if (event->events & WL_SOCKET_CONNECTED)
1277  flags |= FD_CONNECT;
1278 
1279  if (*handle == WSA_INVALID_EVENT)
1280  {
1281  *handle = WSACreateEvent();
1282  if (*handle == WSA_INVALID_EVENT)
1283  elog(ERROR, "failed to create event for socket: error code %u",
1284  WSAGetLastError());
1285  }
1286  if (WSAEventSelect(event->fd, *handle, flags) != 0)
1287  elog(ERROR, "failed to set up event for socket: error code %u",
1288  WSAGetLastError());
1289 
1290  Assert(event->fd != PGINVALID_SOCKET);
1291  }
1292 }
1293 #endif
1294 
1295 /*
1296  * Wait for events added to the set to happen, or until the timeout is
1297  * reached. At most nevents occurred events are returned.
1298  *
1299  * If timeout = -1, block until an event occurs; if 0, check sockets for
1300  * readiness, but don't block; if > 0, block for at most timeout milliseconds.
1301  *
1302  * Returns the number of events occurred, or 0 if the timeout was reached.
1303  *
1304  * Returned events will have the fd, pos, user_data fields set to the
1305  * values associated with the registered event.
1306  */
1307 int
1308 WaitEventSetWait(WaitEventSet *set, long timeout,
1309  WaitEvent *occurred_events, int nevents,
1310  uint32 wait_event_info)
1311 {
1312  int returned_events = 0;
1313  instr_time start_time;
1314  instr_time cur_time;
1315  long cur_timeout = -1;
1316 
1317  Assert(nevents > 0);
1318 
1319  /*
1320  * Initialize timeout if requested. We must record the current time so
1321  * that we can determine the remaining timeout if interrupted.
1322  */
1323  if (timeout >= 0)
1324  {
1325  INSTR_TIME_SET_CURRENT(start_time);
1326  Assert(timeout >= 0 && timeout <= INT_MAX);
1327  cur_timeout = timeout;
1328  }
1329 
1330  pgstat_report_wait_start(wait_event_info);
1331 
1332 #ifndef WIN32
1333  waiting = true;
1334 #else
1335  /* Ensure that signals are serviced even if latch is already set */
1336  pgwin32_dispatch_queued_signals();
1337 #endif
1338  while (returned_events == 0)
1339  {
1340  int rc;
1341 
1342  /*
1343  * Check if the latch is set already. If so, leave the loop
1344  * immediately to avoid blocking again. We don't attempt to report any
1345  * other events that might also be satisfied.
1346  *
1347  * If someone sets the latch between this and the
1348  * WaitEventSetWaitBlock() below, the setter will write a byte to the
1349  * pipe (or signal us and the signal handler will do that), and the
1350  * readiness routine will return immediately.
1351  *
1352  * On Unix, if there's a pending byte in the self pipe, we'll notice
1353  * whenever blocking. Only clearing the pipe in that case avoids
1354  * having to drain it every time WaitLatchOrSocket() is used. Should
1355  * the pipe-buffer fill up we're still ok, because the pipe is in
1356  * nonblocking mode. It's unlikely for that to happen, because the
1357  * self pipe isn't filled unless we're blocking (waiting = true), or
1358  * from inside a signal handler in latch_sigurg_handler().
1359  *
1360  * On windows, we'll also notice if there's a pending event for the
1361  * latch when blocking, but there's no danger of anything filling up,
1362  * as "Setting an event that is already set has no effect.".
1363  *
1364  * Note: we assume that the kernel calls involved in latch management
1365  * will provide adequate synchronization on machines with weak memory
1366  * ordering, so that we cannot miss seeing is_set if a notification
1367  * has already been queued.
1368  */
1369  if (set->latch && !set->latch->is_set)
1370  {
1371  /* about to sleep on a latch */
1372  set->latch->maybe_sleeping = true;
1373  pg_memory_barrier();
1374  /* and recheck */
1375  }
1376 
1377  if (set->latch && set->latch->is_set)
1378  {
1379  occurred_events->fd = PGINVALID_SOCKET;
1380  occurred_events->pos = set->latch_pos;
1381  occurred_events->user_data =
1382  set->events[set->latch_pos].user_data;
1383  occurred_events->events = WL_LATCH_SET;
1384  occurred_events++;
1385  returned_events++;
1386 
1387  /* could have been set above */
1388  set->latch->maybe_sleeping = false;
1389 
1390  break;
1391  }
1392 
1393  /*
1394  * Wait for events using the readiness primitive chosen at the top of
1395  * this file. If -1 is returned, a timeout has occurred; if 0, we have
1396  * to retry; anything >= 1 is the number of returned events.
1397  */
1398  rc = WaitEventSetWaitBlock(set, cur_timeout,
1399  occurred_events, nevents);
1400 
1401  if (set->latch)
1402  {
1403  Assert(set->latch->maybe_sleeping);
1404  set->latch->maybe_sleeping = false;
1405  }
1406 
1407  if (rc == -1)
1408  break; /* timeout occurred */
1409  else
1410  returned_events = rc;
1411 
1412  /* If we're not done, update cur_timeout for next iteration */
1413  if (returned_events == 0 && timeout >= 0)
1414  {
1415  INSTR_TIME_SET_CURRENT(cur_time);
1416  INSTR_TIME_SUBTRACT(cur_time, start_time);
1417  cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
1418  if (cur_timeout <= 0)
1419  break;
1420  }
1421  }
1422 #ifndef WIN32
1423  waiting = false;
1424 #endif
1425 
1426  pgstat_report_wait_end();
1427 
1428  return returned_events;
1429 }
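
/*
 * Callers that can process several notifications per wakeup pass nevents > 1
 * and walk the returned array; a sketch (handle_event is a caller-supplied
 * assumption, WAIT_EVENT_SOMETHING a placeholder):
 *
 *		WaitEvent	events[16];
 *		int			n = WaitEventSetWait(set, timeout, events,
 *										 lengthof(events),
 *										 WAIT_EVENT_SOMETHING);
 *
 *		for (int i = 0; i < n; i++)
 *			handle_event(events[i].events, events[i].fd,
 *						 events[i].user_data);
 */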
1430 
1431 
1432 #if defined(WAIT_USE_EPOLL)
1433 
1434 /*
1435  * Wait using linux's epoll_wait(2).
1436  *
1437  * This is the preferable wait method, as several readiness notifications are
1438  * delivered, without having to iterate through all of set->events. The
1439  * returned epoll_event struct contains a pointer to our events, making
1440  * association easy.
1441  */
1442 static inline int
1443 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1444  WaitEvent *occurred_events, int nevents)
1445 {
1446  int returned_events = 0;
1447  int rc;
1448  WaitEvent *cur_event;
1449  struct epoll_event *cur_epoll_event;
1450 
1451  /* Sleep */
1452  rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
1453  nevents, cur_timeout);
1454 
1455  /* Check return code */
1456  if (rc < 0)
1457  {
1458  /* EINTR is okay, otherwise complain */
1459  if (errno != EINTR)
1460  {
1461  waiting = false;
1462  ereport(ERROR,
1463  (errcode_for_socket_access(),
1464  /* translator: %s is a syscall name, such as "poll()" */
1465  errmsg("%s failed: %m",
1466  "epoll_wait()")));
1467  }
1468  return 0;
1469  }
1470  else if (rc == 0)
1471  {
1472  /* timeout exceeded */
1473  return -1;
1474  }
1475 
1476  /*
1477  * At least one event occurred, iterate over the returned epoll events
1478  * until they're either all processed, or we've returned all the events
1479  * the caller desired.
1480  */
1481  for (cur_epoll_event = set->epoll_ret_events;
1482  cur_epoll_event < (set->epoll_ret_events + rc) &&
1483  returned_events < nevents;
1484  cur_epoll_event++)
1485  {
1486  /* epoll's data pointer is set to the associated WaitEvent */
1487  cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
1488 
1489  occurred_events->pos = cur_event->pos;
1490  occurred_events->user_data = cur_event->user_data;
1491  occurred_events->events = 0;
1492 
1493  if (cur_event->events == WL_LATCH_SET &&
1494  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1495  {
1496  /* Drain the signalfd. */
1497  drain();
1498 
1499  if (set->latch && set->latch->is_set)
1500  {
1501  occurred_events->fd = PGINVALID_SOCKET;
1502  occurred_events->events = WL_LATCH_SET;
1503  occurred_events++;
1504  returned_events++;
1505  }
1506  }
1507  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1508  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1509  {
1510  /*
1511  * We expect an EPOLLHUP when the remote end is closed, but
1512  * because we don't expect the pipe to become readable or to have
1513  * any errors either, treat those cases as postmaster death, too.
1514  *
1515  * Be paranoid about a spurious event signaling the postmaster as
1516  * being dead. There have been reports about that happening with
1517  * older primitives (select(2) to be specific), and a spurious
1518  * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1519  * cost much.
1520  */
1521  if (!PostmasterIsAliveInternal())
1522  {
1523  if (set->exit_on_postmaster_death)
1524  proc_exit(1);
1525  occurred_events->fd = PGINVALID_SOCKET;
1526  occurred_events->events = WL_POSTMASTER_DEATH;
1527  occurred_events++;
1528  returned_events++;
1529  }
1530  }
1531  else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
1532  {
1533  Assert(cur_event->fd != PGINVALID_SOCKET);
1534 
1535  if ((cur_event->events & WL_SOCKET_READABLE) &&
1536  (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
1537  {
1538  /* data available in socket, or EOF */
1539  occurred_events->events |= WL_SOCKET_READABLE;
1540  }
1541 
1542  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1543  (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
1544  {
1545  /* writable, or EOF */
1546  occurred_events->events |= WL_SOCKET_WRITEABLE;
1547  }
1548 
1549  if (occurred_events->events != 0)
1550  {
1551  occurred_events->fd = cur_event->fd;
1552  occurred_events++;
1553  returned_events++;
1554  }
1555  }
1556  }
1557 
1558  return returned_events;
1559 }
1560 
1561 #elif defined(WAIT_USE_KQUEUE)
1562 
1563 /*
1564  * Wait using kevent(2) on BSD-family systems and macOS.
1565  *
1566  * For now this mirrors the epoll code, but in future it could modify the fd
1567  * set in the same call to kevent as it uses for waiting instead of doing that
1568  * with separate system calls.
1569  */
1570 static int
1571 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1572  WaitEvent *occurred_events, int nevents)
1573 {
1574  int returned_events = 0;
1575  int rc;
1576  WaitEvent *cur_event;
1577  struct kevent *cur_kqueue_event;
1578  struct timespec timeout;
1579  struct timespec *timeout_p;
1580 
1581  if (cur_timeout < 0)
1582  timeout_p = NULL;
1583  else
1584  {
1585  timeout.tv_sec = cur_timeout / 1000;
1586  timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
1587  timeout_p = &timeout;
1588  }
1589 
1590  /*
1591  * Report postmaster events discovered by WaitEventAdjustKqueue() or an
1592  * earlier call to WaitEventSetWait().
1593  */
1594  if (unlikely(set->report_postmaster_not_running))
1595  {
1596  if (set->exit_on_postmaster_death)
1597  proc_exit(1);
1598  occurred_events->fd = PGINVALID_SOCKET;
1599  occurred_events->events = WL_POSTMASTER_DEATH;
1600  return 1;
1601  }
1602 
1603  /* Sleep */
1604  rc = kevent(set->kqueue_fd, NULL, 0,
1605  set->kqueue_ret_events, nevents,
1606  timeout_p);
1607 
1608  /* Check return code */
1609  if (rc < 0)
1610  {
1611  /* EINTR is okay, otherwise complain */
1612  if (errno != EINTR)
1613  {
1614  waiting = false;
1615  ereport(ERROR,
1616  (errcode_for_socket_access(),
1617  /* translator: %s is a syscall name, such as "poll()" */
1618  errmsg("%s failed: %m",
1619  "kevent()")));
1620  }
1621  return 0;
1622  }
1623  else if (rc == 0)
1624  {
1625  /* timeout exceeded */
1626  return -1;
1627  }
1628 
1629  /*
1630  * At least one event occurred, iterate over the returned kqueue events
1631  * until they're either all processed, or we've returned all the events
1632  * the caller desired.
1633  */
1634  for (cur_kqueue_event = set->kqueue_ret_events;
1635  cur_kqueue_event < (set->kqueue_ret_events + rc) &&
1636  returned_events < nevents;
1637  cur_kqueue_event++)
1638  {
1639  /* kevent's udata points to the associated WaitEvent */
1640  cur_event = AccessWaitEvent(cur_kqueue_event);
1641 
1642  occurred_events->pos = cur_event->pos;
1643  occurred_events->user_data = cur_event->user_data;
1644  occurred_events->events = 0;
1645 
1646  if (cur_event->events == WL_LATCH_SET &&
1647  cur_kqueue_event->filter == EVFILT_SIGNAL)
1648  {
1649  if (set->latch && set->latch->is_set)
1650  {
1651  occurred_events->fd = PGINVALID_SOCKET;
1652  occurred_events->events = WL_LATCH_SET;
1653  occurred_events++;
1654  returned_events++;
1655  }
1656  }
1657  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1658  cur_kqueue_event->filter == EVFILT_PROC &&
1659  (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
1660  {
1661  /*
1662  * The kernel will tell this kqueue object only once about the exit
1663  * of the postmaster, so let's remember that for next time so that
1664  * we provide level-triggered semantics.
1665  */
1666  set->report_postmaster_not_running = true;
1667 
1668  if (set->exit_on_postmaster_death)
1669  proc_exit(1);
1670  occurred_events->fd = PGINVALID_SOCKET;
1671  occurred_events->events = WL_POSTMASTER_DEATH;
1672  occurred_events++;
1673  returned_events++;
1674  }
1675  else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
1676  {
1677  Assert(cur_event->fd >= 0);
1678 
1679  if ((cur_event->events & WL_SOCKET_READABLE) &&
1680  (cur_kqueue_event->filter == EVFILT_READ))
1681  {
1682  /* readable, or EOF */
1683  occurred_events->events |= WL_SOCKET_READABLE;
1684  }
1685 
1686  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1687  (cur_kqueue_event->filter == EVFILT_WRITE))
1688  {
1689  /* writable, or EOF */
1690  occurred_events->events |= WL_SOCKET_WRITEABLE;
1691  }
1692 
1693  if (occurred_events->events != 0)
1694  {
1695  occurred_events->fd = cur_event->fd;
1696  occurred_events++;
1697  returned_events++;
1698  }
1699  }
1700  }
1701 
1702  return returned_events;
1703 }
1704 
1705 #elif defined(WAIT_USE_POLL)
1706 
1707 /*
1708  * Wait using poll(2).
1709  *
1710  * This allows receiving readiness notifications for several events at once,
1711  * but requires iterating through all of set->pollfds.
1712  */
1713 static inline int
1714 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1715  WaitEvent *occurred_events, int nevents)
1716 {
1717  int returned_events = 0;
1718  int rc;
1719  WaitEvent *cur_event;
1720  struct pollfd *cur_pollfd;
1721 
1722  /* Sleep */
1723  rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
1724 
1725  /* Check return code */
1726  if (rc < 0)
1727  {
1728  /* EINTR is okay, otherwise complain */
1729  if (errno != EINTR)
1730  {
1731  waiting = false;
1732  ereport(ERROR,
1733  (errcode_for_socket_access(),
1734  /* translator: %s is a syscall name, such as "poll()" */
1735  errmsg("%s failed: %m",
1736  "poll()")));
1737  }
1738  return 0;
1739  }
1740  else if (rc == 0)
1741  {
1742  /* timeout exceeded */
1743  return -1;
1744  }
1745 
1746  for (cur_event = set->events, cur_pollfd = set->pollfds;
1747  cur_event < (set->events + set->nevents) &&
1748  returned_events < nevents;
1749  cur_event++, cur_pollfd++)
1750  {
1751  /* no activity on this FD, skip */
1752  if (cur_pollfd->revents == 0)
1753  continue;
1754 
1755  occurred_events->pos = cur_event->pos;
1756  occurred_events->user_data = cur_event->user_data;
1757  occurred_events->events = 0;
1758 
1759  if (cur_event->events == WL_LATCH_SET &&
1760  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1761  {
1762  /* There's data in the self-pipe, clear it. */
1763  drain();
1764 
1765  if (set->latch && set->latch->is_set)
1766  {
1767  occurred_events->fd = PGINVALID_SOCKET;
1768  occurred_events->events = WL_LATCH_SET;
1769  occurred_events++;
1770  returned_events++;
1771  }
1772  }
1773  else if (cur_event->events == WL_POSTMASTER_DEATH &&
1774  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1775  {
1776  /*
1777  * We expect a POLLHUP when the remote end is closed, but because
1778  * we don't expect the pipe to become readable or to have any
1779  * errors either, treat those cases as postmaster death, too.
1780  *
1781  * Be paranoid about a spurious event signaling the postmaster as
1782  * being dead. There have been reports about that happening with
1783  * older primitives (select(2) to be specific), and a spurious
1784  * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1785  * cost much.
1786  */
1787  if (!PostmasterIsAliveInternal())
1788  {
1789  if (set->exit_on_postmaster_death)
1790  proc_exit(1);
1791  occurred_events->fd = PGINVALID_SOCKET;
1792  occurred_events->events = WL_POSTMASTER_DEATH;
1793  occurred_events++;
1794  returned_events++;
1795  }
1796  }
1797  else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
1798  {
1799  int errflags = POLLHUP | POLLERR | POLLNVAL;
1800 
1801  Assert(cur_event->fd >= PGINVALID_SOCKET);
1802 
1803  if ((cur_event->events & WL_SOCKET_READABLE) &&
1804  (cur_pollfd->revents & (POLLIN | errflags)))
1805  {
1806  /* data available in socket, or EOF */
1807  occurred_events->events |= WL_SOCKET_READABLE;
1808  }
1809 
1810  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1811  (cur_pollfd->revents & (POLLOUT | errflags)))
1812  {
1813  /* writeable, or EOF */
1814  occurred_events->events |= WL_SOCKET_WRITEABLE;
1815  }
1816 
1817  if (occurred_events->events != 0)
1818  {
1819  occurred_events->fd = cur_event->fd;
1820  occurred_events++;
1821  returned_events++;
1822  }
1823  }
1824  }
1825  return returned_events;
1826 }
1827 
1828 #elif defined(WAIT_USE_WIN32)
1829 
1830 /*
1831  * Wait using Windows' WaitForMultipleObjects().
1832  *
1833  * Unfortunately this will only ever return a single readiness notification at
1834  * a time. Note that while the official documentation for
1835  * WaitForMultipleObjects is ambiguous about multiple events being "consumed"
1836  * with a single bWaitAll = FALSE call,
1837  * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273 confirms
1838  * that only one event is "consumed".
1839  */
1840 static inline int
1841 WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1842  WaitEvent *occurred_events, int nevents)
1843 {
1844  int returned_events = 0;
1845  DWORD rc;
1846  WaitEvent *cur_event;
1847 
1848  /* Reset any wait events that need it */
1849  for (cur_event = set->events;
1850  cur_event < (set->events + set->nevents);
1851  cur_event++)
1852  {
1853  if (cur_event->reset)
1854  {
1855  WaitEventAdjustWin32(set, cur_event);
1856  cur_event->reset = false;
1857  }
1858 
1859  /*
1860  * Windows does not guarantee to log an FD_WRITE network event
1861  * indicating that more data can be sent unless the previous send()
1862  * failed with WSAEWOULDBLOCK. While our caller might well have made
1863  * such a call, we cannot assume that here. Therefore, if waiting for
1864  * write-ready, force the issue by doing a dummy send(). If the dummy
1865  * send() succeeds, assume that the socket is in fact write-ready, and
1866  * return immediately. Also, if it fails with something other than
1867  * WSAEWOULDBLOCK, return a write-ready indication to let our caller
1868  * deal with the error condition.
1869  */
1870  if (cur_event->events & WL_SOCKET_WRITEABLE)
1871  {
1872  char c;
1873  WSABUF buf;
1874  DWORD sent;
1875  int r;
1876 
1877  buf.buf = &c;
1878  buf.len = 0;
1879 
1880  r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
1881  if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
1882  {
1883  occurred_events->pos = cur_event->pos;
1884  occurred_events->user_data = cur_event->user_data;
1885  occurred_events->events = WL_SOCKET_WRITEABLE;
1886  occurred_events->fd = cur_event->fd;
1887  return 1;
1888  }
1889  }
1890  }
1891 
1892  /*
1893  * Sleep.
1894  *
1895  * Need to wait for ->nevents + 1, because the signal handle is in [0].
1896  */
1897  rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
1898  cur_timeout);
1899 
1900  /* Check return code */
1901  if (rc == WAIT_FAILED)
1902  elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
1903  GetLastError());
1904  else if (rc == WAIT_TIMEOUT)
1905  {
1906  /* timeout exceeded */
1907  return -1;
1908  }
1909 
1910  if (rc == WAIT_OBJECT_0)
1911  {
1912  /* Service newly-arrived signals */
1913  pgwin32_dispatch_queued_signals();
1914  return 0; /* retry */
1915  }
1916 
1917  /*
1918  * With an offset of one, due to the always present pgwin32_signal_event,
1919  * the handle offset directly corresponds to a wait event.
1920  */
1921  cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
1922 
1923  occurred_events->pos = cur_event->pos;
1924  occurred_events->user_data = cur_event->user_data;
1925  occurred_events->events = 0;
1926 
1927  if (cur_event->events == WL_LATCH_SET)
1928  {
1929  /*
1930  * We cannot use set->latch->event to reset the fired event if we
1931  * aren't waiting on this latch now.
1932  */
1933  if (!ResetEvent(set->handles[cur_event->pos + 1]))
1934  elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
1935 
1936  if (set->latch && set->latch->is_set)
1937  {
1938  occurred_events->fd = PGINVALID_SOCKET;
1939  occurred_events->events = WL_LATCH_SET;
1940  occurred_events++;
1941  returned_events++;
1942  }
1943  }
1944  else if (cur_event->events == WL_POSTMASTER_DEATH)
1945  {
1946  /*
1947  * Postmaster apparently died. Since the consequences of falsely
1948  * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we take
1949  * the trouble to positively verify this with PostmasterIsAlive(),
1950  * even though there is no known reason to think that the event could
1951  * be falsely set on Windows.
1952  */
1953  if (!PostmasterIsAlive())
1954  {
1955  if (set->exit_on_postmaster_death)
1956  proc_exit(1);
1957  occurred_events->fd = PGINVALID_SOCKET;
1958  occurred_events->events = WL_POSTMASTER_DEATH;
1959  occurred_events++;
1960  returned_events++;
1961  }
1962  }
1963  else if (cur_event->events & WL_SOCKET_MASK)
1964  {
1965  WSANETWORKEVENTS resEvents;
1966  HANDLE handle = set->handles[cur_event->pos + 1];
1967 
1968  Assert(cur_event->fd);
1969 
1970  occurred_events->fd = cur_event->fd;
1971 
1972  ZeroMemory(&resEvents, sizeof(resEvents));
1973  if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
1974  elog(ERROR, "failed to enumerate network events: error code %u",
1975  WSAGetLastError());
1976  if ((cur_event->events & WL_SOCKET_READABLE) &&
1977  (resEvents.lNetworkEvents & FD_READ))
1978  {
1979  /* data available in socket */
1980  occurred_events->events |= WL_SOCKET_READABLE;
1981 
1982  /*------
1983  * WaitForMultipleObjects doesn't guarantee that a read event will
1984  * be returned if the latch is set at the same time. Even if it
1985  * did, the caller might drop that event expecting it to reoccur
1986  * on next call. So, we must force the event to be reset if this
1987  * WaitEventSet is used again in order to avoid an indefinite
1988  * hang. Refer https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
1989  * for the behavior of socket events.
1990  *------
1991  */
1992  cur_event->reset = true;
1993  }
1994  if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1995  (resEvents.lNetworkEvents & FD_WRITE))
1996  {
1997  /* writeable */
1998  occurred_events->events |= WL_SOCKET_WRITEABLE;
1999  }
2000  if ((cur_event->events & WL_SOCKET_CONNECTED) &&
2001  (resEvents.lNetworkEvents & FD_CONNECT))
2002  {
2003  /* connected */
2004  occurred_events->events |= WL_SOCKET_CONNECTED;
2005  }
2006  if (resEvents.lNetworkEvents & FD_CLOSE)
2007  {
2008  /* EOF/error, so signal all caller-requested socket flags */
2009  occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
2010  }
2011 
2012  if (occurred_events->events != 0)
2013  {
2014  occurred_events++;
2015  returned_events++;
2016  }
2017  }
2018 
2019  return returned_events;
2020 }
2021 #endif
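/*
 * Editor's sketch (not part of latch.c): how a socket gets associated with a
 * Windows event object so WaitForMultipleObjects() can watch it, which is
 * what WaitEventAdjustWin32() arranges elsewhere in this file.  Error
 * handling is omitted; the function name and guard macro are hypothetical.
 */
#ifdef WIN32_EVENT_SKETCH
static HANDLE
attach_socket_event(SOCKET sock)
{
	WSAEVENT	ev = WSACreateEvent();

	/*
	 * The event is re-armed by each WSAEnumNetworkEvents() call; FD_CLOSE
	 * covers EOF and error conditions, matching the handling above.
	 */
	WSAEventSelect(sock, ev, FD_READ | FD_WRITE | FD_CLOSE);
	return ev;
}
#endif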
2022 
2023 #if defined(WAIT_USE_POLL)
2024 
2025 /*
2026  * SetLatch uses SIGURG to wake up the process waiting on the latch.
2027  *
2028  * Wake up WaitLatch, if we're waiting.
2029  */
2030 static void
2031 latch_sigurg_handler(SIGNAL_ARGS)
2032 {
2033  int save_errno = errno;
2034 
2035  if (waiting)
2036  sendSelfPipeByte();
2037 
2038  errno = save_errno;
2039 }
2040 
2041 /* Send one byte to the self-pipe, to wake up WaitLatch */
2042 static void
2043 sendSelfPipeByte(void)
2044 {
2045  int rc;
2046  char dummy = 0;
2047 
2048 retry:
2049  rc = write(selfpipe_writefd, &dummy, 1);
2050  if (rc < 0)
2051  {
2052  /* If interrupted by signal, just retry */
2053  if (errno == EINTR)
2054  goto retry;
2055 
2056  /*
2057  * If the pipe is full, we don't need to retry; the data that's there
2058  * already is enough to wake up WaitLatch.
2059  */
2060  if (errno == EAGAIN || errno == EWOULDBLOCK)
2061  return;
2062 
2063  /*
2064  * Oops, the write() failed for some other reason. We might be in a
2065  * signal handler, so it's not safe to elog(). We have no choice but
2066  * to silently ignore the error.
2067  */
2068  return;
2069  }
2070 }
2071 
2072 #endif
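/*
 * Editor's sketch (not part of latch.c): the self-pipe trick above, reduced
 * to a stand-alone POSIX program.  latch.c creates its pipe, with both ends
 * non-blocking, in InitializeLatchSupport(); everything here is illustrative
 * and error handling is omitted.
 */
#ifdef SELF_PIPE_SKETCH
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <unistd.h>

static int	pipefds[2];

static void
wakeup_handler(int signo)
{
	int			save_errno = errno;

	/* write() is async-signal-safe; if the pipe is full, that's fine too */
	(void) write(pipefds[1], "x", 1);
	errno = save_errno;
}

int
main(void)
{
	struct pollfd pfd;

	pipe(pipefds);
	fcntl(pipefds[0], F_SETFL, O_NONBLOCK);
	fcntl(pipefds[1], F_SETFL, O_NONBLOCK);
	signal(SIGURG, wakeup_handler);

	pfd.fd = pipefds[0];
	pfd.events = POLLIN;

	/* A signal arriving even just before this call still wakes us via the pipe */
	poll(&pfd, 1, -1);
	return 0;
}
#endif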
2073 
2074 #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
2075 
2076 /*
2077  * Read all available data from self-pipe or signalfd.
2078  *
2079  * Note: this is only called when waiting = true. If it fails and doesn't
2080  * return, it must reset that flag first (though ideally, this will never
2081  * happen).
2082  */
2083 static void
2084 drain(void)
2085 {
2086  char buf[1024];
2087  int rc;
2088  int fd;
2089 
2090 #ifdef WAIT_USE_POLL
2091  fd = selfpipe_readfd;
2092 #else
2093  fd = signal_fd;
2094 #endif
2095 
2096  for (;;)
2097  {
2098  rc = read(fd, buf, sizeof(buf));
2099  if (rc < 0)
2100  {
2101  if (errno == EAGAIN || errno == EWOULDBLOCK)
2102  break; /* the descriptor is empty */
2103  else if (errno == EINTR)
2104  continue; /* retry */
2105  else
2106  {
2107  waiting = false;
2108 #ifdef WAIT_USE_POLL
2109  elog(ERROR, "read() on self-pipe failed: %m");
2110 #else
2111  elog(ERROR, "read() on signalfd failed: %m");
2112 #endif
2113  }
2114  }
2115  else if (rc == 0)
2116  {
2117  waiting = false;
2118 #ifdef WAIT_USE_POLL
2119  elog(ERROR, "unexpected EOF on self-pipe");
2120 #else
2121  elog(ERROR, "unexpected EOF on signalfd");
2122 #endif
2123  }
2124  else if (rc < sizeof(buf))
2125  {
2126  /* we successfully drained the pipe; no need to read() again */
2127  break;
2128  }
2129  /* else buffer wasn't big enough, so read again */
2130  }
2131 }
2132 
2133 #endif
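/*
 * Editor's sketch (not part of latch.c): the signalfd() arrangement the
 * epoll path assumes, where SIGURG stays blocked and is instead consumed
 * from a file descriptor that drain() reads.  Linux-only; error handling is
 * omitted and the function name and guard macro are hypothetical.
 */
#ifdef SIGNALFD_SKETCH
#include <signal.h>
#include <sys/signalfd.h>

static int
make_sigurg_signalfd(void)
{
	sigset_t	mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGURG);

	/* Block normal delivery so the signal is only reported via the fd */
	sigprocmask(SIG_BLOCK, &mask, NULL);
	return signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
}
#endif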