/*-------------------------------------------------------------------------
 *
 * waiteventset.c
 *    ppoll()/pselect() like abstraction
 *
 * WaitEvents are an abstraction for waiting for one or more events at a time.
 * The waiting can be done in a race free fashion, similar to ppoll() or
 * pselect() (as opposed to plain poll()/select()).
 *
 * You can wait for:
 * - a latch being set from another process or from a signal handler in the
 *   same process (WL_LATCH_SET)
 * - data to become readable or writeable on a socket (WL_SOCKET_*)
 * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
 * - timeout (WL_TIMEOUT)
 *
 * Implementation
 * --------------
 *
 * The poll() implementation uses the so-called self-pipe trick to overcome the
 * race condition involved with poll() and setting a global flag in the signal
 * handler. When a latch is set and the current process is waiting for it, the
 * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
 * A signal by itself doesn't interrupt poll() on all platforms, and even on
 * platforms where it does, a signal that arrives just before the poll() call
 * does not prevent poll() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes poll() to return
 * immediately even if the signal arrives before poll() begins.
 *
 * The epoll() implementation overcomes the race with a different technique: it
 * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
 * don't need to register a signal handler or create our own self-pipe. We
 * assume that any system that has Linux epoll() also has Linux signalfd().
 *
 * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
 *
 * The Windows implementation uses Windows events that are inherited by all
 * postmaster child processes. There's no need for the self-pipe trick there.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/storage/ipc/waiteventset.c
 *
 *-------------------------------------------------------------------------
 */
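
/*
 * Editor's note: a minimal usage sketch of this API (illustrative only, not
 * part of the original file; 'sock' is a hypothetical connected socket, and
 * 0 is passed where a real caller would supply a WAIT_EVENT_* constant for
 * pg_stat_activity reporting):
 *
 *    WaitEventSet *set = CreateWaitEventSet(NULL, 3);
 *    WaitEvent     occurred;
 *
 *    AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
 *    AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
 *    AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);
 *
 *    if (WaitEventSetWait(set, 1000 /+ ms +/, &occurred, 1, 0) > 0 &&
 *        (occurred.events & WL_LATCH_SET))
 *        ResetLatch(MyLatch);
 *
 *    FreeWaitEventSet(set);
 */
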
#include "postgres.h"

#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#endif
#ifdef HAVE_SYS_SIGNALFD_H
#include <sys/signalfd.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif

#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "portability/instr_time.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/waiteventset.h"
#include "utils/memutils.h"
#include "utils/resowner.h"

/*
 * Select the fd readiness primitive to use. Normally the "most modern"
 * primitive supported by the OS will be used, but for testing it can be
 * useful to manually specify the used primitive.  If desired, just add a
 * define somewhere before this block.
 */
#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
    defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
/* don't overwrite manual choice */
#elif defined(HAVE_SYS_EPOLL_H)
#define WAIT_USE_EPOLL
#elif defined(HAVE_KQUEUE)
#define WAIT_USE_KQUEUE
#elif defined(HAVE_POLL)
#define WAIT_USE_POLL
#elif WIN32
#define WAIT_USE_WIN32
#else
#error "no wait set implementation available"
#endif
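
/*
 * Editor's note: following the comment above, forcing a particular primitive
 * for testing is just a matter of defining its symbol ahead of the block,
 * e.g. (hypothetically, via CPPFLAGS or a local hack):
 *
 *    #define WAIT_USE_POLL
 */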

/*
 * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
 * available.  For testing the choice can also be manually specified.
 */
#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
/* don't overwrite manual choice */
#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
#define WAIT_USE_SIGNALFD
#else
#define WAIT_USE_SELF_PIPE
#endif
#endif

/* typedef in waiteventset.h */
struct WaitEventSet
{
    ResourceOwner owner;

    int         nevents;        /* number of registered events */
    int         nevents_space;  /* maximum number of events in this set */

    /*
     * Array, of nevents_space length, storing the definition of events this
     * set is waiting for.
     */
    WaitEvent  *events;

    /*
     * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
     * said latch, and latch_pos the offset in the ->events array. This is
     * useful because we check the state of the latch before performing
     * syscalls related to waiting.
     */
    Latch      *latch;
    int         latch_pos;

    /*
     * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
     * is set so that we'll exit immediately if postmaster death is detected,
     * instead of returning.
     */
    bool        exit_on_postmaster_death;

#if defined(WAIT_USE_EPOLL)
    int         epoll_fd;
    /* epoll_wait returns events in a user-provided array; allocate once */
    struct epoll_event *epoll_ret_events;
#elif defined(WAIT_USE_KQUEUE)
    int         kqueue_fd;
    /* kevent returns events in a user-provided array; allocate once */
    struct kevent *kqueue_ret_events;
    bool        report_postmaster_not_running;
#elif defined(WAIT_USE_POLL)
    /* poll expects events to be waited on every poll() call, prepare once */
    struct pollfd *pollfds;
#elif defined(WAIT_USE_WIN32)

    /*
     * Array of windows events.  The first element always contains
     * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
     * event->pos + 1).
     */
    HANDLE     *handles;
#endif
};

#ifndef WIN32
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;
#endif

#ifdef WAIT_USE_SIGNALFD
/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
static int  signal_fd = -1;
#endif

#ifdef WAIT_USE_SELF_PIPE
/* Read and write ends of the self-pipe */
static int  selfpipe_readfd = -1;
static int  selfpipe_writefd = -1;

/* Process owning the self-pipe --- needed for checking purposes */
static int  selfpipe_owner_pid = 0;

/* Private function prototypes */
static void latch_sigurg_handler(SIGNAL_ARGS);
static void sendSelfPipeByte(void);
#endif

#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
static void drain(void);
#endif

#if defined(WAIT_USE_EPOLL)
static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
#elif defined(WAIT_USE_KQUEUE)
static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
#elif defined(WAIT_USE_POLL)
static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
#elif defined(WAIT_USE_WIN32)
static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
#endif

static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                                        WaitEvent *occurred_events, int nevents);

/* ResourceOwner support to hold WaitEventSets */
static void ResOwnerReleaseWaitEventSet(Datum res);

static const ResourceOwnerDesc wait_event_set_resowner_desc =
{
    .name = "WaitEventSet",
    .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
    .release_priority = RELEASE_PRIO_WAITEVENTSETS,
    .ReleaseResource = ResOwnerReleaseWaitEventSet,
    .DebugPrint = NULL
};

/* Convenience wrappers over ResourceOwnerRemember/Forget */
static inline void
ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
    ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}
static inline void
ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
    ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}


/*
 * Initialize the process-local wait event infrastructure.
 *
 * This must be called once during startup of any process that can wait on
 * latches, before it issues any InitLatch() or OwnLatch() calls.
 */
void
InitializeWaitEventSupport(void)
{
#if defined(WAIT_USE_SELF_PIPE)
    int         pipefd[2];

    if (IsUnderPostmaster)
    {
        /*
         * We might have inherited connections to a self-pipe created by the
         * postmaster.  It's critical that child processes create their own
         * self-pipes, of course, and we really want them to close the
         * inherited FDs for safety's sake.
         */
        if (selfpipe_owner_pid != 0)
        {
            /* Assert we go through here but once in a child process */
            Assert(selfpipe_owner_pid != MyProcPid);
            /* Release postmaster's pipe FDs; ignore any error */
            (void) close(selfpipe_readfd);
            (void) close(selfpipe_writefd);
            /* Clean up, just for safety's sake; we'll set these below */
            selfpipe_readfd = selfpipe_writefd = -1;
            selfpipe_owner_pid = 0;
            /* Keep fd.c's accounting straight */
            ReleaseExternalFD();
            ReleaseExternalFD();
        }
        else
        {
            /*
             * Postmaster didn't create a self-pipe ... or else we're in an
             * EXEC_BACKEND build, in which case it doesn't matter since the
             * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
             * fd.c won't have state to clean up, either.
             */
            Assert(selfpipe_readfd == -1);
        }
    }
    else
    {
        /* In postmaster or standalone backend, assert we do this but once */
        Assert(selfpipe_readfd == -1);
        Assert(selfpipe_owner_pid == 0);
    }

    /*
     * Set up the self-pipe that allows a signal handler to wake up the
     * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
     * that SetLatch won't block if the event has already been set many times
     * filling the kernel buffer. Make the read-end non-blocking too, so that
     * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
     * Also, make both FDs close-on-exec, since we surely do not want any
     * child processes messing with them.
     */
    if (pipe(pipefd) < 0)
        elog(FATAL, "pipe() failed: %m");
    if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
        elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
    if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
        elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
    if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
        elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
    if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
        elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");

    selfpipe_readfd = pipefd[0];
    selfpipe_writefd = pipefd[1];
    selfpipe_owner_pid = MyProcPid;

    /* Tell fd.c about these two long-lived FDs */
    ReserveExternalFD();
    ReserveExternalFD();

    pqsignal(SIGURG, latch_sigurg_handler);
#endif

#ifdef WAIT_USE_SIGNALFD
    sigset_t    signalfd_mask;

    if (IsUnderPostmaster)
    {
        /*
         * It would probably be safe to re-use the inherited signalfd since
         * signalfds only see the current process's pending signals, but it
         * seems less surprising to close it and create our own.
         */
        if (signal_fd != -1)
        {
            /* Release postmaster's signal FD; ignore any error */
            (void) close(signal_fd);
            signal_fd = -1;
            ReleaseExternalFD();
        }
    }

    /* Block SIGURG, because we'll receive it through a signalfd. */
    sigaddset(&UnBlockSig, SIGURG);

    /* Set up the signalfd to receive SIGURG notifications. */
    sigemptyset(&signalfd_mask);
    sigaddset(&signalfd_mask, SIGURG);
    signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
    if (signal_fd < 0)
        elog(FATAL, "signalfd() failed");
    ReserveExternalFD();
#endif

#ifdef WAIT_USE_KQUEUE
    /* Ignore SIGURG, because we'll receive it via kqueue. */
    pqsignal(SIGURG, SIG_IGN);
#endif
}
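
/*
 * Editor's note: the essence of the self-pipe trick set up above, as a
 * free-standing sketch (hypothetical names, plain POSIX, no postgres
 * infrastructure).  The handler converts an asynchronous signal into a byte
 * that poll() can reliably observe, closing the wakeup race:
 *
 *    static int wakeup_pipe[2];        // [0] = read end, [1] = write end
 *
 *    static void
 *    wakeup_handler(int signo)
 *    {
 *        char    b = 0;
 *
 *        (void) write(wakeup_pipe[1], &b, 1);  // async-signal-safe
 *    }
 *
 *    // In the wait loop, include the read end in the pollfd array; a byte
 *    // written even just before poll() starts makes it return immediately.
 *    struct pollfd pfd = {.fd = wakeup_pipe[0], .events = POLLIN};
 *
 *    (void) poll(&pfd, 1, -1);
 *    if (pfd.revents & POLLIN)
 *    {
 *        char    buf[64];
 *
 *        while (read(wakeup_pipe[0], buf, sizeof(buf)) > 0)  // drain; both
 *            ;                                               // ends O_NONBLOCK
 *    }
 */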

/*
 * Create a WaitEventSet with space for nevents different events to wait for.
 *
 * These events can then be efficiently waited upon together, using
 * WaitEventSetWait().
 *
 * The WaitEventSet is tracked by the given 'resowner'.  Use NULL for session
 * lifetime.
 */
WaitEventSet *
CreateWaitEventSet(ResourceOwner resowner, int nevents)
{
    WaitEventSet *set;
    char       *data;
    Size        sz = 0;

    /*
     * Use MAXALIGN size/alignment to guarantee that later uses of memory are
     * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
     * platforms, but earlier allocations like WaitEventSet and WaitEvent
     * might not be sized to guarantee that when purely using sizeof().
     */
    sz += MAXALIGN(sizeof(WaitEventSet));
    sz += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
    sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
    sz += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
    sz += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
    /* need space for the pgwin32_signal_event */
    sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
#endif

    if (resowner != NULL)
        ResourceOwnerEnlarge(resowner);

    data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);

    set = (WaitEventSet *) data;
    data += MAXALIGN(sizeof(WaitEventSet));

    set->events = (WaitEvent *) data;
    data += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
    set->epoll_ret_events = (struct epoll_event *) data;
    data += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
    set->kqueue_ret_events = (struct kevent *) data;
    data += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
    set->pollfds = (struct pollfd *) data;
    data += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
    set->handles = (HANDLE) data;
    data += MAXALIGN(sizeof(HANDLE) * nevents);
#endif

    set->latch = NULL;
    set->nevents_space = nevents;
    set->exit_on_postmaster_death = false;

    if (resowner != NULL)
    {
        ResourceOwnerRememberWaitEventSet(resowner, set);
        set->owner = resowner;
    }

#if defined(WAIT_USE_EPOLL)
    if (!AcquireExternalFD())
        elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
    set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
    if (set->epoll_fd < 0)
    {
        ReleaseExternalFD();
        elog(ERROR, "epoll_create1 failed: %m");
    }
#elif defined(WAIT_USE_KQUEUE)
    if (!AcquireExternalFD())
        elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
    set->kqueue_fd = kqueue();
    if (set->kqueue_fd < 0)
    {
        ReleaseExternalFD();
        elog(ERROR, "kqueue failed: %m");
    }
    if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
    {
        int         save_errno = errno;

        close(set->kqueue_fd);
        ReleaseExternalFD();
        errno = save_errno;
        elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
    }
    set->report_postmaster_not_running = false;
#elif defined(WAIT_USE_WIN32)

    /*
     * To handle signals while waiting, we need to add a win32 specific event.
     * We accounted for the additional event at the top of this routine. See
     * port/win32/signal.c for more details.
     *
     * Note: pgwin32_signal_event should be first to ensure that it will be
     * reported when multiple events are set.  We want to guarantee that
     * pending signals are serviced.
     */
    set->handles[0] = pgwin32_signal_event;
    StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
#endif

    return set;
}

/*
 * Free a previously created WaitEventSet.
 *
 * Note: preferably, this shouldn't have to free any resources that could be
 * inherited across an exec(). If it did, we'd likely leak those resources in
 * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
 * when the FD is created. For the Windows case, we assume that the handles
 * involved are non-inheritable.
 */
void
FreeWaitEventSet(WaitEventSet *set)
{
    if (set->owner)
    {
        ResourceOwnerForgetWaitEventSet(set->owner, set);
        set->owner = NULL;
    }

#if defined(WAIT_USE_EPOLL)
    close(set->epoll_fd);
    ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
    close(set->kqueue_fd);
    ReleaseExternalFD();
#elif defined(WAIT_USE_WIN32)
    for (WaitEvent *cur_event = set->events;
         cur_event < (set->events + set->nevents);
         cur_event++)
    {
        if (cur_event->events & WL_LATCH_SET)
        {
            /* uses the latch's HANDLE */
        }
        else if (cur_event->events & WL_POSTMASTER_DEATH)
        {
            /* uses PostmasterHandle */
        }
        else
        {
            /* Clean up the event object we created for the socket */
            WSAEventSelect(cur_event->fd, NULL, 0);
            WSACloseEvent(set->handles[cur_event->pos + 1]);
        }
    }
#endif

    pfree(set);
}

/*
 * Free a previously created WaitEventSet in a child process after a fork().
 */
void
FreeWaitEventSetAfterFork(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
    close(set->epoll_fd);
    ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
    /* kqueues are not normally inherited by child processes */
    ReleaseExternalFD();
#endif

    pfree(set);
}

/* ---
 * Add an event to the set. Possible events are:
 * - WL_LATCH_SET: Wait for the latch to be set
 * - WL_POSTMASTER_DEATH: Wait for postmaster to die
 * - WL_SOCKET_READABLE: Wait for socket to become readable,
 *   can be combined in one event with other WL_SOCKET_* events
 * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
 *   can be combined with other WL_SOCKET_* events
 * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
 *   can be combined with other WL_SOCKET_* events (on non-Windows
 *   platforms, this is the same as WL_SOCKET_WRITEABLE)
 * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
 *   can be combined with other WL_SOCKET_* events (on non-Windows
 *   platforms, this is the same as WL_SOCKET_READABLE)
 * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
 * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
 *
 * Returns the offset in WaitEventSet->events (starting from 0), which can be
 * used to modify previously added wait events using ModifyWaitEvent().
 *
 * In the WL_LATCH_SET case the latch must be owned by the current process,
 * i.e. it must be a process-local latch initialized with InitLatch, or a
 * shared latch associated with the current process by calling OwnLatch.
 *
 * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
 * conditions cause the socket to be reported as readable/writable/connected,
 * so that the caller can deal with the condition.
 *
 * The user_data pointer specified here will be set for the events returned
 * by WaitEventSetWait(), making it easy to associate additional data with
 * events.
 */
int
AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
                  void *user_data)
{
    WaitEvent  *event;

    /* not enough space */
    Assert(set->nevents < set->nevents_space);

    if (events == WL_EXIT_ON_PM_DEATH)
    {
        events = WL_POSTMASTER_DEATH;
        set->exit_on_postmaster_death = true;
    }

    if (latch)
    {
        if (latch->owner_pid != MyProcPid)
            elog(ERROR, "cannot wait on a latch owned by another process");
        if (set->latch)
            elog(ERROR, "cannot wait on more than one latch");
        if ((events & WL_LATCH_SET) != WL_LATCH_SET)
            elog(ERROR, "latch events only support being set");
    }
    else
    {
        if (events & WL_LATCH_SET)
            elog(ERROR, "cannot wait on latch without a specified latch");
    }

    /* waiting for socket readiness without a socket indicates a bug */
    if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
        elog(ERROR, "cannot wait on socket event without a socket");

    event = &set->events[set->nevents];
    event->pos = set->nevents++;
    event->fd = fd;
    event->events = events;
    event->user_data = user_data;
#ifdef WIN32
    event->reset = false;
#endif

    if (events == WL_LATCH_SET)
    {
        set->latch = latch;
        set->latch_pos = event->pos;
#if defined(WAIT_USE_SELF_PIPE)
        event->fd = selfpipe_readfd;
#elif defined(WAIT_USE_SIGNALFD)
        event->fd = signal_fd;
#else
        event->fd = PGINVALID_SOCKET;
#ifdef WAIT_USE_EPOLL
        return event->pos;
#endif
#endif
    }
    else if (events == WL_POSTMASTER_DEATH)
    {
#ifndef WIN32
        event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
#endif
    }

    /* perform wait primitive specific initialization, if needed */
#if defined(WAIT_USE_EPOLL)
    WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
#elif defined(WAIT_USE_KQUEUE)
    WaitEventAdjustKqueue(set, event, 0);
#elif defined(WAIT_USE_POLL)
    WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
    WaitEventAdjustWin32(set, event);
#endif

    return event->pos;
}
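
/*
 * Editor's note: a sketch of associating caller state with events via
 * user_data (illustrative only; 'Connection', 'conns', and 'handle_input'
 * are hypothetical):
 *
 *    for (int i = 0; i < nconns; i++)
 *        AddWaitEventToSet(set, WL_SOCKET_READABLE, conns[i]->sock,
 *                          NULL, conns[i]);
 *
 *    nready = WaitEventSetWait(set, -1, events, lengthof(events), 0);
 *    for (int i = 0; i < nready; i++)
 *    {
 *        Connection *conn = (Connection *) events[i].user_data;
 *
 *        handle_input(conn);
 *    }
 */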

/*
 * Change the event mask and, in the WL_LATCH_SET case, the latch associated
 * with the WaitEvent.  The latch may be changed to NULL to disable the latch
 * temporarily, and then set back to a latch later.
 *
 * 'pos' is the id returned by AddWaitEventToSet.
 */
void
ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
    WaitEvent  *event;
#if defined(WAIT_USE_KQUEUE)
    int         old_events;
#endif

    Assert(pos < set->nevents);

    event = &set->events[pos];
#if defined(WAIT_USE_KQUEUE)
    old_events = event->events;
#endif

    /*
     * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
     *
     * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
     * in AddWaitEventToSet(), this needs to be checked before the fast-path
     * below that checks if 'events' has changed.
     */
    if (event->events == WL_POSTMASTER_DEATH)
    {
        if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
            elog(ERROR, "cannot remove postmaster death event");
        set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
        return;
    }

    /*
     * If neither the event mask nor the associated latch changes, return
     * early. That's an important optimization for some sockets, where
     * ModifyWaitEvent is frequently used to switch from waiting for reads to
     * waiting on writes.
     */
    if (events == event->events &&
        (!(event->events & WL_LATCH_SET) || set->latch == latch))
        return;

    if (event->events & WL_LATCH_SET && events != event->events)
        elog(ERROR, "cannot modify latch event");

    /* FIXME: validate event mask */
    event->events = events;

    if (events == WL_LATCH_SET)
    {
        if (latch && latch->owner_pid != MyProcPid)
            elog(ERROR, "cannot wait on a latch owned by another process");
        set->latch = latch;

        /*
         * On Unix, we don't need to modify the kernel object because the
         * underlying pipe (if there is one) is the same for all latches so we
         * can return immediately.  On Windows, we need to update our array of
         * handles, but we leave the old one in place and tolerate spurious
         * wakeups if the latch is disabled.
         */
#if defined(WAIT_USE_WIN32)
        if (!latch)
            return;
#else
        return;
#endif
    }

#if defined(WAIT_USE_EPOLL)
    WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
#elif defined(WAIT_USE_KQUEUE)
    WaitEventAdjustKqueue(set, event, old_events);
#elif defined(WAIT_USE_POLL)
    WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
    WaitEventAdjustWin32(set, event);
#endif
}
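
/*
 * Editor's note: a sketch of the read/write switch that the fast-path above
 * is optimized for ('sock_pos' being the position returned by
 * AddWaitEventToSet for that socket; NULL latch since this is not a latch
 * event):
 *
 *    ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);  // send phase
 *    ...
 *    ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);   // back to reads
 *
 * Calling it again with an unchanged mask returns before any syscall.
 */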

#if defined(WAIT_USE_EPOLL)
/*
 * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
 */
static void
WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
{
    struct epoll_event epoll_ev;
    int         rc;

    /* pointer to our event, returned by epoll_wait */
    epoll_ev.data.ptr = event;
    /* always wait for errors */
    epoll_ev.events = EPOLLERR | EPOLLHUP;

    /* prepare pollfd entry once */
    if (event->events == WL_LATCH_SET)
    {
        Assert(set->latch != NULL);
        epoll_ev.events |= EPOLLIN;
    }
    else if (event->events == WL_POSTMASTER_DEATH)
    {
        epoll_ev.events |= EPOLLIN;
    }
    else
    {
        Assert(event->fd != PGINVALID_SOCKET);
        Assert(event->events & (WL_SOCKET_READABLE |
                                WL_SOCKET_WRITEABLE |
                                WL_SOCKET_CLOSED));

        if (event->events & WL_SOCKET_READABLE)
            epoll_ev.events |= EPOLLIN;
        if (event->events & WL_SOCKET_WRITEABLE)
            epoll_ev.events |= EPOLLOUT;
        if (event->events & WL_SOCKET_CLOSED)
            epoll_ev.events |= EPOLLRDHUP;
    }

    /*
     * Even though unused, we also pass epoll_ev as the data argument if
     * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
     * requiring that, and actually it makes the code simpler...
     */
    rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);

    if (rc < 0)
        ereport(ERROR,
                (errcode_for_socket_access(),
                 errmsg("%s() failed: %m",
                        "epoll_ctl")));
}
#endif

#if defined(WAIT_USE_POLL)
static void
WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
{
    struct pollfd *pollfd = &set->pollfds[event->pos];

    pollfd->revents = 0;
    pollfd->fd = event->fd;

    /* prepare pollfd entry once */
    if (event->events == WL_LATCH_SET)
    {
        Assert(set->latch != NULL);
        pollfd->events = POLLIN;
    }
    else if (event->events == WL_POSTMASTER_DEATH)
    {
        pollfd->events = POLLIN;
    }
    else
    {
        Assert(event->events & (WL_SOCKET_READABLE |
                                WL_SOCKET_WRITEABLE |
                                WL_SOCKET_CLOSED));
        pollfd->events = 0;
        if (event->events & WL_SOCKET_READABLE)
            pollfd->events |= POLLIN;
        if (event->events & WL_SOCKET_WRITEABLE)
            pollfd->events |= POLLOUT;
#ifdef POLLRDHUP
        if (event->events & WL_SOCKET_CLOSED)
            pollfd->events |= POLLRDHUP;
#endif
    }

    Assert(event->fd != PGINVALID_SOCKET);
}
#endif

#if defined(WAIT_USE_KQUEUE)

/*
 * On most BSD family systems, the udata member of struct kevent is of type
 * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
 * NetBSD has it as intptr_t, so here we wallpaper over that difference with
 * an lvalue cast.
 */
#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))

static inline void
WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
                         WaitEvent *event)
{
    k_ev->ident = event->fd;
    k_ev->filter = filter;
    k_ev->flags = action;
    k_ev->fflags = 0;
    k_ev->data = 0;
    AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
{
    /* For now postmaster death can only be added, not removed. */
    k_ev->ident = PostmasterPid;
    k_ev->filter = EVFILT_PROC;
    k_ev->flags = EV_ADD;
    k_ev->fflags = NOTE_EXIT;
    k_ev->data = 0;
    AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
{
    /* For now latch can only be added, not removed. */
    k_ev->ident = SIGURG;
    k_ev->filter = EVFILT_SIGNAL;
    k_ev->flags = EV_ADD;
    k_ev->fflags = 0;
    k_ev->data = 0;
    AccessWaitEvent(k_ev) = event;
}

/*
 * old_events is the previous event mask, used to compute what has changed.
 */
static void
WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
{
    int         rc;
    struct kevent k_ev[2];
    int         count = 0;
    bool        new_filt_read = false;
    bool        old_filt_read = false;
    bool        new_filt_write = false;
    bool        old_filt_write = false;

    if (old_events == event->events)
        return;

    Assert(event->events != WL_LATCH_SET || set->latch != NULL);
    Assert(event->events == WL_LATCH_SET ||
           event->events == WL_POSTMASTER_DEATH ||
           (event->events & (WL_SOCKET_READABLE |
                             WL_SOCKET_WRITEABLE |
                             WL_SOCKET_CLOSED)));

    if (event->events == WL_POSTMASTER_DEATH)
    {
        /*
         * Unlike all the other implementations, we detect postmaster death
         * using process notification instead of waiting on the postmaster
         * alive pipe.
         */
        WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
    }
    else if (event->events == WL_LATCH_SET)
    {
        /* We detect latch wakeup using a signal event. */
        WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
    }
    else
    {
        /*
         * We need to compute the adds and deletes required to get from the
         * old event mask to the new event mask, since kevent treats readable
         * and writable as separate events.
         */
        if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
            old_filt_read = true;
        if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
            new_filt_read = true;
        if (old_events & WL_SOCKET_WRITEABLE)
            old_filt_write = true;
        if (event->events & WL_SOCKET_WRITEABLE)
            new_filt_write = true;
        if (old_filt_read && !new_filt_read)
            WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
                                     event);
        else if (!old_filt_read && new_filt_read)
            WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
                                     event);
        if (old_filt_write && !new_filt_write)
            WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
                                     event);
        else if (!old_filt_write && new_filt_write)
            WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
                                     event);
    }

    /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
    if (count == 0)
        return;

    Assert(count <= 2);

    rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);

    /*
     * When adding the postmaster's pid, we have to consider that it might
     * already have exited and perhaps even been replaced by another process
     * with the same pid.  If so, we have to defer reporting this as an event
     * until the next call to WaitEventSetWaitBlock().
     */

    if (rc < 0)
    {
        if (event->events == WL_POSTMASTER_DEATH &&
            (errno == ESRCH || errno == EACCES))
            set->report_postmaster_not_running = true;
        else
            ereport(ERROR,
                    (errcode_for_socket_access(),
                     errmsg("%s() failed: %m",
                            "kevent")));
    }
    else if (event->events == WL_POSTMASTER_DEATH &&
             PostmasterPid != getppid() &&
             !PostmasterIsAlive())
    {
        /*
         * The extra PostmasterIsAliveInternal() check prevents false alarms
         * on systems that give a different value for getppid() while being
         * traced by a debugger.
         */
        set->report_postmaster_not_running = true;
    }
}

#endif

#if defined(WAIT_USE_WIN32)
static void
WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
{
    HANDLE     *handle = &set->handles[event->pos + 1];

    if (event->events == WL_LATCH_SET)
    {
        Assert(set->latch != NULL);
        *handle = set->latch->event;
    }
    else if (event->events == WL_POSTMASTER_DEATH)
    {
        *handle = PostmasterHandle;
    }
    else
    {
        int         flags = FD_CLOSE;   /* always check for errors/EOF */

        if (event->events & WL_SOCKET_READABLE)
            flags |= FD_READ;
        if (event->events & WL_SOCKET_WRITEABLE)
            flags |= FD_WRITE;
        if (event->events & WL_SOCKET_CONNECTED)
            flags |= FD_CONNECT;
        if (event->events & WL_SOCKET_ACCEPT)
            flags |= FD_ACCEPT;

        if (*handle == WSA_INVALID_EVENT)
        {
            *handle = WSACreateEvent();
            if (*handle == WSA_INVALID_EVENT)
                elog(ERROR, "failed to create event for socket: error code %d",
                     WSAGetLastError());
        }
        if (WSAEventSelect(event->fd, *handle, flags) != 0)
            elog(ERROR, "failed to set up event for socket: error code %d",
                 WSAGetLastError());

        Assert(event->fd != PGINVALID_SOCKET);
    }
}
#endif

/*
 * Wait for events added to the set to happen, or until the timeout is
 * reached.  At most nevents occurred events are returned.
 *
 * If timeout = -1, block until an event occurs; if 0, check sockets for
 * readiness, but don't block; if > 0, block for at most timeout milliseconds.
 *
 * Returns the number of events occurred, or 0 if the timeout was reached.
 *
 * Returned events will have the fd, pos, user_data fields set to the
 * values associated with the registered event.
 */
int
WaitEventSetWait(WaitEventSet *set, long timeout,
                 WaitEvent *occurred_events, int nevents,
                 uint32 wait_event_info)
{
    int         returned_events = 0;
    instr_time  start_time;
    instr_time  cur_time;
    long        cur_timeout = -1;

    Assert(nevents > 0);

    /*
     * Initialize timeout if requested.  We must record the current time so
     * that we can determine the remaining timeout if interrupted.
     */
    if (timeout >= 0)
    {
        INSTR_TIME_SET_CURRENT(start_time);
        Assert(timeout >= 0 && timeout <= INT_MAX);
        cur_timeout = timeout;
    }
    else
        INSTR_TIME_SET_ZERO(start_time);

    pgstat_report_wait_start(wait_event_info);

#ifndef WIN32
    waiting = true;
#else
    /* Ensure that signals are serviced even if latch is already set */
    pgwin32_dispatch_queued_signals();
#endif
    while (returned_events == 0)
    {
        int         rc;

        /*
         * Check if the latch is set already first.  If so, we either exit
         * immediately or ask the kernel for further events available right
         * now without waiting, depending on how many events the caller wants.
         *
         * If someone sets the latch between this and the
         * WaitEventSetWaitBlock() below, the setter will write a byte to the
         * pipe (or signal us and the signal handler will do that), and the
         * readiness routine will return immediately.
         *
         * On unix, if there's a pending byte in the self pipe, we'll notice
         * whenever blocking.  Only clearing the pipe in that case avoids
         * having to drain it every time WaitLatchOrSocket() is used.  Should
         * the pipe-buffer fill up we're still ok, because the pipe is in
         * nonblocking mode.  It's unlikely for that to happen, because the
         * self pipe isn't filled unless we're blocking (waiting = true), or
         * from inside a signal handler in latch_sigurg_handler().
         *
         * On windows, we'll also notice if there's a pending event for the
         * latch when blocking, but there's no danger of anything filling up,
         * as "Setting an event that is already set has no effect.".
         *
         * Note: we assume that the kernel calls involved in latch management
         * will provide adequate synchronization on machines with weak memory
         * ordering, so that we cannot miss seeing is_set if a notification
         * has already been queued.
         */
        if (set->latch && !set->latch->is_set)
        {
            /* about to sleep on a latch */
            set->latch->maybe_sleeping = true;
            pg_memory_barrier();
            /* and recheck */
        }

        if (set->latch && set->latch->is_set)
        {
            occurred_events->fd = PGINVALID_SOCKET;
            occurred_events->pos = set->latch_pos;
            occurred_events->user_data =
                set->events[set->latch_pos].user_data;
            occurred_events->events = WL_LATCH_SET;
            occurred_events++;
            returned_events++;

            /* could have been set above */
            set->latch->maybe_sleeping = false;

            if (returned_events == nevents)
                break;          /* output buffer full already */

            /*
             * Even though we already have an event, we'll poll just once with
             * zero timeout to see what non-latch events we can fit into the
             * output buffer at the same time.
             */
            cur_timeout = 0;
            timeout = 0;
        }

        /*
         * Wait for events using the readiness primitive chosen at the top of
         * this file. If -1 is returned, a timeout has occurred, if 0 we have
         * to retry, everything >= 1 is the number of returned events.
         */
        rc = WaitEventSetWaitBlock(set, cur_timeout,
                                   occurred_events, nevents - returned_events);

        if (set->latch &&
            set->latch->maybe_sleeping)
            set->latch->maybe_sleeping = false;

        if (rc == -1)
            break;              /* timeout occurred */
        else
            returned_events += rc;

        /* If we're not done, update cur_timeout for next iteration */
        if (returned_events == 0 && timeout >= 0)
        {
            INSTR_TIME_SET_CURRENT(cur_time);
            INSTR_TIME_SUBTRACT(cur_time, start_time);
            cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
            if (cur_timeout <= 0)
                break;
        }
    }
#ifndef WIN32
    waiting = false;
#endif

    pgstat_report_wait_end();

    return returned_events;
}
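
/*
 * Editor's note: a sketch of the canonical caller loop around
 * WaitEventSetWait(), assuming a set containing MyLatch and one socket
 * (0 stands in for a real WAIT_EVENT_* constant):
 *
 *    for (;;)
 *    {
 *        WaitEvent   event;
 *        int         nready;
 *
 *        nready = WaitEventSetWait(set, 10000 /+ ms +/, &event, 1, 0);
 *        if (nready == 0)
 *            continue;                       // timeout, nothing to do
 *        if (event.events & WL_LATCH_SET)
 *        {
 *            ResetLatch(MyLatch);            // consume wakeup, then recheck
 *            CHECK_FOR_INTERRUPTS();         // whatever state it signaled
 *        }
 *        if (event.events & WL_SOCKET_READABLE)
 *            ;                               // read from event.fd
 *    }
 */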


#if defined(WAIT_USE_EPOLL)

/*
 * Wait using linux's epoll_wait(2).
 *
 * This is the preferable wait method, as several readiness notifications are
 * delivered, without having to iterate through all of set->events.  The
 * returned epoll_event structs contain a pointer to our events, making
 * association easy.
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                      WaitEvent *occurred_events, int nevents)
{
    int         returned_events = 0;
    int         rc;
    WaitEvent  *cur_event;
    struct epoll_event *cur_epoll_event;

    /* Sleep */
    rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
                    Min(nevents, set->nevents_space), cur_timeout);

    /* Check return code */
    if (rc < 0)
    {
        /* EINTR is okay, otherwise complain */
        if (errno != EINTR)
        {
            waiting = false;
            ereport(ERROR,
                    (errcode_for_socket_access(),
                     errmsg("%s() failed: %m",
                            "epoll_wait")));
        }
        return 0;
    }
    else if (rc == 0)
    {
        /* timeout exceeded */
        return -1;
    }

    /*
     * At least one event occurred, iterate over the returned epoll events
     * until they're either all processed, or we've returned all the events
     * the caller desired.
     */
    for (cur_epoll_event = set->epoll_ret_events;
         cur_epoll_event < (set->epoll_ret_events + rc) &&
         returned_events < nevents;
         cur_epoll_event++)
    {
        /* epoll's data pointer is set to the associated WaitEvent */
        cur_event = (WaitEvent *) cur_epoll_event->data.ptr;

        occurred_events->pos = cur_event->pos;
        occurred_events->user_data = cur_event->user_data;
        occurred_events->events = 0;

        if (cur_event->events == WL_LATCH_SET &&
            cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
        {
            /* Drain the signalfd. */
            drain();

            if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
            {
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_LATCH_SET;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events == WL_POSTMASTER_DEATH &&
                 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
        {
            /*
             * We expect an EPOLLHUP when the remote end is closed, but
             * because we don't expect the pipe to become readable or to have
             * any errors either, treat those cases as postmaster death, too.
             *
             * Be paranoid about a spurious event signaling the postmaster as
             * being dead.  There have been reports about that happening with
             * older primitives (select(2) to be specific), and a spurious
             * WL_POSTMASTER_DEATH event would be painful.  Re-checking
             * doesn't cost much.
             */
            if (!PostmasterIsAlive())
            {
                if (set->exit_on_postmaster_death)
                    proc_exit(1);
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_POSTMASTER_DEATH;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events & (WL_SOCKET_READABLE |
                                      WL_SOCKET_WRITEABLE |
                                      WL_SOCKET_CLOSED))
        {
            Assert(cur_event->fd != PGINVALID_SOCKET);

            if ((cur_event->events & WL_SOCKET_READABLE) &&
                (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
            {
                /* data available in socket, or EOF */
                occurred_events->events |= WL_SOCKET_READABLE;
            }

            if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
                (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
            {
                /* writable, or EOF */
                occurred_events->events |= WL_SOCKET_WRITEABLE;
            }

            if ((cur_event->events & WL_SOCKET_CLOSED) &&
                (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
            {
                /* remote peer shut down, or error */
                occurred_events->events |= WL_SOCKET_CLOSED;
            }

            if (occurred_events->events != 0)
            {
                occurred_events->fd = cur_event->fd;
                occurred_events++;
                returned_events++;
            }
        }
    }

    return returned_events;
}

#elif defined(WAIT_USE_KQUEUE)

/*
 * Wait using kevent(2) on BSD-family systems and macOS.
 *
 * For now this mirrors the epoll code, but in future it could modify the fd
 * set in the same call to kevent as it uses for waiting instead of doing that
 * with separate system calls.
 */
static int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                      WaitEvent *occurred_events, int nevents)
{
    int         returned_events = 0;
    int         rc;
    WaitEvent  *cur_event;
    struct kevent *cur_kqueue_event;
    struct timespec timeout;
    struct timespec *timeout_p;

    if (cur_timeout < 0)
        timeout_p = NULL;
    else
    {
        timeout.tv_sec = cur_timeout / 1000;
        timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
        timeout_p = &timeout;
    }

    /*
     * Report postmaster events discovered by WaitEventAdjustKqueue() or an
     * earlier call to WaitEventSetWait().
     */
    if (unlikely(set->report_postmaster_not_running))
    {
        if (set->exit_on_postmaster_death)
            proc_exit(1);
        occurred_events->fd = PGINVALID_SOCKET;
        occurred_events->events = WL_POSTMASTER_DEATH;
        return 1;
    }

    /* Sleep */
    rc = kevent(set->kqueue_fd, NULL, 0,
                set->kqueue_ret_events,
                Min(nevents, set->nevents_space),
                timeout_p);

    /* Check return code */
    if (rc < 0)
    {
        /* EINTR is okay, otherwise complain */
        if (errno != EINTR)
        {
            waiting = false;
            ereport(ERROR,
                    (errcode_for_socket_access(),
                     errmsg("%s() failed: %m",
                            "kevent")));
        }
        return 0;
    }
    else if (rc == 0)
    {
        /* timeout exceeded */
        return -1;
    }

    /*
     * At least one event occurred, iterate over the returned kqueue events
     * until they're either all processed, or we've returned all the events
     * the caller desired.
     */
    for (cur_kqueue_event = set->kqueue_ret_events;
         cur_kqueue_event < (set->kqueue_ret_events + rc) &&
         returned_events < nevents;
         cur_kqueue_event++)
    {
        /* kevent's udata points to the associated WaitEvent */
        cur_event = AccessWaitEvent(cur_kqueue_event);

        occurred_events->pos = cur_event->pos;
        occurred_events->user_data = cur_event->user_data;
        occurred_events->events = 0;

        if (cur_event->events == WL_LATCH_SET &&
            cur_kqueue_event->filter == EVFILT_SIGNAL)
        {
            if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
            {
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_LATCH_SET;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events == WL_POSTMASTER_DEATH &&
                 cur_kqueue_event->filter == EVFILT_PROC &&
                 (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
        {
            /*
             * The kernel will tell this kqueue object only once about the
             * exit of the postmaster, so let's remember that for next time so
             * that we provide level-triggered semantics.
             */
            set->report_postmaster_not_running = true;

            if (set->exit_on_postmaster_death)
                proc_exit(1);
            occurred_events->fd = PGINVALID_SOCKET;
            occurred_events->events = WL_POSTMASTER_DEATH;
            occurred_events++;
            returned_events++;
        }
        else if (cur_event->events & (WL_SOCKET_READABLE |
                                      WL_SOCKET_WRITEABLE |
                                      WL_SOCKET_CLOSED))
        {
            Assert(cur_event->fd >= 0);

            if ((cur_event->events & WL_SOCKET_READABLE) &&
                (cur_kqueue_event->filter == EVFILT_READ))
            {
                /* readable, or EOF */
                occurred_events->events |= WL_SOCKET_READABLE;
            }

            if ((cur_event->events & WL_SOCKET_CLOSED) &&
                (cur_kqueue_event->filter == EVFILT_READ) &&
                (cur_kqueue_event->flags & EV_EOF))
            {
                /* the remote peer has shut down */
                occurred_events->events |= WL_SOCKET_CLOSED;
            }

            if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
                (cur_kqueue_event->filter == EVFILT_WRITE))
            {
                /* writable, or EOF */
                occurred_events->events |= WL_SOCKET_WRITEABLE;
            }

            if (occurred_events->events != 0)
            {
                occurred_events->fd = cur_event->fd;
                occurred_events++;
                returned_events++;
            }
        }
    }

    return returned_events;
}

#elif defined(WAIT_USE_POLL)

/*
 * Wait using poll(2).
 *
 * This allows receiving readiness notifications for several events at once,
 * but requires iterating through all of set->pollfds.
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                      WaitEvent *occurred_events, int nevents)
{
    int         returned_events = 0;
    int         rc;
    WaitEvent  *cur_event;
    struct pollfd *cur_pollfd;

    /* Sleep */
    rc = poll(set->pollfds, set->nevents, (int) cur_timeout);

    /* Check return code */
    if (rc < 0)
    {
        /* EINTR is okay, otherwise complain */
        if (errno != EINTR)
        {
            waiting = false;
            ereport(ERROR,
                    (errcode_for_socket_access(),
                     errmsg("%s() failed: %m",
                            "poll")));
        }
        return 0;
    }
    else if (rc == 0)
    {
        /* timeout exceeded */
        return -1;
    }

    for (cur_event = set->events, cur_pollfd = set->pollfds;
         cur_event < (set->events + set->nevents) &&
         returned_events < nevents;
         cur_event++, cur_pollfd++)
    {
        /* no activity on this FD, skip */
        if (cur_pollfd->revents == 0)
            continue;

        occurred_events->pos = cur_event->pos;
        occurred_events->user_data = cur_event->user_data;
        occurred_events->events = 0;

        if (cur_event->events == WL_LATCH_SET &&
            (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
        {
            /* There's data in the self-pipe, clear it. */
            drain();

            if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
            {
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_LATCH_SET;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events == WL_POSTMASTER_DEATH &&
                 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
        {
            /*
             * We expect a POLLHUP when the remote end is closed, but because
             * we don't expect the pipe to become readable or to have any
             * errors either, treat those cases as postmaster death, too.
             *
             * Be paranoid about a spurious event signaling the postmaster as
             * being dead.  There have been reports about that happening with
             * older primitives (select(2) to be specific), and a spurious
             * WL_POSTMASTER_DEATH event would be painful.  Re-checking
             * doesn't cost much.
             */
            if (!PostmasterIsAlive())
            {
                if (set->exit_on_postmaster_death)
                    proc_exit(1);
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_POSTMASTER_DEATH;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events & (WL_SOCKET_READABLE |
                                      WL_SOCKET_WRITEABLE |
                                      WL_SOCKET_CLOSED))
        {
            int         errflags = POLLHUP | POLLERR | POLLNVAL;

            Assert(cur_event->fd >= PGINVALID_SOCKET);

            if ((cur_event->events & WL_SOCKET_READABLE) &&
                (cur_pollfd->revents & (POLLIN | errflags)))
            {
                /* data available in socket, or EOF */
                occurred_events->events |= WL_SOCKET_READABLE;
            }

            if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
                (cur_pollfd->revents & (POLLOUT | errflags)))
            {
                /* writeable, or EOF */
                occurred_events->events |= WL_SOCKET_WRITEABLE;
            }

#ifdef POLLRDHUP
            if ((cur_event->events & WL_SOCKET_CLOSED) &&
                (cur_pollfd->revents & (POLLRDHUP | errflags)))
            {
                /* remote peer closed, or error */
                occurred_events->events |= WL_SOCKET_CLOSED;
            }
#endif

            if (occurred_events->events != 0)
            {
                occurred_events->fd = cur_event->fd;
                occurred_events++;
                returned_events++;
            }
        }
    }
    return returned_events;
}

#elif defined(WAIT_USE_WIN32)

/*
 * Wait using Windows' WaitForMultipleObjects().  Each call only "consumes"
 * one event, so we keep calling until we've filled up our output buffer to
 * match the behavior of the other implementations.
 *
 * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                      WaitEvent *occurred_events, int nevents)
{
    int         returned_events = 0;
    DWORD       rc;
    WaitEvent  *cur_event;

    /* Reset any wait events that need it */
    for (cur_event = set->events;
         cur_event < (set->events + set->nevents);
         cur_event++)
    {
        if (cur_event->reset)
        {
            WaitEventAdjustWin32(set, cur_event);
            cur_event->reset = false;
        }

        /*
         * We associate the socket with a new event handle for each
         * WaitEventSet.  FD_CLOSE is only generated once if the other end
         * closes gracefully.  Therefore we might miss the FD_CLOSE
         * notification, if it was delivered to another event after we stopped
         * waiting for it.  Close that race by peeking for EOF after setting
         * up this handle to receive notifications, and before entering the
         * sleep.
         *
         * XXX If we had one event handle for the lifetime of a socket, we
         * wouldn't need this.
         */
        if (cur_event->events & WL_SOCKET_READABLE)
        {
            char        c;
            WSABUF      buf;
            DWORD       received;
            DWORD       flags;

            buf.buf = &c;
            buf.len = 1;
            flags = MSG_PEEK;
            if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
            {
                occurred_events->pos = cur_event->pos;
                occurred_events->user_data = cur_event->user_data;
                occurred_events->events = WL_SOCKET_READABLE;
                occurred_events->fd = cur_event->fd;
                return 1;
            }
        }

        /*
         * Windows does not guarantee to log an FD_WRITE network event
         * indicating that more data can be sent unless the previous send()
         * failed with WSAEWOULDBLOCK.  While our caller might well have made
         * such a call, we cannot assume that here.  Therefore, if waiting for
         * write-ready, force the issue by doing a dummy send().  If the dummy
         * send() succeeds, assume that the socket is in fact write-ready, and
         * return immediately.  Also, if it fails with something other than
         * WSAEWOULDBLOCK, return a write-ready indication to let our caller
         * deal with the error condition.
         */
        if (cur_event->events & WL_SOCKET_WRITEABLE)
        {
            char        c;
            WSABUF      buf;
            DWORD       sent;
            int         r;

            buf.buf = &c;
            buf.len = 0;

            r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
            if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
            {
                occurred_events->pos = cur_event->pos;
                occurred_events->user_data = cur_event->user_data;
                occurred_events->events = WL_SOCKET_WRITEABLE;
                occurred_events->fd = cur_event->fd;
                return 1;
            }
        }
    }

    /*
     * Sleep.
     *
     * Need to wait for ->nevents + 1, because signal handle is in [0].
     */
    rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
                                cur_timeout);

    /* Check return code */
    if (rc == WAIT_FAILED)
        elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
             GetLastError());
    else if (rc == WAIT_TIMEOUT)
    {
        /* timeout exceeded */
        return -1;
    }

    if (rc == WAIT_OBJECT_0)
    {
        /* Service newly-arrived signals */
        pgwin32_dispatch_queued_signals();
        return 0;               /* retry */
    }

    /*
     * With an offset of one, due to the always present pgwin32_signal_event,
     * the handle offset directly corresponds to a wait event.
     */
    cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];

    for (;;)
    {
        int         next_pos;
        int         count;

        occurred_events->pos = cur_event->pos;
        occurred_events->user_data = cur_event->user_data;
        occurred_events->events = 0;

        if (cur_event->events == WL_LATCH_SET)
        {
            /*
             * We cannot use set->latch->event to reset the fired event if we
             * aren't waiting on this latch now.
             */
            if (!ResetEvent(set->handles[cur_event->pos + 1]))
                elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());

            if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
            {
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_LATCH_SET;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events == WL_POSTMASTER_DEATH)
        {
            /*
             * Postmaster apparently died.  Since the consequences of falsely
             * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
             * take the trouble to positively verify this with
             * PostmasterIsAlive(), even though there is no known reason to
             * think that the event could be falsely set on Windows.
             */
            if (!PostmasterIsAlive())
            {
                if (set->exit_on_postmaster_death)
                    proc_exit(1);
                occurred_events->fd = PGINVALID_SOCKET;
                occurred_events->events = WL_POSTMASTER_DEATH;
                occurred_events++;
                returned_events++;
            }
        }
        else if (cur_event->events & WL_SOCKET_MASK)
        {
            WSANETWORKEVENTS resEvents;
            HANDLE      handle = set->handles[cur_event->pos + 1];

            Assert(cur_event->fd);

            occurred_events->fd = cur_event->fd;

            ZeroMemory(&resEvents, sizeof(resEvents));
            if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
                elog(ERROR, "failed to enumerate network events: error code %d",
                     WSAGetLastError());
            if ((cur_event->events & WL_SOCKET_READABLE) &&
                (resEvents.lNetworkEvents & FD_READ))
            {
                /* data available in socket */
                occurred_events->events |= WL_SOCKET_READABLE;

                /*------
                 * WaitForMultipleObjects doesn't guarantee that a read event
                 * will be returned if the latch is set at the same time.  Even
                 * if it did, the caller might drop that event expecting it to
                 * reoccur on next call.  So, we must force the event to be
                 * reset if this WaitEventSet is used again in order to avoid
                 * an indefinite hang.
                 *
                 * Refer
                 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
                 * for the behavior of socket events.
                 *------
                 */
                cur_event->reset = true;
            }
            if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
                (resEvents.lNetworkEvents & FD_WRITE))
            {
                /* writeable */
                occurred_events->events |= WL_SOCKET_WRITEABLE;
            }
            if ((cur_event->events & WL_SOCKET_CONNECTED) &&
                (resEvents.lNetworkEvents & FD_CONNECT))
            {
                /* connected */
                occurred_events->events |= WL_SOCKET_CONNECTED;
            }
            if ((cur_event->events & WL_SOCKET_ACCEPT) &&
                (resEvents.lNetworkEvents & FD_ACCEPT))
            {
                /* incoming connection could be accepted */
                occurred_events->events |= WL_SOCKET_ACCEPT;
            }
            if (resEvents.lNetworkEvents & FD_CLOSE)
            {
                /* EOF/error, so signal all caller-requested socket flags */
                occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
            }

            if (occurred_events->events != 0)
            {
                occurred_events++;
                returned_events++;
            }
        }

        /* Is the output buffer full? */
        if (returned_events == nevents)
            break;

        /* Have we run out of possible events? */
        next_pos = cur_event->pos + 1;
        if (next_pos == set->nevents)
            break;

        /*
         * Poll the rest of the event handles in the array starting at
         * next_pos being careful to skip over the initial signal handle too.
         * This time we use a zero timeout.
         */
        count = set->nevents - next_pos;
        rc = WaitForMultipleObjects(count,
                                    set->handles + 1 + next_pos,
                                    false,
                                    0);

        /*
         * We don't distinguish between errors and WAIT_TIMEOUT here because
         * we already have events to report.
         */
        if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
            break;

        /* We have another event to decode. */
        cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
    }

    return returned_events;
}
#endif

/*
 * Return whether the current build options can report WL_SOCKET_CLOSED.
 */
bool
WaitEventSetCanReportClosed(void)
{
#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
    defined(WAIT_USE_EPOLL) || \
    defined(WAIT_USE_KQUEUE)
    return true;
#else
    return false;
#endif
}
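
/*
 * Editor's note: callers are expected to guard WL_SOCKET_CLOSED registration
 * with this capability check, along the lines of:
 *
 *    if (WaitEventSetCanReportClosed())
 *        AddWaitEventToSet(set, WL_SOCKET_CLOSED, sock, NULL, NULL);
 *    else
 *        ... fall back to periodically probing the socket ...
 */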

/*
 * Get the number of wait events registered in a given WaitEventSet.
 */
int
GetNumRegisteredWaitEvents(WaitEventSet *set)
{
    return set->nevents;
}

#if defined(WAIT_USE_SELF_PIPE)

/*
 * SetLatch uses SIGURG to wake up the process waiting on the latch.
 *
 * Wake up WaitLatch, if we're waiting.
 */
static void
latch_sigurg_handler(SIGNAL_ARGS)
{
    if (waiting)
        sendSelfPipeByte();
}

/* Send one byte to the self-pipe, to wake up WaitLatch */
static void
sendSelfPipeByte(void)
{
    int         rc;
    char        dummy = 0;

retry:
    rc = write(selfpipe_writefd, &dummy, 1);
    if (rc < 0)
    {
        /* If interrupted by signal, just retry */
        if (errno == EINTR)
            goto retry;

        /*
         * If the pipe is full, we don't need to retry, the data that's there
         * already is enough to wake up WaitLatch.
         */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            return;

        /*
         * Oops, the write() failed for some other reason.  We might be in a
         * signal handler, so it's not safe to elog().  We have no choice but
         * to silently ignore the error.
         */
        return;
    }
}

#endif

#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)

/*
 * Read all available data from self-pipe or signalfd.
 *
 * Note: this is only called when waiting = true.  If it fails and doesn't
 * return, it must reset that flag first (though ideally, this will never
 * happen).
 */
static void
drain(void)
{
    char        buf[1024];
    int         rc;
    int         fd;

#ifdef WAIT_USE_SELF_PIPE
    fd = selfpipe_readfd;
#else
    fd = signal_fd;
#endif

    for (;;)
    {
        rc = read(fd, buf, sizeof(buf));
        if (rc < 0)
        {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          /* the descriptor is empty */
            else if (errno == EINTR)
                continue;       /* retry */
            else
            {
                waiting = false;
#ifdef WAIT_USE_SELF_PIPE
                elog(ERROR, "read() on self-pipe failed: %m");
#else
                elog(ERROR, "read() on signalfd failed: %m");
#endif
            }
        }
        else if (rc == 0)
        {
            waiting = false;
#ifdef WAIT_USE_SELF_PIPE
            elog(ERROR, "unexpected EOF on self-pipe");
#else
            elog(ERROR, "unexpected EOF on signalfd");
#endif
        }
        else if (rc < sizeof(buf))
        {
            /* we successfully drained the pipe; no need to read() again */
            break;
        }
        /* else buffer wasn't big enough, so read again */
    }
}

#endif
1994
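On WAIT_USE_SIGNALFD builds, the signal_fd drained above must be a non-blocking signalfd with SIGURG blocked at the process level, or the read() loop could hang once the descriptor is empty. A minimal sketch of that setup under those assumptions (the real code lives in InitializeWaitEventSupport()):

    /* Sketch only: a non-blocking signalfd that receives SIGURG. */
    sigset_t    mask;

    sigemptyset(&mask);
    sigaddset(&mask, SIGURG);
    sigprocmask(SIG_BLOCK, &mask, NULL);    /* deliver via the fd, not a handler */
    signal_fd = signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
    if (signal_fd < 0)
        elog(FATAL, "signalfd() failed: %m");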
1995 static void
1996 ResOwnerReleaseWaitEventSet(Datum res)
1997 {
1998 WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
1999
2000 Assert(set->owner != NULL);
2001 set->owner = NULL;
2002 FreeWaitEventSet(set);
2003}
2004
2005#ifndef WIN32
2006/*
2007 * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
2008 *
2009 * NB: when calling this from a signal handler, be sure to save and restore
2010 * errno around it. (That's standard practice in most signal handlers, of
2011 * course, but we used to omit it in handlers that only set a flag.)
2012 *
2013 * NB: this function is called from critical sections and signal handlers so
2014 * throwing an error is not a good idea.
2015 *
2016 * On Windows, Latch uses SetEvent directly and this is not used.
2017 */
2018 void
2019 WakeupMyProc(void)
2020 {
2021#if defined(WAIT_USE_SELF_PIPE)
2022 if (waiting)
2023 sendSelfPipeByte();
2024#else
2025 if (waiting)
2026 kill(MyProcPid, SIGURG);
2027#endif
2028}
2029
2030/* Similar to WakeupMyProc, but wake up another process */
2031 void
2032 WakeupOtherProc(int pid)
2033 {
2034 kill(pid, SIGURG);
2035}
2036#endif
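To make the errno caveat above concrete, here is a hedged sketch of calling WakeupMyProc() from a signal handler; example_handler is a hypothetical name, not part of this file:

    /* Sketch only: save/restore errno around the wakeup call. */
    static void
    example_handler(SIGNAL_ARGS)
    {
        int     save_errno = errno;

        /* ... set whatever flag this handler exists to set ... */
        WakeupMyProc();         /* write()/kill() inside may clobber errno */

        errno = save_errno;
    }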