waiteventset.c
1/*-------------------------------------------------------------------------
2 *
3 * waiteventset.c
4 * ppoll()/pselect() like abstraction
5 *
6 * WaitEvents are an abstraction for waiting for one or more events at a time.
7 * The waiting can be done in a race-free fashion, similar to ppoll() or
8 * pselect() (as opposed to plain poll()/select()).
9 *
10 * You can wait for:
11 * - a latch being set from another process or from a signal handler in the same
12 * process (WL_LATCH_SET)
13 * - data to become readable or writeable on a socket (WL_SOCKET_*)
14 * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
15 * - timeout (WL_TIMEOUT)
16 *
17 * Implementation
18 * --------------
19 *
20 * The poll() implementation uses the so-called self-pipe trick to overcome the
21 * race condition involved with poll() and setting a global flag in the signal
22 * handler. When a latch is set and the current process is waiting for it, the
23 * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
24 * A signal by itself doesn't interrupt poll() on all platforms, and even on
25 * platforms where it does, a signal that arrives just before the poll() call
26 * does not prevent poll() from entering sleep. An incoming byte on a pipe
27 * however reliably interrupts the sleep, and causes poll() to return
28 * immediately even if the signal arrives before poll() begins.
29 *
30 * The epoll() implementation overcomes the race with a different technique: it
31 * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
32 * don't need to register a signal handler or create our own self-pipe. We
33 * assume that any system that has Linux epoll() also has Linux signalfd().
34 *
35 * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
36 *
37 * The Windows implementation uses Windows events that are inherited by all
38 * postmaster child processes. There's no need for the self-pipe trick there.
39 *
40 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
41 * Portions Copyright (c) 1994, Regents of the University of California
42 *
43 * IDENTIFICATION
44 * src/backend/storage/ipc/waiteventset.c
45 *
46 *-------------------------------------------------------------------------
47 */
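
As a quick orientation before the implementation, here is an illustrative usage sketch of the API this file provides (not part of waiteventset.c itself); MyLatch, CurrentResourceOwner, PG_WAIT_EXTENSION, ResetLatch() and lengthof() are the usual backend globals and helpers, and 'sock' stands in for some caller-supplied socket:

/* Illustrative sketch only: simplified error handling, 'sock' assumed. */
WaitEventSet *set = CreateWaitEventSet(CurrentResourceOwner, 3);
WaitEvent occurred[3];

AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

for (;;)
{
	int nevents = WaitEventSetWait(set, 1000 /* ms */, occurred,
								   lengthof(occurred), PG_WAIT_EXTENSION);

	if (nevents == 0)
		continue;			/* timeout expired, nothing happened */
	if (occurred[0].events & WL_LATCH_SET)
		ResetLatch(MyLatch);
	break;
}
FreeWaitEventSet(set);
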
48#include "postgres.h"
49
50#include <fcntl.h>
51#include <limits.h>
52#include <signal.h>
53#include <unistd.h>
54#ifdef HAVE_SYS_EPOLL_H
55#include <sys/epoll.h>
56#endif
57#ifdef HAVE_SYS_EVENT_H
58#include <sys/event.h>
59#endif
60#ifdef HAVE_SYS_SIGNALFD_H
61#include <sys/signalfd.h>
62#endif
63#ifdef HAVE_POLL_H
64#include <poll.h>
65#endif
66
67#include "libpq/pqsignal.h"
68#include "miscadmin.h"
69#include "pgstat.h"
70#include "port/atomics.h"
73#include "storage/fd.h"
74#include "storage/ipc.h"
75#include "storage/pmsignal.h"
76#include "storage/latch.h"
78#include "utils/memutils.h"
79#include "utils/resowner.h"
80#include "utils/wait_event.h"
81
82/*
83 * Select the fd readiness primitive to use. Normally the "most modern"
84 * primitive supported by the OS will be used, but for testing it can be
85 * useful to manually specify the used primitive. If desired, just add a
86 * define somewhere before this block.
87 */
88#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
89 defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
90/* don't overwrite manual choice */
91#elif defined(HAVE_SYS_EPOLL_H)
92#define WAIT_USE_EPOLL
93#elif defined(HAVE_KQUEUE)
94#define WAIT_USE_KQUEUE
95#elif defined(HAVE_POLL)
96#define WAIT_USE_POLL
97#elif WIN32
98#define WAIT_USE_WIN32
99#else
100#error "no wait set implementation available"
101#endif
102
103/*
104 * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
105 * available. For testing the choice can also be manually specified.
106 */
107#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
108#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
109/* don't overwrite manual choice */
110#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
111#define WAIT_USE_SIGNALFD
112#else
113#define WAIT_USE_SELF_PIPE
114#endif
115#endif
116
117/* typedef in waiteventset.h */
118struct WaitEventSet
119{
120 ResourceOwner owner;
121
122 int nevents; /* number of registered events */
123 int nevents_space; /* maximum number of events in this set */
124
125 /*
126 * Array, of nevents_space length, storing the definition of events this
127 * set is waiting for.
128 */
129 WaitEvent *events;
130
131 /*
132 * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
133 * said latch, and latch_pos the offset in the ->events array. This is
134 * useful because we check the state of the latch before performing
135 * syscalls related to waiting.
136 */
137 Latch *latch;
138 int latch_pos;
139
140 /*
141 * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
142 * is set so that we'll exit immediately if postmaster death is detected,
143 * instead of returning.
144 */
145 bool exit_on_postmaster_death;
146
147#if defined(WAIT_USE_EPOLL)
148 int epoll_fd;
149 /* epoll_wait returns events in a user-provided array; allocate once */
150 struct epoll_event *epoll_ret_events;
151#elif defined(WAIT_USE_KQUEUE)
152 int kqueue_fd;
153 /* kevent returns events in a user-provided array; allocate once */
154 struct kevent *kqueue_ret_events;
155 bool report_postmaster_not_running;
156#elif defined(WAIT_USE_POLL)
157 /* poll expects events to be waited on every poll() call, prepare once */
158 struct pollfd *pollfds;
159#elif defined(WAIT_USE_WIN32)
160
161 /*
162 * Array of windows events. The first element always contains
163 * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
164 * event->pos + 1).
165 */
166 HANDLE *handles;
167#endif
168};
169
170#ifndef WIN32
171/* Are we currently in WaitLatch? The signal handler would like to know. */
172static volatile sig_atomic_t waiting = false;
173#endif
174
175#ifdef WAIT_USE_SIGNALFD
176/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
177static int signal_fd = -1;
178#endif
179
180#ifdef WAIT_USE_SELF_PIPE
181/* Read and write ends of the self-pipe */
182static int selfpipe_readfd = -1;
183static int selfpipe_writefd = -1;
184
185/* Process owning the self-pipe --- needed for checking purposes */
186static int selfpipe_owner_pid = 0;
187
188/* Private function prototypes */
189static void latch_sigurg_handler(SIGNAL_ARGS);
190static void sendSelfPipeByte(void);
191#endif
192
193#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
194static void drain(void);
195#endif
196
197#if defined(WAIT_USE_EPOLL)
198static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
199#elif defined(WAIT_USE_KQUEUE)
200static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
201#elif defined(WAIT_USE_POLL)
202static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
203#elif defined(WAIT_USE_WIN32)
204static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
205#endif
206
207static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
208 WaitEvent *occurred_events, int nevents);
209
210/* ResourceOwner support to hold WaitEventSets */
211static void ResOwnerReleaseWaitEventSet(Datum res);
212
213static const ResourceOwnerDesc wait_event_set_resowner_desc =
214{
215 .name = "WaitEventSet",
216 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
217 .release_priority = RELEASE_PRIO_WAITEVENTSETS,
218 .ReleaseResource = ResOwnerReleaseWaitEventSet,
219 .DebugPrint = NULL
220};
221
222/* Convenience wrappers over ResourceOwnerRemember/Forget */
223static inline void
224ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
225{
226 ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
227}
228static inline void
229ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
230{
231 ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
232}
233
234
235/*
236 * Initialize the process-local wait event infrastructure.
237 *
238 * This must be called once during startup of any process that can wait on
239 * latches, before it issues any InitLatch() or OwnLatch() calls.
240 */
241void
242InitializeWaitEventSupport(void)
243{
244#if defined(WAIT_USE_SELF_PIPE)
245 int pipefd[2];
246
247 if (IsUnderPostmaster)
248 {
249 /*
250 * We might have inherited connections to a self-pipe created by the
251 * postmaster. It's critical that child processes create their own
252 * self-pipes, of course, and we really want them to close the
253 * inherited FDs for safety's sake.
254 */
255 if (selfpipe_owner_pid != 0)
256 {
257 /* Assert we go through here but once in a child process */
258 Assert(selfpipe_owner_pid != MyProcPid);
259 /* Release postmaster's pipe FDs; ignore any error */
260 (void) close(selfpipe_readfd);
261 (void) close(selfpipe_writefd);
262 /* Clean up, just for safety's sake; we'll set these below */
263 selfpipe_readfd = selfpipe_writefd = -1;
264 selfpipe_owner_pid = 0;
265 /* Keep fd.c's accounting straight */
266 ReleaseExternalFD();
267 ReleaseExternalFD();
268 }
269 else
270 {
271 /*
272 * Postmaster didn't create a self-pipe ... or else we're in an
273 * EXEC_BACKEND build, in which case it doesn't matter since the
274 * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
275 * fd.c won't have state to clean up, either.
276 */
277 Assert(selfpipe_readfd == -1);
278 }
279 }
280 else
281 {
282 /* In postmaster or standalone backend, assert we do this but once */
283 Assert(selfpipe_readfd == -1);
284 Assert(selfpipe_owner_pid == 0);
285 }
286
287 /*
288 * Set up the self-pipe that allows a signal handler to wake up the
289 * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
290 * that SetLatch won't block if the event has already been set many times
291 * filling the kernel buffer. Make the read-end non-blocking too, so that
292 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
293 * Also, make both FDs close-on-exec, since we surely do not want any
294 * child processes messing with them.
295 */
296 if (pipe(pipefd) < 0)
297 elog(FATAL, "pipe() failed: %m");
298 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
299 elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
300 if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
301 elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
302 if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
303 elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
304 if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
305 elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
306
307 selfpipe_readfd = pipefd[0];
308 selfpipe_writefd = pipefd[1];
309 selfpipe_owner_pid = MyProcPid;
310
311 /* Tell fd.c about these two long-lived FDs */
312 ReserveExternalFD();
313 ReserveExternalFD();
314
315 pqsignal(SIGURG, latch_sigurg_handler);
316#endif
317
318#ifdef WAIT_USE_SIGNALFD
319 sigset_t signalfd_mask;
320
321 if (IsUnderPostmaster)
322 {
323 /*
324 * It would probably be safe to re-use the inherited signalfd since
325 * signalfds only see the current process's pending signals, but it
326 * seems less surprising to close it and create our own.
327 */
328 if (signal_fd != -1)
329 {
330 /* Release postmaster's signal FD; ignore any error */
331 (void) close(signal_fd);
332 signal_fd = -1;
333 ReleaseExternalFD();
334 }
335 }
336
337 /* Block SIGURG, because we'll receive it through a signalfd. */
338 sigaddset(&UnBlockSig, SIGURG);
339
340 /* Set up the signalfd to receive SIGURG notifications. */
341 sigemptyset(&signalfd_mask);
342 sigaddset(&signalfd_mask, SIGURG);
343 signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
344 if (signal_fd < 0)
345 elog(FATAL, "signalfd() failed");
346 ReserveExternalFD();
347#endif
348
349#ifdef WAIT_USE_KQUEUE
350 /* Ignore SIGURG, because we'll receive it via kqueue. */
351 pqsignal(SIGURG, SIG_IGN);
352#endif
353}
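
The self-pipe set up above is the classic trick the header comment describes. A minimal standalone illustration (plain POSIX, not PostgreSQL code) of why writing a byte is race-free where a plain flag is not; the pipe is assumed to have been created with pipe() and made non-blocking:

#include <poll.h>
#include <signal.h>
#include <unistd.h>

static int wakeup_pipe[2];		/* assumed: created with pipe(), O_NONBLOCK set */

static void
on_signal(int signo)
{
	char	b = 0;

	(void) write(wakeup_pipe[1], &b, 1);	/* async-signal-safe wakeup */
}

static void
wait_for_work(int work_fd)
{
	struct pollfd pfd[2] = {
		{.fd = work_fd, .events = POLLIN},
		{.fd = wakeup_pipe[0], .events = POLLIN},
	};

	/*
	 * Even if the signal arrived just before this call, the byte is already
	 * sitting in the pipe, so poll() returns immediately instead of sleeping.
	 */
	(void) poll(pfd, 2, -1);
}
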
354
355/*
356 * Create a WaitEventSet with space for nevents different events to wait for.
357 *
358 * These events can then be efficiently waited upon together, using
359 * WaitEventSetWait().
360 *
361 * The WaitEventSet is tracked by the given 'resowner'. Use NULL for session
362 * lifetime.
363 */
364WaitEventSet *
365CreateWaitEventSet(ResourceOwner resowner, int nevents)
366{
367 WaitEventSet *set;
368 char *data;
369 Size sz = 0;
370
371 /*
372 * Use MAXALIGN size/alignment to guarantee that later uses of memory are
373 * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
374 * platforms, but earlier allocations like WaitEventSet and WaitEvent
375 * might not be sized to guarantee that when purely using sizeof().
376 */
377 sz += MAXALIGN(sizeof(WaitEventSet));
378 sz += MAXALIGN(sizeof(WaitEvent) * nevents);
379
380#if defined(WAIT_USE_EPOLL)
381 sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
382#elif defined(WAIT_USE_KQUEUE)
383 sz += MAXALIGN(sizeof(struct kevent) * nevents);
384#elif defined(WAIT_USE_POLL)
385 sz += MAXALIGN(sizeof(struct pollfd) * nevents);
386#elif defined(WAIT_USE_WIN32)
387 /* need space for the pgwin32_signal_event */
388 sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
389#endif
390
391 if (resowner != NULL)
392 ResourceOwnerEnlarge(resowner);
393
394 data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
395
396 set = (WaitEventSet *) data;
397 data += MAXALIGN(sizeof(WaitEventSet));
398
399 set->events = (WaitEvent *) data;
400 data += MAXALIGN(sizeof(WaitEvent) * nevents);
401
402#if defined(WAIT_USE_EPOLL)
403 set->epoll_ret_events = (struct epoll_event *) data;
404 data += MAXALIGN(sizeof(struct epoll_event) * nevents);
405#elif defined(WAIT_USE_KQUEUE)
406 set->kqueue_ret_events = (struct kevent *) data;
407 data += MAXALIGN(sizeof(struct kevent) * nevents);
408#elif defined(WAIT_USE_POLL)
409 set->pollfds = (struct pollfd *) data;
410 data += MAXALIGN(sizeof(struct pollfd) * nevents);
411#elif defined(WAIT_USE_WIN32)
412 set->handles = (HANDLE) data;
413 data += MAXALIGN(sizeof(HANDLE) * nevents);
414#endif
415
416 set->latch = NULL;
417 set->nevents_space = nevents;
418 set->exit_on_postmaster_death = false;
419
420 if (resowner != NULL)
421 {
422 ResourceOwnerRememberWaitEventSet(resowner, set);
423 set->owner = resowner;
424 }
425
426#if defined(WAIT_USE_EPOLL)
427 if (!AcquireExternalFD())
428 elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
429 set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
430 if (set->epoll_fd < 0)
431 {
432 ReleaseExternalFD();
433 elog(ERROR, "epoll_create1 failed: %m");
434 }
435#elif defined(WAIT_USE_KQUEUE)
436 if (!AcquireExternalFD())
437 elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
438 set->kqueue_fd = kqueue();
439 if (set->kqueue_fd < 0)
440 {
441 ReleaseExternalFD();
442 elog(ERROR, "kqueue failed: %m");
443 }
444 if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
445 {
446 int save_errno = errno;
447
448 close(set->kqueue_fd);
449 ReleaseExternalFD();
450 errno = save_errno;
451 elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
452 }
453 set->report_postmaster_not_running = false;
454#elif defined(WAIT_USE_WIN32)
455
456 /*
457 * To handle signals while waiting, we need to add a win32 specific event.
458 * We accounted for the additional event at the top of this routine. See
459 * port/win32/signal.c for more details.
460 *
461 * Note: pgwin32_signal_event should be first to ensure that it will be
462 * reported when multiple events are set. We want to guarantee that
463 * pending signals are serviced.
464 */
465 set->handles[0] = pgwin32_signal_event;
466#endif
467
468 return set;
469}
470
471/*
472 * Free a previously created WaitEventSet.
473 *
474 * Note: preferably, this shouldn't have to free any resources that could be
475 * inherited across an exec(). If it did, we'd likely leak those resources in
476 * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
477 * when the FD is created. For the Windows case, we assume that the handles
478 * involved are non-inheritable.
479 */
480void
481FreeWaitEventSet(WaitEventSet *set)
482{
483 if (set->owner)
484 {
485 ResourceOwnerForgetWaitEventSet(set->owner, set);
486 set->owner = NULL;
487 }
488
489#if defined(WAIT_USE_EPOLL)
490 close(set->epoll_fd);
491 ReleaseExternalFD();
492#elif defined(WAIT_USE_KQUEUE)
493 close(set->kqueue_fd);
494 ReleaseExternalFD();
495#elif defined(WAIT_USE_WIN32)
496 for (WaitEvent *cur_event = set->events;
497 cur_event < (set->events + set->nevents);
498 cur_event++)
499 {
500 if (cur_event->events & WL_LATCH_SET)
501 {
502 /* uses the latch's HANDLE */
503 }
504 else if (cur_event->events & WL_POSTMASTER_DEATH)
505 {
506 /* uses PostmasterHandle */
507 }
508 else
509 {
510 /* Clean up the event object we created for the socket */
511 WSAEventSelect(cur_event->fd, NULL, 0);
512 WSACloseEvent(set->handles[cur_event->pos + 1]);
513 }
514 }
515#endif
516
517 pfree(set);
518}
519
520/*
521 * Free a previously created WaitEventSet in a child process after a fork().
522 */
523void
524FreeWaitEventSetAfterFork(WaitEventSet *set)
525{
526#if defined(WAIT_USE_EPOLL)
527 close(set->epoll_fd);
528 ReleaseExternalFD();
529#elif defined(WAIT_USE_KQUEUE)
530 /* kqueues are not normally inherited by child processes */
531 ReleaseExternalFD();
532#endif
533
534 pfree(set);
535}
536
537/* ---
538 * Add an event to the set. Possible events are:
539 * - WL_LATCH_SET: Wait for the latch to be set
540 * - WL_POSTMASTER_DEATH: Wait for postmaster to die
541 * - WL_SOCKET_READABLE: Wait for socket to become readable,
542 * can be combined in one event with other WL_SOCKET_* events
543 * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
544 * can be combined with other WL_SOCKET_* events
545 * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
546 * can be combined with other WL_SOCKET_* events (on non-Windows
547 * platforms, this is the same as WL_SOCKET_WRITEABLE)
548 * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
549 * can be combined with other WL_SOCKET_* events (on non-Windows
550 * platforms, this is the same as WL_SOCKET_READABLE)
551 * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
552 * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
553 *
554 * Returns the offset in WaitEventSet->events (starting from 0), which can be
555 * used to modify previously added wait events using ModifyWaitEvent().
556 *
557 * In the WL_LATCH_SET case the latch must be owned by the current process,
558 * i.e. it must be a process-local latch initialized with InitLatch, or a
559 * shared latch associated with the current process by calling OwnLatch.
560 *
561 * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
562 * conditions cause the socket to be reported as readable/writable/connected,
563 * so that the caller can deal with the condition.
564 *
565 * The user_data pointer specified here will be set for the events returned
566 * by WaitEventSetWait(), allowing to easily associate additional data with
567 * events.
568 */
569int
570AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
571 void *user_data)
572{
573 WaitEvent *event;
574
575 /* not enough space */
576 Assert(set->nevents < set->nevents_space);
577
578 if (events == WL_EXIT_ON_PM_DEATH)
579 {
580 events = WL_POSTMASTER_DEATH;
581 set->exit_on_postmaster_death = true;
582 }
583
584 if (latch)
585 {
586 if (latch->owner_pid != MyProcPid)
587 elog(ERROR, "cannot wait on a latch owned by another process");
588 if (set->latch)
589 elog(ERROR, "cannot wait on more than one latch");
590 if ((events & WL_LATCH_SET) != WL_LATCH_SET)
591 elog(ERROR, "latch events only support being set");
592 }
593 else
594 {
595 if (events & WL_LATCH_SET)
596 elog(ERROR, "cannot wait on latch without a specified latch");
597 }
598
599 /* waiting for socket readiness without a socket indicates a bug */
600 if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
601 elog(ERROR, "cannot wait on socket event without a socket");
602
603 event = &set->events[set->nevents];
604 event->pos = set->nevents++;
605 event->fd = fd;
606 event->events = events;
607 event->user_data = user_data;
608#ifdef WIN32
609 event->reset = false;
610#endif
611
612 if (events == WL_LATCH_SET)
613 {
614 set->latch = latch;
615 set->latch_pos = event->pos;
616#if defined(WAIT_USE_SELF_PIPE)
617 event->fd = selfpipe_readfd;
618#elif defined(WAIT_USE_SIGNALFD)
619 event->fd = signal_fd;
620#else
621 event->fd = PGINVALID_SOCKET;
622#ifdef WAIT_USE_EPOLL
623 return event->pos;
624#endif
625#endif
626 }
627 else if (events == WL_POSTMASTER_DEATH)
628 {
629#ifndef WIN32
630 event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
631#endif
632 }
633
634 /* perform wait primitive specific initialization, if needed */
635#if defined(WAIT_USE_EPOLL)
636 WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
637#elif defined(WAIT_USE_KQUEUE)
638 WaitEventAdjustKqueue(set, event, 0);
639#elif defined(WAIT_USE_POLL)
640 WaitEventAdjustPoll(set, event);
641#elif defined(WAIT_USE_WIN32)
642 WaitEventAdjustWin32(set, event);
643#endif
644
645 return event->pos;
646}
647
648/*
649 * Change the event mask and, in the WL_LATCH_SET case, the latch associated
650 * with the WaitEvent. The latch may be changed to NULL to disable the latch
651 * temporarily, and then set back to a latch later.
652 *
653 * 'pos' is the id returned by AddWaitEventToSet.
654 */
655void
656ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
657{
658 WaitEvent *event;
659#if defined(WAIT_USE_KQUEUE)
660 int old_events;
661#endif
662
663 Assert(pos < set->nevents);
664
665 event = &set->events[pos];
666#if defined(WAIT_USE_KQUEUE)
667 old_events = event->events;
668#endif
669
670 /*
671 * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
672 *
673 * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
674 * in AddWaitEventToSet(), this needs to be checked before the fast-path
675 * below that checks if 'events' has changed.
676 */
677 if (event->events == WL_POSTMASTER_DEATH)
678 {
679 if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
680 elog(ERROR, "cannot remove postmaster death event");
681 set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
682 return;
683 }
684
685 /*
686 * If neither the event mask nor the associated latch changes, return
687 * early. That's an important optimization for some sockets, where
688 * ModifyWaitEvent is frequently used to switch from waiting for reads to
689 * waiting on writes.
690 */
691 if (events == event->events &&
692 (!(event->events & WL_LATCH_SET) || set->latch == latch))
693 return;
694
695 if (event->events & WL_LATCH_SET && events != event->events)
696 elog(ERROR, "cannot modify latch event");
697
698 /* FIXME: validate event mask */
699 event->events = events;
700
701 if (events == WL_LATCH_SET)
702 {
703 if (latch && latch->owner_pid != MyProcPid)
704 elog(ERROR, "cannot wait on a latch owned by another process");
705 set->latch = latch;
706
707 /*
708 * On Unix, we don't need to modify the kernel object because the
709 * underlying pipe (if there is one) is the same for all latches so we
710 * can return immediately. On Windows, we need to update our array of
711 * handles, but we leave the old one in place and tolerate spurious
712 * wakeups if the latch is disabled.
713 */
714#if defined(WAIT_USE_WIN32)
715 if (!latch)
716 return;
717#else
718 return;
719#endif
720 }
721
722#if defined(WAIT_USE_EPOLL)
723 WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
724#elif defined(WAIT_USE_KQUEUE)
725 WaitEventAdjustKqueue(set, event, old_events);
726#elif defined(WAIT_USE_POLL)
727 WaitEventAdjustPoll(set, event);
728#elif defined(WAIT_USE_WIN32)
729 WaitEventAdjustWin32(set, event);
730#endif
731}
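
A hedged example of the read/write toggling the fast-path comment above refers to; 'set' and 'sock_pos' (the position AddWaitEventToSet() returned earlier for a socket) are assumed:

/* stop watching for input, start watching for output on the same socket */
ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);

/* ... and once the pending write has drained, flip back again */
ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);
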
732
733#if defined(WAIT_USE_EPOLL)
734/*
735 * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
736 */
737static void
738WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
739{
740 struct epoll_event epoll_ev;
741 int rc;
742
743 /* pointer to our event, returned by epoll_wait */
744 epoll_ev.data.ptr = event;
745 /* always wait for errors */
746 epoll_ev.events = EPOLLERR | EPOLLHUP;
747
748 /* prepare pollfd entry once */
749 if (event->events == WL_LATCH_SET)
750 {
751 Assert(set->latch != NULL);
752 epoll_ev.events |= EPOLLIN;
753 }
754 else if (event->events == WL_POSTMASTER_DEATH)
755 {
756 epoll_ev.events |= EPOLLIN;
757 }
758 else
759 {
760 Assert(event->fd != PGINVALID_SOCKET);
761 Assert(event->events & (WL_SOCKET_READABLE |
762 WL_SOCKET_WRITEABLE |
763 WL_SOCKET_CLOSED));
764
765 if (event->events & WL_SOCKET_READABLE)
766 epoll_ev.events |= EPOLLIN;
767 if (event->events & WL_SOCKET_WRITEABLE)
768 epoll_ev.events |= EPOLLOUT;
769 if (event->events & WL_SOCKET_CLOSED)
770 epoll_ev.events |= EPOLLRDHUP;
771 }
772
773 /*
774 * Even though unused, we also pass epoll_ev as the data argument if
775 * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
776 * requiring that, and actually it makes the code simpler...
777 */
778 rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
779
780 if (rc < 0)
781 ereport(ERROR,
782 (errcode_for_socket_access(),
783 errmsg("%s() failed: %m",
784 "epoll_ctl")));
785}
786#endif
787
788#if defined(WAIT_USE_POLL)
789static void
790WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
791{
792 struct pollfd *pollfd = &set->pollfds[event->pos];
793
794 pollfd->revents = 0;
795 pollfd->fd = event->fd;
796
797 /* prepare pollfd entry once */
798 if (event->events == WL_LATCH_SET)
799 {
800 Assert(set->latch != NULL);
801 pollfd->events = POLLIN;
802 }
803 else if (event->events == WL_POSTMASTER_DEATH)
804 {
805 pollfd->events = POLLIN;
806 }
807 else
808 {
809 Assert(event->events & (WL_SOCKET_READABLE |
810 WL_SOCKET_WRITEABLE |
811 WL_SOCKET_CLOSED));
812 pollfd->events = 0;
813 if (event->events & WL_SOCKET_READABLE)
814 pollfd->events |= POLLIN;
815 if (event->events & WL_SOCKET_WRITEABLE)
816 pollfd->events |= POLLOUT;
817#ifdef POLLRDHUP
818 if (event->events & WL_SOCKET_CLOSED)
819 pollfd->events |= POLLRDHUP;
820#endif
821 }
822
823 Assert(event->fd != PGINVALID_SOCKET);
824}
825#endif
826
827#if defined(WAIT_USE_KQUEUE)
828
829/*
830 * On most BSD family systems, the udata member of struct kevent is of type
831 * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
832 * NetBSD has it as intptr_t, so here we wallpaper over that difference with
833 * an lvalue cast.
834 */
835#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
836
837static inline void
838WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
839 WaitEvent *event)
840{
841 k_ev->ident = event->fd;
842 k_ev->filter = filter;
843 k_ev->flags = action;
844 k_ev->fflags = 0;
845 k_ev->data = 0;
846 AccessWaitEvent(k_ev) = event;
847}
848
849static inline void
850WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
851{
852 /* For now postmaster death can only be added, not removed. */
853 k_ev->ident = PostmasterPid;
854 k_ev->filter = EVFILT_PROC;
855 k_ev->flags = EV_ADD;
856 k_ev->fflags = NOTE_EXIT;
857 k_ev->data = 0;
858 AccessWaitEvent(k_ev) = event;
859}
860
861static inline void
862WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
863{
864 /* For now latch can only be added, not removed. */
865 k_ev->ident = SIGURG;
866 k_ev->filter = EVFILT_SIGNAL;
867 k_ev->flags = EV_ADD;
868 k_ev->fflags = 0;
869 k_ev->data = 0;
870 AccessWaitEvent(k_ev) = event;
871}
872
873/*
874 * old_events is the previous event mask, used to compute what has changed.
875 */
876static void
877WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
878{
879 int rc;
880 struct kevent k_ev[2];
881 int count = 0;
882 bool new_filt_read = false;
883 bool old_filt_read = false;
884 bool new_filt_write = false;
885 bool old_filt_write = false;
886
887 if (old_events == event->events)
888 return;
889
890 Assert(event->events != WL_LATCH_SET || set->latch != NULL);
891 Assert(event->events == WL_LATCH_SET ||
892 event->events == WL_POSTMASTER_DEATH ||
893 (event->events & (WL_SOCKET_READABLE |
894 WL_SOCKET_WRITEABLE |
895 WL_SOCKET_CLOSED)));
896
897 if (event->events == WL_POSTMASTER_DEATH)
898 {
899 /*
900 * Unlike all the other implementations, we detect postmaster death
901 * using process notification instead of waiting on the postmaster
902 * alive pipe.
903 */
904 WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
905 }
906 else if (event->events == WL_LATCH_SET)
907 {
908 /* We detect latch wakeup using a signal event. */
909 WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
910 }
911 else
912 {
913 /*
914 * We need to compute the adds and deletes required to get from the
915 * old event mask to the new event mask, since kevent treats readable
916 * and writable as separate events.
917 */
918 if (old_events & WL_SOCKET_READABLE)
919 old_filt_read = true;
920 if (event->events & WL_SOCKET_READABLE)
921 new_filt_read = true;
922 if (old_events & WL_SOCKET_WRITEABLE)
923 old_filt_write = true;
924 if (event->events & WL_SOCKET_WRITEABLE)
925 new_filt_write = true;
926 if (old_filt_read && !new_filt_read)
927 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
928 event);
929 else if (!old_filt_read && new_filt_read)
930 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
931 event);
932 if (old_filt_write && !new_filt_write)
933 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
934 event);
935 else if (!old_filt_write && new_filt_write)
936 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
937 event);
938 }
939
940 /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
941 if (count == 0)
942 return;
943
944 Assert(count <= 2);
945
946 rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
947
948 /*
949 * When adding the postmaster's pid, we have to consider that it might
950 * already have exited and perhaps even been replaced by another process
951 * with the same pid. If so, we have to defer reporting this as an event
952 * until the next call to WaitEventSetWaitBlock().
953 */
954
955 if (rc < 0)
956 {
957 if (event->events == WL_POSTMASTER_DEATH &&
958 (errno == ESRCH || errno == EACCES))
959 set->report_postmaster_not_running = true;
960 else
963 errmsg("%s() failed: %m",
964 "kevent")));
965 }
966 else if (event->events == WL_POSTMASTER_DEATH &&
967 PostmasterPid != getppid() &&
968 !PostmasterIsAliveInternal())
969 {
970 /*
971 * The extra PostmasterIsAliveInternal() check prevents false alarms
972 * on systems that give a different value for getppid() while being
973 * traced by a debugger.
974 */
975 set->report_postmaster_not_running = true;
976 }
977}
978
979#endif
980
981#if defined(WAIT_USE_WIN32)
983
984static void
985WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
986{
987 HANDLE *handle = &set->handles[event->pos + 1];
988
989 if (event->events == WL_LATCH_SET)
990 {
991 Assert(set->latch != NULL);
992 *handle = set->latch->event;
993 }
994 else if (event->events == WL_POSTMASTER_DEATH)
995 {
996 *handle = PostmasterHandle;
997 }
998 else
999 {
1000 int flags = FD_CLOSE; /* always check for errors/EOF */
1001
1002 if (event->events & WL_SOCKET_READABLE)
1003 flags |= FD_READ;
1004 if (event->events & WL_SOCKET_WRITEABLE)
1005 flags |= FD_WRITE;
1006 if (event->events & WL_SOCKET_CONNECTED)
1007 flags |= FD_CONNECT;
1008 if (event->events & WL_SOCKET_ACCEPT)
1009 flags |= FD_ACCEPT;
1010
1011 if (*handle == WSA_INVALID_EVENT)
1012 {
1013 *handle = WSACreateEvent();
1014 if (*handle == WSA_INVALID_EVENT)
1015 elog(ERROR, "failed to create event for socket: error code %d",
1016 WSAGetLastError());
1017 }
1018 if (WSAEventSelect(event->fd, *handle, flags) != 0)
1019 elog(ERROR, "failed to set up event for socket: error code %d",
1020 WSAGetLastError());
1021
1022 Assert(event->fd != PGINVALID_SOCKET);
1023 }
1024}
1025#endif
1026
1027/*
1028 * Wait for events added to the set to happen, or until the timeout is
1029 * reached. At most nevents occurred events are returned.
1030 *
1031 * If timeout = -1, block until an event occurs; if 0, check sockets for
1032 * readiness, but don't block; if > 0, block for at most timeout milliseconds.
1033 *
1034 * Returns the number of events occurred, or 0 if the timeout was reached.
1035 *
1036 * Returned events will have the fd, pos, user_data fields set to the
1037 * values associated with the registered event.
1038 */
1039int
1040WaitEventSetWait(WaitEventSet *set, long timeout,
1041 WaitEvent *occurred_events, int nevents,
1042 uint32 wait_event_info)
1043{
1044 int returned_events = 0;
1045 instr_time start_time;
1046 instr_time cur_time;
1047 long cur_timeout = -1;
1048
1049 Assert(nevents > 0);
1050
1051 /*
1052 * Initialize timeout if requested. We must record the current time so
1053 * that we can determine the remaining timeout if interrupted.
1054 */
1055 if (timeout >= 0)
1056 {
1057 INSTR_TIME_SET_CURRENT(start_time);
1058 Assert(timeout >= 0 && timeout <= INT_MAX);
1059 cur_timeout = timeout;
1060 }
1061 else
1062 INSTR_TIME_SET_ZERO(start_time);
1063
1064 pgstat_report_wait_start(wait_event_info);
1065
1066#ifndef WIN32
1067 waiting = true;
1068#else
1069 /* Ensure that signals are serviced even if latch is already set */
1070 pgwin32_dispatch_queued_signals();
1071#endif
1072 while (returned_events == 0)
1073 {
1074 int rc;
1075
1076 /*
1077 * Check if the latch is set already first. If so, we either exit
1078 * immediately or ask the kernel for further events available right
1079 * now without waiting, depending on how many events the caller wants.
1080 *
1081 * If someone sets the latch between this and the
1082 * WaitEventSetWaitBlock() below, the setter will write a byte to the
1083 * pipe (or signal us and the signal handler will do that), and the
1084 * readiness routine will return immediately.
1085 *
1086 * On Unix, if there's a pending byte in the self pipe, we'll notice
1087 * whenever blocking. Only clearing the pipe in that case avoids
1088 * having to drain it every time WaitLatchOrSocket() is used. Should
1089 * the pipe-buffer fill up we're still ok, because the pipe is in
1090 * nonblocking mode. It's unlikely for that to happen, because the
1091 * self pipe isn't filled unless we're blocking (waiting = true), or
1092 * from inside a signal handler in latch_sigurg_handler().
1093 *
1094 * On windows, we'll also notice if there's a pending event for the
1095 * latch when blocking, but there's no danger of anything filling up,
1096 * as "Setting an event that is already set has no effect.".
1097 *
1098 * Note: we assume that the kernel calls involved in latch management
1099 * will provide adequate synchronization on machines with weak memory
1100 * ordering, so that we cannot miss seeing is_set if a notification
1101 * has already been queued.
1102 */
1103 if (set->latch && !set->latch->is_set)
1104 {
1105 /* about to sleep on a latch */
1106 set->latch->maybe_sleeping = true;
1107 pg_memory_barrier();
1108 /* and recheck */
1109 }
1110
1111 if (set->latch && set->latch->is_set)
1112 {
1113 occurred_events->fd = PGINVALID_SOCKET;
1114 occurred_events->pos = set->latch_pos;
1115 occurred_events->user_data =
1116 set->events[set->latch_pos].user_data;
1117 occurred_events->events = WL_LATCH_SET;
1118 occurred_events++;
1119 returned_events++;
1120
1121 /* could have been set above */
1122 set->latch->maybe_sleeping = false;
1123
1124 if (returned_events == nevents)
1125 break; /* output buffer full already */
1126
1127 /*
1128 * Even though we already have an event, we'll poll just once with
1129 * zero timeout to see what non-latch events we can fit into the
1130 * output buffer at the same time.
1131 */
1132 cur_timeout = 0;
1133 timeout = 0;
1134 }
1135
1136 /*
1137 * Wait for events using the readiness primitive chosen at the top of
1138 * this file. If -1 is returned, a timeout has occurred, if 0 we have
1139 * to retry, everything >= 1 is the number of returned events.
1140 */
1141 rc = WaitEventSetWaitBlock(set, cur_timeout,
1142 occurred_events, nevents - returned_events);
1143
1144 if (set->latch &&
1145 set->latch->maybe_sleeping)
1146 set->latch->maybe_sleeping = false;
1147
1148 if (rc == -1)
1149 break; /* timeout occurred */
1150 else
1151 returned_events += rc;
1152
1153 /* If we're not done, update cur_timeout for next iteration */
1154 if (returned_events == 0 && timeout >= 0)
1155 {
1156 INSTR_TIME_SET_CURRENT(cur_time);
1157 INSTR_TIME_SUBTRACT(cur_time, start_time);
1158 cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
1159 if (cur_timeout <= 0)
1160 break;
1161 }
1162 }
1163#ifndef WIN32
1164 waiting = false;
1165#endif
1166
1167 pgstat_report_wait_end();
1168
1169 return returned_events;
1170}
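
A hedged sketch of how a caller typically consumes the result of WaitEventSetWait(); handle_readable() is a hypothetical helper, and 'set'/'occurred' are assumed to have been set up as in the earlier sketch:

int n = WaitEventSetWait(set, 5000 /* ms */, occurred,
						 lengthof(occurred), PG_WAIT_EXTENSION);

if (n == 0)
{
	/* the five-second timeout expired without any event */
}
for (int i = 0; i < n; i++)
{
	if (occurred[i].events & WL_LATCH_SET)
		ResetLatch(MyLatch);
	if (occurred[i].events & WL_SOCKET_READABLE)
		handle_readable(occurred[i].fd, occurred[i].user_data);	/* hypothetical */
}
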
1171
1172
1173#if defined(WAIT_USE_EPOLL)
1174
1175/*
1176 * Wait using linux's epoll_wait(2).
1177 *
1178 * This is the preferable wait method, as several readiness notifications are
1179 * delivered, without having to iterate through all of set->events. The return
1180 * epoll_event struct contain a pointer to our events, making association
1181 * easy.
1182 */
1183static inline int
1184WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1185 WaitEvent *occurred_events, int nevents)
1186{
1187 int returned_events = 0;
1188 int rc;
1189 WaitEvent *cur_event;
1190 struct epoll_event *cur_epoll_event;
1191
1192 /* Sleep */
1193 rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
1194 Min(nevents, set->nevents_space), cur_timeout);
1195
1196 /* Check return code */
1197 if (rc < 0)
1198 {
1199 /* EINTR is okay, otherwise complain */
1200 if (errno != EINTR)
1201 {
1202 waiting = false;
1203 ereport(ERROR,
1204 (errcode_for_socket_access(),
1205 errmsg("%s() failed: %m",
1206 "epoll_wait")));
1207 }
1208 return 0;
1209 }
1210 else if (rc == 0)
1211 {
1212 /* timeout exceeded */
1213 return -1;
1214 }
1215
1216 /*
1217 * At least one event occurred, iterate over the returned epoll events
1218 * until they're either all processed, or we've returned all the events
1219 * the caller desired.
1220 */
1221 for (cur_epoll_event = set->epoll_ret_events;
1222 cur_epoll_event < (set->epoll_ret_events + rc) &&
1223 returned_events < nevents;
1224 cur_epoll_event++)
1225 {
1226 /* epoll's data pointer is set to the associated WaitEvent */
1227 cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
1228
1229 occurred_events->pos = cur_event->pos;
1230 occurred_events->user_data = cur_event->user_data;
1231 occurred_events->events = 0;
1232
1233 if (cur_event->events == WL_LATCH_SET &&
1234 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1235 {
1236 /* Drain the signalfd. */
1237 drain();
1238
1239 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1240 {
1241 occurred_events->fd = PGINVALID_SOCKET;
1242 occurred_events->events = WL_LATCH_SET;
1243 occurred_events++;
1244 returned_events++;
1245 }
1246 }
1247 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1248 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1249 {
1250 /*
1251 * We expect an EPOLLHUP when the remote end is closed, but
1252 * because we don't expect the pipe to become readable or to have
1253 * any errors either, treat those cases as postmaster death, too.
1254 *
1255 * Be paranoid about a spurious event signaling the postmaster as
1256 * being dead. There have been reports about that happening with
1257 * older primitives (select(2) to be specific), and a spurious
1258 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1259 * cost much.
1260 */
1261 if (!PostmasterIsAliveInternal())
1262 {
1263 if (set->exit_on_postmaster_death)
1264 proc_exit(1);
1265 occurred_events->fd = PGINVALID_SOCKET;
1266 occurred_events->events = WL_POSTMASTER_DEATH;
1267 occurred_events++;
1268 returned_events++;
1269 }
1270 }
1271 else if (cur_event->events & (WL_SOCKET_READABLE |
1272 WL_SOCKET_WRITEABLE |
1273 WL_SOCKET_CLOSED))
1274 {
1275 Assert(cur_event->fd != PGINVALID_SOCKET);
1276
1277 if ((cur_event->events & WL_SOCKET_READABLE) &&
1278 (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
1279 {
1280 /* data available in socket, or EOF */
1281 occurred_events->events |= WL_SOCKET_READABLE;
1282 }
1283
1284 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1285 (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
1286 {
1287 /* writable, or EOF */
1288 occurred_events->events |= WL_SOCKET_WRITEABLE;
1289 }
1290
1291 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1292 (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
1293 {
1294 /* remote peer shut down, or error */
1295 occurred_events->events |= WL_SOCKET_CLOSED;
1296 }
1297
1298 if (occurred_events->events != 0)
1299 {
1300 occurred_events->fd = cur_event->fd;
1301 occurred_events++;
1302 returned_events++;
1303 }
1304 }
1305 }
1306
1307 return returned_events;
1308}
1309
1310#elif defined(WAIT_USE_KQUEUE)
1311
1312/*
1313 * Wait using kevent(2) on BSD-family systems and macOS.
1314 *
1315 * For now this mirrors the epoll code, but in future it could modify the fd
1316 * set in the same call to kevent as it uses for waiting instead of doing that
1317 * with separate system calls.
1318 */
1319static int
1320WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1321 WaitEvent *occurred_events, int nevents)
1322{
1323 int returned_events = 0;
1324 int rc;
1325 WaitEvent *cur_event;
1326 struct kevent *cur_kqueue_event;
1327 struct timespec timeout;
1328 struct timespec *timeout_p;
1329
1330 if (cur_timeout < 0)
1331 timeout_p = NULL;
1332 else
1333 {
1334 timeout.tv_sec = cur_timeout / 1000;
1335 timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
1336 timeout_p = &timeout;
1337 }
1338
1339 /*
1340 * Report postmaster events discovered by WaitEventAdjustKqueue() or an
1341 * earlier call to WaitEventSetWait().
1342 */
1343 if (unlikely(set->report_postmaster_not_running))
1344 {
1345 if (set->exit_on_postmaster_death)
1346 proc_exit(1);
1347 occurred_events->fd = PGINVALID_SOCKET;
1348 occurred_events->events = WL_POSTMASTER_DEATH;
1349 return 1;
1350 }
1351
1352 /* Sleep */
1353 rc = kevent(set->kqueue_fd, NULL, 0,
1354 set->kqueue_ret_events,
1355 Min(nevents, set->nevents_space),
1356 timeout_p);
1357
1358 /* Check return code */
1359 if (rc < 0)
1360 {
1361 /* EINTR is okay, otherwise complain */
1362 if (errno != EINTR)
1363 {
1364 waiting = false;
1365 ereport(ERROR,
1366 (errcode_for_socket_access(),
1367 errmsg("%s() failed: %m",
1368 "kevent")));
1369 }
1370 return 0;
1371 }
1372 else if (rc == 0)
1373 {
1374 /* timeout exceeded */
1375 return -1;
1376 }
1377
1378 /*
1379 * At least one event occurred, iterate over the returned kqueue events
1380 * until they're either all processed, or we've returned all the events
1381 * the caller desired.
1382 */
1383 for (cur_kqueue_event = set->kqueue_ret_events;
1384 cur_kqueue_event < (set->kqueue_ret_events + rc) &&
1385 returned_events < nevents;
1386 cur_kqueue_event++)
1387 {
1388 /* kevent's udata points to the associated WaitEvent */
1389 cur_event = AccessWaitEvent(cur_kqueue_event);
1390
1391 occurred_events->pos = cur_event->pos;
1392 occurred_events->user_data = cur_event->user_data;
1393 occurred_events->events = 0;
1394
1395 if (cur_event->events == WL_LATCH_SET &&
1396 cur_kqueue_event->filter == EVFILT_SIGNAL)
1397 {
1398 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1399 {
1400 occurred_events->fd = PGINVALID_SOCKET;
1401 occurred_events->events = WL_LATCH_SET;
1402 occurred_events++;
1403 returned_events++;
1404 }
1405 }
1406 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1407 cur_kqueue_event->filter == EVFILT_PROC &&
1408 (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
1409 {
1410 /*
1411 * The kernel will tell this kqueue object only once about the
1412 * exit of the postmaster, so let's remember that for next time so
1413 * that we provide level-triggered semantics.
1414 */
1415 set->report_postmaster_not_running = true;
1416
1417 if (set->exit_on_postmaster_death)
1418 proc_exit(1);
1419 occurred_events->fd = PGINVALID_SOCKET;
1420 occurred_events->events = WL_POSTMASTER_DEATH;
1421 occurred_events++;
1422 returned_events++;
1423 }
1424 else if (cur_event->events & (WL_SOCKET_READABLE |
1427 {
1428 Assert(cur_event->fd >= 0);
1429
1430 if ((cur_event->events & WL_SOCKET_READABLE) &&
1431 (cur_kqueue_event->filter == EVFILT_READ))
1432 {
1433 /* readable, or EOF */
1434 occurred_events->events |= WL_SOCKET_READABLE;
1435 }
1436
1437 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1438 (cur_kqueue_event->filter == EVFILT_READ) &&
1439 (cur_kqueue_event->flags & EV_EOF))
1440 {
1441 /* the remote peer has shut down */
1442 occurred_events->events |= WL_SOCKET_CLOSED;
1443 }
1444
1445 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1446 (cur_kqueue_event->filter == EVFILT_WRITE))
1447 {
1448 /* writable, or EOF */
1449 occurred_events->events |= WL_SOCKET_WRITEABLE;
1450 }
1451
1452 if (occurred_events->events != 0)
1453 {
1454 occurred_events->fd = cur_event->fd;
1455 occurred_events++;
1456 returned_events++;
1457 }
1458 }
1459 }
1460
1461 return returned_events;
1462}
1463
1464#elif defined(WAIT_USE_POLL)
1465
1466/*
1467 * Wait using poll(2).
1468 *
1469 * This allows receiving readiness notifications for several events at once,
1470 * but requires iterating through all of set->pollfds.
1471 */
1472static inline int
1473WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1474 WaitEvent *occurred_events, int nevents)
1475{
1476 int returned_events = 0;
1477 int rc;
1478 WaitEvent *cur_event;
1479 struct pollfd *cur_pollfd;
1480
1481 /* Sleep */
1482 rc = poll(set->pollfds, set->nevents, cur_timeout);
1483
1484 /* Check return code */
1485 if (rc < 0)
1486 {
1487 /* EINTR is okay, otherwise complain */
1488 if (errno != EINTR)
1489 {
1490 waiting = false;
1491 ereport(ERROR,
1492 (errcode_for_socket_access(),
1493 errmsg("%s() failed: %m",
1494 "poll")));
1495 }
1496 return 0;
1497 }
1498 else if (rc == 0)
1499 {
1500 /* timeout exceeded */
1501 return -1;
1502 }
1503
1504 for (cur_event = set->events, cur_pollfd = set->pollfds;
1505 cur_event < (set->events + set->nevents) &&
1506 returned_events < nevents;
1507 cur_event++, cur_pollfd++)
1508 {
1509 /* no activity on this FD, skip */
1510 if (cur_pollfd->revents == 0)
1511 continue;
1512
1513 occurred_events->pos = cur_event->pos;
1514 occurred_events->user_data = cur_event->user_data;
1515 occurred_events->events = 0;
1516
1517 if (cur_event->events == WL_LATCH_SET &&
1518 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1519 {
1520 /* There's data in the self-pipe, clear it. */
1521 drain();
1522
1523 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1524 {
1525 occurred_events->fd = PGINVALID_SOCKET;
1526 occurred_events->events = WL_LATCH_SET;
1527 occurred_events++;
1528 returned_events++;
1529 }
1530 }
1531 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1532 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1533 {
1534 /*
1535 * We expect a POLLHUP when the remote end is closed, but because
1536 * we don't expect the pipe to become readable or to have any
1537 * errors either, treat those cases as postmaster death, too.
1538 *
1539 * Be paranoid about a spurious event signaling the postmaster as
1540 * being dead. There have been reports about that happening with
1541 * older primitives (select(2) to be specific), and a spurious
1542 * WL_POSTMASTER_DEATH event would be painful. Re-checking
1543 * doesn't cost much.
1544 */
1545 if (!PostmasterIsAliveInternal())
1546 {
1547 if (set->exit_on_postmaster_death)
1548 proc_exit(1);
1549 occurred_events->fd = PGINVALID_SOCKET;
1550 occurred_events->events = WL_POSTMASTER_DEATH;
1551 occurred_events++;
1552 returned_events++;
1553 }
1554 }
1555 else if (cur_event->events & (WL_SOCKET_READABLE |
1556 WL_SOCKET_WRITEABLE |
1557 WL_SOCKET_CLOSED))
1558 {
1559 int errflags = POLLHUP | POLLERR | POLLNVAL;
1560
1561 Assert(cur_event->fd >= PGINVALID_SOCKET);
1562
1563 if ((cur_event->events & WL_SOCKET_READABLE) &&
1564 (cur_pollfd->revents & (POLLIN | errflags)))
1565 {
1566 /* data available in socket, or EOF */
1567 occurred_events->events |= WL_SOCKET_READABLE;
1568 }
1569
1570 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1571 (cur_pollfd->revents & (POLLOUT | errflags)))
1572 {
1573 /* writeable, or EOF */
1574 occurred_events->events |= WL_SOCKET_WRITEABLE;
1575 }
1576
1577#ifdef POLLRDHUP
1578 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1579 (cur_pollfd->revents & (POLLRDHUP | errflags)))
1580 {
1581 /* remote peer closed, or error */
1582 occurred_events->events |= WL_SOCKET_CLOSED;
1583 }
1584#endif
1585
1586 if (occurred_events->events != 0)
1587 {
1588 occurred_events->fd = cur_event->fd;
1589 occurred_events++;
1590 returned_events++;
1591 }
1592 }
1593 }
1594 return returned_events;
1595}
1596
1597#elif defined(WAIT_USE_WIN32)
1598
1599/*
1600 * Wait using Windows' WaitForMultipleObjects(). Each call only "consumes" one
1601 * event, so we keep calling until we've filled up our output buffer to match
1602 * the behavior of the other implementations.
1603 *
1604 * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
1605 */
1606static inline int
1607WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1608 WaitEvent *occurred_events, int nevents)
1609{
1610 int returned_events = 0;
1611 DWORD rc;
1612 WaitEvent *cur_event;
1613
1614 /* Reset any wait events that need it */
1615 for (cur_event = set->events;
1616 cur_event < (set->events + set->nevents);
1617 cur_event++)
1618 {
1619 if (cur_event->reset)
1620 {
1621 WaitEventAdjustWin32(set, cur_event);
1622 cur_event->reset = false;
1623 }
1624
1625 /*
1626 * We associate the socket with a new event handle for each
1627 * WaitEventSet. FD_CLOSE is only generated once if the other end
1628 * closes gracefully. Therefore we might miss the FD_CLOSE
1629 * notification, if it was delivered to another event after we stopped
1630 * waiting for it. Close that race by peeking for EOF after setting
1631 * up this handle to receive notifications, and before entering the
1632 * sleep.
1633 *
1634 * XXX If we had one event handle for the lifetime of a socket, we
1635 * wouldn't need this.
1636 */
1637 if (cur_event->events & WL_SOCKET_READABLE)
1638 {
1639 char c;
1640 WSABUF buf;
1641 DWORD received;
1642 DWORD flags;
1643
1644 buf.buf = &c;
1645 buf.len = 1;
1646 flags = MSG_PEEK;
1647 if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
1648 {
1649 occurred_events->pos = cur_event->pos;
1650 occurred_events->user_data = cur_event->user_data;
1651 occurred_events->events = WL_SOCKET_READABLE;
1652 occurred_events->fd = cur_event->fd;
1653 return 1;
1654 }
1655 }
1656
1657 /*
1658 * Windows does not guarantee to log an FD_WRITE network event
1659 * indicating that more data can be sent unless the previous send()
1660 * failed with WSAEWOULDBLOCK. While our caller might well have made
1661 * such a call, we cannot assume that here. Therefore, if waiting for
1662 * write-ready, force the issue by doing a dummy send(). If the dummy
1663 * send() succeeds, assume that the socket is in fact write-ready, and
1664 * return immediately. Also, if it fails with something other than
1665 * WSAEWOULDBLOCK, return a write-ready indication to let our caller
1666 * deal with the error condition.
1667 */
1668 if (cur_event->events & WL_SOCKET_WRITEABLE)
1669 {
1670 char c;
1671 WSABUF buf;
1672 DWORD sent;
1673 int r;
1674
1675 buf.buf = &c;
1676 buf.len = 0;
1677
1678 r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
1679 if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
1680 {
1681 occurred_events->pos = cur_event->pos;
1682 occurred_events->user_data = cur_event->user_data;
1683 occurred_events->events = WL_SOCKET_WRITEABLE;
1684 occurred_events->fd = cur_event->fd;
1685 return 1;
1686 }
1687 }
1688 }
1689
1690 /*
1691 * Sleep.
1692 *
1693 * Need to wait for ->nevents + 1, because signal handle is in [0].
1694 */
1695 rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
1696 cur_timeout);
1697
1698 /* Check return code */
1699 if (rc == WAIT_FAILED)
1700 elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
1701 GetLastError());
1702 else if (rc == WAIT_TIMEOUT)
1703 {
1704 /* timeout exceeded */
1705 return -1;
1706 }
1707
1708 if (rc == WAIT_OBJECT_0)
1709 {
1710 /* Service newly-arrived signals */
1711 pgwin32_dispatch_queued_signals();
1712 return 0; /* retry */
1713 }
1714
1715 /*
1716 * With an offset of one, due to the always present pgwin32_signal_event,
1717 * the handle offset directly corresponds to a wait event.
1718 */
1719 cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
1720
1721 for (;;)
1722 {
1723 int next_pos;
1724 int count;
1725
1726 occurred_events->pos = cur_event->pos;
1727 occurred_events->user_data = cur_event->user_data;
1728 occurred_events->events = 0;
1729
1730 if (cur_event->events == WL_LATCH_SET)
1731 {
1732 /*
1733 * We cannot use set->latch->event to reset the fired event if we
1734 * aren't waiting on this latch now.
1735 */
1736 if (!ResetEvent(set->handles[cur_event->pos + 1]))
1737 elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
1738
1739 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1740 {
1741 occurred_events->fd = PGINVALID_SOCKET;
1742 occurred_events->events = WL_LATCH_SET;
1743 occurred_events++;
1744 returned_events++;
1745 }
1746 }
1747 else if (cur_event->events == WL_POSTMASTER_DEATH)
1748 {
1749 /*
1750 * Postmaster apparently died. Since the consequences of falsely
1751 * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
1752 * take the trouble to positively verify this with
1753 * PostmasterIsAlive(), even though there is no known reason to
1754 * think that the event could be falsely set on Windows.
1755 */
1756 if (!PostmasterIsAlive())
1757 {
1758 if (set->exit_on_postmaster_death)
1759 proc_exit(1);
1760 occurred_events->fd = PGINVALID_SOCKET;
1761 occurred_events->events = WL_POSTMASTER_DEATH;
1762 occurred_events++;
1763 returned_events++;
1764 }
1765 }
1766 else if (cur_event->events & WL_SOCKET_MASK)
1767 {
1768 WSANETWORKEVENTS resEvents;
1769 HANDLE handle = set->handles[cur_event->pos + 1];
1770
1771 Assert(cur_event->fd);
1772
1773 occurred_events->fd = cur_event->fd;
1774
1775 ZeroMemory(&resEvents, sizeof(resEvents));
1776 if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
1777 elog(ERROR, "failed to enumerate network events: error code %d",
1778 WSAGetLastError());
1779 if ((cur_event->events & WL_SOCKET_READABLE) &&
1780 (resEvents.lNetworkEvents & FD_READ))
1781 {
1782 /* data available in socket */
1783 occurred_events->events |= WL_SOCKET_READABLE;
1784
1785 /*------
1786 * WaitForMultipleObjects doesn't guarantee that a read event
1787 * will be returned if the latch is set at the same time. Even
1788 * if it did, the caller might drop that event expecting it to
1789 * reoccur on next call. So, we must force the event to be
1790 * reset if this WaitEventSet is used again in order to avoid
1791 * an indefinite hang.
1792 *
1793 * Refer
1794 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
1795 * for the behavior of socket events.
1796 *------
1797 */
1798 cur_event->reset = true;
1799 }
1800 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1801 (resEvents.lNetworkEvents & FD_WRITE))
1802 {
1803 /* writeable */
1804 occurred_events->events |= WL_SOCKET_WRITEABLE;
1805 }
1806 if ((cur_event->events & WL_SOCKET_CONNECTED) &&
1807 (resEvents.lNetworkEvents & FD_CONNECT))
1808 {
1809 /* connected */
1810 occurred_events->events |= WL_SOCKET_CONNECTED;
1811 }
1812 if ((cur_event->events & WL_SOCKET_ACCEPT) &&
1813 (resEvents.lNetworkEvents & FD_ACCEPT))
1814 {
1815 /* incoming connection could be accepted */
1816 occurred_events->events |= WL_SOCKET_ACCEPT;
1817 }
1818 if (resEvents.lNetworkEvents & FD_CLOSE)
1819 {
1820 /* EOF/error, so signal all caller-requested socket flags */
1821 occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
1822 }
1823
1824 if (occurred_events->events != 0)
1825 {
1826 occurred_events++;
1827 returned_events++;
1828 }
1829 }
1830
1831 /* Is the output buffer full? */
1832 if (returned_events == nevents)
1833 break;
1834
1835 /* Have we run out of possible events? */
1836 next_pos = cur_event->pos + 1;
1837 if (next_pos == set->nevents)
1838 break;
1839
1840 /*
1841 * Poll the rest of the event handles in the array starting at
1842 * next_pos being careful to skip over the initial signal handle too.
1843 * This time we use a zero timeout.
1844 */
1845 count = set->nevents - next_pos;
1846 rc = WaitForMultipleObjects(count,
1847 set->handles + 1 + next_pos,
1848 false,
1849 0);
1850
1851 /*
1852 * We don't distinguish between errors and WAIT_TIMEOUT here because
1853 * we already have events to report.
1854 */
1855 if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
1856 break;
1857
1858 /* We have another event to decode. */
1859 cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
1860 }
1861
1862 return returned_events;
1863}
1864#endif
1865
1866/*
1867 * Return whether the current build options can report WL_SOCKET_CLOSED.
1868 */
1869bool
1870WaitEventSetCanReportClosed(void)
1871{
1872#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
1873 defined(WAIT_USE_EPOLL) || \
1874 defined(WAIT_USE_KQUEUE)
1875 return true;
1876#else
1877 return false;
1878#endif
1879}
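
Callers that want connection-loss detection typically guard on this, roughly as sketched below (hedged example; 'set' and 'sock' are assumed):

/* only register WL_SOCKET_CLOSED where the build can actually report it */
if (WaitEventSetCanReportClosed())
	AddWaitEventToSet(set, WL_SOCKET_CLOSED, sock, NULL, NULL);
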
1880
1881/*
1882 * Get the number of wait events registered in a given WaitEventSet.
1883 */
1884int
1885GetNumRegisteredWaitEvents(WaitEventSet *set)
1886{
1887 return set->nevents;
1888}
1889
1890#if defined(WAIT_USE_SELF_PIPE)
1891
1892/*
1893 * SetLatch uses SIGURG to wake up the process waiting on the latch.
1894 *
1895 * Wake up WaitLatch, if we're waiting.
1896 */
1897static void
1898latch_sigurg_handler(SIGNAL_ARGS)
1899{
1900 if (waiting)
1901 sendSelfPipeByte();
1902}
1903
1904/* Send one byte to the self-pipe, to wake up WaitLatch */
1905static void
1906sendSelfPipeByte(void)
1907{
1908 int rc;
1909 char dummy = 0;
1910
1911retry:
1912 rc = write(selfpipe_writefd, &dummy, 1);
1913 if (rc < 0)
1914 {
1915 /* If interrupted by signal, just retry */
1916 if (errno == EINTR)
1917 goto retry;
1918
1919 /*
1920 * If the pipe is full, we don't need to retry, the data that's there
1921 * already is enough to wake up WaitLatch.
1922 */
1923 if (errno == EAGAIN || errno == EWOULDBLOCK)
1924 return;
1925
1926 /*
1927 * Oops, the write() failed for some other reason. We might be in a
1928 * signal handler, so it's not safe to elog(). We have no choice but
1929 * silently ignore the error.
1930 */
1931 return;
1932 }
1933}
1934
1935#endif
1936
1937#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
1938
1939/*
1940 * Read all available data from self-pipe or signalfd.
1941 *
1942 * Note: this is only called when waiting = true. If it fails and doesn't
1943 * return, it must reset that flag first (though ideally, this will never
1944 * happen).
1945 */
1946static void
1947drain(void)
1948{
1949 char buf[1024];
1950 int rc;
1951 int fd;
1952
1953#ifdef WAIT_USE_SELF_PIPE
1954 fd = selfpipe_readfd;
1955#else
1956 fd = signal_fd;
1957#endif
1958
1959 for (;;)
1960 {
1961 rc = read(fd, buf, sizeof(buf));
1962 if (rc < 0)
1963 {
1964 if (errno == EAGAIN || errno == EWOULDBLOCK)
1965 break; /* the descriptor is empty */
1966 else if (errno == EINTR)
1967 continue; /* retry */
1968 else
1969 {
1970 waiting = false;
1971#ifdef WAIT_USE_SELF_PIPE
1972 elog(ERROR, "read() on self-pipe failed: %m");
1973#else
1974 elog(ERROR, "read() on signalfd failed: %m");
1975#endif
1976 }
1977 }
1978 else if (rc == 0)
1979 {
1980 waiting = false;
1981#ifdef WAIT_USE_SELF_PIPE
1982 elog(ERROR, "unexpected EOF on self-pipe");
1983#else
1984 elog(ERROR, "unexpected EOF on signalfd");
1985#endif
1986 }
1987 else if (rc < sizeof(buf))
1988 {
1989 /* we successfully drained the pipe; no need to read() again */
1990 break;
1991 }
1992 /* else buffer wasn't big enough, so read again */
1993 }
1994}
1995
1996#endif
1997
1998static void
1999ResOwnerReleaseWaitEventSet(Datum res)
2000{
2001 WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
2002
2003 Assert(set->owner != NULL);
2004 set->owner = NULL;
2005 FreeWaitEventSet(set);
2006}
2007
2008#ifndef WIN32
2009/*
2010 * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
2011 *
2012 * NB: be sure to save and restore errno around it. (That's standard practice
2013 * in most signal handlers, of course, but we used to omit it in handlers that
2014 * only set a flag.) XXX
2015 *
2016 * NB: this function is called from critical sections and signal handlers so
2017 * throwing an error is not a good idea.
2018 *
2019 * On Windows, Latch uses SetEvent directly and this is not used.
2020 */
2021void
2022WakeupMyProc(void)
2023{
2024#if defined(WAIT_USE_SELF_PIPE)
2025 if (waiting)
2026 sendSelfPipeByte();
2027#else
2028 if (waiting)
2029 kill(MyProcPid, SIGURG);
2030#endif
2031}
2032
2033/* Similar to WakeupMyProc, but wake up another process */
2034void
2035WakeupOtherProc(int pid)
2036{
2037 kill(pid, SIGURG);
2038}
2039#endif
Definition win32_port.h:359