1/*-------------------------------------------------------------------------
2 *
3 * latch.c
4 * Routines for inter-process latches
5 *
6 * The poll() implementation uses the so-called self-pipe trick to overcome the
7 * race condition involved with poll() and setting a global flag in the signal
8 * handler. When a latch is set and the current process is waiting for it, the
9 * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
10 * A signal by itself doesn't interrupt poll() on all platforms, and even on
11 * platforms where it does, a signal that arrives just before the poll() call
12 * does not prevent poll() from entering sleep. An incoming byte on a pipe
13 * however reliably interrupts the sleep, and causes poll() to return
14 * immediately even if the signal arrives before poll() begins.
15 *
16 * The epoll() implementation overcomes the race with a different technique: it
17 * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
18 * don't need to register a signal handler or create our own self-pipe. We
19 * assume that any system that has Linux epoll() also has Linux signalfd().
20 *
21 * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
22 *
23 * The Windows implementation uses Windows events that are inherited by all
24 * postmaster child processes. There's no need for the self-pipe trick there.
25 *
26 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
27 * Portions Copyright (c) 1994, Regents of the University of California
28 *
29 * IDENTIFICATION
30 * src/backend/storage/ipc/latch.c
31 *
32 *-------------------------------------------------------------------------
33 */
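/*
 * Editor's note: a minimal sketch of the self-pipe trick described above,
 * with hypothetical names, independent of the infrastructure in this file.
 * The handler's write() makes a concurrent poll() on the read end return
 * even if the signal arrived just before poll() was entered.
 *
 *     static int pipe_fds[2];    // from pipe(); both ends set O_NONBLOCK
 *
 *     static void
 *     wakeup_handler(int signo)
 *     {
 *         int save_errno = errno;
 *
 *         (void) write(pipe_fds[1], "x", 1);  // wakes poll() on pipe_fds[0]
 *         errno = save_errno;
 *     }
 */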
34#include "postgres.h"
35
36#include <fcntl.h>
37#include <limits.h>
38#include <signal.h>
39#include <unistd.h>
40#ifdef HAVE_SYS_EPOLL_H
41#include <sys/epoll.h>
42#endif
43#ifdef HAVE_SYS_EVENT_H
44#include <sys/event.h>
45#endif
46#ifdef HAVE_SYS_SIGNALFD_H
47#include <sys/signalfd.h>
48#endif
49#ifdef HAVE_POLL_H
50#include <poll.h>
51#endif
52
53#include "libpq/pqsignal.h"
54#include "miscadmin.h"
55#include "pgstat.h"
56#include "port/atomics.h"
57#include "portability/instr_time.h"
58#include "postmaster/postmaster.h"
59#include "storage/fd.h"
60#include "storage/ipc.h"
61#include "storage/latch.h"
62#include "storage/pmsignal.h"
63#include "utils/memutils.h"
64#include "utils/resowner.h"
65
66/*
67 * Select the fd readiness primitive to use. Normally the "most modern"
68 * primitive supported by the OS will be used, but for testing it can be
69 * useful to manually specify the used primitive. If desired, just add a
70 * define somewhere before this block.
71 */
72#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
73 defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
74/* don't overwrite manual choice */
75#elif defined(HAVE_SYS_EPOLL_H)
76#define WAIT_USE_EPOLL
77#elif defined(HAVE_KQUEUE)
78#define WAIT_USE_KQUEUE
79#elif defined(HAVE_POLL)
80#define WAIT_USE_POLL
81#elif WIN32
82#define WAIT_USE_WIN32
83#else
84#error "no wait set implementation available"
85#endif
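/*
 * Editor's note: per the comment above, a non-default primitive can be
 * forced for testing by defining the corresponding symbol ahead of this
 * block, e.g.:
 *
 *     #define WAIT_USE_POLL
 */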
86
87/*
88 * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
89 * available. For testing the choice can also be manually specified.
90 */
91#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
92#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
93/* don't overwrite manual choice */
94#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
95#define WAIT_USE_SIGNALFD
96#else
97#define WAIT_USE_SELF_PIPE
98#endif
99#endif
100
101/* typedef in latch.h */
102struct WaitEventSet
103{
104 ResourceOwner owner;
105
106 int nevents; /* number of registered events */
107 int nevents_space; /* maximum number of events in this set */
108
109 /*
110 * Array, of nevents_space length, storing the definition of events this
111 * set is waiting for.
112 */
113 WaitEvent *events;
114
115 /*
116 * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
117 * said latch, and latch_pos the offset in the ->events array. This is
118 * useful because we check the state of the latch before performing
119 * syscalls related to waiting.
120 */
121 Latch *latch;
122 int latch_pos;
123
124 /*
125 * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
126 * is set so that we'll exit immediately if postmaster death is detected,
127 * instead of returning.
128 */
129 bool exit_on_postmaster_death;
130
131#if defined(WAIT_USE_EPOLL)
132 int epoll_fd;
133 /* epoll_wait returns events in a user-provided array; allocate once */
134 struct epoll_event *epoll_ret_events;
135#elif defined(WAIT_USE_KQUEUE)
136 int kqueue_fd;
137 /* kevent returns events in a user-provided array; allocate once */
138 struct kevent *kqueue_ret_events;
139 bool report_postmaster_not_running;
140#elif defined(WAIT_USE_POLL)
141 /* poll expects events to be waited on every poll() call, prepare once */
142 struct pollfd *pollfds;
143#elif defined(WAIT_USE_WIN32)
144
145 /*
146 * Array of windows events. The first element always contains
147 * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
148 * event->pos + 1).
149 */
150 HANDLE *handles;
151#endif
152};
153
154/* A common WaitEventSet used to implement WaitLatch() */
155static WaitEventSet *LatchWaitSet;
156
157/* The position of the latch in LatchWaitSet. */
158#define LatchWaitSetLatchPos 0
159
160#ifndef WIN32
161/* Are we currently in WaitLatch? The signal handler would like to know. */
162static volatile sig_atomic_t waiting = false;
163#endif
164
165#ifdef WAIT_USE_SIGNALFD
166/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
167static int signal_fd = -1;
168#endif
169
170#ifdef WAIT_USE_SELF_PIPE
171/* Read and write ends of the self-pipe */
172static int selfpipe_readfd = -1;
173static int selfpipe_writefd = -1;
174
175/* Process owning the self-pipe --- needed for checking purposes */
176static int selfpipe_owner_pid = 0;
177
178/* Private function prototypes */
179static void latch_sigurg_handler(SIGNAL_ARGS);
180static void sendSelfPipeByte(void);
181#endif
182
183#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
184static void drain(void);
185#endif
186
187#if defined(WAIT_USE_EPOLL)
188static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
189#elif defined(WAIT_USE_KQUEUE)
190static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
191#elif defined(WAIT_USE_POLL)
192static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
193#elif defined(WAIT_USE_WIN32)
194static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
195#endif
196
197static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
198 WaitEvent *occurred_events, int nevents);
199
200/* ResourceOwner support to hold WaitEventSets */
201static void ResOwnerReleaseWaitEventSet(Datum res);
202
203static const ResourceOwnerDesc wait_event_set_resowner_desc =
204{
205 .name = "WaitEventSet",
206 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
207 .release_priority = RELEASE_PRIO_WAITEVENTSETS,
208 .ReleaseResource = ResOwnerReleaseWaitEventSet,
209 .DebugPrint = NULL
210};
211
212/* Convenience wrappers over ResourceOwnerRemember/Forget */
213static inline void
214ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
215{
216 ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
217}
218static inline void
219ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
220{
221 ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
222}
223
224
225/*
226 * Initialize the process-local latch infrastructure.
227 *
228 * This must be called once during startup of any process that can wait on
229 * latches, before it issues any InitLatch() or OwnLatch() calls.
230 */
231void
232InitializeLatchSupport(void)
233{
234#if defined(WAIT_USE_SELF_PIPE)
235 int pipefd[2];
236
237 if (IsUnderPostmaster)
238 {
239 /*
240 * We might have inherited connections to a self-pipe created by the
241 * postmaster. It's critical that child processes create their own
242 * self-pipes, of course, and we really want them to close the
243 * inherited FDs for safety's sake.
244 */
245 if (selfpipe_owner_pid != 0)
246 {
247 /* Assert we go through here but once in a child process */
248 Assert(selfpipe_owner_pid != MyProcPid);
249 /* Release postmaster's pipe FDs; ignore any error */
250 (void) close(selfpipe_readfd);
251 (void) close(selfpipe_writefd);
252 /* Clean up, just for safety's sake; we'll set these below */
253 selfpipe_readfd = selfpipe_writefd = -1;
254 selfpipe_owner_pid = 0;
255 /* Keep fd.c's accounting straight */
256 ReleaseExternalFD();
257 ReleaseExternalFD();
258 }
259 else
260 {
261 /*
262 * Postmaster didn't create a self-pipe ... or else we're in an
263 * EXEC_BACKEND build, in which case it doesn't matter since the
264 * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
265 * fd.c won't have state to clean up, either.
266 */
267 Assert(selfpipe_readfd == -1);
268 }
269 }
270 else
271 {
272 /* In postmaster or standalone backend, assert we do this but once */
273 Assert(selfpipe_readfd == -1);
274 Assert(selfpipe_owner_pid == 0);
275 }
276
277 /*
278 * Set up the self-pipe that allows a signal handler to wake up the
279 * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
280 * that SetLatch won't block if the event has already been set many times
281 * filling the kernel buffer. Make the read-end non-blocking too, so that
282 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
283 * Also, make both FDs close-on-exec, since we surely do not want any
284 * child processes messing with them.
285 */
286 if (pipe(pipefd) < 0)
287 elog(FATAL, "pipe() failed: %m");
288 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
289 elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
290 if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
291 elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
292 if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
293 elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
294 if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
295 elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
296
297 selfpipe_readfd = pipefd[0];
298 selfpipe_writefd = pipefd[1];
299 selfpipe_owner_pid = MyProcPid;
300
301 /* Tell fd.c about these two long-lived FDs */
302 ReserveExternalFD();
303 ReserveExternalFD();
304
305 pqsignal(SIGURG, latch_sigurg_handler);
306#endif
307
308#ifdef WAIT_USE_SIGNALFD
309 sigset_t signalfd_mask;
310
311 if (IsUnderPostmaster)
312 {
313 /*
314 * It would probably be safe to re-use the inherited signalfd since
315 * signalfds only see the current process's pending signals, but it
316 * seems less surprising to close it and create our own.
317 */
318 if (signal_fd != -1)
319 {
320 /* Release postmaster's signal FD; ignore any error */
321 (void) close(signal_fd);
322 signal_fd = -1;
323 ReleaseExternalFD();
324 }
325 }
326
327 /* Block SIGURG, because we'll receive it through a signalfd. */
328 sigaddset(&UnBlockSig, SIGURG);
329
330 /* Set up the signalfd to receive SIGURG notifications. */
331 sigemptyset(&signalfd_mask);
332 sigaddset(&signalfd_mask, SIGURG);
333 signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
334 if (signal_fd < 0)
335 elog(FATAL, "signalfd() failed");
336 ReserveExternalFD();
337#endif
338
339#ifdef WAIT_USE_KQUEUE
340 /* Ignore SIGURG, because we'll receive it via kqueue. */
341 pqsignal(SIGURG, SIG_IGN);
342#endif
343}
344
345void
346InitializeLatchWaitSet(void)
347{
348 int latch_pos PG_USED_FOR_ASSERTS_ONLY;
349
350 Assert(LatchWaitSet == NULL);
351
352 /* Set up the WaitEventSet used by WaitLatch(). */
353 LatchWaitSet = CreateWaitEventSet(NULL, 2);
354 latch_pos = AddWaitEventToSet(LatchWaitSet, WL_LATCH_SET, PGINVALID_SOCKET,
355 MyLatch, NULL);
356 if (IsUnderPostmaster)
357 AddWaitEventToSet(LatchWaitSet, WL_EXIT_ON_PM_DEATH,
358 PGINVALID_SOCKET, NULL, NULL);
359
360 Assert(latch_pos == LatchWaitSetLatchPos);
361}
362
363void
364ShutdownLatchSupport(void)
365{
366#if defined(WAIT_USE_POLL)
367 pqsignal(SIGURG, SIG_IGN);
368#endif
369
370 if (LatchWaitSet)
371 {
372 FreeWaitEventSet(LatchWaitSet);
373 LatchWaitSet = NULL;
374 }
375
376#if defined(WAIT_USE_SELF_PIPE)
377 close(selfpipe_readfd);
378 close(selfpipe_writefd);
379 selfpipe_readfd = -1;
380 selfpipe_writefd = -1;
381 selfpipe_owner_pid = InvalidPid;
382#endif
383
384#if defined(WAIT_USE_SIGNALFD)
385 close(signal_fd);
386 signal_fd = -1;
387#endif
388}
389
390/*
391 * Initialize a process-local latch.
392 */
393void
394InitLatch(Latch *latch)
395{
396 latch->is_set = false;
397 latch->maybe_sleeping = false;
398 latch->owner_pid = MyProcPid;
399 latch->is_shared = false;
400
401#if defined(WAIT_USE_SELF_PIPE)
402 /* Assert InitializeLatchSupport has been called in this process */
403 Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
404#elif defined(WAIT_USE_SIGNALFD)
405 /* Assert InitializeLatchSupport has been called in this process */
406 Assert(signal_fd >= 0);
407#elif defined(WAIT_USE_WIN32)
408 latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
409 if (latch->event == NULL)
410 elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
411#endif /* WIN32 */
412}
413
414/*
415 * Initialize a shared latch that can be set from other processes. The latch
416 * is initially owned by no-one; use OwnLatch to associate it with the
417 * current process.
418 *
419 * InitSharedLatch needs to be called in postmaster before forking child
420 * processes, usually right after allocating the shared memory block
421 * containing the latch with ShmemInitStruct. (The Unix implementation
422 * doesn't actually require that, but the Windows one does.) Because of
423 * this restriction, we have no concurrency issues to worry about here.
424 *
425 * Note that other handles created in this module are never marked as
426 * inheritable. Thus we do not need to worry about cleaning up child
427 * process references to postmaster-private latches or WaitEventSets.
428 */
429void
430InitSharedLatch(Latch *latch)
431{
432#ifdef WIN32
433 SECURITY_ATTRIBUTES sa;
434
435 /*
436 * Set up security attributes to specify that the events are inherited.
437 */
438 ZeroMemory(&sa, sizeof(sa));
439 sa.nLength = sizeof(sa);
440 sa.bInheritHandle = TRUE;
441
442 latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
443 if (latch->event == NULL)
444 elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
445#endif
446
447 latch->is_set = false;
448 latch->maybe_sleeping = false;
449 latch->owner_pid = 0;
450 latch->is_shared = true;
451}
452
453/*
454 * Associate a shared latch with the current process, allowing it to
455 * wait on the latch.
456 *
457 * Although there is a sanity check for latch-already-owned, we don't do
458 * any sort of locking here, meaning that we could fail to detect the error
459 * if two processes try to own the same latch at about the same time. If
460 * there is any risk of that, caller must provide an interlock to prevent it.
461 */
462void
463OwnLatch(Latch *latch)
464{
465 int owner_pid;
466
467 /* Sanity checks */
468 Assert(latch->is_shared);
469
470#if defined(WAIT_USE_SELF_PIPE)
471 /* Assert InitializeLatchSupport has been called in this process */
472 Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
473#elif defined(WAIT_USE_SIGNALFD)
474 /* Assert InitializeLatchSupport has been called in this process */
475 Assert(signal_fd >= 0);
476#endif
477
478 owner_pid = latch->owner_pid;
479 if (owner_pid != 0)
480 elog(PANIC, "latch already owned by PID %d", owner_pid);
481
482 latch->owner_pid = MyProcPid;
483}
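/*
 * Editor's note: a sketch of the shared-latch lifecycle described above,
 * assuming a hypothetical shared-memory struct 'shared' containing a Latch:
 *
 *     // In postmaster, right after ShmemInitStruct():
 *     InitSharedLatch(&shared->latch);
 *
 *     // In the child process that will wait on it:
 *     OwnLatch(&shared->latch);
 *     ...
 *     DisownLatch(&shared->latch);   // when done, e.g. at process exit
 */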
484
485/*
486 * Disown a shared latch currently owned by the current process.
487 */
488void
489DisownLatch(Latch *latch)
490{
491 Assert(latch->is_shared);
492 Assert(latch->owner_pid == MyProcPid);
493
494 latch->owner_pid = 0;
495}
496
497/*
498 * Wait for a given latch to be set, or for postmaster death, or until timeout
499 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
500 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
501 * function returns immediately.
502 *
503 * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
504 * is given. Although it is declared as "long", we don't actually support
505 * timeouts longer than INT_MAX milliseconds. Note that some extra overhead
506 * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
507 *
508 * The latch must be owned by the current process, i.e. it must be a
509 * process-local latch initialized with InitLatch, or a shared latch
510 * associated with the current process by calling OwnLatch.
511 *
512 * Returns bit mask indicating which condition(s) caused the wake-up. Note
513 * that if multiple wake-up conditions are true, there is no guarantee that
514 * we return all of them in one call, but we will return at least one.
515 */
516int
517WaitLatch(Latch *latch, int wakeEvents, long timeout,
518 uint32 wait_event_info)
519{
520 WaitEvent event;
521
522 /* Postmaster-managed callers must handle postmaster death somehow. */
523 Assert(!IsUnderPostmaster ||
524 (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
525 (wakeEvents & WL_POSTMASTER_DEATH));
526
527 /*
528 * Some callers may have a latch other than MyLatch, or no latch at all,
529 * or want to handle postmaster death differently. It's cheap to assign
530 * those, so just do it every time.
531 */
532 if (!(wakeEvents & WL_LATCH_SET))
533 latch = NULL;
534 ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
535 LatchWaitSet->exit_on_postmaster_death =
536 ((wakeEvents & WL_EXIT_ON_PM_DEATH) != 0);
537
538 if (WaitEventSetWait(LatchWaitSet,
539 (wakeEvents & WL_TIMEOUT) ? timeout : -1,
540 &event, 1,
541 wait_event_info) == 0)
542 return WL_TIMEOUT;
543 else
544 return event.events;
545}
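/*
 * Editor's note: the canonical wait loop, paraphrased from latch.h. Work is
 * rechecked only after ResetLatch() so that a concurrent SetLatch() is never
 * missed; work_available(), do_work(), and the wait_event_info value are
 * placeholders.
 *
 *     for (;;)
 *     {
 *         ResetLatch(MyLatch);
 *         if (work_available())
 *             do_work();
 *         (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
 *                          -1, WAIT_EVENT_MQ_RECEIVE);
 *     }
 */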
546
547/*
548 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
549 * conditions.
550 *
551 * When waiting on a socket, EOF and error conditions always cause the socket
552 * to be reported as readable/writable/connected, so that the caller can deal
553 * with the condition.
554 *
555 * wakeEvents must include either WL_EXIT_ON_PM_DEATH for automatic exit
556 * if the postmaster dies or WL_POSTMASTER_DEATH for a flag set in the
557 * return value if the postmaster dies. The latter is useful for rare cases
558 * where some behavior other than immediate exit is needed.
559 *
560 * NB: These days this is just a wrapper around the WaitEventSet API. When
561 * using a latch very frequently, consider creating a longer living
562 * WaitEventSet instead; that's more efficient.
563 */
564int
565WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
566 long timeout, uint32 wait_event_info)
567{
568 int ret = 0;
569 int rc;
570 WaitEvent event;
571 WaitEventSet *set = CreateWaitEventSet(CurrentResourceOwner, 3);
572
573 if (wakeEvents & WL_TIMEOUT)
574 Assert(timeout >= 0);
575 else
576 timeout = -1;
577
578 if (wakeEvents & WL_LATCH_SET)
579 AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
580 latch, NULL);
581
582 /* Postmaster-managed callers must handle postmaster death somehow. */
583 Assert(!IsUnderPostmaster ||
584 (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
585 (wakeEvents & WL_POSTMASTER_DEATH));
586
587 if ((wakeEvents & WL_POSTMASTER_DEATH) && IsUnderPostmaster)
588 AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
589 NULL, NULL);
590
591 if ((wakeEvents & WL_EXIT_ON_PM_DEATH) && IsUnderPostmaster)
592 AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
593 NULL, NULL);
594
595 if (wakeEvents & WL_SOCKET_MASK)
596 {
597 int ev;
598
599 ev = wakeEvents & WL_SOCKET_MASK;
600 AddWaitEventToSet(set, ev, sock, NULL, NULL);
601 }
602
603 rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);
604
605 if (rc == 0)
606 ret |= WL_TIMEOUT;
607 else
608 {
609 ret |= event.events & (WL_LATCH_SET |
610 WL_POSTMASTER_DEATH |
611 WL_SOCKET_MASK);
612 }
613
614 FreeWaitEventSet(set);
615
616 return ret;
617}
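/*
 * Editor's note: a sketch of the longer-lived WaitEventSet the NB above
 * recommends when the same latch/socket pair is waited on frequently.
 * 'sock' and 'info' are placeholders for a connected socket and a
 * wait_event_info value; the NULL resowner gives session lifetime.
 *
 *     WaitEventSet *set = CreateWaitEventSet(NULL, 3);
 *     WaitEvent     ev;
 *
 *     AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
 *     AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
 *     AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);
 *
 *     for (;;)
 *     {
 *         if (WaitEventSetWait(set, -1, &ev, 1, info) > 0)
 *             ...handle ev.events...
 *     }
 */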
618
619/*
620 * Sets a latch and wakes up anyone waiting on it.
621 *
622 * This is cheap if the latch is already set, otherwise not so much.
623 *
624 * NB: when calling this in a signal handler, be sure to save and restore
625 * errno around it. (That's standard practice in most signal handlers, of
626 * course, but we used to omit it in handlers that only set a flag.)
627 *
628 * NB: this function is called from critical sections and signal handlers so
629 * throwing an error is not a good idea.
630 */
631void
632SetLatch(Latch *latch)
633{
634#ifndef WIN32
635 pid_t owner_pid;
636#else
637 HANDLE handle;
638#endif
639
640 /*
641 * The memory barrier has to be placed here to ensure that any flag
642 * variables possibly changed by this process have been flushed to main
643 * memory, before we check/set is_set.
644 */
645 pg_memory_barrier();
646
647 /* Quick exit if already set */
648 if (latch->is_set)
649 return;
650
651 latch->is_set = true;
652
653 pg_memory_barrier();
654 if (!latch->maybe_sleeping)
655 return;
656
657#ifndef WIN32
658
659 /*
660 * See if anyone's waiting for the latch. It can be the current process if
661 * we're in a signal handler. We use the self-pipe or SIGURG to ourselves
662 * to wake up WaitEventSetWaitBlock() without races in that case. If it's
663 * another process, send a signal.
664 *
665 * Fetch owner_pid only once, in case the latch is concurrently getting
666 * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
667 * guaranteed to be true! In practice, the effective range of pid_t fits
668 * in a 32 bit integer, and so should be atomic. In the worst case, we
669 * might end up signaling the wrong process. Even then, you're very
670 * unlucky if a process with that bogus pid exists and belongs to
671 * Postgres; and PG database processes should handle excess SIGUSR1
672 * interrupts without a problem anyhow.
673 *
674 * Another sort of race condition that's possible here is for a new
675 * process to own the latch immediately after we look, so we don't signal
676 * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
677 * the standard coding convention of waiting at the bottom of their loops,
678 * not the top, so that they'll correctly process latch-setting events
679 * that happen before they enter the loop.
680 */
681 owner_pid = latch->owner_pid;
682 if (owner_pid == 0)
683 return;
684 else if (owner_pid == MyProcPid)
685 {
686#if defined(WAIT_USE_SELF_PIPE)
687 if (waiting)
688 sendSelfPipeByte();
689#else
690 if (waiting)
691 kill(MyProcPid, SIGURG);
692#endif
693 }
694 else
695 kill(owner_pid, SIGURG);
696
697#else
698
699 /*
700 * See if anyone's waiting for the latch. It can be the current process if
701 * we're in a signal handler.
702 *
703 * Use a local variable here just in case somebody changes the event field
704 * concurrently (which really should not happen).
705 */
706 handle = latch->event;
707 if (handle)
708 {
709 SetEvent(handle);
710
711 /*
712 * Note that we silently ignore any errors. We might be in a signal
713 * handler or other critical path where it's not safe to call elog().
714 */
715 }
716#endif
717}
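/*
 * Editor's note: per the NB above, a handler that sets a latch must save
 * and restore errno. A sketch of the usual shape, with a hypothetical
 * flag variable:
 *
 *     static volatile sig_atomic_t my_flag = false;
 *
 *     static void
 *     my_signal_handler(SIGNAL_ARGS)
 *     {
 *         int save_errno = errno;
 *
 *         my_flag = true;
 *         SetLatch(MyLatch);
 *
 *         errno = save_errno;
 *     }
 */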
718
719/*
720 * Clear the latch. Calling WaitLatch after this will sleep, unless
721 * the latch is set again before the WaitLatch call.
722 */
723void
724ResetLatch(Latch *latch)
725{
726 /* Only the owner should reset the latch */
727 Assert(latch->owner_pid == MyProcPid);
728 Assert(latch->maybe_sleeping == false);
729
730 latch->is_set = false;
731
732 /*
733 * Ensure that the write to is_set gets flushed to main memory before we
734 * examine any flag variables. Otherwise a concurrent SetLatch might
735 * falsely conclude that it needn't signal us, even though we have missed
736 * seeing some flag updates that SetLatch was supposed to inform us of.
737 */
738 pg_memory_barrier();
739}
740
741/*
742 * Create a WaitEventSet with space for nevents different events to wait for.
743 *
744 * These events can then be efficiently waited upon together, using
745 * WaitEventSetWait().
746 *
747 * The WaitEventSet is tracked by the given 'resowner'. Use NULL for session
748 * lifetime.
749 */
750WaitEventSet *
751CreateWaitEventSet(ResourceOwner resowner, int nevents)
752{
753 WaitEventSet *set;
754 char *data;
755 Size sz = 0;
756
757 /*
758 * Use MAXALIGN size/alignment to guarantee that later uses of memory are
759 * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
760 * platforms, but earlier allocations like WaitEventSet and WaitEvent
761 * might not be sized to guarantee that when purely using sizeof().
762 */
763 sz += MAXALIGN(sizeof(WaitEventSet));
764 sz += MAXALIGN(sizeof(WaitEvent) * nevents);
765
766#if defined(WAIT_USE_EPOLL)
767 sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
768#elif defined(WAIT_USE_KQUEUE)
769 sz += MAXALIGN(sizeof(struct kevent) * nevents);
770#elif defined(WAIT_USE_POLL)
771 sz += MAXALIGN(sizeof(struct pollfd) * nevents);
772#elif defined(WAIT_USE_WIN32)
773 /* need space for the pgwin32_signal_event */
774 sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
775#endif
776
777 if (resowner != NULL)
778 ResourceOwnerEnlarge(resowner);
779
780 data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
781
782 set = (WaitEventSet *) data;
783 data += MAXALIGN(sizeof(WaitEventSet));
784
785 set->events = (WaitEvent *) data;
786 data += MAXALIGN(sizeof(WaitEvent) * nevents);
787
788#if defined(WAIT_USE_EPOLL)
789 set->epoll_ret_events = (struct epoll_event *) data;
790 data += MAXALIGN(sizeof(struct epoll_event) * nevents);
791#elif defined(WAIT_USE_KQUEUE)
792 set->kqueue_ret_events = (struct kevent *) data;
793 data += MAXALIGN(sizeof(struct kevent) * nevents);
794#elif defined(WAIT_USE_POLL)
795 set->pollfds = (struct pollfd *) data;
796 data += MAXALIGN(sizeof(struct pollfd) * nevents);
797#elif defined(WAIT_USE_WIN32)
798 set->handles = (HANDLE *) data;
799 data += MAXALIGN(sizeof(HANDLE) * nevents);
800#endif
801
802 set->latch = NULL;
803 set->nevents_space = nevents;
804 set->exit_on_postmaster_death = false;
805
806 if (resowner != NULL)
807 {
808 ResourceOwnerRememberWaitEventSet(resowner, set);
809 set->owner = resowner;
810 }
811
812#if defined(WAIT_USE_EPOLL)
813 if (!AcquireExternalFD())
814 elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
815 set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
816 if (set->epoll_fd < 0)
817 {
818 ReleaseExternalFD();
819 elog(ERROR, "epoll_create1 failed: %m");
820 }
821#elif defined(WAIT_USE_KQUEUE)
822 if (!AcquireExternalFD())
823 elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
824 set->kqueue_fd = kqueue();
825 if (set->kqueue_fd < 0)
826 {
827 ReleaseExternalFD();
828 elog(ERROR, "kqueue failed: %m");
829 }
830 if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
831 {
832 int save_errno = errno;
833
834 close(set->kqueue_fd);
835 ReleaseExternalFD();
836 errno = save_errno;
837 elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
838 }
839 set->report_postmaster_not_running = false;
840#elif defined(WAIT_USE_WIN32)
841
842 /*
843 * To handle signals while waiting, we need to add a win32 specific event.
844 * We accounted for the additional event at the top of this routine. See
845 * port/win32/signal.c for more details.
846 *
847 * Note: pgwin32_signal_event should be first to ensure that it will be
848 * reported when multiple events are set. We want to guarantee that
849 * pending signals are serviced.
850 */
851 set->handles[0] = pgwin32_signal_event;
852 StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
853#endif
854
855 return set;
856}
857
858/*
859 * Free a previously created WaitEventSet.
860 *
861 * Note: preferably, this shouldn't have to free any resources that could be
862 * inherited across an exec(). If it did, we'd likely leak those resources in
863 * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
864 * when the FD is created. For the Windows case, we assume that the handles
865 * involved are non-inheritable.
866 */
867void
868FreeWaitEventSet(WaitEventSet *set)
869{
870 if (set->owner)
871 {
872 ResourceOwnerForgetWaitEventSet(set->owner, set);
873 set->owner = NULL;
874 }
875
876#if defined(WAIT_USE_EPOLL)
877 close(set->epoll_fd);
878 ReleaseExternalFD();
879#elif defined(WAIT_USE_KQUEUE)
880 close(set->kqueue_fd);
881 ReleaseExternalFD();
882#elif defined(WAIT_USE_WIN32)
883 for (WaitEvent *cur_event = set->events;
884 cur_event < (set->events + set->nevents);
885 cur_event++)
886 {
887 if (cur_event->events & WL_LATCH_SET)
888 {
889 /* uses the latch's HANDLE */
890 }
891 else if (cur_event->events & WL_POSTMASTER_DEATH)
892 {
893 /* uses PostmasterHandle */
894 }
895 else
896 {
897 /* Clean up the event object we created for the socket */
898 WSAEventSelect(cur_event->fd, NULL, 0);
899 WSACloseEvent(set->handles[cur_event->pos + 1]);
900 }
901 }
902#endif
903
904 pfree(set);
905}
906
907/*
908 * Free a previously created WaitEventSet in a child process after a fork().
909 */
910void
911FreeWaitEventSetAfterFork(WaitEventSet *set)
912{
913#if defined(WAIT_USE_EPOLL)
914 close(set->epoll_fd);
915 ReleaseExternalFD();
916#elif defined(WAIT_USE_KQUEUE)
917 /* kqueues are not normally inherited by child processes */
918 ReleaseExternalFD();
919#endif
920
921 pfree(set);
922}
923
924/* ---
925 * Add an event to the set. Possible events are:
926 * - WL_LATCH_SET: Wait for the latch to be set
927 * - WL_POSTMASTER_DEATH: Wait for postmaster to die
928 * - WL_SOCKET_READABLE: Wait for socket to become readable,
929 * can be combined in one event with other WL_SOCKET_* events
930 * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
931 * can be combined with other WL_SOCKET_* events
932 * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
933 * can be combined with other WL_SOCKET_* events (on non-Windows
934 * platforms, this is the same as WL_SOCKET_WRITEABLE)
935 * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
936 * can be combined with other WL_SOCKET_* events (on non-Windows
937 * platforms, this is the same as WL_SOCKET_READABLE)
938 * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
939 * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
940 *
941 * Returns the offset in WaitEventSet->events (starting from 0), which can be
942 * used to modify previously added wait events using ModifyWaitEvent().
943 *
944 * In the WL_LATCH_SET case the latch must be owned by the current process,
945 * i.e. it must be a process-local latch initialized with InitLatch, or a
946 * shared latch associated with the current process by calling OwnLatch.
947 *
948 * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
949 * conditions cause the socket to be reported as readable/writable/connected,
950 * so that the caller can deal with the condition.
951 *
952 * The user_data pointer specified here will be set for the events returned
953 * by WaitEventSetWait(), allowing the caller to easily associate
954 * additional data with events.
955 */
956int
957AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
958 void *user_data)
959{
960 WaitEvent *event;
961
962 /* not enough space */
963 Assert(set->nevents < set->nevents_space);
964
965 if (events == WL_EXIT_ON_PM_DEATH)
966 {
967 events = WL_POSTMASTER_DEATH;
968 set->exit_on_postmaster_death = true;
969 }
970
971 if (latch)
972 {
973 if (latch->owner_pid != MyProcPid)
974 elog(ERROR, "cannot wait on a latch owned by another process");
975 if (set->latch)
976 elog(ERROR, "cannot wait on more than one latch");
977 if ((events & WL_LATCH_SET) != WL_LATCH_SET)
978 elog(ERROR, "latch events only support being set");
979 }
980 else
981 {
982 if (events & WL_LATCH_SET)
983 elog(ERROR, "cannot wait on latch without a specified latch");
984 }
985
986 /* waiting for socket readiness without a socket indicates a bug */
987 if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
988 elog(ERROR, "cannot wait on socket event without a socket");
989
990 event = &set->events[set->nevents];
991 event->pos = set->nevents++;
992 event->fd = fd;
993 event->events = events;
994 event->user_data = user_data;
995#ifdef WIN32
996 event->reset = false;
997#endif
998
999 if (events == WL_LATCH_SET)
1000 {
1001 set->latch = latch;
1002 set->latch_pos = event->pos;
1003#if defined(WAIT_USE_SELF_PIPE)
1004 event->fd = selfpipe_readfd;
1005#elif defined(WAIT_USE_SIGNALFD)
1006 event->fd = signal_fd;
1007#else
1008 event->fd = PGINVALID_SOCKET;
1009#ifdef WAIT_USE_EPOLL
1010 return event->pos;
1011#endif
1012#endif
1013 }
1014 else if (events == WL_POSTMASTER_DEATH)
1015 {
1016#ifndef WIN32
1017 event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
1018#endif
1019 }
1020
1021 /* perform wait primitive specific initialization, if needed */
1022#if defined(WAIT_USE_EPOLL)
1023 WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
1024#elif defined(WAIT_USE_KQUEUE)
1025 WaitEventAdjustKqueue(set, event, 0);
1026#elif defined(WAIT_USE_POLL)
1027 WaitEventAdjustPoll(set, event);
1028#elif defined(WAIT_USE_WIN32)
1029 WaitEventAdjustWin32(set, event);
1030#endif
1031
1032 return event->pos;
1033}
1034
1035/*
1036 * Change the event mask and, in the WL_LATCH_SET case, the latch associated
1037 * with the WaitEvent. The latch may be changed to NULL to disable the latch
1038 * temporarily, and then set back to a latch later.
1039 *
1040 * 'pos' is the id returned by AddWaitEventToSet.
1041 */
1042void
1043ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
1044{
1045 WaitEvent *event;
1046#if defined(WAIT_USE_KQUEUE)
1047 int old_events;
1048#endif
1049
1050 Assert(pos < set->nevents);
1051
1052 event = &set->events[pos];
1053#if defined(WAIT_USE_KQUEUE)
1054 old_events = event->events;
1055#endif
1056
1057 /*
1058 * If neither the event mask nor the associated latch changes, return
1059 * early. That's an important optimization for some sockets, where
1060 * ModifyWaitEvent is frequently used to switch from waiting for reads to
1061 * waiting on writes.
1062 */
1063 if (events == event->events &&
1064 (!(event->events & WL_LATCH_SET) || set->latch == latch))
1065 return;
1066
1067 if (event->events & WL_LATCH_SET &&
1068 events != event->events)
1069 {
1070 elog(ERROR, "cannot modify latch event");
1071 }
1072
1073 if (event->events & WL_POSTMASTER_DEATH)
1074 {
1075 elog(ERROR, "cannot modify postmaster death event");
1076 }
1077
1078 /* FIXME: validate event mask */
1079 event->events = events;
1080
1081 if (events == WL_LATCH_SET)
1082 {
1083 if (latch && latch->owner_pid != MyProcPid)
1084 elog(ERROR, "cannot wait on a latch owned by another process");
1085 set->latch = latch;
1086
1087 /*
1088 * On Unix, we don't need to modify the kernel object because the
1089 * underlying pipe (if there is one) is the same for all latches so we
1090 * can return immediately. On Windows, we need to update our array of
1091 * handles, but we leave the old one in place and tolerate spurious
1092 * wakeups if the latch is disabled.
1093 */
1094#if defined(WAIT_USE_WIN32)
1095 if (!latch)
1096 return;
1097#else
1098 return;
1099#endif
1100 }
1101
1102#if defined(WAIT_USE_EPOLL)
1103 WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
1104#elif defined(WAIT_USE_KQUEUE)
1105 WaitEventAdjustKqueue(set, event, old_events);
1106#elif defined(WAIT_USE_POLL)
1107 WaitEventAdjustPoll(set, event);
1108#elif defined(WAIT_USE_WIN32)
1109 WaitEventAdjustWin32(set, event);
1110#endif
1111}
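/*
 * Editor's note: the early-return optimization above targets callers that
 * flip one socket between read- and write-waiting. A sketch, where
 * 'sock_pos' is the position previously returned by AddWaitEventToSet():
 *
 *     ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);
 *     ...flush output...
 *     ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);
 */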
1112
1113#if defined(WAIT_USE_EPOLL)
1114/*
1115 * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
1116 */
1117static void
1118WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
1119{
1120 struct epoll_event epoll_ev;
1121 int rc;
1122
1123 /* pointer to our event, returned by epoll_wait */
1124 epoll_ev.data.ptr = event;
1125 /* always wait for errors */
1126 epoll_ev.events = EPOLLERR | EPOLLHUP;
1127
1128 /* prepare pollfd entry once */
1129 if (event->events == WL_LATCH_SET)
1130 {
1131 Assert(set->latch != NULL);
1132 epoll_ev.events |= EPOLLIN;
1133 }
1134 else if (event->events == WL_POSTMASTER_DEATH)
1135 {
1136 epoll_ev.events |= EPOLLIN;
1137 }
1138 else
1139 {
1140 Assert(event->fd != PGINVALID_SOCKET);
1141 Assert(event->events & (WL_SOCKET_READABLE |
1142 WL_SOCKET_WRITEABLE |
1143 WL_SOCKET_CLOSED));
1144
1145 if (event->events & WL_SOCKET_READABLE)
1146 epoll_ev.events |= EPOLLIN;
1147 if (event->events & WL_SOCKET_WRITEABLE)
1148 epoll_ev.events |= EPOLLOUT;
1149 if (event->events & WL_SOCKET_CLOSED)
1150 epoll_ev.events |= EPOLLRDHUP;
1151 }
1152
1153 /*
1154 * Even though unused, we also pass epoll_ev as the data argument if
1155 * EPOLL_CTL_DEL is passed as action. There used to be an epoll bug
1156 * requiring that, and actually it makes the code simpler...
1157 */
1158 rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
1159
1160 if (rc < 0)
1161 ereport(ERROR,
1162 (errcode_for_socket_access(),
1163 errmsg("%s() failed: %m",
1164 "epoll_ctl")));
1165}
1166#endif
1167
1168#if defined(WAIT_USE_POLL)
1169static void
1170WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
1171{
1172 struct pollfd *pollfd = &set->pollfds[event->pos];
1173
1174 pollfd->revents = 0;
1175 pollfd->fd = event->fd;
1176
1177 /* prepare pollfd entry once */
1178 if (event->events == WL_LATCH_SET)
1179 {
1180 Assert(set->latch != NULL);
1181 pollfd->events = POLLIN;
1182 }
1183 else if (event->events == WL_POSTMASTER_DEATH)
1184 {
1185 pollfd->events = POLLIN;
1186 }
1187 else
1188 {
1189 Assert(event->events & (WL_SOCKET_READABLE |
1190 WL_SOCKET_WRITEABLE |
1191 WL_SOCKET_CLOSED));
1192 pollfd->events = 0;
1193 if (event->events & WL_SOCKET_READABLE)
1194 pollfd->events |= POLLIN;
1195 if (event->events & WL_SOCKET_WRITEABLE)
1196 pollfd->events |= POLLOUT;
1197#ifdef POLLRDHUP
1198 if (event->events & WL_SOCKET_CLOSED)
1199 pollfd->events |= POLLRDHUP;
1200#endif
1201 }
1202
1203 Assert(event->fd != PGINVALID_SOCKET);
1204}
1205#endif
1206
1207#if defined(WAIT_USE_KQUEUE)
1208
1209/*
1210 * On most BSD family systems, the udata member of struct kevent is of type
1211 * void *, so we could directly convert to/from WaitEvent *. Unfortunately,
1212 * NetBSD has it as intptr_t, so here we wallpaper over that difference with
1213 * an lvalue cast.
1214 */
1215#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
1216
1217static inline void
1218WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
1219 WaitEvent *event)
1220{
1221 k_ev->ident = event->fd;
1222 k_ev->filter = filter;
1223 k_ev->flags = action;
1224 k_ev->fflags = 0;
1225 k_ev->data = 0;
1226 AccessWaitEvent(k_ev) = event;
1227}
1228
1229static inline void
1230WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
1231{
1232 /* For now postmaster death can only be added, not removed. */
1233 k_ev->ident = PostmasterPid;
1234 k_ev->filter = EVFILT_PROC;
1235 k_ev->flags = EV_ADD;
1236 k_ev->fflags = NOTE_EXIT;
1237 k_ev->data = 0;
1238 AccessWaitEvent(k_ev) = event;
1239}
1240
1241static inline void
1242WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
1243{
1244 /* For now latch can only be added, not removed. */
1245 k_ev->ident = SIGURG;
1246 k_ev->filter = EVFILT_SIGNAL;
1247 k_ev->flags = EV_ADD;
1248 k_ev->fflags = 0;
1249 k_ev->data = 0;
1250 AccessWaitEvent(k_ev) = event;
1251}
1252
1253/*
1254 * old_events is the previous event mask, used to compute what has changed.
1255 */
1256static void
1257WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
1258{
1259 int rc;
1260 struct kevent k_ev[2];
1261 int count = 0;
1262 bool new_filt_read = false;
1263 bool old_filt_read = false;
1264 bool new_filt_write = false;
1265 bool old_filt_write = false;
1266
1267 if (old_events == event->events)
1268 return;
1269
1270 Assert(event->events != WL_LATCH_SET || set->latch != NULL);
1271 Assert(event->events == WL_LATCH_SET ||
1272 event->events == WL_POSTMASTER_DEATH ||
1273 (event->events & (WL_SOCKET_READABLE |
1274 WL_SOCKET_WRITEABLE |
1275 WL_SOCKET_CLOSED)));
1276
1277 if (event->events == WL_POSTMASTER_DEATH)
1278 {
1279 /*
1280 * Unlike all the other implementations, we detect postmaster death
1281 * using process notification instead of waiting on the postmaster
1282 * alive pipe.
1283 */
1284 WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
1285 }
1286 else if (event->events == WL_LATCH_SET)
1287 {
1288 /* We detect latch wakeup using a signal event. */
1289 WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
1290 }
1291 else
1292 {
1293 /*
1294 * We need to compute the adds and deletes required to get from the
1295 * old event mask to the new event mask, since kevent treats readable
1296 * and writable as separate events.
1297 */
1298 if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
1299 old_filt_read = true;
1300 if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
1301 new_filt_read = true;
1302 if (old_events & WL_SOCKET_WRITEABLE)
1303 old_filt_write = true;
1304 if (event->events & WL_SOCKET_WRITEABLE)
1305 new_filt_write = true;
1306 if (old_filt_read && !new_filt_read)
1307 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
1308 event);
1309 else if (!old_filt_read && new_filt_read)
1310 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
1311 event);
1312 if (old_filt_write && !new_filt_write)
1313 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
1314 event);
1315 else if (!old_filt_write && new_filt_write)
1316 WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
1317 event);
1318 }
1319
1320 /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
1321 if (count == 0)
1322 return;
1323
1324 Assert(count <= 2);
1325
1326 rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
1327
1328 /*
1329 * When adding the postmaster's pid, we have to consider that it might
1330 * already have exited and perhaps even been replaced by another process
1331 * with the same pid. If so, we have to defer reporting this as an event
1332 * until the next call to WaitEventSetWaitBlock().
1333 */
1334
1335 if (rc < 0)
1336 {
1337 if (event->events == WL_POSTMASTER_DEATH &&
1338 (errno == ESRCH || errno == EACCES))
1339 set->report_postmaster_not_running = true;
1340 else
1341 ereport(ERROR,
1342 (errcode_for_socket_access(),
1343 errmsg("%s() failed: %m",
1344 "kevent")));
1345 }
1346 else if (event->events == WL_POSTMASTER_DEATH &&
1347 PostmasterPid != getppid() &&
1348 !PostmasterIsAliveInternal())
1349 {
1350 /*
1351 * The extra PostmasterIsAliveInternal() check prevents false alarms
1352 * on systems that give a different value for getppid() while being
1353 * traced by a debugger.
1354 */
1355 set->report_postmaster_not_running = true;
1356 }
1357}
1358
1359#endif
1360
1361#if defined(WAIT_USE_WIN32)
1362static void
1363WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
1364{
1365 HANDLE *handle = &set->handles[event->pos + 1];
1366
1367 if (event->events == WL_LATCH_SET)
1368 {
1369 Assert(set->latch != NULL);
1370 *handle = set->latch->event;
1371 }
1372 else if (event->events == WL_POSTMASTER_DEATH)
1373 {
1374 *handle = PostmasterHandle;
1375 }
1376 else
1377 {
1378 int flags = FD_CLOSE; /* always check for errors/EOF */
1379
1380 if (event->events & WL_SOCKET_READABLE)
1381 flags |= FD_READ;
1382 if (event->events & WL_SOCKET_WRITEABLE)
1383 flags |= FD_WRITE;
1384 if (event->events & WL_SOCKET_CONNECTED)
1385 flags |= FD_CONNECT;
1386 if (event->events & WL_SOCKET_ACCEPT)
1387 flags |= FD_ACCEPT;
1388
1389 if (*handle == WSA_INVALID_EVENT)
1390 {
1391 *handle = WSACreateEvent();
1392 if (*handle == WSA_INVALID_EVENT)
1393 elog(ERROR, "failed to create event for socket: error code %d",
1394 WSAGetLastError());
1395 }
1396 if (WSAEventSelect(event->fd, *handle, flags) != 0)
1397 elog(ERROR, "failed to set up event for socket: error code %d",
1398 WSAGetLastError());
1399
1400 Assert(event->fd != PGINVALID_SOCKET);
1401 }
1402}
1403#endif
1404
1405/*
1406 * Wait for events added to the set to happen, or until the timeout is
1407 * reached. At most nevents occurred events are returned.
1408 *
1409 * If timeout = -1, block until an event occurs; if 0, check sockets for
1410 * readiness, but don't block; if > 0, block for at most timeout milliseconds.
1411 *
1412 * Returns the number of events that occurred, or 0 if the timeout was reached.
1413 *
1414 * Returned events will have the fd, pos, user_data fields set to the
1415 * values associated with the registered event.
1416 */
1417int
1418WaitEventSetWait(WaitEventSet *set, long timeout,
1419 WaitEvent *occurred_events, int nevents,
1420 uint32 wait_event_info)
1421{
1422 int returned_events = 0;
1423 instr_time start_time;
1424 instr_time cur_time;
1425 long cur_timeout = -1;
1426
1427 Assert(nevents > 0);
1428
1429 /*
1430 * Initialize timeout if requested. We must record the current time so
1431 * that we can determine the remaining timeout if interrupted.
1432 */
1433 if (timeout >= 0)
1434 {
1435 INSTR_TIME_SET_CURRENT(start_time);
1436 Assert(timeout >= 0 && timeout <= INT_MAX);
1437 cur_timeout = timeout;
1438 }
1439 else
1440 Assert(timeout == -1);
1441
1442 pgstat_report_wait_start(wait_event_info);
1443
1444#ifndef WIN32
1445 waiting = true;
1446#else
1447 /* Ensure that signals are serviced even if latch is already set */
1448 pgwin32_dispatch_queued_signals();
1449#endif
1450 while (returned_events == 0)
1451 {
1452 int rc;
1453
1454 /*
1455 * Check if the latch is set already first. If so, we either exit
1456 * immediately or ask the kernel for further events available right
1457 * now without waiting, depending on how many events the caller wants.
1458 *
1459 * If someone sets the latch between this and the
1460 * WaitEventSetWaitBlock() below, the setter will write a byte to the
1461 * pipe (or signal us and the signal handler will do that), and the
1462 * readiness routine will return immediately.
1463 *
1464 * On unix, if there's a pending byte in the self pipe, we'll notice
1465 * whenever blocking. Only clearing the pipe in that case avoids
1466 * having to drain it every time WaitLatchOrSocket() is used. Should
1467 * the pipe-buffer fill up we're still ok, because the pipe is in
1468 * nonblocking mode. It's unlikely for that to happen, because the
1469 * self pipe isn't filled unless we're blocking (waiting = true), or
1470 * from inside a signal handler in latch_sigurg_handler().
1471 *
1472 * On windows, we'll also notice if there's a pending event for the
1473 * latch when blocking, but there's no danger of anything filling up,
1474 * as "Setting an event that is already set has no effect.".
1475 *
1476 * Note: we assume that the kernel calls involved in latch management
1477 * will provide adequate synchronization on machines with weak memory
1478 * ordering, so that we cannot miss seeing is_set if a notification
1479 * has already been queued.
1480 */
1481 if (set->latch && !set->latch->is_set)
1482 {
1483 /* about to sleep on a latch */
1484 set->latch->maybe_sleeping = true;
1485 pg_memory_barrier();
1486 /* and recheck */
1487 }
1488
1489 if (set->latch && set->latch->is_set)
1490 {
1491 occurred_events->fd = PGINVALID_SOCKET;
1492 occurred_events->pos = set->latch_pos;
1493 occurred_events->user_data =
1494 set->events[set->latch_pos].user_data;
1495 occurred_events->events = WL_LATCH_SET;
1496 occurred_events++;
1497 returned_events++;
1498
1499 /* could have been set above */
1500 set->latch->maybe_sleeping = false;
1501
1502 if (returned_events == nevents)
1503 break; /* output buffer full already */
1504
1505 /*
1506 * Even though we already have an event, we'll poll just once with
1507 * zero timeout to see what non-latch events we can fit into the
1508 * output buffer at the same time.
1509 */
1510 cur_timeout = 0;
1511 timeout = 0;
1512 }
1513
1514 /*
1515 * Wait for events using the readiness primitive chosen at the top of
1516 * this file. If -1 is returned, a timeout has occurred, if 0 we have
1517 * to retry, everything >= 1 is the number of returned events.
1518 */
1519 rc = WaitEventSetWaitBlock(set, cur_timeout,
1520 occurred_events, nevents - returned_events);
1521
1522 if (set->latch &&
1523 set->latch->maybe_sleeping)
1524 set->latch->maybe_sleeping = false;
1525
1526 if (rc == -1)
1527 break; /* timeout occurred */
1528 else
1529 returned_events += rc;
1530
1531 /* If we're not done, update cur_timeout for next iteration */
1532 if (returned_events == 0 && timeout >= 0)
1533 {
1534 INSTR_TIME_SET_CURRENT(cur_time);
1535 INSTR_TIME_SUBTRACT(cur_time, start_time);
1536 cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
1537 if (cur_timeout <= 0)
1538 break;
1539 }
1540 }
1541#ifndef WIN32
1542 waiting = false;
1543#endif
1544
1545 pgstat_report_wait_end();
1546
1547 return returned_events;
1548}
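/*
 * Editor's note: a sketch of draining several events per call, assuming a
 * set populated as in the example after WaitLatchOrSocket() above and a
 * hypothetical handle_input() callback:
 *
 *     WaitEvent events[8];
 *     int       n;
 *
 *     n = WaitEventSetWait(set, -1, events, lengthof(events), info);
 *     for (int i = 0; i < n; i++)
 *     {
 *         if (events[i].events & WL_LATCH_SET)
 *             ResetLatch(MyLatch);
 *         if (events[i].events & WL_SOCKET_READABLE)
 *             handle_input(events[i].fd, events[i].user_data);
 *     }
 */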
1549
1550
1551#if defined(WAIT_USE_EPOLL)
1552
1553/*
1554 * Wait using linux's epoll_wait(2).
1555 *
1556 * This is the preferable wait method, as several readiness notifications are
1557 * delivered, without having to iterate through all of set->events. The
1558 * returned epoll_event structs contain a pointer to our event, making
1559 * association easy.
1560 */
1561static inline int
1562WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1563 WaitEvent *occurred_events, int nevents)
1564{
1565 int returned_events = 0;
1566 int rc;
1567 WaitEvent *cur_event;
1568 struct epoll_event *cur_epoll_event;
1569
1570 /* Sleep */
1571 rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
1572 Min(nevents, set->nevents_space), cur_timeout);
1573
1574 /* Check return code */
1575 if (rc < 0)
1576 {
1577 /* EINTR is okay, otherwise complain */
1578 if (errno != EINTR)
1579 {
1580 waiting = false;
1581 ereport(ERROR,
1582 (errcode_for_socket_access(),
1583 errmsg("%s() failed: %m",
1584 "epoll_wait")));
1585 }
1586 return 0;
1587 }
1588 else if (rc == 0)
1589 {
1590 /* timeout exceeded */
1591 return -1;
1592 }
1593
1594 /*
1595 * At least one event occurred, iterate over the returned epoll events
1596 * until they're either all processed, or we've returned all the events
1597 * the caller desired.
1598 */
1599 for (cur_epoll_event = set->epoll_ret_events;
1600 cur_epoll_event < (set->epoll_ret_events + rc) &&
1601 returned_events < nevents;
1602 cur_epoll_event++)
1603 {
1604 /* epoll's data pointer is set to the associated WaitEvent */
1605 cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
1606
1607 occurred_events->pos = cur_event->pos;
1608 occurred_events->user_data = cur_event->user_data;
1609 occurred_events->events = 0;
1610
1611 if (cur_event->events == WL_LATCH_SET &&
1612 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1613 {
1614 /* Drain the signalfd. */
1615 drain();
1616
1617 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1618 {
1619 occurred_events->fd = PGINVALID_SOCKET;
1620 occurred_events->events = WL_LATCH_SET;
1621 occurred_events++;
1622 returned_events++;
1623 }
1624 }
1625 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1626 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
1627 {
1628 /*
1629 * We expect an EPOLLHUP when the remote end is closed, but
1630 * because we don't expect the pipe to become readable or to have
1631 * any errors either, treat those cases as postmaster death, too.
1632 *
1633 * Be paranoid about a spurious event signaling the postmaster as
1634 * being dead. There have been reports about that happening with
1635 * older primitives (select(2) to be specific), and a spurious
1636 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1637 * cost much.
1638 */
1639 if (!PostmasterIsAliveInternal())
1640 {
1641 if (set->exit_on_postmaster_death)
1642 proc_exit(1);
1643 occurred_events->fd = PGINVALID_SOCKET;
1644 occurred_events->events = WL_POSTMASTER_DEATH;
1645 occurred_events++;
1646 returned_events++;
1647 }
1648 }
1649 else if (cur_event->events & (WL_SOCKET_READABLE |
1650 WL_SOCKET_WRITEABLE |
1651 WL_SOCKET_CLOSED))
1652 {
1653 Assert(cur_event->fd != PGINVALID_SOCKET);
1654
1655 if ((cur_event->events & WL_SOCKET_READABLE) &&
1656 (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
1657 {
1658 /* data available in socket, or EOF */
1659 occurred_events->events |= WL_SOCKET_READABLE;
1660 }
1661
1662 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1663 (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
1664 {
1665 /* writable, or EOF */
1666 occurred_events->events |= WL_SOCKET_WRITEABLE;
1667 }
1668
1669 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1670 (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
1671 {
1672 /* remote peer shut down, or error */
1673 occurred_events->events |= WL_SOCKET_CLOSED;
1674 }
1675
1676 if (occurred_events->events != 0)
1677 {
1678 occurred_events->fd = cur_event->fd;
1679 occurred_events++;
1680 returned_events++;
1681 }
1682 }
1683 }
1684
1685 return returned_events;
1686}
1687
1688#elif defined(WAIT_USE_KQUEUE)
1689
1690/*
1691 * Wait using kevent(2) on BSD-family systems and macOS.
1692 *
1693 * For now this mirrors the epoll code, but in future it could modify the fd
1694 * set in the same call to kevent as it uses for waiting instead of doing that
1695 * with separate system calls.
1696 */
1697static int
1698WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1699 WaitEvent *occurred_events, int nevents)
1700{
1701 int returned_events = 0;
1702 int rc;
1703 WaitEvent *cur_event;
1704 struct kevent *cur_kqueue_event;
1705 struct timespec timeout;
1706 struct timespec *timeout_p;
1707
1708 if (cur_timeout < 0)
1709 timeout_p = NULL;
1710 else
1711 {
1712 timeout.tv_sec = cur_timeout / 1000;
1713 timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
1714 timeout_p = &timeout;
1715 }
1716
1717 /*
1718 * Report postmaster events discovered by WaitEventAdjustKqueue() or an
1719 * earlier call to WaitEventSetWait().
1720 */
1721 if (unlikely(set->report_postmaster_not_running))
1722 {
1723 if (set->exit_on_postmaster_death)
1724 proc_exit(1);
1725 occurred_events->fd = PGINVALID_SOCKET;
1726 occurred_events->events = WL_POSTMASTER_DEATH;
1727 return 1;
1728 }
1729
1730 /* Sleep */
1731 rc = kevent(set->kqueue_fd, NULL, 0,
1732 set->kqueue_ret_events,
1733 Min(nevents, set->nevents_space),
1734 timeout_p);
1735
1736 /* Check return code */
1737 if (rc < 0)
1738 {
1739 /* EINTR is okay, otherwise complain */
1740 if (errno != EINTR)
1741 {
1742 waiting = false;
1743 ereport(ERROR,
1744 (errcode_for_socket_access(),
1745 errmsg("%s() failed: %m",
1746 "kevent")));
1747 }
1748 return 0;
1749 }
1750 else if (rc == 0)
1751 {
1752 /* timeout exceeded */
1753 return -1;
1754 }
1755
1756 /*
1757 * At least one event occurred, iterate over the returned kqueue events
1758 * until they're either all processed, or we've returned all the events
1759 * the caller desired.
1760 */
1761 for (cur_kqueue_event = set->kqueue_ret_events;
1762 cur_kqueue_event < (set->kqueue_ret_events + rc) &&
1763 returned_events < nevents;
1764 cur_kqueue_event++)
1765 {
1766 /* kevent's udata points to the associated WaitEvent */
1767 cur_event = AccessWaitEvent(cur_kqueue_event);
1768
1769 occurred_events->pos = cur_event->pos;
1770 occurred_events->user_data = cur_event->user_data;
1771 occurred_events->events = 0;
1772
1773 if (cur_event->events == WL_LATCH_SET &&
1774 cur_kqueue_event->filter == EVFILT_SIGNAL)
1775 {
1776 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1777 {
1778 occurred_events->fd = PGINVALID_SOCKET;
1779 occurred_events->events = WL_LATCH_SET;
1780 occurred_events++;
1781 returned_events++;
1782 }
1783 }
1784 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1785 cur_kqueue_event->filter == EVFILT_PROC &&
1786 (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
1787 {
1788 /*
1789 * The kernel will tell this kqueue object only once about the
1790 * exit of the postmaster, so let's remember that for next time so
1791 * that we provide level-triggered semantics.
1792 */
1793 set->report_postmaster_not_running = true;
1794
1795 if (set->exit_on_postmaster_death)
1796 proc_exit(1);
1797 occurred_events->fd = PGINVALID_SOCKET;
1798 occurred_events->events = WL_POSTMASTER_DEATH;
1799 occurred_events++;
1800 returned_events++;
1801 }
1802 else if (cur_event->events & (WL_SOCKET_READABLE |
1803 WL_SOCKET_WRITEABLE |
1804 WL_SOCKET_CLOSED))
1805 {
1806 Assert(cur_event->fd >= 0);
1807
1808 if ((cur_event->events & WL_SOCKET_READABLE) &&
1809 (cur_kqueue_event->filter == EVFILT_READ))
1810 {
1811 /* readable, or EOF */
1812 occurred_events->events |= WL_SOCKET_READABLE;
1813 }
1814
1815 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1816 (cur_kqueue_event->filter == EVFILT_READ) &&
1817 (cur_kqueue_event->flags & EV_EOF))
1818 {
1819 /* the remote peer has shut down */
1820 occurred_events->events |= WL_SOCKET_CLOSED;
1821 }
1822
1823 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1824 (cur_kqueue_event->filter == EVFILT_WRITE))
1825 {
1826 /* writable, or EOF */
1827 occurred_events->events |= WL_SOCKET_WRITEABLE;
1828 }
1829
1830 if (occurred_events->events != 0)
1831 {
1832 occurred_events->fd = cur_event->fd;
1833 occurred_events++;
1834 returned_events++;
1835 }
1836 }
1837 }
1838
1839 return returned_events;
1840}
1841
1842#elif defined(WAIT_USE_POLL)
1843
1844/*
1845 * Wait using poll(2).
1846 *
1847 * This allows us to receive readiness notifications for several events at once,
1848 * but requires iterating through all of set->pollfds.
1849 */
1850static inline int
1851WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1852 WaitEvent *occurred_events, int nevents)
1853{
1854 int returned_events = 0;
1855 int rc;
1856 WaitEvent *cur_event;
1857 struct pollfd *cur_pollfd;
1858
1859 /* Sleep */
1860 rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
1861
1862 /* Check return code */
1863 if (rc < 0)
1864 {
1865 /* EINTR is okay, otherwise complain */
1866 if (errno != EINTR)
1867 {
1868 waiting = false;
1869 ereport(ERROR,
1870 (errcode_for_socket_access(),
1871 errmsg("%s() failed: %m",
1872 "poll")));
1873 }
1874 return 0;
1875 }
1876 else if (rc == 0)
1877 {
1878 /* timeout exceeded */
1879 return -1;
1880 }
1881
1882 for (cur_event = set->events, cur_pollfd = set->pollfds;
1883 cur_event < (set->events + set->nevents) &&
1884 returned_events < nevents;
1885 cur_event++, cur_pollfd++)
1886 {
1887 /* no activity on this FD, skip */
1888 if (cur_pollfd->revents == 0)
1889 continue;
1890
1891 occurred_events->pos = cur_event->pos;
1892 occurred_events->user_data = cur_event->user_data;
1893 occurred_events->events = 0;
1894
1895 if (cur_event->events == WL_LATCH_SET &&
1896 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1897 {
1898 /* There's data in the self-pipe, clear it. */
1899 drain();
1900
1901 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
1902 {
1903 occurred_events->fd = PGINVALID_SOCKET;
1904 occurred_events->events = WL_LATCH_SET;
1905 occurred_events++;
1906 returned_events++;
1907 }
1908 }
1909 else if (cur_event->events == WL_POSTMASTER_DEATH &&
1910 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
1911 {
1912 /*
1913 * We expect a POLLHUP when the remote end is closed, but because
1914 * we don't expect the pipe to become readable or to have any
1915 * errors either, treat those cases as postmaster death, too.
1916 *
1917 * Be paranoid about a spurious event signaling the postmaster as
1918 * being dead. There have been reports about that happening with
1919 * older primitives (select(2) to be specific), and a spurious
1920 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
1921 * cost much.
1922 */
1923 if (!PostmasterIsAliveInternal())
1924 {
1925 if (set->exit_on_postmaster_death)
1926 proc_exit(1);
1927 occurred_events->fd = PGINVALID_SOCKET;
1928 occurred_events->events = WL_POSTMASTER_DEATH;
1929 occurred_events++;
1930 returned_events++;
1931 }
1932 }
1933 else if (cur_event->events & (WL_SOCKET_READABLE |
1934 WL_SOCKET_WRITEABLE |
1935 WL_SOCKET_CLOSED))
1936 {
1937 int errflags = POLLHUP | POLLERR | POLLNVAL;
1938
1939 Assert(cur_event->fd >= PGINVALID_SOCKET);
1940
1941 if ((cur_event->events & WL_SOCKET_READABLE) &&
1942 (cur_pollfd->revents & (POLLIN | errflags)))
1943 {
1944 /* data available in socket, or EOF */
1945 occurred_events->events |= WL_SOCKET_READABLE;
1946 }
1947
1948 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
1949 (cur_pollfd->revents & (POLLOUT | errflags)))
1950 {
1951 /* writeable, or EOF */
1952 occurred_events->events |= WL_SOCKET_WRITEABLE;
1953 }
1954
1955#ifdef POLLRDHUP
1956 if ((cur_event->events & WL_SOCKET_CLOSED) &&
1957 (cur_pollfd->revents & (POLLRDHUP | errflags)))
1958 {
1959 /* remote peer closed, or error */
1960 occurred_events->events |= WL_SOCKET_CLOSED;
1961 }
1962#endif
1963
1964 if (occurred_events->events != 0)
1965 {
1966 occurred_events->fd = cur_event->fd;
1967 occurred_events++;
1968 returned_events++;
1969 }
1970 }
1971 }
1972 return returned_events;
1973}
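/*
 * A minimal standalone sketch of the poll(2) pattern used above, not part
 * of latch.c: sleep until one of several descriptors is ready, retry on
 * EINTR, then scan the revents flags.  The helper name wait_two_fds and
 * its two caller-supplied descriptors are hypothetical.
 */
#include <errno.h>
#include <poll.h>

static int
wait_two_fds(int read_fd, int write_fd, int timeout_ms)
{
	struct pollfd pfds[2];
	int			rc;
	int			ready = 0;

	pfds[0].fd = read_fd;
	pfds[0].events = POLLIN;
	pfds[1].fd = write_fd;
	pfds[1].events = POLLOUT;

	do
	{
		rc = poll(pfds, 2, timeout_ms);
	} while (rc < 0 && errno == EINTR);	/* interrupted by a signal: retry */

	if (rc <= 0)
		return rc;				/* -1 on error, 0 on timeout */

	/* POLLHUP/POLLERR/POLLNVAL are reported even if not requested */
	if (pfds[0].revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL))
		ready |= 1;				/* readable, or EOF/error */
	if (pfds[1].revents & (POLLOUT | POLLHUP | POLLERR | POLLNVAL))
		ready |= 2;				/* writable, or EOF/error */

	return ready;
}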
1974
1975#elif defined(WAIT_USE_WIN32)
1976
1977/*
1978 * Wait using Windows' WaitForMultipleObjects(). Each call only "consumes" one
1979 * event, so we keep calling until we've filled up our output buffer to match
1980 * the behavior of the other implementations.
1981 *
1982 * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
1983 */
1984static inline int
1985WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
1986 WaitEvent *occurred_events, int nevents)
1987{
1988 int returned_events = 0;
1989 DWORD rc;
1990 WaitEvent *cur_event;
1991
1992 /* Reset any wait events that need it */
1993 for (cur_event = set->events;
1994 cur_event < (set->events + set->nevents);
1995 cur_event++)
1996 {
1997 if (cur_event->reset)
1998 {
1999 WaitEventAdjustWin32(set, cur_event);
2000 cur_event->reset = false;
2001 }
2002
2003 /*
2004 * We associate the socket with a new event handle for each
2005 * WaitEventSet. FD_CLOSE is only generated once if the other end
2006 * closes gracefully. Therefore we might miss the FD_CLOSE
2007 * notification, if it was delivered to another event after we stopped
2008 * waiting for it. Close that race by peeking for EOF after setting
2009 * up this handle to receive notifications, and before entering the
2010 * sleep.
2011 *
2012 * XXX If we had one event handle for the lifetime of a socket, we
2013 * wouldn't need this.
2014 */
2015 if (cur_event->events & WL_SOCKET_READABLE)
2016 {
2017 char c;
2018 WSABUF buf;
2019 DWORD received;
2020 DWORD flags;
2021
2022 buf.buf = &c;
2023 buf.len = 1;
2024 flags = MSG_PEEK;
2025 if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
2026 {
2027 occurred_events->pos = cur_event->pos;
2028 occurred_events->user_data = cur_event->user_data;
2029 occurred_events->events = WL_SOCKET_READABLE;
2030 occurred_events->fd = cur_event->fd;
2031 return 1;
2032 }
2033 }
2034
2035 /*
2036 * Windows does not guarantee to log an FD_WRITE network event
2037 * indicating that more data can be sent unless the previous send()
2038 * failed with WSAEWOULDBLOCK. While our caller might well have made
2039 * such a call, we cannot assume that here. Therefore, if waiting for
2040 * write-ready, force the issue by doing a dummy send(). If the dummy
2041 * send() succeeds, assume that the socket is in fact write-ready, and
2042 * return immediately. Also, if it fails with something other than
2043 * WSAEWOULDBLOCK, return a write-ready indication to let our caller
2044 * deal with the error condition.
2045 */
2046 if (cur_event->events & WL_SOCKET_WRITEABLE)
2047 {
2048 char c;
2049 WSABUF buf;
2050 DWORD sent;
2051 int r;
2052
2053 buf.buf = &c;
2054 buf.len = 0;
2055
2056 r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
2057 if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
2058 {
2059 occurred_events->pos = cur_event->pos;
2060 occurred_events->user_data = cur_event->user_data;
2061 occurred_events->events = WL_SOCKET_WRITEABLE;
2062 occurred_events->fd = cur_event->fd;
2063 return 1;
2064 }
2065 }
2066 }
2067
2068 /*
2069 * Sleep.
2070 *
2071 * Need to wait for ->nevents + 1, because the signal handle is in [0].
2072 */
2073 rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
2074 cur_timeout);
2075
2076 /* Check return code */
2077 if (rc == WAIT_FAILED)
2078 elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
2079 GetLastError());
2080 else if (rc == WAIT_TIMEOUT)
2081 {
2082 /* timeout exceeded */
2083 return -1;
2084 }
2085
2086 if (rc == WAIT_OBJECT_0)
2087 {
2088 /* Service newly-arrived signals */
2089 pgwin32_dispatch_queued_signals();
2090 return 0; /* retry */
2091 }
2092
2093 /*
2094 * With an offset of one, due to the always present pgwin32_signal_event,
2095 * the handle offset directly corresponds to a wait event.
2096 */
2097 cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
2098
2099 for (;;)
2100 {
2101 int next_pos;
2102 int count;
2103
2104 occurred_events->pos = cur_event->pos;
2105 occurred_events->user_data = cur_event->user_data;
2106 occurred_events->events = 0;
2107
2108 if (cur_event->events == WL_LATCH_SET)
2109 {
2110 /*
2111 * We cannot use set->latch->event to reset the fired event if we
2112 * aren't waiting on this latch now.
2113 */
2114 if (!ResetEvent(set->handles[cur_event->pos + 1]))
2115 elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
2116
2117 if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
2118 {
2119 occurred_events->fd = PGINVALID_SOCKET;
2120 occurred_events->events = WL_LATCH_SET;
2121 occurred_events++;
2122 returned_events++;
2123 }
2124 }
2125 else if (cur_event->events == WL_POSTMASTER_DEATH)
2126 {
2127 /*
2128 * Postmaster apparently died. Since the consequences of falsely
2129 * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
2130 * take the trouble to positively verify this with
2131 * PostmasterIsAlive(), even though there is no known reason to
2132 * think that the event could be falsely set on Windows.
2133 */
2134 if (!PostmasterIsAlive())
2135 {
2136 if (set->exit_on_postmaster_death)
2137 proc_exit(1);
2138 occurred_events->fd = PGINVALID_SOCKET;
2139 occurred_events->events = WL_POSTMASTER_DEATH;
2140 occurred_events++;
2141 returned_events++;
2142 }
2143 }
2144 else if (cur_event->events & WL_SOCKET_MASK)
2145 {
2146 WSANETWORKEVENTS resEvents;
2147 HANDLE handle = set->handles[cur_event->pos + 1];
2148
2149 Assert(cur_event->fd);
2150
2151 occurred_events->fd = cur_event->fd;
2152
2153 ZeroMemory(&resEvents, sizeof(resEvents));
2154 if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
2155 elog(ERROR, "failed to enumerate network events: error code %d",
2156 WSAGetLastError());
2157 if ((cur_event->events & WL_SOCKET_READABLE) &&
2158 (resEvents.lNetworkEvents & FD_READ))
2159 {
2160 /* data available in socket */
2161 occurred_events->events |= WL_SOCKET_READABLE;
2162
2163 /*------
2164 * WaitForMultipleObjects doesn't guarantee that a read event
2165 * will be returned if the latch is set at the same time. Even
2166 * if it did, the caller might drop that event expecting it to
2167 * reoccur on the next call. So, we must force the event to be
2168 * reset if this WaitEventSet is used again in order to avoid
2169 * an indefinite hang.
2170 *
2171 * Refer to
2172 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
2173 * for the behavior of socket events.
2174 *------
2175 */
2176 cur_event->reset = true;
2177 }
2178 if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
2179 (resEvents.lNetworkEvents & FD_WRITE))
2180 {
2181 /* writeable */
2182 occurred_events->events |= WL_SOCKET_WRITEABLE;
2183 }
2184 if ((cur_event->events & WL_SOCKET_CONNECTED) &&
2185 (resEvents.lNetworkEvents & FD_CONNECT))
2186 {
2187 /* connected */
2188 occurred_events->events |= WL_SOCKET_CONNECTED;
2189 }
2190 if ((cur_event->events & WL_SOCKET_ACCEPT) &&
2191 (resEvents.lNetworkEvents & FD_ACCEPT))
2192 {
2193 /* incoming connection could be accepted */
2194 occurred_events->events |= WL_SOCKET_ACCEPT;
2195 }
2196 if (resEvents.lNetworkEvents & FD_CLOSE)
2197 {
2198 /* EOF/error, so signal all caller-requested socket flags */
2199 occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
2200 }
2201
2202 if (occurred_events->events != 0)
2203 {
2204 occurred_events++;
2205 returned_events++;
2206 }
2207 }
2208
2209 /* Is the output buffer full? */
2210 if (returned_events == nevents)
2211 break;
2212
2213 /* Have we run out of possible events? */
2214 next_pos = cur_event->pos + 1;
2215 if (next_pos == set->nevents)
2216 break;
2217
2218 /*
2219 * Poll the rest of the event handles in the array starting at
2220 * next_pos, being careful to skip over the initial signal handle too.
2221 * This time we use a zero timeout.
2222 */
2223 count = set->nevents - next_pos;
2224 rc = WaitForMultipleObjects(count,
2225 set->handles + 1 + next_pos,
2226 false,
2227 0);
2228
2229 /*
2230 * We don't distinguish between errors and WAIT_TIMEOUT here because
2231 * we already have events to report.
2232 */
2233 if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
2234 break;
2235
2236 /* We have another event to decode. */
2237 cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
2238 }
2239
2240 return returned_events;
2241}
2242#endif
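/*
 * Standalone sketch of the multi-event technique above, not part of
 * latch.c: WaitForMultipleObjects() reports only the lowest-numbered
 * signaled handle, so after decoding it we re-poll the remaining handles
 * with a zero timeout to gather more events in the same pass.  Assumes a
 * plain array of event HANDLEs; the helper name collect_signaled is
 * hypothetical.
 */
#ifdef WIN32
#include <windows.h>

static int
collect_signaled(HANDLE *handles, int nhandles, DWORD timeout_ms,
				 int *signaled, int max_signaled)
{
	int			count = 0;
	int			pos = 0;
	DWORD		rc;

	/* the only sleeping wait; covers the whole handle array */
	rc = WaitForMultipleObjects(nhandles, handles, FALSE, timeout_ms);
	if (rc == WAIT_TIMEOUT)
		return 0;
	if (rc == WAIT_FAILED)
		return -1;

	for (;;)
	{
		pos += rc - WAIT_OBJECT_0;	/* absolute index of the signaled handle */
		signaled[count++] = pos;

		pos++;					/* continue scanning after that handle */
		if (count == max_signaled || pos == nhandles)
			break;

		/* zero timeout: never sleep again, just peek at the rest */
		rc = WaitForMultipleObjects(nhandles - pos, handles + pos,
									FALSE, 0);
		if (rc >= WAIT_OBJECT_0 + (DWORD) (nhandles - pos))
			break;				/* timeout or error: report what we have */
	}

	return count;
}
#endif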
2243
2244/*
2245 * Return whether the current build options can report WL_SOCKET_CLOSED.
2246 */
2247bool
2248WaitEventSetCanReportClosed(void)
2249{
2250#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
2251 defined(WAIT_USE_EPOLL) || \
2252 defined(WAIT_USE_KQUEUE)
2253 return true;
2254#else
2255 return false;
2256#endif
2257}
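/*
 * Hedged usage sketch, not part of latch.c: a caller might combine
 * WaitEventSetCanReportClosed() with WL_SOCKET_CLOSED to probe whether
 * the remote peer has hung up, without blocking.  The helper name
 * peer_has_disconnected and its socket argument are illustrative, and 0
 * is passed as a placeholder wait_event_info.
 */
static bool
peer_has_disconnected(pgsocket sock)
{
	WaitEventSet *set;
	WaitEvent	event;
	bool		closed = false;

	if (!WaitEventSetCanReportClosed())
		return false;			/* this build can't tell; assume alive */

	set = CreateWaitEventSet(CurrentResourceOwner, 1);
	AddWaitEventToSet(set, WL_SOCKET_CLOSED, sock, NULL, NULL);

	/* zero timeout: check the socket's current state, don't sleep */
	if (WaitEventSetWait(set, 0, &event, 1, 0) > 0 &&
		(event.events & WL_SOCKET_CLOSED))
		closed = true;

	FreeWaitEventSet(set);
	return closed;
}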
2258
2259/*
2260 * Get the number of wait events registered in a given WaitEventSet.
2261 */
2262int
2263GetNumRegisteredWaitEvents(WaitEventSet *set)
2264{
2265 return set->nevents;
2266}
2267
2268#if defined(WAIT_USE_SELF_PIPE)
2269
2270/*
2271 * SetLatch uses SIGURG to wake up the process waiting on the latch.
2272 *
2273 * Wake up WaitLatch, if we're waiting.
2274 */
2275static void
2276latch_sigurg_handler(SIGNAL_ARGS)
2277{
2278 if (waiting)
2279 sendSelfPipeByte();
2280}
2281
2282/* Send one byte to the self-pipe, to wake up WaitLatch */
2283static void
2284sendSelfPipeByte(void)
2285{
2286 int rc;
2287 char dummy = 0;
2288
2289retry:
2290 rc = write(selfpipe_writefd, &dummy, 1);
2291 if (rc < 0)
2292 {
2293 /* If interrupted by signal, just retry */
2294 if (errno == EINTR)
2295 goto retry;
2296
2297 /*
2298 * If the pipe is full, we don't need to retry; the data that's
2299 * already there is enough to wake up WaitLatch.
2300 */
2301 if (errno == EAGAIN || errno == EWOULDBLOCK)
2302 return;
2303
2304 /*
2305 * Oops, the write() failed for some other reason. We might be in a
2306 * signal handler, so it's not safe to elog(). We have no choice but
2307 * to silently ignore the error.
2308 */
2309 return;
2310 }
2311}
2312
2313#endif
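/*
 * Standalone sketch of the self-pipe trick implemented above, assuming
 * plain POSIX (not part of latch.c).  Both pipe ends are made
 * non-blocking so the handler's write() can never block and a reader can
 * drain until EAGAIN; the names pipe_fds, wakeup_handler and
 * init_self_pipe are hypothetical.
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int	pipe_fds[2];

static void
wakeup_handler(int signo)
{
	char		dummy = 0;

	/* write() is async-signal-safe; a full pipe already wakes the waiter */
	(void) write(pipe_fds[1], &dummy, 1);
}

static int
init_self_pipe(void)
{
	if (pipe(pipe_fds) < 0)
		return -1;
	if (fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) < 0 ||
		fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) < 0)
		return -1;
	if (signal(SIGURG, wakeup_handler) == SIG_ERR)
		return -1;
	return 0;
}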
2314
2315#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
2316
2317/*
2318 * Read all available data from self-pipe or signalfd.
2319 *
2320 * Note: this is only called when waiting = true. If it fails (and hence
2321 * doesn't return), it must reset that flag first (though ideally, this will never
2322 * happen).
2323 */
2324static void
2325drain(void)
2326{
2327 char buf[1024];
2328 int rc;
2329 int fd;
2330
2331#ifdef WAIT_USE_SELF_PIPE
2332 fd = selfpipe_readfd;
2333#else
2334 fd = signal_fd;
2335#endif
2336
2337 for (;;)
2338 {
2339 rc = read(fd, buf, sizeof(buf));
2340 if (rc < 0)
2341 {
2342 if (errno == EAGAIN || errno == EWOULDBLOCK)
2343 break; /* the descriptor is empty */
2344 else if (errno == EINTR)
2345 continue; /* retry */
2346 else
2347 {
2348 waiting = false;
2349#ifdef WAIT_USE_SELF_PIPE
2350 elog(ERROR, "read() on self-pipe failed: %m");
2351#else
2352 elog(ERROR, "read() on signalfd failed: %m");
2353#endif
2354 }
2355 }
2356 else if (rc == 0)
2357 {
2358 waiting = false;
2359#ifdef WAIT_USE_SELF_PIPE
2360 elog(ERROR, "unexpected EOF on self-pipe");
2361#else
2362 elog(ERROR, "unexpected EOF on signalfd");
2363#endif
2364 }
2365 else if (rc < sizeof(buf))
2366 {
2367 /* we successfully drained the pipe; no need to read() again */
2368 break;
2369 }
2370 /* else buffer wasn't big enough, so read again */
2371 }
2372}
2373
2374#endif
2375
2376static void
2377ResOwnerReleaseWaitEventSet(Datum res)
2378{
2379 WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
2380
2381 Assert(set->owner != NULL);
2382 set->owner = NULL;
2383 FreeWaitEventSet(set);
2384}