1/*-------------------------------------------------------------------------
2 *
3 * method_io_uring.c
4 * AIO - perform AIO using Linux' io_uring
5 *
6 * For now we create one io_uring instance for each backend. These io_uring
7 * instances have to be created in postmaster, during startup, to allow other
8 * backends to process IO completions, if the issuing backend is currently
9 * busy doing other things. Other backends may not use another backend's
10 * io_uring instance to submit IO; that'd require additional locking that
11 * would likely be harmful for performance.
12 *
13 * We likely will want to introduce a backend-local io_uring instance in the
14 * future, e.g. for FE/BE network IO.
15 *
16 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
17 * Portions Copyright (c) 1994, Regents of the University of California
18 *
19 * IDENTIFICATION
20 * src/backend/storage/aio/method_io_uring.c
21 *
22 *-------------------------------------------------------------------------
23 */
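/*
 * Illustrative usage sketch (not part of the original file): this IO method
 * is selected with the io_method GUC, and io_max_concurrency bounds how many
 * IOs a backend may have in flight, which also determines the queue depth
 * each per-backend ring is created with below. For example, in
 * postgresql.conf:
 *
 *     io_method = io_uring
 *     io_max_concurrency = 64
 */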
24
25#include "postgres.h"
26
27/* included early, for IOMETHOD_IO_URING_ENABLED */
28#include "storage/aio.h"
29
30#ifdef IOMETHOD_IO_URING_ENABLED
31
32#include <sys/mman.h>
33#include <unistd.h>
34
35#include <liburing.h>
36
37#include "miscadmin.h"
38#include "storage/aio_internal.h"
39#include "storage/fd.h"
40#include "storage/lwlock.h"
41#include "storage/proc.h"
42#include "storage/procnumber.h"
43#include "storage/shmem.h"
44#include "utils/wait_event.h"
45
46
47/* number of completions processed at once */
48#define PGAIO_MAX_LOCAL_COMPLETED_IO 32
49
50
51/* Entry points for IoMethodOps. */
52static void pgaio_uring_shmem_request(void *arg);
53static void pgaio_uring_shmem_init(void *arg);
54static void pgaio_uring_init_backend(void);
55static int pgaio_uring_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
58
59/* helper functions */
61
62
63const IoMethodOps pgaio_uring_ops = {
64 /*
65 * While io_uring mostly is OK with FDs getting closed while the IO is in
66 * flight, that is not true for IOs submitted with IOSQE_ASYNC.
67 *
68 * See
69 * https://postgr.es/m/5ons2rtmwarqqhhexb3dnqulw5rjgwgoct57vpdau4rujlrffj%403fls6d2mkiwc
70 */
71 .wait_on_fd_before_close = true,
72
73 .shmem_callbacks.request_fn = pgaio_uring_shmem_request,
74 .shmem_callbacks.init_fn = pgaio_uring_shmem_init,
75 .init_backend = pgaio_uring_init_backend,
76
77 .submit = pgaio_uring_submit,
78 .wait_one = pgaio_uring_wait_one,
79 .check_one = pgaio_uring_check_one,
80};
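/*
 * Rough dispatch sketch (assumed from aio.c / aio_internal.h, not part of
 * this file): the generic AIO layer picks this table when io_method is set
 * to io_uring and calls through its function pointers, roughly:
 *
 *     const IoMethodOps *ops = pgaio_method_ops;    /- &pgaio_uring_ops here
 *
 *     ops->submit(num_staged_ios, staged_ios);      /- pgaio_uring_submit()
 *     ops->wait_one(ioh, ref_generation);           /- pgaio_uring_wait_one()
 *
 * The shmem callbacks run during postmaster startup, so every backend's ring
 * exists before any backend submits IO (see the file header).
 */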
81
82/*
83 * Per-backend state when using io_method=io_uring
84 */
85typedef struct PgAioUringContext
86{
87 /*
88 * Align the whole struct to a cacheline boundary, to prevent false
89 * sharing between completion_lock and prior backend's io_uring_ring.
90 */
91 alignas(PG_CACHE_LINE_SIZE)
92
93 /*
94 * Multiple backends can process completions for this backend's io_uring
95 * instance (e.g. when the backend issuing IO is busy doing something
96 * else). To make that safe we have to ensure that only a single backend
97 * gets io completions from the io_uring instance at a time.
98 */
99 LWLock completion_lock;
100
101 struct io_uring io_uring_ring;
102} PgAioUringContext;
103
104/*
105 * Information about the capabilities that io_uring has.
106 *
107 * Depending on the liburing and kernel versions, different features are
108 * supported. At least for the kernel, a version check alone does not
109 * suffice, as various vendors do backport features to older kernels :(.
110 */
111typedef struct PgAioUringCaps
112{
113 bool checked;
114 /* -1 if io_uring_queue_init_mem() is unsupported */
115 int mem_init_size;
116} PgAioUringCaps;
117
118
119/* PgAioUringContexts for all backends */
120static PgAioUringContext *pgaio_uring_contexts;
121
122/* the current backend's context */
123static PgAioUringContext *pgaio_my_uring_context;
124
125static PgAioUringCaps pgaio_uring_caps =
126{
127 .checked = false,
128 .mem_init_size = -1,
129};
130
131static uint32
133{
134 /*
135 * We can subtract MAX_IO_WORKERS here as io workers are never used at the
136 * same time as io_method=io_uring.
137 */
138 return MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
139}
140
141/*
142 * Initializes pgaio_uring_caps, unless that's already done.
143 */
144static void
145pgaio_uring_check_capabilities(void)
146{
147 if (pgaio_uring_caps.checked)
148 return;
149
150 /*
151 * By default io_uring creates a shared memory mapping for each io_uring
152 * instance, leading to a large number of memory mappings. Unfortunately a
153 * large number of memory mappings slows things down; backend exit is
154 * particularly affected. To address that, newer kernels (6.5+) support
155 * using user-provided memory for the ring; by putting the relevant
156 * memory into shared memory we don't need any additional mappings.
157 *
158 * To know whether this is supported, we unfortunately need to probe the
159 * kernel by trying to create a ring with userspace-provided memory. This
160 * also has a secondary benefit: We can determine precisely how much
161 * memory we need for each io_uring instance.
162 */
163#if defined(HAVE_IO_URING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
164 {
165 struct io_uring test_ring;
166 size_t ring_size;
167 void *ring_ptr;
168 struct io_uring_params p = {0};
169 int ret;
170
171 /*
172 * Liburing does not yet provide an API to query how much memory a
173 * ring will need. So we over-estimate it here. As the memory is freed
174 * just below that's small temporary waste of memory.
175 *
176 * 1MB is more than enough for rings within io_max_concurrency's
177 * range.
178 */
179 ring_size = 1024 * 1024;
180
181 /*
182 * Hard to believe a system exists where 1MB would not be a multiple
183 * of the page size. But it's cheap to ensure...
184 */
186
188 if (ring_ptr == MAP_FAILED)
189 elog(ERROR,
190 "mmap(%zu) to determine io_uring_queue_init_mem() support failed: %m",
191 ring_size);
192
194 if (ret > 0)
195 {
196 pgaio_uring_caps.mem_init_size = ret;
197
198 elog(DEBUG1,
199 "can use combined memory mapping for io_uring, each ring needs %d bytes",
200 ret);
201
202 /* clean up the created ring, it was just for a test */
203 io_uring_queue_exit(&test_ring);
204 }
205 else
206 {
207 /*
208 * There are different reasons for ring creation to fail, but it's
209 * ok to treat that just as io_uring_queue_init_mem() not being
210 * supported. We'll report a more detailed error in
211 * pgaio_uring_shmem_init().
212 */
213 errno = -ret;
214 elog(DEBUG1,
215 "cannot use combined memory mapping for io_uring, ring creation failed: %m");
216
217 }
218
219 if (munmap(ring_ptr, ring_size) != 0)
220 elog(ERROR, "munmap() failed: %m");
221 }
222#else
223 {
224 elog(DEBUG1,
225 "can't use combined memory mapping for io_uring, kernel or liburing too old");
226 }
227#endif
228
229 pgaio_uring_caps.checked = true;
230}
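/*
 * Illustration (not from the original): without io_uring_queue_init_mem(),
 * each of the N per-backend rings comes with its own kernel-created
 * mapping, and since the rings are created in postmaster every forked
 * backend inherits all N mappings; with user-provided memory the rings live
 * inside the existing shared memory segment, so no additional mappings are
 * created and backend exit does not have to tear them down.
 */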
231
232/*
233 * Memory for all PgAioUringContext instances
234 */
235static size_t
237{
239}
240
241/*
242 * Size of the combined memory used by the io_uring instances. Returns 0 if
243 * that is not supported by kernel/liburing.
244 */
245static size_t
247{
248 size_t sz = 0;
249
250 if (pgaio_uring_caps.mem_init_size > 0)
251 {
252 /*
253 * Memory for rings needs to be allocated at a page boundary, so reserve
254 * extra space. Luckily it does not need to be aligned to hugepage
255 * boundaries, even if huge pages are used.
256 */
259 pgaio_uring_caps.mem_init_size));
260 }
261
262 return sz;
263}
264
265static size_t
266pgaio_uring_shmem_size(void)
267{
268 size_t sz;
269
272
273 return sz;
274}
275
276static void
277pgaio_uring_shmem_request(void *arg)
278{
279 /*
280 * Kernel and liburing support for various features influences how much
281 * shmem we need, so perform the necessary checks.
282 */
283 pgaio_uring_check_capabilities();
284
285 ShmemRequestStruct(.name = "AioUringContext",
286 .size = pgaio_uring_shmem_size(),
287 .ptr = (void **) &pgaio_uring_contexts,
288 );
289}
290
291static void
292pgaio_uring_shmem_init(void *arg)
293{
295 char *shmem;
296 size_t ring_mem_remain = 0;
297 char *ring_mem_next = 0;
298
299 /*
300 * We allocate memory for all PgAioUringContext instances and, if
301 * supported, the memory required for each of the io_uring instances, in
302 * one combined allocation.
303 *
304 * pgaio_uring_contexts is already set to the base of the allocation.
305 */
306 shmem = (char *) pgaio_uring_contexts;
308
309 /* if supported, handle memory alignment / sizing for io_uring memory */
310 if (pgaio_uring_caps.mem_init_size > 0)
311 {
313 ring_mem_next = shmem;
314
315 /* align to page boundary, see also pgaio_uring_ring_shmem_size() */
317
318 /* account for alignment */
320 shmem += ring_mem_next - shmem;
321
322 shmem += ring_mem_remain;
323 }
324
325 for (int contextno = 0; contextno < TotalProcs; contextno++)
326 {
327 PgAioUringContext *context = &pgaio_uring_contexts[contextno];
328 int ret;
329
330 /*
331 * Right now a high TotalProcs will cause problems in two ways:
332 *
333 * - RLIMIT_NOFILE needs to be big enough to allow all
334 * io_uring_queue_init() calls to succeed.
335 *
336 * - RLIMIT_NOFILE needs to be big enough to leave enough file
337 * descriptors over to satisfy set_max_safe_fds(). Or, even
338 * better, to leave max_files_per_process FDs over.
339 *
340 * We probably should adjust the soft RLIMIT_NOFILE to ensure that.
341 *
342 *
343 * XXX: Newer versions of io_uring support sharing the workers that
344 * execute some asynchronous IOs between io_uring instances. It might
345 * be worth using that, but we would also need to evaluate whether it
346 * causes noticeable additional contention.
347 */
348
349 /*
350 * If supported (cf. pgaio_uring_check_capabilities()), create the ring
351 * with its data in shared memory. Otherwise fall back to io_uring
352 * creating a memory mapping for each ring.
353 */
354#if defined(HAVE_IO_URING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
355 if (pgaio_uring_caps.mem_init_size > 0)
356 {
357 struct io_uring_params p = {0};
358
359 ret = io_uring_queue_init_mem(io_max_concurrency, &context->io_uring_ring, &p, ring_mem_next, ring_mem_remain);
360
361 ring_mem_remain -= ret;
362 ring_mem_next += ret;
363 }
364 else
365#endif
366 {
367 ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
368 }
369
370 if (ret < 0)
371 {
372 char *hint = NULL;
374
375 /* add hints for some failures that errno explains sufficiently */
376 if (-ret == EPERM)
377 {
379 hint = _("Check if io_uring is disabled via /proc/sys/kernel/io_uring_disabled.");
380 }
381 else if (-ret == EMFILE)
382 {
384 hint = psprintf(_("Consider increasing \"ulimit -n\" to at least %d."),
386 }
387 else if (-ret == ENOSYS)
388 {
390 hint = _("The kernel does not support io_uring.");
391 }
392
393 /* update errno to allow %m to work */
394 errno = -ret;
395
397 errcode(err),
398 errmsg("could not set up io_uring queue: %m"),
399 hint != NULL ? errhint("%s", hint) : 0);
400 }
401
402 LWLockInitialize(&context->completion_lock, LWTRANCHE_AIO_URING_COMPLETION);
403 }
404}
405
406static void
407pgaio_uring_init_backend(void)
408{
410
411 pgaio_my_uring_context = &pgaio_uring_contexts[MyProcNumber];
412}
413
414static int
415pgaio_uring_submit(uint16 num_staged_ios, PgAioHandle **staged_ios)
416{
417 struct io_uring *uring_instance = &pgaio_my_uring_context->io_uring_ring;
418
419 Assert(num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);
420
421 for (int i = 0; i < num_staged_ios; i++)
422 {
423 PgAioHandle *ioh = staged_ios[i];
424 struct io_uring_sqe *sqe;
425
426 sqe = io_uring_get_sqe(uring_instance);
427
428 if (!sqe)
429 elog(ERROR, "io_uring submission queue is unexpectedly full");
430
433 }
434
435 while (true)
436 {
437 int ret;
438
442
443 if (ret == -EINTR)
444 {
446 "aio method uring: submit EINTR, nios: %d",
447 num_staged_ios);
448 }
449 else if (ret < 0)
450 {
451 /*
452 * The io_uring_enter() manpage suggests that the appropriate
453 * reaction to EAGAIN is:
454 *
455 * "The application should wait for some completions and try
456 * again"
457 *
458 * However, it seems unlikely that that would help in our case, as
459 * we apply a low limit to the number of outstanding IOs and thus
460 * also outstanding completions, making it unlikely that we'd get
461 * EAGAIN while the OS is in good working order.
462 *
463 * Additionally, it would be problematic to just wait here, our
464 * caller might hold critical locks. It'd possibly lead to
465 * delaying the crash-restart that seems likely to occur when the
466 * kernel is under such heavy memory pressure.
467 *
468 * Update errno to allow %m to work.
469 */
470 errno = -ret;
471 elog(PANIC, "io_uring submit failed: %m");
472 }
473 else if (ret != num_staged_ios)
474 {
475 /* likely unreachable, but if it happens, we would need to re-submit */
476 elog(PANIC, "io_uring submit submitted only %d of %d",
477 ret, num_staged_ios);
478 }
479 else
480 {
482 "aio method uring: submitted %d IOs",
483 num_staged_ios);
484 break;
485 }
486 }
487
488 return num_staged_ios;
489}
490
491static void
493{
494 ProcNumber owner;
495 PGPROC *owner_proc;
496 int32 owner_pid;
497 PgAioHandle *ioh = arg;
498
499 if (!ioh)
500 return;
501
502 /* No need for context if a backend is completing the IO for itself */
503 if (ioh->owner_procno == MyProcNumber)
504 return;
505
506 owner = ioh->owner_procno;
507 owner_proc = GetPGProcByNumber(owner);
508 owner_pid = owner_proc->pid;
509
510 errcontext("completing I/O on behalf of process %d", owner_pid);
511}
512
513static void
515{
516 int ready;
517 int orig_ready;
518 ErrorContextCallback errcallback = {0};
519
520 Assert(LWLockHeldByMeInMode(&context->completion_lock, LW_EXCLUSIVE));
521
523 errcallback.previous = error_context_stack;
524 error_context_stack = &errcallback;
525
526 /*
527 * Don't drain more events than available right now. Otherwise it's
528 * plausible that one backend could get stuck, for a while, receiving CQEs
529 * without actually processing them.
530 */
531 orig_ready = ready = io_uring_cq_ready(&context->io_uring_ring);
532
533 while (ready > 0)
534 {
537
539 ncqes =
540 io_uring_peek_batch_cqe(&context->io_uring_ring,
541 cqes,
543 Assert(ncqes <= ready);
544
545 ready -= ncqes;
546
547 for (int i = 0; i < ncqes; i++)
548 {
549 struct io_uring_cqe *cqe = cqes[i];
551 int result = cqe->res;
552
553 errcallback.arg = ioh;
554
555 io_uring_cqe_seen(&context->io_uring_ring, cqe);
556
558 errcallback.arg = NULL;
559 }
560
562
564 "drained %d/%d, now expecting %d",
565 ncqes, orig_ready, io_uring_cq_ready(&context->io_uring_ring));
566 }
567
568 error_context_stack = errcallback.previous;
569}
570
571static void
572pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation)
573{
575 ProcNumber owner_procno = ioh->owner_procno;
576 PgAioUringContext *owner_context = &pgaio_uring_contexts[owner_procno];
577 bool expect_cqe;
578 int waited = 0;
579
580 /*
581 * XXX: It would be nice to have a smarter locking scheme; nearly all the
582 * time the backend owning the ring will consume the completions, making
583 * the locking unnecessarily expensive.
584 */
585 LWLockAcquire(&owner_context->completion_lock, LW_EXCLUSIVE);
586
587 while (true)
588 {
590 "wait_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64 ", cycle %d",
591 ioh->generation,
593 waited);
594
597 {
598 /* the IO was completed by another backend */
599 break;
600 }
601 else if (io_uring_cq_ready(&owner_context->io_uring_ring))
602 {
603 /* no need to wait in the kernel, io_uring has a completion */
604 expect_cqe = true;
605 }
606 else
607 {
608 int ret;
609 struct io_uring_cqe *cqes;
610
611 /* need to wait in the kernel */
613 ret = io_uring_wait_cqes(&owner_context->io_uring_ring, &cqes, 1, NULL, NULL);
615
616 if (ret == -EINTR)
617 {
618 continue;
619 }
620 else if (ret != 0)
621 {
622 /* see comment after io_uring_submit() */
623 errno = -ret;
624 elog(PANIC, "io_uring wait failed: %m");
625 }
626 else
627 {
628 Assert(cqes != NULL);
629 expect_cqe = true;
630 waited++;
631 }
632 }
633
634 if (expect_cqe)
635 {
637 }
638 }
639
640 LWLockRelease(&owner_context->completion_lock);
641
643 "wait_one with %d sleeps",
644 waited);
645}
646
647static void
648pgaio_uring_check_one(PgAioHandle *ioh, uint64 ref_generation)
649{
650 ProcNumber owner_procno = ioh->owner_procno;
651 PgAioUringContext *owner_context = &pgaio_uring_contexts[owner_procno];
652
653 /*
654 * This check is not reliable when not holding the completion lock, but
655 * it's a useful cheap pre-check to see if it's worth trying to get the
656 * completion lock.
657 */
658 if (!io_uring_cq_ready(&owner_context->io_uring_ring))
659 return;
660
661 /*
662 * If the completion lock is currently held, the holder will likely
663 * process any pending completions, so give up.
664 */
665 if (!LWLockConditionalAcquire(&owner_context->completion_lock, LW_EXCLUSIVE))
666 return;
667
669 "check_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64,
670 ioh->generation,
672
673 /*
674 * Recheck if there are any completions; another backend could have
675 * processed them since we checked above, or our unlocked pre-check could
676 * have been reading outdated values.
677 *
678 * It is possible that the IO handle has been reused since the start of
679 * the call, but now that we have the lock, we can just as well drain all
680 * completions.
681 */
682 if (io_uring_cq_ready(&owner_context->io_uring_ring))
684
685 LWLockRelease(&owner_context->completion_lock);
686}
687
688/*
689 * io_uring executes IO in process context if possible. That's generally good,
690 * as it reduces context switching. When performing a lot of buffered IO that
691 * means that copying between page cache and userspace memory happens in the
692 * foreground, as it can't be offloaded to DMA hardware as is possible when
693 * using direct IO. When executing a lot of buffered IO this causes io_uring
694 * to be slower than worker mode, as worker mode parallelizes the
695 * copying. io_uring can be told to offload work to worker threads instead.
696 *
697 * If the IOs are small, we only benefit from forcing things into the
698 * background if there is a lot of IO, as otherwise the overhead from context
699 * switching is higher than the gain.
700 *
701 * If IOs are large, there is benefit from asynchronous processing at lower
702 * queue depths, as IO latency is less of a crucial factor and parallelizing
703 * memory copies is more important. In addition, it is important to trigger
704 * asynchronous processing even at low queue depth, as with foreground
705 * processing we might never actually reach deep enough IO depths to trigger
706 * asynchronous processing, which in turn would deprive readahead control
707 * logic of information about whether a deeper look-ahead distance would be
708 * advantageous.
709 *
710 * We have done some basic benchmarking to validate the thresholds used, but
711 * it's quite plausible that there are better values. See
712 * https://postgr.es/m/3gkuvs3lz3u3skuaxfkxnsysfqslf2srigl6546vhesekve6v2%40va3r5esummvg
713 * for some details of this benchmarking.
714 */
715static bool
717{
718 /*
719 * With DIO there's no benefit from forcing asynchronous processing, as
720 * io_uring will never execute direct IO synchronously during submission.
721 */
722 if (!(ioh->flags & PGAIO_HF_BUFFERED))
723 return false;
724
725 /*
726 * Once the IO queue depth is not that shallow anymore, the overhead of
727 * dispatching to the background is a less significant factor.
728 */
730 return true;
731
732 /*
733 * If the IO is larger, the gains from parallelizing the memory copy are
734 * larger and typically the impact of the latency is smaller.
735 */
736 if (io_size >= (BLCKSZ * 4))
737 return true;
738
739 return false;
740}
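/*
 * Worked example (added for illustration): with the default BLCKSZ of 8192,
 * a buffered read of 4 or more blocks (>= 32 kB) is offloaded to io_uring's
 * worker threads right away, while smaller buffered IOs are only offloaded
 * once enough IOs are already in flight (the queue-depth check above).
 * Direct IO never takes this path, as io_uring does not execute it
 * synchronously during submission.
 */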
741
742static void
744{
745 struct iovec *iov;
746 size_t io_size = 0;
747
748 switch ((PgAioOp) ioh->op)
749 {
750 case PGAIO_OP_READV:
751 iov = &pgaio_ctl->iovecs[ioh->iovec_off];
752 if (ioh->op_data.read.iov_length == 1)
753 {
755 ioh->op_data.read.fd,
756 iov->iov_base,
757 iov->iov_len,
758 ioh->op_data.read.offset);
759
760 io_size = iov->iov_len;
761 }
762 else
763 {
765 ioh->op_data.read.fd,
766 iov,
767 ioh->op_data.read.iov_length,
768 ioh->op_data.read.offset);
769
770 for (int i = 0; i < ioh->op_data.read.iov_length; i++, iov++)
771 io_size += iov->iov_len;
772 }
773
776
777 break;
778
779 case PGAIO_OP_WRITEV:
780 iov = &pgaio_ctl->iovecs[ioh->iovec_off];
781 if (ioh->op_data.write.iov_length == 1)
782 {
784 ioh->op_data.write.fd,
785 iov->iov_base,
786 iov->iov_len,
787 ioh->op_data.write.offset);
788 }
789 else
790 {
792 ioh->op_data.write.fd,
793 iov,
794 ioh->op_data.write.iov_length,
795 ioh->op_data.write.offset);
796 }
797
798 /*
799 * For now don't trigger use of IOSQE_ASYNC for writes; it's not
800 * clear there is a performance benefit in doing so.
801 */
802
803 break;
804
805 case PGAIO_OP_INVALID:
806 elog(ERROR, "trying to prepare invalid IO operation for execution");
807 }
808
810}
811
812#endif /* IOMETHOD_IO_URING_ENABLED */