/*-------------------------------------------------------------------------
 *
 * method_io_uring.c
 *    AIO - perform AIO using Linux's io_uring
 *
 * For now we create one io_uring instance for each backend. These io_uring
 * instances have to be created in the postmaster, during startup, to allow
 * other backends to process IO completions, if the issuing backend is
 * currently busy doing other things. Other backends may not use another
 * backend's io_uring instance to submit IO, as that would require additional
 * locking that would likely be harmful for performance.
 *
 * We likely will want to introduce a backend-local io_uring instance in the
 * future, e.g. for FE/BE network IO.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/storage/aio/method_io_uring.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

/* included early, for IOMETHOD_IO_URING_ENABLED */
#include "storage/aio.h"

#ifdef IOMETHOD_IO_URING_ENABLED

#include <sys/mman.h>
#include <unistd.h>

#include <liburing.h>

#include "miscadmin.h"
#include "storage/aio_internal.h"
#include "storage/fd.h"
#include "storage/proc.h"
#include "storage/shmem.h"
#include "storage/lwlock.h"
#include "storage/procnumber.h"
#include "utils/wait_event.h"


/* number of completions processed at once */
#define PGAIO_MAX_LOCAL_COMPLETED_IO 32


/* Entry points for IoMethodOps. */
static size_t pgaio_uring_shmem_size(void);
static void pgaio_uring_shmem_init(bool first_time);
static void pgaio_uring_init_backend(void);
static int  pgaio_uring_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
static void pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation);

/* helper functions */
static void pgaio_uring_sq_from_io(PgAioHandle *ioh, struct io_uring_sqe *sqe);


const IoMethodOps pgaio_uring_ops = {
    /*
     * While io_uring mostly is OK with FDs getting closed while the IO is in
     * flight, that is not true for IOs submitted with IOSQE_ASYNC.
     *
     * See
     * https://postgr.es/m/5ons2rtmwarqqhhexb3dnqulw5rjgwgoct57vpdau4rujlrffj%403fls6d2mkiwc
     */
    .wait_on_fd_before_close = true,

    .shmem_size = pgaio_uring_shmem_size,
    .shmem_init = pgaio_uring_shmem_init,
    .init_backend = pgaio_uring_init_backend,

    .submit = pgaio_uring_submit,
    .wait_one = pgaio_uring_wait_one,
};

/*
 * Per-backend state when using io_method=io_uring
 *
 * Align the whole struct to a cacheline boundary, to prevent false sharing
 * between completion_lock and the prior backend's io_uring_ring.
 */
typedef struct pg_attribute_aligned (PG_CACHE_LINE_SIZE)
PgAioUringContext
{
    /*
     * Multiple backends can process completions for this backend's io_uring
     * instance (e.g. when the backend issuing IO is busy doing something
     * else). To make that safe we have to ensure that only a single backend
     * gets IO completions from the io_uring instance at a time.
     */
    LWLock      completion_lock;

    struct io_uring io_uring_ring;
} PgAioUringContext;

/*
 * Information about the capabilities that io_uring has.
 *
 * Depending on liburing and kernel version, different features are
 * supported. At least for the kernel, a version check does not suffice, as
 * various vendors backport features to older kernels :(.
 */
typedef struct PgAioUringCaps
{
    bool        checked;
    /* -1 if io_uring_queue_init_mem() is unsupported */
    int         mem_init_size;
} PgAioUringCaps;


/* PgAioUringContexts for all backends */
static PgAioUringContext *pgaio_uring_contexts;

/* the current backend's context */
static PgAioUringContext *pgaio_my_uring_context;

static PgAioUringCaps pgaio_uring_caps =
{
    .checked = false,
    .mem_init_size = -1,
};

static uint32
pgaio_uring_procs(void)
{
    /*
     * We can subtract MAX_IO_WORKERS here as io workers are never used at
     * the same time as io_method=io_uring.
     */
    return MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
}

/*
 * Initializes pgaio_uring_caps, unless that's already done.
 */
static void
pgaio_uring_check_capabilities(void)
{
    if (pgaio_uring_caps.checked)
        return;

    /*
     * By default io_uring creates a shared memory mapping for each io_uring
     * instance, leading to a large number of memory mappings. Unfortunately
     * a large number of memory mappings slows things down; backend exit is
     * particularly affected. To address that, newer kernels (6.5) support
     * using user-provided memory for the rings; by putting the relevant
     * memory into shared memory we don't need any additional mappings.
     *
     * To know whether this is supported, we unfortunately need to probe the
     * kernel by trying to create a ring with userspace-provided memory. This
     * also has a secondary benefit: We can determine precisely how much
     * memory we need for each io_uring instance.
     */
#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
    {
        struct io_uring test_ring;
        size_t      ring_size;
        void       *ring_ptr;
        struct io_uring_params p = {0};
        int         ret;

        /*
         * Liburing does not yet provide an API to query how much memory a
         * ring will need, so we over-estimate it here. As the memory is
         * freed just below, that's a small temporary waste of memory.
         *
         * 1MB is more than enough for rings within io_max_concurrency's
         * range.
         */
        ring_size = 1024 * 1024;

        /*
         * Hard to believe a system exists where 1MB would not be a multiple
         * of the page size. But it's cheap to ensure...
         */
        ring_size -= ring_size % sysconf(_SC_PAGESIZE);

        ring_ptr = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (ring_ptr == MAP_FAILED)
            elog(ERROR,
                 "mmap(%zu) to determine io_uring_queue_init_mem() support failed: %m",
                 ring_size);

        ret = io_uring_queue_init_mem(io_max_concurrency, &test_ring, &p, ring_ptr, ring_size);
        if (ret > 0)
        {
            pgaio_uring_caps.mem_init_size = ret;

            elog(DEBUG1,
                 "can use combined memory mapping for io_uring, each ring needs %d bytes",
                 ret);

            /* clean up the created ring, it was just for a test */
            io_uring_queue_exit(&test_ring);
        }
        else
        {
            /*
             * There are different reasons for ring creation to fail, but
             * it's ok to treat that just as io_uring_queue_init_mem() not
             * being supported. We'll report a more detailed error in
             * pgaio_uring_shmem_init().
             */
            errno = -ret;
            elog(DEBUG1,
                 "cannot use combined memory mapping for io_uring, ring creation failed: %m");
        }

        if (munmap(ring_ptr, ring_size) != 0)
            elog(ERROR, "munmap() failed: %m");
    }
#else
    {
        elog(DEBUG1,
             "can't use combined memory mapping for io_uring, kernel or liburing too old");
    }
#endif

    pgaio_uring_caps.checked = true;
}
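
/*
 * Illustrative, standalone sketch of the probe above (a hypothetical test
 * program, not used or compiled by PostgreSQL; queue depth 64 is arbitrary):
 * create a throwaway ring in caller-provided memory to learn whether
 * io_uring_queue_init_mem() works and how much memory one ring needs.
 */
#if 0
#include <liburing.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    struct io_uring ring;
    struct io_uring_params params = {0};
    size_t      buf_size = 1024 * 1024; /* deliberately generous */
    void       *buf;
    int         ret;

    /* round down to a page multiple, as the kernel expects page-aligned memory */
    buf_size -= buf_size % sysconf(_SC_PAGESIZE);
    buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* returns the bytes consumed from buf on success, a negative errno on failure */
    ret = io_uring_queue_init_mem(64, &ring, &params, buf, buf_size);
    if (ret > 0)
    {
        printf("supported, a 64-entry ring needs %d bytes\n", ret);
        io_uring_queue_exit(&ring);
    }
    else
        printf("not supported: %d\n", ret);

    munmap(buf, buf_size);
    return 0;
}
#endif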

/*
 * Memory for all PgAioUringContext instances
 */
static size_t
pgaio_uring_context_shmem_size(void)
{
    return mul_size(pgaio_uring_procs(), sizeof(PgAioUringContext));
}

/*
 * Memory for the combined memory region used by the io_uring instances.
 * Returns 0 if that is not supported by kernel/liburing.
 */
static size_t
pgaio_uring_ring_shmem_size(void)
{
    size_t      sz = 0;

    if (pgaio_uring_caps.mem_init_size > 0)
    {
        /*
         * Memory for rings needs to start at a page boundary, so reserve
         * space for that. Luckily it does not need to be aligned to hugepage
         * boundaries, even if huge pages are used.
         */
        sz = add_size(sz, sysconf(_SC_PAGESIZE));
        sz = add_size(sz, mul_size(pgaio_uring_procs(),
                                   pgaio_uring_caps.mem_init_size));
    }

    return sz;
}

static size_t
pgaio_uring_shmem_size(void)
{
    size_t      sz;

    /*
     * Kernel and liburing support for various features influences how much
     * shmem we need; perform the necessary checks.
     */
    pgaio_uring_check_capabilities();

    sz = pgaio_uring_context_shmem_size();
    sz = add_size(sz, pgaio_uring_ring_shmem_size());

    return sz;
}
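
/*
 * Illustrative arithmetic (hypothetical numbers, not measured): if the
 * capability probe reported mem_init_size = 64 kB and pgaio_uring_procs()
 * returned 128, the total request would be 128 cacheline-aligned
 * PgAioUringContext structs, plus one page of alignment slack, plus
 * 128 * 64 kB ~= 8 MB of shared memory for the rings themselves.
 */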

static void
pgaio_uring_shmem_init(bool first_time)
{
    int         TotalProcs = pgaio_uring_procs();
    bool        found;
    char       *shmem;
    size_t      ring_mem_remain = 0;
    char       *ring_mem_next = 0;

    /*
     * We allocate memory for all PgAioUringContext instances and, if
     * supported, the memory required for each of the io_uring instances, in
     * one ShmemInitStruct().
     */
    shmem = ShmemInitStruct("AioUringContext", pgaio_uring_shmem_size(), &found);
    if (found)
        return;

    pgaio_uring_contexts = (PgAioUringContext *) shmem;
    shmem += pgaio_uring_context_shmem_size();

    /* if supported, handle memory alignment / sizing for io_uring memory */
    if (pgaio_uring_caps.mem_init_size > 0)
    {
        ring_mem_remain = pgaio_uring_ring_shmem_size();
        ring_mem_next = shmem;

        /* align to page boundary, see also pgaio_uring_ring_shmem_size() */
        ring_mem_next = (char *) TYPEALIGN(sysconf(_SC_PAGESIZE), ring_mem_next);

        /* account for alignment */
        ring_mem_remain -= ring_mem_next - shmem;
        shmem += ring_mem_next - shmem;

        shmem += ring_mem_remain;
    }

    for (int contextno = 0; contextno < TotalProcs; contextno++)
    {
        PgAioUringContext *context = &pgaio_uring_contexts[contextno];
        int         ret;

        /*
         * Right now a high TotalProcs will cause problems in two ways:
         *
         * - RLIMIT_NOFILE needs to be big enough to allow all
         *   io_uring_queue_init() calls to succeed.
         *
         * - RLIMIT_NOFILE needs to be big enough to still have enough file
         *   descriptors left over to satisfy set_max_safe_fds(). Or, even
         *   better, have max_files_per_process FDs left over.
         *
         * We probably should adjust the soft RLIMIT_NOFILE to ensure that.
         *
         *
         * XXX: Newer versions of io_uring support sharing the workers that
         * execute some asynchronous IOs between io_uring instances. It might
         * be worth using that - also need to evaluate if that causes
         * noticeable additional contention?
         */

        /*
         * If supported (c.f. pgaio_uring_check_capabilities()), create the
         * ring with its data in shared memory. Otherwise fall back to
         * io_uring creating a memory mapping for each ring.
         */
#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
        if (pgaio_uring_caps.mem_init_size > 0)
        {
            struct io_uring_params p = {0};

            ret = io_uring_queue_init_mem(io_max_concurrency, &context->io_uring_ring, &p, ring_mem_next, ring_mem_remain);

            ring_mem_remain -= ret;
            ring_mem_next += ret;
        }
        else
#endif
        {
            ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
        }

        if (ret < 0)
        {
            char       *hint = NULL;
            int         err = ERRCODE_INTERNAL_ERROR;

            /* add hints for some failures that errno explains sufficiently */
            if (-ret == EPERM)
            {
                err = ERRCODE_INSUFFICIENT_PRIVILEGE;
                hint = _("Check if io_uring is disabled via /proc/sys/kernel/io_uring_disabled.");
            }
            else if (-ret == EMFILE)
            {
                err = ERRCODE_INSUFFICIENT_RESOURCES;
                hint = psprintf(_("Consider increasing \"ulimit -n\" to at least %d."),
                                TotalProcs + max_files_per_process);
            }
            else if (-ret == ENOSYS)
            {
                err = ERRCODE_FEATURE_NOT_SUPPORTED;
                hint = _("The kernel does not support io_uring.");
            }

            /* update errno to allow %m to work */
            errno = -ret;

            ereport(ERROR,
                    errcode(err),
                    errmsg("could not setup io_uring queue: %m"),
                    hint != NULL ? errhint("%s", hint) : 0);
        }

        LWLockInitialize(&context->completion_lock, LWTRANCHE_AIO_URING_COMPLETION);
    }
}
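
/*
 * Operational note (an assumption about typical setups, not something this
 * file checks): the EPERM hint above refers to the kernel.io_uring_disabled
 * sysctl provided by newer kernels. Something like
 *     sysctl kernel.io_uring_disabled
 * shows the current setting; 0 allows io_uring generally, while higher
 * values restrict or disable it and would lead to the error reported above.
 */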

static void
pgaio_uring_init_backend(void)
{
    Assert(MyProcNumber < pgaio_uring_procs());

    pgaio_my_uring_context = &pgaio_uring_contexts[MyProcNumber];
}

static int
pgaio_uring_submit(uint16 num_staged_ios, PgAioHandle **staged_ios)
{
    struct io_uring *uring_instance = &pgaio_my_uring_context->io_uring_ring;
    int         in_flight_before = dclist_count(&pgaio_my_backend->in_flight_ios);

    Assert(num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);

    for (int i = 0; i < num_staged_ios; i++)
    {
        PgAioHandle *ioh = staged_ios[i];
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(uring_instance);

        if (!sqe)
            elog(ERROR, "io_uring submission queue is unexpectedly full");

        pgaio_io_prepare_submit(ioh);
        pgaio_uring_sq_from_io(ioh, sqe);

        /*
         * io_uring executes IO in process context if possible. That's
         * generally good, as it reduces context switching. When performing a
         * lot of buffered IO that means that copying between page cache and
         * userspace memory happens in the foreground, as it can't be
         * offloaded to DMA hardware as is possible when using direct IO.
         * When executing a lot of buffered IO this causes io_uring to be
         * slower than worker mode, as worker mode parallelizes the copying.
         * io_uring can be told to offload work to worker threads instead.
         *
         * If an IO is buffered IO and we already have IOs in flight or
         * multiple IOs are being submitted, we thus tell io_uring to execute
         * the IO in the background. We don't do so for the first few IOs
         * being submitted, as executing in this process' context has lower
         * latency.
         */
        if (in_flight_before > 4 && (ioh->flags & PGAIO_HF_BUFFERED))
            io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);

        in_flight_before++;
    }

    while (true)
    {
        int         ret;

        pgstat_report_wait_start(WAIT_EVENT_AIO_IO_URING_SUBMIT);
        ret = io_uring_submit(uring_instance);
        pgstat_report_wait_end();

        if (ret == -EINTR)
        {
            pgaio_debug(DEBUG3,
                        "aio method uring: submit EINTR, nios: %d",
                        num_staged_ios);
        }
        else if (ret < 0)
        {
            /*
             * The io_uring_enter() manpage suggests that the appropriate
             * reaction to EAGAIN is:
             *
             * "The application should wait for some completions and try
             * again"
             *
             * However, it seems unlikely that that would help in our case,
             * as we apply a low limit to the number of outstanding IOs and
             * thus also outstanding completions, making it unlikely that
             * we'd get EAGAIN while the OS is in good working order.
             *
             * Additionally, it would be problematic to just wait here, as
             * our caller might hold critical locks. It'd possibly lead to
             * delaying the crash-restart that seems likely to occur when the
             * kernel is under such heavy memory pressure.
             *
             * Update errno to allow %m to work.
             */
            errno = -ret;
            elog(PANIC, "io_uring submit failed: %m");
        }
        else if (ret != num_staged_ios)
        {
            /* likely unreachable, but if it is, we would need to re-submit */
            elog(PANIC, "io_uring submit submitted only %d of %d",
                 ret, num_staged_ios);
        }
        else
        {
            pgaio_debug(DEBUG4,
                        "aio method uring: submitted %d IOs",
                        num_staged_ios);
            break;
        }
    }

    return num_staged_ios;
}
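
/*
 * Minimal, self-contained sketch of the bare liburing flow the code above
 * wraps (a hypothetical standalone program, not used by PostgreSQL; file
 * name and queue depth are arbitrary): get an SQE, prepare a read, attach
 * user data, submit, then wait for the CQE and inspect its result.
 */
#if 0
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>

int
main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char        buf[4096];
    int         fd;

    if (io_uring_queue_init(8, &ring, 0) < 0)
        return 1;

    fd = open("/etc/hostname", O_RDONLY);
    if (fd < 0)
        return 1;

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
    io_uring_sqe_set_data(sqe, buf);    /* analogous to attaching the PgAioHandle */

    if (io_uring_submit(&ring) != 1)
        return 1;

    if (io_uring_wait_cqe(&ring, &cqe) == 0)
    {
        /* cqe->res is the read's return value, or a negative errno */
        printf("read returned %d, user data %p\n",
               cqe->res, io_uring_cqe_get_data(cqe));
        io_uring_cqe_seen(&ring, cqe);
    }

    io_uring_queue_exit(&ring);
    return 0;
}
#endif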

static void
pgaio_uring_completion_error_callback(void *arg)
{
    ProcNumber  owner;
    PGPROC     *owner_proc;
    int32       owner_pid;
    PgAioHandle *ioh = arg;

    if (!ioh)
        return;

    /* No need for context if a backend is completing the IO for itself */
    if (ioh->owner_procno == MyProcNumber)
        return;

    owner = ioh->owner_procno;
    owner_proc = GetPGProcByNumber(owner);
    owner_pid = owner_proc->pid;

    errcontext("completing I/O on behalf of process %d", owner_pid);
}

static void
pgaio_uring_drain_locked(PgAioUringContext *context)
{
    int         ready;
    int         orig_ready;
    ErrorContextCallback errcallback = {0};

    Assert(LWLockHeldByMeInMode(&context->completion_lock, LW_EXCLUSIVE));

    errcallback.callback = pgaio_uring_completion_error_callback;
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    /*
     * Don't drain more events than available right now. Otherwise it's
     * plausible that one backend could get stuck, for a while, receiving
     * CQEs without actually processing them.
     */
    orig_ready = ready = io_uring_cq_ready(&context->io_uring_ring);

    while (ready > 0)
    {
        struct io_uring_cqe *cqes[PGAIO_MAX_LOCAL_COMPLETED_IO];
        uint32      ncqes;

        START_CRIT_SECTION();
        ncqes =
            io_uring_peek_batch_cqe(&context->io_uring_ring,
                                    cqes,
                                    Min(PGAIO_MAX_LOCAL_COMPLETED_IO, ready));
        Assert(ncqes <= ready);

        ready -= ncqes;

        for (int i = 0; i < ncqes; i++)
        {
            struct io_uring_cqe *cqe = cqes[i];
            PgAioHandle *ioh;

            ioh = io_uring_cqe_get_data(cqe);
            errcallback.arg = ioh;
            io_uring_cqe_seen(&context->io_uring_ring, cqe);

            pgaio_io_process_completion(ioh, cqe->res);
            errcallback.arg = NULL;
        }

        END_CRIT_SECTION();

        pgaio_debug(DEBUG3,
                    "drained %d/%d, now expecting %d",
                    ncqes, orig_ready, io_uring_cq_ready(&context->io_uring_ring));
    }

    error_context_stack = errcallback.previous;
}
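
/*
 * Sketch of the batched drain pattern in isolation (a hypothetical fragment,
 * assuming an already initialized "ring" with IOs in flight): peek up to a
 * fixed number of ready CQEs without blocking, mark each as seen, and only
 * drain completions that were ready when the loop started.
 */
#if 0
static void
drain_ready_cqes(struct io_uring *ring)
{
    unsigned    ready = io_uring_cq_ready(ring);

    while (ready > 0)
    {
        struct io_uring_cqe *cqes[32];
        unsigned    ncqes;

        ncqes = io_uring_peek_batch_cqe(ring, cqes, ready < 32 ? ready : 32);

        for (unsigned i = 0; i < ncqes; i++)
        {
            /* handle cqes[i]->res / io_uring_cqe_get_data(cqes[i]) here */
            io_uring_cqe_seen(ring, cqes[i]);
        }

        ready -= ncqes;
    }
}
#endif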

static void
pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation)
{
    PgAioHandleState state;
    ProcNumber  owner_procno = ioh->owner_procno;
    PgAioUringContext *owner_context = &pgaio_uring_contexts[owner_procno];
    bool        expect_cqe;
    int         waited = 0;

    /*
     * XXX: It would be nice to have a smarter locking scheme; nearly all the
     * time the backend owning the ring will consume the completions, making
     * the locking unnecessarily expensive.
     */
    LWLockAcquire(&owner_context->completion_lock, LW_EXCLUSIVE);

    while (true)
    {
        pgaio_debug_io(DEBUG4, ioh,
                       "wait_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64 ", cycle %d",
                       ioh->generation,
                       ref_generation,
                       waited);

        if (pgaio_io_was_recycled(ioh, ref_generation, &state) ||
            state != PGAIO_HS_SUBMITTED)
        {
            /* the IO was completed by another backend */
            break;
        }
        else if (io_uring_cq_ready(&owner_context->io_uring_ring))
        {
            /* no need to wait in the kernel, io_uring has a completion */
            expect_cqe = true;
        }
        else
        {
            int         ret;
            struct io_uring_cqe *cqes;

            /* need to wait in the kernel */
            pgstat_report_wait_start(WAIT_EVENT_AIO_IO_URING_EXECUTION);
            ret = io_uring_wait_cqes(&owner_context->io_uring_ring, &cqes, 1, NULL, NULL);
            pgstat_report_wait_end();

            if (ret == -EINTR)
            {
                continue;
            }
            else if (ret != 0)
            {
                /* see comment after io_uring_submit() */
                errno = -ret;
                elog(PANIC, "io_uring wait failed: %m");
            }
            else
            {
                Assert(cqes != NULL);
                expect_cqe = true;
                waited++;
            }
        }

        if (expect_cqe)
        {
            pgaio_uring_drain_locked(owner_context);
        }
    }

    LWLockRelease(&owner_context->completion_lock);

    pgaio_debug(DEBUG3,
                "wait_one with %d sleeps",
                waited);
}

static void
pgaio_uring_sq_from_io(PgAioHandle *ioh, struct io_uring_sqe *sqe)
{
    struct iovec *iov;

    switch ((PgAioOp) ioh->op)
    {
        case PGAIO_OP_READV:
            iov = &pgaio_ctl->iovecs[ioh->iovec_off];
            if (ioh->op_data.read.iov_length == 1)
            {
                io_uring_prep_read(sqe,
                                   ioh->op_data.read.fd,
                                   iov->iov_base,
                                   iov->iov_len,
                                   ioh->op_data.read.offset);
            }
            else
            {
                io_uring_prep_readv(sqe,
                                    ioh->op_data.read.fd,
                                    iov,
                                    ioh->op_data.read.iov_length,
                                    ioh->op_data.read.offset);
            }
            break;

        case PGAIO_OP_WRITEV:
            iov = &pgaio_ctl->iovecs[ioh->iovec_off];
            if (ioh->op_data.write.iov_length == 1)
            {
                io_uring_prep_write(sqe,
                                    ioh->op_data.write.fd,
                                    iov->iov_base,
                                    iov->iov_len,
                                    ioh->op_data.write.offset);
            }
            else
            {
                io_uring_prep_writev(sqe,
                                     ioh->op_data.write.fd,
                                     iov,
                                     ioh->op_data.write.iov_length,
                                     ioh->op_data.write.offset);
            }
            break;

        case PGAIO_OP_INVALID:
            elog(ERROR, "trying to prepare invalid IO operation for execution");
    }

    io_uring_sqe_set_data(sqe, ioh);
}
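
/*
 * Tiny illustration of the single- vs. multi-segment distinction above
 * (a hypothetical fragment; fd, buffers, sizes, and offsets are made up):
 * with one segment the plain read preparation suffices, with several
 * segments the vectored variant takes the whole iovec array and its length.
 */
#if 0
static void
prep_read_example(struct io_uring *ring, int fd, char *a, char *b)
{
    struct iovec iov[2] = {
        {.iov_base = a, .iov_len = 8192},
        {.iov_base = b, .iov_len = 8192},
    };
    struct io_uring_sqe *sqe;

    /* single segment: the non-vectored preparation is enough */
    sqe = io_uring_get_sqe(ring);
    io_uring_prep_read(sqe, fd, a, 8192, 0);

    /* multiple segments: pass the iovec array and its element count */
    sqe = io_uring_get_sqe(ring);
    io_uring_prep_readv(sqe, fd, iov, 2, 8192);
}
#endif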

#endif                          /* IOMETHOD_IO_URING_ENABLED */