/*
 * NOTE(review): this span contained doxygen web-page navigation residue
 * ("Loading...", "Searching...", "No Matches") from the scrape, not part
 * of md.c itself.  The actual file content begins below.
 */
1/*-------------------------------------------------------------------------
2 *
3 * md.c
4 * This code manages relations that reside on magnetic disk.
5 *
6 * Or at least, that was what the Berkeley folk had in mind when they named
7 * this file. In reality, what this code provides is an interface from
8 * the smgr API to Unix-like filesystem APIs, so it will work with any type
9 * of device for which the operating system provides filesystem support.
10 * It doesn't matter whether the bits are on spinning rust or some other
11 * storage technology.
12 *
13 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
14 * Portions Copyright (c) 1994, Regents of the University of California
15 *
16 *
17 * IDENTIFICATION
18 * src/backend/storage/smgr/md.c
19 *
20 *-------------------------------------------------------------------------
21 */
22#include "postgres.h"
23
24#include <limits.h>
25#include <unistd.h>
26#include <fcntl.h>
27#include <sys/file.h>
28
29#include "access/xlogutils.h"
30#include "commands/tablespace.h"
31#include "common/file_utils.h"
32#include "miscadmin.h"
33#include "pg_trace.h"
34#include "pgstat.h"
35#include "storage/aio.h"
36#include "storage/bufmgr.h"
37#include "storage/fd.h"
38#include "storage/md.h"
40#include "storage/smgr.h"
41#include "storage/sync.h"
42#include "utils/memutils.h"
43#include "utils/wait_event.h"
44
45/*
46 * The magnetic disk storage manager keeps track of open file
47 * descriptors in its own descriptor pool. This is done to make it
48 * easier to support relations that are larger than the operating
49 * system's file size limit (often 2GBytes). In order to do that,
50 * we break relations up into "segment" files that are each shorter than
51 * the OS file size limit. The segment size is set by the RELSEG_SIZE
52 * configuration constant in pg_config.h.
53 *
54 * On disk, a relation must consist of consecutively numbered segment
55 * files in the pattern
56 * -- Zero or more full segments of exactly RELSEG_SIZE blocks each
57 * -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
58 * -- Optionally, any number of inactive segments of size 0 blocks.
59 * The full and partial segments are collectively the "active" segments.
60 * Inactive segments are those that once contained data but are currently
61 * not needed because of an mdtruncate() operation. The reason for leaving
62 * them present at size zero, rather than unlinking them, is that other
63 * backends and/or the checkpointer might be holding open file references to
64 * such segments. If the relation expands again after mdtruncate(), such
65 * that a deactivated segment becomes active again, it is important that
66 * such file references still be valid --- else data might get written
67 * out to an unlinked old copy of a segment file that will eventually
68 * disappear.
69 *
70 * RELSEG_SIZE must fit into BlockNumber; but since we expose its value
71 * as an integer GUC, it actually needs to fit in signed int. It's worth
72 * having a cross-check for this since configure's --with-segsize options
73 * could let people select insane values.
74 */
76 "RELSEG_SIZE must fit in an integer");
77
78/*
79 * File descriptors are stored in the per-fork md_seg_fds arrays inside
80 * SMgrRelation. The length of these arrays is stored in md_num_open_segs.
81 * Note that a fork's md_num_open_segs having a specific value does not
82 * necessarily mean the relation doesn't have additional segments; we may
83 * just not have opened the next segment yet. (We could not have "all
84 * segments are in the array" as an invariant anyway, since another backend
85 * could extend the relation while we aren't looking.) We do not have
86 * entries for inactive segments, however; as soon as we find a partial
87 * segment, we assume that any subsequent segments are inactive.
88 *
89 * The entire MdfdVec array is palloc'd in the MdCxt memory context.
90 */
91
92typedef struct _MdfdVec
93{
94 File mdfd_vfd; /* fd number in fd.c's pool */
95 BlockNumber mdfd_segno; /* segment number, from 0 */
97
98static MemoryContext MdCxt; /* context for all MdfdVec objects */
99
100
101/* Populate a file tag describing an md.c segment file. */
/*
 * Written as a comma expression (not do/while) so it can be used where a
 * statement would not fit.  The memset zeroes the whole FileTag first so
 * that any padding bytes compare equal when tags are hashed/compared.
 */
102#define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
103( \
104 memset(&(a), 0, sizeof(FileTag)), \
105 (a).handler = SYNC_HANDLER_MD, \
106 (a).rlocator = (xx_rlocator), \
107 (a).forknum = (xx_forknum), \
108 (a).segno = (xx_segno) \
109)
110
111
112/*** behavior for mdopen & _mdfd_getseg ***/
/*
 * These are bit flags and may be OR'd together (e.g. EXTENSION_FAIL |
 * EXTENSION_CREATE_RECOVERY, as used by mdreadv()/mdwritev()).
 */
113/* ereport if segment not present */
114#define EXTENSION_FAIL (1 << 0)
115/* return NULL if segment not present */
116#define EXTENSION_RETURN_NULL (1 << 1)
117/* create new segments as needed */
118#define EXTENSION_CREATE (1 << 2)
119/* create new segments if needed during recovery */
120#define EXTENSION_CREATE_RECOVERY (1 << 3)
121/* don't try to open a segment, if not already open */
/* NOTE(review): bit 4 is unassigned here, apparently left by a removed flag — confirm against upstream before reusing it */
122#define EXTENSION_DONT_OPEN (1 << 5)
123
124
125/*
126 * Fixed-length string to represent paths to files that need to be built by
127 * md.c.
128 *
129 * The maximum number of segments is MaxBlockNumber / RELSEG_SIZE, where
130 * RELSEG_SIZE can be set to 1 (for testing only).
131 */
/* Decimal digits needed for the largest segment-number suffix. */
132#define SEGMENT_CHARS OIDCHARS
/* Max path length: base relation path + '.' separator (1 byte) + digits. */
133#define MD_PATH_STR_MAXLEN \
134 (\
135 REL_PATH_STR_MAXLEN \
136 + sizeof((char)'.') \
137 + SEGMENT_CHARS \
138 )
139typedef struct MdPathStr
140{
143
144
145/* local routines */
146static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum,
147 bool isRedo);
148static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
150 MdfdVec *seg);
151static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
152 BlockNumber segno);
153static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
154 BlockNumber segno);
156 ForkNumber forknum,
157 int nseg);
159 BlockNumber segno);
161 BlockNumber segno, int oflags);
163 BlockNumber blkno, bool skipFsync, int behavior);
165 MdfdVec *seg);
166
168static void md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel);
169
174
175
176static inline int
178{
179 int flags = O_RDWR | PG_BINARY;
180
182 flags |= PG_O_DIRECT;
183
184 return flags;
185}
186
187/*
188 * mdinit() -- Initialize private state for magnetic disk storage manager.
189 */
190void
197
198/*
199 * mdexists() -- Does the physical file exist?
200 *
201 * Note: this will return true for lingering files, with pending deletions
202 */
203bool
205{
206 /*
207 * Close it first, to ensure that we notice if the fork has been unlinked
208 * since we opened it. As an optimization, we can skip that in recovery,
209 * which already closes relations when dropping them.
210 */
211 if (!InRecovery)
212 mdclose(reln, forknum);
213
214 return (mdopenfork(reln, forknum, EXTENSION_RETURN_NULL) != NULL);
215}
216
217/*
218 * mdcreate() -- Create a new relation on magnetic disk.
219 *
220 * If isRedo is true, it's okay for the relation to exist already.
221 */
222void
224{
225 MdfdVec *mdfd;
226 RelPathStr path;
227 File fd;
228
229 if (isRedo && reln->md_num_open_segs[forknum] > 0)
230 return; /* created and opened already... */
231
232 Assert(reln->md_num_open_segs[forknum] == 0);
233
234 /*
235 * We may be using the target table space for the first time in this
236 * database, so create a per-database subdirectory if needed.
237 *
238 * XXX this is a fairly ugly violation of module layering, but this seems
239 * to be the best place to put the check. Maybe TablespaceCreateDbspace
240 * should be here and not in commands/tablespace.c? But that would imply
241 * importing a lot of stuff that smgr.c oughtn't know, either.
242 */
243 TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
244 reln->smgr_rlocator.locator.dbOid,
245 isRedo);
246
247 path = relpath(reln->smgr_rlocator, forknum);
248
250
251 if (fd < 0)
252 {
253 int save_errno = errno;
254
255 if (isRedo)
257 if (fd < 0)
258 {
259 /* be sure to report the error reported by create, not open */
263 errmsg("could not create file \"%s\": %m", path.str)));
264 }
265 }
266
267 _fdvec_resize(reln, forknum, 1);
268 mdfd = &reln->md_seg_fds[forknum][0];
269 mdfd->mdfd_vfd = fd;
270 mdfd->mdfd_segno = 0;
271
272 if (!SmgrIsTemp(reln))
274}
275
276/*
277 * mdunlink() -- Unlink a relation.
278 *
279 * Note that we're passed a RelFileLocatorBackend --- by the time this is called,
280 * there won't be an SMgrRelation hashtable entry anymore.
281 *
282 * forknum can be a fork number to delete a specific fork, or InvalidForkNumber
283 * to delete all forks.
284 *
285 * For regular relations, we don't unlink the first segment file of the rel,
286 * but just truncate it to zero length, and record a request to unlink it after
287 * the next checkpoint. Additional segments can be unlinked immediately,
288 * however. Leaving the empty file in place prevents that relfilenumber
289 * from being reused. The scenario this protects us from is:
290 * 1. We delete a relation (and commit, and actually remove its file).
291 * 2. We create a new relation, which by chance gets the same relfilenumber as
292 * the just-deleted one (OIDs must've wrapped around for that to happen).
293 * 3. We crash before another checkpoint occurs.
294 * During replay, we would delete the file and then recreate it, which is fine
295 * if the contents of the file were repopulated by subsequent WAL entries.
296 * But if we didn't WAL-log insertions, but instead relied on fsyncing the
297 * file after populating it (as we do at wal_level=minimal), the contents of
298 * the file would be lost forever. By leaving the empty file until after the
299 * next checkpoint, we prevent reassignment of the relfilenumber until it's
300 * safe, because relfilenumber assignment skips over any existing file.
301 *
302 * Additional segments, if any, are truncated and then unlinked. The reason
303 * for truncating is that other backends may still hold open FDs for these at
304 * the smgr level, so that the kernel can't remove the file yet. We want to
305 * reclaim the disk space right away despite that.
306 *
307 * We do not need to go through this dance for temp relations, though, because
308 * we never make WAL entries for temp rels, and so a temp rel poses no threat
309 * to the health of a regular rel that has taken over its relfilenumber.
310 * The fact that temp rels and regular rels have different file naming
311 * patterns provides additional safety. Other backends shouldn't have open
312 * FDs for them, either.
313 *
314 * We also don't do it while performing a binary upgrade. There is no reuse
315 * hazard in that case, since after a crash or even a simple ERROR, the
316 * upgrade fails and the whole cluster must be recreated from scratch.
317 * Furthermore, it is important to remove the files from disk immediately,
318 * because we may be about to reuse the same relfilenumber.
319 *
320 * All the above applies only to the relation's main fork; other forks can
321 * just be removed immediately, since they are not needed to prevent the
322 * relfilenumber from being recycled. Also, we do not carefully
323 * track whether other forks have been created or not, but just attempt to
324 * unlink them unconditionally; so we should never complain about ENOENT.
325 *
326 * If isRedo is true, it's unsurprising for the relation to be already gone.
327 * Also, we should remove the file immediately instead of queuing a request
328 * for later, since during redo there's no possibility of creating a
329 * conflicting relation.
330 *
331 * Note: we currently just never warn about ENOENT at all. We could warn in
332 * the main-fork, non-isRedo case, but it doesn't seem worth the trouble.
333 *
334 * Note: any failure should be reported as WARNING not ERROR, because
335 * we are usually not in a transaction anymore when this is called.
336 */
337void
339{
340 /* Now do the per-fork work */
341 if (forknum == InvalidForkNumber)
342 {
343 for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
344 mdunlinkfork(rlocator, forknum, isRedo);
345 }
346 else
347 mdunlinkfork(rlocator, forknum, isRedo);
348}
349
350/*
351 * Truncate a file to release disk space.
352 */
353static int
354do_truncate(const char *path)
355{
356 int save_errno;
357 int ret;
358
359 ret = pg_truncate(path, 0);
360
361 /* Log a warning here to avoid repetition in callers. */
362 if (ret < 0 && errno != ENOENT)
363 {
367 errmsg("could not truncate file \"%s\": %m", path)));
369 }
370
371 return ret;
372}
373
374static void
376{
377 RelPathStr path;
378 int ret;
379 int save_errno;
380
381 path = relpath(rlocator, forknum);
382
383 /*
384 * Truncate and then unlink the first segment, or just register a request
385 * to unlink it later, as described in the comments for mdunlink().
386 */
387 if (isRedo || IsBinaryUpgrade || forknum != MAIN_FORKNUM ||
389 {
390 if (!RelFileLocatorBackendIsTemp(rlocator))
391 {
392 /* Prevent other backends' fds from holding on to the disk space */
393 ret = do_truncate(path.str);
394
395 /* Forget any pending sync requests for the first segment */
397 register_forget_request(rlocator, forknum, 0 /* first seg */ );
399 }
400 else
401 ret = 0;
402
403 /* Next unlink the file, unless it was already found to be missing */
404 if (ret >= 0 || errno != ENOENT)
405 {
406 ret = unlink(path.str);
407 if (ret < 0 && errno != ENOENT)
408 {
412 errmsg("could not remove file \"%s\": %m", path.str)));
414 }
415 }
416 }
417 else
418 {
419 /* Prevent other backends' fds from holding on to the disk space */
420 ret = do_truncate(path.str);
421
422 /* Register request to unlink first segment later */
424 register_unlink_segment(rlocator, forknum, 0 /* first seg */ );
426 }
427
428 /*
429 * Delete any additional segments.
430 *
431 * Note that because we loop until getting ENOENT, we will correctly
432 * remove all inactive segments as well as active ones. Ideally we'd
433 * continue the loop until getting exactly that errno, but that risks an
434 * infinite loop if the problem is directory-wide (for instance, if we
435 * suddenly can't read the data directory itself). We compromise by
436 * continuing after a non-ENOENT truncate error, but stopping after any
437 * unlink error. If there is indeed a directory-wide problem, additional
438 * unlink attempts wouldn't work anyway.
439 */
440 if (ret >= 0 || errno != ENOENT)
441 {
443 BlockNumber segno;
444
445 for (segno = 1;; segno++)
446 {
447 sprintf(segpath.str, "%s.%u", path.str, segno);
448
449 if (!RelFileLocatorBackendIsTemp(rlocator))
450 {
451 /*
452 * Prevent other backends' fds from holding on to the disk
453 * space. We're done if we see ENOENT, though.
454 */
455 if (do_truncate(segpath.str) < 0 && errno == ENOENT)
456 break;
457
458 /*
459 * Forget any pending sync requests for this segment before we
460 * try to unlink.
461 */
462 register_forget_request(rlocator, forknum, segno);
463 }
464
465 if (unlink(segpath.str) < 0)
466 {
467 /* ENOENT is expected after the last segment... */
468 if (errno != ENOENT)
471 errmsg("could not remove file \"%s\": %m", segpath.str)));
472 break;
473 }
474 }
475 }
476}
477
478/*
479 * mdextend() -- Add a block to the specified relation.
480 *
481 * The semantics are nearly the same as mdwrite(): write at the
482 * specified position. However, this is to be used for the case of
483 * extending a relation (i.e., blocknum is at or beyond the current
484 * EOF). Note that we assume writing a block beyond current EOF
485 * causes intervening file space to become filled with zeroes.
486 */
487void
489 const void *buffer, bool skipFsync)
490{
491 pgoff_t seekpos;
492 int nbytes;
493 MdfdVec *v;
494
495 /* If this build supports direct I/O, the buffer must be I/O aligned. */
496 if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
497 Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer));
498
499 /* This assert is too expensive to have on normally ... */
500#ifdef CHECK_WRITE_VS_EXTEND
501 Assert(blocknum >= mdnblocks(reln, forknum));
502#endif
503
504 /*
505 * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
506 * more --- we mustn't create a block whose number actually is
507 * InvalidBlockNumber. (Note that this failure should be unreachable
508 * because of upstream checks in bufmgr.c.)
509 */
510 if (blocknum == InvalidBlockNumber)
513 errmsg("cannot extend file \"%s\" beyond %u blocks",
514 relpath(reln->smgr_rlocator, forknum).str,
516
517 v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
518
519 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
520
521 Assert(seekpos < (pgoff_t) BLCKSZ * RELSEG_SIZE);
522
523 if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != BLCKSZ)
524 {
525 if (nbytes < 0)
528 errmsg("could not extend file \"%s\": %m",
530 errhint("Check free disk space.")));
531 /* short write: complain appropriately */
534 errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
536 nbytes, BLCKSZ, blocknum),
537 errhint("Check free disk space.")));
538 }
539
540 if (!skipFsync && !SmgrIsTemp(reln))
541 register_dirty_segment(reln, forknum, v);
542
543 Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
544}
545
546/*
547 * mdzeroextend() -- Add new zeroed out blocks to the specified relation.
548 *
549 * Similar to mdextend(), except the relation can be extended by multiple
550 * blocks at once and the added blocks will be filled with zeroes.
551 */
552void
554 BlockNumber blocknum, int nblocks, bool skipFsync)
555{
556 MdfdVec *v;
557 BlockNumber curblocknum = blocknum;
558 int remblocks = nblocks;
559
560 Assert(nblocks > 0);
561
562 /* This assert is too expensive to have on normally ... */
563#ifdef CHECK_WRITE_VS_EXTEND
564 Assert(blocknum >= mdnblocks(reln, forknum));
565#endif
566
567 /*
568 * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
569 * more --- we mustn't create a block whose number actually is
570 * InvalidBlockNumber or larger.
571 */
572 if ((uint64) blocknum + nblocks >= (uint64) InvalidBlockNumber)
575 errmsg("cannot extend file \"%s\" beyond %u blocks",
576 relpath(reln->smgr_rlocator, forknum).str,
578
579 while (remblocks > 0)
580 {
582 pgoff_t seekpos = (pgoff_t) BLCKSZ * segstartblock;
583 int numblocks;
584
587 else
589
591
594
595 /*
596 * If available and useful, use posix_fallocate() (via
597 * FileFallocate()) to extend the relation. That's often more
598 * efficient than using write(), as it commonly won't cause the kernel
599 * to allocate page cache space for the extended pages.
600 *
601 * However, we don't use FileFallocate() for small extensions, as it
602 * defeats delayed allocation on some filesystems. Not clear where
603 * that decision should be made though? For now just use a cutoff of
604 * 8, anything between 4 and 8 worked OK in some local testing.
605 */
606 if (numblocks > 8 &&
608 {
609 int ret = 0;
610
611#ifdef HAVE_POSIX_FALLOCATE
613 {
614 ret = FileFallocate(v->mdfd_vfd,
615 seekpos, (pgoff_t) BLCKSZ * numblocks,
617 }
618 else
619#endif
620 {
621 elog(ERROR, "unsupported file_extend_method: %d",
623 }
624 if (ret != 0)
625 {
628 errmsg("could not extend file \"%s\" with FileFallocate(): %m",
630 errhint("Check free disk space."));
631 }
632 }
633 else
634 {
635 int ret;
636
637 /*
638 * Even if we don't want to use fallocate, we can still extend a
639 * bit more efficiently than writing each 8kB block individually.
640 * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
641 * to avoid multiple writes or needing a zeroed buffer for the
642 * whole length of the extension.
643 */
644 ret = FileZero(v->mdfd_vfd,
645 seekpos, (pgoff_t) BLCKSZ * numblocks,
647 if (ret < 0)
650 errmsg("could not extend file \"%s\": %m",
652 errhint("Check free disk space."));
653 }
654
655 if (!skipFsync && !SmgrIsTemp(reln))
656 register_dirty_segment(reln, forknum, v);
657
658 Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
659
662 }
663}
664
665/*
666 * mdopenfork() -- Open one fork of the specified relation.
667 *
668 * Note we only open the first segment, when there are multiple segments.
669 *
670 * If first segment is not present, either ereport or return NULL according
671 * to "behavior". We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
672 * EXTENSION_CREATE means it's OK to extend an existing relation, not to
673 * invent one out of whole cloth.
674 */
675static MdfdVec *
676mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
677{
678 MdfdVec *mdfd;
679 RelPathStr path;
680 File fd;
681
682 /* No work if already open */
683 if (reln->md_num_open_segs[forknum] > 0)
684 return &reln->md_seg_fds[forknum][0];
685
686 path = relpath(reln->smgr_rlocator, forknum);
687
689
690 if (fd < 0)
691 {
692 if ((behavior & EXTENSION_RETURN_NULL) &&
694 return NULL;
697 errmsg("could not open file \"%s\": %m", path.str)));
698 }
699
700 _fdvec_resize(reln, forknum, 1);
701 mdfd = &reln->md_seg_fds[forknum][0];
702 mdfd->mdfd_vfd = fd;
703 mdfd->mdfd_segno = 0;
704
706
707 return mdfd;
708}
709
710/*
711 * mdopen() -- Initialize newly-opened relation.
712 */
713void
715{
716 /* mark it not open */
717 for (int forknum = 0; forknum <= MAX_FORKNUM; forknum++)
718 reln->md_num_open_segs[forknum] = 0;
719}
720
721/*
722 * mdclose() -- Close the specified relation, if it isn't closed already.
723 */
724void
726{
727 int nopensegs = reln->md_num_open_segs[forknum];
728
729 /* No work if already closed */
730 if (nopensegs == 0)
731 return;
732
733 /* close segments starting from the end */
734 while (nopensegs > 0)
735 {
736 MdfdVec *v = &reln->md_seg_fds[forknum][nopensegs - 1];
737
739 _fdvec_resize(reln, forknum, nopensegs - 1);
740 nopensegs--;
741 }
742}
743
744/*
745 * mdprefetch() -- Initiate asynchronous read of the specified blocks of a relation
746 */
747bool
749 int nblocks)
750{
751#ifdef USE_PREFETCH
752
754
755 if ((uint64) blocknum + nblocks > (uint64) MaxBlockNumber + 1)
756 return false;
757
758 while (nblocks > 0)
759 {
760 pgoff_t seekpos;
761 MdfdVec *v;
763
764 v = _mdfd_getseg(reln, forknum, blocknum, false,
766 if (v == NULL)
767 return false;
768
769 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
770
771 Assert(seekpos < (pgoff_t) BLCKSZ * RELSEG_SIZE);
772
774 Min(nblocks,
775 RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
776
779
780 blocknum += nblocks_this_segment;
781 nblocks -= nblocks_this_segment;
782 }
783#endif /* USE_PREFETCH */
784
785 return true;
786}
787
788/*
789 * Convert an array of buffer address into an array of iovec objects, and
790 * return the number that were required. 'iov' must have enough space for up
791 * to 'nblocks' elements, but the number used may be less depending on
792 * merging. In the case of a run of fully contiguous buffers, a single iovec
793 * will be populated that can be handled as a plain non-vectored I/O.
794 */
795static int
796buffers_to_iovec(struct iovec *iov, void **buffers, int nblocks)
797{
798 struct iovec *iovp;
799 int iovcnt;
800
801 Assert(nblocks >= 1);
802
803 /* If this build supports direct I/O, buffers must be I/O aligned. */
804 for (int i = 0; i < nblocks; ++i)
805 {
806 if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
807 Assert((uintptr_t) buffers[i] ==
808 TYPEALIGN(PG_IO_ALIGN_SIZE, buffers[i]));
809 }
810
811 /* Start the first iovec off with the first buffer. */
812 iovp = &iov[0];
813 iovp->iov_base = buffers[0];
814 iovp->iov_len = BLCKSZ;
815 iovcnt = 1;
816
817 /* Try to merge the rest. */
818 for (int i = 1; i < nblocks; ++i)
819 {
820 void *buffer = buffers[i];
821
822 if (((char *) iovp->iov_base + iovp->iov_len) == buffer)
823 {
824 /* Contiguous with the last iovec. */
825 iovp->iov_len += BLCKSZ;
826 }
827 else
828 {
829 /* Need a new iovec. */
830 iovp++;
831 iovp->iov_base = buffer;
832 iovp->iov_len = BLCKSZ;
833 iovcnt++;
834 }
835 }
836
837 return iovcnt;
838}
839
840/*
841 * mdmaxcombine() -- Return the maximum number of total blocks that can be
842 * combined with an IO starting at blocknum.
843 */
844uint32
846 BlockNumber blocknum)
847{
848 BlockNumber segoff;
849
850 segoff = blocknum % ((BlockNumber) RELSEG_SIZE);
851
852 return RELSEG_SIZE - segoff;
853}
854
855/*
856 * mdreadv() -- Read the specified blocks from a relation.
857 */
858void
860 void **buffers, BlockNumber nblocks)
861{
862 while (nblocks > 0)
863 {
864 struct iovec iov[PG_IOV_MAX];
865 int iovcnt;
866 pgoff_t seekpos;
867 int nbytes;
868 MdfdVec *v;
871 size_t size_this_segment;
872
873 v = _mdfd_getseg(reln, forknum, blocknum, false,
875
876 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
877
878 Assert(seekpos < (pgoff_t) BLCKSZ * RELSEG_SIZE);
879
881 Min(nblocks,
882 RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
884
885 if (nblocks_this_segment != nblocks)
886 elog(ERROR, "read crosses segment boundary");
887
891
892 /*
893 * Inner loop to continue after a short read. We'll keep going until
894 * we hit EOF rather than assuming that a short read means we hit the
895 * end.
896 */
897 for (;;)
898 {
899 TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
900 reln->smgr_rlocator.locator.spcOid,
901 reln->smgr_rlocator.locator.dbOid,
902 reln->smgr_rlocator.locator.relNumber,
903 reln->smgr_rlocator.backend);
904 nbytes = FileReadV(v->mdfd_vfd, iov, iovcnt, seekpos,
906 TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
907 reln->smgr_rlocator.locator.spcOid,
908 reln->smgr_rlocator.locator.dbOid,
909 reln->smgr_rlocator.locator.relNumber,
910 reln->smgr_rlocator.backend,
911 nbytes,
913
914#ifdef SIMULATE_SHORT_READ
915 nbytes = Min(nbytes, 4096);
916#endif
917
918 if (nbytes < 0)
921 errmsg("could not read blocks %u..%u in file \"%s\": %m",
922 blocknum,
923 blocknum + nblocks_this_segment - 1,
924 FilePathName(v->mdfd_vfd))));
925
926 if (nbytes == 0)
927 {
928 /*
929 * We are at or past EOF, or we read a partial block at EOF.
930 * Normally this is an error; upper levels should never try to
931 * read a nonexistent block. However, if zero_damaged_pages
932 * is ON or we are InRecovery, we should instead return zeroes
933 * without complaining. This allows, for example, the case of
934 * trying to update a block that was later truncated away.
935 *
936 * NB: We think that this codepath is unreachable in recovery
937 * and incomplete with zero_damaged_pages, as missing segments
938 * are not created. Putting blocks into the buffer-pool that
939 * do not exist on disk is rather problematic, as it will not
940 * be found by scans that rely on smgrnblocks(), as they are
941 * beyond EOF. It also can cause weird problems with relation
942 * extension, as relation extension does not expect blocks
943 * beyond EOF to exist.
944 *
945 * Therefore we do not want to copy the logic into
946 * mdstartreadv(), where it would have to be more complicated
947 * due to potential differences in the zero_damaged_pages
948 * setting between the definer and completor of IO.
949 *
950 * For PG 18, we are putting an Assert(false) in mdreadv()
951 * (triggering failures in assertion-enabled builds, but
952 * continuing to work in production builds). Afterwards we
953 * plan to remove this code entirely.
954 */
956 {
957 Assert(false); /* see comment above */
958
961 ++i)
962 memset(buffers[i], 0, BLCKSZ);
963 break;
964 }
965 else
968 errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
969 blocknum,
970 blocknum + nblocks_this_segment - 1,
974 }
975
976 /* One loop should usually be enough. */
977 transferred_this_segment += nbytes;
980 break;
981
982 /* Adjust position and vectors after a short read. */
983 seekpos += nbytes;
985 }
986
987 nblocks -= nblocks_this_segment;
988 buffers += nblocks_this_segment;
989 blocknum += nblocks_this_segment;
990 }
991}
992
993/*
994 * mdstartreadv() -- Asynchronous version of mdreadv().
995 */
996void
998 SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
999 void **buffers, BlockNumber nblocks)
1000{
1001 pgoff_t seekpos;
1002 MdfdVec *v;
1004 struct iovec *iov;
1005 int iovcnt;
1006 int ret;
1007
1008 v = _mdfd_getseg(reln, forknum, blocknum, false,
1010
1011 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1012
1013 Assert(seekpos < (pgoff_t) BLCKSZ * RELSEG_SIZE);
1014
1016 Min(nblocks,
1017 RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
1018
1019 if (nblocks_this_segment != nblocks)
1020 elog(ERROR, "read crossing segment boundary");
1021
1023
1024 Assert(nblocks <= iovcnt);
1025
1027
1029
1032
1034 reln,
1035 forknum,
1036 blocknum,
1037 nblocks,
1038 false);
1040
1042 if (ret != 0)
1043 ereport(ERROR,
1045 errmsg("could not start reading blocks %u..%u in file \"%s\": %m",
1046 blocknum,
1047 blocknum + nblocks_this_segment - 1,
1048 FilePathName(v->mdfd_vfd))));
1049
1050 /*
1051 * The error checks corresponding to the post-read checks in mdreadv() are
1052 * in md_readv_complete().
1053 *
1054 * However we chose, at least for now, to not implement the
1055 * zero_damaged_pages logic present in mdreadv(). As outlined in mdreadv()
1056 * that logic is rather problematic, and we want to get rid of it. Here
1057 * equivalent logic would have to be more complicated due to potential
1058 * differences in the zero_damaged_pages setting between the definer and
1059 * completor of IO.
1060 */
1061}
1062
1063/*
1064 * mdwritev() -- Write the supplied blocks at the appropriate location.
1065 *
1066 * This is to be used only for updating already-existing blocks of a
1067 * relation (ie, those before the current EOF). To extend a relation,
1068 * use mdextend().
1069 */
1070void
1072 const void **buffers, BlockNumber nblocks, bool skipFsync)
1073{
1074 /* This assert is too expensive to have on normally ... */
1075#ifdef CHECK_WRITE_VS_EXTEND
1076 Assert((uint64) blocknum + (uint64) nblocks <= (uint64) mdnblocks(reln, forknum));
1077#endif
1078
1079 while (nblocks > 0)
1080 {
1081 struct iovec iov[PG_IOV_MAX];
1082 int iovcnt;
1083 pgoff_t seekpos;
1084 int nbytes;
1085 MdfdVec *v;
1088 size_t size_this_segment;
1089
1090 v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
1092
1093 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1094
1095 Assert(seekpos < (pgoff_t) BLCKSZ * RELSEG_SIZE);
1096
1098 Min(nblocks,
1099 RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
1101
1102 if (nblocks_this_segment != nblocks)
1103 elog(ERROR, "write crosses segment boundary");
1104
1105 iovcnt = buffers_to_iovec(iov, (void **) buffers, nblocks_this_segment);
1108
1109 /*
1110 * Inner loop to continue after a short write. If the reason is that
1111 * we're out of disk space, a future attempt should get an ENOSPC
1112 * error from the kernel.
1113 */
1114 for (;;)
1115 {
1116 TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
1117 reln->smgr_rlocator.locator.spcOid,
1118 reln->smgr_rlocator.locator.dbOid,
1119 reln->smgr_rlocator.locator.relNumber,
1120 reln->smgr_rlocator.backend);
1121 nbytes = FileWriteV(v->mdfd_vfd, iov, iovcnt, seekpos,
1123 TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
1124 reln->smgr_rlocator.locator.spcOid,
1125 reln->smgr_rlocator.locator.dbOid,
1126 reln->smgr_rlocator.locator.relNumber,
1127 reln->smgr_rlocator.backend,
1128 nbytes,
1130
1131#ifdef SIMULATE_SHORT_WRITE
1132 nbytes = Min(nbytes, 4096);
1133#endif
1134
1135 if (nbytes < 0)
1136 {
1137 bool enospc = errno == ENOSPC;
1138
1139 ereport(ERROR,
1141 errmsg("could not write blocks %u..%u in file \"%s\": %m",
1142 blocknum,
1143 blocknum + nblocks_this_segment - 1,
1145 enospc ? errhint("Check free disk space.") : 0));
1146 }
1147
1148 /* One loop should usually be enough. */
1149 transferred_this_segment += nbytes;
1152 break;
1153
1154 /* Adjust position and iovecs after a short write. */
1155 seekpos += nbytes;
1157 }
1158
1159 if (!skipFsync && !SmgrIsTemp(reln))
1160 register_dirty_segment(reln, forknum, v);
1161
1162 nblocks -= nblocks_this_segment;
1163 buffers += nblocks_this_segment;
1164 blocknum += nblocks_this_segment;
1165 }
1166}
1167
1168
 1169/*
 1170 * mdwriteback() -- Tell the kernel to write pages back to storage.
 1171 *
 1172 * This accepts a range of blocks because flushing several pages at once is
 1173 * considerably more efficient than doing so individually.
 1174 */
 1175void
/* NOTE(review): original line 1176 (the "mdwriteback(SMgrRelation reln, ForkNumber forknum," name line) was lost in extraction -- verify against upstream md.c */
 1177 BlockNumber blocknum, BlockNumber nblocks)
 1178{
/* NOTE(review): original line 1179 is missing here -- re-extract to confirm its content */
 1180
 1181 /*
 1182 * Issue flush requests in as few requests as possible; have to split at
 1183 * segment boundaries though, since those are actually separate files.
 1184 */
 1185 while (nblocks > 0)
 1186 {
 1187 BlockNumber nflush = nblocks;
 1188 pgoff_t seekpos;
 1189 MdfdVec *v;
 1190 int segnum_start,
 1191 segnum_end;
 1192
 1193 v = _mdfd_getseg(reln, forknum, blocknum, true /* not used */ ,
/* NOTE(review): original line 1194 (the 'behavior' argument, presumably EXTENSION_DONT_OPEN given the comment below) is missing -- confirm */
 1195
 1196 /*
 1197 * We might be flushing buffers of already removed relations, that's
 1198 * ok, just ignore that case. If the segment file wasn't open already
 1199 * (ie from a recent mdwrite()), then we don't want to re-open it, to
 1200 * avoid a race with PROCSIGNAL_BARRIER_SMGRRELEASE that might leave
 1201 * us with a descriptor to a file that is about to be unlinked.
 1202 */
 1203 if (!v)
 1204 return;
 1205
 1206 /* compute offset inside the current segment */
 1207 segnum_start = blocknum / RELSEG_SIZE;
 1208
 1209 /* compute number of desired writes within the current segment */
 1210 segnum_end = (blocknum + nblocks - 1) / RELSEG_SIZE;
 1211 if (segnum_start != segnum_end)
 1212 nflush = RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE));
 1213
 1214 Assert(nflush >= 1);
 1215 Assert(nflush <= nblocks);
 1216
 1217 seekpos = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
 1218
/* NOTE(review): original line 1219 (presumably the FileWriteback() call that performs the flush) is missing -- confirm against upstream */
 1220
 1221 nblocks -= nflush;
 1222 blocknum += nflush;
 1223 }
 1224}
1225
1226/*
1227 * mdnblocks() -- Get the number of blocks stored in a relation.
1228 *
1229 * Important side effect: all active segments of the relation are opened
1230 * and added to the md_seg_fds array. If this routine has not been
1231 * called, then only segments up to the last one actually touched
1232 * are present in the array.
1233 */
1236{
1237 MdfdVec *v;
1238 BlockNumber nblocks;
1239 BlockNumber segno;
1240
1241 mdopenfork(reln, forknum, EXTENSION_FAIL);
1242
1243 /* mdopen has opened the first segment */
1244 Assert(reln->md_num_open_segs[forknum] > 0);
1245
1246 /*
1247 * Start from the last open segments, to avoid redundant seeks. We have
1248 * previously verified that these segments are exactly RELSEG_SIZE long,
1249 * and it's useless to recheck that each time.
1250 *
1251 * NOTE: this assumption could only be wrong if another backend has
1252 * truncated the relation. We rely on higher code levels to handle that
1253 * scenario by closing and re-opening the md fd, which is handled via
1254 * relcache flush. (Since the checkpointer doesn't participate in
1255 * relcache flush, it could have segment entries for inactive segments;
1256 * that's OK because the checkpointer never needs to compute relation
1257 * size.)
1258 */
1259 segno = reln->md_num_open_segs[forknum] - 1;
1260 v = &reln->md_seg_fds[forknum][segno];
1261
1262 for (;;)
1263 {
1264 nblocks = _mdnblocks(reln, forknum, v);
1265 if (nblocks > ((BlockNumber) RELSEG_SIZE))
1266 elog(FATAL, "segment too big");
1267 if (nblocks < ((BlockNumber) RELSEG_SIZE))
1268 return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
1269
1270 /*
1271 * If segment is exactly RELSEG_SIZE, advance to next one.
1272 */
1273 segno++;
1274
1275 /*
1276 * We used to pass O_CREAT here, but that has the disadvantage that it
1277 * might create a segment which has vanished through some operating
1278 * system misadventure. In such a case, creating the segment here
1279 * undermines _mdfd_getseg's attempts to notice and report an error
1280 * upon access to a missing segment.
1281 */
1282 v = _mdfd_openseg(reln, forknum, segno, 0);
1283 if (v == NULL)
1284 return segno * ((BlockNumber) RELSEG_SIZE);
1285 }
1286}
1287
 1288/*
 1289 * mdtruncate() -- Truncate relation to specified number of blocks.
 1290 *
 1291 * Guaranteed not to allocate memory, so it can be used in a critical section.
 1292 * Caller must have called smgrnblocks() to obtain curnblk while holding a
 1293 * sufficient lock to prevent a change in relation size, and not used any smgr
 1294 * functions for this relation or handled interrupts in between. This makes
 1295 * sure we have opened all active segments, so that truncate loop will get
 1296 * them all!
 1297 *
 1298 * If nblocks > curnblk, the request is ignored when we are InRecovery,
 1299 * otherwise, an error is raised.
 1300 */
 1301void
/* NOTE(review): original lines 1302-1303 (signature: mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber curnblk, BlockNumber nblocks), per the prototype in the index) and 1305 (presumably the 'priorblocks' declaration used below) were lost in extraction */
 1304{
 1306 int curopensegs;
 1307
 1308 if (nblocks > curnblk)
 1309 {
 1310 /* Bogus request ... but no complaint if InRecovery */
 1311 if (InRecovery)
 1312 return;
 1313 ereport(ERROR,
 1314 (errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
 1315 relpath(reln->smgr_rlocator, forknum).str,
 1316 nblocks, curnblk)));
 1317 }
 1318 if (nblocks == curnblk)
 1319 return; /* no work */
 1320
 1321 /*
 1322 * Truncate segments, starting at the last one. Starting at the end makes
 1323 * managing the memory for the fd array easier, should there be errors.
 1324 */
 1325 curopensegs = reln->md_num_open_segs[forknum];
 1326 while (curopensegs > 0)
 1327 {
 1328 MdfdVec *v;
 1329
/* NOTE(review): original line 1330 (presumably computes 'priorblocks', the block count preceding this segment) is missing -- confirm */
 1331
 1332 v = &reln->md_seg_fds[forknum][curopensegs - 1];
 1333
 1334 if (priorblocks > nblocks)
 1335 {
 1336 /*
 1337 * This segment is no longer active. We truncate the file, but do
 1338 * not delete it, for reasons explained in the header comments.
 1339 */
/* NOTE(review): original lines 1340 (the FileTruncate() call being error-checked) and 1342 (errcode) are missing -- confirm */
 1341 ereport(ERROR,
 1343 errmsg("could not truncate file \"%s\": %m",
 1344 FilePathName(v->mdfd_vfd))));
 1345
 1346 if (!SmgrIsTemp(reln))
 1347 register_dirty_segment(reln, forknum, v);
 1348
 1349 /* we never drop the 1st segment */
 1350 Assert(v != &reln->md_seg_fds[forknum][0]);
 1351
 1352 FileClose(v->mdfd_vfd);
 1353 _fdvec_resize(reln, forknum, curopensegs - 1);
 1354 }
 1355 else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
 1356 {
 1357 /*
 1358 * This is the last segment we want to keep. Truncate the file to
 1359 * the right length. NOTE: if nblocks is exactly a multiple K of
 1360 * RELSEG_SIZE, we will truncate the K+1st segment to 0 length but
 1361 * keep it. This adheres to the invariant given in the header
 1362 * comments.
 1363 */
/* NOTE(review): original lines 1364, 1366, 1368 and 1370 (last-segment length computation, the FileTruncate() call, errcode, and FilePathName argument) are missing -- confirm */
 1365
 1367 ereport(ERROR,
 1369 errmsg("could not truncate file \"%s\" to %u blocks: %m",
 1371 nblocks)));
 1372 if (!SmgrIsTemp(reln))
 1373 register_dirty_segment(reln, forknum, v);
 1374 }
 1375 else
 1376 {
 1377 /*
 1378 * We still need this segment, so nothing to do for this and any
 1379 * earlier segment.
 1380 */
 1381 break;
 1382 }
 1383 curopensegs--;
 1384 }
 1385}
1386
1387/*
1388 * mdregistersync() -- Mark whole relation as needing fsync
1389 */
1390void
1392{
1393 int segno;
1394 int min_inactive_seg;
1395
1396 /*
1397 * NOTE: mdnblocks makes sure we have opened all active segments, so that
1398 * the loop below will get them all!
1399 */
1400 mdnblocks(reln, forknum);
1401
1402 min_inactive_seg = segno = reln->md_num_open_segs[forknum];
1403
1404 /*
1405 * Temporarily open inactive segments, then close them after sync. There
1406 * may be some inactive segments left opened after error, but that is
1407 * harmless. We don't bother to clean them up and take a risk of further
1408 * trouble. The next mdclose() will soon close them.
1409 */
1410 while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
1411 segno++;
1412
1413 while (segno > 0)
1414 {
1415 MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
1416
1417 register_dirty_segment(reln, forknum, v);
1418
1419 /* Close inactive segments immediately */
1420 if (segno > min_inactive_seg)
1421 {
1422 FileClose(v->mdfd_vfd);
1423 _fdvec_resize(reln, forknum, segno - 1);
1424 }
1425
1426 segno--;
1427 }
1428}
1429
 1430/*
 1431 * mdimmedsync() -- Immediately sync a relation to stable storage.
 1432 *
 1433 * Note that only writes already issued are synced; this routine knows
 1434 * nothing of dirty buffers that may exist inside the buffer manager. We
 1435 * sync active and inactive segments; smgrDoPendingSyncs() relies on this.
 1436 * Consider a relation skipping WAL. Suppose a checkpoint syncs blocks of
 1437 * some segment, then mdtruncate() renders that segment inactive. If we
 1438 * crash before the next checkpoint syncs the newly-inactive segment, that
 1439 * segment may survive recovery, reintroducing unwanted data into the table.
 1440 */
 1441void
/* NOTE(review): original line 1442 (the "mdimmedsync(SMgrRelation reln, ForkNumber forknum)" name line) was lost in extraction -- verify against upstream */
 1443{
 1444 int segno;
 1445 int min_inactive_seg;
 1446
 1447 /*
 1448 * NOTE: mdnblocks makes sure we have opened all active segments, so that
 1449 * the loop below will get them all!
 1450 */
 1451 mdnblocks(reln, forknum);
 1452
 1453 min_inactive_seg = segno = reln->md_num_open_segs[forknum];
 1454
 1455 /*
 1456 * Temporarily open inactive segments, then close them after sync. There
 1457 * may be some inactive segments left opened after fsync() error, but that
 1458 * is harmless. We don't bother to clean them up and take a risk of
 1459 * further trouble. The next mdclose() will soon close them.
 1460 */
 1461 while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
 1462 segno++;
 1463
 1464 while (segno > 0)
 1465 {
 1466 MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
 1467
 1468 /*
 1469 * fsyncs done through mdimmedsync() should be tracked in a separate
 1470 * IOContext than those done through mdsyncfiletag() to differentiate
 1471 * between unavoidable client backend fsyncs (e.g. those done during
 1472 * index build) and those which ideally would have been done by the
 1473 * checkpointer. Since other IO operations bypassing the buffer
 1474 * manager could also be tracked in such an IOContext, wait until
 1475 * these are also tracked to track immediate fsyncs.
 1476 */
/* NOTE(review): original lines 1477-1479 (the FileSync() call being error-checked plus the ereport/errcode lines) are missing -- confirm against upstream */
 1480 errmsg("could not fsync file \"%s\": %m",
 1481 FilePathName(v->mdfd_vfd))));
 1482
 1483 /* Close inactive segments immediately */
 1484 if (segno > min_inactive_seg)
 1485 {
 1486 FileClose(v->mdfd_vfd);
 1487 _fdvec_resize(reln, forknum, segno - 1);
 1488 }
 1489
 1490 segno--;
 1491 }
 1492}
1493
/* Returns the raw kernel file descriptor for the segment holding 'blocknum', and sets *off to the byte offset of that block within the segment file. */
 1494int
/* NOTE(review): original line 1495 (signature; per index prototype: mdfd(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, uint32 *off)) was lost in extraction */
 1496{
 1497 MdfdVec *v = mdopenfork(reln, forknum, EXTENSION_FAIL);
 1498
 1499 v = _mdfd_getseg(reln, forknum, blocknum, false,
/* NOTE(review): original line 1500 (the 'behavior' flags argument) is missing -- confirm against upstream */
 1501
 1502 *off = (pgoff_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
 1503
 1504 Assert(*off < (pgoff_t) BLCKSZ * RELSEG_SIZE);
 1505
 1506 return FileGetRawDesc(v->mdfd_vfd);
 1507}
1508
 1509/*
 1510 * register_dirty_segment() -- Mark a relation segment as needing fsync
 1511 *
 1512 * If there is a local pending-ops table, just make an entry in it for
 1513 * ProcessSyncRequests to process later. Otherwise, try to pass off the
 1514 * fsync request to the checkpointer process. If that fails, just do the
 1515 * fsync locally before returning (we hope this will not happen often
 1516 * enough to be a performance problem).
 1517 */
 1518static void
/* NOTE(review): original line 1519 (signature; per index prototype: register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)) was lost in extraction */
 1520{
 1521 FileTag tag;
 1522
 1523 INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
 1524
 1525 /* Temp relations should never be fsync'd */
/* NOTE(review): original line 1526 (presumably an Assert backing the comment above) is missing -- confirm */
 1527
 1528 if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ ))
 1529 {
/* NOTE(review): original lines 1530 and 1532 (a local declaration and the ereport(DEBUG1-level start of the message below) are missing -- confirm */
 1531
 1533 (errmsg_internal("could not forward fsync request because request queue is full")));
 1534
/* NOTE(review): original lines 1535 and 1537-1539 (IO-time bookkeeping start, plus the FileSync() call being error-checked and its ereport/errcode lines) are missing -- confirm */
 1536
 1540 errmsg("could not fsync file \"%s\": %m",
 1541 FilePathName(seg->mdfd_vfd))));
 1542
 1543 /*
 1544 * We have no way of knowing if the current IOContext is
 1545 * IOCONTEXT_NORMAL or IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] at this
 1546 * point, so count the fsync as being in the IOCONTEXT_NORMAL
 1547 * IOContext. This is probably okay, because the number of backend
 1548 * fsyncs doesn't say anything about the efficacy of the
 1549 * BufferAccessStrategy. And counting both fsyncs done in
 1550 * IOCONTEXT_NORMAL and IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] under
 1551 * IOCONTEXT_NORMAL is likely clearer when investigating the number of
 1552 * backend fsyncs.
 1553 */
/* NOTE(review): original line 1554 (first line of the pgstat_count_io_op_time() call completed below) is missing -- confirm */
 1555 IOOP_FSYNC, io_start, 1, 0);
 1556 }
 1557}
1558
 1559/*
 1560 * register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
 1561 */
 1562static void
/* NOTE(review): original line 1563 (first signature line; per index prototype: register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum, ...) ) was lost in extraction */
 1564 BlockNumber segno)
 1565{
 1566 FileTag tag;
 1567
 1568 INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
 1569
 1570 /* Should never be used with temp relations */
/* NOTE(review): original line 1571 (presumably an Assert backing the comment above) is missing -- confirm */
 1572
 1573 RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
 1574}
1575
1576/*
1577 * register_forget_request() -- forget any fsyncs for a relation fork's segment
1578 */
1579static void
1581 BlockNumber segno)
1582{
1583 FileTag tag;
1584
1585 INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
1586
1587 RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
1588}
1589
 1590/*
 1591 * ForgetDatabaseSyncRequests -- forget any fsyncs and unlinks for a DB
 1592 */
 1593void
/* NOTE(review): original line 1594 (the "ForgetDatabaseSyncRequests(Oid dbid)" name line) was lost in extraction -- verify against upstream */
 1595{
 1596 FileTag tag;
 1597 RelFileLocator rlocator;
 1598
/* Build a locator that identifies only the database; spcOid/relNumber are don't-cares for a filter request. */
 1599 rlocator.dbOid = dbid;
 1600 rlocator.spcOid = 0;
 1601 rlocator.relNumber = 0;
 1602
/* NOTE(review): original line 1603 (presumably the INIT_MD_FILETAG() call initializing 'tag' from 'rlocator') is missing -- confirm */
 1604
 1605 RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
 1606}
1607
 1608/*
 1609 * DropRelationFiles -- drop files of all given relations
 1610 */
 1611void
/* NOTE(review): original line 1612 (signature; per index prototype: DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)) was lost in extraction */
 1613{
/* NOTE(review): original lines 1614 and 1617 (the 'srels' array declaration and its allocation) are missing -- confirm */
 1615 int i;
 1616
 1618 for (i = 0; i < ndelrels; i++)
 1619 {
/* NOTE(review): original line 1620 (presumably opens the relation into 'srel') is missing -- confirm */
 1621
 1622 if (isRedo)
 1623 {
/* NOTE(review): original lines 1624 and 1627 (fork-loop setup and the per-fork forget call) are missing -- confirm */
 1625
 1626 for (fork = 0; fork <= MAX_FORKNUM; fork++)
 1628 }
 1629 srels[i] = srel;
 1630 }
 1631
/* NOTE(review): original line 1632 (presumably the bulk smgr unlink/close call over 'srels') is missing -- confirm */
 1633
 1634 for (i = 0; i < ndelrels; i++)
 1635 smgrclose(srels[i]);
 1636 pfree(srels);
 1637}
1638
1639
 1640/*
 1641 * _fdvec_resize() -- Resize the fork's open segments array
 1642 */
 1643static void
/* NOTE(review): original line 1644 (first signature line: "_fdvec_resize(SMgrRelation reln,") was lost in extraction */
 1645 ForkNumber forknum,
 1646 int nseg)
 1647{
 1648 if (nseg == 0)
 1649 {
/* Shrinking to zero: free the array entirely. */
 1650 if (reln->md_num_open_segs[forknum] > 0)
 1651 {
 1652 pfree(reln->md_seg_fds[forknum]);
 1653 reln->md_seg_fds[forknum] = NULL;
 1654 }
 1655 }
 1656 else if (reln->md_num_open_segs[forknum] == 0)
 1657 {
 1658 reln->md_seg_fds[forknum] =
/* NOTE(review): original line 1659 (the allocation expression, presumably a MemoryContextAlloc of nseg MdfdVec entries) is missing -- confirm */
 1660 }
 1661 else if (nseg > reln->md_num_open_segs[forknum])
 1662 {
 1663 /*
 1664 * It doesn't seem worthwhile complicating the code to amortize
 1665 * repalloc() calls. Those are far faster than PathNameOpenFile() or
 1666 * FileClose(), and the memory context internally will sometimes avoid
 1667 * doing an actual reallocation.
 1668 */
 1669 reln->md_seg_fds[forknum] =
 1670 repalloc(reln->md_seg_fds[forknum],
 1671 sizeof(MdfdVec) * nseg);
 1672 }
 1673 else
 1674 {
 1675 /*
 1676 * We don't reallocate a smaller array, because we want mdtruncate()
 1677 * to be able to promise that it won't allocate memory, so that it is
 1678 * allowed in a critical section. This means that a bit of space in
 1679 * the array is now wasted, until the next time we add a segment and
 1680 * reallocate.
 1681 */
 1682 }
 1683
 1684 reln->md_num_open_segs[forknum] = nseg;
 1685}
1686
1687/*
1688 * Return the filename for the specified segment of the relation. The
1689 * returned string is palloc'd.
1690 */
1691static MdPathStr
1693{
1694 RelPathStr path;
1695 MdPathStr fullpath;
1696
1697 path = relpath(reln->smgr_rlocator, forknum);
1698
1699 if (segno > 0)
1700 sprintf(fullpath.str, "%s.%u", path.str, segno);
1701 else
1702 strcpy(fullpath.str, path.str);
1703
1704 return fullpath;
1705}
1706
 1707/*
 1708 * Open the specified segment of the relation,
 1709 * and make a MdfdVec object for it. Returns NULL on failure.
 1710 */
 1711static MdfdVec *
/* NOTE(review): original line 1712 (first signature line; per index prototype: _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno, ...) ) was lost in extraction */
 1713 int oflags)
 1714{
 1715 MdfdVec *v;
 1716 File fd;
 1717 MdPathStr fullpath;
 1718
 1719 fullpath = _mdfd_segpath(reln, forknum, segno);
 1720
 1721 /* open the file */
/* NOTE(review): original line 1722 (the PathNameOpenFile() call assigning 'fd') is missing -- confirm against upstream */
 1723
 1724 if (fd < 0)
 1725 return NULL;
 1726
 1727 /*
 1728 * Segments are always opened in order from lowest to highest, so we must
 1729 * be adding a new one at the end.
 1730 */
 1731 Assert(segno == reln->md_num_open_segs[forknum]);
 1732
 1733 _fdvec_resize(reln, forknum, segno + 1);
 1734
 1735 /* fill the entry */
 1736 v = &reln->md_seg_fds[forknum][segno];
 1737 v->mdfd_vfd = fd;
 1738 v->mdfd_segno = segno;
 1739
 1740 Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
 1741
 1742 /* all done */
 1743 return v;
 1744}
1745
 1746/*
 1747 * _mdfd_getseg() -- Find the segment of the relation holding the
 1748 * specified block.
 1749 *
 1750 * If the segment doesn't exist, we ereport, return NULL, or create the
 1751 * segment, according to "behavior". Note: skipFsync is only used in the
 1752 * EXTENSION_CREATE case.
 1753 */
/* NOTE(review): this extraction dropped original lines 1755 (first signature line), 1759-1760 (targetseg/nextsegno declarations), 1764-1765 (the EXTENSION_* mask in the Assert), 1797 (loop condition/increment), 1825-1827 (zero buffer allocation), 1830-1831 (mdextend arguments), 1856, 1867 and 1870 (errcode / FILE_POSSIBLY_DELETED lines) -- verify each against upstream md.c */
 1754static MdfdVec *
 1756 bool skipFsync, int behavior)
 1757{
 1758 MdfdVec *v;
 1761
 1762 /* some way to handle non-existent segments needs to be specified */
 1763 Assert(behavior &
 1766
 1767 targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
 1768
 1769 /* if an existing and opened segment, we're done */
 1770 if (targetseg < reln->md_num_open_segs[forknum])
 1771 {
 1772 v = &reln->md_seg_fds[forknum][targetseg];
 1773 return v;
 1774 }
 1775
 1776 /* The caller only wants the segment if we already had it open. */
 1777 if (behavior & EXTENSION_DONT_OPEN)
 1778 return NULL;
 1779
 1780 /*
 1781 * The target segment is not yet open. Iterate over all the segments
 1782 * between the last opened and the target segment. This way missing
 1783 * segments either raise an error, or get created (according to
 1784 * 'behavior'). Start with either the last opened, or the first segment if
 1785 * none was opened before.
 1786 */
 1787 if (reln->md_num_open_segs[forknum] > 0)
 1788 v = &reln->md_seg_fds[forknum][reln->md_num_open_segs[forknum] - 1];
 1789 else
 1790 {
 1791 v = mdopenfork(reln, forknum, behavior);
 1792 if (!v)
 1793 return NULL; /* if behavior & EXTENSION_RETURN_NULL */
 1794 }
 1795
 1796 for (nextsegno = reln->md_num_open_segs[forknum];
 1798 {
 1799 BlockNumber nblocks = _mdnblocks(reln, forknum, v);
 1800 int flags = 0;
 1801
 1802 Assert(nextsegno == v->mdfd_segno + 1);
 1803
 1804 if (nblocks > ((BlockNumber) RELSEG_SIZE))
 1805 elog(FATAL, "segment too big");
 1806
 1807 if ((behavior & EXTENSION_CREATE) ||
 1808 (InRecovery && (behavior & EXTENSION_CREATE_RECOVERY)))
 1809 {
 1810 /*
 1811 * Normally we will create new segments only if authorized by the
 1812 * caller (i.e., we are doing mdextend()). But when doing WAL
 1813 * recovery, create segments anyway; this allows cases such as
 1814 * replaying WAL data that has a write into a high-numbered
 1815 * segment of a relation that was later deleted. We want to go
 1816 * ahead and create the segments so we can finish out the replay.
 1817 *
 1818 * We have to maintain the invariant that segments before the last
 1819 * active segment are of size RELSEG_SIZE; therefore, if
 1820 * extending, pad them out with zeroes if needed. (This only
 1821 * matters if in recovery, or if the caller is extending the
 1822 * relation discontiguously, but that can happen in hash indexes.)
 1823 */
 1824 if (nblocks < ((BlockNumber) RELSEG_SIZE))
 1825 {
 1828
 1829 mdextend(reln, forknum,
 1832 pfree(zerobuf);
 1833 }
 1834 flags = O_CREAT;
 1835 }
 1836 else if (nblocks < ((BlockNumber) RELSEG_SIZE))
 1837 {
 1838 /*
 1839 * When not extending, only open the next segment if the current
 1840 * one is exactly RELSEG_SIZE. If not (this branch), either
 1841 * return NULL or fail.
 1842 */
 1843 if (behavior & EXTENSION_RETURN_NULL)
 1844 {
 1845 /*
 1846 * Some callers discern between reasons for _mdfd_getseg()
 1847 * returning NULL based on errno. As there's no failing
 1848 * syscall involved in this case, explicitly set errno to
 1849 * ENOENT, as that seems the closest interpretation.
 1850 */
 1851 errno = ENOENT;
 1852 return NULL;
 1853 }
 1854
 1855 ereport(ERROR,
 1857 errmsg("could not open file \"%s\" (target block %u): previous segment is only %u blocks",
 1858 _mdfd_segpath(reln, forknum, nextsegno).str,
 1859 blkno, nblocks)));
 1860 }
 1861
 1862 v = _mdfd_openseg(reln, forknum, nextsegno, flags);
 1863
 1864 if (v == NULL)
 1865 {
 1866 if ((behavior & EXTENSION_RETURN_NULL) &&
 1868 return NULL;
 1869 ereport(ERROR,
 1871 errmsg("could not open file \"%s\" (target block %u): %m",
 1872 _mdfd_segpath(reln, forknum, nextsegno).str,
 1873 blkno)));
 1874 }
 1875 }
 1876
 1877 return v;
 1878}
1879
1880/*
1881 * Get number of blocks present in a single disk file
1882 */
1883static BlockNumber
1885{
1886 pgoff_t len;
1887
1888 len = FileSize(seg->mdfd_vfd);
1889 if (len < 0)
1890 ereport(ERROR,
1892 errmsg("could not seek to end of file \"%s\": %m",
1893 FilePathName(seg->mdfd_vfd))));
1894 /* note that this calculation will ignore any partial block at EOF */
1895 return (BlockNumber) (len / BLCKSZ);
1896}
1897
 1898/*
 1899 * Sync a file to disk, given a file tag. Write the path into an output
 1900 * buffer so the caller can use it in error messages.
 1901 *
 1902 * Return 0 on success, -1 on failure, with errno set.
 1903 */
 1904int
 1905mdsyncfiletag(const FileTag *ftag, char *path)
 1906{
/* NOTE(review): original lines 1907 and 1909 (presumably the smgropen() of 'reln' and an IO-timing local) were lost in extraction -- verify against upstream */
 1908 File file;
 1910 bool need_to_close;
 1911 int result,
 1912 save_errno;
 1913
 1914 /* See if we already have the file open, or need to open it. */
 1915 if (ftag->segno < reln->md_num_open_segs[ftag->forknum])
 1916 {
 1917 file = reln->md_seg_fds[ftag->forknum][ftag->segno].mdfd_vfd;
 1918 strlcpy(path, FilePathName(file), MAXPGPATH);
 1919 need_to_close = false;
 1920 }
 1921 else
 1922 {
 1923 MdPathStr p;
 1924
 1925 p = _mdfd_segpath(reln, ftag->forknum, ftag->segno);
 1926 strlcpy(path, p.str, MD_PATH_STR_MAXLEN);
 1927
 1928 file = PathNameOpenFile(path, _mdfd_open_flags());
 1929 if (file < 0)
 1930 return -1;
 1931 need_to_close = true;
 1932 }
 1933
/* NOTE(review): original line 1934 (presumably starts IO-time accounting before the sync) is missing -- confirm */
 1935
 1936 /* Sync the file. */
 1937 result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC);
/* Save errno before FileClose() below can clobber it. */
 1938 save_errno = errno;
 1939
 1940 if (need_to_close)
 1941 FileClose(file);
 1942
/* NOTE(review): original line 1943 (first line of the pgstat_count_io_op_time() call completed below) is missing -- confirm */
 1944 IOOP_FSYNC, io_start, 1, 0);
 1945
 1946 errno = save_errno;
 1947 return result;
 1948}
1949
1950/*
1951 * Unlink a file, given a file tag. Write the path into an output
1952 * buffer so the caller can use it in error messages.
1953 *
1954 * Return 0 on success, -1 on failure, with errno set.
1955 */
1956int
1957mdunlinkfiletag(const FileTag *ftag, char *path)
1958{
1959 RelPathStr p;
1960
1961 /* Compute the path. */
1962 p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
1963 strlcpy(path, p.str, MAXPGPATH);
1964
1965 /* Try to unlink the file. */
1966 return unlink(path);
1967}
1968
1969/*
1970 * Check if a given candidate request matches a given tag, when processing
1971 * a SYNC_FILTER_REQUEST request. This will be called for all pending
1972 * requests to find out whether to forget them.
1973 */
1974bool
1976{
1977 /*
1978 * For now we only use filter requests as a way to drop all scheduled
1979 * callbacks relating to a given database, when dropping the database.
1980 * We'll return true for all candidates that have the same database OID as
1981 * the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
1982 */
1983 return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
1984}
1985
 1986/*
 1987 * AIO completion callback for mdstartreadv().
 1988 */
/* NOTE(review): this extraction dropped original lines 1990 (signature; per index prototype: md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data)), 1992 (presumably fetching 'td' via pgaio_io_get_target_data), and 2012/2033 (presumably the pgaio_result_report() calls described by the adjacent comments) -- verify against upstream */
 1989static PgAioResult
 1991{
 1993 PgAioResult result = prior_result;
 1994
 1995 if (prior_result.result < 0)
 1996 {
 1997 result.status = PGAIO_RS_ERROR;
 1998 result.id = PGAIO_HCB_MD_READV;
 1999 /* For "hard" errors, track the error number in error_data */
 2000 result.error_data = -prior_result.result;
 2001 result.result = 0;
 2002
 2003 /*
 2004 * Immediately log a message about the IO error, but only to the
 2005 * server log. The reason to do so immediately is that the originator
 2006 * might not process the query result immediately (because it is busy
 2007 * doing another part of query processing) or at all (e.g. if it was
 2008 * cancelled or errored out due to another IO also failing). The
 2009 * definer of the IO will emit an ERROR when processing the IO's
 2010 * results
 2011 */
 2013
 2014 return result;
 2015 }
 2016
 2017 /*
 2018 * As explained above smgrstartreadv(), the smgr API operates on the level
 2019 * of blocks, rather than bytes. Convert.
 2020 */
 2021 result.result /= BLCKSZ;
 2022
 2023 Assert(result.result <= td->smgr.nblocks);
 2024
 2025 if (result.result == 0)
 2026 {
 2027 /* consider 0 blocks read a failure */
 2028 result.status = PGAIO_RS_ERROR;
 2029 result.id = PGAIO_HCB_MD_READV;
 2030 result.error_data = 0;
 2031
 2032 /* see comment above the "hard error" case */
 2034
 2035 return result;
 2036 }
 2037
 2038 if (result.status != PGAIO_RS_ERROR &&
 2039 result.result < td->smgr.nblocks)
 2040 {
 2041 /* partial reads should be retried at upper level */
 2042 result.status = PGAIO_RS_PARTIAL;
 2043 result.id = PGAIO_HCB_MD_READV;
 2044 }
 2045
 2046 return result;
 2047}
2048
 2049/*
 2050 * AIO error reporting callback for mdstartreadv().
 2051 *
 2052 * Errors are encoded as follows:
 2053 * - PgAioResult.error_data != 0 encodes IO that failed with that errno
 2054 * - PgAioResult.error_data == 0 encodes IO that didn't read all data
 2055 */
 2056static void
 2057md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel)
 2058{
 2059 RelPathStr path;
 2060
 2061 path = relpathbackend(td->smgr.rlocator,
/* NOTE(review): original line 2062 (the backend/proc-number argument to relpathbackend()) was lost in extraction -- verify against upstream */
 2063 td->smgr.forkNum);
 2064
 2065 if (result.error_data != 0)
 2066 {
 2067 /* for errcode_for_file_access() and %m */
 2068 errno = result.error_data;
 2069
 2070 ereport(elevel,
/* NOTE(review): original line 2071 (presumably the errcode_for_file_access() line) is missing -- confirm */
 2072 errmsg("could not read blocks %u..%u in file \"%s\": %m",
 2073 td->smgr.blockNum,
 2074 td->smgr.blockNum + td->smgr.nblocks - 1,
 2075 path.str));
 2076 }
 2077 else
 2078 {
 2079 /*
 2080 * NB: This will typically only be output in debug messages, while
 2081 * retrying a partial IO.
 2082 */
 2083 ereport(elevel,
/* NOTE(review): original line 2084 (presumably an errcode() line for the short-read case) is missing -- confirm */
 2085 errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
 2086 td->smgr.blockNum,
 2087 td->smgr.blockNum + td->smgr.nblocks - 1,
 2088 path.str,
 2089 result.result * (size_t) BLCKSZ,
 2090 td->smgr.nblocks * (size_t) BLCKSZ));
 2091 }
 2092}
void pgaio_io_set_flag(PgAioHandle *ioh, PgAioHandleFlags flag)
Definition aio.c:330
@ PGAIO_HCB_MD_READV
Definition aio.h:196
@ PGAIO_HF_BUFFERED
Definition aio.h:77
void pgaio_io_register_callbacks(PgAioHandle *ioh, PgAioHandleCallbackID cb_id, uint8 cb_data)
void pgaio_result_report(PgAioResult result, const PgAioTargetData *target_data, int elevel)
int pgaio_io_get_iovec(PgAioHandle *ioh, struct iovec **iov)
Definition aio_io.c:42
PgAioTargetData * pgaio_io_get_target_data(PgAioHandle *ioh)
Definition aio_target.c:73
@ PGAIO_RS_PARTIAL
Definition aio_types.h:82
@ PGAIO_RS_ERROR
Definition aio_types.h:84
void TablespaceCreateDbspace(Oid spcOid, Oid dbOid, bool isRedo)
Definition tablespace.c:113
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
#define MaxBlockNumber
Definition block.h:35
bool track_io_timing
Definition bufmgr.c:192
bool zero_damaged_pages
Definition bufmgr.c:189
#define Min(x, y)
Definition c.h:1093
#define TYPEALIGN(ALIGNVAL, LEN)
Definition c.h:891
uint8_t uint8
Definition c.h:616
#define Assert(condition)
Definition c.h:945
#define PG_BINARY
Definition c.h:1376
uint64_t uint64
Definition c.h:619
uint32_t uint32
Definition c.h:618
#define lengthof(array)
Definition c.h:875
#define StaticAssertDecl(condition, errmessage)
Definition c.h:1010
int errcode_for_file_access(void)
Definition elog.c:897
int errcode(int sqlerrcode)
Definition elog.c:874
int errhint(const char *fmt,...) pg_attribute_printf(1
#define LOG_SERVER_ONLY
Definition elog.h:32
#define FATAL
Definition elog.h:41
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
int pg_truncate(const char *path, pgoff_t length)
Definition fd.c:721
int FileGetRawDesc(File file)
Definition fd.c:2516
void FileWriteback(File file, pgoff_t offset, pgoff_t nbytes, uint32 wait_event_info)
Definition fd.c:2123
int io_direct_flags
Definition fd.c:172
int file_extend_method
Definition fd.c:169
char * FilePathName(File file)
Definition fd.c:2500
int FileSync(File file, uint32 wait_event_info)
Definition fd.c:2336
int FileStartReadV(PgAioHandle *ioh, File file, int iovcnt, pgoff_t offset, uint32 wait_event_info)
Definition fd.c:2205
ssize_t FileReadV(File file, const struct iovec *iov, int iovcnt, pgoff_t offset, uint32 wait_event_info)
Definition fd.c:2149
int FileFallocate(File file, pgoff_t offset, pgoff_t amount, uint32 wait_event_info)
Definition fd.c:2408
pgoff_t FileSize(File file)
Definition fd.c:2448
void FileClose(File file)
Definition fd.c:1966
int data_sync_elevel(int elevel)
Definition fd.c:3986
File PathNameOpenFile(const char *fileName, int fileFlags)
Definition fd.c:1563
int FileTruncate(File file, pgoff_t offset, uint32 wait_event_info)
Definition fd.c:2465
int FileZero(File file, pgoff_t offset, pgoff_t amount, uint32 wait_event_info)
Definition fd.c:2363
int FilePrefetch(File file, pgoff_t offset, pgoff_t amount, uint32 wait_event_info)
Definition fd.c:2067
ssize_t FileWriteV(File file, const struct iovec *iov, int iovcnt, pgoff_t offset, uint32 wait_event_info)
Definition fd.c:2231
#define IO_DIRECT_DATA
Definition fd.h:54
static ssize_t FileWrite(File file, const void *buffer, size_t amount, pgoff_t offset, uint32 wait_event_info)
Definition fd.h:237
@ FILE_EXTEND_METHOD_WRITE_ZEROS
Definition fd.h:63
#define FILE_POSSIBLY_DELETED(err)
Definition fd.h:89
int File
Definition fd.h:51
#define PG_O_DIRECT
Definition fd.h:123
#define MCXT_ALLOC_ZERO
Definition fe_memutils.h:30
#define palloc_array(type, count)
Definition fe_memutils.h:76
int compute_remaining_iovec(struct iovec *destination, const struct iovec *source, int iovcnt, size_t transferred)
Definition file_utils.c:614
bool IsBinaryUpgrade
Definition globals.c:121
ProcNumber MyProcNumber
Definition globals.c:90
const char * str
int i
Definition isn.c:77
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc_aligned(Size size, Size alignto, int flags)
Definition mcxt.c:1606
void mdunlink(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
Definition md.c:338
static void md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel)
Definition md.c:2057
static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum, BlockNumber segno)
Definition md.c:1580
#define EXTENSION_CREATE_RECOVERY
Definition md.c:120
void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber curnblk, BlockNumber nblocks)
Definition md.c:1302
static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
Definition md.c:1884
static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
Definition md.c:375
void mdwritev(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void **buffers, BlockNumber nblocks, bool skipFsync)
Definition md.c:1071
bool mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
Definition md.c:1975
bool mdexists(SMgrRelation reln, ForkNumber forknum)
Definition md.c:204
void mdreadv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, void **buffers, BlockNumber nblocks)
Definition md.c:859
static MdPathStr _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
Definition md.c:1692
static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum, BlockNumber segno)
Definition md.c:1563
#define EXTENSION_DONT_OPEN
Definition md.c:122
BlockNumber mdnblocks(SMgrRelation reln, ForkNumber forknum)
Definition md.c:1235
int mdunlinkfiletag(const FileTag *ftag, char *path)
Definition md.c:1957
static MemoryContext MdCxt
Definition md.c:98
void mdcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition md.c:223
int mdfd(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, uint32 *off)
Definition md.c:1495
void mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition md.c:488
static PgAioResult md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data)
Definition md.c:1990
static int do_truncate(const char *path)
Definition md.c:354
void mdinit(void)
Definition md.c:191
void mdclose(SMgrRelation reln, ForkNumber forknum)
Definition md.c:725
void mdzeroextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, int nblocks, bool skipFsync)
Definition md.c:553
static MdfdVec * _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno, int oflags)
Definition md.c:1712
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
Definition md.c:1519
int mdsyncfiletag(const FileTag *ftag, char *path)
Definition md.c:1905
void mdwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, BlockNumber nblocks)
Definition md.c:1176
uint32 mdmaxcombine(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
Definition md.c:845
static MdfdVec * _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, bool skipFsync, int behavior)
Definition md.c:1755
#define EXTENSION_RETURN_NULL
Definition md.c:116
void mdstartreadv(PgAioHandle *ioh, SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, void **buffers, BlockNumber nblocks)
Definition md.c:997
bool mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, int nblocks)
Definition md.c:748
void mdregistersync(SMgrRelation reln, ForkNumber forknum)
Definition md.c:1391
void mdopen(SMgrRelation reln)
Definition md.c:714
#define EXTENSION_CREATE
Definition md.c:118
const PgAioHandleCallbacks aio_md_readv_cb
Definition md.c:170
static int _mdfd_open_flags(void)
Definition md.c:177
#define INIT_MD_FILETAG(a, xx_rlocator, xx_forknum, xx_segno)
Definition md.c:102
#define EXTENSION_FAIL
Definition md.c:114
static MdfdVec * mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
Definition md.c:676
void DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
Definition md.c:1612
static int buffers_to_iovec(struct iovec *iov, void **buffers, int nblocks)
Definition md.c:796
#define MD_PATH_STR_MAXLEN
Definition md.c:133
static void _fdvec_resize(SMgrRelation reln, ForkNumber forknum, int nseg)
Definition md.c:1644
void ForgetDatabaseSyncRequests(Oid dbid)
Definition md.c:1594
void mdimmedsync(SMgrRelation reln, ForkNumber forknum)
Definition md.c:1442
struct _MdfdVec MdfdVec
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
static char * errmsg
#define ERRCODE_DATA_CORRUPTED
#define MAXPGPATH
#define PG_IO_ALIGN_SIZE
const void size_t len
#define PG_IOV_MAX
Definition pg_iovec.h:47
@ IOOBJECT_RELATION
Definition pgstat.h:280
@ IOCONTEXT_NORMAL
Definition pgstat.h:292
@ IOOP_FSYNC
Definition pgstat.h:311
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition pgstat_io.c:91
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt, uint64 bytes)
Definition pgstat_io.c:122
#define sprintf
Definition port.h:262
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition strlcpy.c:45
off_t pgoff_t
Definition port.h:421
unsigned int Oid
static int fd(const char *x, int i)
static int fb(int x)
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
#define RelFileLocatorBackendIsTemp(rlocator)
ForkNumber
Definition relpath.h:56
@ MAIN_FORKNUM
Definition relpath.h:58
@ InvalidForkNumber
Definition relpath.h:57
#define MAX_FORKNUM
Definition relpath.h:70
#define relpath(rlocator, forknum)
Definition relpath.h:150
#define relpathbackend(rlocator, backend, forknum)
Definition relpath.h:141
#define relpathperm(rlocator, forknum)
Definition relpath.h:146
SMgrRelation smgropen(RelFileLocator rlocator, ProcNumber backend)
Definition smgr.c:240
void smgrclose(SMgrRelation reln)
Definition smgr.c:374
void smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
Definition smgr.c:538
void pgaio_io_set_target_smgr(PgAioHandle *ioh, SMgrRelationData *smgr, ForkNumber forknum, BlockNumber blocknum, int nblocks, bool skip_fsync)
Definition smgr.c:1038
#define SmgrIsTemp(smgr)
Definition smgr.h:74
Definition sync.h:51
RelFileLocator rlocator
Definition sync.h:54
int16 forknum
Definition sync.h:53
uint64 segno
Definition sync.h:55
char str[MD_PATH_STR_MAXLEN+1]
Definition md.c:141
PgAioHandleCallbackComplete complete_shared
Definition aio.h:239
uint32 status
Definition aio_types.h:108
uint32 error_data
Definition aio_types.h:111
int32 result
Definition aio_types.h:113
uint32 id
Definition aio_types.h:105
RelFileLocator locator
RelFileNumber relNumber
char str[REL_PATH_STR_MAXLEN+1]
Definition relpath.h:123
Definition md.c:93
File mdfd_vfd
Definition md.c:94
BlockNumber mdfd_segno
Definition md.c:95
bool RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, bool retryOnError)
Definition sync.c:581
@ SYNC_FILTER_REQUEST
Definition sync.h:28
@ SYNC_FORGET_REQUEST
Definition sync.h:27
@ SYNC_UNLINK_REQUEST
Definition sync.h:26
@ SYNC_REQUEST
Definition sync.h:25
BlockNumber blockNum
Definition aio_types.h:66
RelFileLocator rlocator
Definition aio_types.h:65
struct PgAioTargetData::@128 smgr
BlockNumber nblocks
Definition aio_types.h:67
ForkNumber forkNum
Definition aio_types.h:68
bool InRecovery
Definition xlogutils.c:50
void XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
Definition xlogutils.c:630