/*-------------------------------------------------------------------------
 *
 * md.c
 *    This code manages relations that reside on magnetic disk.
 *
 * Or at least, that was what the Berkeley folk had in mind when they named
 * this file.  In reality, what this code provides is an interface from
 * the smgr API to Unix-like filesystem APIs, so it will work with any type
 * of device for which the operating system provides filesystem support.
 * It doesn't matter whether the bits are on spinning rust or some other
 * storage technology.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/smgr/md.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <unistd.h>
#include <fcntl.h>
#include <sys/file.h>

#include "miscadmin.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
#include "portability/instr_time.h"
#include "postmaster/bgwriter.h"
#include "storage/fd.h"
#include "storage/bufmgr.h"
#include "storage/relfilenode.h"
#include "storage/smgr.h"
#include "utils/hsearch.h"
#include "utils/memutils.h"
#include "pg_trace.h"


/* intervals for calling AbsorbFsyncRequests in mdsync and mdpostckpt */
#define FSYNCS_PER_ABSORB       10
#define UNLINKS_PER_ABSORB      10

/*
 * Special values for the segno arg to RememberFsyncRequest.
 *
 * Note that CompactCheckpointerRequestQueue assumes that it's OK to remove an
 * fsync request from the queue if an identical, subsequent request is found.
 * See comments there before making changes here.
 */
#define FORGET_RELATION_FSYNC   (InvalidBlockNumber)
#define FORGET_DATABASE_FSYNC   (InvalidBlockNumber-1)
#define UNLINK_RELATION_REQUEST (InvalidBlockNumber-2)

/*
 * On Windows, we have to interpret EACCES as possibly meaning the same as
 * ENOENT, because if a file is unlinked-but-not-yet-gone on that platform,
 * that's what you get.  Ugh.  This code is designed so that we don't
 * actually believe these cases are okay without further evidence (namely,
 * a pending fsync request getting canceled ... see mdsync).
 */
#ifndef WIN32
#define FILE_POSSIBLY_DELETED(err)  ((err) == ENOENT)
#else
#define FILE_POSSIBLY_DELETED(err)  ((err) == ENOENT || (err) == EACCES)
#endif
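
/*
 * Editor's illustrative sketch (not part of md.c): FILE_POSSIBLY_DELETED is
 * meant to classify an errno as only *possibly* benign; the final verdict is
 * left to the retry-and-absorb logic in mdsync().  The function name below
 * is hypothetical.
 */
#ifdef NOT_USED
static bool
example_open_failure_may_be_benign(char *path)
{
    int     fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0600);

    if (fd >= 0)
    {
        close(fd);
        return false;           /* no failure at all */
    }
    /* ENOENT (or EACCES on Windows) may just mean "already unlinked" */
    return FILE_POSSIBLY_DELETED(errno);
}
#endif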

/*
 * The magnetic disk storage manager keeps track of open file
 * descriptors in its own descriptor pool.  This is done to make it
 * easier to support relations that are larger than the operating
 * system's file size limit (often 2GBytes).  In order to do that,
 * we break relations up into "segment" files that are each shorter than
 * the OS file size limit.  The segment size is set by the RELSEG_SIZE
 * configuration constant in pg_config.h.
 *
 * On disk, a relation must consist of consecutively numbered segment
 * files in the pattern
 *  -- Zero or more full segments of exactly RELSEG_SIZE blocks each
 *  -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
 *  -- Optionally, any number of inactive segments of size 0 blocks.
 * The full and partial segments are collectively the "active" segments.
 * Inactive segments are those that once contained data but are currently
 * not needed because of an mdtruncate() operation.  The reason for leaving
 * them present at size zero, rather than unlinking them, is that other
 * backends and/or the checkpointer might be holding open file references to
 * such segments.  If the relation expands again after mdtruncate(), such
 * that a deactivated segment becomes active again, it is important that
 * such file references still be valid --- else data might get written
 * out to an unlinked old copy of a segment file that will eventually
 * disappear.
 *
 * File descriptors are stored in the per-fork md_seg_fds arrays inside
 * SMgrRelation.  The length of these arrays is stored in md_num_open_segs.
 * Note that a fork's md_num_open_segs having a specific value does not
 * necessarily mean the relation doesn't have additional segments; we may
 * just not have opened the next segment yet.  (We could not have "all
 * segments are in the array" as an invariant anyway, since another backend
 * could extend the relation while we aren't looking.)  We do not have
 * entries for inactive segments, however; as soon as we find a partial
 * segment, we assume that any subsequent segments are inactive.
 *
 * The entire MdfdVec array is palloc'd in the MdCxt memory context.
 */

typedef struct _MdfdVec
{
    File        mdfd_vfd;       /* fd number in fd.c's pool */
    BlockNumber mdfd_segno;     /* segment number, from 0 */
} MdfdVec;
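
/*
 * Editor's illustrative sketch (not part of md.c): the arithmetic the
 * routines below apply over and over to locate a block.  A block number
 * splits into a segment number (which selects the "path.N" file) and a byte
 * offset within that segment.  The struct and function names here are
 * hypothetical.
 */
#ifdef NOT_USED
typedef struct ExampleBlockLocation
{
    BlockNumber segno;          /* which segment file, from 0 */
    off_t       offset;         /* byte offset within that segment */
} ExampleBlockLocation;

static ExampleBlockLocation
example_locate_block(BlockNumber blocknum)
{
    ExampleBlockLocation loc;

    loc.segno = blocknum / ((BlockNumber) RELSEG_SIZE);
    loc.offset = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
    Assert(loc.offset < (off_t) BLCKSZ * RELSEG_SIZE);
    return loc;
}
#endif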

static MemoryContext MdCxt;     /* context for all MdfdVec objects */


/*
 * In some contexts (currently, standalone backends and the checkpointer)
 * we keep track of pending fsync operations: we need to remember all relation
 * segments that have been written since the last checkpoint, so that we can
 * fsync them down to disk before completing the next checkpoint.  This hash
 * table remembers the pending operations.  We use a hash table mostly as
 * a convenient way of merging duplicate requests.
 *
 * We use a similar mechanism to remember no-longer-needed files that can
 * be deleted after the next checkpoint, but we use a linked list instead of
 * a hash table, because we don't expect there to be any duplicate requests.
 *
 * These mechanisms are only used for non-temp relations; we never fsync
 * temp rels, nor do we need to postpone their deletion (see comments in
 * mdunlink).
 *
 * (Regular backends do not track pending operations locally, but forward
 * them to the checkpointer.)
 */
typedef uint16 CycleCtr;        /* can be any convenient integer size */

typedef struct
{
    RelFileNode rnode;          /* hash table key (must be first!) */
    CycleCtr    cycle_ctr;      /* mdsync_cycle_ctr of oldest request */
    /* requests[f] has bit n set if we need to fsync segment n of fork f */
    Bitmapset  *requests[MAX_FORKNUM + 1];
    /* canceled[f] is true if we canceled fsyncs for fork "recently" */
    bool        canceled[MAX_FORKNUM + 1];
} PendingOperationEntry;

typedef struct
{
    RelFileNode rnode;          /* the dead relation to delete */
    CycleCtr    cycle_ctr;      /* mdckpt_cycle_ctr when request was made */
} PendingUnlinkEntry;

static HTAB *pendingOpsTable = NULL;
static List *pendingUnlinks = NIL;
static MemoryContext pendingOpsCxt; /* context for the above */

static CycleCtr mdsync_cycle_ctr = 0;
static CycleCtr mdckpt_cycle_ctr = 0;
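
/*
 * Editor's illustrative sketch (not part of md.c): how the per-fork
 * Bitmapsets in PendingOperationEntry encode "segment n of fork f needs an
 * fsync".  The function name is hypothetical; the real insertion and
 * draining logic lives in RememberFsyncRequest() and mdsync() below.
 */
#ifdef NOT_USED
static void
example_pending_ops_usage(PendingOperationEntry *entry)
{
    int     segno;

    /* record that segment 3 of the main fork was dirtied */
    entry->requests[MAIN_FORKNUM] =
        bms_add_member(entry->requests[MAIN_FORKNUM], 3);

    /* drain the fork's requests, destructively, as mdsync() does */
    while ((segno = bms_first_member(entry->requests[MAIN_FORKNUM])) >= 0)
        elog(DEBUG1, "would fsync segment %d of the main fork", segno);
}
#endif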

/*** behavior for mdopen & _mdfd_getseg ***/
/* ereport if segment not present */
#define EXTENSION_FAIL              (1 << 0)
/* return NULL if segment not present */
#define EXTENSION_RETURN_NULL       (1 << 1)
/* create new segments as needed */
#define EXTENSION_CREATE            (1 << 2)
/* create new segments if needed during recovery */
#define EXTENSION_CREATE_RECOVERY   (1 << 3)
/*
 * Allow opening segments which are preceded by segments smaller than
 * RELSEG_SIZE, e.g. inactive segments (see above).  Note that this breaks
 * mdnblocks() and related functionality henceforth - which currently is ok,
 * because this is only required in the checkpointer which never uses
 * mdnblocks().
 */
#define EXTENSION_DONT_CHECK_SIZE   (1 << 4)

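/*
 * Editor's illustrative note (not part of md.c): callers combine these flags
 * to pick a policy per call site.  For instance, mdsync() below opens
 * segments with
 *
 *     _mdfd_getseg(reln, forknum, blkno, false,
 *                  EXTENSION_RETURN_NULL | EXTENSION_DONT_CHECK_SIZE);
 *
 * so that a segment unlinked behind its back yields NULL (to be resolved by
 * the retry logic) rather than an error, and inactive predecessor segments
 * don't trip the size check.
 */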

/* local routines */
static void mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum,
             bool isRedo);
static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum, int behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
                       MdfdVec *seg);
static void register_unlink(RelFileNodeBackend rnode);
static void _fdvec_resize(SMgrRelation reln,
              ForkNumber forknum,
              int nseg);
static char *_mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
              BlockNumber segno);
static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
              BlockNumber segno, int oflags);
static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
             BlockNumber blkno, bool skipFsync, int behavior);
static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
           MdfdVec *seg);


/*
 * mdinit() -- Initialize private state for magnetic disk storage manager.
 */
void
mdinit(void)
{
    MdCxt = AllocSetContextCreate(TopMemoryContext,
                                  "MdSmgr",
                                  ALLOCSET_DEFAULT_SIZES);

    /*
     * Create pending-operations hashtable if we need it.  Currently, we need
     * it if we are standalone (not under a postmaster) or if we are a startup
     * or checkpointer auxiliary process.
     */
    if (!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess())
    {
        HASHCTL     hash_ctl;

        /*
         * XXX: The checkpointer needs to add entries to the pending ops table
         * when absorbing fsync requests.  That is done within a critical
         * section, which isn't usually allowed, but we make an exception.  It
         * means that there's a theoretical possibility that you run out of
         * memory while absorbing fsync requests, which leads to a PANIC.
         * Fortunately the hash table is small so that's unlikely to happen in
         * practice.
         */
        pendingOpsCxt = AllocSetContextCreate(MdCxt,
                                              "Pending ops context",
                                              ALLOCSET_DEFAULT_SIZES);
        MemoryContextAllowInCriticalSection(pendingOpsCxt, true);

        MemSet(&hash_ctl, 0, sizeof(hash_ctl));
        hash_ctl.keysize = sizeof(RelFileNode);
        hash_ctl.entrysize = sizeof(PendingOperationEntry);
        hash_ctl.hcxt = pendingOpsCxt;
        pendingOpsTable = hash_create("Pending Ops Table",
                                      100L,
                                      &hash_ctl,
                                      HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
        pendingUnlinks = NIL;
    }
}

/*
 * In archive recovery, we rely on checkpointer to do fsyncs, but we will have
 * already created the pendingOpsTable during initialization of the startup
 * process.  Calling this function drops the local pendingOpsTable so that
 * subsequent requests will be forwarded to checkpointer.
 */
void
SetForwardFsyncRequests(void)
{
    /* Perform any pending fsyncs we may have queued up, then drop table */
    if (pendingOpsTable)
    {
        mdsync();
        hash_destroy(pendingOpsTable);
    }
    pendingOpsTable = NULL;

    /*
     * We should not have any pending unlink requests, since mdunlink doesn't
     * queue unlink requests when isRedo.
     */
    Assert(pendingUnlinks == NIL);
}

/*
 * mdexists() -- Does the physical file exist?
 *
 * Note: this will return true for lingering files, with pending deletions
 */
bool
mdexists(SMgrRelation reln, ForkNumber forkNum)
{
    /*
     * Close it first, to ensure that we notice if the fork has been unlinked
     * since we opened it.
     */
    mdclose(reln, forkNum);

    return (mdopen(reln, forkNum, EXTENSION_RETURN_NULL) != NULL);
}

/*
 * mdcreate() -- Create a new relation on magnetic disk.
 *
 * If isRedo is true, it's okay for the relation to exist already.
 */
void
mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
{
    MdfdVec    *mdfd;
    char       *path;
    File        fd;

    if (isRedo && reln->md_num_open_segs[forkNum] > 0)
        return;                 /* created and opened already... */

    Assert(reln->md_num_open_segs[forkNum] == 0);

    path = relpath(reln->smgr_rnode, forkNum);

    fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);

    if (fd < 0)
    {
        int         save_errno = errno;

        /*
         * During bootstrap, there are cases where a system relation will be
         * accessed (by internal backend processes) before the bootstrap
         * script nominally creates it.  Therefore, allow the file to exist
         * already, even if isRedo is not set.  (See also mdopen)
         */
        if (isRedo || IsBootstrapProcessingMode())
            fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
        if (fd < 0)
        {
            /* be sure to report the error reported by create, not open */
            errno = save_errno;
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not create file \"%s\": %m", path)));
        }
    }

    pfree(path);

    _fdvec_resize(reln, forkNum, 1);
    mdfd = &reln->md_seg_fds[forkNum][0];
    mdfd->mdfd_vfd = fd;
    mdfd->mdfd_segno = 0;
}

/*
 * mdunlink() -- Unlink a relation.
 *
 * Note that we're passed a RelFileNodeBackend --- by the time this is called,
 * there won't be an SMgrRelation hashtable entry anymore.
 *
 * forkNum can be a fork number to delete a specific fork, or InvalidForkNumber
 * to delete all forks.
 *
 * For regular relations, we don't unlink the first segment file of the rel,
 * but just truncate it to zero length, and record a request to unlink it after
 * the next checkpoint.  Additional segments can be unlinked immediately,
 * however.  Leaving the empty file in place prevents that relfilenode
 * number from being reused.  The scenario this protects us from is:
 * 1. We delete a relation (and commit, and actually remove its file).
 * 2. We create a new relation, which by chance gets the same relfilenode as
 *    the just-deleted one (OIDs must've wrapped around for that to happen).
 * 3. We crash before another checkpoint occurs.
 * During replay, we would delete the file and then recreate it, which is fine
 * if the contents of the file were repopulated by subsequent WAL entries.
 * But if we didn't WAL-log insertions, but instead relied on fsyncing the
 * file after populating it (as for instance CLUSTER and CREATE INDEX do),
 * the contents of the file would be lost forever.  By leaving the empty file
 * until after the next checkpoint, we prevent reassignment of the relfilenode
 * number until it's safe, because relfilenode assignment skips over any
 * existing file.
 *
 * We do not need to go through this dance for temp relations, though, because
 * we never make WAL entries for temp rels, and so a temp rel poses no threat
 * to the health of a regular rel that has taken over its relfilenode number.
 * The fact that temp rels and regular rels have different file naming
 * patterns provides additional safety.
 *
 * All the above applies only to the relation's main fork; other forks can
 * just be removed immediately, since they are not needed to prevent the
 * relfilenode number from being recycled.  Also, we do not carefully
 * track whether other forks have been created or not, but just attempt to
 * unlink them unconditionally; so we should never complain about ENOENT.
 *
 * If isRedo is true, it's unsurprising for the relation to be already gone.
 * Also, we should remove the file immediately instead of queuing a request
 * for later, since during redo there's no possibility of creating a
 * conflicting relation.
 *
 * Note: any failure should be reported as WARNING not ERROR, because
 * we are usually not in a transaction anymore when this is called.
 */
void
mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
    /*
     * We have to clean out any pending fsync requests for the doomed
     * relation, else the next mdsync() will fail.  There can't be any such
     * requests for a temp relation, though.  We can send just one request
     * even when deleting multiple forks, since the fsync queuing code accepts
     * the "InvalidForkNumber = all forks" convention.
     */
    if (!RelFileNodeBackendIsTemp(rnode))
        ForgetRelationFsyncRequests(rnode.node, forkNum);

    /* Now do the per-fork work */
    if (forkNum == InvalidForkNumber)
    {
        for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
            mdunlinkfork(rnode, forkNum, isRedo);
    }
    else
        mdunlinkfork(rnode, forkNum, isRedo);
}

static void
mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
    char       *path;
    int         ret;

    path = relpath(rnode, forkNum);

    /*
     * Delete or truncate the first segment.
     */
    if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
    {
        ret = unlink(path);
        if (ret < 0 && errno != ENOENT)
            ereport(WARNING,
                    (errcode_for_file_access(),
                     errmsg("could not remove file \"%s\": %m", path)));
    }
    else
    {
        /* truncate(2) would be easier here, but Windows hasn't got it */
        int         fd;

        fd = OpenTransientFile(path, O_RDWR | PG_BINARY, 0);
        if (fd >= 0)
        {
            int         save_errno;

            ret = ftruncate(fd, 0);
            save_errno = errno;
            CloseTransientFile(fd);
            errno = save_errno;
        }
        else
            ret = -1;
        if (ret < 0 && errno != ENOENT)
            ereport(WARNING,
                    (errcode_for_file_access(),
                     errmsg("could not truncate file \"%s\": %m", path)));

        /* Register request to unlink first segment later */
        register_unlink(rnode);
    }

    /*
     * Delete any additional segments.
     */
    if (ret >= 0)
    {
        char       *segpath = (char *) palloc(strlen(path) + 12);
        BlockNumber segno;

        /*
         * Note that because we loop until getting ENOENT, we will correctly
         * remove all inactive segments as well as active ones.
         */
        for (segno = 1;; segno++)
        {
            sprintf(segpath, "%s.%u", path, segno);
            if (unlink(segpath) < 0)
            {
                /* ENOENT is expected after the last segment... */
                if (errno != ENOENT)
                    ereport(WARNING,
                            (errcode_for_file_access(),
                             errmsg("could not remove file \"%s\": %m", segpath)));
                break;
            }
        }
        pfree(segpath);
    }

    pfree(path);
}

/*
 * mdextend() -- Add a block to the specified relation.
 *
 * The semantics are nearly the same as mdwrite(): write at the
 * specified position.  However, this is to be used for the case of
 * extending a relation (i.e., blocknum is at or beyond the current
 * EOF).  Note that we assume writing a block beyond current EOF
 * causes intervening file space to become filled with zeroes.
 */
void
mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
         char *buffer, bool skipFsync)
{
    off_t       seekpos;
    int         nbytes;
    MdfdVec    *v;

    /* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
    Assert(blocknum >= mdnblocks(reln, forknum));
#endif

    /*
     * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
     * more --- we mustn't create a block whose number actually is
     * InvalidBlockNumber.
     */
    if (blocknum == InvalidBlockNumber)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("cannot extend file \"%s\" beyond %u blocks",
                        relpath(reln->smgr_rnode, forknum),
                        InvalidBlockNumber)));

    v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);

    seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));

    Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

    /*
     * Note: because caller usually obtained blocknum by calling mdnblocks,
     * which did a seek(SEEK_END), this seek is often redundant and will be
     * optimized away by fd.c.  It's not redundant, however, if there is a
     * partial page at the end of the file.  In that case we want to try to
     * overwrite the partial page with a full page.  It's also not redundant
     * if bufmgr.c had to dump another buffer of the same file to make room
     * for the new page's buffer.
     */
    if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek to block %u in file \"%s\": %m",
                        blocknum, FilePathName(v->mdfd_vfd))));

    if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
    {
        if (nbytes < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not extend file \"%s\": %m",
                            FilePathName(v->mdfd_vfd)),
                     errhint("Check free disk space.")));
        /* short write: complain appropriately */
        ereport(ERROR,
                (errcode(ERRCODE_DISK_FULL),
                 errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
                        FilePathName(v->mdfd_vfd),
                        nbytes, BLCKSZ, blocknum),
                 errhint("Check free disk space.")));
    }

    if (!skipFsync && !SmgrIsTemp(reln))
        register_dirty_segment(reln, forknum, v);

    Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
}

/*
 * mdopen() -- Open the specified relation.
 *
 * Note we only open the first segment, when there are multiple segments.
 *
 * If first segment is not present, either ereport or return NULL according
 * to "behavior".  We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
 * EXTENSION_CREATE means it's OK to extend an existing relation, not to
 * invent one out of whole cloth.
 */
static MdfdVec *
mdopen(SMgrRelation reln, ForkNumber forknum, int behavior)
{
    MdfdVec    *mdfd;
    char       *path;
    File        fd;

    /* No work if already open */
    if (reln->md_num_open_segs[forknum] > 0)
        return &reln->md_seg_fds[forknum][0];

    path = relpath(reln->smgr_rnode, forknum);

    fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);

    if (fd < 0)
    {
        /*
         * During bootstrap, there are cases where a system relation will be
         * accessed (by internal backend processes) before the bootstrap
         * script nominally creates it.  Therefore, accept mdopen() as a
         * substitute for mdcreate() in bootstrap mode only.  (See mdcreate)
         */
        if (IsBootstrapProcessingMode())
            fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
        if (fd < 0)
        {
            if ((behavior & EXTENSION_RETURN_NULL) &&
                FILE_POSSIBLY_DELETED(errno))
            {
                pfree(path);
                return NULL;
            }
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not open file \"%s\": %m", path)));
        }
    }

    pfree(path);

    _fdvec_resize(reln, forknum, 1);
    mdfd = &reln->md_seg_fds[forknum][0];
    mdfd->mdfd_vfd = fd;
    mdfd->mdfd_segno = 0;

    Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));

    return mdfd;
}

/*
 * mdclose() -- Close the specified relation, if it isn't closed already.
 */
void
mdclose(SMgrRelation reln, ForkNumber forknum)
{
    int         nopensegs = reln->md_num_open_segs[forknum];

    /* No work if already closed */
    if (nopensegs == 0)
        return;

    /* close segments starting from the end */
    while (nopensegs > 0)
    {
        MdfdVec    *v = &reln->md_seg_fds[forknum][nopensegs - 1];

        /* if not closed already */
        if (v->mdfd_vfd >= 0)
        {
            FileClose(v->mdfd_vfd);
            v->mdfd_vfd = -1;
        }

        nopensegs--;
    }

    /* resize just once, avoids pointless reallocations */
    _fdvec_resize(reln, forknum, 0);
}

/*
 * mdprefetch() -- Initiate asynchronous read of the specified block of a relation
 */
void
mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
{
#ifdef USE_PREFETCH
    off_t       seekpos;
    MdfdVec    *v;

    v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);

    seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));

    Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

    (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ);
#endif                          /* USE_PREFETCH */
}

/*
 * mdwriteback() -- Tell the kernel to write pages back to storage.
 *
 * This accepts a range of blocks because flushing several pages at once is
 * considerably more efficient than doing so individually.
 */
void
mdwriteback(SMgrRelation reln, ForkNumber forknum,
            BlockNumber blocknum, BlockNumber nblocks)
{
    /*
     * Issue flush requests in as few requests as possible; have to split at
     * segment boundaries though, since those are actually separate files.
     */
    while (nblocks > 0)
    {
        BlockNumber nflush = nblocks;
        off_t       seekpos;
        MdfdVec    *v;
        int         segnum_start,
                    segnum_end;

        v = _mdfd_getseg(reln, forknum, blocknum, true /* not used */ ,
                         EXTENSION_RETURN_NULL);

        /*
         * We might be flushing buffers of already removed relations, that's
         * ok, just ignore that case.
         */
        if (!v)
            return;

        /* compute offset inside the current segment */
        segnum_start = blocknum / RELSEG_SIZE;

        /* compute number of desired writes within the current segment */
        segnum_end = (blocknum + nblocks - 1) / RELSEG_SIZE;
        if (segnum_start != segnum_end)
            nflush = RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE));

        Assert(nflush >= 1);
        Assert(nflush <= nblocks);

        seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));

        FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush);

        nblocks -= nflush;
        blocknum += nflush;
    }
}
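
/*
 * Editor's worked example (not part of md.c): suppose RELSEG_SIZE is 131072
 * blocks and mdwriteback() is asked to flush 5 blocks starting at block
 * 131070.  Blocks 131070-131071 live in segment 0 and blocks 131072-131074
 * in segment 1, so the loop above issues two FileWriteback() calls: one for
 * 2 blocks at the tail of segment 0, then one for 3 blocks at the head of
 * segment 1.
 */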

/*
 * mdread() -- Read the specified block from a relation.
 */
void
mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
       char *buffer)
{
    off_t       seekpos;
    int         nbytes;
    MdfdVec    *v;

    TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
                                        reln->smgr_rnode.node.spcNode,
                                        reln->smgr_rnode.node.dbNode,
                                        reln->smgr_rnode.node.relNode,
                                        reln->smgr_rnode.backend);

    v = _mdfd_getseg(reln, forknum, blocknum, false,
                     EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);

    seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));

    Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

    if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek to block %u in file \"%s\": %m",
                        blocknum, FilePathName(v->mdfd_vfd))));

    nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ);

    TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
                                       reln->smgr_rnode.node.spcNode,
                                       reln->smgr_rnode.node.dbNode,
                                       reln->smgr_rnode.node.relNode,
                                       reln->smgr_rnode.backend,
                                       nbytes,
                                       BLCKSZ);

    if (nbytes != BLCKSZ)
    {
        if (nbytes < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not read block %u in file \"%s\": %m",
                            blocknum, FilePathName(v->mdfd_vfd))));

        /*
         * Short read: we are at or past EOF, or we read a partial block at
         * EOF.  Normally this is an error; upper levels should never try to
         * read a nonexistent block.  However, if zero_damaged_pages is ON or
         * we are InRecovery, we should instead return zeroes without
         * complaining.  This allows, for example, the case of trying to
         * update a block that was later truncated away.
         */
        if (zero_damaged_pages || InRecovery)
            MemSet(buffer, 0, BLCKSZ);
        else
            ereport(ERROR,
                    (errcode(ERRCODE_DATA_CORRUPTED),
                     errmsg("could not read block %u in file \"%s\": read only %d of %d bytes",
                            blocknum, FilePathName(v->mdfd_vfd),
                            nbytes, BLCKSZ)));
    }
}

/*
 * mdwrite() -- Write the supplied block at the appropriate location.
 *
 * This is to be used only for updating already-existing blocks of a
 * relation (ie, those before the current EOF).  To extend a relation,
 * use mdextend().
 */
void
mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
        char *buffer, bool skipFsync)
{
    off_t       seekpos;
    int         nbytes;
    MdfdVec    *v;

    /* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
    Assert(blocknum < mdnblocks(reln, forknum));
#endif

    TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
                                         reln->smgr_rnode.node.spcNode,
                                         reln->smgr_rnode.node.dbNode,
                                         reln->smgr_rnode.node.relNode,
                                         reln->smgr_rnode.backend);

    v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
                     EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);

    seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));

    Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

    if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek to block %u in file \"%s\": %m",
                        blocknum, FilePathName(v->mdfd_vfd))));

    nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ);

    TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
                                        reln->smgr_rnode.node.spcNode,
                                        reln->smgr_rnode.node.dbNode,
                                        reln->smgr_rnode.node.relNode,
                                        reln->smgr_rnode.backend,
                                        nbytes,
                                        BLCKSZ);

    if (nbytes != BLCKSZ)
    {
        if (nbytes < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write block %u in file \"%s\": %m",
                            blocknum, FilePathName(v->mdfd_vfd))));
        /* short write: complain appropriately */
        ereport(ERROR,
                (errcode(ERRCODE_DISK_FULL),
                 errmsg("could not write block %u in file \"%s\": wrote only %d of %d bytes",
                        blocknum,
                        FilePathName(v->mdfd_vfd),
                        nbytes, BLCKSZ),
                 errhint("Check free disk space.")));
    }

    if (!skipFsync && !SmgrIsTemp(reln))
        register_dirty_segment(reln, forknum, v);
}

/*
 * mdnblocks() -- Get the number of blocks stored in a relation.
 *
 * Important side effect: all active segments of the relation are opened
 * and added to the mdfd_seg_fds array.  If this routine has not been
 * called, then only segments up to the last one actually touched
 * are present in the array.
 */
BlockNumber
mdnblocks(SMgrRelation reln, ForkNumber forknum)
{
    MdfdVec    *v = mdopen(reln, forknum, EXTENSION_FAIL);
    BlockNumber nblocks;
    BlockNumber segno = 0;

    /* mdopen has opened the first segment */
    Assert(reln->md_num_open_segs[forknum] > 0);

    /*
     * Start from the last open segments, to avoid redundant seeks.  We have
     * previously verified that these segments are exactly RELSEG_SIZE long,
     * and it's useless to recheck that each time.
     *
     * NOTE: this assumption could only be wrong if another backend has
     * truncated the relation.  We rely on higher code levels to handle that
     * scenario by closing and re-opening the md fd, which is handled via
     * relcache flush.  (Since the checkpointer doesn't participate in
     * relcache flush, it could have segment entries for inactive segments;
     * that's OK because the checkpointer never needs to compute relation
     * size.)
     */
    segno = reln->md_num_open_segs[forknum] - 1;
    v = &reln->md_seg_fds[forknum][segno];

    for (;;)
    {
        nblocks = _mdnblocks(reln, forknum, v);
        if (nblocks > ((BlockNumber) RELSEG_SIZE))
            elog(FATAL, "segment too big");
        if (nblocks < ((BlockNumber) RELSEG_SIZE))
            return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;

        /*
         * If segment is exactly RELSEG_SIZE, advance to next one.
         */
        segno++;

        /*
         * We used to pass O_CREAT here, but that has the disadvantage that
         * it might create a segment which has vanished through some operating
         * system misadventure.  In such a case, creating the segment here
         * undermines _mdfd_getseg's attempts to notice and report an error
         * upon access to a missing segment.
         */
        v = _mdfd_openseg(reln, forknum, segno, 0);
        if (v == NULL)
            return segno * ((BlockNumber) RELSEG_SIZE);
    }
}
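
/*
 * Editor's illustrative sketch (not part of md.c): a caller computing a
 * relation fork's size in bytes from mdnblocks().  The function name is
 * hypothetical.
 */
#ifdef NOT_USED
static uint64
example_fork_size_in_bytes(SMgrRelation reln, ForkNumber forknum)
{
    /* total blocks across all active segments, times the block size */
    return (uint64) mdnblocks(reln, forknum) * BLCKSZ;
}
#endif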

/*
 * mdtruncate() -- Truncate relation to specified number of blocks.
 */
void
mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
{
    BlockNumber curnblk;
    BlockNumber priorblocks;
    int         curopensegs;

    /*
     * NOTE: mdnblocks makes sure we have opened all active segments, so that
     * truncation loop will get them all!
     */
    curnblk = mdnblocks(reln, forknum);
    if (nblocks > curnblk)
    {
        /* Bogus request ... but no complaint if InRecovery */
        if (InRecovery)
            return;
        ereport(ERROR,
                (errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
                        relpath(reln->smgr_rnode, forknum),
                        nblocks, curnblk)));
    }
    if (nblocks == curnblk)
        return;                 /* no work */

    /*
     * Truncate segments, starting at the last one.  Starting at the end makes
     * managing the memory for the fd array easier, should there be errors.
     */
    curopensegs = reln->md_num_open_segs[forknum];
    while (curopensegs > 0)
    {
        MdfdVec    *v;

        priorblocks = (curopensegs - 1) * RELSEG_SIZE;

        v = &reln->md_seg_fds[forknum][curopensegs - 1];

        if (priorblocks > nblocks)
        {
            /*
             * This segment is no longer active.  We truncate the file, but do
             * not delete it, for reasons explained in the header comments.
             */
            if (FileTruncate(v->mdfd_vfd, 0) < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not truncate file \"%s\": %m",
                                FilePathName(v->mdfd_vfd))));

            if (!SmgrIsTemp(reln))
                register_dirty_segment(reln, forknum, v);

            /* we never drop the 1st segment */
            Assert(v != &reln->md_seg_fds[forknum][0]);

            FileClose(v->mdfd_vfd);
            _fdvec_resize(reln, forknum, curopensegs - 1);
        }
        else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
        {
            /*
             * This is the last segment we want to keep.  Truncate the file to
             * the right length.  NOTE: if nblocks is exactly a multiple K of
             * RELSEG_SIZE, we will truncate the K+1st segment to 0 length but
             * keep it.  This adheres to the invariant given in the header
             * comments.
             */
            BlockNumber lastsegblocks = nblocks - priorblocks;

            if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ) < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not truncate file \"%s\" to %u blocks: %m",
                                FilePathName(v->mdfd_vfd),
                                nblocks)));
            if (!SmgrIsTemp(reln))
                register_dirty_segment(reln, forknum, v);
        }
        else
        {
            /*
             * We still need this segment, so nothing to do for this and any
             * earlier segment.
             */
            break;
        }
        curopensegs--;
    }
}

/*
 * mdimmedsync() -- Immediately sync a relation to stable storage.
 *
 * Note that only writes already issued are synced; this routine knows
 * nothing of dirty buffers that may exist inside the buffer manager.
 */
void
mdimmedsync(SMgrRelation reln, ForkNumber forknum)
{
    int         segno;

    /*
     * NOTE: mdnblocks makes sure we have opened all active segments, so that
     * fsync loop will get them all!
     */
    mdnblocks(reln, forknum);

    segno = reln->md_num_open_segs[forknum];

    while (segno > 0)
    {
        MdfdVec    *v = &reln->md_seg_fds[forknum][segno - 1];

        if (FileSync(v->mdfd_vfd) < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not fsync file \"%s\": %m",
                            FilePathName(v->mdfd_vfd))));
        segno--;
    }
}

/*
 * mdsync() -- Sync previous writes to stable storage.
 */
void
mdsync(void)
{
    static bool mdsync_in_progress = false;

    HASH_SEQ_STATUS hstat;
    PendingOperationEntry *entry;
    int         absorb_counter;

    /* Statistics on sync times */
    int         processed = 0;
    instr_time  sync_start,
                sync_end,
                sync_diff;
    uint64      elapsed;
    uint64      longest = 0;
    uint64      total_elapsed = 0;

    /*
     * This is only called during checkpoints, and checkpoints should only
     * occur in processes that have created a pendingOpsTable.
     */
    if (!pendingOpsTable)
        elog(ERROR, "cannot sync without a pendingOpsTable");

    /*
     * If we are in the checkpointer, the sync had better include all fsync
     * requests that were queued by backends up to this point.  The tightest
     * race condition that could occur is that a buffer that must be written
     * and fsync'd for the checkpoint could have been dumped by a backend just
     * before it was visited by BufferSync().  We know the backend will have
     * queued an fsync request before clearing the buffer's dirtybit, so we
     * are safe as long as we do an Absorb after completing BufferSync().
     */
    AbsorbFsyncRequests();

    /*
     * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
     * checkpoint), we want to ignore fsync requests that are entered into the
     * hashtable after this point --- they should be processed next time,
     * instead.  We use mdsync_cycle_ctr to tell old entries apart from new
     * ones: new ones will have cycle_ctr equal to the incremented value of
     * mdsync_cycle_ctr.
     *
     * In normal circumstances, all entries present in the table at this point
     * will have cycle_ctr exactly equal to the current (about to be old)
     * value of mdsync_cycle_ctr.  However, if we fail partway through the
     * fsync'ing loop, then older values of cycle_ctr might remain when we
     * come back here to try again.  Repeated checkpoint failures would
     * eventually wrap the counter around to the point where an old entry
     * might appear new, causing us to skip it, possibly allowing a checkpoint
     * to succeed that should not have.  To forestall wraparound, any time the
     * previous mdsync() failed to complete, run through the table and
     * forcibly set cycle_ctr = mdsync_cycle_ctr.
     *
     * Think not to merge this loop with the main loop, as the problem is
     * exactly that that loop may fail before having visited all the entries.
     * From a performance point of view it doesn't matter anyway, as this path
     * will never be taken in a system that's functioning normally.
     */
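    /*
     * Editor's worked example (not part of md.c): CycleCtr is uint16, so the
     * comparisons in this function are modulo 65536 and survive a single
     * wraparound.  E.g. with entry->cycle_ctr == 65535 and a freshly
     * incremented mdsync_cycle_ctr == 0, the check below,
     * (CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr, still holds
     * because the uint16 cast reduces 65536 to 0; a plain int comparison
     * of 65536 against 0 would not.
     */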
    if (mdsync_in_progress)
    {
        /* prior try failed, so update any stale cycle_ctr values */
        hash_seq_init(&hstat, pendingOpsTable);
        while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
        {
            entry->cycle_ctr = mdsync_cycle_ctr;
        }
    }

    /* Advance counter so that new hashtable entries are distinguishable */
    mdsync_cycle_ctr++;

    /* Set flag to detect failure if we don't reach the end of the loop */
    mdsync_in_progress = true;

    /* Now scan the hashtable for fsync requests to process */
    absorb_counter = FSYNCS_PER_ABSORB;
    hash_seq_init(&hstat, pendingOpsTable);
    while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
    {
        ForkNumber  forknum;

        /*
         * If the entry is new then don't process it this time; it might
         * contain multiple fsync-request bits, but they are all new.  Note
         * "continue" bypasses the hash-remove call at the bottom of the loop.
         */
        if (entry->cycle_ctr == mdsync_cycle_ctr)
            continue;

        /* Else assert we haven't missed it */
        Assert((CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr);

        /*
         * Scan over the forks and segments represented by the entry.
         *
         * The bitmap manipulations are slightly tricky, because we can call
         * AbsorbFsyncRequests() inside the loop and that could result in
         * bms_add_member() modifying and even re-palloc'ing the bitmapsets.
         * This is okay because we unlink each bitmapset from the hashtable
         * entry before scanning it.  That means that any incoming fsync
         * requests will be processed now if they reach the table before we
         * begin to scan their fork.
         */
        for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
        {
            Bitmapset  *requests = entry->requests[forknum];
            int         segno;

            entry->requests[forknum] = NULL;
            entry->canceled[forknum] = false;

            while ((segno = bms_first_member(requests)) >= 0)
            {
                int         failures;

                /*
                 * If fsync is off then we don't have to bother opening the
                 * file at all.  (We delay checking until this point so that
                 * changing fsync on the fly behaves sensibly.)
                 */
                if (!enableFsync)
                    continue;

                /*
                 * If in checkpointer, we want to absorb pending requests
                 * every so often to prevent overflow of the fsync request
                 * queue.  It is unspecified whether newly-added entries will
                 * be visited by hash_seq_search, but we don't care since we
                 * don't need to process them anyway.
                 */
                if (--absorb_counter <= 0)
                {
                    AbsorbFsyncRequests();
                    absorb_counter = FSYNCS_PER_ABSORB;
                }

                /*
                 * The fsync table could contain requests to fsync segments
                 * that have been deleted (unlinked) by the time we get to
                 * them.  Rather than just hoping an ENOENT (or EACCES on
                 * Windows) error can be ignored, what we do on error is
                 * absorb pending requests and then retry.  Since mdunlink()
                 * queues a "cancel" message before actually unlinking, the
                 * fsync request is guaranteed to be marked canceled after the
                 * absorb if it really was this case.  DROP DATABASE likewise
                 * has to tell us to forget fsync requests before it starts
                 * deletions.
                 */
                for (failures = 0;; failures++) /* loop exits at "break" */
                {
                    SMgrRelation reln;
                    MdfdVec    *seg;
                    char       *path;
                    int         save_errno;

                    /*
                     * Find or create an smgr hash entry for this relation.
                     * This may seem a bit unclean -- md calling smgr?  But
                     * it's really the best solution.  It ensures that the
                     * open file reference isn't permanently leaked if we get
                     * an error here.  (You may say "but an unreferenced
                     * SMgrRelation is still a leak!"  Not really, because the
                     * only case in which a checkpoint is done by a process
                     * that isn't about to shut down is in the checkpointer,
                     * and it will periodically do smgrcloseall().  This fact
                     * justifies our not closing the reln in the success path
                     * either, which is a good thing since in non-checkpointer
                     * cases we couldn't safely do that.)
                     */
                    reln = smgropen(entry->rnode, InvalidBackendId);

                    /* Attempt to open and fsync the target segment */
                    seg = _mdfd_getseg(reln, forknum,
                                       (BlockNumber) segno * (BlockNumber) RELSEG_SIZE,
                                       false,
                                       EXTENSION_RETURN_NULL
                                       | EXTENSION_DONT_CHECK_SIZE);

                    INSTR_TIME_SET_CURRENT(sync_start);

                    if (seg != NULL &&
                        FileSync(seg->mdfd_vfd) >= 0)
                    {
                        /* Success; update statistics about sync timing */
                        INSTR_TIME_SET_CURRENT(sync_end);
                        sync_diff = sync_end;
                        INSTR_TIME_SUBTRACT(sync_diff, sync_start);
                        elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
                        if (elapsed > longest)
                            longest = elapsed;
                        total_elapsed += elapsed;
                        processed++;
                        if (log_checkpoints)
                            elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
                                 processed,
                                 FilePathName(seg->mdfd_vfd),
                                 (double) elapsed / 1000);

                        break;  /* out of retry loop */
                    }

                    /* Compute file name for use in message */
                    save_errno = errno;
                    path = _mdfd_segpath(reln, forknum, (BlockNumber) segno);
                    errno = save_errno;

                    /*
                     * It is possible that the relation has been dropped or
                     * truncated since the fsync request was entered.
                     * Therefore, allow ENOENT, but only if we didn't fail
                     * already on this file.  This applies both for
                     * _mdfd_getseg() and for FileSync, since fd.c might have
                     * closed the file behind our back.
                     *
                     * XXX is there any point in allowing more than one retry?
                     * Don't see one at the moment, but easy to change the
                     * test here if so.
                     */
                    if (!FILE_POSSIBLY_DELETED(errno) ||
                        failures > 0)
                        ereport(ERROR,
                                (errcode_for_file_access(),
                                 errmsg("could not fsync file \"%s\": %m",
                                        path)));
                    else
                        ereport(DEBUG1,
                                (errcode_for_file_access(),
                                 errmsg("could not fsync file \"%s\" but retrying: %m",
                                        path)));
                    pfree(path);

                    /*
                     * Absorb incoming requests and check to see if a cancel
                     * arrived for this relation fork.
                     */
                    AbsorbFsyncRequests();
                    absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */

                    if (entry->canceled[forknum])
                        break;
                }               /* end retry loop */
            }
            bms_free(requests);
        }

        /*
         * We've finished everything that was requested before we started to
         * scan the entry.  If no new requests have been inserted meanwhile,
         * remove the entry.  Otherwise, update its cycle counter, as all the
         * requests now in it must have arrived during this cycle.
         */
        for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
        {
            if (entry->requests[forknum] != NULL)
                break;
        }
        if (forknum <= MAX_FORKNUM)
            entry->cycle_ctr = mdsync_cycle_ctr;
        else
        {
            /* Okay to remove it */
            if (hash_search(pendingOpsTable, &entry->rnode,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "pendingOpsTable corrupted");
        }
    }                           /* end loop over hashtable entries */

    /* Return sync performance metrics for report at checkpoint end */
    CheckpointStats.ckpt_sync_rels = processed;
    CheckpointStats.ckpt_longest_sync = longest;
    CheckpointStats.ckpt_agg_sync_time = total_elapsed;

    /* Flag successful completion of mdsync */
    mdsync_in_progress = false;
}

/*
 * mdpreckpt() -- Do pre-checkpoint work
 *
 * To distinguish unlink requests that arrived before this checkpoint
 * started from those that arrived during the checkpoint, we use a cycle
 * counter similar to the one we use for fsync requests.  That cycle
 * counter is incremented here.
 *
 * This must be called *before* the checkpoint REDO point is determined.
 * That ensures that we won't delete files too soon.
 *
 * Note that we can't do anything here that depends on the assumption
 * that the checkpoint will be completed.
 */
void
mdpreckpt(void)
{
    /*
     * Any unlink requests arriving after this point will be assigned the next
     * cycle counter, and won't be unlinked until next checkpoint.
     */
    mdckpt_cycle_ctr++;
}

/*
 * mdpostckpt() -- Do post-checkpoint work
 *
 * Remove any lingering files that can now be safely removed.
 */
void
mdpostckpt(void)
{
    int         absorb_counter;

    absorb_counter = UNLINKS_PER_ABSORB;
    while (pendingUnlinks != NIL)
    {
        PendingUnlinkEntry *entry = (PendingUnlinkEntry *) linitial(pendingUnlinks);
        char       *path;

        /*
         * New entries are appended to the end, so if the entry is new we've
         * reached the end of old entries.
         *
         * Note: if just the right number of consecutive checkpoints fail, we
         * could be fooled here by cycle_ctr wraparound.  However, the only
         * consequence is that we'd delay unlinking for one more checkpoint,
         * which is perfectly tolerable.
         */
        if (entry->cycle_ctr == mdckpt_cycle_ctr)
            break;

        /* Unlink the file */
        path = relpathperm(entry->rnode, MAIN_FORKNUM);
        if (unlink(path) < 0)
        {
            /*
             * There's a race condition, when the database is dropped at the
             * same time that we process the pending unlink requests.  If the
             * DROP DATABASE deletes the file before we do, we will get ENOENT
             * here.  rmtree() also has to ignore ENOENT errors, to deal with
             * the possibility that we delete the file first.
             */
            if (errno != ENOENT)
                ereport(WARNING,
                        (errcode_for_file_access(),
                         errmsg("could not remove file \"%s\": %m", path)));
        }
        pfree(path);

        /* And remove the list entry */
        pendingUnlinks = list_delete_first(pendingUnlinks);
        pfree(entry);

        /*
         * As in mdsync, we don't want to stop absorbing fsync requests for a
         * long time when there are many deletions to be done.  We can safely
         * call AbsorbFsyncRequests() at this point in the loop (note it might
         * try to delete list entries).
         */
        if (--absorb_counter <= 0)
        {
            AbsorbFsyncRequests();
            absorb_counter = UNLINKS_PER_ABSORB;
        }
    }
}

/*
 * register_dirty_segment() -- Mark a relation segment as needing fsync
 *
 * If there is a local pending-ops table, just make an entry in it for
 * mdsync to process later.  Otherwise, try to pass off the fsync request
 * to the checkpointer process.  If that fails, just do the fsync
 * locally before returning (we hope this will not happen often enough
 * to be a performance problem).
 */
static void
register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
{
    /* Temp relations should never be fsync'd */
    Assert(!SmgrIsTemp(reln));

    if (pendingOpsTable)
    {
        /* push it into local pending-ops table */
        RememberFsyncRequest(reln->smgr_rnode.node, forknum, seg->mdfd_segno);
    }
    else
    {
        if (ForwardFsyncRequest(reln->smgr_rnode.node, forknum, seg->mdfd_segno))
            return;             /* passed it off successfully */

        ereport(DEBUG1,
                (errmsg("could not forward fsync request because request queue is full")));

        if (FileSync(seg->mdfd_vfd) < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not fsync file \"%s\": %m",
                            FilePathName(seg->mdfd_vfd))));
    }
}

/*
 * register_unlink() -- Schedule a file to be deleted after next checkpoint
 *
 * We don't bother passing in the fork number, because this is only used
 * with main forks.
 *
 * As with register_dirty_segment, this could involve either a local or
 * a remote pending-ops table.
 */
static void
register_unlink(RelFileNodeBackend rnode)
{
    /* Should never be used with temp relations */
    Assert(!RelFileNodeBackendIsTemp(rnode));

    if (pendingOpsTable)
    {
        /* push it into local pending-ops table */
        RememberFsyncRequest(rnode.node, MAIN_FORKNUM,
                             UNLINK_RELATION_REQUEST);
    }
    else
    {
        /*
         * Notify the checkpointer about it.  If we fail to queue the request
         * message, we have to sleep and try again, because we can't simply
         * delete the file now.  Ugly, but hopefully won't happen often.
         *
         * XXX should we just leave the file orphaned instead?
         */
        Assert(IsUnderPostmaster);
        while (!ForwardFsyncRequest(rnode.node, MAIN_FORKNUM,
                                    UNLINK_RELATION_REQUEST))
            pg_usleep(10000L);  /* 10 msec seems a good number */
    }
}

/*
 * RememberFsyncRequest() -- callback from checkpointer side of fsync request
 *
 * We stuff fsync requests into the local hash table for execution
 * during the checkpointer's next checkpoint.  UNLINK requests go into a
 * separate linked list, however, because they get processed separately.
 *
 * The range of possible segment numbers is way less than the range of
 * BlockNumber, so we can reserve high values of segno for special purposes.
 * We define three:
 * - FORGET_RELATION_FSYNC means to cancel pending fsyncs for a relation,
 *   either for one fork, or all forks if forknum is InvalidForkNumber
 * - FORGET_DATABASE_FSYNC means to cancel pending fsyncs for a whole database
 * - UNLINK_RELATION_REQUEST is a request to delete the file after the next
 *   checkpoint.
 * Note also that we're assuming real segment numbers don't exceed INT_MAX.
 *
 * (Handling FORGET_DATABASE_FSYNC requests is a tad slow because the hash
 * table has to be searched linearly, but dropping a database is a pretty
 * heavyweight operation anyhow, so we'll live with it.)
 */
void
RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
    Assert(pendingOpsTable);

    if (segno == FORGET_RELATION_FSYNC)
    {
        /* Remove any pending requests for the relation (one or all forks) */
        PendingOperationEntry *entry;

        entry = (PendingOperationEntry *) hash_search(pendingOpsTable,
                                                      &rnode,
                                                      HASH_FIND,
                                                      NULL);
        if (entry)
        {
            /*
             * We can't just delete the entry since mdsync could have an
             * active hashtable scan.  Instead we delete the bitmapsets; this
             * is safe because of the way mdsync is coded.  We also set the
             * "canceled" flags so that mdsync can tell that a cancel arrived
             * for the fork(s).
             */
            if (forknum == InvalidForkNumber)
            {
                /* remove requests for all forks */
                for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
                {
                    bms_free(entry->requests[forknum]);
                    entry->requests[forknum] = NULL;
                    entry->canceled[forknum] = true;
                }
            }
            else
            {
                /* remove requests for single fork */
                bms_free(entry->requests[forknum]);
                entry->requests[forknum] = NULL;
                entry->canceled[forknum] = true;
            }
        }
    }
    else if (segno == FORGET_DATABASE_FSYNC)
    {
        /* Remove any pending requests for the entire database */
        HASH_SEQ_STATUS hstat;
        PendingOperationEntry *entry;
        ListCell   *cell,
                   *prev,
                   *next;

        /* Remove fsync requests */
        hash_seq_init(&hstat, pendingOpsTable);
        while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
        {
            if (entry->rnode.dbNode == rnode.dbNode)
            {
                /* remove requests for all forks */
                for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
                {
                    bms_free(entry->requests[forknum]);
                    entry->requests[forknum] = NULL;
                    entry->canceled[forknum] = true;
                }
            }
        }

        /* Remove unlink requests */
        prev = NULL;
        for (cell = list_head(pendingUnlinks); cell; cell = next)
        {
            PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);

            next = lnext(cell);
            if (entry->rnode.dbNode == rnode.dbNode)
            {
                pendingUnlinks = list_delete_cell(pendingUnlinks, cell, prev);
                pfree(entry);
            }
            else
                prev = cell;
        }
    }
    else if (segno == UNLINK_RELATION_REQUEST)
    {
        /* Unlink request: put it in the linked list */
        MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
        PendingUnlinkEntry *entry;

        /* PendingUnlinkEntry doesn't store forknum, since it's always MAIN */
        Assert(forknum == MAIN_FORKNUM);

        entry = palloc(sizeof(PendingUnlinkEntry));
        entry->rnode = rnode;
        entry->cycle_ctr = mdckpt_cycle_ctr;

        pendingUnlinks = lappend(pendingUnlinks, entry);

        MemoryContextSwitchTo(oldcxt);
    }
    else
    {
        /* Normal case: enter a request to fsync this segment */
        MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
        PendingOperationEntry *entry;
        bool        found;

        entry = (PendingOperationEntry *) hash_search(pendingOpsTable,
                                                      &rnode,
                                                      HASH_ENTER,
                                                      &found);
        /* if new entry, initialize it */
        if (!found)
        {
            entry->cycle_ctr = mdsync_cycle_ctr;
            MemSet(entry->requests, 0, sizeof(entry->requests));
            MemSet(entry->canceled, 0, sizeof(entry->canceled));
        }

        /*
         * NB: it's intentional that we don't change cycle_ctr if the entry
         * already exists.  The cycle_ctr must represent the oldest fsync
         * request that could be in the entry.
         */

        entry->requests[forknum] = bms_add_member(entry->requests[forknum],
                                                  (int) segno);

        MemoryContextSwitchTo(oldcxt);
    }
}

/*
 * ForgetRelationFsyncRequests -- forget any fsyncs for a relation fork
 *
 * forknum == InvalidForkNumber means all forks, although this code doesn't
 * actually know that, since it's just forwarding the request elsewhere.
 */
void
ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum)
{
    if (pendingOpsTable)
    {
        /* standalone backend or startup process: fsync state is local */
        RememberFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC);
    }
    else if (IsUnderPostmaster)
    {
        /*
         * Notify the checkpointer about it.  If we fail to queue the cancel
         * message, we have to sleep and try again ... ugly, but hopefully
         * won't happen often.
         *
         * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
         * error would leave the no-longer-used file still present on disk,
         * which would be bad, so I'm inclined to assume that the checkpointer
         * will always empty the queue soon.
         */
        while (!ForwardFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC))
            pg_usleep(10000L);  /* 10 msec seems a good number */

        /*
         * Note we don't wait for the checkpointer to actually absorb the
         * cancel message; see mdsync() for the implications.
         */
    }
}

/*
 * ForgetDatabaseFsyncRequests -- forget any fsyncs and unlinks for a DB
 */
void
ForgetDatabaseFsyncRequests(Oid dbid)
{
    RelFileNode rnode;

    rnode.dbNode = dbid;
    rnode.spcNode = 0;
    rnode.relNode = 0;

    if (pendingOpsTable)
    {
        /* standalone backend or startup process: fsync state is local */
        RememberFsyncRequest(rnode, InvalidForkNumber, FORGET_DATABASE_FSYNC);
    }
    else if (IsUnderPostmaster)
    {
        /* see notes in ForgetRelationFsyncRequests */
        while (!ForwardFsyncRequest(rnode, InvalidForkNumber,
                                    FORGET_DATABASE_FSYNC))
            pg_usleep(10000L);  /* 10 msec seems a good number */
    }
}
1705 
1706 
1707 /*
1708  * _fdvec_resize() -- Resize the fork's open segments array
1709  */
1710 static void
1712  ForkNumber forknum,
1713  int nseg)
1714 {
1715  if (nseg == 0)
1716  {
1717  if (reln->md_num_open_segs[forknum] > 0)
1718  {
1719  pfree(reln->md_seg_fds[forknum]);
1720  reln->md_seg_fds[forknum] = NULL;
1721  }
1722  }
1723  else if (reln->md_num_open_segs[forknum] == 0)
1724  {
1725  reln->md_seg_fds[forknum] =
1726  MemoryContextAlloc(MdCxt, sizeof(MdfdVec) * nseg);
1727  }
1728  else
1729  {
1730  /*
1731  * It doesn't seem worthwhile complicating the code by having a more
1732  * aggressive growth strategy here; the number of segments doesn't
1733  * grow that fast, and the memory context internally will sometimes
1734  * avoid doing an actual reallocation.
1735  */
1736  reln->md_seg_fds[forknum] =
1737  repalloc(reln->md_seg_fds[forknum],
1738  sizeof(MdfdVec) * nseg);
1739  }
1740 
1741  reln->md_num_open_segs[forknum] = nseg;
1742 }
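/*
 * Editor's illustration (not part of the original source): if
 * md_num_open_segs[forknum] is 2 and segment 2 is opened next,
 * _mdfd_openseg() calls _fdvec_resize(reln, forknum, 3), which takes the
 * repalloc path and grows md_seg_fds[forknum] from two MdfdVec slots to
 * three; a call with nseg == 0 instead frees the array entirely.
 */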
1743 
1744 /*
1745  * Return the filename for the specified segment of the relation. The
1746  * returned string is palloc'd.
1747  */
1748 static char *
1749 _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
1750 {
1751  char *path,
1752  *fullpath;
1753 
1754  path = relpath(reln->smgr_rnode, forknum);
1755 
1756  if (segno > 0)
1757  {
1758  fullpath = psprintf("%s.%u", path, segno);
1759  pfree(path);
1760  }
1761  else
1762  fullpath = path;
1763 
1764  return fullpath;
1765 }
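/*
 * Editor's illustration (not part of the original source; the OIDs are
 * invented): for a relation whose relpath() is "base/16384/16385",
 *
 *  segno 0  =>  "base/16384/16385"
 *  segno 2  =>  "base/16384/16385.2"
 *
 * i.e. only segments after the first carry a ".<segno>" suffix.
 */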
1766 
1767 /*
1768  * Open the specified segment of the relation,
1769  * and make a MdfdVec object for it. Returns NULL on failure.
1770  */
1771 static MdfdVec *
1772 _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
1773  int oflags)
1774 {
1775  MdfdVec *v;
1776  int fd;
1777  char *fullpath;
1778 
1779  fullpath = _mdfd_segpath(reln, forknum, segno);
1780 
1781  /* open the file */
1782  fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | oflags, 0600);
1783 
1784  pfree(fullpath);
1785 
1786  if (fd < 0)
1787  return NULL;
1788 
1789  if (segno >= reln->md_num_open_segs[forknum])
1790  _fdvec_resize(reln, forknum, segno + 1);
1791 
1792  /* fill the entry */
1793  v = &reln->md_seg_fds[forknum][segno];
1794  v->mdfd_vfd = fd;
1795  v->mdfd_segno = segno;
1796 
1797  Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
1798 
1799  /* all done */
1800  return v;
1801 }
1802 
1803 /*
1804  * _mdfd_getseg() -- Find the segment of the relation holding the
1805  * specified block.
1806  *
1807  * If the segment doesn't exist, we ereport, return NULL, or create the
1808  * segment, according to "behavior". Note: skipFsync is only used in the
1809  * EXTENSION_CREATE case.
1810  */
1811 static MdfdVec *
1812 _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
1813  bool skipFsync, int behavior)
1814 {
1815  MdfdVec *v;
1816  BlockNumber targetseg;
1817  BlockNumber nextsegno;
1818 
1819  /* some way to handle non-existent segments needs to be specified */
1820  Assert(behavior &
1821  (EXTENSION_FAIL | EXTENSION_CREATE | EXTENSION_RETURN_NULL));
1822 
1823  targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
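 /*
  * Editor's illustration (not in the original source): with the usual
  * build defaults of BLCKSZ = 8192 and RELSEG_SIZE = 131072 (1 GB per
  * segment file), blkno 300000 gives targetseg = 300000 / 131072 = 2,
  * i.e. the block lives in the third segment file, "<relpath>.2".
  */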
1824 
1825  /* if an existing and opened segment, we're done */
1826  if (targetseg < reln->md_num_open_segs[forknum])
1827  {
1828  v = &reln->md_seg_fds[forknum][targetseg];
1829  return v;
1830  }
1831 
1832  /*
1833  * The target segment is not yet open. Iterate over all the segments
1834  * between the last opened and the target segment. This way missing
1835  * segments either raise an error, or get created (according to
1836  * 'behavior'). Start with either the last opened, or the first segment if
1837  * none was opened before.
1838  */
1839  if (reln->md_num_open_segs[forknum] > 0)
1840  v = &reln->md_seg_fds[forknum][reln->md_num_open_segs[forknum] - 1];
1841  else
1842  {
1843  v = mdopen(reln, forknum, behavior);
1844  if (!v)
1845  return NULL; /* if behavior & EXTENSION_RETURN_NULL */
1846  }
1847 
1848  for (nextsegno = reln->md_num_open_segs[forknum];
1849  nextsegno <= targetseg; nextsegno++)
1850  {
1851  BlockNumber nblocks = _mdnblocks(reln, forknum, v);
1852  int flags = 0;
1853 
1854  Assert(nextsegno == v->mdfd_segno + 1);
1855 
1856  if (nblocks > ((BlockNumber) RELSEG_SIZE))
1857  elog(FATAL, "segment too big");
1858 
1859  if ((behavior & EXTENSION_CREATE) ||
1860  (InRecovery && (behavior & EXTENSION_CREATE_RECOVERY)))
1861  {
1862  /*
1863  * Normally we will create new segments only if authorized by the
1864  * caller (i.e., we are doing mdextend()). But when doing WAL
1865  * recovery, create segments anyway; this allows cases such as
1866  * replaying WAL data that has a write into a high-numbered
1867  * segment of a relation that was later deleted. We want to go
1868  * ahead and create the segments so we can finish out the replay.
1869  * However if the caller has specified
1870  * EXTENSION_REALLY_RETURN_NULL, then extension is not desired
1871  * even in recovery; we won't reach this point in that case.
1872  *
1873  * We have to maintain the invariant that segments before the last
1874  * active segment are of size RELSEG_SIZE; therefore, if
1875  * extending, pad them out with zeroes if needed. (This only
1876  * matters if in recovery, or if the caller is extending the
1877  * relation discontiguously, but that can happen in hash indexes.)
1878  */
1879  if (nblocks < ((BlockNumber) RELSEG_SIZE))
1880  {
1881  char *zerobuf = palloc0(BLCKSZ);
1882 
1883  mdextend(reln, forknum,
1884  nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
1885  zerobuf, skipFsync);
1886  pfree(zerobuf);
1887  }
1888  flags = O_CREAT;
1889  }
1890  else if (!(behavior & EXTENSION_DONT_CHECK_SIZE) &&
1891  nblocks < ((BlockNumber) RELSEG_SIZE))
1892  {
1893  /*
1894  * When not extending (or explicitly including truncated
1895  * segments), only open the next segment if the current one is
1896  * exactly RELSEG_SIZE. If not (this branch), either return NULL
1897  * or fail.
1898  */
1899  if (behavior & EXTENSION_RETURN_NULL)
1900  {
1901  /*
1902  * Some callers discern between reasons for _mdfd_getseg()
1903  * returning NULL based on errno. As there's no failing
1904  * syscall involved in this case, explicitly set errno to
1905  * ENOENT, as that seems the closest interpretation.
1906  */
1907  errno = ENOENT;
1908  return NULL;
1909  }
1910 
1911  ereport(ERROR,
1912  (errcode_for_file_access(),
1913  errmsg("could not open file \"%s\" (target block %u): previous segment is only %u blocks",
1914  _mdfd_segpath(reln, forknum, nextsegno),
1915  blkno, nblocks)));
1916  }
1917 
1918  v = _mdfd_openseg(reln, forknum, nextsegno, flags);
1919 
1920  if (v == NULL)
1921  {
1922  if ((behavior & EXTENSION_RETURN_NULL) &&
1923  FILE_POSSIBLY_DELETED(errno))
1924  return NULL;
1925  ereport(ERROR,
1926  (errcode_for_file_access(),
1927  errmsg("could not open file \"%s\" (target block %u): %m",
1928  _mdfd_segpath(reln, forknum, nextsegno),
1929  blkno)));
1930  }
1931  }
1932 
1933  return v;
1934 }
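/*
 * Editor's sketch (not part of the original source) of a typical read-path
 * call, assuming the convention used elsewhere in this file of failing on
 * a missing segment except during WAL replay, where creation is allowed:
 */
#if 0
 MdfdVec    *v;

 v = _mdfd_getseg(reln, forknum, blocknum, false,
                  EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
 /* v->mdfd_vfd is now an open VFD for the segment containing blocknum */
#endif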
1935 
1936 /*
1937  * Get number of blocks present in a single disk file
1938  */
1939 static BlockNumber
1940 _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1941 {
1942  off_t len;
1943 
1944  len = FileSeek(seg->mdfd_vfd, 0L, SEEK_END);
1945  if (len < 0)
1946  ereport(ERROR,
1947  (errcode_for_file_access(),
1948  errmsg("could not seek to end of file \"%s\": %m",
1949  FilePathName(seg->mdfd_vfd))));
1950  /* note that this calculation will ignore any partial block at EOF */
1951  return (BlockNumber) (len / BLCKSZ);
1952 }
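/*
 * Editor's illustration (not in the original source): with BLCKSZ = 8192,
 * a segment file of 12000 bytes makes FileSeek(..., SEEK_END) return
 * 12000, so _mdnblocks() reports 12000 / 8192 = 1 block; the trailing
 * partial block is deliberately ignored.
 */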