storage.c
1 /*-------------------------------------------------------------------------
2  *
3  * storage.c
4  * code to create and destroy physical storage for relations
5  *
6  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/catalog/storage.c
12  *
13  * NOTES
14  * Some of this code used to be in storage/smgr/smgr.c, and the
15  * function names still reflect that.
16  *
17  *-------------------------------------------------------------------------
18  */
19 
20 #include "postgres.h"
21 
22 #include "access/parallel.h"
23 #include "access/visibilitymap.h"
24 #include "access/xact.h"
25 #include "access/xlog.h"
26 #include "access/xloginsert.h"
27 #include "access/xlogutils.h"
28 #include "catalog/storage.h"
29 #include "catalog/storage_xlog.h"
30 #include "miscadmin.h"
31 #include "storage/freespace.h"
32 #include "storage/smgr.h"
33 #include "utils/hsearch.h"
34 #include "utils/memutils.h"
35 #include "utils/rel.h"
36 
37 /* GUC variables */
38 int wal_skip_threshold = 2048; /* in kilobytes */
39 
40 /*
41  * We keep a list of all relations (represented as RelFileNode values)
42  * that have been created or deleted in the current transaction. When
43  * a relation is created, we create the physical file immediately, but
44  * remember it so that we can delete the file again if the current
45  * transaction is aborted. Conversely, a deletion request is NOT
46  * executed immediately, but is just entered in the list. When and if
47  * the transaction commits, we can delete the physical file.
48  *
49  * To handle subtransactions, every entry is marked with its transaction
50  * nesting level. At subtransaction commit, we reassign the subtransaction's
51  * entries to the parent nesting level. At subtransaction abort, we can
52  * immediately execute the abort-time actions for all entries of the current
53  * nesting level.
54  *
55  * NOTE: the list is kept in TopMemoryContext to be sure it won't disappear
56  * unbetimes. It'd probably be OK to keep it in TopTransactionContext,
57  * but I'm being paranoid.
58  */
59 
60 typedef struct PendingRelDelete
61 {
62  RelFileNode relnode; /* relation that may need to be deleted */
63  BackendId backend; /* InvalidBackendId if not a temp rel */
64  bool atCommit; /* T=delete at commit; F=delete at abort */
65  int nestLevel; /* xact nesting level of request */
66  struct PendingRelDelete *next; /* linked-list link */
67 } PendingRelDelete;
68 
69 typedef struct PendingRelSync
70 {
71  RelFileNode rnode;
72  bool is_truncated; /* Has the file experienced truncation? */
73 } PendingRelSync;
74 
75 static PendingRelDelete *pendingDeletes = NULL; /* head of linked list */
76 static HTAB *pendingSyncHash = NULL;
77 
78 
79 /*
80  * AddPendingSync
81  * Queue an at-commit fsync.
82  */
83 static void
84 AddPendingSync(const RelFileNode *rnode)
85 {
86  PendingRelSync *pending;
87  bool found;
88 
89  /* create the hash if not yet */
90  if (!pendingSyncHash)
91  {
92  HASHCTL ctl;
93 
94  ctl.keysize = sizeof(RelFileNode);
95  ctl.entrysize = sizeof(PendingRelSync);
96  ctl.hcxt = TopTransactionContext;
97  pendingSyncHash = hash_create("pending sync hash", 16, &ctl,
98  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
99  }
100 
101  pending = hash_search(pendingSyncHash, rnode, HASH_ENTER, &found);
102  Assert(!found);
103  pending->is_truncated = false;
104 }
105 
106 /*
107  * RelationCreateStorage
108  * Create physical storage for a relation.
109  *
110  * Create the underlying disk file storage for the relation. This only
111  * creates the main fork; additional forks are created lazily by the
112  * modules that need them.
113  *
114  * This function is transactional. The creation is WAL-logged, and if the
115  * transaction aborts later on, the storage will be destroyed. A caller
116  * that does not want the storage to be destroyed in case of an abort may
117  * pass register_delete = false.
118  */
119 SMgrRelation
120 RelationCreateStorage(RelFileNode rnode, char relpersistence,
121  bool register_delete)
122 {
123  SMgrRelation srel;
124  BackendId backend;
125  bool needs_wal;
126 
127  Assert(!IsInParallelMode()); /* couldn't update pendingSyncHash */
128 
129  switch (relpersistence)
130  {
131  case RELPERSISTENCE_TEMP:
132  backend = BackendIdForTempRelations();
133  needs_wal = false;
134  break;
135  case RELPERSISTENCE_UNLOGGED:
136  backend = InvalidBackendId;
137  needs_wal = false;
138  break;
139  case RELPERSISTENCE_PERMANENT:
140  backend = InvalidBackendId;
141  needs_wal = true;
142  break;
143  default:
144  elog(ERROR, "invalid relpersistence: %c", relpersistence);
145  return NULL; /* placate compiler */
146  }
147 
148  srel = smgropen(rnode, backend);
149  smgrcreate(srel, MAIN_FORKNUM, false);
150 
151  if (needs_wal)
152  log_smgrcreate(&srel->smgr_rnode.node, MAIN_FORKNUM);
153 
154  /*
155  * Add the relation to the list of stuff to delete at abort, if we are
156  * asked to do so.
157  */
158  if (register_delete)
159  {
160  PendingRelDelete *pending;
161 
162  pending = (PendingRelDelete *)
163  MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
164  pending->relnode = rnode;
165  pending->backend = backend;
166  pending->atCommit = false; /* delete if abort */
167  pending->nestLevel = GetCurrentTransactionNestLevel();
168  pending->next = pendingDeletes;
169  pendingDeletes = pending;
170  }
171 
172  if (relpersistence == RELPERSISTENCE_PERMANENT && !XLogIsNeeded())
173  {
174  Assert(backend == InvalidBackendId);
175  AddPendingSync(&rnode);
176  }
177 
178  return srel;
179 }
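/*
 * Illustrative sketch, not part of storage.c: a minimal caller of
 * RelationCreateStorage for a new permanent relation. The helper name is
 * hypothetical; real callers live in the catalog code such as heap_create().
 */
static SMgrRelation
create_storage_example(RelFileNode rnode)
{
	/*
	 * WAL-log the creation and register an at-abort delete, so the new file
	 * disappears automatically if the surrounding transaction rolls back.
	 */
	return RelationCreateStorage(rnode, RELPERSISTENCE_PERMANENT, true);
}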
180 
181 /*
182  * Perform XLogInsert of an XLOG_SMGR_CREATE record to WAL.
183  */
184 void
185 log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
186 {
187  xl_smgr_create xlrec;
188 
189  /*
190  * Make an XLOG entry reporting the file creation.
191  */
192  xlrec.rnode = *rnode;
193  xlrec.forkNum = forkNum;
194 
195  XLogBeginInsert();
196  XLogRegisterData((char *) &xlrec, sizeof(xlrec));
197  XLogInsert(RM_SMGR_ID, XLOG_SMGR_CREATE | XLR_SPECIAL_REL_UPDATE);
198 }
199 
200 /*
201  * RelationDropStorage
202  * Schedule unlinking of physical storage at transaction commit.
203  */
204 void
205 RelationDropStorage(Relation rel)
206 {
207  PendingRelDelete *pending;
208 
209  /* Add the relation to the list of stuff to delete at commit */
210  pending = (PendingRelDelete *)
211  MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
212  pending->relnode = rel->rd_node;
213  pending->backend = rel->rd_backend;
214  pending->atCommit = true; /* delete if commit */
215  pending->nestLevel = GetCurrentTransactionNestLevel();
216  pending->next = pendingDeletes;
217  pendingDeletes = pending;
218 
219  /*
220  * NOTE: if the relation was created in this transaction, it will now be
221  * present in the pending-delete list twice, once with atCommit true and
222  * once with atCommit false. Hence, it will be physically deleted at end
223  * of xact in either case (and the other entry will be ignored by
224  * smgrDoPendingDeletes, so no error will occur). We could instead remove
225  * the existing list entry and delete the physical file immediately, but
226  * for now I'll keep the logic simple.
227  */
228 
229  RelationCloseSmgr(rel);
230 }
231 
232 /*
233  * RelationPreserveStorage
234  * Mark a relation as not to be deleted after all.
235  *
236  * We need this function because relation mapping changes are committed
237  * separately from commit of the whole transaction, so it's still possible
238  * for the transaction to abort after the mapping update is done.
239  * When a new physical relation is installed in the map, it would be
240  * scheduled for delete-on-abort, so we'd delete it, and be in trouble.
241  * The relation mapper fixes this by telling us to not delete such relations
242  * after all as part of its commit.
243  *
244  * We also use this to reuse an old build of an index during ALTER TABLE; this
245  * time removing the delete-at-commit entry.
246  *
247  * No-op if the relation is not among those scheduled for deletion.
248  */
249 void
250 RelationPreserveStorage(RelFileNode rnode, bool atCommit)
251 {
252  PendingRelDelete *pending;
253  PendingRelDelete *prev;
254  PendingRelDelete *next;
255 
256  prev = NULL;
257  for (pending = pendingDeletes; pending != NULL; pending = next)
258  {
259  next = pending->next;
260  if (RelFileNodeEquals(rnode, pending->relnode)
261  && pending->atCommit == atCommit)
262  {
263  /* unlink and delete list entry */
264  if (prev)
265  prev->next = next;
266  else
267  pendingDeletes = next;
268  pfree(pending);
269  /* prev does not change */
270  }
271  else
272  {
273  /* unrelated entry, don't touch it */
274  prev = pending;
275  }
276  }
277 }
278 
279 /*
280  * RelationTruncate
281  * Physically truncate a relation to the specified number of blocks.
282  *
283  * This includes getting rid of any buffers for the blocks that are to be
284  * dropped.
285  */
286 void
287 RelationTruncate(Relation rel, BlockNumber nblocks)
288 {
289  bool fsm;
290  bool vm;
291  bool need_fsm_vacuum = false;
292  ForkNumber forks[MAX_FORKNUM];
293  BlockNumber blocks[MAX_FORKNUM];
294  int nforks = 0;
295  SMgrRelation reln;
296 
297  /*
298  * Make sure smgr_targblock etc aren't pointing somewhere past new end.
299  * (Note: don't rely on this reln pointer below this loop.)
300  */
301  reln = RelationGetSmgr(rel);
302  reln->smgr_targblock = InvalidBlockNumber;
303  for (int i = 0; i <= MAX_FORKNUM; ++i)
304  reln->smgr_cached_nblocks[i] = InvalidBlockNumber;
305 
306  /* Prepare for truncation of MAIN fork of the relation */
307  forks[nforks] = MAIN_FORKNUM;
308  blocks[nforks] = nblocks;
309  nforks++;
310 
311  /* Prepare for truncation of the FSM if it exists */
312  fsm = smgrexists(RelationGetSmgr(rel), FSM_FORKNUM);
313  if (fsm)
314  {
315  blocks[nforks] = FreeSpaceMapPrepareTruncateRel(rel, nblocks);
316  if (BlockNumberIsValid(blocks[nforks]))
317  {
318  forks[nforks] = FSM_FORKNUM;
319  nforks++;
320  need_fsm_vacuum = true;
321  }
322  }
323 
324  /* Prepare for truncation of the visibility map too if it exists */
325  vm = smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM);
326  if (vm)
327  {
328  blocks[nforks] = visibilitymap_prepare_truncate(rel, nblocks);
329  if (BlockNumberIsValid(blocks[nforks]))
330  {
331  forks[nforks] = VISIBILITYMAP_FORKNUM;
332  nforks++;
333  }
334  }
335 
336  RelationPreTruncate(rel);
337 
338  /*
339  * Make sure that a concurrent checkpoint can't complete while truncation
340  * is in progress.
341  *
342  * The truncation operation might drop buffers that the checkpoint
343  * otherwise would have flushed. If it does, then it's essential that the
344  * files actually get truncated on disk before the checkpoint record is
345  * written. Otherwise, if replay begins from that checkpoint, the
346  * to-be-truncated blocks might still exist on disk but have older
347  * contents than expected, which can cause replay to fail. It's OK for the
348  * blocks to not exist on disk at all, but not for them to have the wrong
349  * contents.
350  */
351  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_COMPLETE) == 0);
352  MyProc->delayChkptFlags |= DELAY_CHKPT_COMPLETE;
353 
354  /*
355  * We WAL-log the truncation before actually truncating, which means
356  * trouble if the truncation fails. If we then crash, the WAL replay
357  * likely isn't going to succeed in the truncation either, causing a
358  * PANIC. It's tempting to put a critical section here, but that cure
359  * would be worse than the disease. It would turn a usually harmless
360  * failure to truncate, that might spell trouble at WAL replay, into a
361  * certain PANIC.
362  */
363  if (RelationNeedsWAL(rel))
364  {
365  /*
366  * Make an XLOG entry reporting the file truncation.
367  */
368  XLogRecPtr lsn;
369  xl_smgr_truncate xlrec;
370 
371  xlrec.blkno = nblocks;
372  xlrec.rnode = rel->rd_node;
373  xlrec.flags = SMGR_TRUNCATE_ALL;
374 
375  XLogBeginInsert();
376  XLogRegisterData((char *) &xlrec, sizeof(xlrec));
377 
378  lsn = XLogInsert(RM_SMGR_ID,
379  XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
380 
381  /*
382  * Flush, because otherwise the truncation of the main relation might
383  * hit the disk before the WAL record, and the truncation of the FSM
384  * or visibility map. If we crashed during that window, we'd be left
385  * with a truncated heap, but the FSM or visibility map would still
386  * contain entries for the non-existent heap pages.
387  */
388  if (fsm || vm)
389  XLogFlush(lsn);
390  }
391 
392  /*
393  * This will first remove any buffers from the buffer pool that should no
394  * longer exist after truncation is complete, and then truncate the
395  * corresponding files on disk.
396  */
397  smgrtruncate(RelationGetSmgr(rel), forks, nforks, blocks);
398 
399  /* We've done all the critical work, so checkpoints are OK now. */
400  MyProc->delayChkptFlags &= ~DELAY_CHKPT_COMPLETE;
401 
402  /*
403  * Update upper-level FSM pages to account for the truncation. This is
404  * important because the just-truncated pages were likely marked as
405  * all-free, and would be preferentially selected.
406  *
407  * NB: There's no point in delaying checkpoints until this is done.
408  * Because the FSM is not WAL-logged, we have to be prepared for the
409  * possibility of corruption after a crash anyway.
410  */
411  if (need_fsm_vacuum)
412  FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
413 }
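/*
 * Illustrative sketch, not part of storage.c: how a caller such as VACUUM's
 * heap-truncation path might shrink a table. The page-count variables are
 * hypothetical, and the caller is assumed to hold an AccessExclusiveLock on
 * the relation so that no concurrent scan can touch the disappearing blocks.
 */
if (new_rel_pages < old_rel_pages)
	RelationTruncate(rel, new_rel_pages);	/* drops buffers, truncates MAIN/FSM/VM */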
414 
415 /*
416  * RelationPreTruncate
417  * Perform AM-independent work before a physical truncation.
418  *
419  * If an access method's relation_nontransactional_truncate does not call
420  * RelationTruncate(), it must call this before decreasing the table size.
421  */
422 void
423 RelationPreTruncate(Relation rel)
424 {
425  PendingRelSync *pending;
426 
427  if (!pendingSyncHash)
428  return;
429 
430  pending = hash_search(pendingSyncHash,
431  &(RelationGetSmgr(rel)->smgr_rnode.node),
432  HASH_FIND, NULL);
433  if (pending)
434  pending->is_truncated = true;
435 }
436 
437 /*
438  * Copy a fork's data, block by block.
439  *
440  * Note that this requires that there is no dirty data in shared buffers. If
441  * it's possible that there are, callers need to flush those using
442  * e.g. FlushRelationBuffers(rel).
443  *
444  * Also note that this is frequently called via locutions such as
445  * RelationCopyStorage(RelationGetSmgr(rel), ...);
446  * That's safe only because we perform only smgr and WAL operations here.
447  * If we invoked anything else, a relcache flush could cause our SMgrRelation
448  * argument to become a dangling pointer.
449  */
450 void
451 RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
452  ForkNumber forkNum, char relpersistence)
453 {
454  PGAlignedBlock buf;
455  Page page;
456  bool use_wal;
457  bool copying_initfork;
458  BlockNumber nblocks;
459  BlockNumber blkno;
460 
461  page = (Page) buf.data;
462 
463  /*
464  * The init fork for an unlogged relation in many respects has to be
465  * treated the same as a normal relation: changes need to be WAL-logged and
466  * it needs to be synced to disk.
467  */
468  copying_initfork = relpersistence == RELPERSISTENCE_UNLOGGED &&
469  forkNum == INIT_FORKNUM;
470 
471  /*
472  * We need to log the copied data in WAL iff WAL archiving/streaming is
473  * enabled AND it's a permanent relation. This gives the same answer as
474  * "RelationNeedsWAL(rel) || copying_initfork", because we know the
475  * current operation created a new relfilenode.
476  */
477  use_wal = XLogIsNeeded() &&
478  (relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork);
479 
480  nblocks = smgrnblocks(src, forkNum);
481 
482  for (blkno = 0; blkno < nblocks; blkno++)
483  {
484  /* If we got a cancel signal during the copy of the data, quit */
485  CHECK_FOR_INTERRUPTS();
486 
487  smgrread(src, forkNum, blkno, buf.data);
488 
489  if (!PageIsVerifiedExtended(page, blkno,
490  PIV_LOG_WARNING | PIV_REPORT_STAT))
491  {
492  /*
493  * For paranoia's sake, capture the file path before invoking the
494  * ereport machinery. This guards against the possibility of a
495  * relcache flush caused by, e.g., an errcontext callback.
496  * (errcontext callbacks shouldn't be risking any such thing, but
497  * people have been known to forget that rule.)
498  */
499  char *relpath = relpathbackend(src->smgr_rnode.node,
500  src->smgr_rnode.backend,
501  forkNum);
502 
503  ereport(ERROR,
504  (errcode(ERRCODE_DATA_CORRUPTED),
505  errmsg("invalid page in block %u of relation %s",
506  blkno, relpath)));
507  }
508 
509  /*
510  * WAL-log the copied page. Unfortunately we don't know what kind of a
511  * page this is, so we have to log the full page including any unused
512  * space.
513  */
514  if (use_wal)
515  log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);
516 
517  PageSetChecksumInplace(page, blkno);
518 
519  /*
520  * Now write the page. We say skipFsync = true because there's no
521  * need for smgr to schedule an fsync for this write; we'll do it
522  * ourselves below.
523  */
524  smgrextend(dst, forkNum, blkno, buf.data, true);
525  }
526 
527  /*
528  * When we WAL-logged rel pages, we must nonetheless fsync them. The
529  * reason is that since we're copying outside shared buffers, a CHECKPOINT
530  * occurring during the copy has no way to flush the previously written
531  * data to disk (indeed it won't know the new rel even exists). A crash
532  * later on would replay WAL from the checkpoint, therefore it wouldn't
533  * replay our earlier WAL entries. If we do not fsync those pages here,
534  * they might still not be on disk when the crash occurs.
535  */
536  if (use_wal || copying_initfork)
537  smgrimmedsync(dst, forkNum);
538 }
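/*
 * Illustrative sketch, not part of storage.c: copying a relation's main fork
 * into a freshly created relfilenode, the way a rewrite-style operation might.
 * Per the header comment above, dirty pages must first be flushed out of
 * shared buffers; dst_srel is a hypothetical SMgrRelation opened on the
 * destination file, assumed to have been created already.
 */
FlushRelationBuffers(rel);		/* ensure no dirty data remains in shared buffers */
RelationCopyStorage(RelationGetSmgr(rel), dst_srel,
					MAIN_FORKNUM, rel->rd_rel->relpersistence);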
539 
540 /*
541  * RelFileNodeSkippingWAL
542  * Check if a BM_PERMANENT relfilenode is using WAL.
543  *
544  * Changes of certain relfilenodes must not write WAL; see "Skipping WAL for
545  * New RelFileNode" in src/backend/access/transam/README. Though this can be
546  * determined efficiently from a Relation, this function is intended for code
547  * paths that do not have access to a Relation.
548  */
549 bool
550 RelFileNodeSkippingWAL(RelFileNode rnode)
551 {
552  if (!pendingSyncHash ||
553  hash_search(pendingSyncHash, &rnode, HASH_FIND, NULL) == NULL)
554  return false;
555 
556  return true;
557 }
558 
559 /*
560  * EstimatePendingSyncsSpace
561  * Estimate space needed to pass syncs to parallel workers.
562  */
563 Size
564 EstimatePendingSyncsSpace(void)
565 {
566  long entries;
567 
568  entries = pendingSyncHash ? hash_get_num_entries(pendingSyncHash) : 0;
569  return mul_size(1 + entries, sizeof(RelFileNode));
570 }
571 
572 /*
573  * SerializePendingSyncs
574  * Serialize syncs for parallel workers.
575  */
576 void
577 SerializePendingSyncs(Size maxSize, char *startAddress)
578 {
579  HTAB *tmphash;
580  HASHCTL ctl;
581  HASH_SEQ_STATUS scan;
582  PendingRelSync *sync;
583  PendingRelDelete *delete;
584  RelFileNode *src;
585  RelFileNode *dest = (RelFileNode *) startAddress;
586 
587  if (!pendingSyncHash)
588  goto terminate;
589 
590  /* Create temporary hash to collect active relfilenodes */
591  ctl.keysize = sizeof(RelFileNode);
592  ctl.entrysize = sizeof(RelFileNode);
593  ctl.hcxt = CurrentMemoryContext;
594  tmphash = hash_create("tmp relfilenodes",
595  hash_get_num_entries(pendingSyncHash), &ctl,
596  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
597 
598  /* collect all rnodes from pending syncs */
599  hash_seq_init(&scan, pendingSyncHash);
600  while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
601  (void) hash_search(tmphash, &sync->rnode, HASH_ENTER, NULL);
602 
603  /* remove deleted rnodes */
604  for (delete = pendingDeletes; delete != NULL; delete = delete->next)
605  if (delete->atCommit)
606  (void) hash_search(tmphash, (void *) &delete->relnode,
607  HASH_REMOVE, NULL);
608 
609  hash_seq_init(&scan, tmphash);
610  while ((src = (RelFileNode *) hash_seq_search(&scan)))
611  *dest++ = *src;
612 
613  hash_destroy(tmphash);
614 
615 terminate:
616  MemSet(dest, 0, sizeof(RelFileNode));
617 }
618 
619 /*
620  * RestorePendingSyncs
621  * Restore syncs within a parallel worker.
622  *
623  * RelationNeedsWAL() and RelFileNodeSkippingWAL() must offer the correct
624  * answer to parallel workers. Only smgrDoPendingSyncs() reads the
625  * is_truncated field, at end of transaction. Hence, don't restore it.
626  */
627 void
628 RestorePendingSyncs(char *startAddress)
629 {
630  RelFileNode *rnode;
631 
632  Assert(pendingSyncHash == NULL);
633  for (rnode = (RelFileNode *) startAddress; rnode->relNode != 0; rnode++)
634  AddPendingSync(rnode);
635 }
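/*
 * Illustrative sketch, not part of storage.c: the estimate/serialize/restore
 * sequence used to hand pending syncs to parallel workers. shm_area is a
 * hypothetical shared-memory buffer; in PostgreSQL proper the space is carved
 * out of the parallel context's shared memory by access/transam/parallel.c.
 */
Size	len = EstimatePendingSyncsSpace();	/* leader: size needed for the rnode array */

SerializePendingSyncs(len, shm_area);		/* leader: write the array into shm_area */
/* ... later, inside each parallel worker ... */
RestorePendingSyncs(shm_area);				/* worker: rebuild pendingSyncHash from it */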
636 
637 /*
638  * smgrDoPendingDeletes() -- Take care of relation deletes at end of xact.
639  *
640  * This also runs when aborting a subxact; we want to clean up a failed
641  * subxact immediately.
642  *
643  * Note: It's possible that we're being asked to remove a relation that has
644  * no physical storage in any fork. In particular, it's possible that we're
645  * cleaning up an old temporary relation for which RemovePgTempFiles has
646  * already recovered the physical storage.
647  */
648 void
649 smgrDoPendingDeletes(bool isCommit)
650 {
651  int nestLevel = GetCurrentTransactionNestLevel();
652  PendingRelDelete *pending;
653  PendingRelDelete *prev;
654  PendingRelDelete *next;
655  int nrels = 0,
656  maxrels = 0;
657  SMgrRelation *srels = NULL;
658 
659  prev = NULL;
660  for (pending = pendingDeletes; pending != NULL; pending = next)
661  {
662  next = pending->next;
663  if (pending->nestLevel < nestLevel)
664  {
665  /* outer-level entries should not be processed yet */
666  prev = pending;
667  }
668  else
669  {
670  /* unlink list entry first, so we don't retry on failure */
671  if (prev)
672  prev->next = next;
673  else
674  pendingDeletes = next;
675  /* do deletion if called for */
676  if (pending->atCommit == isCommit)
677  {
678  SMgrRelation srel;
679 
680  srel = smgropen(pending->relnode, pending->backend);
681 
682  /* allocate the initial array, or extend it, if needed */
683  if (maxrels == 0)
684  {
685  maxrels = 8;
686  srels = palloc(sizeof(SMgrRelation) * maxrels);
687  }
688  else if (maxrels <= nrels)
689  {
690  maxrels *= 2;
691  srels = repalloc(srels, sizeof(SMgrRelation) * maxrels);
692  }
693 
694  srels[nrels++] = srel;
695  }
696  /* must explicitly free the list entry */
697  pfree(pending);
698  /* prev does not change */
699  }
700  }
701 
702  if (nrels > 0)
703  {
704  smgrdounlinkall(srels, nrels, false);
705 
706  for (int i = 0; i < nrels; i++)
707  smgrclose(srels[i]);
708 
709  pfree(srels);
710  }
711 }
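/*
 * Illustrative sketch, not part of storage.c: the top-level transaction code
 * in access/transam/xact.c drives this function at end of transaction,
 * roughly as follows.
 */
smgrDoPendingDeletes(true);		/* at COMMIT: unlink files scheduled atCommit */
smgrDoPendingDeletes(false);	/* at ABORT: unlink files created in this xact */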
712 
713 /*
714  * smgrDoPendingSyncs() -- Take care of relation syncs at end of xact.
715  */
716 void
717 smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
718 {
719  PendingRelDelete *pending;
720  int nrels = 0,
721  maxrels = 0;
722  SMgrRelation *srels = NULL;
723  HASH_SEQ_STATUS scan;
724  PendingRelSync *pendingsync;
725  PendingRelSync *pendingsync;
726 
727  Assert(GetCurrentTransactionNestLevel() == 1);
728  if (!pendingSyncHash)
729  return; /* no relation needs sync */
730 
731  /* Abort -- just throw away all pending syncs */
732  if (!isCommit)
733  {
734  pendingSyncHash = NULL;
735  return;
736  }
737 
738  AssertPendingSyncs_RelationCache();
739 
740  /* Parallel worker -- just throw away all pending syncs */
741  if (isParallelWorker)
742  {
743  pendingSyncHash = NULL;
744  return;
745  }
746 
747  /* Skip syncing nodes that smgrDoPendingDeletes() will delete. */
748  for (pending = pendingDeletes; pending != NULL; pending = pending->next)
749  if (pending->atCommit)
750  (void) hash_search(pendingSyncHash, (void *) &pending->relnode,
751  HASH_REMOVE, NULL);
752 
753  hash_seq_init(&scan, pendingSyncHash);
754  while ((pendingsync = (PendingRelSync *) hash_seq_search(&scan)))
755  {
756  ForkNumber fork;
757  BlockNumber nblocks[MAX_FORKNUM + 1];
758  BlockNumber total_blocks = 0;
759  SMgrRelation srel;
760 
761  srel = smgropen(pendingsync->rnode, InvalidBackendId);
762 
763  /*
764  * We emit newpage WAL records for smaller relations.
765  *
766  * Small WAL records have a chance to be emitted along with other
767  * backends' WAL records. We emit WAL records instead of syncing for
768  * files that are smaller than a certain threshold, expecting faster
769  * commit. The threshold is defined by the GUC wal_skip_threshold.
770  */
771  if (!pendingsync->is_truncated)
772  {
773  for (fork = 0; fork <= MAX_FORKNUM; fork++)
774  {
775  if (smgrexists(srel, fork))
776  {
777  BlockNumber n = smgrnblocks(srel, fork);
778 
779  /* we shouldn't come here for unlogged relations */
780  Assert(fork != INIT_FORKNUM);
781  nblocks[fork] = n;
782  total_blocks += n;
783  }
784  else
785  nblocks[fork] = InvalidBlockNumber;
786  }
787  }
788 
789  /*
790  * Sync file or emit WAL records for its contents.
791  *
792  * Although we emit a WAL record if the file is small enough, we sync the
793  * file regardless of its size if it has experienced a truncation. This is
794  * because, if an older and longer version of the file had already been
795  * flushed out and we skipped the sync and emitted WAL instead, the file
796  * could be left with trailing garbage blocks after crash recovery. You
797  * might think that we could choose WAL if the current main fork is longer
798  * than ever, but there's a case where the main fork is longer than ever
799  * but the FSM fork gets shorter.
800  */
801  if (pendingsync->is_truncated ||
802  total_blocks * BLCKSZ / 1024 >= wal_skip_threshold)
803  {
804  /* allocate the initial array, or extend it, if needed */
805  if (maxrels == 0)
806  {
807  maxrels = 8;
808  srels = palloc(sizeof(SMgrRelation) * maxrels);
809  }
810  else if (maxrels <= nrels)
811  {
812  maxrels *= 2;
813  srels = repalloc(srels, sizeof(SMgrRelation) * maxrels);
814  }
815 
816  srels[nrels++] = srel;
817  }
818  else
819  {
820  /* Emit WAL records for all blocks. The file is small enough. */
821  for (fork = 0; fork <= MAX_FORKNUM; fork++)
822  {
823  int n = nblocks[fork];
824  Relation rel;
825 
826  if (!BlockNumberIsValid(n))
827  continue;
828 
829  /*
830  * Emit WAL for the whole file. Unfortunately we don't know
831  * what kind of a page this is, so we have to log the full
832  * page including any unused space. ReadBufferExtended()
833  * counts some pgstat events; unfortunately, we discard them.
834  */
835  rel = CreateFakeRelcacheEntry(srel->smgr_rnode.node);
836  log_newpage_range(rel, fork, 0, n, false);
837  FreeFakeRelcacheEntry(rel);
838  }
839  }
840  }
841 
842  pendingSyncHash = NULL;
843 
844  if (nrels > 0)
845  {
846  smgrdosyncall(srels, nrels);
847  pfree(srels);
848  }
849 }
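/*
 * Worked example for the wal_skip_threshold test above, assuming the default
 * BLCKSZ of 8192 bytes and the default wal_skip_threshold of 2048 kB:
 *
 *     256 blocks * 8192 / 1024 = 2048 kB  ->  meets the threshold: fsync at commit
 *     255 blocks * 8192 / 1024 = 2040 kB  ->  below the threshold: WAL-log the pages
 *
 * So with the defaults, newly created relations of 2 MB or more are synced
 * via smgrdosyncall(), while smaller ones have their pages emitted with
 * log_newpage_range() instead.
 */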
850 
851 /*
852  * smgrGetPendingDeletes() -- Get a list of non-temp relations to be deleted.
853  *
854  * The return value is the number of relations scheduled for termination.
855  * *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
856  * If there are no relations to be deleted, *ptr is set to NULL.
857  *
858  * Only non-temporary relations are included in the returned list. This is OK
859  * because the list is used only in contexts where temporary relations don't
860  * matter: we're either writing to the two-phase state file (and transactions
861  * that have touched temp tables can't be prepared) or we're writing to xlog
862  * (and all temporary files will be zapped if we restart anyway, so no need
863  * for redo to do it also).
864  *
865  * Note that the list does not include anything scheduled for termination
866  * by upper-level transactions.
867  */
868 int
869 smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr)
870 {
871  int nestLevel = GetCurrentTransactionNestLevel();
872  int nrels;
873  RelFileNode *rptr;
874  PendingRelDelete *pending;
875 
876  nrels = 0;
877  for (pending = pendingDeletes; pending != NULL; pending = pending->next)
878  {
879  if (pending->nestLevel >= nestLevel && pending->atCommit == forCommit
880  && pending->backend == InvalidBackendId)
881  nrels++;
882  }
883  if (nrels == 0)
884  {
885  *ptr = NULL;
886  return 0;
887  }
888  rptr = (RelFileNode *) palloc(nrels * sizeof(RelFileNode));
889  *ptr = rptr;
890  for (pending = pendingDeletes; pending != NULL; pending = pending->next)
891  {
892  if (pending->nestLevel >= nestLevel && pending->atCommit == forCommit
893  && pending->backend == InvalidBackendId)
894  {
895  *rptr = pending->relnode;
896  rptr++;
897  }
898  }
899  return nrels;
900 }
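/*
 * Illustrative sketch, not part of storage.c: how the commit path consumes
 * this list. The surrounding record-building code is hypothetical; in
 * PostgreSQL proper the array ends up in the commit/abort WAL record or in
 * the two-phase state file (access/transam/xact.c and twophase.c).
 */
RelFileNode *rels;
int			nrels = smgrGetPendingDeletes(true, &rels);

if (nrels > 0)
{
	/* attach rels[0..nrels-1] to the commit record so redo can unlink them */
}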
901 
902 /*
903  * PostPrepare_smgr -- Clean up after a successful PREPARE
904  *
905  * What we have to do here is throw away the in-memory state about pending
906  * relation deletes. It's all been recorded in the 2PC state file and
907  * it's no longer smgr's job to worry about it.
908  */
909 void
910 PostPrepare_smgr(void)
911 {
912  PendingRelDelete *pending;
913  PendingRelDelete *next;
914 
915  for (pending = pendingDeletes; pending != NULL; pending = next)
916  {
917  next = pending->next;
918  pendingDeletes = next;
919  /* must explicitly free the list entry */
920  pfree(pending);
921  }
922 }
923 
924 
925 /*
926  * AtSubCommit_smgr() --- Take care of subtransaction commit.
927  *
928  * Reassign all items in the pending-deletes list to the parent transaction.
929  */
930 void
931 AtSubCommit_smgr(void)
932 {
933  int nestLevel = GetCurrentTransactionNestLevel();
934  PendingRelDelete *pending;
935 
936  for (pending = pendingDeletes; pending != NULL; pending = pending->next)
937  {
938  if (pending->nestLevel >= nestLevel)
939  pending->nestLevel = nestLevel - 1;
940  }
941 }
942 
943 /*
944  * AtSubAbort_smgr() --- Take care of subtransaction abort.
945  *
946  * Delete created relations and forget about deleted relations.
947  * We can execute these operations immediately because we know this
948  * subtransaction will not commit.
949  */
950 void
951 AtSubAbort_smgr(void)
952 {
953  smgrDoPendingDeletes(false);
954 }
955 
956 void
957 smgr_redo(XLogReaderState *record)
958 {
959  XLogRecPtr lsn = record->EndRecPtr;
960  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
961 
962  /* Backup blocks are not used in smgr records */
963  Assert(!XLogRecHasAnyBlockRefs(record));
964 
965  if (info == XLOG_SMGR_CREATE)
966  {
967  xl_smgr_create *xlrec = (xl_smgr_create *) XLogRecGetData(record);
968  SMgrRelation reln;
969 
970  reln = smgropen(xlrec->rnode, InvalidBackendId);
971  smgrcreate(reln, xlrec->forkNum, true);
972  }
973  else if (info == XLOG_SMGR_TRUNCATE)
974  {
975  xl_smgr_truncate *xlrec = (xl_smgr_truncate *) XLogRecGetData(record);
976  SMgrRelation reln;
977  Relation rel;
978  ForkNumber forks[MAX_FORKNUM];
979  BlockNumber blocks[MAX_FORKNUM];
980  int nforks = 0;
981  bool need_fsm_vacuum = false;
982 
983  reln = smgropen(xlrec->rnode, InvalidBackendId);
984 
985  /*
986  * Forcibly create relation if it doesn't exist (which suggests that
987  * it was dropped somewhere later in the WAL sequence). As in
988  * XLogReadBufferForRedo, we prefer to recreate the rel and replay the
989  * log as best we can until the drop is seen.
990  */
991  smgrcreate(reln, MAIN_FORKNUM, true);
992 
993  /*
994  * Before we perform the truncation, update minimum recovery point to
995  * cover this WAL record. Once the relation is truncated, there's no
996  * going back. The buffer manager enforces the WAL-first rule for
997  * normal updates to relation files, so that the minimum recovery
998  * point is always updated before the corresponding change in the data
999  * file is flushed to disk. We have to do the same manually here.
1000  *
1001  * Doing this before the truncation means that if the truncation fails
1002  * for some reason, you cannot start up the system even after restart,
1003  * until you fix the underlying situation so that the truncation will
1004  * succeed. Alternatively, we could update the minimum recovery point
1005  * after truncation, but that would leave a small window where the
1006  * WAL-first rule could be violated.
1007  */
1008  XLogFlush(lsn);
1009 
1010  /* Prepare for truncation of MAIN fork */
1011  if ((xlrec->flags & SMGR_TRUNCATE_HEAP) != 0)
1012  {
1013  forks[nforks] = MAIN_FORKNUM;
1014  blocks[nforks] = xlrec->blkno;
1015  nforks++;
1016 
1017  /* Also tell xlogutils.c about it */
1018  XLogTruncateRelation(xlrec->rnode, MAIN_FORKNUM, xlrec->blkno);
1019  }
1020 
1021  /* Prepare for truncation of FSM and VM too */
1022  rel = CreateFakeRelcacheEntry(xlrec->rnode);
1023 
1024  if ((xlrec->flags & SMGR_TRUNCATE_FSM) != 0 &&
1025  smgrexists(reln, FSM_FORKNUM))
1026  {
1027  blocks[nforks] = FreeSpaceMapPrepareTruncateRel(rel, xlrec->blkno);
1028  if (BlockNumberIsValid(blocks[nforks]))
1029  {
1030  forks[nforks] = FSM_FORKNUM;
1031  nforks++;
1032  need_fsm_vacuum = true;
1033  }
1034  }
1035  if ((xlrec->flags & SMGR_TRUNCATE_VM) != 0 &&
1036  smgrexists(reln, VISIBILITYMAP_FORKNUM))
1037  {
1038  blocks[nforks] = visibilitymap_prepare_truncate(rel, xlrec->blkno);
1039  if (BlockNumberIsValid(blocks[nforks]))
1040  {
1041  forks[nforks] = VISIBILITYMAP_FORKNUM;
1042  nforks++;
1043  }
1044  }
1045 
1046  /* Do the real work to truncate relation forks */
1047  if (nforks > 0)
1048  smgrtruncate(reln, forks, nforks, blocks);
1049 
1050  /*
1051  * Update upper-level FSM pages to account for the truncation. This is
1052  * important because the just-truncated pages were likely marked as
1053  * all-free, and would be preferentially selected.
1054  */
1055  if (need_fsm_vacuum)
1056  FreeSpaceMapVacuumRange(rel, xlrec->blkno,
1057  InvalidBlockNumber);
1058 
1059  FreeFakeRelcacheEntry(rel);
1060  }
1061  else
1062  elog(PANIC, "smgr_redo: unknown op code %u", info);
1063 }