/*-------------------------------------------------------------------------
 *
 * rewriteheap.c
 *	  Support functions to rewrite tables.
 *
 * These functions provide a facility to completely rewrite a heap, while
 * preserving visibility information and update chains.
 *
 * INTERFACE
 *
 * The caller is responsible for creating the new heap, all catalog
 * changes, supplying the tuples to be written to the new heap, and
 * rebuilding indexes.  The caller must hold AccessExclusiveLock on the
 * target table, because we assume no one else is writing into it.
 *
 * To use the facility:
 *
 * begin_heap_rewrite
 * while (fetch next tuple)
 * {
 *	   if (tuple is dead)
 *		   rewrite_heap_dead_tuple
 *	   else
 *	   {
 *		   // do any transformations here if required
 *		   rewrite_heap_tuple
 *	   }
 * }
 * end_heap_rewrite
 *
 * The contents of the new relation shouldn't be relied on until after
 * end_heap_rewrite is called.
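 *
 * As a concrete sketch of a caller (hypothetical: the scan loop,
 * fetch_next_tuple(), and tuple_is_dead() are placeholders, not part of
 * this API), the sequence could look like:
 *
 *	   RewriteState rwstate;
 *	   HeapTuple	old_tup;
 *
 *	   rwstate = begin_heap_rewrite(old_heap, new_heap, oldest_xmin,
 *									freeze_xid, cutoff_multi);
 *	   while ((old_tup = fetch_next_tuple()) != NULL)
 *	   {
 *		   if (tuple_is_dead(old_tup))
 *			   rewrite_heap_dead_tuple(rwstate, old_tup);
 *		   else
 *		   {
 *			   HeapTuple	copy = heap_copytuple(old_tup);
 *
 *			   rewrite_heap_tuple(rwstate, old_tup, copy);
 *			   heap_freetuple(copy);
 *		   }
 *	   }
 *	   end_heap_rewrite(rwstate);
 *
 * (rewrite_heap_tuple scribbles on its third argument, so it must be a
 * private copy; see the comments at that function below.)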
 *
 *
 * IMPLEMENTATION
 *
 * This would be a fairly trivial affair, except that we need to maintain
 * the ctid chains that link versions of an updated tuple together.
 * Since the newly stored tuples will have tids different from the original
 * ones, if we just copied t_ctid fields to the new table the links would
 * be wrong.  When we are required to copy a (presumably recently-dead or
 * delete-in-progress) tuple whose ctid doesn't point to itself, we have
 * to substitute the correct ctid instead.
 *
 * For each ctid reference from A -> B, we might encounter either A first
 * or B first.  (Note that a tuple in the middle of a chain is both A and B
 * of different pairs.)
 *
 * If we encounter A first, we'll store the tuple in the unresolved_tups
 * hash table.  When we later encounter B, we remove A from the hash table,
 * fix the ctid to point to the new location of B, and insert both A and B
 * to the new heap.
 *
 * If we encounter B first, we can insert B to the new heap right away.
 * We then add an entry to the old_new_tid_map hash table showing B's
 * original tid (in the old heap) and new tid (in the new heap).
 * When we later encounter A, we get the new location of B from the table,
 * and can write A immediately with the correct ctid.
 *
 * Entries in the hash tables can be removed as soon as the later tuple
 * is encountered.  That helps to keep the memory usage down.  At the end,
 * both tables are usually empty; we should have encountered both A and B
 * of each pair.  However, it's possible for A to be RECENTLY_DEAD and B
 * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
 * for deadness using OldestXmin is not exact.  In such a case we might
 * encounter B first, and skip it, and find A later.  Then A would be added
 * to unresolved_tups, and stay there until end of the rewrite.  Since
 * this case is very unusual, we don't worry about the memory usage.
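 *
 * For example, given a chain where version A (t_ctid pointing at B) was
 * updated by xid 1000 to produce version B (xmin = 1000): if the scan
 * reaches A first, A waits in unresolved_tups keyed by (1000, B's old
 * tid) until B is written, at which point A's stored t_ctid is fixed to
 * B's new tid and both land in the new heap; if the scan reaches B first,
 * the pair (B's old tid -> B's new tid) goes into old_new_tid_map, and A
 * can later be written immediately with the correct ctid.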
 *
 * Using in-memory hash tables means that we use some memory for each live
 * update chain in the table, from the time we find one end of the
 * reference until we find the other end.  That shouldn't be a problem in
 * practice, but if you do something like an UPDATE without a where-clause
 * on a large table, and then run CLUSTER in the same transaction, you
 * could run out of memory.  It doesn't seem worthwhile to add support for
 * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
 * table under normal circumstances.  Furthermore, in the typical scenario
 * of CLUSTERing on an unchanging key column, we'll see all the versions
 * of a given tuple together anyway, and so the peak memory usage is only
 * proportional to the number of RECENTLY_DEAD versions of a single row,
 * not the number in the whole table.  Note that if we do fail halfway
 * through a CLUSTER, the old table is still valid, so failure is not
 * catastrophic.
 *
 * We can't use the normal heap_insert function to insert into the new
 * heap, because heap_insert overwrites the visibility information.
 * We use a special-purpose raw_heap_insert function instead, which
 * is optimized for bulk inserting a lot of tuples, knowing that we have
 * exclusive access to the heap.  raw_heap_insert builds new pages in
 * local storage.  When a page is full, or at the end of the process,
 * we insert it to WAL as a single record and then write it to disk
 * directly through smgr.  Note, however, that any data sent to the new
 * heap's TOAST table will go through the normal bufmgr.
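 *
 * Schematically, the write path implemented by raw_heap_insert and
 * end_heap_rewrite below is (a summary of the code that follows, not a
 * separate API):
 *
 *	   if (RelationNeedsWAL(new_rel))
 *		   log_newpage(...);					// WAL-log the page as an FPI
 *	   PageSetChecksumInplace(page, blkno);
 *	   smgrextend(..., page, true);				// write, skipFsync = true
 *
 * followed by a single smgrimmedsync() in end_heap_rewrite to make the
 * unbuffered writes durable.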
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/rewriteheap.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <sys/stat.h>
#include <unistd.h>

#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/xact.h"
#include "access/xloginsert.h"
#include "catalog/catalog.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "replication/logical.h"
#include "replication/slot.h"
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"

/*
 * State associated with a rewrite operation.  This is opaque to the user
 * of the rewrite facility.
 */
typedef struct RewriteStateData
{
	Relation	rs_old_rel;		/* source heap */
	Relation	rs_new_rel;		/* destination heap */
	Page		rs_buffer;		/* page currently being built */
	BlockNumber rs_blockno;		/* block where page will go */
	bool		rs_buffer_valid;	/* T if any tuples in buffer */
	bool		rs_logical_rewrite; /* do we need to do logical rewriting */
	TransactionId rs_oldest_xmin;	/* oldest xmin used by caller to determine
									 * tuple visibility */
	TransactionId rs_freeze_xid;	/* Xid that will be used as freeze cutoff
									 * point */
	TransactionId rs_logical_xmin;	/* Xid that will be used as cutoff point
									 * for logical rewrites */
	MultiXactId rs_cutoff_multi;	/* MultiXactId that will be used as cutoff
									 * point for multixacts */
	MemoryContext rs_cxt;		/* for hash tables and entries and tuples in
								 * them */
	XLogRecPtr	rs_begin_lsn;	/* XLogInsertLsn when starting the rewrite */
	HTAB	   *rs_unresolved_tups; /* unmatched A tuples */
	HTAB	   *rs_old_new_tid_map; /* unmatched B tuples */
	HTAB	   *rs_logical_mappings;	/* logical remapping files */
	uint32		rs_num_rewrite_mappings;	/* # in memory mappings */
} RewriteStateData;

/*
 * The lookup keys for the hash tables are tuple TID and xmin (we must check
 * both to avoid false matches from dead tuples).  Beware that there is
 * probably some padding space in this struct; it must be zeroed out for
 * correct hashtable operation.
 */
typedef struct
{
	TransactionId xmin;			/* tuple xmin */
	ItemPointerData tid;		/* tuple location in old heap */
} TidHashKey;
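
/*
 * For example, a key is always built as rewrite_heap_tuple() does below:
 *
 *		memset(&hashkey, 0, sizeof(hashkey));
 *		hashkey.xmin = HeapTupleHeaderGetXmin(tup->t_data);
 *		hashkey.tid = tup->t_self;
 *
 * so that any padding bytes are zeroed before the key reaches hash_search().
 */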

/*
 * Entry structures for the hash tables
 */
typedef struct
{
	TidHashKey	key;			/* expected xmin/old location of B tuple */
	ItemPointerData old_tid;	/* A's location in the old heap */
	HeapTuple	tuple;			/* A's tuple contents */
} UnresolvedTupData;

typedef UnresolvedTupData *UnresolvedTup;

typedef struct
{
	TidHashKey	key;			/* actual xmin/old location of B tuple */
	ItemPointerData new_tid;	/* where we put it in the new heap */
} OldToNewMappingData;

typedef OldToNewMappingData *OldToNewMapping;

/*
 * In-Memory data for an xid that might need logical remapping entries
 * to be logged.
 */
typedef struct RewriteMappingFile
{
	TransactionId xid;			/* xid that might need to see the row */
	int			vfd;			/* fd of mappings file */
	off_t		off;			/* how far have we written yet */
	uint32		num_mappings;	/* number of in-memory mappings */
	dlist_head	mappings;		/* list of in-memory mappings */
	char		path[MAXPGPATH];	/* path, for error messages */
} RewriteMappingFile;

/*
 * A single In-Memory logical rewrite mapping, hanging off
 * RewriteMappingFile->mappings.
 */
typedef struct RewriteMappingDataEntry
{
	LogicalRewriteMappingData map;	/* map between old and new location of the
									 * tuple */
	dlist_node	node;			/* node in RewriteMappingFile->mappings */
} RewriteMappingDataEntry;


/* prototypes for internal functions */
static void raw_heap_insert(RewriteState state, HeapTuple tup);

/* internal logical remapping prototypes */
static void logical_begin_heap_rewrite(RewriteState state);
static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
static void logical_end_heap_rewrite(RewriteState state);


/*
 * Begin a rewrite of a table
 *
 * old_heap		old, locked heap relation tuples will be read from
 * new_heap		new, locked heap relation to insert tuples to
 * oldest_xmin	xid used by the caller to determine which tuples are dead
 * freeze_xid	xid before which tuples will be frozen
 * cutoff_multi	multixact before which multis will be removed
 *
 * Returns an opaque RewriteState, allocated in current memory context,
 * to be used in subsequent calls to the other functions.
 */
RewriteState
begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
				   TransactionId freeze_xid, MultiXactId cutoff_multi)
{
	RewriteState state;
	MemoryContext rw_cxt;
	MemoryContext old_cxt;
	HASHCTL		hash_ctl;

	/*
	 * To ease cleanup, make a separate context that will contain the
	 * RewriteState struct itself plus all subsidiary data.
	 */
	rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
								   "Table rewrite",
								   ALLOCSET_DEFAULT_SIZES);
	old_cxt = MemoryContextSwitchTo(rw_cxt);

	/* Create and fill in the state struct */
	state = palloc0(sizeof(RewriteStateData));

	state->rs_old_rel = old_heap;
	state->rs_new_rel = new_heap;
	state->rs_buffer = (Page) palloc(BLCKSZ);
	/* new_heap needn't be empty, just locked */
	state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
	state->rs_buffer_valid = false;
	state->rs_oldest_xmin = oldest_xmin;
	state->rs_freeze_xid = freeze_xid;
	state->rs_cutoff_multi = cutoff_multi;
	state->rs_cxt = rw_cxt;

	/* Initialize hash tables used to track update chains */
	hash_ctl.keysize = sizeof(TidHashKey);
	hash_ctl.entrysize = sizeof(UnresolvedTupData);
	hash_ctl.hcxt = state->rs_cxt;

	state->rs_unresolved_tups =
		hash_create("Rewrite / Unresolved ctids",
					128,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	hash_ctl.entrysize = sizeof(OldToNewMappingData);

	state->rs_old_new_tid_map =
		hash_create("Rewrite / Old to new tid map",
					128,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	MemoryContextSwitchTo(old_cxt);

	logical_begin_heap_rewrite(state);

	return state;
}

/*
 * End a rewrite.
 *
 * state and any other resources are freed.
 */
void
end_heap_rewrite(RewriteState state)
{
	HASH_SEQ_STATUS seq_status;
	UnresolvedTup unresolved;

	/*
	 * Write any remaining tuples in the UnresolvedTups table.  If we have
	 * any left, they should in fact be dead, but let's err on the safe side.
	 */
	hash_seq_init(&seq_status, state->rs_unresolved_tups);

	while ((unresolved = hash_seq_search(&seq_status)) != NULL)
	{
		ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
		raw_heap_insert(state, unresolved->tuple);
	}

	/* Write the last page, if any */
	if (state->rs_buffer_valid)
	{
		if (RelationNeedsWAL(state->rs_new_rel))
			log_newpage(&state->rs_new_rel->rd_node,
						MAIN_FORKNUM,
						state->rs_blockno,
						state->rs_buffer,
						true);

		PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);

		smgrextend(RelationGetSmgr(state->rs_new_rel), MAIN_FORKNUM,
				   state->rs_blockno, (char *) state->rs_buffer, true);
	}

	/*
	 * When we WAL-logged rel pages, we must nonetheless fsync them.  The
	 * reason is the same as in storage.c's RelationCopyStorage(): we're
	 * writing data that's not in shared buffers, and so a CHECKPOINT
	 * occurring during the rewriteheap operation won't have fsync'd data we
	 * wrote before the checkpoint.
	 */
	if (RelationNeedsWAL(state->rs_new_rel))
		smgrimmedsync(RelationGetSmgr(state->rs_new_rel), MAIN_FORKNUM);

	logical_end_heap_rewrite(state);

	/* Deleting the context frees everything */
	MemoryContextDelete(state->rs_cxt);
}

/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state		opaque state as returned by begin_heap_rewrite
 * old_tuple	original tuple in the old heap
 * new_tuple	new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
				   HeapTuple old_tuple, HeapTuple new_tuple)
{
	MemoryContext old_cxt;
	ItemPointerData old_tid;
	TidHashKey	hashkey;
	bool		found;
	bool		free_new;

	old_cxt = MemoryContextSwitchTo(state->rs_cxt);

	/*
	 * Copy the original tuple's visibility information into new_tuple.
	 *
	 * XXX we might later need to copy some t_infomask2 bits, too? Right now,
	 * we intentionally clear the HOT status bits.
	 */
	memcpy(&new_tuple->t_data->t_choice.t_heap,
		   &old_tuple->t_data->t_choice.t_heap,
		   sizeof(HeapTupleFields));

	new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
	new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
	new_tuple->t_data->t_infomask |=
		old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

	/*
	 * While we have our hands on the tuple, we may as well freeze any
	 * eligible xmin or xmax, so that future VACUUM effort can be saved.
	 */
	heap_freeze_tuple(new_tuple->t_data,
					  state->rs_old_rel->rd_rel->relfrozenxid,
					  state->rs_old_rel->rd_rel->relminmxid,
					  state->rs_freeze_xid,
					  state->rs_cutoff_multi);

	/*
	 * Invalid ctid means that ctid should point to the tuple itself. We'll
	 * override it later if the tuple is part of an update chain.
	 */
	ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

	/*
	 * If the tuple has been updated, check the old-to-new mapping hash table.
	 */
	if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
		  HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
		!HeapTupleHeaderIndicatesMovedPartitions(old_tuple->t_data) &&
		!(ItemPointerEquals(&(old_tuple->t_self),
							&(old_tuple->t_data->t_ctid))))
	{
		OldToNewMapping mapping;

		memset(&hashkey, 0, sizeof(hashkey));
		hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
		hashkey.tid = old_tuple->t_data->t_ctid;

		mapping = (OldToNewMapping)
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_FIND, NULL);

		if (mapping != NULL)
		{
			/*
			 * We've already copied the tuple that t_ctid points to, so we
			 * can set the ctid of this tuple to point to the new location,
			 * and insert it right away.
			 */
			new_tuple->t_data->t_ctid = mapping->new_tid;

			/* We don't need the mapping entry anymore */
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_REMOVE, &found);
			Assert(found);
		}
		else
		{
			/*
			 * We haven't seen the tuple t_ctid points to yet. Stash this
			 * tuple into unresolved_tups to be written later.
			 */
			UnresolvedTup unresolved;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_ENTER, &found);
			Assert(!found);

			unresolved->old_tid = old_tuple->t_self;
			unresolved->tuple = heap_copytuple(new_tuple);

			/*
			 * We can't do anything more now, since we don't know where the
			 * tuple will be written.
			 */
			MemoryContextSwitchTo(old_cxt);
			return;
		}
	}

	/*
	 * Now we will write the tuple, and then check to see if it is the B
	 * tuple in any new or known pair.  When we resolve a known pair, we will
	 * be able to write that pair's A tuple, and then we have to check if it
	 * resolves some other pair.  Hence, we need a loop here.
	 */
	old_tid = old_tuple->t_self;
	free_new = false;

	for (;;)
	{
		ItemPointerData new_tid;

		/* Insert the tuple and find out where it's put in new_heap */
		raw_heap_insert(state, new_tuple);
		new_tid = new_tuple->t_self;

		logical_rewrite_heap_tuple(state, old_tid, new_tuple);

		/*
		 * If the tuple is the updated version of a row, and the prior
		 * version wouldn't be DEAD yet, then we need to either resolve the
		 * prior version (if it's waiting in rs_unresolved_tups), or make an
		 * entry in rs_old_new_tid_map (so we can resolve it when we do see
		 * it).  The previous tuple's xmax would equal this one's xmin, so
		 * it's RECENTLY_DEAD if and only if the xmin is not before
		 * OldestXmin.
		 */
		if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
								   state->rs_oldest_xmin))
		{
			/*
			 * Okay, this is B in an update pair.  See if we've seen A.
			 */
			UnresolvedTup unresolved;

			memset(&hashkey, 0, sizeof(hashkey));
			hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
			hashkey.tid = old_tid;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_FIND, NULL);

			if (unresolved != NULL)
			{
				/*
				 * We have seen and memorized the previous tuple already. Now
				 * that we know where we inserted the tuple its t_ctid points
				 * to, fix its t_ctid and insert it to the new heap.
				 */
				if (free_new)
					heap_freetuple(new_tuple);
				new_tuple = unresolved->tuple;
				free_new = true;
				old_tid = unresolved->old_tid;
				new_tuple->t_data->t_ctid = new_tid;

				/*
				 * We don't need the hash entry anymore, but don't free its
				 * tuple just yet.
				 */
				hash_search(state->rs_unresolved_tups, &hashkey,
							HASH_REMOVE, &found);
				Assert(found);

				/* loop back to insert the previous tuple in the chain */
				continue;
			}
			else
			{
				/*
				 * Remember the new tid of this tuple.  We'll use it to set
				 * the ctid when we find the previous tuple in the chain.
				 */
				OldToNewMapping mapping;

				mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
									  HASH_ENTER, &found);
				Assert(!found);

				mapping->new_tid = new_tid;
			}
		}

		/* Done with this (chain of) tuples, for now */
		if (free_new)
			heap_freetuple(new_tuple);
		break;
	}

	MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite.  Dead tuples are not
 * copied to the new table, but we still make note of them so that we can
 * release some resources earlier.
 *
 * Returns true if a tuple was removed from the unresolved_tups table.
 * This indicates that the tuple, previously thought to be "recently dead",
 * is now known really dead and won't be written to the output.
 */
bool
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
	/*
	 * If we have already seen an earlier tuple in the update chain that
	 * points to this tuple, let's forget about that earlier tuple. It's in
	 * fact dead as well, our simple xmax < OldestXmin test in
	 * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
	 * when xmin of a tuple is greater than xmax, which sounds
	 * counter-intuitive but is perfectly valid.
	 *
	 * We don't bother to try to detect the situation the other way round,
	 * when we encounter the dead tuple first and then the recently dead one
	 * that points to it. If that happens, we'll have some unmatched entries
	 * in the UnresolvedTups hash table at the end. That can happen anyway,
	 * because a vacuum might have removed the dead tuple in the chain before
	 * us.
	 */
	UnresolvedTup unresolved;
	TidHashKey	hashkey;
	bool		found;

	memset(&hashkey, 0, sizeof(hashkey));
	hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
	hashkey.tid = old_tuple->t_self;

	unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
							 HASH_FIND, NULL);

	if (unresolved != NULL)
	{
		/* Need to free the contained tuple as well as the hashtable entry */
		heap_freetuple(unresolved->tuple);
		hash_search(state->rs_unresolved_tups, &hashkey,
					HASH_REMOVE, &found);
		Assert(found);
		return true;
	}

	return false;
}

/*
 * Insert a tuple to the new relation.  This has to track heap_insert and
 * its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
	Page		page = state->rs_buffer;
	Size		pageFreeSpace,
				saveFreeSpace;
	Size		len;
	OffsetNumber newoff;
	HeapTuple	heaptup;

	/*
	 * If the new tuple is too big for storage or contains already toasted
	 * out-of-line attributes from some other relation, invoke the toaster.
	 *
	 * Note: below this point, heaptup is the data we actually intend to
	 * store into the relation; tup is the caller's original untoasted data.
	 */
	if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(tup));
		heaptup = tup;
	}
	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
	{
		int			options = HEAP_INSERT_SKIP_FSM;

		/*
		 * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
		 * for the TOAST table are not logically decoded.  The main heap is
		 * WAL-logged as XLOG FPI records, which are not logically decoded.
		 */
		options |= HEAP_INSERT_NO_LOGICAL;

		heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL,
											  options);
	}
	else
		heaptup = tup;

	len = MAXALIGN(heaptup->t_len); /* be conservative */

	/*
	 * If we're going to fail for an oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %zu, maximum size %zu",
						len, MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
												   HEAP_DEFAULT_FILLFACTOR);

	/* Now we can check to see if there's enough free space already. */
	if (state->rs_buffer_valid)
	{
		pageFreeSpace = PageGetHeapFreeSpace(page);

		if (len + saveFreeSpace > pageFreeSpace)
		{
			/*
			 * Doesn't fit, so write out the existing page.  It always
			 * contains a tuple.  Hence, unlike RelationGetBufferForTuple(),
			 * enforce saveFreeSpace unconditionally.
			 */

			/* XLOG stuff */
			if (RelationNeedsWAL(state->rs_new_rel))
				log_newpage(&state->rs_new_rel->rd_node,
							MAIN_FORKNUM,
							state->rs_blockno,
							page,
							true);

			/*
			 * Now write the page. We say skipFsync = true because there's no
			 * need for smgr to schedule an fsync for this write; we'll do it
			 * ourselves in end_heap_rewrite.
			 */
			PageSetChecksumInplace(page, state->rs_blockno);

			smgrextend(RelationGetSmgr(state->rs_new_rel), MAIN_FORKNUM,
					   state->rs_blockno, (char *) page, true);

			state->rs_blockno++;
			state->rs_buffer_valid = false;
		}
	}

	if (!state->rs_buffer_valid)
	{
		/* Initialize a new empty page */
		PageInit(page, BLCKSZ, 0);
		state->rs_buffer_valid = true;
	}

	/* And now we can insert the tuple into the page */
	newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
						 InvalidOffsetNumber, false, true);
	if (newoff == InvalidOffsetNumber)
		elog(ERROR, "failed to add tuple");

	/* Update caller's t_self to the actual position where it was stored */
	ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

	/*
	 * Insert the correct position into CTID of the stored tuple, too, if the
	 * caller didn't supply a valid CTID.
	 */
	if (!ItemPointerIsValid(&tup->t_data->t_ctid))
	{
		ItemId		newitemid;
		HeapTupleHeader onpage_tup;

		newitemid = PageGetItemId(page, newoff);
		onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

		onpage_tup->t_ctid = tup->t_self;
	}

	/* If heaptup is a private copy, release it. */
	if (heaptup != tup)
		heap_freetuple(heaptup);
}

/* ------------------------------------------------------------------------
 * Logical rewrite support
 *
 * When doing logical decoding - which relies on using cmin/cmax of catalog
 * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
 * information to allow the decoding backend to update its internal mapping
 * of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
 *
 * For that, every time we find a tuple that's been modified in a catalog
 * relation within the xmin horizon of any decoding slot, we log a mapping
 * from the old to the new location.
 *
 * To deal with rewrites that abort, the filename of a mapping file contains
 * the xid of the transaction performing the rewrite, which then can be
 * checked before being read in.
 *
 * For efficiency we don't immediately spill every single mapping for a
 * row to disk but only do so in batches when we've collected several of
 * them in memory or when end_heap_rewrite() has been called.
 *
 * Crash-Safety: This module diverts from the usual patterns of doing WAL
 * since it cannot rely on checkpoint flushing out all buffers and thus
 * waiting for exclusive locks on buffers.  Usually the XLogInsert() covering
 * buffer modifications is performed while the buffer(s) that are being
 * modified are exclusively locked, guaranteeing that both the WAL record and
 * the modified heap are on either side of the checkpoint.  But since the
 * mapping files we log aren't in shared_buffers, that interlock doesn't
 * work.
 *
 * Instead we simply write the mapping files out to disk, *before* the
 * XLogInsert() is performed.  That guarantees that either the XLogInsert()
 * is inserted after the checkpoint's redo pointer or that the checkpoint
 * (via CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping
 * file to disk.  That leaves the tail end that has not yet been flushed open
 * to corruption, which is solved by including the current offset in the
 * xl_heap_rewrite_mapping records and truncating the mapping file to it
 * during replay.  Every time a rewrite is finished, all generated mapping
 * files are synced to disk.
 *
 * Note that if we were only concerned about crash safety we wouldn't have
 * to deal with WAL logging at all - an fsync() at the end of a rewrite
 * would be sufficient for crash safety.  Any mapping that hasn't been
 * safely flushed to disk has to be by an aborted (explicitly or via a
 * crash) transaction and is ignored by virtue of the xid in its name being
 * subject to a TransactionIdDidCommit() check.  But we want to support
 * having standbys via physical replication, both for availability and to
 * do logical decoding there.
 * ------------------------------------------------------------------------
 */
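
/*
 * In short, the ordering implemented by the functions below is:
 *
 *		FileWrite(mapping file)							write the data first
 *		XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE)		record carries offset
 *		...
 *		FileSync(mapping file)							at end of the rewrite
 *
 * and heap_xlog_logical_rewrite() replays a record by truncating the file
 * to the recorded offset before rewriting its tail.
 */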

/*
 * Do preparations for logging logical mappings during a rewrite if
 * necessary. If we detect that we don't need to log anything we'll prevent
 * any further action by the various logical rewrite functions.
 */
static void
logical_begin_heap_rewrite(RewriteState state)
{
	HASHCTL		hash_ctl;
	TransactionId logical_xmin;

	/*
	 * We only need to persist these mappings if the rewritten table can be
	 * accessed during logical decoding; if not, we can skip doing any
	 * additional work.
	 */
	state->rs_logical_rewrite =
		RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);

	if (!state->rs_logical_rewrite)
		return;

	ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);

	/*
	 * If there are no logical slots in progress we don't need to do
	 * anything; there cannot be any remappings for relevant rows yet. The
	 * relation's lock protects us against races.
	 */
	if (logical_xmin == InvalidTransactionId)
	{
		state->rs_logical_rewrite = false;
		return;
	}

	state->rs_logical_xmin = logical_xmin;
	state->rs_begin_lsn = GetXLogInsertRecPtr();
	state->rs_num_rewrite_mappings = 0;

	hash_ctl.keysize = sizeof(TransactionId);
	hash_ctl.entrysize = sizeof(RewriteMappingFile);
	hash_ctl.hcxt = state->rs_cxt;

	state->rs_logical_mappings =
		hash_create("Logical rewrite mapping",
					128,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

/*
 * Flush all logical in-memory mappings to disk, but don't fsync them yet.
 */
static void
logical_heap_rewrite_flush_mappings(RewriteState state)
{
	HASH_SEQ_STATUS seq_status;
	RewriteMappingFile *src;
	dlist_mutable_iter iter;

	Assert(state->rs_logical_rewrite);

	/* no logical rewrite in progress, no need to iterate over mappings */
	if (state->rs_num_rewrite_mappings == 0)
		return;

	elog(DEBUG1, "flushing %u logical rewrite mapping entries",
		 state->rs_num_rewrite_mappings);

	hash_seq_init(&seq_status, state->rs_logical_mappings);
	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
	{
		char	   *waldata;
		char	   *waldata_start;
		xl_heap_rewrite_mapping xlrec;
		Oid			dboid;
		uint32		len;
		int			written;

		/* this file hasn't got any new mappings */
		if (src->num_mappings == 0)
			continue;

		if (state->rs_old_rel->rd_rel->relisshared)
			dboid = InvalidOid;
		else
			dboid = MyDatabaseId;

		xlrec.num_mappings = src->num_mappings;
		xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
		xlrec.mapped_xid = src->xid;
		xlrec.mapped_db = dboid;
		xlrec.offset = src->off;
		xlrec.start_lsn = state->rs_begin_lsn;

		/* write all mappings consecutively */
		len = src->num_mappings * sizeof(LogicalRewriteMappingData);
		waldata_start = waldata = palloc(len);

		/*
		 * collect data we need to write out, but don't modify ondisk data
		 * yet
		 */
		dlist_foreach_modify(iter, &src->mappings)
		{
			RewriteMappingDataEntry *pmap;

			pmap = dlist_container(RewriteMappingDataEntry, node, iter.cur);

			memcpy(waldata, &pmap->map, sizeof(pmap->map));
			waldata += sizeof(pmap->map);

			/* remove from the list and free */
			dlist_delete(&pmap->node);
			pfree(pmap);

			/* update bookkeeping */
			state->rs_num_rewrite_mappings--;
			src->num_mappings--;
		}

		Assert(src->num_mappings == 0);
		Assert(waldata == waldata_start + len);

		/*
		 * Note that we deviate from the usual WAL coding practices here;
		 * check the above "Logical rewrite support" comment for reasoning.
		 */
		written = FileWrite(src->vfd, waldata_start, len, src->off,
							WAIT_EVENT_LOGICAL_REWRITE_WRITE);
		if (written != len)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not write to file \"%s\", wrote %d of %d: %m",
							src->path, written, len)));
		src->off += len;

		XLogBeginInsert();
		XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
		XLogRegisterData(waldata_start, len);

		/* write xlog record */
		XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);

		pfree(waldata_start);
	}
	Assert(state->rs_num_rewrite_mappings == 0);
}

/*
 * Logical remapping part of end_heap_rewrite().
 */
static void
logical_end_heap_rewrite(RewriteState state)
{
	HASH_SEQ_STATUS seq_status;
	RewriteMappingFile *src;

	/* done, no logical rewrite in progress */
	if (!state->rs_logical_rewrite)
		return;

	/* writeout remaining in-memory entries */
	if (state->rs_num_rewrite_mappings > 0)
		logical_heap_rewrite_flush_mappings(state);

	/* Iterate over all mappings we have written and fsync the files. */
	hash_seq_init(&seq_status, state->rs_logical_mappings);
	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
	{
		if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
			ereport(data_sync_elevel(ERROR),
					(errcode_for_file_access(),
					 errmsg("could not fsync file \"%s\": %m", src->path)));
		FileClose(src->vfd);
	}
	/* memory context cleanup will deal with the rest */
}

/*
 * Log a single (old->new) mapping for 'xid'.
 */
static void
logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
							LogicalRewriteMappingData *map)
{
	RewriteMappingFile *src;
	RewriteMappingDataEntry *pmap;
	Oid			relid;
	bool		found;

	relid = RelationGetRelid(state->rs_old_rel);

	/* look for existing mappings for this 'mapped' xid */
	src = hash_search(state->rs_logical_mappings, &xid,
					  HASH_ENTER, &found);

	/*
	 * We haven't yet had the need to map anything for this xid, create
	 * per-xid data structures.
	 */
	if (!found)
	{
		char		path[MAXPGPATH];
		Oid			dboid;

		if (state->rs_old_rel->rd_rel->relisshared)
			dboid = InvalidOid;
		else
			dboid = MyDatabaseId;

		snprintf(path, MAXPGPATH,
				 "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
				 dboid, relid,
				 LSN_FORMAT_ARGS(state->rs_begin_lsn),
				 xid, GetCurrentTransactionId());

		dlist_init(&src->mappings);
		src->num_mappings = 0;
		src->off = 0;
		memcpy(src->path, path, sizeof(path));
		src->vfd = PathNameOpenFile(path,
									O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
		if (src->vfd < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not create file \"%s\": %m", path)));
	}

	pmap = MemoryContextAlloc(state->rs_cxt,
							  sizeof(RewriteMappingDataEntry));
	memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
	dlist_push_tail(&src->mappings, &pmap->node);
	src->num_mappings++;
	state->rs_num_rewrite_mappings++;

	/*
	 * Write out buffer every time we've too many in-memory entries across
	 * all mapping files.
	 */
	if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
		logical_heap_rewrite_flush_mappings(state);
}

/*
 * Perform logical remapping for a tuple that's mapped from old_tid to
 * new_tuple->t_self by rewrite_heap_tuple() if necessary for the tuple.
 */
static void
logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
						   HeapTuple new_tuple)
{
	ItemPointerData new_tid = new_tuple->t_self;
	TransactionId cutoff = state->rs_logical_xmin;
	TransactionId xmin;
	TransactionId xmax;
	bool		do_log_xmin = false;
	bool		do_log_xmax = false;
	LogicalRewriteMappingData map;

	/* no logical rewrite in progress, we don't need to log anything */
	if (!state->rs_logical_rewrite)
		return;

	xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
	/* use *GetUpdateXid to correctly deal with multixacts */
	xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);

	/*
	 * Log the mapping iff the tuple has been created recently.
	 */
	if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
		do_log_xmin = true;

	if (!TransactionIdIsNormal(xmax))
	{
		/*
		 * no xmax is set, can't have any permanent ones, so this check is
		 * sufficient
		 */
	}
	else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
	{
		/* only locked, we don't care */
	}
	else if (!TransactionIdPrecedes(xmax, cutoff))
	{
		/* tuple has been deleted recently, log */
		do_log_xmax = true;
	}

	/* if neither needs to be logged, we're done */
	if (!do_log_xmin && !do_log_xmax)
		return;

	/* fill out mapping information */
	map.old_node = state->rs_old_rel->rd_node;
	map.old_tid = old_tid;
	map.new_node = state->rs_new_rel->rd_node;
	map.new_tid = new_tid;

	/* ---
	 * Now persist the mapping for the individual xids that are affected. We
	 * need to log for both xmin and xmax if they aren't the same transaction,
	 * since the mapping files are per "affected" xid.
	 * We don't muster all that much effort detecting whether xmin and xmax
	 * are actually the same transaction; we just check whether the xid is
	 * the same disregarding subtransactions.  Logging too much is relatively
	 * harmless, and we could never do the check fully since subtransaction
	 * data is thrown away during restarts.
	 * ---
	 */
	if (do_log_xmin)
		logical_rewrite_log_mapping(state, xmin, &map);
	/* separately log mapping for xmax unless it'd be redundant */
	if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
		logical_rewrite_log_mapping(state, xmax, &map);
}

/*
 * Replay XLOG_HEAP2_REWRITE records
 */
void
heap_xlog_logical_rewrite(XLogReaderState *r)
{
	char		path[MAXPGPATH];
	int			fd;
	xl_heap_rewrite_mapping *xlrec;
	uint32		len;
	char	   *data;

	xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);

	snprintf(path, MAXPGPATH,
			 "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
			 xlrec->mapped_db, xlrec->mapped_rel,
			 LSN_FORMAT_ARGS(xlrec->start_lsn),
			 xlrec->mapped_xid, XLogRecGetXid(r));

	fd = OpenTransientFile(path,
						   O_CREAT | O_WRONLY | PG_BINARY);
	if (fd < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not create file \"%s\": %m", path)));

	/*
	 * Truncate all data that's not guaranteed to have been safely fsynced
	 * (by previous record or by the last checkpoint).
	 */
	pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_TRUNCATE);
	if (ftruncate(fd, xlrec->offset) != 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not truncate file \"%s\" to %u: %m",
						path, (uint32) xlrec->offset)));
	pgstat_report_wait_end();

	data = XLogRecGetData(r) + sizeof(*xlrec);

	len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);

	/* write out tail end of mapping file (again) */
	errno = 0;
	pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE);
	if (pg_pwrite(fd, data, len, xlrec->offset) != len)
	{
		/* if write didn't set errno, assume problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write to file \"%s\": %m", path)));
	}
	pgstat_report_wait_end();

	/*
	 * Now fsync all previously written data. We could improve things and
	 * only do this for the last write to a file, but the required
	 * bookkeeping doesn't seem worth the trouble.
	 */
	pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_SYNC);
	if (pg_fsync(fd) != 0)
		ereport(data_sync_elevel(ERROR),
				(errcode_for_file_access(),
				 errmsg("could not fsync file \"%s\": %m", path)));
	pgstat_report_wait_end();

	if (CloseTransientFile(fd) != 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", path)));
}

/* ---
 * Perform a checkpoint for logical rewrite mappings
 *
 * This serves two tasks:
 * 1) Remove all mappings not needed anymore based on the logical restart LSN
 * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
 *	  only has to deal with the parts of a mapping that have been written out
 *	  after the checkpoint started.
 * ---
 */
void
CheckPointLogicalRewriteHeap(void)
{
	XLogRecPtr	cutoff;
	XLogRecPtr	redo;
	DIR		   *mappings_dir;
	struct dirent *mapping_de;
	char		path[MAXPGPATH + 20];

	/*
	 * We start out with a minimum of the last redo pointer. No new decoding
	 * slot will start before that, so that's a safe upper bound for removal.
	 */
	redo = GetRedoRecPtr();

	/* now check for the restart ptrs from existing slots */
	cutoff = ReplicationSlotsComputeLogicalRestartLSN();

	/* don't start earlier than the restart lsn */
	if (cutoff != InvalidXLogRecPtr && redo < cutoff)
		cutoff = redo;

	mappings_dir = AllocateDir("pg_logical/mappings");
	while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
	{
		struct stat statbuf;
		Oid			dboid;
		Oid			relid;
		XLogRecPtr	lsn;
		TransactionId rewrite_xid;
		TransactionId create_xid;
		uint32		hi,
					lo;

		if (strcmp(mapping_de->d_name, ".") == 0 ||
			strcmp(mapping_de->d_name, "..") == 0)
			continue;

		snprintf(path, sizeof(path), "pg_logical/mappings/%s", mapping_de->d_name);
		if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
			continue;

		/* Skip over files that cannot be ours. */
		if (strncmp(mapping_de->d_name, "map-", 4) != 0)
			continue;

		if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
				   &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
			elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);

		lsn = ((uint64) hi) << 32 | lo;

		if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
		{
			elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
			if (unlink(path) < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not remove file \"%s\": %m", path)));
		}
		else
		{
			/* on some operating systems fsyncing a file requires O_RDWR */
			int			fd = OpenTransientFile(path, O_RDWR | PG_BINARY);

			/*
			 * The file cannot vanish due to concurrency since this function
			 * is the only one removing logical mappings and only one
			 * checkpoint can be in progress at a time.
			 */
			if (fd < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not open file \"%s\": %m", path)));

			/*
			 * We could try to avoid fsyncing files that either haven't
			 * changed or have only been created since the checkpoint's
			 * start, but it's currently not deemed worth the effort.
			 */
			pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC);
			if (pg_fsync(fd) != 0)
				ereport(data_sync_elevel(ERROR),
						(errcode_for_file_access(),
						 errmsg("could not fsync file \"%s\": %m", path)));
			pgstat_report_wait_end();

			if (CloseTransientFile(fd) != 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not close file \"%s\": %m", path)));
		}
	}
	FreeDir(mappings_dir);
}