rewriteheap.c
1 /*-------------------------------------------------------------------------
2  *
3  * rewriteheap.c
4  * Support functions to rewrite tables.
5  *
6  * These functions provide a facility to completely rewrite a heap, while
7  * preserving visibility information and update chains.
8  *
9  * INTERFACE
10  *
11  * The caller is responsible for creating the new heap, all catalog
12  * changes, supplying the tuples to be written to the new heap, and
13  * rebuilding indexes. The caller must hold AccessExclusiveLock on the
14  * target table, because we assume no one else is writing into it.
15  *
16  * To use the facility:
17  *
18  * begin_heap_rewrite
19  * while (fetch next tuple)
20  * {
21  * if (tuple is dead)
22  * rewrite_heap_dead_tuple
23  * else
24  * {
25  * // do any transformations here if required
26  * rewrite_heap_tuple
27  * }
28  * }
29  * end_heap_rewrite
30  *
31  * The contents of the new relation shouldn't be relied on until after
32  * end_heap_rewrite is called.
33  *
34  *
35  * IMPLEMENTATION
36  *
37  * This would be a fairly trivial affair, except that we need to maintain
38  * the ctid chains that link versions of an updated tuple together.
39  * Since the newly stored tuples will have tids different from the original
40  * ones, if we just copied t_ctid fields to the new table the links would
41  * be wrong. When we are required to copy a (presumably recently-dead or
42  * delete-in-progress) tuple whose ctid doesn't point to itself, we have
43  * to substitute the correct ctid instead.
44  *
45  * For each ctid reference from A -> B, we might encounter either A first
46  * or B first. (Note that a tuple in the middle of a chain is both A and B
47  * of different pairs.)
48  *
49  * If we encounter A first, we'll store the tuple in the unresolved_tups
50  * hash table. When we later encounter B, we remove A from the hash table,
51  * fix the ctid to point to the new location of B, and insert both A and B
52  * to the new heap.
53  *
54  * If we encounter B first, we can insert B to the new heap right away.
55  * We then add an entry to the old_new_tid_map hash table showing B's
56  * original tid (in the old heap) and new tid (in the new heap).
57  * When we later encounter A, we get the new location of B from the table,
58  * and can write A immediately with the correct ctid.
59  *
60  * Entries in the hash tables can be removed as soon as the later tuple
61  * is encountered. That helps to keep the memory usage down. At the end,
62  * both tables are usually empty; we should have encountered both A and B
63  * of each pair. However, it's possible for A to be RECENTLY_DEAD and B
64  * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
65  * for deadness using OldestXmin is not exact. In such a case we might
66  * encounter B first, and skip it, and find A later. Then A would be added
67  * to unresolved_tups, and stay there until end of the rewrite. Since
68  * this case is very unusual, we don't worry about the memory usage.
69  *
70  * Using in-memory hash tables means that we use some memory for each live
71  * update chain in the table, from the time we find one end of the
72  * reference until we find the other end. That shouldn't be a problem in
73  * practice, but if you do something like an UPDATE without a where-clause
74  * on a large table, and then run CLUSTER in the same transaction, you
75  * could run out of memory. It doesn't seem worthwhile to add support for
76  * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
77  * table under normal circumstances. Furthermore, in the typical scenario
78  * of CLUSTERing on an unchanging key column, we'll see all the versions
79  * of a given tuple together anyway, and so the peak memory usage is only
80  * proportional to the number of RECENTLY_DEAD versions of a single row, not
81  * in the whole table. Note that if we do fail halfway through a CLUSTER,
82  * the old table is still valid, so failure is not catastrophic.
83  *
84  * We can't use the normal heap_insert function to insert into the new
85  * heap, because heap_insert overwrites the visibility information.
86  * We use a special-purpose raw_heap_insert function instead, which
87  * is optimized for bulk inserting a lot of tuples, knowing that we have
88  * exclusive access to the heap. raw_heap_insert builds new pages in
89  * local storage. When a page is full, or at the end of the process,
90  * we insert it to WAL as a single record and then write it to disk
91  * directly through smgr. Note, however, that any data sent to the new
92  * heap's TOAST table will go through the normal bufmgr.
93  *
94  *
95  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
96  * Portions Copyright (c) 1994-5, Regents of the University of California
97  *
98  * IDENTIFICATION
99  * src/backend/access/heap/rewriteheap.c
100  *
101  *-------------------------------------------------------------------------
102  */
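
For orientation, here is a minimal sketch of a caller driving this facility in the shape the INTERFACE section above describes. Only begin_heap_rewrite, rewrite_heap_tuple, rewrite_heap_dead_tuple and end_heap_rewrite are functions from this file; fetch_next_tuple() and tuple_is_dead() are hypothetical placeholders standing in for the caller's own scan and visibility logic (CLUSTER, for instance, drives this with a heap or index scan plus HeapTupleSatisfiesVacuum).

/* Illustrative caller sketch; not part of this file. */
static void
copy_heap_data_sketch(Relation old_heap, Relation new_heap,
                      TransactionId oldest_xmin, TransactionId freeze_xid,
                      MultiXactId cutoff_multi, bool use_wal)
{
    RewriteState rwstate;
    HeapTuple    tuple;

    rwstate = begin_heap_rewrite(old_heap, new_heap, oldest_xmin,
                                 freeze_xid, cutoff_multi, use_wal);

    /* fetch_next_tuple() and tuple_is_dead() are placeholders for the
     * caller's own scan and HeapTupleSatisfiesVacuum-based checks */
    while ((tuple = fetch_next_tuple()) != NULL)
    {
        if (tuple_is_dead(tuple, oldest_xmin))
            rewrite_heap_dead_tuple(rwstate, tuple);
        else
        {
            /*
             * rewrite_heap_tuple scribbles on its third argument, so pass a
             * private copy; any transformations of the tuple go here too.
             */
            HeapTuple   copied_tuple = heap_copytuple(tuple);

            rewrite_heap_tuple(rwstate, tuple, copied_tuple);
            heap_freetuple(copied_tuple);
        }
    }

    end_heap_rewrite(rwstate);
}
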
103 #include "postgres.h"
104 
105 #include <sys/stat.h>
106 #include <unistd.h>
107 
108 #include "miscadmin.h"
109 
110 #include "access/heapam.h"
111 #include "access/heapam_xlog.h"
112 #include "access/rewriteheap.h"
113 #include "access/transam.h"
114 #include "access/tuptoaster.h"
115 #include "access/xact.h"
116 #include "access/xloginsert.h"
117 
118 #include "catalog/catalog.h"
119 
120 #include "lib/ilist.h"
121 
122 #include "replication/logical.h"
123 #include "replication/slot.h"
124 
125 #include "storage/bufmgr.h"
126 #include "storage/fd.h"
127 #include "storage/smgr.h"
128 
129 #include "utils/memutils.h"
130 #include "utils/rel.h"
131 #include "utils/tqual.h"
132 
133 #include "storage/procarray.h"
134 
135 /*
136  * State associated with a rewrite operation. This is opaque to the user
137  * of the rewrite facility.
138  */
139 typedef struct RewriteStateData
140 {
141  Relation rs_old_rel; /* source heap */
142  Relation rs_new_rel; /* destination heap */
143  Page rs_buffer; /* page currently being built */
144  BlockNumber rs_blockno; /* block where page will go */
145  bool rs_buffer_valid; /* T if any tuples in buffer */
146  bool rs_use_wal; /* must we WAL-log inserts? */
147  bool rs_logical_rewrite; /* do we need to do logical rewriting */
148  TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
149  * determine tuple visibility */
150  TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
151  * point */
152  TransactionId rs_logical_xmin; /* Xid that will be used as cutoff
153  * point for logical rewrites */
154  MultiXactId rs_cutoff_multi;/* MultiXactId that will be used as cutoff
155  * point for multixacts */
156  MemoryContext rs_cxt; /* for hash tables and entries and tuples in
157  * them */
158  XLogRecPtr rs_begin_lsn; /* XLogInsertLsn when starting the rewrite */
159  HTAB *rs_unresolved_tups; /* unmatched A tuples */
160  HTAB *rs_old_new_tid_map; /* unmatched B tuples */
161  HTAB *rs_logical_mappings; /* logical remapping files */
162  uint32 rs_num_rewrite_mappings; /* # in memory mappings */
163 } RewriteStateData;
164 
165 /*
166  * The lookup keys for the hash tables are tuple TID and xmin (we must check
167  * both to avoid false matches from dead tuples). Beware that there is
168  * probably some padding space in this struct; it must be zeroed out for
169  * correct hashtable operation.
170  */
171 typedef struct
172 {
173  TransactionId xmin; /* tuple xmin */
174  ItemPointerData tid; /* tuple location in old heap */
175 } TidHashKey;
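
As the comment above warns, the key struct must be zeroed before use so that padding bytes hash consistently. A small sketch of the pattern, condensed from rewrite_heap_tuple() below; build_chain_lookup_key() is a hypothetical helper used only for illustration:

/* Hypothetical helper mirroring the key construction in rewrite_heap_tuple(). */
static TidHashKey
build_chain_lookup_key(HeapTuple old_tuple)
{
    TidHashKey  hashkey;

    /* zero the whole struct, including padding, before filling the fields */
    memset(&hashkey, 0, sizeof(hashkey));
    hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
    hashkey.tid = old_tuple->t_data->t_ctid;

    return hashkey;
}
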
176 
177 /*
178  * Entry structures for the hash tables
179  */
180 typedef struct
181 {
182  TidHashKey key; /* expected xmin/old location of B tuple */
183  ItemPointerData old_tid; /* A's location in the old heap */
184  HeapTuple tuple; /* A's tuple contents */
185 } UnresolvedTupData;
186 
187 typedef UnresolvedTupData *UnresolvedTup;
188 
189 typedef struct
190 {
191  TidHashKey key; /* actual xmin/old location of B tuple */
192  ItemPointerData new_tid; /* where we put it in the new heap */
193 } OldToNewMappingData;
194 
195 typedef OldToNewMappingData *OldToNewMapping;
196 
197 /*
198  * In-Memory data for an xid that might need logical remapping entries
199  * to be logged.
200  */
201 typedef struct RewriteMappingFile
202 {
203  TransactionId xid; /* xid that might need to see the row */
204  int vfd; /* fd of mappings file */
205  off_t off; /* how far have we written yet */
206  uint32 num_mappings; /* number of in-memory mappings */
207  dlist_head mappings; /* list of in-memory mappings */
208  char path[MAXPGPATH]; /* path, for error messages */
209 } RewriteMappingFile;
210 
211 /*
212  * A single In-Memory logical rewrite mapping, hanging off
213  * RewriteMappingFile->mappings.
214  */
215 typedef struct RewriteMappingDataEntry
216 {
217  LogicalRewriteMappingData map; /* map between old and new location of
218  * the tuple */
219  dlist_node node;
220 } RewriteMappingDataEntry;
221 
222 
223 /* prototypes for internal functions */
224 static void raw_heap_insert(RewriteState state, HeapTuple tup);
225 
226 /* internal logical remapping prototypes */
227 static void logical_begin_heap_rewrite(RewriteState state);
228 static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
229 static void logical_end_heap_rewrite(RewriteState state);
230 
231 
232 /*
233  * Begin a rewrite of a table
234  *
235  * old_heap old, locked heap relation tuples will be read from
236  * new_heap new, locked heap relation to insert tuples to
237  * oldest_xmin xid used by the caller to determine which tuples are dead
238  * freeze_xid xid before which tuples will be frozen
239  * min_multi multixact before which multis will be removed
240  * use_wal should the inserts to the new heap be WAL-logged?
241  *
242  * Returns an opaque RewriteState, allocated in current memory context,
243  * to be used in subsequent calls to the other functions.
244  */
245 RewriteState
246 begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
247  TransactionId freeze_xid, MultiXactId cutoff_multi,
248  bool use_wal)
249 {
250  RewriteState state;
251  MemoryContext rw_cxt;
252  MemoryContext old_cxt;
253  HASHCTL hash_ctl;
254 
255  /*
256  * To ease cleanup, make a separate context that will contain the
257  * RewriteState struct itself plus all subsidiary data.
258  */
259  rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
260  "Table rewrite",
261  ALLOCSET_DEFAULT_SIZES);
262  old_cxt = MemoryContextSwitchTo(rw_cxt);
263 
264  /* Create and fill in the state struct */
265  state = palloc0(sizeof(RewriteStateData));
266 
267  state->rs_old_rel = old_heap;
268  state->rs_new_rel = new_heap;
269  state->rs_buffer = (Page) palloc(BLCKSZ);
270  /* new_heap needn't be empty, just locked */
271  state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
272  state->rs_buffer_valid = false;
273  state->rs_use_wal = use_wal;
274  state->rs_oldest_xmin = oldest_xmin;
275  state->rs_freeze_xid = freeze_xid;
276  state->rs_cutoff_multi = cutoff_multi;
277  state->rs_cxt = rw_cxt;
278 
279  /* Initialize hash tables used to track update chains */
280  memset(&hash_ctl, 0, sizeof(hash_ctl));
281  hash_ctl.keysize = sizeof(TidHashKey);
282  hash_ctl.entrysize = sizeof(UnresolvedTupData);
283  hash_ctl.hcxt = state->rs_cxt;
284 
285  state->rs_unresolved_tups =
286  hash_create("Rewrite / Unresolved ctids",
287  128, /* arbitrary initial size */
288  &hash_ctl,
289  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
290 
291  hash_ctl.entrysize = sizeof(OldToNewMappingData);
292 
293  state->rs_old_new_tid_map =
294  hash_create("Rewrite / Old to new tid map",
295  128, /* arbitrary initial size */
296  &hash_ctl,
297  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
298 
299  MemoryContextSwitchTo(old_cxt);
300 
301  logical_begin_heap_rewrite(state);
302 
303  return state;
304 }
305 
306 /*
307  * End a rewrite.
308  *
309  * state and any other resources are freed.
310  */
311 void
312 end_heap_rewrite(RewriteState state)
313 {
314  HASH_SEQ_STATUS seq_status;
315  UnresolvedTup unresolved;
316 
317  /*
318  * Write any remaining tuples in the UnresolvedTups table. If we have any
319  * left, they should in fact be dead, but let's err on the safe side.
320  */
321  hash_seq_init(&seq_status, state->rs_unresolved_tups);
322 
323  while ((unresolved = hash_seq_search(&seq_status)) != NULL)
324  {
325  ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
326  raw_heap_insert(state, unresolved->tuple);
327  }
328 
329  /* Write the last page, if any */
330  if (state->rs_buffer_valid)
331  {
332  if (state->rs_use_wal)
333  log_newpage(&state->rs_new_rel->rd_node,
334  MAIN_FORKNUM,
335  state->rs_blockno,
336  state->rs_buffer,
337  true);
338  RelationOpenSmgr(state->rs_new_rel);
339 
340  PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);
341 
342  smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
343  (char *) state->rs_buffer, true);
344  }
345 
346  /*
347  * If the rel is WAL-logged, must fsync before commit. We use heap_sync
348  * to ensure that the toast table gets fsync'd too.
349  *
350  * It's obvious that we must do this when not WAL-logging. It's less
351  * obvious that we have to do it even if we did WAL-log the pages. The
352  * reason is the same as in tablecmds.c's copy_relation_data(): we're
353  * writing data that's not in shared buffers, and so a CHECKPOINT
354  * occurring during the rewriteheap operation won't have fsync'd data we
355  * wrote before the checkpoint.
356  */
357  if (RelationNeedsWAL(state->rs_new_rel))
358  heap_sync(state->rs_new_rel);
359 
360  logical_end_heap_rewrite(state);
361 
362  /* Deleting the context frees everything */
363  MemoryContextDelete(state->rs_cxt);
364 }
365 
366 /*
367  * Add a tuple to the new heap.
368  *
369  * Visibility information is copied from the original tuple, except that
370  * we "freeze" very-old tuples. Note that since we scribble on new_tuple,
371  * it had better be temp storage not a pointer to the original tuple.
372  *
373  * state opaque state as returned by begin_heap_rewrite
374  * old_tuple original tuple in the old heap
375  * new_tuple new, rewritten tuple to be inserted to new heap
376  */
377 void
378 rewrite_heap_tuple(RewriteState state,
379  HeapTuple old_tuple, HeapTuple new_tuple)
380 {
381  MemoryContext old_cxt;
382  ItemPointerData old_tid;
383  TidHashKey hashkey;
384  bool found;
385  bool free_new;
386 
387  old_cxt = MemoryContextSwitchTo(state->rs_cxt);
388 
389  /*
390  * Copy the original tuple's visibility information into new_tuple.
391  *
392  * XXX we might later need to copy some t_infomask2 bits, too? Right now,
393  * we intentionally clear the HOT status bits.
394  */
395  memcpy(&new_tuple->t_data->t_choice.t_heap,
396  &old_tuple->t_data->t_choice.t_heap,
397  sizeof(HeapTupleFields));
398 
399  new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
400  new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
401  new_tuple->t_data->t_infomask |=
402  old_tuple->t_data->t_infomask & HEAP_XACT_MASK;
403 
404  /*
405  * While we have our hands on the tuple, we may as well freeze any
406  * eligible xmin or xmax, so that future VACUUM effort can be saved.
407  */
408  heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid,
409  state->rs_cutoff_multi);
410 
411  /*
412  * Invalid ctid means that ctid should point to the tuple itself. We'll
413  * override it later if the tuple is part of an update chain.
414  */
415  ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
416 
417  /*
418  * If the tuple has been updated, check the old-to-new mapping hash table.
419  */
420  if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
421  HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
422  !(ItemPointerEquals(&(old_tuple->t_self),
423  &(old_tuple->t_data->t_ctid))))
424  {
425  OldToNewMapping mapping;
426 
427  memset(&hashkey, 0, sizeof(hashkey));
428  hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
429  hashkey.tid = old_tuple->t_data->t_ctid;
430 
431  mapping = (OldToNewMapping)
432  hash_search(state->rs_old_new_tid_map, &hashkey,
433  HASH_FIND, NULL);
434 
435  if (mapping != NULL)
436  {
437  /*
438  * We've already copied the tuple that t_ctid points to, so we can
439  * set the ctid of this tuple to point to the new location, and
440  * insert it right away.
441  */
442  new_tuple->t_data->t_ctid = mapping->new_tid;
443 
444  /* We don't need the mapping entry anymore */
445  hash_search(state->rs_old_new_tid_map, &hashkey,
446  HASH_REMOVE, &found);
447  Assert(found);
448  }
449  else
450  {
451  /*
452  * We haven't seen the tuple t_ctid points to yet. Stash this
453  * tuple into unresolved_tups to be written later.
454  */
455  UnresolvedTup unresolved;
456 
457  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
458  HASH_ENTER, &found);
459  Assert(!found);
460 
461  unresolved->old_tid = old_tuple->t_self;
462  unresolved->tuple = heap_copytuple(new_tuple);
463 
464  /*
465  * We can't do anything more now, since we don't know where the
466  * tuple will be written.
467  */
468  MemoryContextSwitchTo(old_cxt);
469  return;
470  }
471  }
472 
473  /*
474  * Now we will write the tuple, and then check to see if it is the B tuple
475  * in any new or known pair. When we resolve a known pair, we will be
476  * able to write that pair's A tuple, and then we have to check if it
477  * resolves some other pair. Hence, we need a loop here.
478  */
479  old_tid = old_tuple->t_self;
480  free_new = false;
481 
482  for (;;)
483  {
484  ItemPointerData new_tid;
485 
486  /* Insert the tuple and find out where it's put in new_heap */
487  raw_heap_insert(state, new_tuple);
488  new_tid = new_tuple->t_self;
489 
490  logical_rewrite_heap_tuple(state, old_tid, new_tuple);
491 
492  /*
493  * If the tuple is the updated version of a row, and the prior version
494  * wouldn't be DEAD yet, then we need to either resolve the prior
495  * version (if it's waiting in rs_unresolved_tups), or make an entry
496  * in rs_old_new_tid_map (so we can resolve it when we do see it). The
497  * previous tuple's xmax would equal this one's xmin, so it's
498  * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
499  */
500  if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
501  !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
502  state->rs_oldest_xmin))
503  {
504  /*
505  * Okay, this is B in an update pair. See if we've seen A.
506  */
507  UnresolvedTup unresolved;
508 
509  memset(&hashkey, 0, sizeof(hashkey));
510  hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
511  hashkey.tid = old_tid;
512 
513  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
514  HASH_FIND, NULL);
515 
516  if (unresolved != NULL)
517  {
518  /*
519  * We have seen and memorized the previous tuple already. Now
520  * that we know where we inserted the tuple its t_ctid points
521  * to, fix its t_ctid and insert it to the new heap.
522  */
523  if (free_new)
524  heap_freetuple(new_tuple);
525  new_tuple = unresolved->tuple;
526  free_new = true;
527  old_tid = unresolved->old_tid;
528  new_tuple->t_data->t_ctid = new_tid;
529 
530  /*
531  * We don't need the hash entry anymore, but don't free its
532  * tuple just yet.
533  */
534  hash_search(state->rs_unresolved_tups, &hashkey,
535  HASH_REMOVE, &found);
536  Assert(found);
537 
538  /* loop back to insert the previous tuple in the chain */
539  continue;
540  }
541  else
542  {
543  /*
544  * Remember the new tid of this tuple. We'll use it to set the
545  * ctid when we find the previous tuple in the chain.
546  */
547  OldToNewMapping mapping;
548 
549  mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
550  HASH_ENTER, &found);
551  Assert(!found);
552 
553  mapping->new_tid = new_tid;
554  }
555  }
556 
557  /* Done with this (chain of) tuples, for now */
558  if (free_new)
559  heap_freetuple(new_tuple);
560  break;
561  }
562 
563  MemoryContextSwitchTo(old_cxt);
564 }
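
To make the pairing logic above concrete, consider a hypothetical two-member update chain in which version A sits at old TID (5,1) and was updated into version B at old TID (9,4). If A is scanned first it cannot be written yet, because its ctid must point at B's still-unknown new location, so A is parked in rs_unresolved_tups under the key (xmax of A, (9,4)). When B is later scanned and inserted at, say, new TID (2,7), that key matches, A's ctid is set to (2,7), and A is written; the hash entry is then dropped. If instead B is scanned first, it is inserted immediately and an rs_old_new_tid_map entry mapping old (9,4) to new (2,7) is left behind, so that A can be written with the correct ctid as soon as it is encountered.
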
565 
566 /*
567  * Register a dead tuple with an ongoing rewrite. Dead tuples are not
568  * copied to the new table, but we still make note of them so that we
569  * can release some resources earlier.
570  *
571  * Returns true if a tuple was removed from the unresolved_tups table.
572  * This indicates that that tuple, previously thought to be "recently dead",
573  * is now known really dead and won't be written to the output.
574  */
575 bool
576 rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
577 {
578  /*
579  * If we have already seen an earlier tuple in the update chain that
580  * points to this tuple, let's forget about that earlier tuple. It's in
581  * fact dead as well, our simple xmax < OldestXmin test in
582  * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
583  * when xmin of a tuple is greater than xmax, which sounds
584  * counter-intuitive but is perfectly valid.
585  *
586  * We don't bother to try to detect the situation the other way round,
587  * when we encounter the dead tuple first and then the recently dead one
588  * that points to it. If that happens, we'll have some unmatched entries
589  * in the UnresolvedTups hash table at the end. That can happen anyway,
590  * because a vacuum might have removed the dead tuple in the chain before
591  * us.
592  */
593  UnresolvedTup unresolved;
594  TidHashKey hashkey;
595  bool found;
596 
597  memset(&hashkey, 0, sizeof(hashkey));
598  hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
599  hashkey.tid = old_tuple->t_self;
600 
601  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
602  HASH_FIND, NULL);
603 
604  if (unresolved != NULL)
605  {
606  /* Need to free the contained tuple as well as the hashtable entry */
607  heap_freetuple(unresolved->tuple);
608  hash_search(state->rs_unresolved_tups, &hashkey,
609  HASH_REMOVE, &found);
610  Assert(found);
611  return true;
612  }
613 
614  return false;
615 }
616 
617 /*
618  * Insert a tuple to the new relation. This has to track heap_insert
619  * and its subsidiary functions!
620  *
621  * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
622  * tuple is invalid on entry, it's replaced with the new TID as well (in
623  * the inserted data only, not in the caller's copy).
624  */
625 static void
626 raw_heap_insert(RewriteState state, HeapTuple tup)
627 {
628  Page page = state->rs_buffer;
629  Size pageFreeSpace,
630  saveFreeSpace;
631  Size len;
632  OffsetNumber newoff;
633  HeapTuple heaptup;
634 
635  /*
636  * If the new tuple is too big for storage or contains already toasted
637  * out-of-line attributes from some other relation, invoke the toaster.
638  *
639  * Note: below this point, heaptup is the data we actually intend to store
640  * into the relation; tup is the caller's original untoasted data.
641  */
642  if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
643  {
644  /* toast table entries should never be recursively toasted */
645  Assert(!HeapTupleHasExternal(tup));
646  heaptup = tup;
647  }
648  else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
649  heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
650  HEAP_INSERT_SKIP_FSM |
651  (state->rs_use_wal ?
652  0 : HEAP_INSERT_SKIP_WAL));
653  else
654  heaptup = tup;
655 
656  len = MAXALIGN(heaptup->t_len); /* be conservative */
657 
658  /*
659  * If we're gonna fail for oversize tuple, do it right away
660  */
661  if (len > MaxHeapTupleSize)
662  ereport(ERROR,
663  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
664  errmsg("row is too big: size %zu, maximum size %zu",
665  len, MaxHeapTupleSize)));
666 
667  /* Compute desired extra freespace due to fillfactor option */
668  saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
669  HEAP_DEFAULT_FILLFACTOR);
670 
671  /* Now we can check to see if there's enough free space already. */
672  if (state->rs_buffer_valid)
673  {
674  pageFreeSpace = PageGetHeapFreeSpace(page);
675 
676  if (len + saveFreeSpace > pageFreeSpace)
677  {
678  /* Doesn't fit, so write out the existing page */
679 
680  /* XLOG stuff */
681  if (state->rs_use_wal)
682  log_newpage(&state->rs_new_rel->rd_node,
683  MAIN_FORKNUM,
684  state->rs_blockno,
685  page,
686  true);
687 
688  /*
689  * Now write the page. We say isTemp = true even if it's not a
690  * temp table, because there's no need for smgr to schedule an
691  * fsync for this write; we'll do it ourselves in
692  * end_heap_rewrite.
693  */
694  RelationOpenSmgr(state->rs_new_rel);
695 
696  PageSetChecksumInplace(page, state->rs_blockno);
697 
698  smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
699  state->rs_blockno, (char *) page, true);
700 
701  state->rs_blockno++;
702  state->rs_buffer_valid = false;
703  }
704  }
705 
706  if (!state->rs_buffer_valid)
707  {
708  /* Initialize a new empty page */
709  PageInit(page, BLCKSZ, 0);
710  state->rs_buffer_valid = true;
711  }
712 
713  /* And now we can insert the tuple into the page */
714  newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
715  InvalidOffsetNumber, false, true);
716  if (newoff == InvalidOffsetNumber)
717  elog(ERROR, "failed to add tuple");
718 
719  /* Update caller's t_self to the actual position where it was stored */
720  ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
721 
722  /*
723  * Insert the correct position into CTID of the stored tuple, too, if the
724  * caller didn't supply a valid CTID.
725  */
726  if (!ItemPointerIsValid(&tup->t_data->t_ctid))
727  {
728  ItemId newitemid;
729  HeapTupleHeader onpage_tup;
730 
731  newitemid = PageGetItemId(page, newoff);
732  onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);
733 
734  onpage_tup->t_ctid = tup->t_self;
735  }
736 
737  /* If heaptup is a private copy, release it. */
738  if (heaptup != tup)
739  heap_freetuple(heaptup);
740 }
741 
742 /* ------------------------------------------------------------------------
743  * Logical rewrite support
744  *
745  * When doing logical decoding - which relies on using cmin/cmax of catalog
746  * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
747  * information to allow the decoding backend to update its internal mapping
748  * of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
749  *
750  * For that, every time we find a tuple that's been modified in a catalog
751  * relation within the xmin horizon of any decoding slot, we log a mapping
752  * from the old to the new location.
753  *
754  * To deal with rewrites that abort, the filename of a mapping file contains
755  * the xid of the transaction performing the rewrite, which then can be
756  * checked before being read in.
757  *
758  * For efficiency we don't immediately spill every single mapping for a
759  * row to disk but only do so in batches when we've collected several of them
760  * in memory or when end_heap_rewrite() has been called.
761  *
762  * Crash-Safety: This module diverts from the usual patterns of doing WAL
763  * since it cannot rely on checkpoint flushing out all buffers and thus
764  * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
765  * buffer modifications is performed while the buffer(s) that are being
766  * modified are exclusively locked guaranteeing that both the WAL record and
767  * the modified heap are on either side of the checkpoint. But since the
768  * mapping files we log aren't in shared_buffers that interlock doesn't work.
769  *
770  * Instead we simply write the mapping files out to disk, *before* the
771  * XLogInsert() is performed. That guarantees that either the XLogInsert() is
772  * inserted after the checkpoint's redo pointer or that the checkpoint (via
773  * LogicalRewriteHeapCheckpoint()) has flushed the (partial) mapping file to
774  * disk. That leaves the tail end that has not yet been flushed open to
775  * corruption, which is solved by including the current offset in the
776  * xl_heap_rewrite_mapping records and truncating the mapping file to it
777  * during replay. Every time a rewrite is finished all generated mapping files
778  * are synced to disk.
779  *
780  * Note that if we were only concerned about crash safety we wouldn't have to
781  * deal with WAL logging at all - an fsync() at the end of a rewrite would be
782  * sufficient for crash safety. Any mapping that hasn't been safely flushed to
783  * disk has to be by an aborted (explicitly or via a crash) transaction and is
784  * ignored by virtue of the xid in its name being subject to a
785  * TransactionDidCommit() check. But we want to support having standbys via
786  * physical replication, both for availability and to do logical decoding
787  * there.
788  * ------------------------------------------------------------------------
789  */
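
To make the ordering concrete, here is a condensed excerpt from logical_heap_rewrite_flush_mappings() further down in this file (the declarations of src, xlrec, waldata_start and len are in the full function): the mapping data is appended to the mapping file first, and only afterwards is the xl_heap_rewrite_mapping record inserted into WAL, which is exactly the ordering the crash-safety argument above relies on.

    /* condensed from logical_heap_rewrite_flush_mappings() below */
    written = FileWrite(src->vfd, waldata_start, len);
    if (written != len)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\", wrote %d of %d: %m",
                        src->path, written, len)));
    src->off += len;

    XLogBeginInsert();
    XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
    XLogRegisterData(waldata_start, len);

    /* the record is what lets replay truncate and rewrite the unsynced tail */
    XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);
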
790 
791 /*
792  * Do preparations for logging logical mappings during a rewrite if
793  * necessary. If we detect that we don't need to log anything we'll prevent
794  * any further action by the various logical rewrite functions.
795  */
796 static void
797 logical_begin_heap_rewrite(RewriteState state)
798 {
799  HASHCTL hash_ctl;
800  TransactionId logical_xmin;
801 
802  /*
803  * We only need to persist these mappings if the rewritten table can be
804  * accessed during logical decoding, if not, we can skip doing any
805  * additional work.
806  */
807  state->rs_logical_rewrite =
808  RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);
809 
810  if (!state->rs_logical_rewrite)
811  return;
812 
813  ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);
814 
815  /*
816  * If there are no logical slots in progress we don't need to do anything,
817  * there cannot be any remappings for relevant rows yet. The relation's
818  * lock protects us against races.
819  */
820  if (logical_xmin == InvalidTransactionId)
821  {
822  state->rs_logical_rewrite = false;
823  return;
824  }
825 
826  state->rs_logical_xmin = logical_xmin;
827  state->rs_begin_lsn = GetXLogInsertRecPtr();
828  state->rs_num_rewrite_mappings = 0;
829 
830  memset(&hash_ctl, 0, sizeof(hash_ctl));
831  hash_ctl.keysize = sizeof(TransactionId);
832  hash_ctl.entrysize = sizeof(RewriteMappingFile);
833  hash_ctl.hcxt = state->rs_cxt;
834 
835  state->rs_logical_mappings =
836  hash_create("Logical rewrite mapping",
837  128, /* arbitrary initial size */
838  &hash_ctl,
839  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
840 }
841 
842 /*
843  * Flush all logical in-memory mappings to disk, but don't fsync them yet.
844  */
845 static void
846 logical_heap_rewrite_flush_mappings(RewriteState state)
847 {
848  HASH_SEQ_STATUS seq_status;
849  RewriteMappingFile *src;
850  dlist_mutable_iter iter;
851 
852  Assert(state->rs_logical_rewrite);
853 
854  /* no logical rewrite in progress, no need to iterate over mappings */
855  if (state->rs_num_rewrite_mappings == 0)
856  return;
857 
858  elog(DEBUG1, "flushing %u logical rewrite mapping entries",
859  state->rs_num_rewrite_mappings);
860 
861  hash_seq_init(&seq_status, state->rs_logical_mappings);
862  while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
863  {
864  char *waldata;
865  char *waldata_start;
866  xl_heap_rewrite_mapping xlrec;
867  Oid dboid;
868  uint32 len;
869  int written;
870 
871  /* this file hasn't got any new mappings */
872  if (src->num_mappings == 0)
873  continue;
874 
875  if (state->rs_old_rel->rd_rel->relisshared)
876  dboid = InvalidOid;
877  else
878  dboid = MyDatabaseId;
879 
880  xlrec.num_mappings = src->num_mappings;
881  xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
882  xlrec.mapped_xid = src->xid;
883  xlrec.mapped_db = dboid;
884  xlrec.offset = src->off;
885  xlrec.start_lsn = state->rs_begin_lsn;
886 
887  /* write all mappings consecutively */
888  len = src->num_mappings * sizeof(LogicalRewriteMappingData);
889  waldata_start = waldata = palloc(len);
890 
891  /*
892  * collect data we need to write out, but don't modify ondisk data yet
893  */
894  dlist_foreach_modify(iter, &src->mappings)
895  {
896  RewriteMappingDataEntry *pmap;
897 
898  pmap = dlist_container(RewriteMappingDataEntry, node, iter.cur);
899 
900  memcpy(waldata, &pmap->map, sizeof(pmap->map));
901  waldata += sizeof(pmap->map);
902 
903  /* remove from the list and free */
904  dlist_delete(&pmap->node);
905  pfree(pmap);
906 
907  /* update bookkeeping */
908  state->rs_num_rewrite_mappings--;
909  src->num_mappings--;
910  }
911 
912  Assert(src->num_mappings == 0);
913  Assert(waldata == waldata_start + len);
914 
915  /*
916  * Note that we deviate from the usual WAL coding practices here,
917  * check the above "Logical rewrite support" comment for reasoning.
918  */
919  written = FileWrite(src->vfd, waldata_start, len);
920  if (written != len)
921  ereport(ERROR,
922  (errcode_for_file_access(),
923  errmsg("could not write to file \"%s\", wrote %d of %d: %m", src->path,
924  written, len)));
925  src->off += len;
926 
927  XLogBeginInsert();
928  XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
929  XLogRegisterData(waldata_start, len);
930 
931  /* write xlog record */
932  XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);
933 
934  pfree(waldata_start);
935  }
936  Assert(state->rs_num_rewrite_mappings == 0);
937 }
938 
939 /*
940  * Logical remapping part of end_heap_rewrite().
941  */
942 static void
943 logical_end_heap_rewrite(RewriteState state)
944 {
945  HASH_SEQ_STATUS seq_status;
946  RewriteMappingFile *src;
947 
948  /* done, no logical rewrite in progress */
949  if (!state->rs_logical_rewrite)
950  return;
951 
952  /* writeout remaining in-memory entries */
953  if (state->rs_num_rewrite_mappings > 0)
954  logical_heap_rewrite_flush_mappings(state);
955 
956  /* Iterate over all mappings we have written and fsync the files. */
957  hash_seq_init(&seq_status, state->rs_logical_mappings);
958  while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
959  {
960  if (FileSync(src->vfd) != 0)
961  ereport(ERROR,
962  (errcode_for_file_access(),
963  errmsg("could not fsync file \"%s\": %m", src->path)));
964  FileClose(src->vfd);
965  }
966  /* memory context cleanup will deal with the rest */
967 }
968 
969 /*
970  * Log a single (old->new) mapping for 'xid'.
971  */
972 static void
973 logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
974  LogicalRewriteMappingData *map)
975 {
976  RewriteMappingFile *src;
977  RewriteMappingDataEntry *pmap;
978  Oid relid;
979  bool found;
980 
981  relid = RelationGetRelid(state->rs_old_rel);
982 
983  /* look for existing mappings for this 'mapped' xid */
984  src = hash_search(state->rs_logical_mappings, &xid,
985  HASH_ENTER, &found);
986 
987  /*
988  * We haven't yet had the need to map anything for this xid, create
989  * per-xid data structures.
990  */
991  if (!found)
992  {
993  char path[MAXPGPATH];
994  Oid dboid;
995 
996  if (state->rs_old_rel->rd_rel->relisshared)
997  dboid = InvalidOid;
998  else
999  dboid = MyDatabaseId;
1000 
1001  snprintf(path, MAXPGPATH,
1002  "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
1003  dboid, relid,
1004  (uint32) (state->rs_begin_lsn >> 32),
1005  (uint32) state->rs_begin_lsn,
1006  xid, GetCurrentTransactionId());
1007 
1008  dlist_init(&src->mappings);
1009  src->num_mappings = 0;
1010  src->off = 0;
1011  memcpy(src->path, path, sizeof(path));
1012  src->vfd = PathNameOpenFile(path,
1013  O_CREAT | O_EXCL | O_WRONLY | PG_BINARY,
1014  S_IRUSR | S_IWUSR);
1015  if (src->vfd < 0)
1016  ereport(ERROR,
1017  (errcode_for_file_access(),
1018  errmsg("could not create file \"%s\": %m", path)));
1019  }
1020 
1021  pmap = MemoryContextAlloc(state->rs_cxt,
1022  sizeof(RewriteMappingDataEntry));
1023  memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
1024  dlist_push_tail(&src->mappings, &pmap->node);
1025  src->num_mappings++;
1026  state->rs_num_rewrite_mappings++;
1027 
1028  /*
1029  * Write out buffer every time we've too many in-memory entries across all
1030  * mapping files.
1031  */
1032  if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
1033  logical_heap_rewrite_flush_mappings(state);
1034 }
1035 
1036 /*
1037  * Perform logical remapping for a tuple that's mapped from old_tid to
1038  * new_tuple->t_self by rewrite_heap_tuple() if necessary for the tuple.
1039  */
1040 static void
1041 logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
1042  HeapTuple new_tuple)
1043 {
1044  ItemPointerData new_tid = new_tuple->t_self;
1045  TransactionId cutoff = state->rs_logical_xmin;
1046  TransactionId xmin;
1047  TransactionId xmax;
1048  bool do_log_xmin = false;
1049  bool do_log_xmax = false;
1050  LogicalRewriteMappingData map;
1051 
1052  /* no logical rewrite in progress, we don't need to log anything */
1053  if (!state->rs_logical_rewrite)
1054  return;
1055 
1056  xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
1057  /* use *GetUpdateXid to correctly deal with multixacts */
1058  xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);
1059 
1060  /*
1061  * Log the mapping iff the tuple has been created recently.
1062  */
1063  if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
1064  do_log_xmin = true;
1065 
1066  if (!TransactionIdIsNormal(xmax))
1067  {
1068  /*
1069  * no xmax is set, can't have any permanent ones, so this check is
1070  * sufficient
1071  */
1072  }
1073  else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
1074  {
1075  /* only locked, we don't care */
1076  }
1077  else if (!TransactionIdPrecedes(xmax, cutoff))
1078  {
1079  /* tuple has been deleted recently, log */
1080  do_log_xmax = true;
1081  }
1082 
1083  /* if neither needs to be logged, we're done */
1084  if (!do_log_xmin && !do_log_xmax)
1085  return;
1086 
1087  /* fill out mapping information */
1088  map.old_node = state->rs_old_rel->rd_node;
1089  map.old_tid = old_tid;
1090  map.new_node = state->rs_new_rel->rd_node;
1091  map.new_tid = new_tid;
1092 
1093  /* ---
1094  * Now persist the mapping for the individual xids that are affected. We
1095  * need to log for both xmin and xmax if they aren't the same transaction
1096  * since the mapping files are per "affected" xid.
1097  * We don't muster all that much effort detecting whether xmin and xmax
1098  * are actually the same transaction, we just check whether the xid is the
1099  * same disregarding subtransactions. Logging too much is relatively
1100  * harmless and we could never do the check fully since subtransaction
1101  * data is thrown away during restarts.
1102  * ---
1103  */
1104  if (do_log_xmin)
1105  logical_rewrite_log_mapping(state, xmin, &map);
1106  /* separately log mapping for xmax unless it'd be redundant */
1107  if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
1108  logical_rewrite_log_mapping(state, xmax, &map);
1109 }
1110 
1111 /*
1112  * Replay XLOG_HEAP2_REWRITE records
1113  */
1114 void
1115 heap_xlog_logical_rewrite(XLogReaderState *r)
1116 {
1117  char path[MAXPGPATH];
1118  int fd;
1119  xl_heap_rewrite_mapping *xlrec;
1120  uint32 len;
1121  char *data;
1122 
1123  xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);
1124 
1125  snprintf(path, MAXPGPATH,
1126  "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
1127  xlrec->mapped_db, xlrec->mapped_rel,
1128  (uint32) (xlrec->start_lsn >> 32),
1129  (uint32) xlrec->start_lsn,
1130  xlrec->mapped_xid, XLogRecGetXid(r));
1131 
1132  fd = OpenTransientFile(path,
1133  O_CREAT | O_WRONLY | PG_BINARY,
1134  S_IRUSR | S_IWUSR);
1135  if (fd < 0)
1136  ereport(ERROR,
1137  (errcode_for_file_access(),
1138  errmsg("could not create file \"%s\": %m", path)));
1139 
1140  /*
1141  * Truncate all data that's not guaranteed to have been safely fsynced (by
1142  * previous record or by the last checkpoint).
1143  */
1144  if (ftruncate(fd, xlrec->offset) != 0)
1145  ereport(ERROR,
1146  (errcode_for_file_access(),
1147  errmsg("could not truncate file \"%s\" to %u: %m",
1148  path, (uint32) xlrec->offset)));
1149 
1150  /* now seek to the position we want to write our data to */
1151  if (lseek(fd, xlrec->offset, SEEK_SET) != xlrec->offset)
1152  ereport(ERROR,
1153  (errcode_for_file_access(),
1154  errmsg("could not seek to end of file \"%s\": %m",
1155  path)));
1156 
1157  data = XLogRecGetData(r) + sizeof(*xlrec);
1158 
1159  len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);
1160 
1161  /* write out tail end of mapping file (again) */
1162  if (write(fd, data, len) != len)
1163  ereport(ERROR,
1164  (errcode_for_file_access(),
1165  errmsg("could not write to file \"%s\": %m", path)));
1166 
1167  /*
1168  * Now fsync all previously written data. We could improve things and only
1169  * do this for the last write to a file, but the required bookkeeping
1170  * doesn't seem worth the trouble.
1171  */
1172  if (pg_fsync(fd) != 0)
1173  ereport(ERROR,
1174  (errcode_for_file_access(),
1175  errmsg("could not fsync file \"%s\": %m", path)));
1176 
1177  CloseTransientFile(fd);
1178 }
1179 
1180 /* ---
1181  * Perform a checkpoint for logical rewrite mappings
1182  *
1183  * This serves two tasks:
1184  * 1) Remove all mappings not needed anymore based on the logical restart LSN
1185  * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
1186  * only has to deal with the parts of a mapping that have been written out
1187  * after the checkpoint started.
1188  * ---
1189  */
1190 void
1191 CheckPointLogicalRewriteHeap(void)
1192 {
1193  XLogRecPtr cutoff;
1194  XLogRecPtr redo;
1195  DIR *mappings_dir;
1196  struct dirent *mapping_de;
1197  char path[MAXPGPATH];
1198 
1199  /*
1200  * We start off with a minimum of the last redo pointer. No new decoding
1201  * slot will start before that, so that's a safe upper bound for removal.
1202  */
1203  redo = GetRedoRecPtr();
1204 
1205  /* now check for the restart ptrs from existing slots */
1206  cutoff = ReplicationSlotsComputeLogicalRestartLSN();
1207 
1208  /* don't start earlier than the restart lsn */
1209  if (cutoff != InvalidXLogRecPtr && redo < cutoff)
1210  cutoff = redo;
1211 
1212  mappings_dir = AllocateDir("pg_logical/mappings");
1213  while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
1214  {
1215  struct stat statbuf;
1216  Oid dboid;
1217  Oid relid;
1218  XLogRecPtr lsn;
1219  TransactionId rewrite_xid;
1220  TransactionId create_xid;
1221  uint32 hi,
1222  lo;
1223 
1224  if (strcmp(mapping_de->d_name, ".") == 0 ||
1225  strcmp(mapping_de->d_name, "..") == 0)
1226  continue;
1227 
1228  snprintf(path, MAXPGPATH, "pg_logical/mappings/%s", mapping_de->d_name);
1229  if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
1230  continue;
1231 
1232  /* Skip over files that cannot be ours. */
1233  if (strncmp(mapping_de->d_name, "map-", 4) != 0)
1234  continue;
1235 
1236  if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
1237  &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
1238  elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
1239 
1240  lsn = ((uint64) hi) << 32 | lo;
1241 
1242  if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
1243  {
1244  elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
1245  if (unlink(path) < 0)
1246  ereport(ERROR,
1247  (errcode_for_file_access(),
1248  errmsg("could not remove file \"%s\": %m", path)));
1249  }
1250  else
1251  {
1252  int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0);
1253 
1254  /*
1255  * The file cannot vanish due to concurrency since this function
1256  * is the only one removing logical mappings and it's run while
1257  * CheckpointLock is held exclusively.
1258  */
1259  if (fd < 0)
1260  ereport(ERROR,
1261  (errcode_for_file_access(),
1262  errmsg("could not open file \"%s\": %m", path)));
1263 
1264  /*
1265  * We could try to avoid fsyncing files that either haven't
1266  * changed or have only been created since the checkpoint's start,
1267  * but it's currently not deemed worth the effort.
1268  */
1269  else if (pg_fsync(fd) != 0)
1270  ereport(ERROR,
1271  (errcode_for_file_access(),
1272  errmsg("could not fsync file \"%s\": %m", path)));
1273  CloseTransientFile(fd);
1274  }
1275  }
1276  FreeDir(mappings_dir);
1277 }