PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
rewriteheap.h File Reference
#include "access/htup.h"
#include "storage/itemptr.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
Include dependency graph for rewriteheap.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  LogicalRewriteMappingData
 

Macros

#define LOGICAL_REWRITE_FORMAT   "map-%x-%x-%X_%X-%x-%x"
 

Typedefs

typedef struct RewriteStateData *RewriteState
 
typedef struct LogicalRewriteMappingData LogicalRewriteMappingData
 

Functions

RewriteState begin_heap_rewrite (Relation OldHeap, Relation NewHeap, TransactionId OldestXmin, TransactionId FreezeXid, MultiXactId MultiXactCutoff, bool use_wal)
 
void end_heap_rewrite (RewriteState state)
 
void rewrite_heap_tuple (RewriteState state, HeapTuple oldTuple, HeapTuple newTuple)
 
bool rewrite_heap_dead_tuple (RewriteState state, HeapTuple oldTuple)
 
void CheckPointLogicalRewriteHeap (void)
 

Macro Definition Documentation

#define LOGICAL_REWRITE_FORMAT   "map-%x-%x-%X_%X-%x-%x"

Typedef Documentation

Definition at line 22 of file rewriteheap.h.

Function Documentation

RewriteState begin_heap_rewrite ( Relation  OldHeap,
Relation  NewHeap,
TransactionId  OldestXmin,
TransactionId  FreezeXid,
MultiXactId  MultiXactCutoff,
bool  use_wal 
)

Definition at line 248 of file rewriteheap.c.

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate(), CurrentMemoryContext, HASHCTL::entrysize, HASH_BLOBS, HASH_CONTEXT, hash_create(), HASH_ELEM, HASHCTL::hcxt, HASHCTL::keysize, logical_begin_heap_rewrite(), MemoryContextSwitchTo(), palloc(), palloc0(), RelationGetNumberOfBlocks, RewriteStateData::rs_blockno, RewriteStateData::rs_buffer, RewriteStateData::rs_buffer_valid, RewriteStateData::rs_cutoff_multi, RewriteStateData::rs_cxt, RewriteStateData::rs_freeze_xid, RewriteStateData::rs_new_rel, RewriteStateData::rs_old_new_tid_map, RewriteStateData::rs_old_rel, RewriteStateData::rs_oldest_xmin, RewriteStateData::rs_unresolved_tups, and RewriteStateData::rs_use_wal.

Referenced by copy_heap_data().

251 {
253  MemoryContext rw_cxt;
254  MemoryContext old_cxt;
255  HASHCTL hash_ctl;
256 
257  /*
258  * To ease cleanup, make a separate context that will contain the
259  * RewriteState struct itself plus all subsidiary data.
260  */
262  "Table rewrite",
264  old_cxt = MemoryContextSwitchTo(rw_cxt);
265 
266  /* Create and fill in the state struct */
267  state = palloc0(sizeof(RewriteStateData));
268 
269  state->rs_old_rel = old_heap;
270  state->rs_new_rel = new_heap;
271  state->rs_buffer = (Page) palloc(BLCKSZ);
272  /* new_heap needn't be empty, just locked */
273  state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
274  state->rs_buffer_valid = false;
275  state->rs_use_wal = use_wal;
276  state->rs_oldest_xmin = oldest_xmin;
277  state->rs_freeze_xid = freeze_xid;
278  state->rs_cutoff_multi = cutoff_multi;
279  state->rs_cxt = rw_cxt;
280 
281  /* Initialize hash tables used to track update chains */
282  memset(&hash_ctl, 0, sizeof(hash_ctl));
283  hash_ctl.keysize = sizeof(TidHashKey);
284  hash_ctl.entrysize = sizeof(UnresolvedTupData);
285  hash_ctl.hcxt = state->rs_cxt;
286 
287  state->rs_unresolved_tups =
288  hash_create("Rewrite / Unresolved ctids",
289  128, /* arbitrary initial size */
290  &hash_ctl,
292 
293  hash_ctl.entrysize = sizeof(OldToNewMappingData);
294 
295  state->rs_old_new_tid_map =
296  hash_create("Rewrite / Old to new tid map",
297  128, /* arbitrary initial size */
298  &hash_ctl,
300 
301  MemoryContextSwitchTo(old_cxt);
302 
304 
305  return state;
306 }
#define HASH_CONTEXT
Definition: hsearch.h:93
#define HASH_ELEM
Definition: hsearch.h:87
MemoryContext hcxt
Definition: hsearch.h:78
TransactionId rs_freeze_xid
Definition: rewriteheap.c:152
MultiXactId rs_cutoff_multi
Definition: rewriteheap.c:156
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
Size entrysize
Definition: hsearch.h:73
Relation rs_new_rel
Definition: rewriteheap.c:144
HTAB * rs_unresolved_tups
Definition: rewriteheap.c:161
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:165
MemoryContext CurrentMemoryContext
Definition: mcxt.c:37
TransactionId rs_oldest_xmin
Definition: rewriteheap.c:150
#define HASH_BLOBS
Definition: hsearch.h:88
MemoryContext rs_cxt
Definition: rewriteheap.c:158
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:322
void * palloc0(Size size)
Definition: mcxt.c:878
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
Size keysize
Definition: hsearch.h:72
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
static void logical_begin_heap_rewrite(RewriteState state)
Definition: rewriteheap.c:799
HTAB * rs_old_new_tid_map
Definition: rewriteheap.c:162
Definition: regguts.h:298
void * palloc(Size size)
Definition: mcxt.c:849
Relation rs_old_rel
Definition: rewriteheap.c:143
BlockNumber rs_blockno
Definition: rewriteheap.c:146
Pointer Page
Definition: bufpage.h:74
void CheckPointLogicalRewriteHeap ( void  )

Definition at line 1200 of file rewriteheap.c.

References AllocateDir(), CloseTransientFile(), dirent::d_name, DEBUG1, elog, ereport, errcode_for_file_access(), errmsg(), ERROR, fd(), FreeDir(), GetRedoRecPtr(), InvalidXLogRecPtr, LOGICAL_REWRITE_FORMAT, lstat, MAXPGPATH, NULL, OpenTransientFile(), PG_BINARY, pg_fsync(), pgstat_report_wait_end(), pgstat_report_wait_start(), ReadDir(), ReplicationSlotsComputeLogicalRestartLSN(), snprintf(), unlink(), and WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC.

Referenced by CheckPointGuts().

1201 {
1202  XLogRecPtr cutoff;
1203  XLogRecPtr redo;
1204  DIR *mappings_dir;
1205  struct dirent *mapping_de;
1206  char path[MAXPGPATH + 20];
1207 
1208  /*
1209  * We start off with a minimum of the last redo pointer. No new decoding
1210  * slot will start before that, so that's a safe upper bound for removal.
1211  */
1212  redo = GetRedoRecPtr();
1213 
1214  /* now check for the restart ptrs from existing slots */
1216 
1217  /* don't start earlier than the restart lsn */
1218  if (cutoff != InvalidXLogRecPtr && redo < cutoff)
1219  cutoff = redo;
1220 
1221  mappings_dir = AllocateDir("pg_logical/mappings");
1222  while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
1223  {
1224  struct stat statbuf;
1225  Oid dboid;
1226  Oid relid;
1227  XLogRecPtr lsn;
1228  TransactionId rewrite_xid;
1229  TransactionId create_xid;
1230  uint32 hi,
1231  lo;
1232 
1233  if (strcmp(mapping_de->d_name, ".") == 0 ||
1234  strcmp(mapping_de->d_name, "..") == 0)
1235  continue;
1236 
1237  snprintf(path, sizeof(path), "pg_logical/mappings/%s", mapping_de->d_name);
1238  if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
1239  continue;
1240 
1241  /* Skip over files that cannot be ours. */
1242  if (strncmp(mapping_de->d_name, "map-", 4) != 0)
1243  continue;
1244 
1245  if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
1246  &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
1247  elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
1248 
1249  lsn = ((uint64) hi) << 32 | lo;
1250 
1251  if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
1252  {
1253  elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
1254  if (unlink(path) < 0)
1255  ereport(ERROR,
1257  errmsg("could not remove file \"%s\": %m", path)));
1258  }
1259  else
1260  {
1261  int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0);
1262 
1263  /*
1264  * The file cannot vanish due to concurrency since this function
1265  * is the only one removing logical mappings and it's run while
1266  * CheckpointLock is held exclusively.
1267  */
1268  if (fd < 0)
1269  ereport(ERROR,
1271  errmsg("could not open file \"%s\": %m", path)));
1272 
1273  /*
1274  * We could try to avoid fsyncing files that either haven't
1275  * changed or have only been created since the checkpoint's start,
1276  * but it's currently not deemed worth the effort.
1277  */
1279  if (pg_fsync(fd) != 0)
1280  ereport(ERROR,
1282  errmsg("could not fsync file \"%s\": %m", path)));
1284  CloseTransientFile(fd);
1285  }
1286  }
1287  FreeDir(mappings_dir);
1288 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define DEBUG1
Definition: elog.h:25
uint32 TransactionId
Definition: c.h:397
int snprintf(char *str, size_t count, const char *fmt,...) pg_attribute_printf(3
unsigned int Oid
Definition: postgres_ext.h:31
Definition: dirent.h:9
static int fd(const char *x, int i)
Definition: preproc-init.c:105
#define PG_BINARY
Definition: c.h:1039
Definition: dirent.c:25
#define ERROR
Definition: elog.h:43
#define MAXPGPATH
int OpenTransientFile(FileName fileName, int fileFlags, int fileMode)
Definition: fd.c:2144
int errcode_for_file_access(void)
Definition: elog.c:598
XLogRecPtr ReplicationSlotsComputeLogicalRestartLSN(void)
Definition: slot.c:784
unsigned int uint32
Definition: c.h:268
DIR * AllocateDir(const char *dirname)
Definition: fd.c:2335
static void pgstat_report_wait_end(void)
Definition: pgstat.h:1235
int unlink(const char *filename)
#define ereport(elevel, rest)
Definition: elog.h:122
int CloseTransientFile(int fd)
Definition: fd.c:2305
#define NULL
Definition: c.h:229
uint64 XLogRecPtr
Definition: xlogdefs.h:21
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition: fd.c:2401
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: pgstat.h:1211
XLogRecPtr GetRedoRecPtr(void)
Definition: xlog.c:8168
#define LOGICAL_REWRITE_FORMAT
Definition: rewriteheap.h:54
int errmsg(const char *fmt,...)
Definition: elog.c:797
int pg_fsync(int fd)
Definition: fd.c:333
char d_name[MAX_PATH]
Definition: dirent.h:14
#define elog
Definition: elog.h:219
#define lstat(path, sb)
Definition: win32.h:262
int FreeDir(DIR *dir)
Definition: fd.c:2444
void end_heap_rewrite ( RewriteState  state)

Definition at line 314 of file rewriteheap.c.

References hash_seq_init(), hash_seq_search(), heap_sync(), ItemPointerSetInvalid, log_newpage(), logical_end_heap_rewrite(), MAIN_FORKNUM, MemoryContextDelete(), NULL, PageSetChecksumInplace(), raw_heap_insert(), RelationData::rd_node, RelationData::rd_smgr, RelationNeedsWAL, RelationOpenSmgr, RewriteStateData::rs_blockno, RewriteStateData::rs_buffer, RewriteStateData::rs_buffer_valid, RewriteStateData::rs_cxt, RewriteStateData::rs_new_rel, RewriteStateData::rs_unresolved_tups, RewriteStateData::rs_use_wal, smgrextend(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, and UnresolvedTupData::tuple.

Referenced by copy_heap_data().

315 {
316  HASH_SEQ_STATUS seq_status;
317  UnresolvedTup unresolved;
318 
319  /*
320  * Write any remaining tuples in the UnresolvedTups table. If we have any
321  * left, they should in fact be dead, but let's err on the safe side.
322  */
323  hash_seq_init(&seq_status, state->rs_unresolved_tups);
324 
325  while ((unresolved = hash_seq_search(&seq_status)) != NULL)
326  {
327  ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
328  raw_heap_insert(state, unresolved->tuple);
329  }
330 
331  /* Write the last page, if any */
332  if (state->rs_buffer_valid)
333  {
334  if (state->rs_use_wal)
335  log_newpage(&state->rs_new_rel->rd_node,
336  MAIN_FORKNUM,
337  state->rs_blockno,
338  state->rs_buffer,
339  true);
341 
343 
345  (char *) state->rs_buffer, true);
346  }
347 
348  /*
349  * If the rel is WAL-logged, must fsync before commit. We use heap_sync
350  * to ensure that the toast table gets fsync'd too.
351  *
352  * It's obvious that we must do this when not WAL-logging. It's less
353  * obvious that we have to do it even if we did WAL-log the pages. The
354  * reason is the same as in tablecmds.c's copy_relation_data(): we're
355  * writing data that's not in shared buffers, and so a CHECKPOINT
356  * occurring during the rewriteheap operation won't have fsync'd data we
357  * wrote before the checkpoint.
358  */
359  if (RelationNeedsWAL(state->rs_new_rel))
360  heap_sync(state->rs_new_rel);
361 
363 
364  /* Deleting the context frees everything */
365  MemoryContextDelete(state->rs_cxt);
366 }
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:200
static void logical_end_heap_rewrite(RewriteState state)
Definition: rewriteheap.c:946
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
Relation rs_new_rel
Definition: rewriteheap.c:144
void heap_sync(Relation rel)
Definition: heapam.c:9134
HeapTupleHeader t_data
Definition: htup.h:67
#define RelationOpenSmgr(relation)
Definition: rel.h:460
ItemPointerData t_ctid
Definition: htup_details.h:150
HTAB * rs_unresolved_tups
Definition: rewriteheap.c:161
MemoryContext rs_cxt
Definition: rewriteheap.c:158
RelFileNode rd_node
Definition: rel.h:85
#define NULL
Definition: c.h:229
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1199
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
#define RelationNeedsWAL(relation)
Definition: rel.h:505
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:600
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:150
XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std)
Definition: xloginsert.c:972
static void raw_heap_insert(RewriteState state, HeapTuple tup)
Definition: rewriteheap.c:628
BlockNumber rs_blockno
Definition: rewriteheap.c:146
bool rewrite_heap_dead_tuple ( RewriteState  state,
HeapTuple  oldTuple 
)

Definition at line 578 of file rewriteheap.c.

References Assert, HASH_FIND, HASH_REMOVE, hash_search(), heap_freetuple(), HeapTupleHeaderGetXmin, NULL, RewriteStateData::rs_unresolved_tups, HeapTupleData::t_data, HeapTupleData::t_self, TidHashKey::tid, UnresolvedTupData::tuple, and TidHashKey::xmin.

Referenced by copy_heap_data().

579 {
580  /*
581  * If we have already seen an earlier tuple in the update chain that
582  * points to this tuple, let's forget about that earlier tuple. It's in
583  * fact dead as well, our simple xmax < OldestXmin test in
584  * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
585  * when xmin of a tuple is greater than xmax, which sounds
586  * counter-intuitive but is perfectly valid.
587  *
588  * We don't bother to try to detect the situation the other way round,
589  * when we encounter the dead tuple first and then the recently dead one
590  * that points to it. If that happens, we'll have some unmatched entries
591  * in the UnresolvedTups hash table at the end. That can happen anyway,
592  * because a vacuum might have removed the dead tuple in the chain before
593  * us.
594  */
595  UnresolvedTup unresolved;
596  TidHashKey hashkey;
597  bool found;
598 
599  memset(&hashkey, 0, sizeof(hashkey));
600  hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
601  hashkey.tid = old_tuple->t_self;
602 
603  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
604  HASH_FIND, NULL);
605 
606  if (unresolved != NULL)
607  {
608  /* Need to free the contained tuple as well as the hashtable entry */
609  heap_freetuple(unresolved->tuple);
610  hash_search(state->rs_unresolved_tups, &hashkey,
611  HASH_REMOVE, &found);
612  Assert(found);
613  return true;
614  }
615 
616  return false;
617 }
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
HTAB * rs_unresolved_tups
Definition: rewriteheap.c:161
ItemPointerData tid
Definition: rewriteheap.c:176
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:676
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:307
TransactionId xmin
Definition: rewriteheap.c:175
void rewrite_heap_tuple ( RewriteState  state,
HeapTuple  oldTuple,
HeapTuple  newTuple 
)

Definition at line 380 of file rewriteheap.c.

References Assert, HASH_ENTER, HASH_FIND, HASH_REMOVE, hash_search(), HEAP2_XACT_MASK, heap_copytuple(), heap_freetuple(), heap_freeze_tuple(), HEAP_UPDATED, HEAP_XACT_MASK, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), ItemPointerEquals(), ItemPointerSetInvalid, logical_rewrite_heap_tuple(), MemoryContextSwitchTo(), OldToNewMappingData::new_tid, NULL, UnresolvedTupData::old_tid, raw_heap_insert(), RewriteStateData::rs_cutoff_multi, RewriteStateData::rs_cxt, RewriteStateData::rs_freeze_xid, RewriteStateData::rs_old_new_tid_map, RewriteStateData::rs_oldest_xmin, RewriteStateData::rs_unresolved_tups, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_self, TidHashKey::tid, TransactionIdPrecedes(), UnresolvedTupData::tuple, and TidHashKey::xmin.

Referenced by reform_and_rewrite_tuple().

382 {
383  MemoryContext old_cxt;
384  ItemPointerData old_tid;
385  TidHashKey hashkey;
386  bool found;
387  bool free_new;
388 
389  old_cxt = MemoryContextSwitchTo(state->rs_cxt);
390 
391  /*
392  * Copy the original tuple's visibility information into new_tuple.
393  *
394  * XXX we might later need to copy some t_infomask2 bits, too? Right now,
395  * we intentionally clear the HOT status bits.
396  */
397  memcpy(&new_tuple->t_data->t_choice.t_heap,
398  &old_tuple->t_data->t_choice.t_heap,
399  sizeof(HeapTupleFields));
400 
401  new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
402  new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
403  new_tuple->t_data->t_infomask |=
404  old_tuple->t_data->t_infomask & HEAP_XACT_MASK;
405 
406  /*
407  * While we have our hands on the tuple, we may as well freeze any
408  * eligible xmin or xmax, so that future VACUUM effort can be saved.
409  */
410  heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid,
411  state->rs_cutoff_multi);
412 
413  /*
414  * Invalid ctid means that ctid should point to the tuple itself. We'll
415  * override it later if the tuple is part of an update chain.
416  */
417  ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
418 
419  /*
420  * If the tuple has been updated, check the old-to-new mapping hash table.
421  */
422  if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
423  HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
424  !(ItemPointerEquals(&(old_tuple->t_self),
425  &(old_tuple->t_data->t_ctid))))
426  {
427  OldToNewMapping mapping;
428 
429  memset(&hashkey, 0, sizeof(hashkey));
430  hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
431  hashkey.tid = old_tuple->t_data->t_ctid;
432 
433  mapping = (OldToNewMapping)
434  hash_search(state->rs_old_new_tid_map, &hashkey,
435  HASH_FIND, NULL);
436 
437  if (mapping != NULL)
438  {
439  /*
440  * We've already copied the tuple that t_ctid points to, so we can
441  * set the ctid of this tuple to point to the new location, and
442  * insert it right away.
443  */
444  new_tuple->t_data->t_ctid = mapping->new_tid;
445 
446  /* We don't need the mapping entry anymore */
447  hash_search(state->rs_old_new_tid_map, &hashkey,
448  HASH_REMOVE, &found);
449  Assert(found);
450  }
451  else
452  {
453  /*
454  * We haven't seen the tuple t_ctid points to yet. Stash this
455  * tuple into unresolved_tups to be written later.
456  */
457  UnresolvedTup unresolved;
458 
459  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
460  HASH_ENTER, &found);
461  Assert(!found);
462 
463  unresolved->old_tid = old_tuple->t_self;
464  unresolved->tuple = heap_copytuple(new_tuple);
465 
466  /*
467  * We can't do anything more now, since we don't know where the
468  * tuple will be written.
469  */
470  MemoryContextSwitchTo(old_cxt);
471  return;
472  }
473  }
474 
475  /*
476  * Now we will write the tuple, and then check to see if it is the B tuple
477  * in any new or known pair. When we resolve a known pair, we will be
478  * able to write that pair's A tuple, and then we have to check if it
479  * resolves some other pair. Hence, we need a loop here.
480  */
481  old_tid = old_tuple->t_self;
482  free_new = false;
483 
484  for (;;)
485  {
486  ItemPointerData new_tid;
487 
488  /* Insert the tuple and find out where it's put in new_heap */
489  raw_heap_insert(state, new_tuple);
490  new_tid = new_tuple->t_self;
491 
492  logical_rewrite_heap_tuple(state, old_tid, new_tuple);
493 
494  /*
495  * If the tuple is the updated version of a row, and the prior version
496  * wouldn't be DEAD yet, then we need to either resolve the prior
497  * version (if it's waiting in rs_unresolved_tups), or make an entry
498  * in rs_old_new_tid_map (so we can resolve it when we do see it). The
499  * previous tuple's xmax would equal this one's xmin, so it's
500  * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
501  */
502  if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
503  !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
504  state->rs_oldest_xmin))
505  {
506  /*
507  * Okay, this is B in an update pair. See if we've seen A.
508  */
509  UnresolvedTup unresolved;
510 
511  memset(&hashkey, 0, sizeof(hashkey));
512  hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
513  hashkey.tid = old_tid;
514 
515  unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
516  HASH_FIND, NULL);
517 
518  if (unresolved != NULL)
519  {
520  /*
521  * We have seen and memorized the previous tuple already. Now
522  * that we know where we inserted the tuple its t_ctid points
523  * to, fix its t_ctid and insert it to the new heap.
524  */
525  if (free_new)
526  heap_freetuple(new_tuple);
527  new_tuple = unresolved->tuple;
528  free_new = true;
529  old_tid = unresolved->old_tid;
530  new_tuple->t_data->t_ctid = new_tid;
531 
532  /*
533  * We don't need the hash entry anymore, but don't free its
534  * tuple just yet.
535  */
536  hash_search(state->rs_unresolved_tups, &hashkey,
537  HASH_REMOVE, &found);
538  Assert(found);
539 
540  /* loop back to insert the previous tuple in the chain */
541  continue;
542  }
543  else
544  {
545  /*
546  * Remember the new tid of this tuple. We'll use it to set the
547  * ctid when we find the previous tuple in the chain.
548  */
549  OldToNewMapping mapping;
550 
551  mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
552  HASH_ENTER, &found);
553  Assert(!found);
554 
555  mapping->new_tid = new_tid;
556  }
557  }
558 
559  /* Done with this (chain of) tuples, for now */
560  if (free_new)
561  heap_freetuple(new_tuple);
562  break;
563  }
564 
565  MemoryContextSwitchTo(old_cxt);
566 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1585
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:611
static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple)
Definition: rewriteheap.c:1044
TransactionId rs_freeze_xid
Definition: rewriteheap.c:152
MultiXactId rs_cutoff_multi
Definition: rewriteheap.c:156
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define HEAP2_XACT_MASK
Definition: htup_details.h:269
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
#define HEAP_UPDATED
Definition: htup_details.h:195
ItemPointerData old_tid
Definition: rewriteheap.c:185
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
Definition: heapam.c:6819
ItemPointerData new_tid
Definition: rewriteheap.c:194
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
HTAB * rs_unresolved_tups
Definition: rewriteheap.c:161
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId rs_oldest_xmin
Definition: rewriteheap.c:150
MemoryContext rs_cxt
Definition: rewriteheap.c:158
ItemPointerData tid
Definition: rewriteheap.c:176
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:676
HTAB * rs_old_new_tid_map
Definition: rewriteheap.c:162
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:307
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:150
TransactionId xmin
Definition: rewriteheap.c:175
static void raw_heap_insert(RewriteState state, HeapTuple tup)
Definition: rewriteheap.c:628
#define HEAP_XACT_MASK
Definition: htup_details.h:204
OldToNewMappingData * OldToNewMapping
Definition: rewriteheap.c:197