heapam.c
1/*-------------------------------------------------------------------------
2 *
3 * heapam.c
4 * heap access method code
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/heap/heapam.c
12 *
13 *
14 * INTERFACE ROUTINES
15 * heap_beginscan - begin relation scan
16 * heap_rescan - restart a relation scan
17 * heap_endscan - end relation scan
18 * heap_getnext - retrieve next tuple in scan
19 * heap_fetch - retrieve tuple with given tid
20 * heap_insert - insert tuple into a relation
21 * heap_multi_insert - insert multiple tuples into a relation
22 * heap_delete - delete a tuple from a relation
23 * heap_update - replace a tuple in a relation with another tuple
24 *
25 * NOTES
26 * This file contains the heap_ routines which implement
27 * the POSTGRES heap access method used for all POSTGRES
28 * relations.
29 *
30 *-------------------------------------------------------------------------
31 */
32#include "postgres.h"
33
34#include "access/heapam.h"
35#include "access/heaptoast.h"
36#include "access/hio.h"
37#include "access/multixact.h"
38#include "access/subtrans.h"
39#include "access/syncscan.h"
40#include "access/valid.h"
41#include "access/visibilitymap.h"
42#include "access/xloginsert.h"
43#include "catalog/pg_database.h"
44#include "catalog/pg_database_d.h"
45#include "commands/vacuum.h"
46#include "pgstat.h"
47#include "port/pg_bitutils.h"
48#include "storage/lmgr.h"
49#include "storage/predicate.h"
50#include "storage/procarray.h"
51#include "utils/datum.h"
53#include "utils/inval.h"
54#include "utils/spccache.h"
55#include "utils/syscache.h"
56
57
58static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
59 TransactionId xid, CommandId cid, int options);
60static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
61 Buffer newbuf, HeapTuple oldtup,
62 HeapTuple newtup, HeapTuple old_key_tuple,
63 bool all_visible_cleared, bool new_all_visible_cleared);
64#ifdef USE_ASSERT_CHECKING
65static void check_lock_if_inplace_updateable_rel(Relation relation,
66 const ItemPointerData *otid,
67 HeapTuple newtup);
68static void check_inplace_rel_lock(HeapTuple oldtup);
69#endif
70static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
71 Bitmapset *interesting_cols,
72 Bitmapset *external_cols,
73 HeapTuple oldtup, HeapTuple newtup,
74 bool *has_external);
75static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
76 LockTupleMode mode, LockWaitPolicy wait_policy,
77 bool *have_tuple_lock);
78static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
79 BlockNumber block,
80 ScanDirection dir);
81static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
82 ScanDirection dir);
83static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
84 uint16 old_infomask2, TransactionId add_to_xmax,
85 LockTupleMode mode, bool is_update,
86 TransactionId *result_xmax, uint16 *result_infomask,
87 uint16 *result_infomask2);
88static TM_Result heap_lock_updated_tuple(Relation rel,
89 uint16 prior_infomask,
90 TransactionId prior_raw_xmax,
91 const ItemPointerData *prior_ctid,
92 TransactionId xid,
93 LockTupleMode mode);
94static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
95 uint16 *new_infomask2);
96static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
97 uint16 t_infomask);
98static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
99 LockTupleMode lockmode, bool *current_is_member);
100static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
101 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
102 int *remaining);
103static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
104 uint16 infomask, Relation rel, int *remaining,
105 bool logLockFailure);
106static void index_delete_sort(TM_IndexDeleteOp *delstate);
107static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
108static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
109static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
110 bool *copy);
111
112
113/*
114 * Each tuple lock mode has a corresponding heavyweight lock, and one or two
115 * corresponding MultiXactStatuses (one to merely lock tuples, another one to
116 * update them). This table (and the macros below) helps us determine the
117 * heavyweight lock mode and MultiXactStatus values to use for any particular
118 * tuple lock strength.
119 *
120 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
121 *
122 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
123 * instead.
124 */
125static const struct
126{
127 LOCKMODE hwlock;
128 int lockstatus;
129 int updstatus;
130}
131
132 tupleLockExtraInfo[MaxLockTupleMode + 1] =
133{
134 { /* LockTupleKeyShare */
135 AccessShareLock,
136 MultiXactStatusForKeyShare,
137 -1 /* KeyShare does not allow updating tuples */
138 },
139 { /* LockTupleShare */
140 RowShareLock,
141 MultiXactStatusForShare,
142 -1 /* Share does not allow updating tuples */
143 },
144 { /* LockTupleNoKeyExclusive */
145 ExclusiveLock,
146 MultiXactStatusForNoKeyUpdate,
147 MultiXactStatusNoKeyUpdate
148 },
149 { /* LockTupleExclusive */
150 AccessExclusiveLock,
151 MultiXactStatusForUpdate,
152 MultiXactStatusUpdate
153 }
154};
155
156/* Get the LOCKMODE for a given MultiXactStatus */
157#define LOCKMODE_from_mxstatus(status) \
158 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
159
160/*
161 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
162 * This is more readable than having every caller translate it to lock.h's
163 * LOCKMODE.
164 */
165#define LockTupleTuplock(rel, tup, mode) \
166 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
167#define UnlockTupleTuplock(rel, tup, mode) \
168 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
169#define ConditionalLockTupleTuplock(rel, tup, mode, log) \
170 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
171
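/*
 * Illustrative sketch (not part of the original file): how the macros above
 * are used. A caller names the LockTupleMode it wants and lets
 * tupleLockExtraInfo supply the heavyweight lock mode; e.g. LockTupleShare
 * is taken as RowShareLock on the tuple. The function name is hypothetical.
 */
#ifdef NOT_USED
static void
example_tuple_lock(Relation rel, ItemPointer tid)
{
	LockTupleTuplock(rel, tid, LockTupleShare);
	/* ... examine or update the tuple while holding the tuple lock ... */
	UnlockTupleTuplock(rel, tid, LockTupleShare);
}
#endif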
172#ifdef USE_PREFETCH
173/*
174 * heap_index_delete_tuples and index_delete_prefetch_buffer use this
175 * structure to coordinate prefetching activity
176 */
177typedef struct
178{
179 BlockNumber cur_hblkno;
180 int next_item;
181 int ndeltids;
182 TM_IndexDelete *deltids;
183} IndexDeletePrefetchState;
184#endif
185
186/* heap_index_delete_tuples bottom-up index deletion costing constants */
187#define BOTTOMUP_MAX_NBLOCKS 6
188#define BOTTOMUP_TOLERANCE_NBLOCKS 3
189
190/*
191 * heap_index_delete_tuples uses this when determining which heap blocks it
192 * must visit to help its bottom-up index deletion caller
193 */
194typedef struct IndexDeleteCounts
195{
196 int16 npromisingtids; /* Number of "promising" TIDs in group */
197 int16 ntids; /* Number of TIDs in group */
198 int16 ifirsttid; /* Offset to group's first deltid */
199} IndexDeleteCounts;
200
201/*
202 * This table maps tuple lock strength values for each particular
203 * MultiXactStatus value.
204 */
205static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
206{
207 LockTupleKeyShare, /* ForKeyShare */
208 LockTupleShare, /* ForShare */
209 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
210 LockTupleExclusive, /* ForUpdate */
211 LockTupleNoKeyExclusive, /* NoKeyUpdate */
212 LockTupleExclusive /* Update */
213};
214
215/* Get the LockTupleMode for a given MultiXactStatus */
216#define TUPLOCK_from_mxstatus(status) \
217 (MultiXactStatusLock[(status)])
218
219/*
220 * Check that we have a valid snapshot if we might need TOAST access.
221 */
222static inline void
223AssertHasSnapshotForToast(Relation rel)
224{
225#ifdef USE_ASSERT_CHECKING
226
227 /* bootstrap mode in particular breaks this rule */
228 if (!IsNormalProcessingMode())
229 return;
230
231 /* if the relation doesn't have a TOAST table, we are good */
232 if (!OidIsValid(rel->rd_rel->reltoastrelid))
233 return;
234
235 Assert(HaveRegisteredOrActiveSnapshot());
236
237#endif /* USE_ASSERT_CHECKING */
238}
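/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * the rule the assertion above enforces. Any heap operation that might toast
 * or detoast out-of-line values should run with an active or registered
 * snapshot, e.g.:
 */
#ifdef NOT_USED
static void
example_insert_with_snapshot(Relation rel, HeapTuple tup)
{
	PushActiveSnapshot(GetTransactionSnapshot());
	heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);
	PopActiveSnapshot();
}
#endif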
239
240/* ----------------------------------------------------------------
241 * heap support routines
242 * ----------------------------------------------------------------
243 */
244
245/*
246 * Streaming read API callback for parallel sequential scans. Returns the next
247 * block the caller wants from the read stream or InvalidBlockNumber when done.
248 */
249static BlockNumber
250heap_scan_stream_read_next_parallel(ReadStream *stream,
251 void *callback_private_data,
252 void *per_buffer_data)
253{
254 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
255
256 Assert(ScanDirectionIsForward(scan->rs_dir));
257 Assert(scan->rs_base.rs_parallel);
258
259 if (unlikely(!scan->rs_inited))
260 {
261 /* parallel scan */
262 table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
263 scan->rs_parallelworkerdata,
264 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
265 scan->rs_startblock,
266 scan->rs_numblocks);
267
268 /* may return InvalidBlockNumber if there are no more blocks */
269 scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
270 scan->rs_parallelworkerdata,
271 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
272 scan->rs_inited = true;
273 }
274 else
275 {
276 scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
277 scan->rs_parallelworkerdata,
278 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
279 }
280
281 return scan->rs_prefetch_block;
282}
283
284/*
285 * Streaming read API callback for serial sequential and TID range scans.
286 * Returns the next block the caller wants from the read stream or
287 * InvalidBlockNumber when done.
288 */
289static BlockNumber
290heap_scan_stream_read_next_serial(ReadStream *stream,
291 void *callback_private_data,
292 void *per_buffer_data)
293{
294 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
295
296 if (unlikely(!scan->rs_inited))
297 {
298 scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
299 scan->rs_inited = true;
300 }
301 else
302 scan->rs_prefetch_block = heapgettup_advance_block(scan,
303 scan->rs_prefetch_block,
304 scan->rs_dir);
305
306 return scan->rs_prefetch_block;
307}
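/*
 * Illustrative sketch (not part of the original file): the consuming side of
 * the callbacks above. The read stream repeatedly invokes the registered
 * callback for block numbers and hands back pinned buffers in that order;
 * heap_fetch_next_buffer() below is the real consumer.
 */
#ifdef NOT_USED
static void
example_drain_read_stream(ReadStream *stream)
{
	Buffer		buf;

	while ((buf = read_stream_next_buffer(stream, NULL)) != InvalidBuffer)
	{
		/* ... process the pinned buffer ... */
		ReleaseBuffer(buf);
	}
}
#endif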
308
309/*
310 * Read stream API callback for bitmap heap scans.
311 * Returns the next block the caller wants from the read stream or
312 * InvalidBlockNumber when done.
313 */
314static BlockNumber
315bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
316 void *per_buffer_data)
317{
318 TBMIterateResult *tbmres = per_buffer_data;
319 BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
320 HeapScanDesc hscan = (HeapScanDesc) bscan;
321 TableScanDesc sscan = &hscan->rs_base;
322
323 for (;;)
324 {
325 CHECK_FOR_INTERRUPTS();
326
327 /* no more entries in the bitmap */
328 if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
329 return InvalidBlockNumber;
330
331 /*
332 * Ignore any claimed entries past what we think is the end of the
333 * relation. It may have been extended after the start of our scan (we
334 * only hold an AccessShareLock, and it could be inserts from this
335 * backend). We don't take this optimization in SERIALIZABLE
336 * isolation though, as we need to examine all invisible tuples
337 * reachable by the index.
338 */
339 if (!IsolationIsSerializable() &&
340 tbmres->blockno >= hscan->rs_nblocks)
341 continue;
342
343 return tbmres->blockno;
344 }
345
346 /* not reachable */
347 Assert(false);
348}
349
350/* ----------------
351 * initscan - scan code common to heap_beginscan and heap_rescan
352 * ----------------
353 */
354static void
355initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
356{
357 ParallelBlockTableScanDesc bpscan = NULL;
358 bool allow_strat;
359 bool allow_sync;
360
361 /*
362 * Determine the number of blocks we have to scan.
363 *
364 * It is sufficient to do this once at scan start, since any tuples added
365 * while the scan is in progress will be invisible to my snapshot anyway.
366 * (That is not true when using a non-MVCC snapshot. However, we couldn't
367 * guarantee to return tuples added after scan start anyway, since they
368 * might go into pages we already scanned. To guarantee consistent
369 * results for a non-MVCC snapshot, the caller must hold some higher-level
370 * lock that ensures the interesting tuple(s) won't change.)
371 */
372 if (scan->rs_base.rs_parallel != NULL)
373 {
374 bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
375 scan->rs_nblocks = bpscan->phs_nblocks;
376 }
377 else
378 scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
379
380 /*
381 * If the table is large relative to NBuffers, use a bulk-read access
382 * strategy and enable synchronized scanning (see syncscan.c). Although
383 * the thresholds for these features could be different, we make them the
384 * same so that there are only two behaviors to tune rather than four.
385 * (However, some callers need to be able to disable one or both of these
386 * behaviors, independently of the size of the table; also there is a GUC
387 * variable that can disable synchronized scanning.)
388 *
389 * Note that table_block_parallelscan_initialize has a very similar test;
390 * if you change this, consider changing that one, too.
391 */
392 if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
393 scan->rs_nblocks > NBuffers / 4)
394 {
395 allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
396 allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
397 }
398 else
399 allow_strat = allow_sync = false;
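/*
 * A worked example of the threshold above (added for illustration): with
 * shared_buffers = 128MB and the default 8kB block size, NBuffers is 16384,
 * so any table larger than 4096 blocks (32MB) is read with a BAS_BULKREAD
 * strategy and becomes eligible for synchronized scanning.
 */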
400
401 if (allow_strat)
402 {
403 /* During a rescan, keep the previous strategy object. */
404 if (scan->rs_strategy == NULL)
405 scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
406 }
407 else
408 {
409 if (scan->rs_strategy != NULL)
410 FreeAccessStrategy(scan->rs_strategy);
411 scan->rs_strategy = NULL;
412 }
413
414 if (scan->rs_base.rs_parallel != NULL)
415 {
416 /* For parallel scan, believe whatever ParallelTableScanDesc says. */
417 if (scan->rs_base.rs_parallel->phs_syncscan)
418 scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
419 else
420 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
421
422 /*
423 * If not rescanning, initialize the startblock. Finding the actual
424 * start location is done in table_block_parallelscan_startblock_init,
425 * based on whether an alternative start location has been set with
426 * heap_setscanlimits, or using the syncscan location, when syncscan
427 * is enabled.
428 */
429 if (!keep_startblock)
430 scan->rs_startblock = InvalidBlockNumber;
431 }
432 else
433 {
434 if (keep_startblock)
435 {
436 /*
437 * When rescanning, we want to keep the previous startblock
438 * setting, so that rewinding a cursor doesn't generate surprising
439 * results. Reset the active syncscan setting, though.
440 */
441 if (allow_sync && synchronize_seqscans)
442 scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
443 else
444 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
445 }
446 else if (allow_sync && synchronize_seqscans)
447 {
448 scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
449 scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
450 }
451 else
452 {
453 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
454 scan->rs_startblock = 0;
455 }
456 }
457
458 scan->rs_numblocks = InvalidBlockNumber;
459 scan->rs_inited = false;
460 scan->rs_ctup.t_data = NULL;
461 ItemPointerSetInvalid(&scan->rs_ctup.t_self);
462 scan->rs_cbuf = InvalidBuffer;
463 scan->rs_cblock = InvalidBlockNumber;
464 scan->rs_ntuples = 0;
465 scan->rs_cindex = 0;
466
467 /*
468 * Initialize to ForwardScanDirection because it is most common and
469 * because heap scans go forward before going backward (e.g. CURSORs).
470 */
471 scan->rs_dir = ForwardScanDirection;
472 scan->rs_prefetch_block = InvalidBlockNumber;
473
474 /* page-at-a-time fields are always invalid when not rs_inited */
475
476 /*
477 * copy the scan key, if appropriate
478 */
479 if (key != NULL && scan->rs_base.rs_nkeys > 0)
480 memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
481
482 /*
483 * Currently, we only have a stats counter for sequential heap scans (but
484 * e.g for bitmap scans the underlying bitmap index scans will be counted,
485 * and for sample scans we update stats for tuple fetches).
486 */
487 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
488 pgstat_count_heap_scan(scan->rs_base.rs_rd);
489}
490
491/*
492 * heap_setscanlimits - restrict range of a heapscan
493 *
494 * startBlk is the page to start at
495 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
496 */
497void
498heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
499{
500 HeapScanDesc scan = (HeapScanDesc) sscan;
501
502 Assert(!scan->rs_inited); /* else too late to change */
503 /* else rs_startblock is significant */
504 Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
505
506 /* Check startBlk is valid (but allow case of zero blocks...) */
507 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
508
509 scan->rs_startblock = startBlk;
510 scan->rs_numblocks = numBlks;
511}
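/*
 * Usage sketch (hypothetical caller, not part of the original file): limit a
 * scan to the 100 blocks starting at block 10, before the first tuple is
 * fetched. Passing InvalidBlockNumber as numBlks means "scan to the end".
 */
#ifdef NOT_USED
static void
example_limit_scan(TableScanDesc sscan)
{
	heap_setscanlimits(sscan, 10, 100);
}
#endif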
512
513/*
514 * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
515 * multiple times, with constant arguments for all_visible,
516 * check_serializable.
517 */
519static pg_attribute_always_inline int
520page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
521 Page page, Buffer buffer,
522 BlockNumber block, int lines,
523 bool all_visible, bool check_serializable)
524{
525 int ntup = 0;
526 OffsetNumber lineoff;
527
528 for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
529 {
530 ItemId lpp = PageGetItemId(page, lineoff);
531 HeapTupleData loctup;
532 bool valid;
533
534 if (!ItemIdIsNormal(lpp))
535 continue;
536
537 loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
538 loctup.t_len = ItemIdGetLength(lpp);
539 loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
540 ItemPointerSet(&(loctup.t_self), block, lineoff);
541
542 if (all_visible)
543 valid = true;
544 else
545 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
546
547 if (check_serializable)
548 HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
549 &loctup, buffer, snapshot);
550
551 if (valid)
552 {
553 scan->rs_vistuples[ntup] = lineoff;
554 ntup++;
555 }
556 }
557
558 Assert(ntup <= MaxHeapTuplesPerPage);
559
560 return ntup;
561}
562
563/*
564 * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
565 *
566 * Preparation currently consists of 1. prune the scan's rs_cbuf page, and 2.
567 * fill the rs_vistuples[] array with the OffsetNumbers of visible tuples.
568 */
569void
570heap_prepare_pagescan(TableScanDesc sscan)
571{
572 HeapScanDesc scan = (HeapScanDesc) sscan;
573 Buffer buffer = scan->rs_cbuf;
574 BlockNumber block = scan->rs_cblock;
575 Snapshot snapshot;
576 Page page;
577 int lines;
578 bool all_visible;
579 bool check_serializable;
580
581 Assert(BufferGetBlockNumber(buffer) == block);
582
583 /* ensure we're not accidentally being used when not in pagemode */
585 snapshot = scan->rs_base.rs_snapshot;
586
587 /*
588 * Prune and repair fragmentation for the whole page, if possible.
589 */
590 heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
591
592 /*
593 * We must hold share lock on the buffer content while examining tuple
594 * visibility. Afterwards, however, the tuples we have found to be
595 * visible are guaranteed good as long as we hold the buffer pin.
596 */
597 LockBuffer(buffer, BUFFER_LOCK_SHARE);
598
599 page = BufferGetPage(buffer);
600 lines = PageGetMaxOffsetNumber(page);
601
602 /*
603 * If the all-visible flag indicates that all tuples on the page are
604 * visible to everyone, we can skip the per-tuple visibility tests.
605 *
606 * Note: In hot standby, a tuple that's already visible to all
607 * transactions on the primary might still be invisible to a read-only
608 * transaction in the standby. We partly handle this problem by tracking
609 * the minimum xmin of visible tuples as the cut-off XID while marking a
610 * page all-visible on the primary and WAL log that along with the
611 * visibility map SET operation. In hot standby, we wait for (or abort)
612 * all transactions that potentially may not see one or more tuples on
613 * the page. That's how index-only scans work fine in hot standby. A
614 * crucial difference between index-only scans and heap scans is that the
615 * index-only scan completely relies on the visibility map, whereas a heap
616 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
617 * the page-level flag can be trusted in the same way, because it might
618 * get propagated somehow without being explicitly WAL-logged, e.g. via a
619 * full page write. Until we can prove that beyond doubt, let's check each
620 * tuple for visibility the hard way.
621 */
622 all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
623 check_serializable =
624 CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
625
626 /*
627 * We call page_collect_tuples() with constant arguments, to get the
628 * compiler to constant fold the constant arguments. Separate calls with
629 * constant arguments, rather than variables, are needed on several
630 * compilers to actually perform constant folding.
631 */
632 if (likely(all_visible))
633 {
634 if (likely(!check_serializable))
635 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
636 block, lines, true, false);
637 else
638 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
639 block, lines, true, true);
640 }
641 else
642 {
643 if (likely(!check_serializable))
644 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
645 block, lines, false, false);
646 else
647 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
648 block, lines, false, true);
649 }
650
651 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
652}
653
654/*
655 * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
656 *
657 * Read the next block of the scan relation from the read stream and save it
658 * in the scan descriptor. It is already pinned.
659 */
660static inline void
661heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
662{
663 Assert(scan->rs_read_stream);
664
665 /* release previous scan buffer, if any */
666 if (BufferIsValid(scan->rs_cbuf))
667 {
668 ReleaseBuffer(scan->rs_cbuf);
669 scan->rs_cbuf = InvalidBuffer;
670 }
671
672 /*
673 * Be sure to check for interrupts at least once per page. Checks at
674 * higher code levels won't be able to stop a seqscan that encounters many
675 * pages' worth of consecutive dead tuples.
676 */
677 CHECK_FOR_INTERRUPTS();
678
679 /*
680 * If the scan direction is changing, reset the prefetch block to the
681 * current block. Otherwise, we will incorrectly prefetch the blocks
682 * between the prefetch block and the current block again before
683 * prefetching blocks in the new, correct scan direction.
684 */
685 if (unlikely(scan->rs_dir != dir))
686 {
687 scan->rs_prefetch_block = scan->rs_cblock;
688 read_stream_reset(scan->rs_read_stream);
689 }
690
691 scan->rs_dir = dir;
692
694 if (BufferIsValid(scan->rs_cbuf))
695 scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
696}
697
698/*
699 * heapgettup_initial_block - return the first BlockNumber to scan
700 *
701 * Returns InvalidBlockNumber when there are no blocks to scan. This can
702 * occur with empty tables and in parallel scans when parallel workers get all
703 * of the pages before we can get a chance to get our first page.
704 */
705static pg_noinline BlockNumber
706heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
707{
708 Assert(!scan->rs_inited);
709 Assert(scan->rs_base.rs_parallel == NULL);
710
711 /* When there are no pages to scan, return InvalidBlockNumber */
712 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
713 return InvalidBlockNumber;
714
715 if (ScanDirectionIsForward(dir))
716 {
717 return scan->rs_startblock;
718 }
719 else
720 {
721 /*
722 * Disable reporting to syncscan logic in a backwards scan; it's not
723 * very likely anyone else is doing the same thing at the same time,
724 * and much more likely that we'll just bollix things for forward
725 * scanners.
726 */
727 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
728
729 /*
730 * Start from last page of the scan. Ensure we take into account
731 * rs_numblocks if it's been adjusted by heap_setscanlimits().
732 */
733 if (scan->rs_numblocks != InvalidBlockNumber)
734 return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
735
736 if (scan->rs_startblock > 0)
737 return scan->rs_startblock - 1;
738
739 return scan->rs_nblocks - 1;
740 }
741}
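/*
 * A worked example of the backward-scan arithmetic above (added for
 * illustration), assuming rs_nblocks = 100: with rs_startblock = 30 and
 * rs_numblocks = 20 the scan starts at block (30 + 20 - 1) % 100 = 49; with
 * no block limit it starts at block 29; and when rs_startblock is 0 it wraps
 * to block 99.
 */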
742
743
744/*
745 * heapgettup_start_page - helper function for heapgettup()
746 *
747 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
748 * to the number of tuples on this page. Also set *lineoff to the first
749 * offset to scan with forward scans getting the first offset and backward
750 * getting the final offset on the page.
751 */
752static Page
753heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
754 OffsetNumber *lineoff)
755{
756 Page page;
757
758 Assert(scan->rs_inited);
759 Assert(BufferIsValid(scan->rs_cbuf));
760
761 /* Caller is responsible for ensuring buffer is locked if needed */
762 page = BufferGetPage(scan->rs_cbuf);
763
764 *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
765
766 if (ScanDirectionIsForward(dir))
767 *lineoff = FirstOffsetNumber;
768 else
769 *lineoff = (OffsetNumber) (*linesleft);
770
771 /* lineoff now references the physically previous or next tid */
772 return page;
773}
774
775
776/*
777 * heapgettup_continue_page - helper function for heapgettup()
778 *
779 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
780 * to the number of tuples left to scan on this page. Also set *lineoff to
781 * the next offset to scan according to the ScanDirection in 'dir'.
782 */
783static inline Page
784heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
785 OffsetNumber *lineoff)
786{
787 Page page;
788
789 Assert(scan->rs_inited);
790 Assert(BufferIsValid(scan->rs_cbuf));
791
792 /* Caller is responsible for ensuring buffer is locked if needed */
793 page = BufferGetPage(scan->rs_cbuf);
794
795 if (ScanDirectionIsForward(dir))
796 {
797 *lineoff = OffsetNumberNext(scan->rs_coffset);
798 *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
799 }
800 else
801 {
802 /*
803 * The previously returned tuple may have been vacuumed since the
804 * previous scan when we use a non-MVCC snapshot, so we must
805 * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
806 */
807 *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
808 *linesleft = *lineoff;
809 }
810
811 /* lineoff now references the physically previous or next tid */
812 return page;
813}
814
815/*
816 * heapgettup_advance_block - helper for heap_fetch_next_buffer()
817 *
818 * Given the current block number, the scan direction, and various information
819 * contained in the scan descriptor, calculate the BlockNumber to scan next
820 * and return it. If there are no further blocks to scan, return
821 * InvalidBlockNumber to indicate this fact to the caller.
822 *
823 * This should not be called to determine the initial block number -- only for
824 * subsequent blocks.
825 *
826 * This also adjusts rs_numblocks when a limit has been imposed by
827 * heap_setscanlimits().
828 */
829static inline BlockNumber
830heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
831{
832 Assert(scan->rs_base.rs_parallel == NULL);
833
834 if (ScanDirectionIsForward(dir))
835 {
836 block++;
837
838 /* wrap back to the start of the heap */
839 if (block >= scan->rs_nblocks)
840 block = 0;
841
842 /*
843 * Report our new scan position for synchronization purposes. We don't
844 * do that when moving backwards, however. That would just mess up any
845 * other forward-moving scanners.
846 *
847 * Note: we do this before checking for end of scan so that the final
848 * state of the position hint is back at the start of the rel. That's
849 * not strictly necessary, but otherwise when you run the same query
850 * multiple times the starting position would shift a little bit
851 * backwards on every invocation, which is confusing. We don't
852 * guarantee any specific ordering in general, though.
853 */
854 if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
855 ss_report_location(scan->rs_base.rs_rd, block);
856
857 /* we're done if we're back at where we started */
858 if (block == scan->rs_startblock)
859 return InvalidBlockNumber;
860
861 /* check if the limit imposed by heap_setscanlimits() is met */
862 if (scan->rs_numblocks != InvalidBlockNumber)
863 {
864 if (--scan->rs_numblocks == 0)
865 return InvalidBlockNumber;
866 }
867
868 return block;
869 }
870 else
871 {
872 /* we're done if the last block is the start position */
873 if (block == scan->rs_startblock)
874 return InvalidBlockNumber;
875
876 /* check if the limit imposed by heap_setscanlimits() is met */
877 if (scan->rs_numblocks != InvalidBlockNumber)
878 {
879 if (--scan->rs_numblocks == 0)
880 return InvalidBlockNumber;
881 }
882
883 /* wrap to the end of the heap when the last page was page 0 */
884 if (block == 0)
885 block = scan->rs_nblocks;
886
887 block--;
888
889 return block;
890 }
891}
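/*
 * A worked example of the wrap-around above (added for illustration),
 * assuming rs_nblocks = 10 and rs_startblock = 7: a forward scan visits
 * blocks 7 8 9 0 1 ... 6 and returns InvalidBlockNumber once block wraps
 * back around to 7; a backward scan starts at block 6 and visits
 * 6 5 ... 0 9 8 7, stopping when it reaches the start block again.
 */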
892
893/* ----------------
894 * heapgettup - fetch next heap tuple
895 *
896 * Initialize the scan if not already done; then advance to the next
897 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
898 * or set scan->rs_ctup.t_data = NULL if no more tuples.
899 *
900 * Note: the reason nkeys/key are passed separately, even though they are
901 * kept in the scan descriptor, is that the caller may not want us to check
902 * the scankeys.
903 *
904 * Note: when we fall off the end of the scan in either direction, we
905 * reset rs_inited. This means that a further request with the same
906 * scan direction will restart the scan, which is a bit odd, but a
907 * request with the opposite scan direction will start a fresh scan
908 * in the proper direction. The latter is required behavior for cursors,
909 * while the former case is generally undefined behavior in Postgres
910 * so we don't care too much.
911 * ----------------
912 */
913static void
914heapgettup(HeapScanDesc scan,
915 ScanDirection dir,
916 int nkeys,
917 ScanKey key)
918{
919 HeapTuple tuple = &(scan->rs_ctup);
920 Page page;
921 OffsetNumber lineoff;
922 int linesleft;
923
924 if (likely(scan->rs_inited))
925 {
926 /* continue from previously returned page/tuple */
927 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
928 page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
929 goto continue_page;
930 }
931
932 /*
933 * advance the scan until we find a qualifying tuple or run out of stuff
934 * to scan
935 */
936 while (true)
937 {
938 heap_fetch_next_buffer(scan, dir);
939
940 /* did we run out of blocks to scan? */
941 if (!BufferIsValid(scan->rs_cbuf))
942 break;
943
944 Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
945
946 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
947 page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
948continue_page:
949
950 /*
951 * Only continue scanning the page while we have lines left.
952 *
953 * Note that this protects us from accessing line pointers past
954 * PageGetMaxOffsetNumber(); both for forward scans when we resume the
955 * table scan, and for when we start scanning a new page.
956 */
957 for (; linesleft > 0; linesleft--, lineoff += dir)
958 {
959 bool visible;
960 ItemId lpp = PageGetItemId(page, lineoff);
961
962 if (!ItemIdIsNormal(lpp))
963 continue;
964
965 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
966 tuple->t_len = ItemIdGetLength(lpp);
967 ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
968
969 visible = HeapTupleSatisfiesVisibility(tuple,
970 scan->rs_base.rs_snapshot,
971 scan->rs_cbuf);
972
973 HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
974 tuple, scan->rs_cbuf,
975 scan->rs_base.rs_snapshot);
976
977 /* skip tuples not visible to this snapshot */
978 if (!visible)
979 continue;
980
981 /* skip any tuples that don't match the scan key */
982 if (key != NULL &&
983 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
984 nkeys, key))
985 continue;
986
987 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
988 scan->rs_coffset = lineoff;
989 return;
990 }
991
992 /*
993 * if we get here, it means we've exhausted the items on this page and
994 * it's time to move to the next.
995 */
996 LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
997 }
998
999 /* end of scan */
1000 if (BufferIsValid(scan->rs_cbuf))
1001 ReleaseBuffer(scan->rs_cbuf);
1002
1003 scan->rs_cbuf = InvalidBuffer;
1004 scan->rs_cblock = InvalidBlockNumber;
1005 scan->rs_prefetch_block = InvalidBlockNumber;
1006 tuple->t_data = NULL;
1007 scan->rs_inited = false;
1008}
1009
1010/* ----------------
1011 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
1012 *
1013 * Same API as heapgettup, but used in page-at-a-time mode
1014 *
1015 * The internal logic is much the same as heapgettup's too, but there are some
1016 * differences: we do not take the buffer content lock (that only needs to
1017 * happen inside heap_prepare_pagescan), and we iterate through just the
1018 * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1019 * that lineindex is 0-based, where the corresponding loop variable lineoff in
1020 * heapgettup is 1-based.
1021 * ----------------
1022 */
1023static void
1024heapgettup_pagemode(HeapScanDesc scan,
1025 ScanDirection dir,
1026 int nkeys,
1027 ScanKey key)
1028{
1029 HeapTuple tuple = &(scan->rs_ctup);
1030 Page page;
1031 uint32 lineindex;
1032 uint32 linesleft;
1033
1034 if (likely(scan->rs_inited))
1035 {
1036 /* continue from previously returned page/tuple */
1037 page = BufferGetPage(scan->rs_cbuf);
1038
1039 lineindex = scan->rs_cindex + dir;
1040 if (ScanDirectionIsForward(dir))
1041 linesleft = scan->rs_ntuples - lineindex;
1042 else
1043 linesleft = scan->rs_cindex;
1044 /* lineindex now references the next or previous visible tid */
1045
1046 goto continue_page;
1047 }
1048
1049 /*
1050 * advance the scan until we find a qualifying tuple or run out of stuff
1051 * to scan
1052 */
1053 while (true)
1054 {
1055 heap_fetch_next_buffer(scan, dir);
1056
1057 /* did we run out of blocks to scan? */
1058 if (!BufferIsValid(scan->rs_cbuf))
1059 break;
1060
1061 Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
1062
1063 /* prune the page and determine visible tuple offsets */
1064 heap_prepare_pagescan((TableScanDesc) scan);
1065 page = BufferGetPage(scan->rs_cbuf);
1066 linesleft = scan->rs_ntuples;
1067 lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
1068
1069 /* block is the same for all tuples, set it once outside the loop */
1070 ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1071
1072 /* lineindex now references the next or previous visible tid */
1073continue_page:
1074
1075 for (; linesleft > 0; linesleft--, lineindex += dir)
1076 {
1077 ItemId lpp;
1078 OffsetNumber lineoff;
1079
1080 Assert(lineindex < scan->rs_ntuples);
1081 lineoff = scan->rs_vistuples[lineindex];
1082 lpp = PageGetItemId(page, lineoff);
1083 Assert(ItemIdIsNormal(lpp));
1084
1085 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1086 tuple->t_len = ItemIdGetLength(lpp);
1087 ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);
1088
1089 /* skip any tuples that don't match the scan key */
1090 if (key != NULL &&
1091 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1092 nkeys, key))
1093 continue;
1094
1095 scan->rs_cindex = lineindex;
1096 return;
1097 }
1098 }
1099
1100 /* end of scan */
1101 if (BufferIsValid(scan->rs_cbuf))
1102 ReleaseBuffer(scan->rs_cbuf);
1103 scan->rs_cbuf = InvalidBuffer;
1104 scan->rs_cblock = InvalidBlockNumber;
1105 scan->rs_prefetch_block = InvalidBlockNumber;
1106 tuple->t_data = NULL;
1107 scan->rs_inited = false;
1108}
1109
1110
1111/* ----------------------------------------------------------------
1112 * heap access method interface
1113 * ----------------------------------------------------------------
1114 */
1115
1116
1117TableScanDesc
1118heap_beginscan(Relation relation, Snapshot snapshot,
1119 int nkeys, ScanKey key,
1120 ParallelTableScanDesc parallel_scan,
1121 uint32 flags)
1122{
1123 HeapScanDesc scan;
1124
1125 /*
1126 * increment relation ref count while scanning relation
1127 *
1128 * This is just to make really sure the relcache entry won't go away while
1129 * the scan has a pointer to it. Caller should be holding the rel open
1130 * anyway, so this is redundant in all normal scenarios...
1131 */
1132 RelationIncrementReferenceCount(relation);
1133
1134 /*
1135 * allocate and initialize scan descriptor
1136 */
1137 if (flags & SO_TYPE_BITMAPSCAN)
1138 {
1139 BitmapHeapScanDesc bscan = palloc(sizeof(BitmapHeapScanDescData));
1140
1141 /*
1142 * Bitmap Heap scans do not have any fields that a normal Heap Scan
1143 * does not have, so no special initializations required here.
1144 */
1145 scan = (HeapScanDesc) bscan;
1146 }
1147 else
1148 scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1149
1150 scan->rs_base.rs_rd = relation;
1151 scan->rs_base.rs_snapshot = snapshot;
1152 scan->rs_base.rs_nkeys = nkeys;
1153 scan->rs_base.rs_flags = flags;
1154 scan->rs_base.rs_parallel = parallel_scan;
1155 scan->rs_strategy = NULL; /* set in initscan */
1156 scan->rs_cbuf = InvalidBuffer;
1157
1158 /*
1159 * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1160 */
1161 if (!(snapshot && IsMVCCSnapshot(snapshot)))
1162 scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1163
1164 /* Check that a historic snapshot is not used for non-catalog tables */
1165 if (snapshot &&
1166 IsHistoricMVCCSnapshot(snapshot) &&
1167 !RelationIsAccessibleInLogicalDecoding(relation))
1168 {
1169 ereport(ERROR,
1170 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1171 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1172 RelationGetRelationName(relation))));
1173 }
1174
1175 /*
1176 * For seqscan and sample scans in a serializable transaction, acquire a
1177 * predicate lock on the entire relation. This is required not only to
1178 * lock all the matching tuples, but also to conflict with new insertions
1179 * into the table. In an indexscan, we take page locks on the index pages
1180 * covering the range specified in the scan qual, but in a heap scan there
1181 * is nothing more fine-grained to lock. A bitmap scan is a different
1182 * story; there we have already scanned the index and locked the index
1183 * pages covering the predicate. But in that case we still have to lock
1184 * any matching heap tuples. For sample scan we could optimize the locking
1185 * to be at least page-level granularity, but we'd need to add per-tuple
1186 * locking for that.
1187 */
1188 if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
1189 {
1190 /*
1191 * Ensure a missing snapshot is noticed reliably, even if the
1192 * isolation mode means predicate locking isn't performed (and
1193 * therefore the snapshot isn't used here).
1194 */
1195 Assert(snapshot);
1196 PredicateLockRelation(relation, snapshot);
1197 }
1198
1199 /* we only need to set this up once */
1200 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1201
1202 /*
1203 * Allocate memory to keep track of page allocation for parallel workers
1204 * when doing a parallel scan.
1205 */
1206 if (parallel_scan != NULL)
1207 scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
1208 else
1209 scan->rs_parallelworkerdata = NULL;
1210
1211 /*
1212 * we do this here instead of in initscan() because heap_rescan also calls
1213 * initscan() and we don't want to allocate memory again
1214 */
1215 if (nkeys > 0)
1216 scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
1217 else
1218 scan->rs_base.rs_key = NULL;
1219
1220 initscan(scan, key, false);
1221
1222 scan->rs_read_stream = NULL;
1223
1224 /*
1225 * Set up a read stream for sequential scans and TID range scans. This
1226 * should be done after initscan() because initscan() allocates the
1227 * BufferAccessStrategy object passed to the read stream API.
1228 */
1229 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1230 scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
1231 {
1232 ReadStreamBlockNumberCB cb;
1233
1234 if (scan->rs_base.rs_parallel)
1235 cb = heap_scan_stream_read_next_parallel;
1236 else
1237 cb = heap_scan_stream_read_next_serial;
1238
1239 /* ---
1240 * It is safe to use batchmode as the only locks taken by `cb`
1241 * are never taken while waiting for IO:
1242 * - SyncScanLock is used in the non-parallel case
1243 * - in the parallel case, only spinlocks and atomics are used
1244 * ---
1245 */
1246 scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
1247 READ_STREAM_USE_BATCHING,
1248 scan->rs_strategy,
1249 scan->rs_base.rs_rd,
1250 MAIN_FORKNUM,
1251 cb,
1252 scan,
1253 0);
1254 }
1255 else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1256 {
1257 scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
1258 READ_STREAM_USE_BATCHING,
1259 scan->rs_strategy,
1260 scan->rs_base.rs_rd,
1261 MAIN_FORKNUM,
1262 bitmapheap_stream_read_next,
1263 scan,
1264 sizeof(TBMIterateResult));
1265 }
1266
1267
1268 return (TableScanDesc) scan;
1269}
1270
1271void
1272heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1273 bool allow_strat, bool allow_sync, bool allow_pagemode)
1274{
1275 HeapScanDesc scan = (HeapScanDesc) sscan;
1276
1277 if (set_params)
1278 {
1279 if (allow_strat)
1280 scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
1281 else
1282 scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
1283
1284 if (allow_sync)
1285 scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
1286 else
1287 scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
1288
1289 if (allow_pagemode && scan->rs_base.rs_snapshot &&
1290 IsMVCCSnapshot(scan->rs_base.rs_snapshot))
1291 scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
1292 else
1293 scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1294 }
1295
1296 /*
1297 * unpin scan buffers
1298 */
1299 if (BufferIsValid(scan->rs_cbuf))
1300 {
1301 ReleaseBuffer(scan->rs_cbuf);
1302 scan->rs_cbuf = InvalidBuffer;
1303 }
1304
1305 /*
1306 * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1307 * additional data vs a normal HeapScan
1308 */
1309
1310 /*
1311 * The read stream is reset on rescan. This must be done before
1312 * initscan(), as some state referred to by read_stream_reset() is reset
1313 * in initscan().
1314 */
1315 if (scan->rs_read_stream)
1316 read_stream_reset(scan->rs_read_stream);
1317
1318 /*
1319 * reinitialize scan descriptor
1320 */
1321 initscan(scan, key, true);
1322}
1323
1324void
1325heap_endscan(TableScanDesc sscan)
1326{
1327 HeapScanDesc scan = (HeapScanDesc) sscan;
1328
1329 /* Note: no locking manipulations needed */
1330
1331 /*
1332 * unpin scan buffers
1333 */
1334 if (BufferIsValid(scan->rs_cbuf))
1335 ReleaseBuffer(scan->rs_cbuf);
1336
1337 /*
1338 * Must free the read stream before freeing the BufferAccessStrategy.
1339 */
1340 if (scan->rs_read_stream)
1341 read_stream_end(scan->rs_read_stream);
1342
1343 /*
1344 * decrement relation reference count and free scan descriptor storage
1345 */
1346 RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1347
1348 if (scan->rs_base.rs_key)
1349 pfree(scan->rs_base.rs_key);
1350
1351 if (scan->rs_strategy != NULL)
1352 FreeAccessStrategy(scan->rs_strategy);
1353
1354 if (scan->rs_parallelworkerdata != NULL)
1355 pfree(scan->rs_parallelworkerdata);
1356
1357 if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1358 UnregisterSnapshot(scan->rs_base.rs_snapshot);
1359
1360 pfree(scan);
1361}
1362
1363HeapTuple
1364heap_getnext(TableScanDesc sscan, ScanDirection direction)
1365{
1366 HeapScanDesc scan = (HeapScanDesc) sscan;
1367
1368 /*
1369 * This is still widely used directly, without going through table AM, so
1370 * add a safety check. It's possible we should, at a later point,
1371 * downgrade this to an assert. The reason for checking the AM routine,
1372 * rather than the AM oid, is that this allows writing regression tests
1373 * that create another AM reusing the heap handler.
1374 */
1375 if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1376 ereport(ERROR,
1377 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1378 errmsg_internal("only heap AM is supported")));
1379
1380 /*
1381 * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1382 * for catalog or regular tables. See detailed comments in xact.c where
1383 * these variables are declared. Normally we have such a check at tableam
1384 * level API but this is called from many places so we need to ensure it
1385 * here.
1386 */
1387 if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1388 elog(ERROR, "unexpected heap_getnext call during logical decoding");
1389
1390 /* Note: no locking manipulations needed */
1391
1392 if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1393 heapgettup_pagemode(scan, direction,
1394 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1395 else
1396 heapgettup(scan, direction,
1397 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1398
1399 if (scan->rs_ctup.t_data == NULL)
1400 return NULL;
1401
1402 /*
1403 * if we get here it means we have a new current scan tuple, so point to
1404 * the proper return buffer and return the tuple.
1405 */
1406
1407 pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1408
1409 return &scan->rs_ctup;
1410}
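/*
 * Usage sketch (hypothetical caller, not part of the original file): a
 * complete serial sequential scan built from the routines above. Most code
 * reaches them through the table AM wrappers (table_beginscan() etc.),
 * which pass the same flags.
 */
#ifdef NOT_USED
static void
example_seqscan(Relation rel, Snapshot snapshot)
{
	TableScanDesc sscan;
	HeapTuple	tuple;

	sscan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
						   SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
						   SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
	while ((tuple = heap_getnext(sscan, ForwardScanDirection)) != NULL)
	{
		/* tuple points into the current buffer; use it before advancing */
	}
	heap_endscan(sscan);
}
#endif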
1411
1412bool
1413heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1414{
1415 HeapScanDesc scan = (HeapScanDesc) sscan;
1416
1417 /* Note: no locking manipulations needed */
1418
1419 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1420 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1421 else
1422 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1423
1424 if (scan->rs_ctup.t_data == NULL)
1425 {
1426 ExecClearTuple(slot);
1427 return false;
1428 }
1429
1430 /*
1431 * if we get here it means we have a new current scan tuple, so point to
1432 * the proper return buffer and return the tuple.
1433 */
1434
1435 pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1436
1437 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1438 scan->rs_cbuf);
1439 return true;
1440}
1441
1442void
1443heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
1444 ItemPointer maxtid)
1445{
1446 HeapScanDesc scan = (HeapScanDesc) sscan;
1447 BlockNumber startBlk;
1448 BlockNumber numBlks;
1449 ItemPointerData highestItem;
1450 ItemPointerData lowestItem;
1451
1452 /*
1453 * For relations without any pages, we can simply leave the TID range
1454 * unset. There will be no tuples to scan, therefore no tuples outside
1455 * the given TID range.
1456 */
1457 if (scan->rs_nblocks == 0)
1458 return;
1459
1460 /*
1461 * Set up some ItemPointers which point to the first and last possible
1462 * tuples in the heap.
1463 */
1464 ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
1465 ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
1466
1467 /*
1468 * If the given maximum TID is below the highest possible TID in the
1469 * relation, then restrict the range to that, otherwise we scan to the end
1470 * of the relation.
1471 */
1472 if (ItemPointerCompare(maxtid, &highestItem) < 0)
1473 ItemPointerCopy(maxtid, &highestItem);
1474
1475 /*
1476 * If the given minimum TID is above the lowest possible TID in the
1477 * relation, then restrict the range to only scan for TIDs above that.
1478 */
1479 if (ItemPointerCompare(mintid, &lowestItem) > 0)
1480 ItemPointerCopy(mintid, &lowestItem);
1481
1482 /*
1483 * Check for an empty range and protect from would be negative results
1484 * from the numBlks calculation below.
1485 */
1486 if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
1487 {
1488 /* Set an empty range of blocks to scan */
1489 heap_setscanlimits(sscan, 0, 0);
1490 return;
1491 }
1492
1493 /*
1494 * Calculate the first block and the number of blocks we must scan. We
1495 * could be more aggressive here and perform some more validation to try
1496 * and further narrow the scope of blocks to scan by checking if the
1497 * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1498 * advance startBlk by one. Likewise, if highestItem has an offset of 0
1499 * we could scan one fewer blocks. However, such an optimization does not
1500 * seem worth troubling over, currently.
1501 */
1502 startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
1503
1504 numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
1505 ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
1506
1507 /* Set the start block and number of blocks to scan */
1508 heap_setscanlimits(sscan, startBlk, numBlks);
1509
1510 /* Finally, set the TID range in sscan */
1511 ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1512 ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1513}
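/*
 * Usage sketch (hypothetical caller, not part of the original file): restrict
 * a TID range scan (begun with SO_TYPE_TIDRANGESCAN) to the first two heap
 * pages and drain it with heap_getnextslot_tidrange() below.
 */
#ifdef NOT_USED
static void
example_tidrange_scan(TableScanDesc sscan, TupleTableSlot *slot)
{
	ItemPointerData mintid;
	ItemPointerData maxtid;

	ItemPointerSet(&mintid, 0, FirstOffsetNumber);
	ItemPointerSet(&maxtid, 1, MaxOffsetNumber);

	heap_set_tidrange(sscan, &mintid, &maxtid);
	while (heap_getnextslot_tidrange(sscan, ForwardScanDirection, slot))
	{
		/* ... consume the slot ... */
	}
}
#endif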
1514
1515bool
1516heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
1517 TupleTableSlot *slot)
1518{
1519 HeapScanDesc scan = (HeapScanDesc) sscan;
1520 ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1521 ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1522
1523 /* Note: no locking manipulations needed */
1524 for (;;)
1525 {
1526 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1527 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1528 else
1529 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1530
1531 if (scan->rs_ctup.t_data == NULL)
1532 {
1533 ExecClearTuple(slot);
1534 return false;
1535 }
1536
1537 /*
1538 * heap_set_tidrange will have used heap_setscanlimits to limit the
1539 * range of pages we scan to only ones that can contain the TID range
1540 * we're scanning for. Here we must filter out any tuples from these
1541 * pages that are outside of that range.
1542 */
1543 if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1544 {
1545 ExecClearTuple(slot);
1546
1547 /*
1548 * When scanning backwards, the TIDs will be in descending order.
1549 * Future tuples in this direction will be lower still, so we can
1550 * just return false to indicate there will be no more tuples.
1551 */
1552 if (ScanDirectionIsBackward(direction))
1553 return false;
1554
1555 continue;
1556 }
1557
1558 /*
1559 * Likewise for the final page, we must filter out TIDs greater than
1560 * maxtid.
1561 */
1562 if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1563 {
1564 ExecClearTuple(slot);
1565
1566 /*
1567 * When scanning forward, the TIDs will be in ascending order.
1568 * Future tuples in this direction will be higher still, so we can
1569 * just return false to indicate there will be no more tuples.
1570 */
1571 if (ScanDirectionIsForward(direction))
1572 return false;
1573 continue;
1574 }
1575
1576 break;
1577 }
1578
1579 /*
1580 * if we get here it means we have a new current scan tuple, so point to
1581 * the proper return buffer and return the tuple.
1582 */
1583 pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1584
1585 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1586 return true;
1587}
1588
1589/*
1590 * heap_fetch - retrieve tuple with given tid
1591 *
1592 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1593 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1594 * against the specified snapshot.
1595 *
1596 * If successful (tuple found and passes snapshot time qual), then *userbuf
1597 * is set to the buffer holding the tuple and true is returned. The caller
1598 * must unpin the buffer when done with the tuple.
1599 *
1600 * If the tuple is not found (ie, item number references a deleted slot),
1601 * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1602 * and false is returned.
1603 *
1604 * If the tuple is found but fails the time qual check, then the behavior
1605 * depends on the keep_buf parameter. If keep_buf is false, the results
1606 * are the same as for the tuple-not-found case. If keep_buf is true,
1607 * then tuple->t_data and *userbuf are returned as for the success case,
1608 * and again the caller must unpin the buffer; but false is returned.
1609 *
1610 * heap_fetch does not follow HOT chains: only the exact TID requested will
1611 * be fetched.
1612 *
1613 * It is somewhat inconsistent that we ereport() on invalid block number but
1614 * return false on invalid item number. There are a couple of reasons though.
1615 * One is that the caller can relatively easily check the block number for
1616 * validity, but cannot check the item number without reading the page
1617 * himself. Another is that when we are following a t_ctid link, we can be
1618 * reasonably confident that the page number is valid (since VACUUM shouldn't
1619 * truncate off the destination page without having killed the referencing
1620 * tuple first), but the item number might well not be good.
1621 */
1622bool
1623heap_fetch(Relation relation,
1624 Snapshot snapshot,
1625 HeapTuple tuple,
1626 Buffer *userbuf,
1627 bool keep_buf)
1628{
1629 ItemPointer tid = &(tuple->t_self);
1630 ItemId lp;
1631 Buffer buffer;
1632 Page page;
1633 OffsetNumber offnum;
1634 bool valid;
1635
1636 /*
1637 * Fetch and pin the appropriate page of the relation.
1638 */
1639 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1640
1641 /*
1642 * Need share lock on buffer to examine tuple commit status.
1643 */
1644 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1645 page = BufferGetPage(buffer);
1646
1647 /*
1648 * We'd better check for an out-of-range offnum in case VACUUM has run
1649 * since the TID was obtained.
1650 */
1651 offnum = ItemPointerGetOffsetNumber(tid);
1652 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1653 {
1654 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1655 ReleaseBuffer(buffer);
1656 *userbuf = InvalidBuffer;
1657 tuple->t_data = NULL;
1658 return false;
1659 }
1660
1661 /*
1662 * get the item line pointer corresponding to the requested tid
1663 */
1664 lp = PageGetItemId(page, offnum);
1665
1666 /*
1667 * Must check for deleted tuple.
1668 */
1669 if (!ItemIdIsNormal(lp))
1670 {
1671 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1672 ReleaseBuffer(buffer);
1673 *userbuf = InvalidBuffer;
1674 tuple->t_data = NULL;
1675 return false;
1676 }
1677
1678 /*
1679 * fill in *tuple fields
1680 */
1681 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1682 tuple->t_len = ItemIdGetLength(lp);
1683 tuple->t_tableOid = RelationGetRelid(relation);
1684
1685 /*
1686 * check tuple visibility, then release lock
1687 */
1688 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1689
1690 if (valid)
1691 PredicateLockTID(relation, &(tuple->t_self), snapshot,
1692 HeapTupleHeaderGetXmin(tuple->t_data));
1693
1694 HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1695
1696 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1697
1698 if (valid)
1699 {
1700 /*
1701 * All checks passed, so return the tuple as valid. Caller is now
1702 * responsible for releasing the buffer.
1703 */
1704 *userbuf = buffer;
1705
1706 return true;
1707 }
1708
1709 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1710 if (keep_buf)
1711 *userbuf = buffer;
1712 else
1713 {
1714 ReleaseBuffer(buffer);
1715 *userbuf = InvalidBuffer;
1716 tuple->t_data = NULL;
1717 }
1718
1719 return false;
1720}
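/*
 * Usage sketch (hypothetical caller, not part of the original file): fetch a
 * single tuple by TID and release the pin once done with it.
 */
#ifdef NOT_USED
static bool
example_fetch(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buf;

	tuple.t_self = *tid;
	if (!heap_fetch(rel, snapshot, &tuple, &buf, false))
		return false;

	/* ... use tuple.t_data while the buffer pin is held ... */
	ReleaseBuffer(buf);
	return true;
}
#endif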
1721
1722/*
1723 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1724 *
1725 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1726 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1727 * for the first chain member satisfying the given snapshot. If one is
1728 * found, we update *tid to reference that tuple's offset number, and
1729 * return true. If no match, return false without modifying *tid.
1730 *
1731 * heapTuple is a caller-supplied buffer. When a match is found, we return
1732 * the tuple here, in addition to updating *tid. If no match is found, the
1733 * contents of this buffer on return are undefined.
1734 *
1735 * If all_dead is not NULL, we check non-visible tuples to see if they are
1736 * globally dead; *all_dead is set true if all members of the HOT chain
1737 * are vacuumable, false if not.
1738 *
1739 * Unlike heap_fetch, the caller must already have pin and (at least) share
1740 * lock on the buffer; it is still pinned/locked at exit.
1741 */
1742bool
1743heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1744 Snapshot snapshot, HeapTuple heapTuple,
1745 bool *all_dead, bool first_call)
1746{
1747 Page page = BufferGetPage(buffer);
1748 TransactionId prev_xmax = InvalidTransactionId;
1749 BlockNumber blkno;
1750 OffsetNumber offnum;
1751 bool at_chain_start;
1752 bool valid;
1753 bool skip;
1754 GlobalVisState *vistest = NULL;
1755
1756 /* If this is not the first call, previous call returned a (live!) tuple */
1757 if (all_dead)
1758 *all_dead = first_call;
1759
1760 blkno = ItemPointerGetBlockNumber(tid);
1761 offnum = ItemPointerGetOffsetNumber(tid);
1762 at_chain_start = first_call;
1763 skip = !first_call;
1764
1765 /* XXX: we should assert that a snapshot is pushed or registered */
1766 Assert(TransactionIdIsValid(RecentXmin));
1767 Assert(BufferGetBlockNumber(buffer) == blkno);
1768
1769 /* Scan through possible multiple members of HOT-chain */
1770 for (;;)
1771 {
1772 ItemId lp;
1773
1774 /* check for bogus TID */
1775 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1776 break;
1777
1778 lp = PageGetItemId(page, offnum);
1779
1780 /* check for unused, dead, or redirected items */
1781 if (!ItemIdIsNormal(lp))
1782 {
1783 /* We should only see a redirect at start of chain */
1784 if (ItemIdIsRedirected(lp) && at_chain_start)
1785 {
1786 /* Follow the redirect */
1787 offnum = ItemIdGetRedirect(lp);
1788 at_chain_start = false;
1789 continue;
1790 }
1791 /* else must be end of chain */
1792 break;
1793 }
1794
1795 /*
1796 * Update heapTuple to point to the element of the HOT chain we're
1797 * currently investigating. Having t_self set correctly is important
1798 * because the SSI checks and the *Satisfies routine for historical
1799 * MVCC snapshots need the correct tid to decide about the visibility.
1800 */
1801 heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1802 heapTuple->t_len = ItemIdGetLength(lp);
1803 heapTuple->t_tableOid = RelationGetRelid(relation);
1804 ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1805
1806 /*
1807 * Shouldn't see a HEAP_ONLY tuple at chain start.
1808 */
1809 if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1810 break;
1811
1812 /*
1813 * The xmin should match the previous xmax value, else chain is
1814 * broken.
1815 */
1816 if (TransactionIdIsValid(prev_xmax) &&
1817 !TransactionIdEquals(prev_xmax,
1818 HeapTupleHeaderGetXmin(heapTuple->t_data)))
1819 break;
1820
1821 /*
1822 * When first_call is true (and thus, skip is initially false) we'll
1823 * return the first tuple we find. But on later passes, heapTuple
1824 * will initially be pointing to the tuple we returned last time.
1825 * Returning it again would be incorrect (and would loop forever), so
1826 * we skip it and return the next match we find.
1827 */
1828 if (!skip)
1829 {
1830 /* If it's visible per the snapshot, we must return it */
1831 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1832 HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1833 buffer, snapshot);
1834
1835 if (valid)
1836 {
1837 ItemPointerSetOffsetNumber(tid, offnum);
1838 PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1839 HeapTupleHeaderGetXmin(heapTuple->t_data));
1840 if (all_dead)
1841 *all_dead = false;
1842 return true;
1843 }
1844 }
1845 skip = false;
1846
1847 /*
1848 * If we can't see it, maybe no one else can either. At caller
1849 * request, check whether all chain members are dead to all
1850 * transactions.
1851 *
1852 * Note: if you change the criterion here for what is "dead", fix the
1853 * planner's get_actual_variable_range() function to match.
1854 */
1855 if (all_dead && *all_dead)
1856 {
1857 if (!vistest)
1858 vistest = GlobalVisTestFor(relation);
1859
1860 if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1861 *all_dead = false;
1862 }
1863
1864 /*
1865 * Check to see if HOT chain continues past this tuple; if so fetch
1866 * the next offnum and loop around.
1867 */
1868 if (HeapTupleIsHotUpdated(heapTuple))
1869 {
1870 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1871 blkno);
1872 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1873 at_chain_start = false;
1874 prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1875 }
1876 else
1877 break; /* end of chain */
1878 }
1879
1880 return false;
1881}
1882
1883/*
1884 * heap_get_latest_tid - get the latest tid of a specified tuple
1885 *
1886 * Actually, this gets the latest version that is visible according to the
1887 * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1888 * possibly uncommitted version.
1889 *
1890 * *tid is both an input and an output parameter: it is updated to
1891 * show the latest version of the row. Note that it will not be changed
1892 * if no version of the row passes the snapshot test.
1893 */
1894void
1895heap_get_latest_tid(TableScanDesc sscan,
1896 ItemPointer tid)
1897{
1898 Relation relation = sscan->rs_rd;
1899 Snapshot snapshot = sscan->rs_snapshot;
1900 ItemPointerData ctid;
1901 TransactionId priorXmax;
1902
1903 /*
1904 * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1905 * Assume that t_ctid links are valid however - there shouldn't be invalid
1906 * ones in the table.
1907 */
1908 Assert(ItemPointerIsValid(tid));
1909
1910 /*
1911 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1912 * need to examine, and *tid is the TID we will return if ctid turns out
1913 * to be bogus.
1914 *
1915 * Note that we will loop until we reach the end of the t_ctid chain.
1916 * Depending on the snapshot passed, there might be at most one visible
1917 * version of the row, but we don't try to optimize for that.
1918 */
1919 ctid = *tid;
1920 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1921 for (;;)
1922 {
1923 Buffer buffer;
1924 Page page;
1925 OffsetNumber offnum;
1926 ItemId lp;
1927 HeapTupleData tp;
1928 bool valid;
1929
1930 /*
1931 * Read, pin, and lock the page.
1932 */
1933 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1934 LockBuffer(buffer, BUFFER_LOCK_SHARE);
1935 page = BufferGetPage(buffer);
1936
1937 /*
1938 * Check for bogus item number. This is not treated as an error
1939 * condition because it can happen while following a t_ctid link. We
1940 * just assume that the prior tid is OK and return it unchanged.
1941 */
1942 offnum = ItemPointerGetOffsetNumber(&ctid);
1943 if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1944 {
1945 UnlockReleaseBuffer(buffer);
1946 break;
1947 }
1948 lp = PageGetItemId(page, offnum);
1949 if (!ItemIdIsNormal(lp))
1950 {
1951 UnlockReleaseBuffer(buffer);
1952 break;
1953 }
1954
1955 /* OK to access the tuple */
1956 tp.t_self = ctid;
1957 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1958 tp.t_len = ItemIdGetLength(lp);
1959 tp.t_tableOid = RelationGetRelid(relation);
1960
1961 /*
1962 * After following a t_ctid link, we might arrive at an unrelated
1963 * tuple. Check for XMIN match.
1964 */
1965 if (TransactionIdIsValid(priorXmax) &&
1966 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1967 {
1968 UnlockReleaseBuffer(buffer);
1969 break;
1970 }
1971
1972 /*
1973 * Check tuple visibility; if visible, set it as the new result
1974 * candidate.
1975 */
1976 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1977 HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1978 if (valid)
1979 *tid = ctid;
1980
1981 /*
1982 * If there's a valid t_ctid link, follow it, else we're done.
1983 */
1984 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1985 HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1986 HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1987 ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1988 {
1989 UnlockReleaseBuffer(buffer);
1990 break;
1991 }
1992
1993 ctid = tp.t_data->t_ctid;
1994 priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1995 UnlockReleaseBuffer(buffer);
1996 } /* end of loop */
1997}
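heap_get_latest_tid() is normally reached through the tableam wrapper; a brief usage sketch, assuming a relation rel and a known starting TID:

TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
ItemPointerData tid;

ItemPointerSet(&tid, 0, FirstOffsetNumber);     /* assumed starting version */
table_tuple_get_latest_tid(scan, &tid);         /* dispatches here for heap */
/* tid now names the newest version visible to the scan's snapshot */
table_endscan(scan);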
1998
1999
2000/*
2001 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
2002 *
2003 * This is called after we have waited for the XMAX transaction to terminate.
2004 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
2005 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
2006 * hint bit if possible --- but beware that that may not yet be possible,
2007 * if the transaction committed asynchronously.
2008 *
2009 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
2010 * even if it commits.
2011 *
2012 * Hence callers should look only at XMAX_INVALID.
2013 *
2014 * Note this is not allowed for tuples whose xmax is a multixact.
2015 */
2016static void
2018{
2021
2023 {
2024 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2027 xid);
2028 else
2031 }
2032}
2033
2034
2035/*
2036 * GetBulkInsertState - prepare status object for a bulk insert
2037 */
2040{
2041 BulkInsertState bistate;
2042
2045 bistate->current_buf = InvalidBuffer;
2046 bistate->next_free = InvalidBlockNumber;
2047 bistate->last_free = InvalidBlockNumber;
2048 bistate->already_extended_by = 0;
2049 return bistate;
2050}
2051
2052/*
2053 * FreeBulkInsertState - clean up after finishing a bulk insert
2054 */
2055void
2057{
2058 if (bistate->current_buf != InvalidBuffer)
2059 ReleaseBuffer(bistate->current_buf);
2060 FreeAccessStrategy(bistate->strategy);
2061 pfree(bistate);
2062}
2063
2064/*
2065 * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2066 */
2067void
2069{
2070 if (bistate->current_buf != InvalidBuffer)
2071 ReleaseBuffer(bistate->current_buf);
2072 bistate->current_buf = InvalidBuffer;
2073
2074 /*
2075 * Despite the name, we also reset bulk relation extension state.
2076 * Otherwise we can end up erroring out due to looking for free space in
2077 * ->next_free of one partition, even though ->next_free was set when
2078 * extending another partition. It could obviously also be bad for
2079 * efficiency to look at existing blocks at offsets from another
2080 * partition, even if we don't error out.
2081 */
2082 bistate->next_free = InvalidBlockNumber;
2083 bistate->last_free = InvalidBlockNumber;
2084}
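A lifecycle sketch of the three routines above, in the style of a bulk load such as COPY FROM; the identifiers rel, tuples and ntuples are assumed. One state object carries the pinned buffer and ring strategy across the whole load:

BulkInsertState bistate = GetBulkInsertState();

for (int i = 0; i < ntuples; i++)
    heap_insert(rel, tuples[i], GetCurrentCommandId(true),
                HEAP_INSERT_SKIP_FSM, bistate);

FreeBulkInsertState(bistate);   /* drops the held pin and the strategy */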
2085
2086
2087/*
2088 * heap_insert - insert tuple into a heap
2089 *
2090 * The new tuple is stamped with current transaction ID and the specified
2091 * command ID.
2092 *
2093 * See table_tuple_insert for comments about most of the input flags, except
2094 * that this routine directly takes a tuple rather than a slot.
2095 *
2096 * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
2097 * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
2098 * implement table_tuple_insert_speculative().
2099 *
2100 * On return the header fields of *tup are updated to match the stored tuple;
2101 * in particular tup->t_self receives the actual TID where the tuple was
2102 * stored. But note that any toasting of fields within the tuple data is NOT
2103 * reflected into *tup.
2104 */
2105void
2106 heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2107 int options, BulkInsertState bistate)
2108{
2109 TransactionId xid = GetCurrentTransactionId();
2110 HeapTuple heaptup;
2111 Buffer buffer;
2112 Buffer vmbuffer = InvalidBuffer;
2113 bool all_visible_cleared = false;
2114
2115 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2116 Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
2117 RelationGetNumberOfAttributes(relation));
2118
2119 AssertHasSnapshotForToast(relation);
2120
2121 /*
2122 * Fill in tuple header fields and toast the tuple if necessary.
2123 *
2124 * Note: below this point, heaptup is the data we actually intend to store
2125 * into the relation; tup is the caller's original untoasted data.
2126 */
2127 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2128
2129 /*
2130 * Find buffer to insert this tuple into. If the page is all visible,
2131 * this will also pin the requisite visibility map page.
2132 */
2133 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2134 InvalidBuffer, options, bistate,
2135 &vmbuffer, NULL,
2136 0);
2137
2138 /*
2139 * We're about to do the actual insert -- but check for conflict first, to
2140 * avoid possibly having to roll back work we've just done.
2141 *
2142 * This is safe without a recheck as long as there is no possibility of
2143 * another process scanning the page between this check and the insert
2144 * being visible to the scan (i.e., an exclusive buffer content lock is
2145 * continuously held from this point until the tuple insert is visible).
2146 *
2147 * For a heap insert, we only need to check for table-level SSI locks. Our
2148 * new tuple can't possibly conflict with existing tuple locks, and heap
2149 * page locks are only consolidated versions of tuple locks; they do not
2150 * lock "gaps" as index page locks do. So we don't need to specify a
2151 * buffer when making the call, which makes for a faster check.
2152 */
2153 CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2154
2155 /* NO EREPORT(ERROR) from here till changes are logged */
2156 START_CRIT_SECTION();
2157
2158 RelationPutHeapTuple(relation, buffer, heaptup,
2159 (options & HEAP_INSERT_SPECULATIVE) != 0);
2160
2161 if (PageIsAllVisible(BufferGetPage(buffer)))
2162 {
2163 all_visible_cleared = true;
2164 PageClearAllVisible(BufferGetPage(buffer));
2165 visibilitymap_clear(relation,
2166 ItemPointerGetBlockNumber(&(heaptup->t_self)),
2167 vmbuffer, VISIBILITYMAP_VALID_BITS);
2168 }
2169
2170 /*
2171 * XXX Should we set PageSetPrunable on this page ?
2172 *
2173 * The inserting transaction may eventually abort thus making this tuple
2174 * DEAD and hence available for pruning. Though we don't want to optimize
2175 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2176 * aborted tuple will never be pruned until next vacuum is triggered.
2177 *
2178 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2179 */
2180
2181 MarkBufferDirty(buffer);
2182
2183 /* XLOG stuff */
2184 if (RelationNeedsWAL(relation))
2185 {
2186 xl_heap_insert xlrec;
2187 xl_heap_header xlhdr;
2188 XLogRecPtr recptr;
2189 Page page = BufferGetPage(buffer);
2190 uint8 info = XLOG_HEAP_INSERT;
2191 int bufflags = 0;
2192
2193 /*
2194 * If this is a catalog, we need to transmit combo CIDs to properly
2195 * decode, so log that as well.
2196 */
2197 if (RelationIsAccessibleInLogicalDecoding(relation))
2198 log_heap_new_cid(relation, heaptup);
2199
2200 /*
2201 * If this is the single and first tuple on page, we can reinit the
2202 * page instead of restoring the whole thing. Set flag, and hide
2203 * buffer references from XLogInsert.
2204 */
2205 if (ItemPointerGetOffsetNumber(&heaptup->t_self) == FirstOffsetNumber &&
2206 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2207 {
2208 info |= XLOG_HEAP_INIT_PAGE;
2209 bufflags |= REGBUF_WILL_INIT;
2210 }
2211
2212 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2213 xlrec.flags = 0;
2214 if (all_visible_cleared)
2215 xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2216 if (options & HEAP_INSERT_SPECULATIVE)
2217 xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2218 Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2219
2220 /*
2221 * For logical decoding, we need the tuple even if we're doing a full
2222 * page write, so make sure it's included even if we take a full-page
2223 * image. (XXX We could alternatively store a pointer into the FPW).
2224 */
2225 if (RelationIsLogicallyLogged(relation) &&
2226 !(options & HEAP_INSERT_NO_LOGICAL))
2227 {
2228 xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2229 bufflags |= REGBUF_KEEP_DATA;
2230
2231 if (IsToastRelation(relation))
2232 xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
2233 }
2234
2235 XLogBeginInsert();
2236 XLogRegisterData(&xlrec, SizeOfHeapInsert);
2237
2238 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2239 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2240 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2241
2242 /*
2243 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2244 * write the whole page to the xlog, we don't need to store
2245 * xl_heap_header in the xlog.
2246 */
2247 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2248 XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
2249 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2250 XLogRegisterBufData(0,
2251 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2252 heaptup->t_len - SizeofHeapTupleHeader);
2253
2254 /* filtering by origin on a row level is much more efficient */
2255 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2256
2257 recptr = XLogInsert(RM_HEAP_ID, info);
2258
2259 PageSetLSN(page, recptr);
2260 }
2261
2262 END_CRIT_SECTION();
2263
2264 UnlockReleaseBuffer(buffer);
2265 if (vmbuffer != InvalidBuffer)
2266 ReleaseBuffer(vmbuffer);
2267
2268 /*
2269 * If tuple is cacheable, mark it for invalidation from the caches in case
2270 * we abort. Note it is OK to do this after releasing the buffer, because
2271 * the heaptup data structure is all in local memory, not in the shared
2272 * buffer.
2273 */
2274 CacheInvalidateHeapTuple(relation, heaptup, NULL);
2275
2276 /* Note: speculative insertions are counted too, even if aborted later */
2277 pgstat_count_heap_insert(relation, 1);
2278
2279 /*
2280 * If heaptup is a private copy, release it. Don't forget to copy t_self
2281 * back to the caller's image, too.
2282 */
2283 if (heaptup != tup)
2284 {
2285 tup->t_self = heaptup->t_self;
2286 heap_freetuple(heaptup);
2287 }
2288}
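A minimal direct-caller sketch (rel and tup are assumed; catalog code should prefer simple_heap_insert() below), showing that the stored location comes back in t_self:

heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);
elog(DEBUG1, "tuple stored at (%u,%u)",
     ItemPointerGetBlockNumber(&tup->t_self),
     ItemPointerGetOffsetNumber(&tup->t_self));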
2289
2290/*
2291 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2292 * tuple header fields and toasts the tuple if necessary. Returns a toasted
2293 * version of the tuple if it was toasted, or the original tuple if not. Note
2294 * that in any case, the header fields are also set in the original tuple.
2295 */
2296static HeapTuple
2297 heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2298 CommandId cid, int options)
2299{
2300 /*
2301 * To allow parallel inserts, we need to ensure that they are safe to be
2302 * performed in workers. We have the infrastructure to allow parallel
2303 * inserts in general except for the cases where inserts generate a new
2304 * CommandId (eg. inserts into a table having a foreign key column).
2305 */
2306 if (IsParallelWorker())
2307 ereport(ERROR,
2308 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2309 errmsg("cannot insert tuples in a parallel worker")));
2310
2311 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2312 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2313 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2314 HeapTupleHeaderSetXmin(tup->t_data, xid);
2315 if (options & HEAP_INSERT_FROZEN)
2316 HeapTupleHeaderSetXminFrozen(tup->t_data);
2317
2318 HeapTupleHeaderSetCmin(tup->t_data, cid);
2319 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2320 tup->t_tableOid = RelationGetRelid(relation);
2321
2322 /*
2323 * If the new tuple is too big for storage or contains already toasted
2324 * out-of-line attributes from some other relation, invoke the toaster.
2325 */
2326 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2327 relation->rd_rel->relkind != RELKIND_MATVIEW)
2328 {
2329 /* toast table entries should never be recursively toasted */
2330 Assert(!HeapTupleHasExternal(tup));
2331 return tup;
2332 }
2333 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2334 return heap_toast_insert_or_update(relation, tup, NULL, options);
2335 else
2336 return tup;
2337}
2338
2339/*
2340 * Helper for heap_multi_insert() that computes the number of entire pages
2341 * that inserting the remaining heaptuples requires. Used to determine how
2342 * much the relation needs to be extended by.
2343 */
2344static int
2345heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2346{
2347 size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2348 int npages = 1;
2349
2350 for (int i = done; i < ntuples; i++)
2351 {
2352 size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2353
2354 if (page_avail < tup_sz)
2355 {
2356 npages++;
2357 page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2358 }
2359 page_avail -= tup_sz;
2360 }
2361
2362 return npages;
2363}
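As a worked example under assumed sizes: with BLCKSZ = 8192, SizeOfPageHeaderData = 24 and fillfactor 100 (saveFreeSpace = 0), each page offers 8168 usable bytes. A 121-byte tuple MAXALIGNs to 128 and costs a further sizeof(ItemIdData) = 4 for its line pointer, i.e. 132 bytes per tuple, so 61 tuples fit per page; 1000 remaining tuples therefore price out as 16 full pages plus a 17th partial one, and the loop returns npages = 17.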
2364
2365/*
2366 * heap_multi_insert - insert multiple tuples into a heap
2367 *
2368 * This is like heap_insert(), but inserts multiple tuples in one operation.
2369 * That's faster than calling heap_insert() in a loop, because when multiple
2370 * tuples can be inserted on a single page, we can write just a single WAL
2371 * record covering all of them, and only need to lock/unlock the page once.
2372 *
2373 * Note: this leaks memory into the current memory context. You can create a
2374 * temporary context before calling this, if that's a problem.
2375 */
2376void
2377heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2378 CommandId cid, int options, BulkInsertState bistate)
2379{
2380 TransactionId xid = GetCurrentTransactionId();
2381 HeapTuple *heaptuples;
2382 int i;
2383 int ndone;
2384 PGAlignedBlock scratch;
2385 Page page;
2386 Buffer vmbuffer = InvalidBuffer;
2387 bool needwal;
2388 Size saveFreeSpace;
2389 bool need_tuple_data = RelationIsLogicallyLogged(relation);
2390 bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2391 bool starting_with_empty_page = false;
2392 int npages = 0;
2393 int npages_used = 0;
2394
2395 /* currently not needed (thus unsupported) for heap_multi_insert() */
2396 Assert(!(options & HEAP_INSERT_NO_LOGICAL));
2397
2398 AssertHasSnapshotForToast(relation);
2399
2400 needwal = RelationNeedsWAL(relation);
2401 saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2402 HEAP_DEFAULT_FILLFACTOR);
2403
2404 /* Toast and set header data in all the slots */
2405 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2406 for (i = 0; i < ntuples; i++)
2407 {
2408 HeapTuple tuple;
2409
2410 tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2411 slots[i]->tts_tableOid = RelationGetRelid(relation);
2412 tuple->t_tableOid = slots[i]->tts_tableOid;
2413 heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2414 options);
2415 }
2416
2417 /*
2418 * We're about to do the actual inserts -- but check for conflict first,
2419 * to minimize the possibility of having to roll back work we've just
2420 * done.
2421 *
2422 * A check here does not definitively prevent a serialization anomaly;
2423 * that check MUST be done at least past the point of acquiring an
2424 * exclusive buffer content lock on every buffer that will be affected,
2425 * and MAY be done after all inserts are reflected in the buffers and
2426 * those locks are released; otherwise there is a race condition. Since
2427 * multiple buffers can be locked and unlocked in the loop below, and it
2428 * would not be feasible to identify and lock all of those buffers before
2429 * the loop, we must do a final check at the end.
2430 *
2431 * The check here could be omitted with no loss of correctness; it is
2432 * present strictly as an optimization.
2433 *
2434 * For heap inserts, we only need to check for table-level SSI locks. Our
2435 * new tuples can't possibly conflict with existing tuple locks, and heap
2436 * page locks are only consolidated versions of tuple locks; they do not
2437 * lock "gaps" as index page locks do. So we don't need to specify a
2438 * buffer when making the call, which makes for a faster check.
2439 */
2440 CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2441
2442 ndone = 0;
2443 while (ndone < ntuples)
2444 {
2445 Buffer buffer;
2446 bool all_visible_cleared = false;
2447 bool all_frozen_set = false;
2448 int nthispage;
2449
2450 CHECK_FOR_INTERRUPTS();
2451
2452 /*
2453 * Compute number of pages needed to fit the to-be-inserted tuples in
2454 * the worst case. This will be used to determine how much to extend
2455 * the relation by in RelationGetBufferForTuple(), if needed. If we
2456 * filled a prior page from scratch, we can just update our last
2457 * computation, but if we started with a partially filled page,
2458 * recompute from scratch, the number of potentially required pages
2459 * can vary due to tuples needing to fit onto the page, page headers
2460 * etc.
2461 */
2462 if (ndone == 0 || !starting_with_empty_page)
2463 {
2464 npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2465 saveFreeSpace);
2466 npages_used = 0;
2467 }
2468 else
2469 npages_used++;
2470
2471 /*
2472 * Find buffer where at least the next tuple will fit. If the page is
2473 * all-visible, this will also pin the requisite visibility map page.
2474 *
2475 * Also pin visibility map page if COPY FREEZE inserts tuples into an
2476 * empty page. See all_frozen_set below.
2477 */
2478 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2479 InvalidBuffer, options, bistate,
2480 &vmbuffer, NULL,
2481 npages - npages_used);
2482 page = BufferGetPage(buffer);
2483
2484 starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2485
2486 if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
2487 {
2488 all_frozen_set = true;
2489 /* Lock the vmbuffer before entering the critical section */
2490 LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2491 }
2492
2493 /* NO EREPORT(ERROR) from here till changes are logged */
2494 START_CRIT_SECTION();
2495
2496 /*
2497 * RelationGetBufferForTuple has ensured that the first tuple fits.
2498 * Put that on the page, and then as many other tuples as fit.
2499 */
2500 RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2501
2502 /*
2503 * For logical decoding we need combo CIDs to properly decode the
2504 * catalog.
2505 */
2506 if (needwal && need_cids)
2507 log_heap_new_cid(relation, heaptuples[ndone]);
2508
2509 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2510 {
2511 HeapTuple heaptup = heaptuples[ndone + nthispage];
2512
2513 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2514 break;
2515
2516 RelationPutHeapTuple(relation, buffer, heaptup, false);
2517
2518 /*
2519 * For logical decoding we need combo CIDs to properly decode the
2520 * catalog.
2521 */
2522 if (needwal && need_cids)
2523 log_heap_new_cid(relation, heaptup);
2524 }
2525
2526 /*
2527 * If the page is all visible, need to clear that, unless we're only
2528 * going to add further frozen rows to it.
2529 *
2530 * If we're only adding already frozen rows to a previously empty
2531 * page, mark it as all-frozen and update the visibility map. We're
2532 * already holding a pin on the vmbuffer.
2533 */
2534 if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
2535 {
2536 all_visible_cleared = true;
2537 PageClearAllVisible(page);
2538 visibilitymap_clear(relation,
2539 BufferGetBlockNumber(buffer),
2540 vmbuffer, VISIBILITYMAP_VALID_BITS);
2541 }
2542 else if (all_frozen_set)
2543 {
2544 PageSetAllVisible(page);
2545 visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
2546 vmbuffer,
2547 VISIBILITYMAP_ALL_VISIBLE |
2548 VISIBILITYMAP_ALL_FROZEN,
2549 relation->rd_locator);
2550 }
2551
2552 /*
2553 * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2554 */
2555
2556 MarkBufferDirty(buffer);
2557
2558 /* XLOG stuff */
2559 if (needwal)
2560 {
2561 XLogRecPtr recptr;
2562 xl_heap_multi_insert *xlrec;
2563 uint8 info = XLOG_HEAP2_MULTI_INSERT;
2564 char *tupledata;
2565 int totaldatalen;
2566 char *scratchptr = scratch.data;
2567 bool init;
2568 int bufflags = 0;
2569
2570 /*
2571 * If the page was previously empty, we can reinit the page
2572 * instead of restoring the whole thing.
2573 */
2574 init = starting_with_empty_page;
2575
2576 /* allocate xl_heap_multi_insert struct from the scratch area */
2577 xlrec = (xl_heap_multi_insert *) scratchptr;
2578 scratchptr += SizeOfHeapMultiInsert;
2579
2580 /*
2581 * Allocate offsets array. Unless we're reinitializing the page,
2582 * in that case the tuples are stored in order starting at
2583 * FirstOffsetNumber and we don't need to store the offsets
2584 * explicitly.
2585 */
2586 if (!init)
2587 scratchptr += nthispage * sizeof(OffsetNumber);
2588
2589 /* the rest of the scratch space is used for tuple data */
2590 tupledata = scratchptr;
2591
2592 /* check that the mutually exclusive flags are not both set */
2593 Assert(!(all_visible_cleared && all_frozen_set));
2594
2595 xlrec->flags = 0;
2596 if (all_visible_cleared)
2597 xlrec->flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2598
2599 /*
2600 * We don't have to worry about including a conflict xid in the
2601 * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2602 * visibility rules.
2603 */
2604 if (all_frozen_set)
2605 xlrec->flags |= XLH_INSERT_ALL_FROZEN_SET;
2606
2607 xlrec->ntuples = nthispage;
2608
2609 /*
2610 * Write out an xl_multi_insert_tuple and the tuple data itself
2611 * for each tuple.
2612 */
2613 for (i = 0; i < nthispage; i++)
2614 {
2615 HeapTuple heaptup = heaptuples[ndone + i];
2616 xl_multi_insert_tuple *tuphdr;
2617 int datalen;
2618
2619 if (!init)
2620 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2621 /* xl_multi_insert_tuple needs two-byte alignment. */
2622 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2623 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2624
2625 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2626 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2627 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2628
2629 /* write bitmap [+ padding] [+ oid] + data */
2630 datalen = heaptup->t_len - SizeofHeapTupleHeader;
2631 memcpy(scratchptr,
2632 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2633 datalen);
2634 tuphdr->datalen = datalen;
2635 scratchptr += datalen;
2636 }
2637 totaldatalen = scratchptr - tupledata;
2638 Assert((scratchptr - scratch.data) < BLCKSZ);
2639
2640 if (need_tuple_data)
2641 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2642
2643 /*
2644 * Signal that this is the last xl_heap_multi_insert record
2645 * emitted by this call to heap_multi_insert(). Needed for logical
2646 * decoding so it knows when to cleanup temporary data.
2647 */
2648 if (ndone + nthispage == ntuples)
2649 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2650
2651 if (init)
2652 {
2653 info |= XLOG_HEAP_INIT_PAGE;
2654 bufflags |= REGBUF_WILL_INIT;
2655 }
2656
2657 /*
2658 * If we're doing logical decoding, include the new tuple data
2659 * even if we take a full-page image of the page.
2660 */
2661 if (need_tuple_data)
2662 bufflags |= REGBUF_KEEP_DATA;
2663
2664 XLogBeginInsert();
2665 XLogRegisterData(xlrec, tupledata - scratch.data);
2666 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2667 if (all_frozen_set)
2668 XLogRegisterBuffer(1, vmbuffer, 0);
2669
2670 XLogRegisterBufData(0, tupledata, totaldatalen);
2671
2672 /* filtering by origin on a row level is much more efficient */
2673 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2674
2675 recptr = XLogInsert(RM_HEAP2_ID, info);
2676
2677 PageSetLSN(page, recptr);
2678 if (all_frozen_set)
2679 {
2680 Assert(BufferIsDirty(vmbuffer));
2681 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2682 }
2683 }
2684
2685 END_CRIT_SECTION();
2686
2687 if (all_frozen_set)
2688 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2689
2690 UnlockReleaseBuffer(buffer);
2691 ndone += nthispage;
2692
2693 /*
2694 * NB: Only release vmbuffer after inserting all tuples - it's fairly
2695 * likely that we'll insert into subsequent heap pages that are likely
2696 * to use the same vm page.
2697 */
2698 }
2699
2700 /* We're done with inserting all tuples, so release the last vmbuffer. */
2701 if (vmbuffer != InvalidBuffer)
2702 ReleaseBuffer(vmbuffer);
2703
2704 /*
2705 * We're done with the actual inserts. Check for conflicts again, to
2706 * ensure that all rw-conflicts in to these inserts are detected. Without
2707 * this final check, a sequential scan of the heap may have locked the
2708 * table after the "before" check, missing one opportunity to detect the
2709 * conflict, and then scanned the table before the new tuples were there,
2710 * missing the other chance to detect the conflict.
2711 *
2712 * For heap inserts, we only need to check for table-level SSI locks. Our
2713 * new tuples can't possibly conflict with existing tuple locks, and heap
2714 * page locks are only consolidated versions of tuple locks; they do not
2715 * lock "gaps" as index page locks do. So we don't need to specify a
2716 * buffer when making the call.
2717 */
2718 CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2719
2720 /*
2721 * If tuples are cacheable, mark them for invalidation from the caches in
2722 * case we abort. Note it is OK to do this after releasing the buffer,
2723 * because the heaptuples data structure is all in local memory, not in
2724 * the shared buffer.
2725 */
2726 if (IsCatalogRelation(relation))
2727 {
2728 for (i = 0; i < ntuples; i++)
2729 CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2730 }
2731
2732 /* copy t_self fields back to the caller's slots */
2733 for (i = 0; i < ntuples; i++)
2734 slots[i]->tts_tid = heaptuples[i]->t_self;
2735
2736 pgstat_count_heap_insert(relation, ntuples);
2737}
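A batching caller sketch (hypothetical wrapper; real callers such as COPY also run this under a temporary memory context, since the function leaks into the current one):

static void
insert_batch_sketch(Relation rel, TupleTableSlot **slots, int nslots,
                    BulkInsertState bistate)
{
    /* one call per batch lets each filled page share a single WAL record */
    heap_multi_insert(rel, slots, nslots, GetCurrentCommandId(true),
                      0, bistate);
}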
2738
2739/*
2740 * simple_heap_insert - insert a tuple
2741 *
2742 * Currently, this routine differs from heap_insert only in supplying
2743 * a default command ID and not allowing access to the speedup options.
2744 *
2745 * This should be used rather than using heap_insert directly in most places
2746 * where we are modifying system catalogs.
2747 */
2748void
2750{
2751 heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2752}
2753
2754/*
2755 * Given infomask/infomask2, compute the bits that must be saved in the
2756 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2757 * xl_heap_lock_updated WAL records.
2758 *
2759 * See fix_infomask_from_infobits.
2760 */
2761static uint8
2762compute_infobits(uint16 infomask, uint16 infomask2)
2763{
2764 return
2765 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2766 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2767 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2768 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2769 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2770 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2771 XLHL_KEYS_UPDATED : 0);
2772}
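A round-trip sketch of the packing (values are illustrative): a tuple whose xmax is an exclusive lock-only marker with updated key columns packs as shown; note HEAP_XMAX_SHR_LOCK deliberately has no XLHL_ counterpart:

uint16 infomask = HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_EXCL_LOCK;
uint16 infomask2 = HEAP_KEYS_UPDATED;
uint8  infobits = compute_infobits(infomask, infomask2);

Assert(infobits == (XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_EXCL_LOCK |
                    XLHL_KEYS_UPDATED));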
2773
2774/*
2775 * Given two versions of the same t_infomask for a tuple, compare them and
2776 * return whether the relevant status for a tuple Xmax has changed. This is
2777 * used after a buffer lock has been released and reacquired: we want to ensure
2778 * that the tuple state continues to be the same it was when we previously
2779 * examined it.
2780 *
2781 * Note the Xmax field itself must be compared separately.
2782 */
2783static inline bool
2784xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2785{
2786 const uint16 interesting =
2787 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2788
2789 if ((new_infomask & interesting) != (old_infomask & interesting))
2790 return true;
2791
2792 return false;
2793}
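A recheck sketch matching the comment above (tup and buf are assumed; the xmax value is compared separately, exactly as the callers below do):

uint16 old_infomask = tup->t_data->t_infomask;
TransactionId xwait = HeapTupleHeaderGetRawXmax(tup->t_data);

LockBuffer(buf, BUFFER_LOCK_UNLOCK);
/* ... wait for xwait to finish ... */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

if (xmax_infomask_changed(tup->t_data->t_infomask, old_infomask) ||
    !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tup->t_data), xwait))
{
    /* relevant state changed while unlocked: start over */
}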
2794
2795/*
2796 * heap_delete - delete a tuple
2797 *
2798 * See table_tuple_delete() for an explanation of the parameters, except that
2799 * this routine directly takes a tuple rather than a slot.
2800 *
2801 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2802 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2803 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2804 * generated by another transaction).
2805 */
2806 TM_Result
2807 heap_delete(Relation relation, const ItemPointerData *tid,
2808 CommandId cid, Snapshot crosscheck, bool wait,
2809 TM_FailureData *tmfd, bool changingPart)
2810{
2811 TM_Result result;
2812 TransactionId xid = GetCurrentTransactionId();
2813 ItemId lp;
2814 HeapTupleData tp;
2815 Page page;
2816 BlockNumber block;
2817 Buffer buffer;
2818 Buffer vmbuffer = InvalidBuffer;
2819 TransactionId new_xmax;
2820 uint16 new_infomask,
2821 new_infomask2;
2822 bool have_tuple_lock = false;
2823 bool iscombo;
2824 bool all_visible_cleared = false;
2825 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2826 bool old_key_copied = false;
2827
2828 Assert(ItemPointerIsValid(tid));
2829
2830 AssertHasSnapshotForToast(relation);
2831
2832 /*
2833 * Forbid this during a parallel operation, lest it allocate a combo CID.
2834 * Other workers might need that combo CID for visibility checks, and we
2835 * have no provision for broadcasting it to them.
2836 */
2837 if (IsInParallelMode())
2838 ereport(ERROR,
2839 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2840 errmsg("cannot delete tuples during a parallel operation")));
2841
2842 block = ItemPointerGetBlockNumber(tid);
2843 buffer = ReadBuffer(relation, block);
2844 page = BufferGetPage(buffer);
2845
2846 /*
2847 * Before locking the buffer, pin the visibility map page if it appears to
2848 * be necessary. Since we haven't got the lock yet, someone else might be
2849 * in the middle of changing this, so we'll need to recheck after we have
2850 * the lock.
2851 */
2852 if (PageIsAllVisible(page))
2853 visibilitymap_pin(relation, block, &vmbuffer);
2854
2855 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2856
2857 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2858 Assert(ItemIdIsNormal(lp));
2859
2860 tp.t_tableOid = RelationGetRelid(relation);
2861 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2862 tp.t_len = ItemIdGetLength(lp);
2863 tp.t_self = *tid;
2864
2865l1:
2866
2867 /*
2868 * If we didn't pin the visibility map page and the page has become all
2869 * visible while we were busy locking the buffer, we'll have to unlock and
2870 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2871 * unfortunate, but hopefully shouldn't happen often.
2872 */
2873 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2874 {
2875 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2876 visibilitymap_pin(relation, block, &vmbuffer);
2877 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2878 }
2879
2880 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2881
2882 if (result == TM_Invisible)
2883 {
2884 UnlockReleaseBuffer(buffer);
2885 ereport(ERROR,
2886 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2887 errmsg("attempted to delete invisible tuple")));
2888 }
2889 else if (result == TM_BeingModified && wait)
2890 {
2891 TransactionId xwait;
2892 uint16 infomask;
2893
2894 /* must copy state data before unlocking buffer */
2895 xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2896 infomask = tp.t_data->t_infomask;
2897
2898 /*
2899 * Sleep until concurrent transaction ends -- except when there's a
2900 * single locker and it's our own transaction. Note we don't care
2901 * which lock mode the locker has, because we need the strongest one.
2902 *
2903 * Before sleeping, we need to acquire tuple lock to establish our
2904 * priority for the tuple (see heap_lock_tuple). LockTuple will
2905 * release us when we are next-in-line for the tuple.
2906 *
2907 * If we are forced to "start over" below, we keep the tuple lock;
2908 * this arranges that we stay at the head of the line while rechecking
2909 * tuple state.
2910 */
2911 if (infomask & HEAP_XMAX_IS_MULTI)
2912 {
2913 bool current_is_member = false;
2914
2915 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2916 LockTupleExclusive, &current_is_member))
2917 {
2918 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2919
2920 /*
2921 * Acquire the lock, if necessary (but skip it when we're
2922 * requesting a lock and already have one; avoids deadlock).
2923 */
2924 if (!current_is_member)
2925 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2926 LockWaitBlock, &have_tuple_lock);
2927
2928 /* wait for multixact */
2929 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2930 relation, &(tp.t_self), XLTW_Delete,
2931 NULL);
2932 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2933
2934 /*
2935 * If xwait had just locked the tuple then some other xact
2936 * could update this tuple before we get to this point. Check
2937 * for xmax change, and start over if so.
2938 *
2939 * We also must start over if we didn't pin the VM page, and
2940 * the page has become all visible.
2941 */
2942 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2943 xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2944 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2945 xwait))
2946 goto l1;
2947 }
2948
2949 /*
2950 * You might think the multixact is necessarily done here, but not
2951 * so: it could have surviving members, namely our own xact or
2952 * other subxacts of this backend. It is legal for us to delete
2953 * the tuple in either case, however (the latter case is
2954 * essentially a situation of upgrading our former shared lock to
2955 * exclusive). We don't bother changing the on-disk hint bits
2956 * since we are about to overwrite the xmax altogether.
2957 */
2958 }
2959 else if (!TransactionIdIsCurrentTransactionId(xwait))
2960 {
2961 /*
2962 * Wait for regular transaction to end; but first, acquire tuple
2963 * lock.
2964 */
2966 heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2967 LockWaitBlock, &have_tuple_lock);
2968 XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2969 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2970
2971 /*
2972 * xwait is done, but if xwait had just locked the tuple then some
2973 * other xact could update this tuple before we get to this point.
2974 * Check for xmax change, and start over if so.
2975 *
2976 * We also must start over if we didn't pin the VM page, and the
2977 * page has become all visible.
2978 */
2979 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2980 xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2981 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2982 xwait))
2983 goto l1;
2984
2985 /* Otherwise check if it committed or aborted */
2986 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2987 }
2988
2989 /*
2990 * We may overwrite if previous xmax aborted, or if it committed but
2991 * only locked the tuple without updating it.
2992 */
2993 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2994 HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2995 HeapTupleHeaderIsOnlyLocked(tp.t_data))
2996 result = TM_Ok;
2997 else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2998 result = TM_Updated;
2999 else
3000 result = TM_Deleted;
3001 }
3002
3003 /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3004 if (result != TM_Ok)
3005 {
3006 Assert(result == TM_SelfModified ||
3007 result == TM_Updated ||
3008 result == TM_Deleted ||
3009 result == TM_BeingModified);
3010 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3011 Assert(result != TM_Updated ||
3012 !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
3013 }
3014
3015 if (crosscheck != InvalidSnapshot && result == TM_Ok)
3016 {
3017 /* Perform additional check for transaction-snapshot mode RI updates */
3018 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3019 result = TM_Updated;
3020 }
3021
3022 if (result != TM_Ok)
3023 {
3024 tmfd->ctid = tp.t_data->t_ctid;
3025 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3026 if (result == TM_SelfModified)
3027 tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3028 else
3029 tmfd->cmax = InvalidCommandId;
3030 UnlockReleaseBuffer(buffer);
3031 if (have_tuple_lock)
3032 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3033 if (vmbuffer != InvalidBuffer)
3034 ReleaseBuffer(vmbuffer);
3035 return result;
3036 }
3037
3038 /*
3039 * We're about to do the actual delete -- check for conflict first, to
3040 * avoid possibly having to roll back work we've just done.
3041 *
3042 * This is safe without a recheck as long as there is no possibility of
3043 * another process scanning the page between this check and the delete
3044 * being visible to the scan (i.e., an exclusive buffer content lock is
3045 * continuously held from this point until the tuple delete is visible).
3046 */
3047 CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
3048
3049 /* replace cid with a combo CID if necessary */
3050 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3051
3052 /*
3053 * Compute replica identity tuple before entering the critical section so
3054 * we don't PANIC upon a memory allocation failure.
3055 */
3056 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3057
3058 /*
3059 * If this is the first possibly-multixact-able operation in the current
3060 * transaction, set my per-backend OldestMemberMXactId setting. We can be
3061 * certain that the transaction will never become a member of any older
3062 * MultiXactIds than that. (We have to do this even if we end up just
3063 * using our own TransactionId below, since some other backend could
3064 * incorporate our XID into a MultiXact immediately afterwards.)
3065 */
3066 MultiXactIdSetOldestMember();
3067
3068 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3069 tp.t_data->t_infomask, tp.t_data->t_infomask2,
3070 xid, LockTupleExclusive, true,
3071 &new_xmax, &new_infomask, &new_infomask2);
3072
3073 START_CRIT_SECTION();
3074
3075 /*
3076 * If this transaction commits, the tuple will become DEAD sooner or
3077 * later. Set flag that this page is a candidate for pruning once our xid
3078 * falls below the OldestXmin horizon. If the transaction finally aborts,
3079 * the subsequent page pruning will be a no-op and the hint will be
3080 * cleared.
3081 */
3082 PageSetPrunable(page, xid);
3083
3084 if (PageIsAllVisible(page))
3085 {
3086 all_visible_cleared = true;
3087 PageClearAllVisible(page);
3088 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3089 vmbuffer, VISIBILITYMAP_VALID_BITS);
3090 }
3091
3092 /* store transaction information of xact deleting the tuple */
3093 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3094 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3095 tp.t_data->t_infomask |= new_infomask;
3096 tp.t_data->t_infomask2 |= new_infomask2;
3097 HeapTupleHeaderClearHotUpdated(tp.t_data);
3098 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3099 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3100 /* Make sure there is no forward chain link in t_ctid */
3101 tp.t_data->t_ctid = tp.t_self;
3102
3103 /* Signal that this is actually a move into another partition */
3104 if (changingPart)
3105 ItemPointerSetMovedPartitions(&tp.t_data->t_ctid);
3106
3107 MarkBufferDirty(buffer);
3108
3109 /*
3110 * XLOG stuff
3111 *
3112 * NB: heap_abort_speculative() uses the same xlog record and replay
3113 * routines.
3114 */
3115 if (RelationNeedsWAL(relation))
3116 {
3117 xl_heap_delete xlrec;
3118 xl_heap_header xlhdr;
3119 XLogRecPtr recptr;
3120
3121 /*
3122 * For logical decode we need combo CIDs to properly decode the
3123 * catalog
3124 */
3125 if (RelationIsAccessibleInLogicalDecoding(relation))
3126 log_heap_new_cid(relation, &tp);
3127
3128 xlrec.flags = 0;
3129 if (all_visible_cleared)
3130 xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
3131 if (changingPart)
3132 xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
3133 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3134 tp.t_data->t_infomask2);
3135 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3136 xlrec.xmax = new_xmax;
3137
3138 if (old_key_tuple != NULL)
3139 {
3140 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3141 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3142 else
3143 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3144 }
3145
3146 XLogBeginInsert();
3147 XLogRegisterData(&xlrec, SizeOfHeapDelete);
3148
3149 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3150
3151 /*
3152 * Log replica identity of the deleted tuple if there is one
3153 */
3154 if (old_key_tuple != NULL)
3155 {
3156 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3157 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3158 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3159
3160 XLogRegisterData(&xlhdr, SizeOfHeapHeader);
3161 XLogRegisterData((char *) old_key_tuple->t_data
3162 + SizeofHeapTupleHeader,
3163 old_key_tuple->t_len
3164 - SizeofHeapTupleHeader);
3165 }
3166
3167 /* filtering by origin on a row level is much more efficient */
3168 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3169
3170 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3171
3172 PageSetLSN(page, recptr);
3173 }
3174
3175 END_CRIT_SECTION();
3176
3177 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3178
3179 if (vmbuffer != InvalidBuffer)
3180 ReleaseBuffer(vmbuffer);
3181
3182 /*
3183 * If the tuple has toasted out-of-line attributes, we need to delete
3184 * those items too. We have to do this before releasing the buffer
3185 * because we need to look at the contents of the tuple, but it's OK to
3186 * release the content lock on the buffer first.
3187 */
3188 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3189 relation->rd_rel->relkind != RELKIND_MATVIEW)
3190 {
3191 /* toast table entries should never be recursively toasted */
3192 Assert(!HeapTupleHasExternal(&tp));
3193 }
3194 else if (HeapTupleHasExternal(&tp))
3195 heap_toast_delete(relation, &tp, false);
3196
3197 /*
3198 * Mark tuple for invalidation from system caches at next command
3199 * boundary. We have to do this before releasing the buffer because we
3200 * need to look at the contents of the tuple.
3201 */
3202 CacheInvalidateHeapTuple(relation, &tp, NULL);
3203
3204 /* Now we can release the buffer */
3205 ReleaseBuffer(buffer);
3206
3207 /*
3208 * Release the lmgr tuple lock, if we had it.
3209 */
3210 if (have_tuple_lock)
3211 UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3212
3213 pgstat_count_heap_delete(relation);
3214
3215 if (old_key_tuple != NULL && old_key_copied)
3216 heap_freetuple(old_key_tuple);
3217
3218 return TM_Ok;
3219}
3220
3221/*
3222 * simple_heap_delete - delete a tuple
3223 *
3224 * This routine may be used to delete a tuple when concurrent updates of
3225 * the target tuple are not expected (for example, because we have a lock
3226 * on the relation associated with the tuple). Any failure is reported
3227 * via ereport().
3228 */
3229void
3231{
3232 TM_Result result;
3233 TM_FailureData tmfd;
3234
3235 result = heap_delete(relation, tid,
3236 GetCurrentCommandId(true), InvalidSnapshot,
3237 true /* wait for commit */ ,
3238 &tmfd, false /* changingPart */ );
3239 switch (result)
3240 {
3241 case TM_SelfModified:
3242 /* Tuple was already updated in current command? */
3243 elog(ERROR, "tuple already updated by self");
3244 break;
3245
3246 case TM_Ok:
3247 /* done successfully */
3248 break;
3249
3250 case TM_Updated:
3251 elog(ERROR, "tuple concurrently updated");
3252 break;
3253
3254 case TM_Deleted:
3255 elog(ERROR, "tuple concurrently deleted");
3256 break;
3257
3258 default:
3259 elog(ERROR, "unrecognized heap_delete status: %u", result);
3260 break;
3261 }
3262}
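Typical catalog-style usage, assuming the caller already holds a lock on rel strong enough to rule out concurrent updates; reloid is an assumed lookup key:

HeapTuple tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(reloid));

if (!HeapTupleIsValid(tup))
    elog(ERROR, "cache lookup failed for relation %u", reloid);
simple_heap_delete(rel, &tup->t_self);
heap_freetuple(tup);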
3263
3264/*
3265 * heap_update - replace a tuple
3266 *
3267 * See table_tuple_update() for an explanation of the parameters, except that
3268 * this routine directly takes a tuple rather than a slot.
3269 *
3270 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3271 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3272 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3273 * generated by another transaction).
3274 */
3275 TM_Result
3276 heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
3277 CommandId cid, Snapshot crosscheck, bool wait,
3278 TM_FailureData *tmfd, LockTupleMode *lockmode,
3279 TU_UpdateIndexes *update_indexes)
3280{
3281 TM_Result result;
3282 TransactionId xid = GetCurrentTransactionId();
3283 Bitmapset *hot_attrs;
3284 Bitmapset *sum_attrs;
3285 Bitmapset *key_attrs;
3286 Bitmapset *id_attrs;
3287 Bitmapset *interesting_attrs;
3288 Bitmapset *modified_attrs;
3289 ItemId lp;
3290 HeapTupleData oldtup;
3291 HeapTuple heaptup;
3292 HeapTuple old_key_tuple = NULL;
3293 bool old_key_copied = false;
3294 Page page;
3295 BlockNumber block;
3296 MultiXactStatus mxact_status;
3297 Buffer buffer,
3298 newbuf,
3299 vmbuffer = InvalidBuffer,
3300 vmbuffer_new = InvalidBuffer;
3301 bool need_toast;
3302 Size newtupsize,
3303 pagefree;
3304 bool have_tuple_lock = false;
3305 bool iscombo;
3306 bool use_hot_update = false;
3307 bool summarized_update = false;
3308 bool key_intact;
3309 bool all_visible_cleared = false;
3310 bool all_visible_cleared_new = false;
3311 bool checked_lockers;
3312 bool locker_remains;
3313 bool id_has_external = false;
3314 TransactionId xmax_new_tuple,
3315 xmax_old_tuple;
3316 uint16 infomask_old_tuple,
3317 infomask2_old_tuple,
3318 infomask_new_tuple,
3319 infomask2_new_tuple;
3320
3321 Assert(ItemPointerIsValid(otid));
3322
3323 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3324 Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
3325 RelationGetNumberOfAttributes(relation));
3326
3327 AssertHasSnapshotForToast(relation);
3328
3329 /*
3330 * Forbid this during a parallel operation, lest it allocate a combo CID.
3331 * Other workers might need that combo CID for visibility checks, and we
3332 * have no provision for broadcasting it to them.
3333 */
3334 if (IsInParallelMode())
3335 ereport(ERROR,
3336 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3337 errmsg("cannot update tuples during a parallel operation")));
3338
3339#ifdef USE_ASSERT_CHECKING
3340 check_lock_if_inplace_updateable_rel(relation, otid, newtup);
3341#endif
3342
3343 /*
3344 * Fetch the list of attributes to be checked for various operations.
3345 *
3346 * For HOT considerations, this is wasted effort if we fail to update or
3347 * have to put the new tuple on a different page. But we must compute the
3348 * list before obtaining buffer lock --- in the worst case, if we are
3349 * doing an update on one of the relevant system catalogs, we could
3350 * deadlock if we try to fetch the list later. In any case, the relcache
3351 * caches the data so this is usually pretty cheap.
3352 *
3353 * We also need columns used by the replica identity and columns that are
3354 * considered the "key" of rows in the table.
3355 *
3356 * Note that we get copies of each bitmap, so we need not worry about
3357 * relcache flush happening midway through.
3358 */
3359 hot_attrs = RelationGetIndexAttrBitmap(relation,
3360 INDEX_ATTR_BITMAP_HOT_BLOCKING);
3361 sum_attrs = RelationGetIndexAttrBitmap(relation,
3362 INDEX_ATTR_BITMAP_SUMMARIZED);
3363 key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3364 id_attrs = RelationGetIndexAttrBitmap(relation,
3365 INDEX_ATTR_BITMAP_IDENTITY_KEY);
3366 interesting_attrs = NULL;
3367 interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
3368 interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
3369 interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
3370 interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
3371
3372 block = ItemPointerGetBlockNumber(otid);
3373 INJECTION_POINT("heap_update-before-pin", NULL);
3374 buffer = ReadBuffer(relation, block);
3375 page = BufferGetPage(buffer);
3376
3377 /*
3378 * Before locking the buffer, pin the visibility map page if it appears to
3379 * be necessary. Since we haven't got the lock yet, someone else might be
3380 * in the middle of changing this, so we'll need to recheck after we have
3381 * the lock.
3382 */
3383 if (PageIsAllVisible(page))
3384 visibilitymap_pin(relation, block, &vmbuffer);
3385
3386 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3387
3388 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3389
3390 /*
3391 * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3392 * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3393 * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3394 * of which indicates concurrent pruning.
3395 *
3396 * Failing with TM_Updated would be most accurate. However, unlike other
3397 * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3398 * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3399 * does matter to SQL statements UPDATE and MERGE, those SQL statements
3400 * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3401 * TM_Updated and TM_Deleted affects only the wording of error messages.
3402 * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3403 * the specification of when tmfd->ctid is valid. Second, it creates
3404 * error log evidence that we took this branch.
3405 *
3406 * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3407 * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3408 * unrelated row, we'll fail with "duplicate key value violates unique".
3409 * XXX if otid is the live, newer version of the newtup row, we'll discard
3410 * changes originating in versions of this catalog row after the version
3411 * the caller got from syscache. See syscache-update-pruned.spec.
3412 */
3413 if (!ItemIdIsNormal(lp))
3414 {
3415 Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
3416
3417 UnlockReleaseBuffer(buffer);
3418 Assert(!have_tuple_lock);
3419 if (vmbuffer != InvalidBuffer)
3420 ReleaseBuffer(vmbuffer);
3421 tmfd->ctid = *otid;
3422 tmfd->xmax = InvalidTransactionId;
3423 tmfd->cmax = InvalidCommandId;
3424 *update_indexes = TU_None;
3425
3426 bms_free(hot_attrs);
3427 bms_free(sum_attrs);
3428 bms_free(key_attrs);
3429 bms_free(id_attrs);
3430 /* modified_attrs not yet initialized */
3431 bms_free(interesting_attrs);
3432 return TM_Deleted;
3433 }
3434
3435 /*
3436 * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3437 * properly.
3438 */
3439 oldtup.t_tableOid = RelationGetRelid(relation);
3440 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3441 oldtup.t_len = ItemIdGetLength(lp);
3442 oldtup.t_self = *otid;
3443
3444 /* the new tuple is ready, except for this: */
3445 newtup->t_tableOid = RelationGetRelid(relation);
3446
3447 /*
3448 * Determine columns modified by the update. Additionally, identify
3449 * whether any of the unmodified replica identity key attributes in the
3450 * old tuple is externally stored or not. This is required because for
3451 * such attributes the flattened value won't be WAL logged as part of the
3452 * new tuple so we must include it as part of the old_key_tuple. See
3453 * ExtractReplicaIdentity.
3454 */
3455 modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
3456 id_attrs, &oldtup,
3457 newtup, &id_has_external);
3458
3459 /*
3460 * If we're not updating any "key" column, we can grab a weaker lock type.
3461 * This allows for more concurrency when we are running simultaneously
3462 * with foreign key checks.
3463 *
3464 * Note that if a column gets detoasted while executing the update, but
3465 * the value ends up being the same, this test will fail and we will use
3466 * the stronger lock. This is acceptable; the important case to optimize
3467 * is updates that don't manipulate key columns, not those that
3468 * serendipitously arrive at the same key values.
3469 */
3470 if (!bms_overlap(modified_attrs, key_attrs))
3471 {
3472 *lockmode = LockTupleNoKeyExclusive;
3473 mxact_status = MultiXactStatusNoKeyUpdate;
3474 key_intact = true;
3475
3476 /*
3477 * If this is the first possibly-multixact-able operation in the
3478 * current transaction, set my per-backend OldestMemberMXactId
3479 * setting. We can be certain that the transaction will never become a
3480 * member of any older MultiXactIds than that. (We have to do this
3481 * even if we end up just using our own TransactionId below, since
3482 * some other backend could incorporate our XID into a MultiXact
3483 * immediately afterwards.)
3484 */
3485 MultiXactIdSetOldestMember();
3486 }
3487 else
3488 {
3489 *lockmode = LockTupleExclusive;
3490 mxact_status = MultiXactStatusUpdate;
3491 key_intact = false;
3492 }
3493
3494 /*
3495 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3496 * otid may very well point at newtup->t_self, which we will overwrite
3497 * with the new tuple's location, so there's great risk of confusion if we
3498 * use otid anymore.
3499 */
3500
3501l2:
3502 checked_lockers = false;
3503 locker_remains = false;
3504 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3505
3506 /* see below about the "no wait" case */
3507 Assert(result != TM_BeingModified || wait);
3508
3509 if (result == TM_Invisible)
3510 {
3511 UnlockReleaseBuffer(buffer);
3512 ereport(ERROR,
3513 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3514 errmsg("attempted to update invisible tuple")));
3515 }
3516 else if (result == TM_BeingModified && wait)
3517 {
3518 TransactionId xwait;
3519 uint16 infomask;
3520 bool can_continue = false;
3521
3522 /*
3523 * XXX note that we don't consider the "no wait" case here. This
3524 * isn't a problem currently because no caller uses that case, but it
3525 * should be fixed if such a caller is introduced. It wasn't a
3526 * problem previously because this code would always wait, but now
3527 * that some tuple locks do not conflict with one of the lock modes we
3528 * use, it is possible that this case is interesting to handle
3529 * specially.
3530 *
3531 * This may cause failures with third-party code that calls
3532 * heap_update directly.
3533 */
3534
3535 /* must copy state data before unlocking buffer */
3536 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3537 infomask = oldtup.t_data->t_infomask;
3538
3539 /*
3540 * Now we have to do something about the existing locker. If it's a
3541 * multi, sleep on it; we might be awakened before it is completely
3542 * gone (or even not sleep at all in some cases); we need to preserve
3543 * it as locker, unless it is gone completely.
3544 *
3545 * If it's not a multi, we need to check for sleeping conditions
3546 * before actually going to sleep. If the update doesn't conflict
3547 * with the locks, we just continue without sleeping (but making sure
3548 * it is preserved).
3549 *
3550 * Before sleeping, we need to acquire tuple lock to establish our
3551 * priority for the tuple (see heap_lock_tuple). LockTuple will
3552 * release us when we are next-in-line for the tuple. Note we must
3553 * not acquire the tuple lock until we're sure we're going to sleep;
3554 * otherwise we're open for race conditions with other transactions
3555 * holding the tuple lock which sleep on us.
3556 *
3557 * If we are forced to "start over" below, we keep the tuple lock;
3558 * this arranges that we stay at the head of the line while rechecking
3559 * tuple state.
3560 */
3561 if (infomask & HEAP_XMAX_IS_MULTI)
3562 {
3563 TransactionId update_xact;
3564 int remain;
3565 bool current_is_member = false;
3566
3567 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3568 *lockmode, &current_is_member))
3569 {
3570 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3571
3572 /*
3573 * Acquire the lock, if necessary (but skip it when we're
3574 * requesting a lock and already have one; avoids deadlock).
3575 */
3576 if (!current_is_member)
3577 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3578 LockWaitBlock, &have_tuple_lock);
3579
3580 /* wait for multixact */
3581 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3582 relation, &oldtup.t_self, XLTW_Update,
3583 &remain);
3584 checked_lockers = true;
3585 locker_remains = remain != 0;
3586 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3587
3588 /*
3589 * If xwait had just locked the tuple then some other xact
3590 * could update this tuple before we get to this point. Check
3591 * for xmax change, and start over if so.
3592 */
3593 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3594 infomask) ||
3595 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3596 xwait))
3597 goto l2;
3598 }
3599
3600 /*
3601 * Note that the multixact may not be done by now. It could have
3602 * surviving members; our own xact or other subxacts of this
3603 * backend, and also any other concurrent transaction that locked
3604 * the tuple with LockTupleKeyShare if we only got
3605 * LockTupleNoKeyExclusive. If this is the case, we have to be
3606 * careful to mark the updated tuple with the surviving members in
3607 * Xmax.
3608 *
3609 * Note that there could have been another update in the
3610 * MultiXact. In that case, we need to check whether it committed
3611 * or aborted. If it aborted we are safe to update it again;
3612 * otherwise there is an update conflict, and we have to return
3613 * TableTuple{Deleted, Updated} below.
3614 *
3615 * In the LockTupleExclusive case, we still need to preserve the
3616 * surviving members: those would include the tuple locks we had
3617 * before this one, which are important to keep in case this
3618 * subxact aborts.
3619 */
3620 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3621 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3622 else
3623 update_xact = InvalidTransactionId;
3624
3625 /*
3626 * There was no UPDATE in the MultiXact; or it aborted. No
3627 * TransactionIdIsInProgress() call needed here, since we called
3628 * MultiXactIdWait() above.
3629 */
3630 if (!TransactionIdIsValid(update_xact) ||
3631 TransactionIdDidAbort(update_xact))
3632 can_continue = true;
3633 }
3634 else if (TransactionIdIsCurrentTransactionId(xwait))
3635 {
3636 /*
3637 * The only locker is ourselves; we can avoid grabbing the tuple
3638 * lock here, but must preserve our locking information.
3639 */
3640 checked_lockers = true;
3641 locker_remains = true;
3642 can_continue = true;
3643 }
3644 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3645 {
3646 /*
3647 * If it's just a key-share locker, and we're not changing the key
3648 * columns, we don't need to wait for it to end; but we need to
3649 * preserve it as locker.
3650 */
3651 checked_lockers = true;
3652 locker_remains = true;
3653 can_continue = true;
3654 }
3655 else
3656 {
3657 /*
3658 * Wait for regular transaction to end; but first, acquire tuple
3659 * lock.
3660 */
3662 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3663 LockWaitBlock, &have_tuple_lock);
3664 XactLockTableWait(xwait, relation, &oldtup.t_self,
3665 XLTW_Update);
3666 checked_lockers = true;
3667 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3668
3669 /*
3670 * xwait is done, but if xwait had just locked the tuple then some
3671 * other xact could update this tuple before we get to this point.
3672 * Check for xmax change, and start over if so.
3673 */
3674 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3675 !TransactionIdEquals(xwait,
3676 HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3677 goto l2;
3678
3679 /* Otherwise check if it committed or aborted */
3680 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3681 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3682 can_continue = true;
3683 }
3684
3685 if (can_continue)
3686 result = TM_Ok;
3687 else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3688 result = TM_Updated;
3689 else
3690 result = TM_Deleted;
3691 }
3692
3693 /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3694 if (result != TM_Ok)
3695 {
3696 Assert(result == TM_SelfModified ||
3697 result == TM_Updated ||
3698 result == TM_Deleted ||
3699 result == TM_BeingModified);
3700 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3701 Assert(result != TM_Updated ||
3702 !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3703 }
3704
3705 if (crosscheck != InvalidSnapshot && result == TM_Ok)
3706 {
3707 /* Perform additional check for transaction-snapshot mode RI updates */
3708 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3709 result = TM_Updated;
3710 }
3711
3712 if (result != TM_Ok)
3713 {
3714 tmfd->ctid = oldtup.t_data->t_ctid;
3715 tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3716 if (result == TM_SelfModified)
3717 tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3718 else
3719 tmfd->cmax = InvalidCommandId;
3720 UnlockReleaseBuffer(buffer);
3721 if (have_tuple_lock)
3722 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3723 if (vmbuffer != InvalidBuffer)
3724 ReleaseBuffer(vmbuffer);
3725 *update_indexes = TU_None;
3726
3727 bms_free(hot_attrs);
3728 bms_free(sum_attrs);
3729 bms_free(key_attrs);
3730 bms_free(id_attrs);
3731 bms_free(modified_attrs);
3732 bms_free(interesting_attrs);
3733 return result;
3734 }
3735
3736 /*
3737 * If we didn't pin the visibility map page and the page has become all
3738 * visible while we were busy locking the buffer, or during some
3739 * subsequent window during which we had it unlocked, we'll have to unlock
3740 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3741 * bit unfortunate, especially since we'll now have to recheck whether the
3742 * tuple has been locked or updated under us, but hopefully it won't
3743 * happen very often.
3744 */
3745 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3746 {
3747 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3748 visibilitymap_pin(relation, block, &vmbuffer);
3749 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3750 goto l2;
3751 }
3752
3753 /* Fill in transaction status data */
3754
3755 /*
3756 * If the tuple we're updating is locked, we need to preserve the locking
3757 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3758 */
3759 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3760 oldtup.t_data->t_infomask,
3761 oldtup.t_data->t_infomask2,
3762 xid, *lockmode, true,
3763 &xmax_old_tuple, &infomask_old_tuple,
3764 &infomask2_old_tuple);
3765
3766 /*
3767 * And also prepare an Xmax value for the new copy of the tuple. If there
3768 * was no xmax previously, or there was one but all lockers are now gone,
3769 * then use InvalidTransactionId; otherwise, get the xmax from the old
3770 * tuple. (In rare cases that might also be InvalidTransactionId and yet
3771 * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3772 */
3773 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3774 HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3775 (checked_lockers && !locker_remains))
3776 xmax_new_tuple = InvalidTransactionId;
3777 else
3778 xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3779
3780 if (!TransactionIdIsValid(xmax_new_tuple))
3781 {
3782 infomask_new_tuple = HEAP_XMAX_INVALID;
3783 infomask2_new_tuple = 0;
3784 }
3785 else
3786 {
3787 /*
3788 * If we found a valid Xmax for the new tuple, then the infomask bits
3789 * to use on the new tuple depend on what was there on the old one.
3790 * Note that since we're doing an update, the only possibility is that
3791 * the lockers had FOR KEY SHARE lock.
3792 */
3793 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3794 {
3795 GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3796 &infomask2_new_tuple);
3797 }
3798 else
3799 {
3800 infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3801 infomask2_new_tuple = 0;
3802 }
3803 }
3804
3805 /*
3806 * Prepare the new tuple with the appropriate initial values of Xmin and
3807 * Xmax, as well as initial infomask bits as computed above.
3808 */
3809 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3810 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3811 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3812 HeapTupleHeaderSetCmin(newtup->t_data, cid);
3813 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3814 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3815 HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3816
3817 /*
3818 * Replace cid with a combo CID if necessary. Note that we already put
3819 * the plain cid into the new tuple.
3820 */
3821 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3822
3823 /*
3824 * If the toaster needs to be activated, OR if the new tuple will not fit
3825 * on the same page as the old, then we need to release the content lock
3826 * (but not the pin!) on the old tuple's buffer while we are off doing
3827 * TOAST and/or table-file-extension work. We must mark the old tuple to
3828 * show that it's locked, else other processes may try to update it
3829 * themselves.
3830 *
3831 * We need to invoke the toaster if there are already any out-of-line
3832 * toasted values present, or if the new tuple is over-threshold.
3833 */
3834 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3835 relation->rd_rel->relkind != RELKIND_MATVIEW)
3836 {
3837 /* toast table entries should never be recursively toasted */
3838 Assert(!HeapTupleHasExternal(&oldtup));
3839 Assert(!HeapTupleHasExternal(newtup));
3840 need_toast = false;
3841 }
3842 else
3843 need_toast = (HeapTupleHasExternal(&oldtup) ||
3844 HeapTupleHasExternal(newtup) ||
3845 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3846
3847 pagefree = PageGetHeapFreeSpace(page);
3848
3849 newtupsize = MAXALIGN(newtup->t_len);
3850
3851 if (need_toast || newtupsize > pagefree)
3852 {
3853 TransactionId xmax_lock_old_tuple;
3854 uint16 infomask_lock_old_tuple,
3855 infomask2_lock_old_tuple;
3856 bool cleared_all_frozen = false;
3857
3858 /*
3859 * To prevent concurrent sessions from updating the tuple, we have to
3860 * temporarily mark it locked, while we release the page-level lock.
3861 *
3862 * To satisfy the rule that any xid potentially appearing in a buffer
3863 * written out to disk must first be covered by WAL, we unfortunately
3864 * have to WAL-log this temporary modification. We can reuse xl_heap_lock for this
3865 * purpose. If we crash/error before following through with the
3866 * actual update, xmax will be of an aborted transaction, allowing
3867 * other sessions to proceed.
3868 */
3869
3870 /*
3871 * Compute xmax / infomask appropriate for locking the tuple. This has
3872 * to be done separately from the combo that's going to be used for
3873 * updating, because the potentially created multixact would otherwise
3874 * be wrong.
3875 */
3876 compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3877 oldtup.t_data->t_infomask,
3878 oldtup.t_data->t_infomask2,
3879 xid, *lockmode, false,
3880 &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3881 &infomask2_lock_old_tuple);
3882
3883 Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3884
3885 START_CRIT_SECTION();
3886
3887 /* Clear obsolete visibility flags ... */
3888 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3889 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3890 HeapTupleClearHotUpdated(&oldtup);
3891 /* ... and store info about transaction updating this tuple */
3892 Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3893 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3894 oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3895 oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3896 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3897
3898 /* temporarily make it look not-updated, but locked */
3899 oldtup.t_data->t_ctid = oldtup.t_self;
3900
3901 /*
3902 * Clear all-frozen bit on visibility map if needed. We could
3903 * immediately reset ALL_VISIBLE, but given that the WAL logging
3904 * overhead would be unchanged, that doesn't seem necessarily
3905 * worthwhile.
3906 */
3907 if (PageIsAllVisible(page) &&
3908 visibilitymap_clear(relation, block, vmbuffer,
3909 VISIBILITYMAP_ALL_FROZEN))
3910 cleared_all_frozen = true;
3911
3912 MarkBufferDirty(buffer);
3913
3914 if (RelationNeedsWAL(relation))
3915 {
3916 xl_heap_lock xlrec;
3917 XLogRecPtr recptr;
3918
3919 XLogBeginInsert();
3920 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3921
3922 xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3923 xlrec.xmax = xmax_lock_old_tuple;
3924 xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3925 oldtup.t_data->t_infomask2);
3926 xlrec.flags =
3927 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
3928 XLogRegisterData(&xlrec, SizeOfHeapLock);
3929 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3930 PageSetLSN(page, recptr);
3931 }
3932
3933 END_CRIT_SECTION();
3934
3935 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3936
3937 /*
3938 * Let the toaster do its thing, if needed.
3939 *
3940 * Note: below this point, heaptup is the data we actually intend to
3941 * store into the relation; newtup is the caller's original untoasted
3942 * data.
3943 */
3944 if (need_toast)
3945 {
3946 /* Note we always use WAL and FSM during updates */
3947 heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
3948 newtupsize = MAXALIGN(heaptup->t_len);
3949 }
3950 else
3951 heaptup = newtup;
3952
3953 /*
3954 * Now, do we need a new page for the tuple, or not? This is a bit
3955 * tricky since someone else could have added tuples to the page while
3956 * we weren't looking. We have to recheck the available space after
3957 * reacquiring the buffer lock. But don't bother to do that if the
3958 * former amount of free space is still not enough; it's unlikely
3959 * there's more free now than before.
3960 *
3961 * What's more, if we need to get a new page, we will need to acquire
3962 * buffer locks on both old and new pages. To avoid deadlock against
3963 * some other backend trying to get the same two locks in the other
3964 * order, we must be consistent about the order we get the locks in.
3965 * We use the rule "lock the lower-numbered page of the relation
3966 * first". To implement this, we must do RelationGetBufferForTuple
3967 * while not holding the lock on the old page, and we must rely on it
3968 * to get the locks on both pages in the correct order.
3969 *
3970 * Another consideration is that we need visibility map page pin(s) if
3971 * we will have to clear the all-visible flag on either page. If we
3972 * call RelationGetBufferForTuple, we rely on it to acquire any such
3973 * pins; but if we don't, we have to handle that here. Hence we need
3974 * a loop.
3975 */
3976 for (;;)
3977 {
3978 if (newtupsize > pagefree)
3979 {
3980 /* It doesn't fit, must use RelationGetBufferForTuple. */
3981 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3982 buffer, 0, NULL,
3983 &vmbuffer_new, &vmbuffer,
3984 0);
3985 /* We're all done. */
3986 break;
3987 }
3988 /* Acquire VM page pin if needed and we don't have it. */
3989 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3990 visibilitymap_pin(relation, block, &vmbuffer);
3991 /* Re-acquire the lock on the old tuple's page. */
3992 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3993 /* Re-check using the up-to-date free space */
3994 pagefree = PageGetHeapFreeSpace(page);
3995 if (newtupsize > pagefree ||
3996 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
3997 {
3998 /*
3999 * Rats, it doesn't fit anymore, or somebody just now set the
4000 * all-visible flag. We must now unlock and loop to avoid
4001 * deadlock. Fortunately, this path should seldom be taken.
4002 */
4003 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4004 }
4005 else
4006 {
4007 /* We're all done. */
4008 newbuf = buffer;
4009 break;
4010 }
4011 }
4012 }
4013 else
4014 {
4015 /* No TOAST work needed, and it'll fit on same page */
4016 newbuf = buffer;
4017 heaptup = newtup;
4018 }
4019
4020 /*
4021 * We're about to do the actual update -- check for conflict first, to
4022 * avoid possibly having to roll back work we've just done.
4023 *
4024 * This is safe without a recheck as long as there is no possibility of
4025 * another process scanning the pages between this check and the update
4026 * being visible to the scan (i.e., exclusive buffer content lock(s) are
4027 * continuously held from this point until the tuple update is visible).
4028 *
4029 * For the new tuple the only check needed is at the relation level, but
4030 * since both tuples are in the same relation and the check for oldtup
4031 * will include checking the relation level, there is no benefit to a
4032 * separate check for the new tuple.
4033 */
4034 CheckForSerializableConflictIn(relation, &oldtup.t_self,
4035 BufferGetBlockNumber(buffer));
4036
4037 /*
4038 * At this point newbuf and buffer are both pinned and locked, and newbuf
4039 * has enough space for the new tuple. If they are the same buffer, only
4040 * one pin is held.
4041 */
4042
4043 if (newbuf == buffer)
4044 {
4045 /*
4046 * Since the new tuple is going into the same page, we might be able
4047 * to do a HOT update. Check if any of the index columns have been
4048 * changed.
4049 */
4050 if (!bms_overlap(modified_attrs, hot_attrs))
4051 {
4052 use_hot_update = true;
4053
4054 /*
4055 * If none of the columns that are used in hot-blocking indexes
4056 * were updated, we can apply HOT, but we do still need to check
4057 * if we need to update the summarizing indexes, and update those
4058 * indexes if the columns were updated, or we may fail to detect
4059 * e.g. value bound changes in BRIN minmax indexes.
4060 */
4061 if (bms_overlap(modified_attrs, sum_attrs))
4062 summarized_update = true;
4063 }
4064 }
4065 else
4066 {
4067 /* Set a hint that the old page could use prune/defrag */
4068 PageSetFull(page);
4069 }
4070
4071 /*
4072 * Compute replica identity tuple before entering the critical section so
4073 * we don't PANIC upon a memory allocation failure.
4074 * ExtractReplicaIdentity() will return NULL if nothing needs to be
4075 * logged. Pass old key required as true only if the replica identity key
4076 * columns are modified or it has external data.
4077 */
4078 old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
4079 bms_overlap(modified_attrs, id_attrs) ||
4080 id_has_external,
4081 &old_key_copied);
4082
4083 /* NO EREPORT(ERROR) from here till changes are logged */
4084 START_CRIT_SECTION();
4085
4086 /*
4087 * If this transaction commits, the old tuple will become DEAD sooner or
4088 * later. Set flag that this page is a candidate for pruning once our xid
4089 * falls below the OldestXmin horizon. If the transaction finally aborts,
4090 * the subsequent page pruning will be a no-op and the hint will be
4091 * cleared.
4092 *
4093 * XXX Should we set hint on newbuf as well? If the transaction aborts,
4094 * there would be a prunable tuple in the newbuf; but for now we choose
4095 * not to optimize for aborts. Note that heap_xlog_update must be kept in
4096 * sync if this decision changes.
4097 */
4098 PageSetPrunable(page, xid);
4099
4100 if (use_hot_update)
4101 {
4102 /* Mark the old tuple as HOT-updated */
4103 HeapTupleSetHotUpdated(&oldtup);
4104 /* And mark the new tuple as heap-only */
4105 HeapTupleSetHeapOnly(heaptup);
4106 /* Mark the caller's copy too, in case different from heaptup */
4107 HeapTupleSetHeapOnly(newtup);
4108 }
4109 else
4110 {
4111 /* Make sure tuples are correctly marked as not-HOT */
4112 HeapTupleClearHotUpdated(&oldtup);
4113 HeapTupleClearHeapOnly(heaptup);
4114 HeapTupleClearHeapOnly(newtup);
4115 }
4116
4117 RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4118
4119
4120 /* Clear obsolete visibility flags, possibly set by ourselves above... */
4121 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4122 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4123 /* ... and store info about transaction updating this tuple */
4124 Assert(TransactionIdIsValid(xmax_old_tuple));
4125 HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4126 oldtup.t_data->t_infomask |= infomask_old_tuple;
4127 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4128 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4129
4130 /* record address of new tuple in t_ctid of old one */
4131 oldtup.t_data->t_ctid = heaptup->t_self;
4132
4133 /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4134 if (PageIsAllVisible(BufferGetPage(buffer)))
4135 {
4136 all_visible_cleared = true;
4137 PageClearAllVisible(BufferGetPage(buffer));
4138 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4139 vmbuffer, VISIBILITYMAP_VALID_BITS);
4140 }
4141 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4142 {
4143 all_visible_cleared_new = true;
4144 PageClearAllVisible(BufferGetPage(newbuf));
4145 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4146 vmbuffer_new, VISIBILITYMAP_VALID_BITS);
4147 }
4148
4149 if (newbuf != buffer)
4150 MarkBufferDirty(newbuf);
4151 MarkBufferDirty(buffer);
4152
4153 /* XLOG stuff */
4154 if (RelationNeedsWAL(relation))
4155 {
4156 XLogRecPtr recptr;
4157
4158 /*
4159 * For logical decoding we need combo CIDs to properly decode the
4160 * catalog.
4161 */
4162 if (RelationIsAccessibleInLogicalDecoding(relation))
4163 {
4164 log_heap_new_cid(relation, &oldtup);
4165 log_heap_new_cid(relation, heaptup);
4166 }
4167
4168 recptr = log_heap_update(relation, buffer,
4169 newbuf, &oldtup, heaptup,
4170 old_key_tuple,
4171 all_visible_cleared,
4172 all_visible_cleared_new);
4173 if (newbuf != buffer)
4174 {
4175 PageSetLSN(BufferGetPage(newbuf), recptr);
4176 }
4177 PageSetLSN(BufferGetPage(buffer), recptr);
4178 }
4179
4180 END_CRIT_SECTION();
4181
4182 if (newbuf != buffer)
4183 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
4184 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4185
4186 /*
4187 * Mark old tuple for invalidation from system caches at next command
4188 * boundary, and mark the new tuple for invalidation in case we abort. We
4189 * have to do this before releasing the buffer because oldtup is in the
4190 * buffer. (heaptup is all in local memory, but it's necessary to process
4191 * both tuple versions in one call to inval.c so we can avoid redundant
4192 * sinval messages.)
4193 */
4194 CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4195
4196 /* Now we can release the buffer(s) */
4197 if (newbuf != buffer)
4198 ReleaseBuffer(newbuf);
4199 ReleaseBuffer(buffer);
4200 if (BufferIsValid(vmbuffer_new))
4201 ReleaseBuffer(vmbuffer_new);
4202 if (BufferIsValid(vmbuffer))
4203 ReleaseBuffer(vmbuffer);
4204
4205 /*
4206 * Release the lmgr tuple lock, if we had it.
4207 */
4208 if (have_tuple_lock)
4209 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4210
4211 pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4212
4213 /*
4214 * If heaptup is a private copy, release it. Don't forget to copy t_self
4215 * back to the caller's image, too.
4216 */
4217 if (heaptup != newtup)
4218 {
4219 newtup->t_self = heaptup->t_self;
4220 heap_freetuple(heaptup);
4221 }
4222
4223 /*
4224 * If it is a HOT update, the update may still need to update summarized
4225 * indexes, lest we fail to update those summaries and get incorrect
4226 * results (for example, minmax bounds of the block may change with this
4227 * update).
4228 */
4229 if (use_hot_update)
4230 {
4231 if (summarized_update)
4232 *update_indexes = TU_Summarizing;
4233 else
4234 *update_indexes = TU_None;
4235 }
4236 else
4237 *update_indexes = TU_All;
4238
4239 if (old_key_tuple != NULL && old_key_copied)
4240 heap_freetuple(old_key_tuple);
4241
4242 bms_free(hot_attrs);
4243 bms_free(sum_attrs);
4244 bms_free(key_attrs);
4245 bms_free(id_attrs);
4246 bms_free(modified_attrs);
4247 bms_free(interesting_attrs);
4248
4249 return TM_Ok;
4250}
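
As an illustration of the logic above: when the old and new tuple versions land on the same page, HOT applies only if no column of a hot-blocking index changed, and a HOT update may still have to refresh summarizing indexes such as BRIN. A minimal standalone C sketch of that decision, using plain uint64_t masks in place of Bitmapset and invented column numbers (toy code, not part of heapam.c):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { TU_NONE, TU_SUMMARIZING, TU_ALL } UpdateIndexes;

/* decide which indexes need new entries after an update */
static UpdateIndexes
decide_index_updates(uint64_t modified, uint64_t hot_blocking,
                     uint64_t summarizing, bool same_page, bool *use_hot)
{
    *use_hot = false;
    if (same_page && (modified & hot_blocking) == 0)
    {
        /* HOT applies: no hot-blocking index column changed */
        *use_hot = true;
        /* summarizing indexes may still need to see the new values */
        return (modified & summarizing) != 0 ? TU_SUMMARIZING : TU_NONE;
    }
    return TU_ALL;              /* non-HOT: all indexes get new entries */
}

int
main(void)
{
    bool        hot;
    /* column 3 changed; a btree covers column 1, a BRIN index column 3 */
    UpdateIndexes r = decide_index_updates(UINT64_C(1) << 3, UINT64_C(1) << 1,
                                           UINT64_C(1) << 3, true, &hot);
    printf("hot=%d result=%d\n", hot, (int) r); /* hot=1, TU_SUMMARIZING */
    return 0;
}
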
4251
4252#ifdef USE_ASSERT_CHECKING
4253/*
4254 * Confirm adequate lock held during heap_update(), per rules from
4255 * README.tuplock section "Locking to write inplace-updated tables".
4256 */
4257static void
4258check_lock_if_inplace_updateable_rel(Relation relation,
4259 const ItemPointerData *otid,
4260 HeapTuple newtup)
4261{
4262 /* LOCKTAG_TUPLE acceptable for any catalog */
4263 switch (RelationGetRelid(relation))
4264 {
4265 case RelationRelationId:
4266 case DatabaseRelationId:
4267 {
4268 LOCKTAG tuptag;
4269
4270 SET_LOCKTAG_TUPLE(tuptag,
4271 relation->rd_lockInfo.lockRelId.dbId,
4272 relation->rd_lockInfo.lockRelId.relId,
4273 ItemPointerGetBlockNumber(otid),
4274 ItemPointerGetOffsetNumber(otid));
4275 if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
4276 return;
4277 }
4278 break;
4279 default:
4280 Assert(!IsInplaceUpdateRelation(relation));
4281 return;
4282 }
4283
4284 switch (RelationGetRelid(relation))
4285 {
4286 case RelationRelationId:
4287 {
4288 /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4289 Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
4290 Oid relid = classForm->oid;
4291 Oid dbid;
4292 LOCKTAG tag;
4293
4294 if (IsSharedRelation(relid))
4295 dbid = InvalidOid;
4296 else
4297 dbid = MyDatabaseId;
4298
4299 if (classForm->relkind == RELKIND_INDEX)
4300 {
4301 Relation irel = index_open(relid, AccessShareLock);
4302
4303 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4304 index_close(irel, AccessShareLock);
4305 }
4306 else
4307 SET_LOCKTAG_RELATION(tag, dbid, relid);
4308
4309 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4310 !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4311 elog(WARNING,
4312 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4313 NameStr(classForm->relname),
4314 relid,
4315 classForm->relkind,
4316 ItemPointerGetBlockNumber(otid),
4317 ItemPointerGetOffsetNumber(otid));
4318 }
4319 break;
4320 case DatabaseRelationId:
4321 {
4322 /* LOCKTAG_TUPLE required */
4323 Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
4324
4325 elog(WARNING,
4326 "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4327 NameStr(dbForm->datname),
4328 dbForm->oid,
4329 ItemPointerGetBlockNumber(otid),
4330 ItemPointerGetOffsetNumber(otid));
4331 }
4332 break;
4333 }
4334}
4335
4336/*
4337 * Confirm adequate relation lock held, per rules from README.tuplock section
4338 * "Locking to write inplace-updated tables".
4339 */
4340static void
4341check_inplace_rel_lock(HeapTuple oldtup)
4342{
4343 Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
4344 Oid relid = classForm->oid;
4345 Oid dbid;
4346 LOCKTAG tag;
4347
4348 if (IsSharedRelation(relid))
4349 dbid = InvalidOid;
4350 else
4351 dbid = MyDatabaseId;
4352
4353 if (classForm->relkind == RELKIND_INDEX)
4354 {
4355 Relation irel = index_open(relid, AccessShareLock);
4356
4357 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4358 index_close(irel, AccessShareLock);
4359 }
4360 else
4361 SET_LOCKTAG_RELATION(tag, dbid, relid);
4362
4363 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4364 elog(WARNING,
4365 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4366 NameStr(classForm->relname),
4367 relid,
4368 classForm->relkind,
4369 ItemPointerGetBlockNumber(&oldtup->t_self),
4370 ItemPointerGetOffsetNumber(&oldtup->t_self));
4371}
4372#endif
4373
4374/*
4375 * Check if the specified attribute's values are the same. Subroutine for
4376 * HeapDetermineColumnsInfo.
4377 */
4378static bool
4379heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
4380 bool isnull1, bool isnull2)
4381{
4382 /*
4383 * If one value is NULL and other is not, then they are certainly not
4384 * equal
4385 */
4386 if (isnull1 != isnull2)
4387 return false;
4388
4389 /*
4390 * If both are NULL, they can be considered equal.
4391 */
4392 if (isnull1)
4393 return true;
4394
4395 /*
4396 * We do simple binary comparison of the two datums. This may be overly
4397 * strict because there can be multiple binary representations for the
4398 * same logical value. But we should be OK as long as there are no false
4399 * positives. Using a type-specific equality operator is messy because
4400 * there could be multiple notions of equality in different operator
4401 * classes; furthermore, we cannot safely invoke user-defined functions
4402 * while holding exclusive buffer lock.
4403 */
4404 if (attrnum <= 0)
4405 {
4406 /* The only allowed system columns are OIDs, so do this */
4407 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4408 }
4409 else
4410 {
4411 CompactAttribute *att;
4412
4413 Assert(attrnum <= tupdesc->natts);
4414 att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4415 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4416 }
4417}
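
The comparison policy above can be modeled in a few lines of standalone C. The types below are toys rather than PostgreSQL's Datum machinery: NULLs compare equal only to NULLs, and non-NULL values are compared by raw bytes, which may yield false "not equal" answers for logically equal values but never false "equal" ones:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct ToyDatum
{
    bool        isnull;
    const void *bytes;          /* NULL means the value is in val (by-value) */
    size_t      len;
    long        val;
} ToyDatum;

static bool
toy_attr_equals(const ToyDatum *a, const ToyDatum *b)
{
    if (a->isnull != b->isnull)
        return false;           /* one NULL, one not: certainly unequal */
    if (a->isnull)
        return true;            /* both NULL: considered equal */
    if (a->bytes == NULL && b->bytes == NULL)
        return a->val == b->val;    /* by-value: direct comparison */
    return a->len == b->len &&
        memcmp(a->bytes, b->bytes, a->len) == 0;    /* by-reference bytes */
}

int
main(void)
{
    ToyDatum    x = {false, "abc", 3, 0};
    ToyDatum    y = {false, "abc", 3, 0};

    printf("%d\n", toy_attr_equals(&x, &y));    /* prints 1 */
    return 0;
}
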
4418
4419/*
4420 * Check which columns are being updated.
4421 *
4422 * Given an updated tuple, determine (and return into the output bitmapset),
4423 * from those listed as interesting, the set of columns that changed.
4424 *
4425 * has_external indicates if any of the unmodified attributes (from those
4426 * listed as interesting) of the old tuple is a member of external_cols and is
4427 * stored externally.
4428 */
4429 static Bitmapset *
4430 HeapDetermineColumnsInfo(Relation relation,
4431 Bitmapset *interesting_cols,
4432 Bitmapset *external_cols,
4433 HeapTuple oldtup, HeapTuple newtup,
4434 bool *has_external)
4435{
4436 int attidx;
4437 Bitmapset *modified = NULL;
4438 TupleDesc tupdesc = RelationGetDescr(relation);
4439
4440 attidx = -1;
4441 while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4442 {
4443 /* attidx is zero-based, attrnum is the normal attribute number */
4444 AttrNumber attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
4445 Datum value1,
4446 value2;
4447 bool isnull1,
4448 isnull2;
4449
4450 /*
4451 * If it's a whole-tuple reference, say "not equal". It's not really
4452 * worth supporting this case, since it could only succeed after a
4453 * no-op update, which is hardly a case worth optimizing for.
4454 */
4455 if (attrnum == 0)
4456 {
4457 modified = bms_add_member(modified, attidx);
4458 continue;
4459 }
4460
4461 /*
4462 * Likewise, automatically say "not equal" for any system attribute
4463 * other than tableOID; we cannot expect these to be consistent in a
4464 * HOT chain, or even to be set correctly yet in the new tuple.
4465 */
4466 if (attrnum < 0)
4467 {
4468 if (attrnum != TableOidAttributeNumber)
4469 {
4470 modified = bms_add_member(modified, attidx);
4471 continue;
4472 }
4473 }
4474
4475 /*
4476 * Extract the corresponding values. XXX this is pretty inefficient
4477 * if there are many indexed columns. Should we do a single
4478 * heap_deform_tuple call on each tuple, instead? But that doesn't
4479 * work for system columns ...
4480 */
4481 value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4482 value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4483
4484 if (!heap_attr_equals(tupdesc, attrnum, value1,
4485 value2, isnull1, isnull2))
4486 {
4487 modified = bms_add_member(modified, attidx);
4488 continue;
4489 }
4490
4491 /*
4492 * No need to check attributes that can't be stored externally. Note
4493 * that system attributes can't be stored externally.
4494 */
4495 if (attrnum < 0 || isnull1 ||
4496 TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4497 continue;
4498
4499 /*
4500 * Check if the old tuple's attribute is stored externally and is a
4501 * member of external_cols.
4502 */
4503 if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
4504 bms_is_member(attidx, external_cols))
4505 *has_external = true;
4506 }
4507
4508 return modified;
4509}
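
A self-contained sketch of the same loop shape, with int arrays standing in for the two row images and a uint32_t mask standing in for the result Bitmapset (illustrative only; the real code fetches values with heap_getattr and additionally handles system columns and externally stored data):

#include <stdint.h>
#include <stdio.h>

static uint32_t
toy_modified_columns(const int *oldrow, const int *newrow,
                     uint32_t interesting, int ncols)
{
    uint32_t    modified = 0;

    for (int col = 0; col < ncols; col++)
    {
        if (!(interesting & (UINT32_C(1) << col)))
            continue;           /* caller doesn't care about this column */
        if (oldrow[col] != newrow[col])
            modified |= UINT32_C(1) << col;
    }
    return modified;
}

int
main(void)
{
    int         oldrow[] = {1, 2, 3};
    int         newrow[] = {1, 9, 3};

    /* columns 1 and 2 are interesting; only column 1 changed */
    printf("0x%x\n", toy_modified_columns(oldrow, newrow, 0x6, 3));
    return 0;
}
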
4510
4511/*
4512 * simple_heap_update - replace a tuple
4513 *
4514 * This routine may be used to update a tuple when concurrent updates of
4515 * the target tuple are not expected (for example, because we have a lock
4516 * on the relation associated with the tuple). Any failure is reported
4517 * via ereport().
4518 */
4519 void
4520 simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
4521 TU_UpdateIndexes *update_indexes)
4522{
4523 TM_Result result;
4524 TM_FailureData tmfd;
4525 LockTupleMode lockmode;
4526
4527 result = heap_update(relation, otid, tup,
4528 GetCurrentCommandId(true), InvalidSnapshot,
4529 true /* wait for commit */ ,
4530 &tmfd, &lockmode, update_indexes);
4531 switch (result)
4532 {
4533 case TM_SelfModified:
4534 /* Tuple was already updated in current command? */
4535 elog(ERROR, "tuple already updated by self");
4536 break;
4537
4538 case TM_Ok:
4539 /* done successfully */
4540 break;
4541
4542 case TM_Updated:
4543 elog(ERROR, "tuple concurrently updated");
4544 break;
4545
4546 case TM_Deleted:
4547 elog(ERROR, "tuple concurrently deleted");
4548 break;
4549
4550 default:
4551 elog(ERROR, "unrecognized heap_update status: %u", result);
4552 break;
4553 }
4554}
4555
4556
4557/*
4558 * Return the MultiXactStatus corresponding to the given tuple lock mode.
4559 */
4560 static MultiXactStatus
4561 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4562{
4563 int retval;
4564
4565 if (is_update)
4566 retval = tupleLockExtraInfo[mode].updstatus;
4567 else
4568 retval = tupleLockExtraInfo[mode].lockstatus;
4569
4570 if (retval == -1)
4571 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4572 is_update ? "true" : "false");
4573
4574 return (MultiXactStatus) retval;
4575}
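
The same table-driven pattern in miniature, with invented status values (the real table is tupleLockExtraInfo); a -1 entry marks a combination that must never be requested, such as an update under a mode weaker than NoKeyExclusive:

#include <stdio.h>
#include <stdlib.h>

enum { KEYSHARE, SHARE, NOKEYEXCL, EXCL, NMODES };

typedef struct { int lockstatus; int updstatus; } ModeInfo;

static const ModeInfo mode_info[NMODES] = {
    [KEYSHARE] = {0, -1},       /* may lock, may never update */
    [SHARE] = {1, -1},
    [NOKEYEXCL] = {2, 3},
    [EXCL] = {4, 5},
};

static int
status_for_lock(int mode, int is_update)
{
    int         ret = is_update ? mode_info[mode].updstatus :
        mode_info[mode].lockstatus;

    if (ret == -1)
    {
        fprintf(stderr, "invalid lock mode %d/%d\n", mode, is_update);
        exit(1);
    }
    return ret;
}

int
main(void)
{
    printf("%d\n", status_for_lock(EXCL, 1));   /* prints 5 */
    return 0;
}
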
4576
4577/*
4578 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4579 *
4580 * Note that this acquires a buffer pin, which the caller must release.
4581 *
4582 * Input parameters:
4583 * relation: relation containing tuple (caller must hold suitable lock)
4584 * cid: current command ID (used for visibility test, and stored into
4585 * tuple's cmax if lock is successful)
4586 * mode: indicates if shared or exclusive tuple lock is desired
4587 * wait_policy: what to do if tuple lock is not available
4588 * follow_updates: if true, follow the update chain to also lock descendant
4589 * tuples.
4590 *
4591 * Output parameters:
4592 * *tuple: all fields filled in
4593 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4594 * *tmfd: filled in failure cases (see below)
4595 *
4596 * Function results are the same as the ones for table_tuple_lock().
4597 *
4598 * In the failure cases other than TM_Invisible, the routine fills
4599 * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4600 * if necessary), and t_cmax (the last only for TM_SelfModified,
4601 * since we cannot obtain cmax from a combo CID generated by another
4602 * transaction).
4603 * See comments for struct TM_FailureData for additional info.
4604 *
4605 * See README.tuplock for a thorough explanation of this mechanism.
4606 */
4607 TM_Result
4608 heap_lock_tuple(Relation relation, HeapTuple tuple,
4609 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4610 bool follow_updates,
4611 Buffer *buffer, TM_FailureData *tmfd)
4612{
4613 TM_Result result;
4614 ItemPointer tid = &(tuple->t_self);
4615 ItemId lp;
4616 Page page;
4617 Buffer vmbuffer = InvalidBuffer;
4618 BlockNumber block;
4619 TransactionId xid,
4620 xmax;
4621 uint16 old_infomask,
4622 new_infomask,
4623 new_infomask2;
4624 bool first_time = true;
4625 bool skip_tuple_lock = false;
4626 bool have_tuple_lock = false;
4627 bool cleared_all_frozen = false;
4628
4629 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4630 block = ItemPointerGetBlockNumber(tid);
4631
4632 /*
4633 * Before locking the buffer, pin the visibility map page if it appears to
4634 * be necessary. Since we haven't got the lock yet, someone else might be
4635 * in the middle of changing this, so we'll need to recheck after we have
4636 * the lock.
4637 */
4638 if (PageIsAllVisible(BufferGetPage(*buffer)))
4639 visibilitymap_pin(relation, block, &vmbuffer);
4640
4641 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4642
4643 page = BufferGetPage(*buffer);
4644 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4645 Assert(ItemIdIsNormal(lp));
4646
4647 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4648 tuple->t_len = ItemIdGetLength(lp);
4649 tuple->t_tableOid = RelationGetRelid(relation);
4650
4651l3:
4652 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4653
4654 if (result == TM_Invisible)
4655 {
4656 /*
4657 * This is possible, but only when locking a tuple for ON CONFLICT
4658 * UPDATE. We return this value here rather than throwing an error in
4659 * order to give that case the opportunity to throw a more specific
4660 * error.
4661 */
4662 result = TM_Invisible;
4663 goto out_locked;
4664 }
4665 else if (result == TM_BeingModified ||
4666 result == TM_Updated ||
4667 result == TM_Deleted)
4668 {
4669 TransactionId xwait;
4670 uint16 infomask;
4671 uint16 infomask2;
4672 bool require_sleep;
4673 ItemPointerData t_ctid;
4674
4675 /* must copy state data before unlocking buffer */
4676 xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4677 infomask = tuple->t_data->t_infomask;
4678 infomask2 = tuple->t_data->t_infomask2;
4679 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4680
4681 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4682
4683 /*
4684 * If any subtransaction of the current top transaction already holds
4685 * a lock as strong as or stronger than what we're requesting, we
4686 * effectively hold the desired lock already. We *must* succeed
4687 * without trying to take the tuple lock, else we will deadlock
4688 * against anyone wanting to acquire a stronger lock.
4689 *
4690 * Note we only do this the first time we loop on the HTSU result;
4691 * there is no point in testing in subsequent passes, because
4692 * evidently our own transaction cannot have acquired a new lock after
4693 * the first time we checked.
4694 */
4695 if (first_time)
4696 {
4697 first_time = false;
4698
4699 if (infomask & HEAP_XMAX_IS_MULTI)
4700 {
4701 int i;
4702 int nmembers;
4703 MultiXactMember *members;
4704
4705 /*
4706 * We don't need to allow old multixacts here; if that had
4707 * been the case, HeapTupleSatisfiesUpdate would have returned
4708 * MayBeUpdated and we wouldn't be here.
4709 */
4710 nmembers =
4711 GetMultiXactIdMembers(xwait, &members, false,
4712 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4713
4714 for (i = 0; i < nmembers; i++)
4715 {
4716 /* only consider members of our own transaction */
4717 if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4718 continue;
4719
4720 if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4721 {
4722 pfree(members);
4723 result = TM_Ok;
4724 goto out_unlocked;
4725 }
4726 else
4727 {
4728 /*
4729 * Disable acquisition of the heavyweight tuple lock.
4730 * Otherwise, when promoting a weaker lock, we might
4731 * deadlock with another locker that has acquired the
4732 * heavyweight tuple lock and is waiting for our
4733 * transaction to finish.
4734 *
4735 * Note that in this case we still need to wait for
4736 * the multixact if required, to avoid acquiring
4737 * conflicting locks.
4738 */
4739 skip_tuple_lock = true;
4740 }
4741 }
4742
4743 if (members)
4744 pfree(members);
4745 }
4746 else if (TransactionIdIsCurrentTransactionId(xwait))
4747 {
4748 switch (mode)
4749 {
4750 case LockTupleKeyShare:
4751 Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4752 HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4753 HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4754 result = TM_Ok;
4755 goto out_unlocked;
4756 case LockTupleShare:
4757 if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4758 HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4759 {
4760 result = TM_Ok;
4761 goto out_unlocked;
4762 }
4763 break;
4764 case LockTupleNoKeyExclusive:
4765 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4766 {
4767 result = TM_Ok;
4768 goto out_unlocked;
4769 }
4770 break;
4771 case LockTupleExclusive:
4772 if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4773 infomask2 & HEAP_KEYS_UPDATED)
4774 {
4775 result = TM_Ok;
4776 goto out_unlocked;
4777 }
4778 break;
4779 }
4780 }
4781 }
4782
4783 /*
4784 * Initially assume that we will have to wait for the locking
4785 * transaction(s) to finish. We check various cases below in which
4786 * this can be turned off.
4787 */
4788 require_sleep = true;
4789 if (mode == LockTupleKeyShare)
4790 {
4791 /*
4792 * If we're requesting KeyShare, and there's no update present, we
4793 * don't need to wait. Even if there is an update, we can still
4794 * continue if the key hasn't been modified.
4795 *
4796 * However, if there are updates, we need to walk the update chain
4797 * to mark future versions of the row as locked, too. That way,
4798 * if somebody deletes that future version, we're protected
4799 * against the key going away. This locking of future versions
4800 * could block momentarily, if a concurrent transaction is
4801 * deleting a key; or it could return a value to the effect that
4802 * the transaction deleting the key has already committed. So we
4803 * do this before re-locking the buffer; otherwise this would be
4804 * prone to deadlocks.
4805 *
4806 * Note that the TID we're locking was grabbed before we unlocked
4807 * the buffer. For it to change while we're not looking, the
4808 * other properties we're testing for below after re-locking the
4809 * buffer would also change, in which case we would restart this
4810 * loop above.
4811 */
4812 if (!(infomask2 & HEAP_KEYS_UPDATED))
4813 {
4814 bool updated;
4815
4816 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4817
4818 /*
4819 * If there are updates, follow the update chain; bail out if
4820 * that cannot be done.
4821 */
4822 if (follow_updates && updated &&
4823 !ItemPointerEquals(&tuple->t_self, &t_ctid))
4824 {
4825 TM_Result res;
4826
4827 res = heap_lock_updated_tuple(relation,
4828 infomask, xwait, &t_ctid,
4829 GetCurrentTransactionId(),
4830 mode);
4831 if (res != TM_Ok)
4832 {
4833 result = res;
4834 /* recovery code expects to have buffer lock held */
4835 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4836 goto failed;
4837 }
4838 }
4839
4840 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4841
4842 /*
4843 * Make sure it's still an appropriate lock, else start over.
4844 * Also, if it wasn't updated before we released the lock, but
4845 * is updated now, we start over too; the reason is that we
4846 * now need to follow the update chain to lock the new
4847 * versions.
4848 */
4849 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4850 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4851 !updated))
4852 goto l3;
4853
4854 /* Things look okay, so we can skip sleeping */
4855 require_sleep = false;
4856
4857 /*
4858 * Note we allow Xmax to change here; other updaters/lockers
4859 * could have modified it before we grabbed the buffer lock.
4860 * However, this is not a problem, because with the recheck we
4861 * just did we ensure that they still don't conflict with the
4862 * lock we want.
4863 */
4864 }
4865 }
4866 else if (mode == LockTupleShare)
4867 {
4868 /*
4869 * If we're requesting Share, we can similarly avoid sleeping if
4870 * there's no update and no exclusive lock present.
4871 */
4872 if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4873 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4874 {
4875 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4876
4877 /*
4878 * Make sure it's still an appropriate lock, else start over.
4879 * See above about allowing xmax to change.
4880 */
4881 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4882 HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4883 goto l3;
4884 require_sleep = false;
4885 }
4886 }
4887 else if (mode == LockTupleNoKeyExclusive)
4888 {
4889 /*
4890 * If we're requesting NoKeyExclusive, we might also be able to
4891 * avoid sleeping; just ensure that there is no conflicting lock
4892 * already acquired.
4893 */
4894 if (infomask & HEAP_XMAX_IS_MULTI)
4895 {
4896 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4897 mode, NULL))
4898 {
4899 /*
4900 * No conflict, but if the xmax changed under us in the
4901 * meantime, start over.
4902 */
4903 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4904 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4905 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4906 xwait))
4907 goto l3;
4908
4909 /* otherwise, we're good */
4910 require_sleep = false;
4911 }
4912 }
4913 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4914 {
4915 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4916
4917 /* if the xmax changed in the meantime, start over */
4918 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4919 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4920 xwait))
4921 goto l3;
4922 /* otherwise, we're good */
4923 require_sleep = false;
4924 }
4925 }
4926
4927 /*
4928 * As a check independent from those above, we can also avoid sleeping
4929 * if the current transaction is the sole locker of the tuple. Note
4930 * that the strength of the lock already held is irrelevant; this is
4931 * not about recording the lock in Xmax (which will be done regardless
4932 * of this optimization, below). Also, note that the cases where we
4933 * hold a lock stronger than we are requesting are already handled
4934 * above by not doing anything.
4935 *
4936 * Note we only deal with the non-multixact case here; MultiXactIdWait
4937 * is well equipped to deal with this situation on its own.
4938 */
4939 if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4940 TransactionIdIsCurrentTransactionId(xwait))
4941 {
4942 /* ... but if the xmax changed in the meantime, start over */
4943 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4944 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4945 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4946 xwait))
4947 goto l3;
4948 Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4949 require_sleep = false;
4950 }
4951
4952 /*
4953 * Time to sleep on the other transaction/multixact, if necessary.
4954 *
4955 * If the other transaction is an update/delete that's already
4956 * committed, then sleeping cannot possibly do any good: if we're
4957 * required to sleep, get out to raise an error instead.
4958 *
4959 * By here, we either have already acquired the buffer exclusive lock,
4960 * or we must wait for the locking transaction or multixact; so below
4961 * we ensure that we grab buffer lock after the sleep.
4962 */
4963 if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4964 {
4965 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4966 goto failed;
4967 }
4968 else if (require_sleep)
4969 {
4970 /*
4971 * Acquire tuple lock to establish our priority for the tuple, or
4972 * die trying. LockTuple will release us when we are next-in-line
4973 * for the tuple. We must do this even if we are share-locking,
4974 * but not if we already have a weaker lock on the tuple.
4975 *
4976 * If we are forced to "start over" below, we keep the tuple lock;
4977 * this arranges that we stay at the head of the line while
4978 * rechecking tuple state.
4979 */
4980 if (!skip_tuple_lock &&
4981 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4982 &have_tuple_lock))
4983 {
4984 /*
4985 * This can only happen if wait_policy is Skip and the lock
4986 * couldn't be obtained.
4987 */
4988 result = TM_WouldBlock;
4989 /* recovery code expects to have buffer lock held */
4990 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4991 goto failed;
4992 }
4993
4994 if (infomask & HEAP_XMAX_IS_MULTI)
4995 {
4996 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4997
4998 /* We only ever lock tuples, never update them */
4999 if (status >= MultiXactStatusNoKeyUpdate)
5000 elog(ERROR, "invalid lock mode in heap_lock_tuple");
5001
5002 /* wait for multixact to end, or die trying */
5003 switch (wait_policy)
5004 {
5005 case LockWaitBlock:
5006 MultiXactIdWait((MultiXactId) xwait, status, infomask,
5007 relation, &tuple->t_self, XLTW_Lock, NULL);
5008 break;
5009 case LockWaitSkip:
5010 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5011 status, infomask, relation,
5012 NULL, false))
5013 {
5014 result = TM_WouldBlock;
5015 /* recovery code expects to have buffer lock held */
5016 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5017 goto failed;
5018 }
5019 break;
5020 case LockWaitError:
5021 if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5022 status, infomask, relation,
5023 NULL, log_lock_failures))
5024 ereport(ERROR,
5025 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5026 errmsg("could not obtain lock on row in relation \"%s\"",
5027 RelationGetRelationName(relation))));
5028
5029 break;
5030 }
5031
5032 /*
5033 * Of course, the multixact might not be done here: if we're
5034 * requesting a light lock mode, other transactions with light
5035 * locks could still be alive, as well as locks owned by our
5036 * own xact or other subxacts of this backend. We need to
5037 * preserve the surviving MultiXact members. Note that it
5038 * isn't absolutely necessary in the latter case, but doing so
5039 * is simpler.
5040 */
5041 }
5042 else
5043 {
5044 /* wait for regular transaction to end, or die trying */
5045 switch (wait_policy)
5046 {
5047 case LockWaitBlock:
5048 XactLockTableWait(xwait, relation, &tuple->t_self,
5049 XLTW_Lock);
5050 break;
5051 case LockWaitSkip:
5052 if (!ConditionalXactLockTableWait(xwait, false))
5053 {
5054 result = TM_WouldBlock;
5055 /* recovery code expects to have buffer lock held */
5056 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5057 goto failed;
5058 }
5059 break;
5060 case LockWaitError:
5061 if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
5062 ereport(ERROR,
5063 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5064 errmsg("could not obtain lock on row in relation \"%s\"",
5065 RelationGetRelationName(relation))));
5066 break;
5067 }
5068 }
5069
5070 /* if there are updates, follow the update chain */
5071 if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
5072 !ItemPointerEquals(&tuple->t_self, &t_ctid))
5073 {
5074 TM_Result res;
5075
5076 res = heap_lock_updated_tuple(relation,
5077 infomask, xwait, &t_ctid,
5078 GetCurrentTransactionId(),
5079 mode);
5080 if (res != TM_Ok)
5081 {
5082 result = res;
5083 /* recovery code expects to have buffer lock held */
5084 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5085 goto failed;
5086 }
5087 }
5088
5089 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5090
5091 /*
5092 * xwait is done, but if xwait had just locked the tuple then some
5093 * other xact could update this tuple before we get to this point.
5094 * Check for xmax change, and start over if so.
5095 */
5096 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5097 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5098 xwait))
5099 goto l3;
5100
5101 if (!(infomask & HEAP_XMAX_IS_MULTI))
5102 {
5103 /*
5104 * Otherwise check if it committed or aborted. Note we cannot
5105 * be here if the tuple was only locked by somebody who didn't
5106 * conflict with us; that would have been handled above. So
5107 * that transaction must necessarily be gone by now. But
5108 * don't check for this in the multixact case, because some
5109 * locker transactions might still be running.
5110 */
5111 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5112 }
5113 }
5114
5115 /* By here, we're certain that we hold buffer exclusive lock again */
5116
5117 /*
5118 * We may lock if previous xmax aborted, or if it committed but only
5119 * locked the tuple without updating it; or if we didn't have to wait
5120 * at all for whatever reason.
5121 */
5122 if (!require_sleep ||
5123 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5124 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5125 HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5126 result = TM_Ok;
5127 else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5128 result = TM_Updated;
5129 else
5130 result = TM_Deleted;
5131 }
5132
5133failed:
5134 if (result != TM_Ok)
5135 {
5136 Assert(result == TM_SelfModified || result == TM_Updated ||
5137 result == TM_Deleted || result == TM_WouldBlock);
5138
5139 /*
5140 * When locking a tuple under LockWaitSkip semantics and we fail with
5141 * TM_WouldBlock above, it's possible for concurrent transactions to
5142 * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5143 * this assert is slightly different from the equivalent one in
5144 * heap_delete and heap_update.
5145 */
5146 Assert((result == TM_WouldBlock) ||
5147 !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5148 Assert(result != TM_Updated ||
5149 !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5150 tmfd->ctid = tuple->t_data->t_ctid;
5151 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5152 if (result == TM_SelfModified)
5153 tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5154 else
5155 tmfd->cmax = InvalidCommandId;
5156 goto out_locked;
5157 }
5158
5159 /*
5160 * If we didn't pin the visibility map page and the page has become all
5161 * visible while we were busy locking the buffer, or during some
5162 * subsequent window during which we had it unlocked, we'll have to unlock
5163 * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5164 * unfortunate, especially since we'll now have to recheck whether the
5165 * tuple has been locked or updated under us, but hopefully it won't
5166 * happen very often.
5167 */
5168 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5169 {
5170 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5171 visibilitymap_pin(relation, block, &vmbuffer);
5172 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5173 goto l3;
5174 }
5175
5176 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5177 old_infomask = tuple->t_data->t_infomask;
5178
5179 /*
5180 * If this is the first possibly-multixact-able operation in the current
5181 * transaction, set my per-backend OldestMemberMXactId setting. We can be
5182 * certain that the transaction will never become a member of any older
5183 * MultiXactIds than that. (We have to do this even if we end up just
5184 * using our own TransactionId below, since some other backend could
5185 * incorporate our XID into a MultiXact immediately afterwards.)
5186 */
5187 MultiXactIdSetOldestMember();
5188
5189 /*
5190 * Compute the new xmax and infomask to store into the tuple. Note we do
5191 * not modify the tuple just yet, because that would leave it in the wrong
5192 * state if multixact.c elogs.
5193 */
5194 compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5195 GetCurrentTransactionId(), mode, false,
5196 &xid, &new_infomask, &new_infomask2);
5197
5199
5200 /*
5201 * Store transaction information of xact locking the tuple.
5202 *
5203 * Note: Cmax is meaningless in this context, so don't set it; this avoids
5204 * possibly generating a useless combo CID. Moreover, if we're locking a
5205 * previously updated tuple, it's important to preserve the Cmax.
5206 *
5207 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5208 * we would break the HOT chain.
5209 */
5210 tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5211 tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5212 tuple->t_data->t_infomask |= new_infomask;
5213 tuple->t_data->t_infomask2 |= new_infomask2;
5214 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5215 HeapTupleClearHotUpdated(tuple);
5216 HeapTupleHeaderSetXmax(tuple->t_data, xid);
5217
5218 /*
5219 * Make sure there is no forward chain link in t_ctid. Note that in the
5220 * cases where the tuple has been updated, we must not overwrite t_ctid,
5221 * because it was set by the updater. Moreover, if the tuple has been
5222 * updated, we need to follow the update chain to lock the new versions of
5223 * the tuple as well.
5224 */
5225 if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5226 tuple->t_data->t_ctid = *tid;
5227
5228 /* Clear only the all-frozen bit on visibility map if needed */
5229 if (PageIsAllVisible(page) &&
5230 visibilitymap_clear(relation, block, vmbuffer,
5231 VISIBILITYMAP_ALL_FROZEN))
5232 cleared_all_frozen = true;
5233
5234
5235 MarkBufferDirty(*buffer);
5236
5237 /*
5238 * XLOG stuff. You might think that we don't need an XLOG record because
5239 * there is no state change worth restoring after a crash. You would be
5240 * wrong however: we have just written either a TransactionId or a
5241 * MultiXactId that may never have been seen on disk before, and we need
5242 * to make sure that there are XLOG entries covering those ID numbers.
5243 * Else the same IDs might be re-used after a crash, which would be
5244 * disastrous if this page made it to disk before the crash. Essentially
5245 * we have to enforce the WAL log-before-data rule even in this case.
5246 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5247 * entries for everything anyway.)
5248 */
5249 if (RelationNeedsWAL(relation))
5250 {
5251 xl_heap_lock xlrec;
5252 XLogRecPtr recptr;
5253
5254 XLogBeginInsert();
5255 XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5256
5257 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5258 xlrec.xmax = xid;
5259 xlrec.infobits_set = compute_infobits(new_infomask,
5260 tuple->t_data->t_infomask2);
5261 xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5262 XLogRegisterData(&xlrec, SizeOfHeapLock);
5263
5264 /* we don't decode row locks atm, so no need to log the origin */
5265
5266 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5267
5268 PageSetLSN(page, recptr);
5269 }
5270
5271 END_CRIT_SECTION();
5272
5273 result = TM_Ok;
5274
5275out_locked:
5276 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5277
5278out_unlocked:
5279 if (BufferIsValid(vmbuffer))
5280 ReleaseBuffer(vmbuffer);
5281
5282 /*
5283 * Don't update the visibility map here. Locking a tuple doesn't change
5284 * visibility info.
5285 */
5286
5287 /*
5288 * Now that we have successfully marked the tuple as locked, we can
5289 * release the lmgr tuple lock, if we had it.
5290 */
5291 if (have_tuple_lock)
5292 UnlockTupleTuplock(relation, tid, mode);
5293
5294 return result;
5295}
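
A toy model (not PostgreSQL code) of the three wait policies handled above: Block waits until the holder is gone, Skip reports "would block" without waiting, and Error raises an error immediately:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { WAIT_BLOCK, WAIT_SKIP, WAIT_ERROR } WaitPolicy;
typedef enum { RES_OK, RES_WOULD_BLOCK } Result;

static Result
acquire(bool *held_by_other, WaitPolicy policy)
{
    if (!*held_by_other)
        return RES_OK;              /* free: take the lock immediately */

    switch (policy)
    {
        case WAIT_BLOCK:
            /* the real code sleeps until the holder commits or aborts */
            *held_by_other = false; /* pretend the holder finished */
            return RES_OK;
        case WAIT_SKIP:
            return RES_WOULD_BLOCK; /* caller skips this row */
        case WAIT_ERROR:
            fprintf(stderr, "could not obtain lock on row\n");
            exit(1);
    }
    return RES_WOULD_BLOCK;         /* not reached */
}

int
main(void)
{
    bool        held = true;        /* another session holds the lock */

    printf("%d\n", acquire(&held, WAIT_SKIP)); /* prints 1: would block */
    return 0;
}
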
5296
5297/*
5298 * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5299 * its normal, Xmax-based tuple lock.
5300 *
5301 * have_tuple_lock is an input and output parameter: on input, it indicates
5302 * whether the lock has previously been acquired (and this function does
5303 * nothing in that case). If this function returns success, have_tuple_lock
5304 * has been flipped to true.
5305 *
5306 * Returns false if it was unable to obtain the lock; this can only happen if
5307 * wait_policy is Skip.
5308 */
5309 static bool
5310 heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
5311 LockWaitPolicy wait_policy, bool *have_tuple_lock)
5312{
5313 if (*have_tuple_lock)
5314 return true;
5315
5316 switch (wait_policy)
5317 {
5318 case LockWaitBlock:
5319 LockTupleTuplock(relation, tid, mode);
5320 break;
5321
5322 case LockWaitSkip:
5323 if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5324 return false;
5325 break;
5326
5327 case LockWaitError:
5328 if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
5329 ereport(ERROR,
5330 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5331 errmsg("could not obtain lock on row in relation \"%s\"",
5332 RelationGetRelationName(relation))));
5333 break;
5334 }
5335 *have_tuple_lock = true;
5336
5337 return true;
5338}
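
The have_tuple_lock in/out flag makes the helper idempotent, which matters because its callers sit in retry loops (the goto l3 paths above) and must take, and later release, the heavyweight lock exactly once. The pattern in miniature, with a counter standing in for the lock manager:

#include <stdbool.h>
#include <stdio.h>

static int  lock_calls = 0;

static bool
acquire_once(bool *have_lock)
{
    if (*have_lock)
        return true;            /* already acquired on an earlier pass */
    lock_calls++;               /* stand-in for LockTupleTuplock() */
    *have_lock = true;
    return true;
}

int
main(void)
{
    bool        have_lock = false;

    for (int attempt = 0; attempt < 3; attempt++)   /* retry loop */
        acquire_once(&have_lock);
    printf("lock taken %d time(s)\n", lock_calls);  /* exactly once */
    return 0;
}
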
5339
5340/*
5341 * Given an original set of Xmax and infomask, and a transaction (identified by
5342 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5343 * corresponding infomasks to use on the tuple.
5344 *
5345 * Note that this might have side effects such as creating a new MultiXactId.
5346 *
5347 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5348 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5349 * but it was not running anymore. There is a race condition, which is that the
5350 * MultiXactId may have finished since then, but that uncommon case is handled
5351 * either here, or within MultiXactIdExpand.
5352 *
5353 * There is a similar race condition possible when the old xmax was a regular
5354 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5355 * window, but it's still possible to end up creating an unnecessary
5356 * MultiXactId. Fortunately this is harmless.
5357 */
5358 static void
5359 compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5360 uint16 old_infomask2, TransactionId add_to_xmax,
5361 LockTupleMode mode, bool is_update,
5362 TransactionId *result_xmax, uint16 *result_infomask,
5363 uint16 *result_infomask2)
5364{
5365 TransactionId new_xmax;
5366 uint16 new_infomask,
5367 new_infomask2;
5368
5369 Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5370
5371l5:
5372 new_infomask = 0;
5373 new_infomask2 = 0;
5374 if (old_infomask & HEAP_XMAX_INVALID)
5375 {
5376 /*
5377 * No previous locker; we just insert our own TransactionId.
5378 *
5379 * Note that it's critical that this case be the first one checked,
5380 * because there are several blocks below that come back to this one
5381 * to implement certain optimizations; old_infomask might contain
5382 * other dirty bits in those cases, but we don't really care.
5383 */
5384 if (is_update)
5385 {
5386 new_xmax = add_to_xmax;
5387 if (mode == LockTupleExclusive)
5388 new_infomask2 |= HEAP_KEYS_UPDATED;
5389 }
5390 else
5391 {
5392 new_infomask |= HEAP_XMAX_LOCK_ONLY;
5393 switch (mode)
5394 {
5395 case LockTupleKeyShare:
5396 new_xmax = add_to_xmax;
5397 new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5398 break;
5399 case LockTupleShare:
5400 new_xmax = add_to_xmax;
5401 new_infomask |= HEAP_XMAX_SHR_LOCK;
5402 break;
5403 case LockTupleNoKeyExclusive:
5404 new_xmax = add_to_xmax;
5405 new_infomask |= HEAP_XMAX_EXCL_LOCK;
5406 break;
5407 case LockTupleExclusive:
5408 new_xmax = add_to_xmax;
5409 new_infomask |= HEAP_XMAX_EXCL_LOCK;
5410 new_infomask2 |= HEAP_KEYS_UPDATED;
5411 break;
5412 default:
5413 new_xmax = InvalidTransactionId; /* silence compiler */
5414 elog(ERROR, "invalid lock mode");
5415 }
5416 }
5417 }
5418 else if (old_infomask & HEAP_XMAX_IS_MULTI)
5419 {
5420 MultiXactStatus new_status;
5421
5422 /*
5423 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5424 * cross-check.
5425 */
5426 Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5427
5428 /*
5429 * A multixact together with LOCK_ONLY set but neither lock bit set
5430 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5431 * anymore. This check is critical for databases upgraded by
5432 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5433 * that such multis are never passed.
5434 */
5435 if (HEAP_LOCKED_UPGRADED(old_infomask))
5436 {
5437 old_infomask &= ~HEAP_XMAX_IS_MULTI;
5438 old_infomask |= HEAP_XMAX_INVALID;
5439 goto l5;
5440 }
5441
5442 /*
5443 * If the XMAX is already a MultiXactId, then we need to expand it to
5444 * include add_to_xmax; but if all the members were lockers and are
5445 * all gone, we can do away with the IS_MULTI bit and just set
5446 * add_to_xmax as the only locker/updater. If all lockers are gone
5447 * and we have an updater that aborted, we can also do without a
5448 * multi.
5449 *
5450 * The cost of doing GetMultiXactIdMembers would be paid by
5451 * MultiXactIdExpand if we weren't to do this, so this check is not
5452 * incurring extra work anyhow.
5453 */
5454 if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5455 {
5456 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5457 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5458 old_infomask)))
5459 {
5460 /*
5461 * Reset these bits and restart; otherwise fall through to
5462 * create a new multi below.
5463 */
5464 old_infomask &= ~HEAP_XMAX_IS_MULTI;
5465 old_infomask |= HEAP_XMAX_INVALID;
5466 goto l5;
5467 }
5468 }
5469
5470 new_status = get_mxact_status_for_lock(mode, is_update);
5471
5472 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5473 new_status);
5474 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5475 }
5476 else if (old_infomask & HEAP_XMAX_COMMITTED)
5477 {
5478 /*
5479 * It's a committed update, so we need to preserve it as the updater of
5480 * the tuple.
5481 */
5482 MultiXactStatus status;
5483 MultiXactStatus new_status;
5484
5485 if (old_infomask2 & HEAP_KEYS_UPDATED)
5486 status = MultiXactStatusUpdate;
5487 else
5488 status = MultiXactStatusNoKeyUpdate;
5489
5490 new_status = get_mxact_status_for_lock(mode, is_update);
5491
5492 /*
5493 * since it's not running, it's obviously impossible for the old
5494 * updater to be identical to the current one, so we need not check
5495 * for that case as we do in the block above.
5496 */
5497 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5498 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5499 }
5500 else if (TransactionIdIsInProgress(xmax))
5501 {
5502 /*
5503 * If the XMAX is a valid, in-progress TransactionId, then we need to
5504 * create a new MultiXactId that includes both the old locker or
5505 * updater and our own TransactionId.
5506 */
5507 MultiXactStatus new_status;
5508 MultiXactStatus old_status;
5509 LockTupleMode old_mode;
5510
5511 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5512 {
5513 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5514 old_status = MultiXactStatusForKeyShare;
5515 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5516 old_status = MultiXactStatusForShare;
5517 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5518 {
5519 if (old_infomask2 & HEAP_KEYS_UPDATED)
5520 old_status = MultiXactStatusForUpdate;
5521 else
5522 old_status = MultiXactStatusForNoKeyUpdate;
5523 }
5524 else
5525 {
5526 /*
5527 * LOCK_ONLY can be present alone only when a page has been
5528 * upgraded by pg_upgrade. But in that case,
5529 * TransactionIdIsInProgress() should have returned false. We
5530 * assume it's no longer locked in this case.
5531 */
5532 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5533 old_infomask |= HEAP_XMAX_INVALID;
5534 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5535 goto l5;
5536 }
5537 }
5538 else
5539 {
5540 /* it's an update, but which kind? */
5541 if (old_infomask2 & HEAP_KEYS_UPDATED)
5542 old_status = MultiXactStatusUpdate;
5543 else
5544 old_status = MultiXactStatusNoKeyUpdate;
5545 }
5546
5547 old_mode = TUPLOCK_from_mxstatus(old_status);
5548
5549 /*
5550 * If the lock to be acquired is for the same TransactionId as the
5551 * existing lock, there's an optimization possible: consider only the
5552 * strongest of both locks as the only one present, and restart.
5553 */
5554 if (xmax == add_to_xmax)
5555 {
5556 /*
5557 * Note that it's not possible for the original tuple to be
5558 * updated: we wouldn't be here because the tuple would have been
5559 * invisible and we wouldn't try to update it. As a subtlety,
5560 * this code can also run when traversing an update chain to lock
5561 * future versions of a tuple. But we wouldn't be here either,
5562 * because the add_to_xmax would be different from the original
5563 * updater.
5564 */
5565 Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5566
5567 /* acquire the strongest of both */
5568 if (mode < old_mode)
5569 mode = old_mode;
5570 /* mustn't touch is_update */
5571
5572 old_infomask |= HEAP_XMAX_INVALID;
5573 goto l5;
5574 }
5575
5576 /* otherwise, just fall back to creating a new multixact */
5577 new_status = get_mxact_status_for_lock(mode, is_update);
5578 new_xmax = MultiXactIdCreate(xmax, old_status,
5579 add_to_xmax, new_status);
5580 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5581 }
5582 else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5583 TransactionIdDidCommit(xmax))
5584 {
5585 /*
5586 * It's a committed update, so we need to preserve it as the updater of the
5587 * tuple.
5588 */
5589 MultiXactStatus status;
5590 MultiXactStatus new_status;
5591
5592 if (old_infomask2 & HEAP_KEYS_UPDATED)
5593 status = MultiXactStatusUpdate;
5594 else
5595 status = MultiXactStatusNoKeyUpdate;
5596
5597 new_status = get_mxact_status_for_lock(mode, is_update);
5598
5599 /*
5600 * since it's not running, it's obviously impossible for the old
5601 * updater to be identical to the current one, so we need not check
5602 * for that case as we do in the block above.
5603 */
5604 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5605 GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5606 }
5607 else
5608 {
5609 /*
5610 * Can get here iff the locking/updating transaction was running when
5611 * the infomask was extracted from the tuple, but finished before
5612 * TransactionIdIsInProgress got to run. Deal with it as if there was
5613 * no locker at all in the first place.
5614 */
5615 old_infomask |= HEAP_XMAX_INVALID;
5616 goto l5;
5617 }
5618
5619 *result_infomask = new_infomask;
5620 *result_infomask2 = new_infomask2;
5621 *result_xmax = new_xmax;
5622}
5623
5624/*
5625 * Subroutine for heap_lock_updated_tuple_rec.
5626 *
5627 * Given a hypothetical multixact status held by the transaction identified
5628 * with the given xid, does the current transaction need to wait, fail, or can
5629 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5630 * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5631 * returned. If the lock is already held by the current transaction, return
5632 * TM_SelfModified. In case of a conflict with another transaction, a
5633 * different HeapTupleSatisfiesUpdate return code is returned.
5634 *
5635 * The held status is said to be hypothetical because it might correspond to a
5636 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5637 * way for simplicity of API.
5638 */
5639static TM_Result
5640test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5641 LockTupleMode mode, HeapTuple tup,
5642 bool *needwait)
5643{
5644 MultiXactStatus wantedstatus;
5645
5646 *needwait = false;
5647 wantedstatus = get_mxact_status_for_lock(mode, false);
5648
5649 /*
5650 * Note: we *must* check TransactionIdIsInProgress before
5651 * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5652 * for an explanation.
5653 */
5654 if (TransactionIdIsCurrentTransactionId(xid))
5655 {
5656 /*
5657 * The tuple has already been locked by our own transaction. This is
5658 * very rare but can happen if multiple transactions are trying to
5659 * lock an ancient version of the same tuple.
5660 */
5661 return TM_SelfModified;
5662 }
5663 else if (TransactionIdIsInProgress(xid))
5664 {
5665 /*
5666 * If the locking transaction is running, what we do depends on
5667 * whether the lock modes conflict: if they do, then we must wait for
5668 * it to finish; otherwise we can fall through to lock this tuple
5669 * version without waiting.
5670 */
5671 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5672 LOCKMODE_from_mxstatus(wantedstatus)))
5673 {
5674 *needwait = true;
5675 }
5676
5677 /*
5678 * If we set needwait above, then this value doesn't matter;
5679 * otherwise, this value signals to caller that it's okay to proceed.
5680 */
5681 return TM_Ok;
5682 }
5683 else if (TransactionIdDidAbort(xid))
5684 return TM_Ok;
5685 else if (TransactionIdDidCommit(xid))
5686 {
5687 /*
5688 * The other transaction committed. If it was only a locker, then the
5689 * lock is completely gone now and we can return success; but if it
5690 * was an update, then what we do depends on whether the two lock
5691 * modes conflict. If they conflict, then we must report error to
5692 * caller. But if they don't, we can fall through to allow the current
5693 * transaction to lock the tuple.
5694 *
5695 * Note: the reason we worry about ISUPDATE here is because as soon as
5696 * a transaction ends, all its locks are gone and meaningless, and
5697 * thus we can ignore them; whereas its updates persist. In the
5698 * TransactionIdIsInProgress case, above, we don't need to check
5699 * because we know the lock is still "alive" and thus a conflict always
5700 * needs to be checked.
5701 */
5702 if (!ISUPDATE_from_mxstatus(status))
5703 return TM_Ok;
5704
5705 if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5706 LOCKMODE_from_mxstatus(wantedstatus)))
5707 {
5708 /* bummer */
5709 if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5710 return TM_Updated;
5711 else
5712 return TM_Deleted;
5713 }
5714
5715 return TM_Ok;
5716 }
5717
5718 /* Not in progress, not aborted, not committed -- must have crashed */
5719 return TM_Ok;
5720}
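/*
 * Editorial sketch, not part of heapam.c: the conflict test above reduces
 * to mapping each MultiXactStatus to its heavyweight lock mode and asking
 * the lock manager whether the two modes conflict.  The guard macro
 * HEAPAM_EXAMPLES is hypothetical (never defined); the helpers are the
 * same file-local ones test_lockmode_for_conflict() uses.
 */
#ifdef HEAPAM_EXAMPLES
static bool
example_mxstatus_conflicts(MultiXactStatus held, LockTupleMode wanted)
{
	/* translate the requested tuple lock into a hypothetical mxact status */
	MultiXactStatus wantedstatus = get_mxact_status_for_lock(wanted, false);

	/* same check applied above to lockers that are still in progress */
	return DoLockModesConflict(LOCKMODE_from_mxstatus(held),
							   LOCKMODE_from_mxstatus(wantedstatus));
}
#endif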
5721
5722
5723/*
5724 * Recursive part of heap_lock_updated_tuple
5725 *
5726 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5727 * xid with the given mode; if this tuple is updated, recurse to lock the new
5728 * version as well.
5729 */
5730static TM_Result
5731heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax,
5732 const ItemPointerData *tid, TransactionId xid,
5733 LockTupleMode mode)
5734{
5735 TM_Result result;
5736 ItemPointerData tupid;
5737 HeapTupleData mytup;
5738 Buffer buf;
5739 uint16 new_infomask,
5740 new_infomask2,
5741 old_infomask,
5742 old_infomask2;
5743 TransactionId xmax,
5744 new_xmax;
5745 bool cleared_all_frozen = false;
5746 bool pinned_desired_page;
5747 Buffer vmbuffer = InvalidBuffer;
5748 BlockNumber block;
5749
5750 ItemPointerCopy(tid, &tupid);
5751
5752 for (;;)
5753 {
5754 new_infomask = 0;
5755 new_xmax = InvalidTransactionId;
5756 block = ItemPointerGetBlockNumber(&tupid);
5757 ItemPointerCopy(&tupid, &(mytup.t_self));
5758
5759 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5760 {
5761 /*
5762 * if we fail to find the updated version of the tuple, it's
5763 * because it was vacuumed/pruned away after its creator
5764 * transaction aborted. So behave as if we got to the end of the
5765 * chain, and there's no further tuple to lock: return success to
5766 * caller.
5767 */
5768 result = TM_Ok;
5769 goto out_unlocked;
5770 }
5771
5772l4:
5773 CHECK_FOR_INTERRUPTS();
5774
5775 /*
5776 * Before locking the buffer, pin the visibility map page if it
5777 * appears to be necessary. Since we haven't got the lock yet,
5778 * someone else might be in the middle of changing this, so we'll need
5779 * to recheck after we have the lock.
5780 */
5781 if (PageIsAllVisible(BufferGetPage(buf)))
5782 {
5783 visibilitymap_pin(rel, block, &vmbuffer);
5784 pinned_desired_page = true;
5785 }
5786 else
5787 pinned_desired_page = false;
5788
5789 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5790
5791 /*
5792 * If we didn't pin the visibility map page and the page has become
5793 * all visible while we were busy locking the buffer, we'll have to
5794 * unlock and re-lock, to avoid holding the buffer lock across I/O.
5795 * That's a bit unfortunate, but hopefully shouldn't happen often.
5796 *
5797 * Note: in some paths through this function, we will reach here
5798 * holding a pin on a vm page that may or may not be the one matching
5799 * this page. If this page isn't all-visible, we won't use the vm
5800 * page, but we hold onto such a pin till the end of the function.
5801 */
5802 if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5803 {
5804 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5805 visibilitymap_pin(rel, block, &vmbuffer);
5806 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5807 }
5808
5809 /*
5810 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5811 * end of the chain, we're done, so return success.
5812 */
5813 if (TransactionIdIsValid(priorXmax) &&
5814 !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5815 priorXmax))
5816 {
5817 result = TM_Ok;
5818 goto out_locked;
5819 }
5820
5821 /*
5822 * Also check Xmin: if this tuple was created by an aborted
5823 * (sub)transaction, then we already locked the last live one in the
5824 * chain, thus we're done, so return success.
5825 */
5826 if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5827 {
5828 result = TM_Ok;
5829 goto out_locked;
5830 }
5831
5832 old_infomask = mytup.t_data->t_infomask;
5833 old_infomask2 = mytup.t_data->t_infomask2;
5834 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5835
5836 /*
5837 * If this tuple version has been updated or locked by some concurrent
5838 * transaction(s), what we do depends on whether our lock mode
5839 * conflicts with what those other transactions hold, and also on the
5840 * status of them.
5841 */
5842 if (!(old_infomask & HEAP_XMAX_INVALID))
5843 {
5844 TransactionId rawxmax;
5845 bool needwait;
5846
5847 rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5848 if (old_infomask & HEAP_XMAX_IS_MULTI)
5849 {
5850 int nmembers;
5851 int i;
5852 MultiXactMember *members;
5853
5854 /*
5855 * We don't need a test for pg_upgrade'd tuples: this is only
5856 * applied to tuples after the first in an update chain. Said
5857 * first tuple in the chain may well be locked-in-9.2-and-
5858 * pg_upgraded, but that one was already locked by our caller,
5859 * not us; and any subsequent ones cannot be because our
5860 * caller must necessarily have obtained a snapshot later than
5861 * the pg_upgrade itself.
5862 */
5863 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5864
5865 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5866 HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5867 for (i = 0; i < nmembers; i++)
5868 {
5869 result = test_lockmode_for_conflict(members[i].status,
5870 members[i].xid,
5871 mode,
5872 &mytup,
5873 &needwait);
5874
5875 /*
5876 * If the tuple was already locked by ourselves in a
5877 * previous iteration of this (say heap_lock_tuple was
5878 * forced to restart the locking loop because of a change
5879 * in xmax), then we hold the lock already on this tuple
5880 * version and we don't need to do anything; and this is
5881 * not an error condition either. We just need to skip
5882 * this tuple and continue locking the next version in the
5883 * update chain.
5884 */
5885 if (result == TM_SelfModified)
5886 {
5887 pfree(members);
5888 goto next;
5889 }
5890
5891 if (needwait)
5892 {
5893 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5894 XactLockTableWait(members[i].xid, rel,
5895 &mytup.t_self,
5896 XLTW_LockUpdated);
5897 pfree(members);
5898 goto l4;
5899 }
5900 if (result != TM_Ok)
5901 {
5902 pfree(members);
5903 goto out_locked;
5904 }
5905 }
5906 if (members)
5907 pfree(members);
5908 }
5909 else
5910 {
5911 MultiXactStatus status;
5912
5913 /*
5914 * For a non-multi Xmax, we first need to compute the
5915 * corresponding MultiXactStatus by using the infomask bits.
5916 */
5917 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5918 {
5919 if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5920 status = MultiXactStatusForKeyShare;
5921 else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5922 status = MultiXactStatusForShare;
5923 else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5924 {
5925 if (old_infomask2 & HEAP_KEYS_UPDATED)
5926 status = MultiXactStatusForUpdate;
5927 else
5928 status = MultiXactStatusForNoKeyUpdate;
5929 }
5930 else
5931 {
5932 /*
5933 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5934 * as share-locked in the old cluster) shouldn't be
5935 * seen in the middle of an update chain.
5936 */
5937 elog(ERROR, "invalid lock status in tuple");
5938 }
5939 }
5940 else
5941 {
5942 /* it's an update, but which kind? */
5943 if (old_infomask2 & HEAP_KEYS_UPDATED)
5944 status = MultiXactStatusUpdate;
5945 else
5946 status = MultiXactStatusNoKeyUpdate;
5947 }
5948
5949 result = test_lockmode_for_conflict(status, rawxmax, mode,
5950 &mytup, &needwait);
5951
5952 /*
5953 * If the tuple was already locked by ourselves in a previous
5954 * iteration of this (say heap_lock_tuple was forced to
5955 * restart the locking loop because of a change in xmax), then
5956 * we hold the lock already on this tuple version and we don't
5957 * need to do anything; and this is not an error condition
5958 * either. We just need to skip this tuple and continue
5959 * locking the next version in the update chain.
5960 */
5961 if (result == TM_SelfModified)
5962 goto next;
5963
5964 if (needwait)
5965 {
5966 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5967 XactLockTableWait(rawxmax, rel, &mytup.t_self,
5968 XLTW_LockUpdated);
5969 goto l4;
5970 }
5971 if (result != TM_Ok)
5972 {
5973 goto out_locked;
5974 }
5975 }
5976 }
5977
5978 /* compute the new Xmax and infomask values for the tuple ... */
5979 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5980 xid, mode, false,
5981 &new_xmax, &new_infomask, &new_infomask2);
5982
5983 if (PageIsAllVisible(BufferGetPage(buf)) &&
5984 visibilitymap_clear(rel, block, vmbuffer,
5985 VISIBILITYMAP_ALL_FROZEN))
5986 cleared_all_frozen = true;
5987
5988 START_CRIT_SECTION();
5989
5990 /* ... and set them */
5991 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5992 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5993 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5994 mytup.t_data->t_infomask |= new_infomask;
5995 mytup.t_data->t_infomask2 |= new_infomask2;
5996
5997 MarkBufferDirty(buf);
5998
5999 /* XLOG stuff */
6000 if (RelationNeedsWAL(rel))
6001 {
6002 xl_heap_lock_updated xlrec;
6003 XLogRecPtr recptr;
6004 Page page = BufferGetPage(buf);
6005
6006 XLogBeginInsert();
6007 XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
6008
6009 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
6010 xlrec.xmax = new_xmax;
6011 xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
6012 xlrec.flags =
6013 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
6014
6015 XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
6016
6017 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
6018
6019 PageSetLSN(page, recptr);
6020 }
6021
6022 END_CRIT_SECTION();
6023
6024next:
6025 /* if we find the end of update chain, we're done. */
6026 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
6027 HeapTupleHeaderIsOnlyLocked(mytup.t_data) ||
6028 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
6029 ItemPointerIndicatesMovedPartitions(&mytup.t_data->t_ctid))
6030 {
6031 result = TM_Ok;
6032 goto out_locked;
6033 }
6034
6035 /* tail recursion */
6036 priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
6037 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6038 UnlockReleaseBuffer(buf);
6039 }
6040
6041 result = TM_Ok;
6042
6043out_locked:
6044 UnlockReleaseBuffer(buf);
6045
6046out_unlocked:
6047 if (vmbuffer != InvalidBuffer)
6048 ReleaseBuffer(vmbuffer);
6049
6050 return result;
6051}
6052
6053/*
6054 * heap_lock_updated_tuple
6055 * Follow update chain when locking an updated tuple, acquiring locks (row
6056 * marks) on the updated versions.
6057 *
6058 * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
6059 * fields from the initial tuple. We will lock the tuples starting from the
6060 * one that 'prior_ctid' points to. Note: This function does not lock the
6061 * initial tuple itself.
6062 *
6063 * This function doesn't check visibility, it just unconditionally marks the
6064 * tuple(s) as locked. If any tuple in the updated chain is being deleted
6065 * concurrently (or updated with the key being modified), sleep until the
6066 * transaction doing it is finished.
6067 *
6068 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6069 * when we have to wait for other transactions to release them, as opposed to
6070 * what heap_lock_tuple does. The reason is that having more than one
6071 * transaction walking the chain is probably uncommon enough that risk of
6072 * starvation is not likely: one of the preconditions for being here is that
6073 * the snapshot in use predates the update that created this tuple (because we
6074 * started at an earlier version of the tuple), but at the same time such a
6075 * transaction cannot be using repeatable read or serializable isolation
6076 * levels, because that would lead to a serializability failure.
6077 */
6078static TM_Result
6079heap_lock_updated_tuple(Relation rel,
6080 uint16 prior_infomask,
6081 TransactionId prior_raw_xmax,
6082 const ItemPointerData *prior_ctid,
6083 TransactionId xid, LockTupleMode mode)
6084{
6085 INJECTION_POINT("heap_lock_updated_tuple", NULL);
6086
6087 /*
6088 * If the tuple has moved into another partition (effectively a delete)
6089 * stop here.
6090 */
6091 if (!ItemPointerIndicatesMovedPartitions(prior_ctid))
6092 {
6093 TransactionId prior_xmax;
6094
6095 /*
6096 * If this is the first possibly-multixact-able operation in the
6097 * current transaction, set my per-backend OldestMemberMXactId
6098 * setting. We can be certain that the transaction will never become a
6099 * member of any older MultiXactIds than that. (We have to do this
6100 * even if we end up just using our own TransactionId below, since
6101 * some other backend could incorporate our XID into a MultiXact
6102 * immediately afterwards.)
6103 */
6104 MultiXactIdSetOldestMember();
6105
6106 prior_xmax = (prior_infomask & HEAP_XMAX_IS_MULTI) ?
6107 MultiXactIdGetUpdateXid(prior_raw_xmax, prior_infomask) : prior_raw_xmax;
6108 return heap_lock_updated_tuple_rec(rel, prior_xmax, prior_ctid, xid, mode);
6109 }
6110
6111 /* nothing to lock */
6112 return TM_Ok;
6113}
6114
6115/*
6116 * heap_finish_speculative - mark speculative insertion as successful
6117 *
6118 * To successfully finish a speculative insertion we have to clear speculative
6119 * token from tuple. To do so the t_ctid field, which will contain a
6120 * speculative token value, is modified in place to point to the tuple itself,
6121 * which is characteristic of a newly inserted ordinary tuple.
6122 *
6123 * NB: It is not ok to commit without either finishing or aborting a
6124 * speculative insertion. We could treat speculative tuples of committed
6125 * transactions implicitly as completed, but then we would have to be prepared
6126 * to deal with speculative tokens on committed tuples. That wouldn't be
6127 * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6128 * but clearing the token at completion isn't very expensive either.
6129 * An explicit confirmation WAL record also makes logical decoding simpler.
6130 */
6131void
6132heap_finish_speculative(Relation relation, const ItemPointerData *tid)
6133{
6134 Buffer buffer;
6135 Page page;
6136 OffsetNumber offnum;
6137 ItemId lp;
6138 HeapTupleHeader htup;
6139
6140 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6141 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6142 page = BufferGetPage(buffer);
6143
6144 offnum = ItemPointerGetOffsetNumber(tid);
6145 if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
6146 elog(ERROR, "offnum out of range");
6147 lp = PageGetItemId(page, offnum);
6148 if (!ItemIdIsNormal(lp))
6149 elog(ERROR, "invalid lp");
6150
6151 htup = (HeapTupleHeader) PageGetItem(page, lp);
6152
6153 /* NO EREPORT(ERROR) from here till changes are logged */
6154 START_CRIT_SECTION();
6155
6156 Assert(HeapTupleHeaderIsSpeculative(htup));
6157
6158 MarkBufferDirty(buffer);
6159
6160 /*
6161 * Replace the speculative insertion token with a real t_ctid, pointing to
6162 * itself like it does on regular tuples.
6163 */
6164 htup->t_ctid = *tid;
6165
6166 /* XLOG stuff */
6167 if (RelationNeedsWAL(relation))
6168 {
6169 xl_heap_confirm xlrec;
6170 XLogRecPtr recptr;
6171
6172 xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6173
6174 XLogBeginInsert();
6175
6176 /* We want the same filtering on this as on a plain insert */
6177 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6178
6179 XLogRegisterData(&xlrec, SizeOfHeapConfirm);
6180 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6181
6182 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6183
6184 PageSetLSN(page, recptr);
6185 }
6186
6187 END_CRIT_SECTION();
6188
6189 UnlockReleaseBuffer(buffer);
6190}
6191
6192/*
6193 * heap_abort_speculative - kill a speculatively inserted tuple
6194 *
6195 * Marks a tuple that was speculatively inserted in the same command as dead,
6196 * by setting its xmin as invalid. That makes it immediately appear as dead
6197 * to all transactions, including our own. In particular, it makes
6198 * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6199 * inserting a duplicate key value won't unnecessarily wait for our whole
6200 * transaction to finish (it'll just wait for our speculative insertion to
6201 * finish).
6202 *
6203 * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6204 * that arise due to a mutual dependency that is not user visible. By
6205 * definition, unprincipled deadlocks cannot be prevented by the user
6206 * reordering lock acquisition in client code, because the implementation level
6207 * lock acquisitions are not under the user's direct control. If speculative
6208 * inserters did not take this precaution, then under high concurrency they
6209 * could deadlock with each other, which would not be acceptable.
6210 *
6211 * This is somewhat redundant with heap_delete, but we prefer to have a
6212 * dedicated routine with stripped down requirements. Note that this is also
6213 * used to delete the TOAST tuples created during speculative insertion.
6214 *
6215 * This routine does not affect logical decoding as it only looks at
6216 * confirmation records.
6217 */
6218void
6219heap_abort_speculative(Relation relation, const ItemPointerData *tid)
6220{
6221 TransactionId xid = GetCurrentTransactionId();
6222 ItemId lp;
6223 HeapTupleData tp;
6224 Page page;
6225 BlockNumber block;
6226 Buffer buffer;
6227
6228 Assert(ItemPointerIsValid(tid));
6229
6230 block = ItemPointerGetBlockNumber(tid);
6231 buffer = ReadBuffer(relation, block);
6232 page = BufferGetPage(buffer);
6233
6234 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6235
6236 /*
6237 * Page can't be all visible, we just inserted into it, and are still
6238 * running.
6239 */
6240 Assert(!PageIsAllVisible(page));
6241
6242 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6243 Assert(ItemIdIsNormal(lp));
6244
6245 tp.t_tableOid = RelationGetRelid(relation);
6246 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6247 tp.t_len = ItemIdGetLength(lp);
6248 tp.t_self = *tid;
6249
6250 /*
6251 * Sanity check that the tuple really is a speculatively inserted tuple,
6252 * inserted by us.
6253 */
6254 if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6255 elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6256 if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6257 elog(ERROR, "attempted to kill a non-speculative tuple");
6258 Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6259
6260 /*
6261 * No need to check for serializable conflicts here. There is never a
6262 * need for a combo CID, either. No need to extract replica identity, or
6263 * do anything special with infomask bits.
6264 */
6265
6266 START_CRIT_SECTION();
6267
6268 /*
6269 * The tuple will become DEAD immediately. Flag that this page is a
6270 * candidate for pruning by setting xmin to TransactionXmin. While not
6271 * immediately prunable, it is the oldest xid we can cheaply determine
6272 * that's safe against wraparound / being older than the table's
6273 * relfrozenxid. To defend against the unlikely case of a new relation
6274 * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6275 * if so (vacuum can't subsequently move relfrozenxid to beyond
6276 * TransactionXmin, so there's no race here).
6277 */
6278 Assert(TransactionIdIsValid(TransactionXmin));
6279 {
6280 TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6281 TransactionId prune_xid;
6282
6283 if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6284 prune_xid = relfrozenxid;
6285 else
6286 prune_xid = TransactionXmin;
6287 PageSetPrunable(page, prune_xid);
6288 }
6289
6290 /* store transaction information of xact deleting the tuple */
6291 tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6292 tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6293
6294 /*
6295 * Set the tuple header xmin to InvalidTransactionId. This makes the
6296 * tuple immediately invisible to everyone. (In particular, to any
6297 * transactions waiting on the speculative token, woken up later.)
6298 */
6299 HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6300
6301 /* Clear the speculative insertion token too */
6302 tp.t_data->t_ctid = tp.t_self;
6303
6304 MarkBufferDirty(buffer);
6305
6306 /*
6307 * XLOG stuff
6308 *
6309 * The WAL records generated here match heap_delete(). The same recovery
6310 * routines are used.
6311 */
6312 if (RelationNeedsWAL(relation))
6313 {
6314 xl_heap_delete xlrec;
6315 XLogRecPtr recptr;
6316
6317 xlrec.flags = XLH_DELETE_IS_SUPER;
6318 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6319 tp.t_data->t_infomask2);
6320 xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6321 xlrec.xmax = xid;
6322
6323 XLogBeginInsert();
6324 XLogRegisterData(&xlrec, SizeOfHeapDelete);
6325 XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6326
6327 /* No replica identity & replication origin logged */
6328
6329 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6330
6331 PageSetLSN(page, recptr);
6332 }
6333
6334 END_CRIT_SECTION();
6335
6336 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6337
6338 if (HeapTupleHasExternal(&tp))
6339 {
6340 Assert(!IsToastRelation(relation));
6341 heap_toast_delete(relation, &tp, true);
6342 }
6343
6344 /*
6345 * Never need to mark tuple for invalidation, since catalogs don't support
6346 * speculative insertion
6347 */
6348
6349 /* Now we can release the buffer */
6350 ReleaseBuffer(buffer);
6351
6352 /* count deletion, as we counted the insertion too */
6353 pgstat_count_heap_delete(relation);
6354}
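/*
 * Editorial sketch, not part of heapam.c: the speculative-insertion
 * protocol as a caller sees it.  A tuple is inserted with
 * HEAP_INSERT_SPECULATIVE and a token stamped into t_ctid; after the
 * uniqueness recheck the inserter must call exactly one of
 * heap_finish_speculative() or heap_abort_speculative().  Real callers go
 * through the table AM (see heapam_handler.c); the HEAPAM_EXAMPLES guard
 * and the found_conflict parameter are hypothetical, for illustration.
 */
#ifdef HEAPAM_EXAMPLES
static void
example_speculative_lifecycle(Relation rel, HeapTuple tup, CommandId cid,
							  uint32 spec_token, bool found_conflict)
{
	/* stamp the speculative token into t_ctid before inserting */
	HeapTupleHeaderSetSpeculativeToken(tup->t_data, spec_token);
	heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

	/* ... caller re-checks uniqueness between insert and resolution ... */

	if (!found_conflict)
		heap_finish_speculative(rel, &tup->t_self); /* ctid points at itself */
	else
		heap_abort_speculative(rel, &tup->t_self);	/* tuple becomes DEAD */
}
#endif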
6355
6356/*
6357 * heap_inplace_lock - protect inplace update from concurrent heap_update()
6358 *
6359 * Evaluate whether the tuple's state is compatible with a no-key update.
6360 * Current transaction rowmarks are fine, as is KEY SHARE from any
6361 * transaction. If compatible, return true with the buffer exclusive-locked,
6362 * and the caller must release that by calling
6363 * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6364 * an error. Otherwise, call release_callback(arg), wait for blocking
6365 * transactions to end, and return false.
6366 *
6367 * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6368 * DDL, this doesn't guarantee any particular predicate locking.
6369 *
6370 * heap_delete() is a rarer source of blocking transactions (xwait). We'll
6371 * wait for such a transaction just like for the normal heap_update() case.
6372 * Normal concurrent DROP commands won't cause that, because all inplace
6373 * updaters take some lock that conflicts with DROP. An explicit SQL "DELETE
6374 * FROM pg_class" can cause it. By waiting, if the concurrent transaction
6375 * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
6376 * can find the successor tuple.
6377 *
6378 * Readers of inplace-updated fields expect changes to those fields are
6379 * durable. For example, vac_truncate_clog() reads datfrozenxid from
6380 * pg_database tuples via catalog snapshots. A future snapshot must not
6381 * return a lower datfrozenxid for the same database OID (lower in the
6382 * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6383 * tuple can start while we hold a lock on its buffer. In cases like
6384 * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6385 * to this transaction. ROLLBACK then is one case where it's okay to lose
6386 * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6387 * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6388 * committed tuple.)
6389 *
6390 * In principle, we could avoid waiting by overwriting every tuple in the
6391 * updated tuple chain. Reader expectations permit updating a tuple only if
6392 * it's aborted, is the tail of the chain, or we already updated the tuple
6393 * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6394 * order from tail to head. That would imply either (a) mutating all tuples
6395 * in one critical section or (b) accepting a chance of partial completion.
6396 * Partial completion of a relfrozenxid update would have the weird
6397 * consequence that the table's next VACUUM could see the table's relfrozenxid
6398 * move forward between vacuum_get_cutoffs() and finishing.
6399 */
6400bool
6401heap_inplace_lock(Relation relation,
6402 HeapTuple oldtup_ptr, Buffer buffer,
6403 void (*release_callback) (void *), void *arg)
6404{
6405 HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6406 TM_Result result;
6407 bool ret;
6408
6409#ifdef USE_ASSERT_CHECKING
6410 if (RelationGetRelid(relation) == RelationRelationId)
6411 check_inplace_rel_lock(oldtup_ptr);
6412#endif
6413
6414 Assert(BufferIsValid(buffer));
6415
6416 /*
6417 * Register shared cache invals if necessary. Other sessions may finish
6418 * inplace updates of this tuple between this step and LockTuple(). Since
6419 * inplace updates don't change cache keys, that's harmless.
6420 *
6421 * While it's tempting to register invals only after confirming we can
6422 * return true, the following obstacle precludes reordering steps that
6423 * way. Registering invals might reach a CatalogCacheInitializeCache()
6424 * that locks "buffer". That would hang indefinitely if running after our
6425 * own LockBuffer(). Hence, we must register invals before LockBuffer().
6426 */
6427 CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
6428
6429 LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6430 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6431
6432 /*----------
6433 * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6434 *
6435 * - wait unconditionally
6436 * - already locked tuple above, since inplace needs that unconditionally
6437 * - don't recheck header after wait: simpler to defer to next iteration
6438 * - don't try to continue even if the updater aborts: likewise
6439 * - no crosscheck
6440 */
6441 result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
6442 buffer);
6443
6444 if (result == TM_Invisible)
6445 {
6446 /* no known way this can happen */
6447 ereport(ERROR,
6448 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6449 errmsg_internal("attempted to overwrite invisible tuple")));
6450 }
6451 else if (result == TM_SelfModified)
6452 {
6453 /*
6454 * CREATE INDEX might reach this if an expression is silly enough to
6455 * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6456 * statements might get here after a heap_update() of the same row, in
6457 * the absence of an intervening CommandCounterIncrement().
6458 */
6459 ereport(ERROR,
6460 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6461 errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6462 }
6463 else if (result == TM_BeingModified)
6464 {
6465 TransactionId xwait;
6466 uint16 infomask;
6467
6468 xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
6469 infomask = oldtup.t_data->t_infomask;
6470
6471 if (infomask & HEAP_XMAX_IS_MULTI)
6472 {
6473 LockTupleMode lockmode = LockTupleNoKeyExclusive;
6474 MultiXactStatus mxact_status = get_mxact_status_for_lock(lockmode, false);
6475 int remain;
6476
6477 if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
6478 lockmode, NULL))
6479 {
6480 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6481 release_callback(arg);
6482 ret = false;
6483 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
6484 relation, &oldtup.t_self, XLTW_Update,
6485 &remain);
6486 }
6487 else
6488 ret = true;
6489 }
6490 else if (TransactionIdIsCurrentTransactionId(xwait))
6491 ret = true;
6492 else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
6493 ret = true;
6494 else
6495 {
6496 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6497 release_callback(arg);
6498 ret = false;
6499 XactLockTableWait(xwait, relation, &oldtup.t_self,
6500 XLTW_Update);
6501 }
6502 }
6503 else
6504 {
6505 ret = (result == TM_Ok);
6506 if (!ret)
6507 {
6508 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6509 release_callback(arg);
6510 }
6511 }
6512
6513 /*
6514 * GetCatalogSnapshot() relies on invalidation messages to know when to
6515 * take a new snapshot. COMMIT of xwait is responsible for sending the
6516 * invalidation. We're not acquiring heavyweight locks sufficient to
6517 * block if not yet sent, so we must take a new snapshot to ensure a later
6518 * attempt has a fair chance. While we don't need this if xwait aborted,
6519 * don't bother optimizing that.
6520 */
6521 if (!ret)
6522 {
6523 UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6524 ForgetInplace_Inval();
6525 InvalidateCatalogSnapshot();
6526 }
6527 return ret;
6528}
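/*
 * Editorial sketch, not part of heapam.c: callers normally reach
 * heap_inplace_lock() via the genam.c wrappers, which loop until the tuple
 * is quiescent and then either write or back out.  Assumes access/genam.h
 * and the pg_class catalog; the HEAPAM_EXAMPLES guard and the "dirty" flag
 * are hypothetical, and error handling is elided.
 */
#ifdef HEAPAM_EXAMPLES
#include "access/genam.h"		/* systable_inplace_update_* wrappers */

static void
example_inplace_update_pg_class(Relation pg_class, ScanKeyData *key,
								bool dirty)
{
	HeapTuple	oldtup;
	void	   *state;

	/* retries heap_inplace_lock() until it returns true */
	systable_inplace_update_begin(pg_class, ClassOidIndexId, true,
								  NULL, 1, key, &oldtup, &state);

	/* ... mutate fields of oldtup in place; length must not change ... */

	if (dirty)
		systable_inplace_update_finish(state, oldtup);	/* WAL, memcpy, unlock */
	else
		systable_inplace_update_cancel(state);	/* just unlock */
}
#endif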
6529
6530/*
6531 * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6532 *
6533 * The tuple cannot change size, and therefore its header fields and null
6534 * bitmap (if any) don't change either.
6535 *
6536 * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6537 */
6538void
6539heap_inplace_update_and_unlock(Relation relation,
6540 HeapTuple oldtup, HeapTuple tuple,
6541 Buffer buffer)
6542{
6543 HeapTupleHeader htup = oldtup->t_data;
6544 uint32 oldlen;
6545 uint32 newlen;
6546 char *dst;
6547 char *src;
6548 int nmsgs = 0;
6549 SharedInvalidationMessage *invalMessages = NULL;
6550 bool RelcacheInitFileInval = false;
6551
6552 Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6553 oldlen = oldtup->t_len - htup->t_hoff;
6554 newlen = tuple->t_len - tuple->t_data->t_hoff;
6555 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6556 elog(ERROR, "wrong tuple length");
6557
6558 dst = (char *) htup + htup->t_hoff;
6559 src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6560
6561 /* Like RecordTransactionCommit(), log only if needed */
6562 if (XLogStandbyInfoActive())
6563 nmsgs = inplaceGetInvalidationMessages(&invalMessages,
6564 &RelcacheInitFileInval);
6565
6566 /*
6567 * Unlink relcache init files as needed. If unlinking, acquire
6568 * RelCacheInitLock until after associated invalidations. By doing this
6569 * in advance, if we checkpoint and then crash between inplace
6570 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6571 * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6572 * neglect to PANIC on EIO.
6573 */
6574 PreInplace_Inval();
6575
6576 /*----------
6577 * NO EREPORT(ERROR) from here till changes are complete
6578 *
6579 * Our buffer lock won't stop a reader having already pinned and checked
6580 * visibility for this tuple. Hence, we write WAL first, then mutate the
6581 * buffer. Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
6582 * checkpoint delay makes that acceptable. With the usual order of
6583 * changes, a crash after memcpy() and before XLogInsert() could allow
6584 * datfrozenxid to overtake relfrozenxid:
6585 *
6586 * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6587 * ["R" is a VACUUM tbl]
6588 * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6589 * D: systable_getnext() returns pg_class tuple of tbl
6590 * R: memcpy() into pg_class tuple of tbl
6591 * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6592 * [crash]
6593 * [recovery restores datfrozenxid w/o relfrozenxid]
6594 *
6595 * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
6596 * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
6597 * The stack copy facilitates a FPI of the post-mutation block before we
6598 * accept other sessions seeing it. DELAY_CHKPT_START allows us to
6599 * XLogInsert() before MarkBufferDirty(). Since XLogSaveBufferForHint()
6600 * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
6601 * This function, however, likely could avoid it with the following order
6602 * of operations: MarkBufferDirty(), XLogInsert(), memcpy(). Opt to use
6603 * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
6604 * patterns to analyze. Inplace update isn't so frequent that it should
6605 * pursue the small optimization of skipping DELAY_CHKPT_START.
6606 */
6607 Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
6608 START_CRIT_SECTION();
6609 MyProc->delayChkptFlags |= DELAY_CHKPT_START;
6610
6611 /* XLOG stuff */
6612 if (RelationNeedsWAL(relation))
6613 {
6614 xl_heap_inplace xlrec;
6615 PGAlignedBlock copied_buffer;
6616 char *origdata = (char *) BufferGetBlock(buffer);
6617 Page page = BufferGetPage(buffer);
6618 uint16 lower = ((PageHeader) page)->pd_lower;
6619 uint16 upper = ((PageHeader) page)->pd_upper;
6620 uintptr_t dst_offset_in_block;
6621 RelFileLocator rlocator;
6622 ForkNumber forkno;
6623 BlockNumber blkno;
6624 XLogRecPtr recptr;
6625
6626 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6627 xlrec.dbId = MyDatabaseId;
6628 xlrec.tsId = MyDatabaseTableSpace;
6629 xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6630 xlrec.nmsgs = nmsgs;
6631
6632 XLogBeginInsert();
6633 XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
6634 if (nmsgs != 0)
6635 XLogRegisterData(invalMessages,
6636 nmsgs * sizeof(SharedInvalidationMessage));
6637
6638 /* register block matching what buffer will look like after changes */
6639 memcpy(copied_buffer.data, origdata, lower);
6640 memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
6641 dst_offset_in_block = dst - origdata;
6642 memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
6643 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6644 Assert(forkno == MAIN_FORKNUM);
6645 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6646 REGBUF_STANDARD);
6647 XLogRegisterBufData(0, src, newlen);
6648
6649 /* inplace updates aren't decoded atm, don't log the origin */
6650
6651 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6652
6653 PageSetLSN(page, recptr);
6654 }
6655
6656 memcpy(dst, src, newlen);
6657
6658 MarkBufferDirty(buffer);
6659
6660 END_CRIT_SECTION();
6661
6662 /*
6663 * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6664 * do this before UnlockTuple().
6665 */
6666 AtInplace_Inval();
6667
6668 MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
6669 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6670 UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6671
6672 AcceptInvalidationMessages(); /* local processing of just-sent inval */
6673
6674 /*
6675 * Queue a transactional inval, for logical decoding and for third-party
6676 * code that might have been relying on it since long before inplace
6677 * update adopted immediate invalidation. See README.tuplock section
6678 * "Reading inplace-updated columns" for logical decoding details.
6679 */
6680 if (!IsBootstrapProcessingMode())
6681 CacheInvalidateHeapTuple(relation, tuple, NULL);
6682}
6683
6684/*
6685 * heap_inplace_unlock - reverse of heap_inplace_lock
6686 */
6687void
6688heap_inplace_unlock(Relation relation,
6689 HeapTuple oldtup, Buffer buffer)
6690{
6691 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6692 UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6693 ForgetInplace_Inval();
6694}
6695
6696#define FRM_NOOP 0x0001
6697#define FRM_INVALIDATE_XMAX 0x0002
6698#define FRM_RETURN_IS_XID 0x0004
6699#define FRM_RETURN_IS_MULTI 0x0008
6700#define FRM_MARK_COMMITTED 0x0010
6701
6702/*
6703 * FreezeMultiXactId
6704 * Determine what to do during freezing when a tuple is marked by a
6705 * MultiXactId.
6706 *
6707 * "flags" is an output value; it's used to tell caller what to do on return.
6708 * "pagefrz" is an input/output value, used to manage page level freezing.
6709 *
6710 * Possible values that we can set in "flags":
6711 * FRM_NOOP
6712 * don't do anything -- keep existing Xmax
6713 * FRM_INVALIDATE_XMAX
6714 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6715 * FRM_RETURN_IS_XID
6716 * The Xid return value is a single update Xid to set as xmax.
6717 * FRM_MARK_COMMITTED
6718 * Xmax can be marked as HEAP_XMAX_COMMITTED
6719 * FRM_RETURN_IS_MULTI
6720 * The return value is a new MultiXactId to set as new Xmax.
6721 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6722 *
6723 * Caller delegates control of page freezing to us. In practice we always
6724 * force freezing of caller's page unless FRM_NOOP processing is indicated.
6725 * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6726 * can never be left behind. We freely choose when and how to process each
6727 * Multi, without ever violating the cutoff postconditions for freezing.
6728 *
6729 * It's useful to remove Multis on a proactive timeline (relative to freezing
6730 * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6731 * be cheaper in the short run, for us, since we too can avoid SLRU buffer
6732 * misses through eager processing.
6733 *
6734 * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6735 * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6736 * This can usually be put off, which is usually enough to avoid it altogether.
6737 * Allocating new multis during VACUUM should be avoided on general principle;
6738 * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6739 * its own special risks.
6740 *
6741 * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6742 * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6743 *
6744 * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6745 * have already forced page-level freezing, since that might incur the same
6746 * SLRU buffer misses that we specifically intended to avoid by freezing.
6747 */
6748static TransactionId
6749FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6750 const struct VacuumCutoffs *cutoffs, uint16 *flags,
6751 HeapPageFreeze *pagefrz)
6752{
6753 TransactionId newxmax;
6754 MultiXactMember *members;
6755 int nmembers;
6756 bool need_replace;
6757 int nnewmembers;
6758 MultiXactMember *newmembers;
6759 bool has_lockers;
6760 TransactionId update_xid;
6761 bool update_committed;
6762 TransactionId FreezePageRelfrozenXid;
6763
6764 *flags = 0;
6765
6766 /* We should only be called in Multis */
6767 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6768
6769 if (!MultiXactIdIsValid(multi) ||
6770 HEAP_LOCKED_UPGRADED(t_infomask))
6771 {
6772 *flags |= FRM_INVALIDATE_XMAX;
6773 pagefrz->freeze_required = true;
6774 return InvalidTransactionId;
6775 }
6776 else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6777 ereport(ERROR,
6778 (errcode(ERRCODE_DATA_CORRUPTED),
6779 errmsg_internal("found multixact %u from before relminmxid %u",
6780 multi, cutoffs->relminmxid)));
6781 else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6782 {
6783 TransactionId update_xact;
6784
6785 /*
6786 * This old multi cannot possibly have members still running, but
6787 * verify just in case. If it was a locker only, it can be removed
6788 * without any further consideration; but if it contained an update,
6789 * we might need to preserve it.
6790 */
6791 if (MultiXactIdIsRunning(multi,
6792 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6793 ereport(ERROR,
6794 (errcode(ERRCODE_DATA_CORRUPTED),
6795 errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6796 multi, cutoffs->OldestMxact)));
6797
6798 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6799 {
6800 *flags |= FRM_INVALIDATE_XMAX;
6801 pagefrz->freeze_required = true;
6802 return InvalidTransactionId;
6803 }
6804
6805 /* replace multi with single XID for its updater? */
6806 update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6807 if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
6808 ereport(ERROR,
6809 (errcode(ERRCODE_DATA_CORRUPTED),
6810 errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6811 multi, update_xact,
6812 cutoffs->relfrozenxid)));
6813 else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6814 {
6815 /*
6816 * Updater XID has to have aborted (otherwise the tuple would have
6817 * been pruned away instead, since updater XID is < OldestXmin).
6818 * Just remove xmax.
6819 */
6820 if (TransactionIdDidCommit(update_xact))
6821 ereport(ERROR,
6822 (errcode(ERRCODE_DATA_CORRUPTED),
6823 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6824 multi, update_xact,
6825 cutoffs->OldestXmin)));
6826 *flags |= FRM_INVALIDATE_XMAX;
6827 pagefrz->freeze_required = true;
6828 return InvalidTransactionId;
6829 }
6830
6831 /* Have to keep updater XID as new xmax */
6832 *flags |= FRM_RETURN_IS_XID;
6833 pagefrz->freeze_required = true;
6834 return update_xact;
6835 }
6836
6837 /*
6838 * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6839 * need to walk the whole members array to figure out what to do, if
6840 * anything.
6841 */
6842 nmembers =
6843 GetMultiXactIdMembers(multi, &members, false,
6844 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6845 if (nmembers <= 0)
6846 {
6847 /* Nothing worth keeping */
6848 *flags |= FRM_INVALIDATE_XMAX;
6849 pagefrz->freeze_required = true;
6850 return InvalidTransactionId;
6851 }
6852
6853 /*
6854 * The FRM_NOOP case is the only case where we might need to ratchet back
6855 * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6856 * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6857 * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6858 * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6859 * trackers managed by VACUUM being ratcheted back by xmax to the degree
6860 * required to make it safe to leave xmax undisturbed, independent of
6861 * whether or not page freezing is triggered somewhere else.
6862 *
6863 * Our policy is to force freezing in every case other than FRM_NOOP,
6864 * which obviates the need to maintain either set of trackers, anywhere.
6865 * Every other case will reliably execute a freeze plan for xmax that
6866 * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6867 * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6868 * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6869 * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6870 */
6871 need_replace = false;
6872 FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6873 for (int i = 0; i < nmembers; i++)
6874 {
6875 TransactionId xid = members[i].xid;
6876
6877 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6878
6879 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6880 {
6881 /* Can't violate the FreezeLimit postcondition */
6882 need_replace = true;
6883 break;
6884 }
6885 if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6886 FreezePageRelfrozenXid = xid;
6887 }
6888
6889 /* Can't violate the MultiXactCutoff postcondition, either */
6890 if (!need_replace)
6891 need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
6892
6893 if (!need_replace)
6894 {
6895 /*
6896 * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6897 * both together to make it safe to retain this particular multi after
6898 * freezing its page
6899 */
6900 *flags |= FRM_NOOP;
6901 pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6902 if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6903 pagefrz->FreezePageRelminMxid = multi;
6904 pfree(members);
6905 return multi;
6906 }
6907
6908 /*
6909 * Do a more thorough second pass over the multi to figure out which
6910 * member XIDs actually need to be kept. Checking the precise status of
6911 * individual members might even show that we don't need to keep anything.
6912 * That is quite possible even though the Multi must be >= OldestMxact,
6913 * since our second pass only keeps member XIDs when it's truly necessary;
6914 * even member XIDs >= OldestXmin often won't be kept by second pass.
6915 */
6916 nnewmembers = 0;
6917 newmembers = palloc_array(MultiXactMember, nmembers);
6918 has_lockers = false;
6919 update_xid = InvalidTransactionId;
6920 update_committed = false;
6921
6922 /*
6923 * Determine whether to keep each member xid, or to ignore it instead
6924 */
6925 for (int i = 0; i < nmembers; i++)
6926 {
6927 TransactionId xid = members[i].xid;
6928 MultiXactStatus mstatus = members[i].status;
6929
6930 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6931
6932 if (!ISUPDATE_from_mxstatus(mstatus))
6933 {
6934 /*
6935 * Locker XID (not updater XID). We only keep lockers that are
6936 * still running.
6937 */
6938 if (TransactionIdIsCurrentTransactionId(xid) ||
6939 TransactionIdIsInProgress(xid))
6940 {
6941 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6942 ereport(ERROR,
6943 (errcode(ERRCODE_DATA_CORRUPTED),
6944 errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6945 multi, xid,
6946 cutoffs->OldestXmin)));
6947 newmembers[nnewmembers++] = members[i];
6948 has_lockers = true;
6949 }
6950
6951 continue;
6952 }
6953
6954 /*
6955 * Updater XID (not locker XID). Should we keep it?
6956 *
6957 * Since the tuple wasn't totally removed when vacuum pruned, the
6958 * update Xid cannot possibly be older than OldestXmin cutoff unless
6959 * the updater XID aborted. If the updater transaction is known
6960 * aborted or crashed then it's okay to ignore it, otherwise not.
6961 *
6962 * In any case the Multi should never contain two updaters, whatever
6963 * their individual commit status. Check for that first, in passing.
6964 */
6965 if (TransactionIdIsValid(update_xid))
6966 ereport(ERROR,
6967 (errcode(ERRCODE_DATA_CORRUPTED),
6968 errmsg_internal("multixact %u has two or more updating members",
6969 multi),
6970 errdetail_internal("First updater XID=%u second updater XID=%u.",
6971 update_xid, xid)));
6972
6973 /*
6974 * As with all tuple visibility routines, it's critical to test
6975 * TransactionIdIsInProgress before TransactionIdDidCommit, because of
6976 * race conditions explained in detail in heapam_visibility.c.
6977 */
6978 if (TransactionIdIsCurrentTransactionId(xid) ||
6979 TransactionIdIsInProgress(xid))
6980 update_xid = xid;
6981 else if (TransactionIdDidCommit(xid))
6982 {
6983 /*
6984 * The transaction committed, so we can tell caller to set
6985 * HEAP_XMAX_COMMITTED. (We can only do this because we know the
6986 * transaction is not running.)
6987 */
6988 update_committed = true;
6989 update_xid = xid;
6990 }
6991 else
6992 {
6993 /*
6994 * Not in progress, not committed -- must be aborted or crashed;
6995 * we can ignore it.
6996 */
6997 continue;
6998 }
6999
7000 /*
7001 * We determined that updater must be kept -- add it to pending new
7002 * members list
7003 */
7004 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
7005 ereport(ERROR,
7006 (errcode(ERRCODE_DATA_CORRUPTED),
7007 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
7008 multi, xid, cutoffs->OldestXmin)));
7009 newmembers[nnewmembers++] = members[i];
7010 }
7011
7012 pfree(members);
7013
7014 /*
7015 * Determine what to do with caller's multi based on information gathered
7016 * during our second pass
7017 */
7018 if (nnewmembers == 0)
7019 {
7020 /* Nothing worth keeping */
7021 *flags |= FRM_INVALIDATE_XMAX;
7022 newxmax = InvalidTransactionId;
7023 }
7024 else if (TransactionIdIsValid(update_xid) && !has_lockers)
7025 {
7026 /*
7027 * If there's a single member and it's an update, pass it back alone
7028 * without creating a new Multi. (XXX we could do this when there's a
7029 * single remaining locker, too, but that would complicate the API too
7030 * much; moreover, the case with the single updater is more
7031 * interesting, because those are longer-lived.)
7032 */
7033 Assert(nnewmembers == 1);
7034 *flags |= FRM_RETURN_IS_XID;
7035 if (update_committed)
7036 *flags |= FRM_MARK_COMMITTED;
7037 newxmax = update_xid;
7038 }
7039 else
7040 {
7041 /*
7042 * Create a new multixact with the surviving members of the previous
7043 * one, to set as new Xmax in the tuple
7044 */
7045 newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
7046 *flags |= FRM_RETURN_IS_MULTI;
7047 }
7048
7049 pfree(newmembers);
7050
7051 pagefrz->freeze_required = true;
7052 return newxmax;
7053}
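/*
 * Editorial sketch, not part of heapam.c: how a caller consumes the
 * "flags" contract documented above.  This mirrors the dispatch that
 * heap_prepare_freeze_tuple() performs on FreezeMultiXactId()'s output;
 * the HEAPAM_EXAMPLES guard is hypothetical.
 */
#ifdef HEAPAM_EXAMPLES
static void
example_consume_frm_flags(uint16 flags, TransactionId newxmax,
						  HeapTupleFreeze *frz)
{
	if (flags & FRM_NOOP)
		return;					/* leave the existing multi in place */

	frz->t_infomask &= ~HEAP_XMAX_BITS; /* every other case replaces xmax */
	if (flags & FRM_INVALIDATE_XMAX)
	{
		frz->xmax = InvalidTransactionId;
		frz->t_infomask |= HEAP_XMAX_INVALID;
	}
	else if (flags & FRM_RETURN_IS_XID)
	{
		frz->xmax = newxmax;	/* the single updater XID survives */
		if (flags & FRM_MARK_COMMITTED)
			frz->t_infomask |= HEAP_XMAX_COMMITTED;
	}
	else if (flags & FRM_RETURN_IS_MULTI)
	{
		uint16		newbits;
		uint16		newbits2;

		/* replacement multi: recompute hint bits, per the NB above */
		GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
		frz->t_infomask |= newbits;
		frz->t_infomask2 |= newbits2;
		frz->xmax = newxmax;
	}
}
#endif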
7054
7055/*
7056 * heap_prepare_freeze_tuple
7057 *
7058 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7059 * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7060 * setup enough state (in the *frz output argument) to enable caller to
7061 * process this tuple as part of freezing its page, and return true. Return
7062 * false if nothing can be changed about the tuple right now.
7063 *
7064 * Also sets *totally_frozen to true if the tuple will be totally frozen once
7065 * caller executes returned freeze plan (or if the tuple was already totally
7066 * frozen by an earlier VACUUM). This indicates that there are no remaining
7067 * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7068 *
7069 * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7070 * tuple that we returned true for, and then execute freezing. Caller must
7071 * initialize pagefrz fields for page as a whole before first call here for
7072 * each heap page.
7073 *
7074 * VACUUM caller decides on whether or not to freeze the page as a whole.
7075 * We'll often prepare freeze plans for a page that caller just discards.
7076 * However, VACUUM doesn't always get to make a choice; it must freeze when
7077 * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7078 * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7079 * that VACUUM always follows that rule.
7080 *
7081 * We sometimes force freezing of xmax MultiXactId values long before it is
7082 * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7083 * It's worth processing MultiXactIds proactively when it is cheap to do so,
7084 * and it's convenient to make that happen by piggy-backing it on the "force
7085 * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7086 * because it is expensive right now (though only when it's still possible to
7087 * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7088 *
7089 * It is assumed that the caller has checked the tuple with
7090 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7091 * (else we should be removing the tuple, not freezing it).
7092 *
7093 * NB: This function has side effects: it might allocate a new MultiXactId.
7094 * It will be set as tuple's new xmax when our *frz output is processed within
7095 * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7096 * then caller had better have an exclusive lock on it already.
7097 */
7098bool
7099heap_prepare_freeze_tuple(HeapTupleHeader tuple,
7100 const struct VacuumCutoffs *cutoffs,
7101 HeapPageFreeze *pagefrz,
7102 HeapTupleFreeze *frz, bool *totally_frozen)
7103{
7104 bool xmin_already_frozen = false,
7105 xmax_already_frozen = false;
7106 bool freeze_xmin = false,
7107 replace_xvac = false,
7108 replace_xmax = false,
7109 freeze_xmax = false;
7110 TransactionId xid;
7111
7112 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7113 frz->t_infomask2 = tuple->t_infomask2;
7114 frz->t_infomask = tuple->t_infomask;
7115 frz->frzflags = 0;
7116 frz->checkflags = 0;
7117
7118 /*
7119 * Process xmin, while keeping track of whether it's already frozen, or
7120 * will become frozen iff our freeze plan is executed by caller (could be
7121 * neither).
7122 */
7123 xid = HeapTupleHeaderGetXmin(tuple);
7124 if (!TransactionIdIsNormal(xid))
7125 xmin_already_frozen = true;
7126 else
7127 {
7128 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7129 ereport(ERROR,
7130 (errcode(ERRCODE_DATA_CORRUPTED),
7131 errmsg_internal("found xmin %u from before relfrozenxid %u",
7132 xid, cutoffs->relfrozenxid)));
7133
7134 /* Will set freeze_xmin flags in freeze plan below */
7135 freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7136
7137 /* Verify that xmin committed if and when freeze plan is executed */
7138 if (freeze_xmin)
7139 frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
7140 }
7141
7142 /*
7143 * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7144 * as we support having MOVED_OFF/MOVED_IN tuples in the database
7145 */
7146 xid = HeapTupleHeaderGetXvac(tuple);
7147 if (TransactionIdIsNormal(xid))
7148 {
7149 Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7150 Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7151
7152 /*
7153 * For Xvac, we always freeze proactively. This allows totally_frozen
7154 * tracking to ignore xvac.
7155 */
7156 replace_xvac = pagefrz->freeze_required = true;
7157
7158 /* Will set replace_xvac flags in freeze plan below */
7159 }
7160
7161 /* Now process xmax */
7162 xid = frz->xmax;
7163 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7164 {
7165 /* Raw xmax is a MultiXactId */
7166 TransactionId newxmax;
7167 uint16 flags;
7168
7169 /*
7170 * We will either remove xmax completely (in the "freeze_xmax" path),
7171 * process xmax by replacing it (in the "replace_xmax" path), or
7172 * perform no-op xmax processing. The only constraint is that the
7173 * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7174 */
7175 newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7176 &flags, pagefrz);
7177
7178 if (flags & FRM_NOOP)
7179 {
7180 /*
7181 * xmax is a MultiXactId, and nothing about it changes for now.
7182 * This is the only case where 'freeze_required' won't have been
7183 * set for us by FreezeMultiXactId, as well as the only case where
7184 * neither freeze_xmax nor replace_xmax are set (given a multi).
7185 *
7186 * This is a no-op, but the call to FreezeMultiXactId might have
7187 * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7188 * for us (the "freeze page" variants, specifically). That'll
7189 * make it safe for our caller to freeze the page later on, while
7190 * leaving this particular xmax undisturbed.
7191 *
7192 * FreezeMultiXactId is _not_ responsible for the "no freeze"
7193 * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7194 * job. A call to heap_tuple_should_freeze for this same tuple
7195 * will take place below if 'freeze_required' isn't set already.
7196 * (This repeats work from FreezeMultiXactId, but allows "no
7197 * freeze" tracker maintenance to happen in only one place.)
7198 */
7199 Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
7200 Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
7201 }
7202 else if (flags & FRM_RETURN_IS_XID)
7203 {
7204 /*
7205 * xmax will become an updater Xid (original MultiXact's updater
7206 * member Xid will be carried forward as a simple Xid in Xmax).
7207 */
7208 Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
7209
7210 /*
7211 * NB -- some of these transformations are only valid because we
7212 * know the return Xid is a tuple updater (i.e. not merely a
7213 * locker.) Also note that the only reason we don't explicitly
7214 * worry about HEAP_KEYS_UPDATED is because it lives in
7215 * t_infomask2 rather than t_infomask.
7216 */
7217 frz->t_infomask &= ~HEAP_XMAX_BITS;
7218 frz->xmax = newxmax;
7219 if (flags & FRM_MARK_COMMITTED)
7220 frz->t_infomask |= HEAP_XMAX_COMMITTED;
7221 replace_xmax = true;
7222 }
7223 else if (flags & FRM_RETURN_IS_MULTI)
7224 {
7225 uint16 newbits;
7226 uint16 newbits2;
7227
7228 /*
7229 * xmax is an old MultiXactId that we have to replace with a new
7230 * MultiXactId, to carry forward two or more original member XIDs.
7231 */
7232 Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
7233
7234 /*
7235 * We can't use GetMultiXactIdHintBits directly on the new multi
7236 * here; that routine initializes the masks to all zeroes, which
7237 * would lose other bits we need. Doing it this way ensures all
7238 * unrelated bits remain untouched.
7239 */
7240 frz->t_infomask &= ~HEAP_XMAX_BITS;
7241 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7242 GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
7243 frz->t_infomask |= newbits;
7244 frz->t_infomask2 |= newbits2;
7245 frz->xmax = newxmax;
7246 replace_xmax = true;
7247 }
7248 else
7249 {
7250 /*
7251 * Freeze plan for tuple "freezes xmax" in the strictest sense:
7252 * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7253 */
7254 Assert(flags & FRM_INVALIDATE_XMAX);
7255 Assert(!TransactionIdIsValid(newxmax));
7256
7257 /* Will set freeze_xmax flags in freeze plan below */
7258 freeze_xmax = true;
7259 }
7260
7261 /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7262 Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7263 }
7264 else if (TransactionIdIsNormal(xid))
7265 {
7266 /* Raw xmax is normal XID */
7267 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7268 ereport(ERROR,
7269 (errcode(ERRCODE_DATA_CORRUPTED),
7270 errmsg_internal("found xmax %u from before relfrozenxid %u",
7271 xid, cutoffs->relfrozenxid)));
7272
7273 /* Will set freeze_xmax flags in freeze plan below */
7274 freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7275
7276 /*
7277 * Verify that xmax aborted if and when freeze plan is executed,
7278 * provided it's from an update. (A lock-only xmax can be removed
7279 * independent of this, since the lock is released at xact end.)
7280 */
7281 if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
7282 frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7283 }
7284 else if (!TransactionIdIsValid(xid))
7285 {
7286 /* Raw xmax is InvalidTransactionId XID */
7287 Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7288 xmax_already_frozen = true;
7289 }
7290 else
7291 ereport(ERROR,
7292 (errcode(ERRCODE_DATA_CORRUPTED),
7293 errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7294 xid, tuple->t_infomask)));
7295
7296 if (freeze_xmin)
7297 {
7298 Assert(!xmin_already_frozen);
7299
7300 frz->t_infomask |= HEAP_XMIN_FROZEN;
7301 }
7302 if (replace_xvac)
7303 {
7304 /*
7305 * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7306 * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7307 * transaction succeeded.
7308 */
7309 Assert(pagefrz->freeze_required);
7310 if (tuple->t_infomask & HEAP_MOVED_OFF)
7311 frz->frzflags |= XLH_INVALID_XVAC;
7312 else
7313 frz->frzflags |= XLH_FREEZE_XVAC;
7314 }
7315 if (replace_xmax)
7316 {
7317 Assert(!xmax_already_frozen && !freeze_xmax);
7318 Assert(pagefrz->freeze_required);
7319
7320 /* Already set replace_xmax flags in freeze plan earlier */
7321 }
7322 if (freeze_xmax)
7323 {
7324 Assert(!xmax_already_frozen && !replace_xmax);
7325
7326 frz->xmax = InvalidTransactionId;
7327
7328 /*
7329 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7330 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7331 * Also get rid of the HEAP_KEYS_UPDATED bit.
7332 */
7333 frz->t_infomask &= ~HEAP_XMAX_BITS;
7334 frz->t_infomask |= HEAP_XMAX_INVALID;
7335 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7336 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7337 }
7338
7339 /*
7340 * Determine if this tuple is already totally frozen, or will become
7341 * totally frozen (provided caller executes freeze plans for the page)
7342 */
7343 *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
7344 (freeze_xmax || xmax_already_frozen));
7345
7346 if (!pagefrz->freeze_required && !(xmin_already_frozen &&
7347 xmax_already_frozen))
7348 {
7349 /*
7350 * So far no previous tuple from the page made freezing mandatory.
7351 * Does this tuple force caller to freeze the entire page?
7352 */
7353 pagefrz->freeze_required =
7354 heap_tuple_should_freeze(tuple, cutoffs,
7355 &pagefrz->NoFreezePageRelfrozenXid,
7356 &pagefrz->NoFreezePageRelminMxid);
7357 }
7358
7359 /* Tell caller if this tuple has a usable freeze plan set in *frz */
7360 return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
7361}
7362
7363/*
7364 * Perform xmin/xmax XID status sanity checks before actually executing freeze
7365 * plans.
7366 *
7367 * heap_prepare_freeze_tuple doesn't perform these checks directly because
7368 * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7369 * successive VACUUMs that each decide against freezing the same page.
7370 */
7371void
7372heap_pre_freeze_checks(Buffer buffer,
7373 HeapTupleFreeze *tuples, int ntuples)
7374{
7375 Page page = BufferGetPage(buffer);
7376
7377 for (int i = 0; i < ntuples; i++)
7378 {
7379 HeapTupleFreeze *frz = tuples + i;
7380 ItemId itemid = PageGetItemId(page, frz->offset);
7381 HeapTupleHeader htup;
7382
7383 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7384
7385 /* Deliberately avoid relying on tuple hint bits here */
7386 if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7387 {
7388 TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
7389
7390 Assert(!HeapTupleHeaderXminFrozen(htup));
7391 if (unlikely(!TransactionIdDidCommit(xmin)))
7392 ereport(ERROR,
7393 (errcode(ERRCODE_DATA_CORRUPTED),
7394 errmsg_internal("uncommitted xmin %u needs to be frozen",
7395 xmin)));
7396 }
7397
7398 /*
7399 * TransactionIdDidAbort won't work reliably in the presence of XIDs
7400 * left behind by transactions that were in progress during a crash,
7401 * so we can only check that xmax didn't commit
7402 */
7403 if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7404 {
7405 TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
7406
7407 Assert(TransactionIdIsNormal(xmax));
7408 if (unlikely(TransactionIdDidCommit(xmax)))
7409 ereport(ERROR,
7410 (errcode(ERRCODE_DATA_CORRUPTED),
7411 errmsg_internal("cannot freeze committed xmax %u",
7412 xmax)));
7413 }
7414 }
7415}
7416
7417/*
7418 * Helper which executes freezing of one or more heap tuples on a page on
7419 * behalf of caller. Caller passes an array of tuple plans from
7420 * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7421 * Must be called in a critical section that also marks the buffer dirty and,
7422 * if needed, emits WAL.
7423 */
7424void
7425heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
7426{
7427 Page page = BufferGetPage(buffer);
7428
7429 for (int i = 0; i < ntuples; i++)
7430 {
7431 HeapTupleFreeze *frz = tuples + i;
7432 ItemId itemid = PageGetItemId(page, frz->offset);
7433 HeapTupleHeader htup;
7434
7435 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7436 heap_execute_freeze_tuple(htup, frz);
7437 }
7438}
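/*
 * Editor's sketch (not heapam.c code): the caller-side pattern that the
 * helper above requires, loosely modeled on VACUUM. The "frozen" plan array
 * is a hypothetical stand-in, and the WAL record a real caller would build
 * inside the critical section is omitted.
 */
static void
example_freeze_page(Buffer buf, HeapTupleFreeze *frozen, int nfrozen)
{
    /* run the xmin/xmax sanity checks before entering a critical section */
    heap_pre_freeze_checks(buf, frozen, nfrozen);

    START_CRIT_SECTION();

    MarkBufferDirty(buf);
    heap_freeze_prepared_tuples(buf, frozen, nfrozen);

    /* a WAL-logged relation would emit its freeze record here */

    END_CRIT_SECTION();
}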
7439
7440/*
7441 * heap_freeze_tuple
7442 * Freeze tuple in place, without WAL logging.
7443 *
7444 * Useful for callers like CLUSTER that perform their own WAL logging.
7445 */
7446bool
7447heap_freeze_tuple(HeapTupleHeader tuple,
7448 TransactionId relfrozenxid, TransactionId relminmxid,
7449 TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7450{
7451 HeapTupleFreeze frz;
7452 bool do_freeze;
7453 bool totally_frozen;
7454 struct VacuumCutoffs cutoffs;
7455 HeapPageFreeze pagefrz;
7456
7457 cutoffs.relfrozenxid = relfrozenxid;
7458 cutoffs.relminmxid = relminmxid;
7459 cutoffs.OldestXmin = FreezeLimit;
7460 cutoffs.OldestMxact = MultiXactCutoff;
7461 cutoffs.FreezeLimit = FreezeLimit;
7462 cutoffs.MultiXactCutoff = MultiXactCutoff;
7463
7464 pagefrz.freeze_required = true;
7465 pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7466 pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7467 pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7468 pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7469
7470 do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7471 &pagefrz, &frz, &totally_frozen);
7472
7473 /*
7474 * Note that because this is not a WAL-logged operation, we don't need to
7475 * fill in the offset in the freeze record.
7476 */
7477
7478 if (do_freeze)
7479 heap_execute_freeze_tuple(tuple, &frz);
7480 return do_freeze;
7481}
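/*
 * Editor's sketch of a heap_freeze_tuple() call site, as a CLUSTER-style
 * rewrite might use it. The cutoff arguments are assumed to come from the
 * caller's own vacuum_get_cutoffs()-style computation; the surrounding
 * rewrite machinery is omitted.
 */
static void
example_freeze_copied_tuple(HeapTupleHeader tuple,
                            TransactionId relfrozenxid,
                            TransactionId relminmxid,
                            TransactionId FreezeLimit,
                            TransactionId MultiXactCutoff)
{
    if (heap_freeze_tuple(tuple, relfrozenxid, relminmxid,
                          FreezeLimit, MultiXactCutoff))
    {
        /* header was rewritten; this caller WAL-logs the whole page later */
    }
}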
7482
7483/*
7484 * For a given MultiXactId, return the hint bits that should be set in the
7485 * tuple's infomask.
7486 *
7487 * Normally this should be called for a multixact that was just created, and
7488 * so is on our local cache, so the GetMembers call is fast.
7489 */
7490static void
7491GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
7492 uint16 *new_infomask2)
7493{
7494 int nmembers;
7495 MultiXactMember *members;
7496 int i;
7497 uint16 bits = HEAP_XMAX_IS_MULTI;
7498 uint16 bits2 = 0;
7499 bool has_update = false;
7500 LockTupleMode strongest = LockTupleKeyShare;
7501
7502 /*
7503 * We only use this in multis we just created, so they cannot be values
7504 * pre-pg_upgrade.
7505 */
7506 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7507
7508 for (i = 0; i < nmembers; i++)
7509 {
7510 LockTupleMode mode;
7511
7512 /*
7513 * Remember the strongest lock mode held by any member of the
7514 * multixact.
7515 */
7516 mode = TUPLOCK_from_mxstatus(members[i].status);
7517 if (mode > strongest)
7518 strongest = mode;
7519
7520 /* See what other bits we need */
7521 switch (members[i].status)
7522 {
7523 case MultiXactStatusForKeyShare:
7524 case MultiXactStatusForShare:
7525 case MultiXactStatusForNoKeyUpdate:
7526 break;
7527
7528 case MultiXactStatusForUpdate:
7529 bits2 |= HEAP_KEYS_UPDATED;
7530 break;
7531
7532 case MultiXactStatusNoKeyUpdate:
7533 has_update = true;
7534 break;
7535
7536 case MultiXactStatusUpdate:
7537 bits2 |= HEAP_KEYS_UPDATED;
7538 has_update = true;
7539 break;
7540 }
7541 }
7542
7543 if (strongest == LockTupleExclusive ||
7544 strongest == LockTupleNoKeyExclusive)
7545 bits |= HEAP_XMAX_EXCL_LOCK;
7546 else if (strongest == LockTupleShare)
7547 bits |= HEAP_XMAX_SHR_LOCK;
7548 else if (strongest == LockTupleKeyShare)
7549 bits |= HEAP_XMAX_KEYSHR_LOCK;
7550
7551 if (!has_update)
7552 bits |= HEAP_XMAX_LOCK_ONLY;
7553
7554 if (nmembers > 0)
7555 pfree(members);
7556
7557 *new_infomask = bits;
7558 *new_infomask2 = bits2;
7559}
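/*
 * Editor's standalone sketch (hypothetical demo types, not server code) of
 * the accumulation above: the strongest member lock decides the lock hint
 * bit, and any updater member clears the lock-only property.
 */
#include <stdbool.h>

enum demo_lockmode { DEMO_KEYSHARE, DEMO_SHARE, DEMO_NOKEYEXCL, DEMO_EXCL };

struct demo_member
{
    enum demo_lockmode mode;
    bool        is_update;
};

static void
demo_summarize_members(const struct demo_member *members, int n,
                       enum demo_lockmode *strongest, bool *lock_only)
{
    *strongest = DEMO_KEYSHARE; /* weakest possible mode */
    *lock_only = true;

    for (int i = 0; i < n; i++)
    {
        if (members[i].mode > *strongest)   /* enum order encodes strength */
            *strongest = members[i].mode;
        if (members[i].is_update)           /* mirrors HEAP_XMAX_LOCK_ONLY */
            *lock_only = false;
    }
}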
7560
7561/*
7562 * MultiXactIdGetUpdateXid
7563 *
7564 * Given a multixact Xmax and corresponding infomask, which does not have the
7565 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7566 * transaction.
7567 *
7568 * Caller is expected to check the status of the updating transaction, if
7569 * necessary.
7570 */
7571static TransactionId
7572MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
7573{
7574 TransactionId update_xact = InvalidTransactionId;
7575 MultiXactMember *members;
7576 int nmembers;
7577
7578 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7579 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7580
7581 /*
7582 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7583 * pre-pg_upgrade.
7584 */
7585 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7586
7587 if (nmembers > 0)
7588 {
7589 int i;
7590
7591 for (i = 0; i < nmembers; i++)
7592 {
7593 /* Ignore lockers */
7594 if (!ISUPDATE_from_mxstatus(members[i].status))
7595 continue;
7596
7597 /* there can be at most one updater */
7598 Assert(update_xact == InvalidTransactionId);
7599 update_xact = members[i].xid;
7600#ifndef USE_ASSERT_CHECKING
7601
7602 /*
7603 * in an assert-enabled build, walk the whole array to ensure
7604 * there's no other updater.
7605 */
7606 break;
7607#endif
7608 }
7609
7610 pfree(members);
7611 }
7612
7613 return update_xact;
7614}
7615
7616/*
7617 * HeapTupleGetUpdateXid
7618 * As above, but use a HeapTupleHeader
7619 *
7620 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7621 * checking the hint bits.
7622 */
7623TransactionId
7624HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
7625{
7626 return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
7627 tup->t_infomask);
7628}
7629
7630/*
7631 * Does the given multixact conflict with the current transaction grabbing a
7632 * tuple lock of the given strength?
7633 *
7634 * The passed infomask pairs up with the given multixact in the tuple header.
7635 *
7636 * If current_is_member is not NULL, it is set to 'true' if the current
7637 * transaction is a member of the given multixact.
7638 */
7639static bool
7640DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7641 LockTupleMode lockmode, bool *current_is_member)
7642{
7643 int nmembers;
7644 MultiXactMember *members;
7645 bool result = false;
7646 LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7647
7648 if (HEAP_LOCKED_UPGRADED(infomask))
7649 return false;
7650
7651 nmembers = GetMultiXactIdMembers(multi, &members, false,
7652 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7653 if (nmembers >= 0)
7654 {
7655 int i;
7656
7657 for (i = 0; i < nmembers; i++)
7658 {
7659 TransactionId memxid;
7660 LOCKMODE memlockmode;
7661
7662 if (result && (current_is_member == NULL || *current_is_member))
7663 break;
7664
7665 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7666
7667 /* ignore members from current xact (but track their presence) */
7668 memxid = members[i].xid;
7669 if (TransactionIdIsCurrentTransactionId(memxid))
7670 {
7671 if (current_is_member != NULL)
7672 *current_is_member = true;
7673 continue;
7674 }
7675 else if (result)
7676 continue;
7677
7678 /* ignore members that don't conflict with the lock we want */
7679 if (!DoLockModesConflict(memlockmode, wanted))
7680 continue;
7681
7682 if (ISUPDATE_from_mxstatus(members[i].status))
7683 {
7684 /* ignore aborted updaters */
7685 if (TransactionIdDidAbort(memxid))
7686 continue;
7687 }
7688 else
7689 {
7690 /* ignore lockers-only that are no longer in progress */
7691 if (!TransactionIdIsInProgress(memxid))
7692 continue;
7693 }
7694
7695 /*
7696 * Whatever remains are either live lockers that conflict with our
7697 * wanted lock, or updaters that are not aborted. Those conflict
7698 * with what we want. Set up to return true, but keep going to
7699 * look for the current transaction among the multixact members,
7700 * if needed.
7701 */
7702 result = true;
7703 }
7704 pfree(members);
7705 }
7706
7707 return result;
7708}
7709
7710/*
7711 * Do_MultiXactIdWait
7712 * Actual implementation for the two functions below.
7713 *
7714 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7715 * needed to ensure we only sleep on conflicting members, and the infomask is
7716 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7717 * indicates whether to use conditional lock acquisition, to allow callers to
7718 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7719 * context information for error messages. 'remaining', if not NULL, receives
7720 * the number of members that are still running, including any (non-aborted)
7721 * subtransactions of our own transaction. 'logLockFailure' indicates whether
7722 * to log details when a lock acquisition fails with 'nowait' enabled.
7723 *
7724 * We do this by sleeping on each member using XactLockTableWait. Any
7725 * members that belong to the current backend are *not* waited for, however;
7726 * this would not merely be useless but would lead to Assert failure inside
7727 * XactLockTableWait. By the time this returns, it is certain that all
7728 * transactions *of other backends* that were members of the MultiXactId
7729 * that conflict with the requested status are dead (and no new ones can have
7730 * been added, since it is not legal to add members to an existing
7731 * MultiXactId).
7732 *
7733 * But by the time we finish sleeping, someone else may have changed the Xmax
7734 * of the containing tuple, so the caller needs to iterate on us somehow.
7735 *
7736 * Note that in case we return false, the number of remaining members is
7737 * not to be trusted.
7738 */
7739static bool
7740Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7741 uint16 infomask, bool nowait,
7742 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7743 int *remaining, bool logLockFailure)
7744{
7745 bool result = true;
7746 MultiXactMember *members;
7747 int nmembers;
7748 int remain = 0;
7749
7750 /* for pre-pg_upgrade tuples, no need to sleep at all */
7751 nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7752 GetMultiXactIdMembers(multi, &members, false,
7753 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7754
7755 if (nmembers >= 0)
7756 {
7757 int i;
7758
7759 for (i = 0; i < nmembers; i++)
7760 {
7761 TransactionId memxid = members[i].xid;
7762 MultiXactStatus memstatus = members[i].status;
7763
7764 if (TransactionIdIsCurrentTransactionId(memxid))
7765 {
7766 remain++;
7767 continue;
7768 }
7769
7770 if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7771 LOCKMODE_from_mxstatus(status)))
7772 {
7773 if (remaining && TransactionIdIsInProgress(memxid))
7774 remain++;
7775 continue;
7776 }
7777
7778 /*
7779 * This member conflicts with our multi, so we have to sleep (or
7780 * return failure, if asked to avoid waiting.)
7781 *
7782 * Note that we don't set up an error context callback ourselves,
7783 * but instead we pass the info down to XactLockTableWait. This
7784 * might seem a bit wasteful because the context is set up and
7785 * torn down for each member of the multixact, but in reality it
7786 * should be barely noticeable, and it avoids duplicate code.
7787 */
7788 if (nowait)
7789 {
7790 result = ConditionalXactLockTableWait(memxid, logLockFailure);
7791 if (!result)
7792 break;
7793 }
7794 else
7795 XactLockTableWait(memxid, rel, ctid, oper);
7796 }
7797
7798 pfree(members);
7799 }
7800
7801 if (remaining)
7802 *remaining = remain;
7803
7804 return result;
7805}
7806
7807/*
7808 * MultiXactIdWait
7809 * Sleep on a MultiXactId.
7810 *
7811 * By the time we finish sleeping, someone else may have changed the Xmax
7812 * of the containing tuple, so the caller needs to iterate on us somehow.
7813 *
7814 * We return (in *remaining, if not NULL) the number of members that are still
7815 * running, including any (non-aborted) subtransactions of our own transaction.
7816 */
7817static void
7818MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
7819 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7820 int *remaining)
7821{
7822 (void) Do_MultiXactIdWait(multi, status, infomask, false,
7823 rel, ctid, oper, remaining, false);
7824}
7825
7826/*
7827 * ConditionalMultiXactIdWait
7828 * As above, but only lock if we can get the lock without blocking.
7829 *
7830 * By the time we finish sleeping, someone else may have changed the Xmax
7831 * of the containing tuple, so the caller needs to iterate on us somehow.
7832 *
7833 * If the multixact is now all gone, return true. Returns false if some
7834 * transactions might still be running.
7835 *
7836 * We return (in *remaining, if not NULL) the number of members that are still
7837 * running, including any (non-aborted) subtransactions of our own transaction.
7838 */
7839static bool
7840ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7841 uint16 infomask, Relation rel, int *remaining,
7842 bool logLockFailure)
7843{
7844 return Do_MultiXactIdWait(multi, status, infomask, true,
7845 rel, NULL, XLTW_None, remaining, logLockFailure);
7846}
7847
7848/*
7849 * heap_tuple_needs_eventual_freeze
7850 *
7851 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7852 * will eventually require freezing (if tuple isn't removed by pruning first).
7853 */
7854bool
7855heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
7856{
7857 TransactionId xid;
7858
7859 /*
7860 * If xmin is a normal transaction ID, this tuple is definitely not
7861 * frozen.
7862 */
7863 xid = HeapTupleHeaderGetXmin(tuple);
7864 if (TransactionIdIsNormal(xid))
7865 return true;
7866
7867 /*
7868 * If xmax is a valid xact or multixact, this tuple is also not frozen.
7869 */
7870 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7871 {
7872 MultiXactId multi;
7873
7874 multi = HeapTupleHeaderGetRawXmax(tuple);
7875 if (MultiXactIdIsValid(multi))
7876 return true;
7877 }
7878 else
7879 {
7880 xid = HeapTupleHeaderGetRawXmax(tuple);
7881 if (TransactionIdIsNormal(xid))
7882 return true;
7883 }
7884
7885 if (tuple->t_infomask & HEAP_MOVED)
7886 {
7887 xid = HeapTupleHeaderGetXvac(tuple);
7888 if (TransactionIdIsNormal(xid))
7889 return true;
7890 }
7891
7892 return false;
7893}
7894
7895/*
7896 * heap_tuple_should_freeze
7897 *
7898 * Return value indicates if heap_prepare_freeze_tuple sibling function would
7899 * (or should) force freezing of the heap page that contains caller's tuple.
7900 * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7901 * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7902 *
7903 * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7904 * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7905 * Our working assumption is that caller won't decide to freeze this tuple.
7906 * It's up to caller to only ratchet back its own top-level trackers after the
7907 * point that it fully commits to not freezing the tuple/page in question.
7908 */
7909bool
7910heap_tuple_should_freeze(HeapTupleHeader tuple,
7911 const struct VacuumCutoffs *cutoffs,
7912 TransactionId *NoFreezePageRelfrozenXid,
7913 MultiXactId *NoFreezePageRelminMxid)
7914{
7915 TransactionId xid;
7916 MultiXactId multi;
7917 bool freeze = false;
7918
7919 /* First deal with xmin */
7920 xid = HeapTupleHeaderGetXmin(tuple);
7921 if (TransactionIdIsNormal(xid))
7922 {
7923 Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7924 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7925 *NoFreezePageRelfrozenXid = xid;
7926 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7927 freeze = true;
7928 }
7929
7930 /* Now deal with xmax */
7931 xid = InvalidTransactionId;
7932 multi = InvalidMultiXactId;
7933 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7934 multi = HeapTupleHeaderGetRawXmax(tuple);
7935 else
7936 xid = HeapTupleHeaderGetRawXmax(tuple);
7937
7938 if (TransactionIdIsNormal(xid))
7939 {
7940 Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7941 /* xmax is a non-permanent XID */
7942 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7943 *NoFreezePageRelfrozenXid = xid;
7944 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7945 freeze = true;
7946 }
7947 else if (!MultiXactIdIsValid(multi))
7948 {
7949 /* xmax is a permanent XID or invalid MultiXactId/XID */
7950 }
7951 else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7952 {
7953 /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
7954 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7955 *NoFreezePageRelminMxid = multi;
7956 /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
7957 freeze = true;
7958 }
7959 else
7960 {
7961 /* xmax is a MultiXactId that may have an updater XID */
7962 MultiXactMember *members;
7963 int nmembers;
7964
7965 Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
7966 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7967 *NoFreezePageRelminMxid = multi;
7968 if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
7969 freeze = true;
7970
7971 /* need to check whether any member of the mxact is old */
7972 nmembers = GetMultiXactIdMembers(multi, &members, false,
7973 HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
7974
7975 for (int i = 0; i < nmembers; i++)
7976 {
7977 xid = members[i].xid;
7978 Assert(TransactionIdIsNormal(xid));
7979 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7980 *NoFreezePageRelfrozenXid = xid;
7981 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7982 freeze = true;
7983 }
7984 if (nmembers > 0)
7985 pfree(members);
7986 }
7987
7988 if (tuple->t_infomask & HEAP_MOVED)
7989 {
7990 xid = HeapTupleHeaderGetXvac(tuple);
7991 if (TransactionIdIsNormal(xid))
7992 {
7993 Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7994 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7995 *NoFreezePageRelfrozenXid = xid;
7996 /* heap_prepare_freeze_tuple forces xvac freezing */
7997 freeze = true;
7998 }
7999 }
8000
8001 return freeze;
8002}
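/*
 * Editor's sketch of how a VACUUM-style caller consumes the paired trackers
 * maintained above: only the branch actually taken for a page may feed the
 * caller's final relfrozenxid/relminmxid. The helper name is hypothetical.
 */
static void
example_apply_page_trackers(const HeapPageFreeze *pagefrz, bool froze_page,
                            TransactionId *newRelfrozenXid,
                            MultiXactId *newRelminMxid)
{
    if (froze_page)
    {
        /* executed freeze plans: use the "freeze" trackers */
        *newRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
        *newRelminMxid = pagefrz->FreezePageRelminMxid;
    }
    else
    {
        /* left the page unfrozen: fall back on the "no freeze" trackers */
        *newRelfrozenXid = pagefrz->NoFreezePageRelfrozenXid;
        *newRelminMxid = pagefrz->NoFreezePageRelminMxid;
    }
}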
8003
8004/*
8005 * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
8006 * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
8007 * that caller is in the process of physically removing, e.g. via HOT pruning
8008 * or index deletion.
8009 *
8010 * Caller must initialize its value to InvalidTransactionId, which is
8011 * generally interpreted as "definitely no need for a recovery conflict".
8012 * Final value must reflect all heap tuples that caller will physically remove
8013 * (or remove TID references to) via its ongoing pruning/deletion operation.
8014 * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
8015 * caller's WAL record) by REDO routine when it replays caller's operation.
8016 */
8017void
8018HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
8019 TransactionId *snapshotConflictHorizon)
8020{
8021 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
8022 TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
8023 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
8024
8025 if (tuple->t_infomask & HEAP_MOVED)
8026 {
8027 if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
8028 *snapshotConflictHorizon = xvac;
8029 }
8030
8031 /*
8032 * Ignore tuples inserted by an aborted transaction or if the tuple was
8033 * updated/deleted by the inserting transaction.
8034 *
8035 * Look for a committed hint bit, or if no xmin bit is set, check clog.
8036 */
8037 if (HeapTupleHeaderXminCommitted(tuple) ||
8038 (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
8039 {
8040 if (xmax != xmin &&
8041 TransactionIdFollows(xmax, *snapshotConflictHorizon))
8042 *snapshotConflictHorizon = xmax;
8043 }
8044}
8045
8046#ifdef USE_PREFETCH
8047/*
8048 * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8049 * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8050 * we can prefetch, and which have already been prefetched; each call to this
8051 * function picks up where the previous call left off.
8052 *
8053 * Note: we expect the deltids array to be sorted in an order that groups TIDs
8054 * by heap block, with all TIDs for each block appearing together in exactly
8055 * one group.
8056 */
8057static void
8058index_delete_prefetch_buffer(Relation rel,
8059 IndexDeletePrefetchState *prefetch_state,
8060 int prefetch_count)
8061{
8062 BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
8063 int count = 0;
8064 int i;
8065 int ndeltids = prefetch_state->ndeltids;
8066 TM_IndexDelete *deltids = prefetch_state->deltids;
8067
8068 for (i = prefetch_state->next_item;
8069 i < ndeltids && count < prefetch_count;
8070 i++)
8071 {
8072 ItemPointer htid = &deltids[i].tid;
8073
8074 if (cur_hblkno == InvalidBlockNumber ||
8075 ItemPointerGetBlockNumber(htid) != cur_hblkno)
8076 {
8077 cur_hblkno = ItemPointerGetBlockNumber(htid);
8078 PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
8079 count++;
8080 }
8081 }
8082
8083 /*
8084 * Save the prefetch position so that next time we can continue from that
8085 * position.
8086 */
8087 prefetch_state->next_item = i;
8088 prefetch_state->cur_hblkno = cur_hblkno;
8089}
8090#endif
8091
8092/*
8093 * Helper function for heap_index_delete_tuples. Checks for index corruption
8094 * involving an invalid TID in index AM caller's index page.
8095 *
8096 * This is an ideal place for these checks. The index AM must hold a buffer
8097 * lock on the index page containing the TIDs we examine here, so we don't
8098 * have to worry about concurrent VACUUMs at all. We can be sure that the
8099 * index is corrupt when htid points directly to an LP_UNUSED item or
8100 * heap-only tuple, which is not the case during standard index scans.
8101 */
8102static inline void
8103index_delete_check_htid(TM_IndexDeleteOp *delstate,
8104 Page page, OffsetNumber maxoff,
8105 const ItemPointerData *htid, TM_IndexStatus *istatus)
8106{
8107 OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
8108 ItemId iid;
8109
8110 Assert(OffsetNumberIsValid(istatus->idxoffnum));
8111
8112 if (unlikely(indexpagehoffnum > maxoff))
8113 ereport(ERROR,
8114 (errcode(ERRCODE_INDEX_CORRUPTED),
8115 errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8116 ItemPointerGetBlockNumber(htid),
8117 indexpagehoffnum,
8118 istatus->idxoffnum, delstate->iblknum,
8119 RelationGetRelationName(delstate->irel))));
8120
8121 iid = PageGetItemId(page, indexpagehoffnum);
8122 if (unlikely(!ItemIdIsUsed(iid)))
8123 ereport(ERROR,
8124 (errcode(ERRCODE_INDEX_CORRUPTED),
8125 errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8126 ItemPointerGetBlockNumber(htid),
8127 indexpagehoffnum,
8128 istatus->idxoffnum, delstate->iblknum,
8129 RelationGetRelationName(delstate->irel))));
8130
8131 if (ItemIdHasStorage(iid))
8132 {
8133 HeapTupleHeader htup;
8134
8135 Assert(ItemIdIsNormal(iid));
8136 htup = (HeapTupleHeader) PageGetItem(page, iid);
8137
8138 if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
8137
8139 ereport(ERROR,
8140 (errcode(ERRCODE_INDEX_CORRUPTED),
8141 errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8143 indexpagehoffnum,
8144 istatus->idxoffnum, delstate->iblknum,
8145 RelationGetRelationName(delstate->irel))));
8146 }
8147}
8148
8149/*
8150 * heapam implementation of tableam's index_delete_tuples interface.
8151 *
8152 * This helper function is called by index AMs during index tuple deletion.
8153 * See tableam header comments for an explanation of the interface implemented
8154 * here and a general theory of operation. Note that each call here is either
8155 * a simple index deletion call, or a bottom-up index deletion call.
8156 *
8157 * It's possible for this to generate a fair amount of I/O, since we may be
8158 * deleting hundreds of tuples from a single index block. To amortize that
8159 * cost to some degree, this uses prefetching and combines repeat accesses to
8160 * the same heap block.
8161 */
8162TransactionId
8163heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
8164{
8165 /* Initial assumption is that earlier pruning took care of conflict */
8166 TransactionId snapshotConflictHorizon = InvalidTransactionId;
8167 BlockNumber blkno = InvalidBlockNumber;
8168 Buffer buf = InvalidBuffer;
8169 Page page = NULL;
8170 OffsetNumber maxoff = InvalidOffsetNumber;
8171 TransactionId priorXmax;
8172#ifdef USE_PREFETCH
8173 IndexDeletePrefetchState prefetch_state;
8174 int prefetch_distance;
8175#endif
8176 SnapshotData SnapshotNonVacuumable;
8177 int finalndeltids = 0,
8178 nblocksaccessed = 0;
8179
8180 /* State that's only used in bottom-up index deletion case */
8181 int nblocksfavorable = 0;
8182 int curtargetfreespace = delstate->bottomupfreespace,
8183 lastfreespace = 0,
8184 actualfreespace = 0;
8185 bool bottomup_final_block = false;
8186
8187 InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
8188
8189 /* Sort caller's deltids array by TID for further processing */
8190 index_delete_sort(delstate);
8191
8192 /*
8193 * Bottom-up case: resort deltids array in an order attuned to where the
8194 * greatest number of promising TIDs are to be found, and determine how
8195 * many blocks from the start of sorted array should be considered
8196 * favorable. This will also shrink the deltids array in order to
8197 * eliminate completely unfavorable blocks up front.
8198 */
8199 if (delstate->bottomup)
8200 nblocksfavorable = bottomup_sort_and_shrink(delstate);
8201
8202#ifdef USE_PREFETCH
8203 /* Initialize prefetch state. */
8204 prefetch_state.cur_hblkno = InvalidBlockNumber;
8205 prefetch_state.next_item = 0;
8206 prefetch_state.ndeltids = delstate->ndeltids;
8207 prefetch_state.deltids = delstate->deltids;
8208
8209 /*
8210 * Determine the prefetch distance that we will attempt to maintain.
8211 *
8212 * Since the caller holds a buffer lock somewhere in rel, we'd better make
8213 * sure that isn't a catalog relation before we call code that does
8214 * syscache lookups, to avoid risk of deadlock.
8215 */
8216 if (IsCatalogRelation(rel))
8217 prefetch_distance = maintenance_io_concurrency;
8218 else
8219 prefetch_distance =
8220 get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
8221
8222 /* Cap initial prefetch distance for bottom-up deletion caller */
8223 if (delstate->bottomup)
8224 {
8225 Assert(nblocksfavorable >= 1);
8226 Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
8227 prefetch_distance = Min(prefetch_distance, nblocksfavorable);
8228 }
8229
8230 /* Start prefetching. */
8231 index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
8232#endif
8233
8234 /* Iterate over deltids, determine which to delete, check their horizon */
8235 Assert(delstate->ndeltids > 0);
8236 for (int i = 0; i < delstate->ndeltids; i++)
8237 {
8238 TM_IndexDelete *ideltid = &delstate->deltids[i];
8239 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8240 ItemPointer htid = &ideltid->tid;
8241 OffsetNumber offnum;
8242
8243 /*
8244 * Read buffer, and perform required extra steps each time a new block
8245 * is encountered. Avoid refetching if it's the same block as the one
8246 * from the last htid.
8247 */
8248 if (blkno == InvalidBlockNumber ||
8249 ItemPointerGetBlockNumber(htid) != blkno)
8250 {
8251 /*
8252 * Consider giving up early for bottom-up index deletion caller
8253 * first. (Only prefetch next-next block afterwards, when it
8254 * becomes clear that we're at least going to access the next
8255 * block in line.)
8256 *
8257 * Sometimes the first block frees so much space for bottom-up
8258 * caller that the deletion process can end without accessing any
8259 * more blocks. It is usually necessary to access 2 or 3 blocks
8260 * per bottom-up deletion operation, though.
8261 */
8262 if (delstate->bottomup)
8263 {
8264 /*
8265 * We often allow caller to delete a few additional items
8266 * whose entries we reached after the point that space target
8267 * from caller was satisfied. The cost of accessing the page
8268 * was already paid at that point, so it made sense to finish
8269 * it off. When that happened, we finalize everything here
8270 * (by finishing off the whole bottom-up deletion operation
8271 * without needlessly paying the cost of accessing any more
8272 * blocks).
8273 */
8274 if (bottomup_final_block)
8275 break;
8276
8277 /*
8278 * Give up when we didn't enable our caller to free any
8279 * additional space as a result of processing the page that we
8280 * just finished up with. This rule is the main way in which
8281 * we keep the cost of bottom-up deletion under control.
8282 */
8283 if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
8284 break;
8285 lastfreespace = actualfreespace; /* for next time */
8286
8287 /*
8288 * Deletion operation (which is bottom-up) will definitely
8289 * access the next block in line. Prepare for that now.
8290 *
8291 * Decay target free space so that we don't hang on for too
8292 * long with a marginal case. (Space target is only truly
8293 * helpful when it allows us to recognize that we don't need
8294 * to access more than 1 or 2 blocks to satisfy caller due to
8295 * agreeable workload characteristics.)
8296 *
8297 * We are a bit more patient when we encounter contiguous
8298 * blocks, though: these are treated as favorable blocks. The
8299 * decay process is only applied when the next block in line
8300 * is not a favorable/contiguous block. This is not an
8301 * exception to the general rule; we still insist on finding
8302 * at least one deletable item per block accessed. See
8303 * bottomup_nblocksfavorable() for full details of the theory
8304 * behind favorable blocks and heap block locality in general.
8305 *
8306 * Note: The first block in line is always treated as a
8307 * favorable block, so the earliest possible point that the
8308 * decay can be applied is just before we access the second
8309 * block in line. The Assert() verifies this for us.
8310 */
8311 Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
8312 if (nblocksfavorable > 0)
8313 nblocksfavorable--;
8314 else
8315 curtargetfreespace /= 2;
8316 }
8317
8318 /* release old buffer */
8319 if (BufferIsValid(buf))
8320 ReleaseBuffer(buf);
8321
8322 blkno = ItemPointerGetBlockNumber(htid);
8323 buf = ReadBuffer(rel, blkno);
8324 nblocksaccessed++;
8325 Assert(!delstate->bottomup ||
8326 nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
8327
8328#ifdef USE_PREFETCH
8329
8330 /*
8331 * To maintain the prefetch distance, prefetch one more page for
8332 * each page we read.
8333 */
8334 index_delete_prefetch_buffer(rel, &prefetch_state, 1);
8335#endif
8336
8337 LockBuffer(buf, BUFFER_LOCK_SHARE);
8338
8339 page = BufferGetPage(buf);
8340 maxoff = PageGetMaxOffsetNumber(page);
8341 }
8342
8343 /*
8344 * In passing, detect index corruption involving an index page with a
8345 * TID that points to a location in the heap that couldn't possibly be
8346 * correct. We only do this with actual TIDs from caller's index page
8347 * (not items reached by traversing through a HOT chain).
8348 */
8349 index_delete_check_htid(delstate, page, maxoff, htid, istatus);
8350
8351 if (istatus->knowndeletable)
8352 Assert(!delstate->bottomup && !istatus->promising);
8353 else
8354 {
8355 ItemPointerData tmp = *htid;
8356 HeapTupleData heapTuple;
8357
8358 /* Are any tuples from this HOT chain non-vacuumable? */
8359 if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
8360 &heapTuple, NULL, true))
8361 continue; /* can't delete entry */
8362
8363 /* Caller will delete, since whole HOT chain is vacuumable */
8364 istatus->knowndeletable = true;
8365
8366 /* Maintain index free space info for bottom-up deletion case */
8367 if (delstate->bottomup)
8368 {
8369 Assert(istatus->freespace > 0);
8370 actualfreespace += istatus->freespace;
8371 if (actualfreespace >= curtargetfreespace)
8372 bottomup_final_block = true;
8373 }
8374 }
8375
8376 /*
8377 * Maintain snapshotConflictHorizon value for deletion operation as a
8378 * whole by advancing current value using heap tuple headers. This is
8379 * loosely based on the logic for pruning a HOT chain.
8380 */
8381 offnum = ItemPointerGetOffsetNumber(htid);
8382 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8383 for (;;)
8384 {
8385 ItemId lp;
8386 HeapTupleHeader htup;
8387
8388 /* Sanity check (pure paranoia) */
8389 if (offnum < FirstOffsetNumber)
8390 break;
8391
8392 /*
8393 * An offset past the end of page's line pointer array is possible
8394 * when the array was truncated
8395 */
8396 if (offnum > maxoff)
8397 break;
8398
8399 lp = PageGetItemId(page, offnum);
8400 if (ItemIdIsRedirected(lp))
8401 {
8402 offnum = ItemIdGetRedirect(lp);
8403 continue;
8404 }
8405
8406 /*
8407 * We'll often encounter LP_DEAD line pointers (especially with an
8408 * entry marked knowndeletable by our caller up front). No heap
8409 * tuple headers get examined for an htid that leads us to an
8410 * LP_DEAD item. This is okay because the earlier pruning
8411 * operation that made the line pointer LP_DEAD in the first place
8412 * must have considered the original tuple header as part of
8413 * generating its own snapshotConflictHorizon value.
8414 *
8415 * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8416 * the same strategy that index vacuuming uses in all cases. Index
8417 * VACUUM WAL records don't even have a snapshotConflictHorizon
8418 * field of their own for this reason.
8419 */
8420 if (!ItemIdIsNormal(lp))
8421 break;
8422
8423 htup = (HeapTupleHeader) PageGetItem(page, lp);
8424
8425 /*
8426 * Check the tuple XMIN against prior XMAX, if any
8427 */
8428 if (TransactionIdIsValid(priorXmax) &&
8429 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
8430 break;
8431
8432 HeapTupleHeaderAdvanceConflictHorizon(htup,
8433 &snapshotConflictHorizon);
8434
8435 /*
8436 * If the tuple is not HOT-updated, then we are at the end of this
8437 * HOT-chain. No need to visit later tuples from the same update
8438 * chain (they get their own index entries) -- just move on to
8439 * next htid from index AM caller.
8440 */
8441 if (!HeapTupleHeaderIsHotUpdated(htup))
8442 break;
8443
8444 /* Advance to next HOT chain member */
8445 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8446 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8447 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
8448 }
8449
8450 /* Enable further/final shrinking of deltids for caller */
8451 finalndeltids = i + 1;
8452 }
8453
8454 UnlockReleaseBuffer(buf);
8455
8456 /*
8457 * Shrink deltids array to exclude non-deletable entries at the end. This
8458 * is not just a minor optimization. Final deltids array size might be
8459 * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8460 * ndeltids being zero in all cases with zero total deletable entries.
8461 */
8462 Assert(finalndeltids > 0 || delstate->bottomup);
8463 delstate->ndeltids = finalndeltids;
8464
8465 return snapshotConflictHorizon;
8466}
8467
8468/*
8469 * Specialized inlineable comparison function for index_delete_sort()
8470 */
8471static inline int
8472index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
8473{
8474 ItemPointer tid1 = &deltid1->tid;
8475 ItemPointer tid2 = &deltid2->tid;
8476
8477 {
8478 BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
8479 BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
8480
8481 if (blk1 != blk2)
8482 return (blk1 < blk2) ? -1 : 1;
8483 }
8484 {
8485 OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
8486 OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
8487
8488 if (pos1 != pos2)
8489 return (pos1 < pos2) ? -1 : 1;
8490 }
8491
8492 Assert(false);
8493
8494 return 0;
8495}
8496
8497/*
8498 * Sort deltids array from delstate by TID. This prepares it for further
8499 * processing by heap_index_delete_tuples().
8500 *
8501 * This operation becomes a noticeable consumer of CPU cycles with some
8502 * workloads, so we go to the trouble of specialization/micro optimization.
8503 * We use shellsort for this because it's easy to specialize, compiles to
8504 * relatively few instructions, and is adaptive to presorted inputs/subsets
8505 * (which are typical here).
8506 */
8507static void
8508index_delete_sort(TM_IndexDeleteOp *delstate)
8509{
8510 TM_IndexDelete *deltids = delstate->deltids;
8511 int ndeltids = delstate->ndeltids;
8512
8513 /*
8514 * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8515 *
8516 * This implementation is fast with array sizes up to ~4500. This covers
8517 * all supported BLCKSZ values.
8518 */
8519 const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8520
8521 /* Think carefully before changing anything here -- keep swaps cheap */
8522 StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8523 "element size exceeds 8 bytes");
8524
8525 for (int g = 0; g < lengthof(gaps); g++)
8526 {
8527 for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8528 {
8529 TM_IndexDelete d = deltids[i];
8530 int j = i;
8531
8532 while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8533 {
8534 deltids[j] = deltids[j - hi];
8535 j -= hi;
8536 }
8537 deltids[j] = d;
8538 }
8539 }
8540}
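/*
 * Editor's standalone sketch: the same Sedgewick-Incerpi shellsort shape as
 * index_delete_sort() above, applied to a plain int array so the gap
 * sequence can be compiled and studied outside the server.
 */
#include <stddef.h>

static void
demo_shellsort(int *a, size_t n)
{
    static const size_t gaps[] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};

    for (size_t g = 0; g < sizeof(gaps) / sizeof(gaps[0]); g++)
    {
        for (size_t hi = gaps[g], i = hi; i < n; i++)
        {
            int     d = a[i];
            size_t  j = i;

            /* gapped insertion sort; adapts well to presorted runs */
            while (j >= hi && a[j - hi] >= d)
            {
                a[j] = a[j - hi];
                j -= hi;
            }
            a[j] = d;
        }
    }
}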
8541
8542/*
8543 * Returns how many blocks should be considered favorable/contiguous for a
8544 * bottom-up index deletion pass. This is a number of heap blocks that starts
8545 * from and includes the first block in line.
8546 *
8547 * There is always at least one favorable block during bottom-up index
8548 * deletion. In the worst case (i.e. with totally random heap blocks) the
8549 * first block in line (the only favorable block) can be thought of as a
8550 * degenerate array of contiguous blocks that consists of a single block.
8551 * heap_index_delete_tuples() will expect this.
8552 *
8553 * Caller passes blockgroups, a description of the final order that deltids
8554 * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8555 * processing. Note that deltids need not actually be sorted just yet (caller
8556 * only passes deltids to us so that we can interpret blockgroups).
8557 *
8558 * You might guess that the existence of contiguous blocks cannot matter much,
8559 * since in general the main factor that determines which blocks we visit is
8560 * the number of promising TIDs, which is a fixed hint from the index AM.
8561 * We're not really targeting the general case, though -- the actual goal is
8562 * to adapt our behavior to a wide variety of naturally occurring conditions.
8563 * The effects of most of the heuristics we apply are only noticeable in the
8564 * aggregate, over time and across many _related_ bottom-up index deletion
8565 * passes.
8566 *
8567 * Deeming certain blocks favorable allows heapam to recognize and adapt to
8568 * workloads where heap blocks visited during bottom-up index deletion can be
8569 * accessed contiguously, in the sense that each newly visited block is the
8570 * neighbor of the block that bottom-up deletion just finished processing (or
8571 * close enough to it). It will likely be cheaper to access more favorable
8572 * blocks sooner rather than later (e.g. in this pass, not across a series of
8573 * related bottom-up passes). Either way it is probably only a matter of time
8574 * (or a matter of further correlated version churn) before all blocks that
8575 * appear together as a single large batch of favorable blocks get accessed by
8576 * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8577 * appear almost constantly or not even once (it all depends on per-index
8578 * workload characteristics).
8579 *
8580 * Note that the blockgroups sort order applies a power-of-two bucketing
8581 * scheme that creates opportunities for contiguous groups of blocks to get
8582 * batched together, at least with workloads that are naturally amenable to
8583 * being driven by heap block locality. This doesn't just enhance the spatial
8584 * locality of bottom-up heap block processing in the obvious way. It also
8585 * enables temporal locality of access, since sorting by heap block number
8586 * naturally tends to make the bottom-up processing order deterministic.
8587 *
8588 * Consider the following example to get a sense of how temporal locality
8589 * might matter: There is a heap relation with several indexes, each of which
8590 * is low to medium cardinality. It is subject to constant non-HOT updates.
8591 * The updates are skewed (in one part of the primary key, perhaps). None of
8592 * the indexes are logically modified by the UPDATE statements (if they were
8593 * then bottom-up index deletion would not be triggered in the first place).
8594 * Naturally, each new round of index tuples (for each heap tuple that gets a
8595 * heap_update() call) will have the same heap TID in each and every index.
8596 * Since these indexes are low cardinality and never get logically modified,
8597 * heapam processing during bottom-up deletion passes will access heap blocks
8598 * in approximately sequential order. Temporal locality of access occurs due
8599 * to bottom-up deletion passes behaving very similarly across each of the
8600 * indexes at any given moment. This keeps the number of buffer misses needed
8601 * to visit heap blocks to a minimum.
8602 */
8603static int
8604bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
8605 TM_IndexDelete *deltids)
8606{
8607 int64 lastblock = -1;
8608 int nblocksfavorable = 0;
8609
8610 Assert(nblockgroups >= 1);
8611 Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
8612
8613 /*
8614 * We tolerate heap blocks that will be accessed only slightly out of
8615 * physical order. Small blips occur when a pair of almost-contiguous
8616 * blocks happen to fall into different buckets (perhaps due only to a
8617 * small difference in npromisingtids that the bucketing scheme didn't
8618 * quite manage to ignore). We effectively ignore these blips by applying
8619 * a small tolerance. The precise tolerance we use is a little arbitrary,
8620 * but it works well enough in practice.
8621 */
8622 for (int b = 0; b < nblockgroups; b++)
8623 {
8624 IndexDeleteCounts *group = blockgroups + b;
8625 TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8626 BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
8627
8628 if (lastblock != -1 &&
8629 ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
8630 (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
8631 break;
8632
8633 nblocksfavorable++;
8634 lastblock = block;
8635 }
8636
8637 /* Always indicate that there is at least 1 favorable block */
8638 Assert(nblocksfavorable >= 1);
8639
8640 return nblocksfavorable;
8641}
8642
8643/*
8644 * qsort comparison function for bottomup_sort_and_shrink()
8645 */
8646static int
8647bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8648{
8649 const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
8650 const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
8651
8652 /*
8653 * Most significant field is npromisingtids (which we invert the order of
8654 * so as to sort in desc order).
8655 *
8656 * Caller should have already normalized npromisingtids fields into
8657 * power-of-two values (buckets).
8658 */
8659 if (group1->npromisingtids > group2->npromisingtids)
8660 return -1;
8661 if (group1->npromisingtids < group2->npromisingtids)
8662 return 1;
8663
8664 /*
8665 * Tiebreak: desc ntids sort order.
8666 *
8667 * We cannot expect power-of-two values for ntids fields. We should
8668 * behave as if they were already rounded up for us instead.
8669 */
8670 if (group1->ntids != group2->ntids)
8671 {
8672 uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
8673 uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
8674
8675 if (ntids1 > ntids2)
8676 return -1;
8677 if (ntids1 < ntids2)
8678 return 1;
8679 }
8680
8681 /*
8682 * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8683 * block in deltids array) order.
8684 *
8685 * This is equivalent to sorting in ascending heap block number order
8686 * (among otherwise equal subsets of the array). This approach allows us
8687 * to avoid accessing the out-of-line TID. (We rely on the assumption
8688 * that the deltids array was sorted in ascending heap TID order when
8689 * these offsets to the first TID from each heap block group were formed.)
8690 */
8691 if (group1->ifirsttid > group2->ifirsttid)
8692 return 1;
8693 if (group1->ifirsttid < group2->ifirsttid)
8694 return -1;
8695
8696 pg_unreachable();
8697
8698 return 0;
8699}
8700
8701/*
8702 * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8703 *
8704 * Sorts deltids array in the order needed for useful processing by bottom-up
8705 * deletion. The array should already be sorted in TID order when we're
8706 * called. The sort process groups heap TIDs from deltids into heap block
8707 * groupings. Earlier/more-promising groups/blocks are usually those that are
8708 * known to have the most "promising" TIDs.
8709 *
8710 * Sets new size of deltids array (ndeltids) in state. deltids will only have
8711 * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8712 * return. This often means that deltids will be shrunk to a small fraction
8713 * of its original size (we eliminate many heap blocks from consideration for
8714 * caller up front).
8715 *
8716 * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8717 * for a definition and full details.
8718 */
8719static int
8720bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
8721{
8722 IndexDeleteCounts *blockgroups;
8723 TM_IndexDelete *reordereddeltids;
8724 BlockNumber curblock = InvalidBlockNumber;
8725 int nblockgroups = 0;
8726 int ncopied = 0;
8727 int nblocksfavorable = 0;
8728
8729 Assert(delstate->bottomup);
8730 Assert(delstate->ndeltids > 0);
8731
8732 /* Calculate per-heap-block count of TIDs */
8733 blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
8734 for (int i = 0; i < delstate->ndeltids; i++)
8735 {
8736 TM_IndexDelete *ideltid = &delstate->deltids[i];
8737 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8738 ItemPointer htid = &ideltid->tid;
8739 bool promising = istatus->promising;
8740
8741 if (curblock != ItemPointerGetBlockNumber(htid))
8742 {
8743 /* New block group */
8744 nblockgroups++;
8745
8746 Assert(curblock < ItemPointerGetBlockNumber(htid) ||
8747 !BlockNumberIsValid(curblock));
8748
8749 curblock = ItemPointerGetBlockNumber(htid);
8750 blockgroups[nblockgroups - 1].ifirsttid = i;
8751 blockgroups[nblockgroups - 1].ntids = 1;
8752 blockgroups[nblockgroups - 1].npromisingtids = 0;
8753 }
8754 else
8755 {
8756 blockgroups[nblockgroups - 1].ntids++;
8757 }
8758
8759 if (promising)
8760 blockgroups[nblockgroups - 1].npromisingtids++;
8761 }
8762
8763 /*
8764 * We're about ready to sort block groups to determine the optimal order
8765 * for visiting heap blocks. But before we do, round the number of
8766 * promising tuples for each block group up to the next power-of-two,
8767 * unless it is very low (less than 4), in which case we round up to 4.
8768 * npromisingtids is far too noisy to trust when choosing between a pair
8769 * of block groups that both have very low values.
8770 *
8771 * This scheme divides heap blocks/block groups into buckets. Each bucket
8772 * contains blocks that have _approximately_ the same number of promising
8773 * TIDs as each other. The goal is to ignore relatively small differences
8774 * in the total number of promising entries, so that the whole process can
8775 * give a little weight to heapam factors (like heap block locality)
8776 * instead. This isn't a trade-off, really -- we have nothing to lose. It
8777 * would be foolish to interpret small differences in npromisingtids
8778 * values as anything more than noise.
8779 *
8780 * We tiebreak on nhtids when sorting block group subsets that have the
8781 * same npromisingtids, but this has the same issues as npromisingtids,
8782 * and so nhtids is subject to the same power-of-two bucketing scheme. The
8783 * only reason that we don't fix nhtids in the same way here too is that
8784 * we'll need accurate nhtids values after the sort. We handle nhtids
8785 * bucketization dynamically instead (in the sort comparator).
8786 *
8787 * See bottomup_nblocksfavorable() for a full explanation of when and how
8788 * heap locality/favorable blocks can significantly influence when and how
8789 * heap blocks are accessed.
8790 */
8791 for (int b = 0; b < nblockgroups; b++)
8792 {
8793 IndexDeleteCounts *group = blockgroups + b;
8794
8795 /* Better off falling back on nhtids with low npromisingtids */
8796 if (group->npromisingtids <= 4)
8797 group->npromisingtids = 4;
8798 else
8799 group->npromisingtids =
8800 pg_nextpower2_32((uint32) group->npromisingtids);
8801 }
8802
8803 /* Sort groups and rearrange caller's deltids array */
8804 qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
8805 bottomup_sort_and_shrink_cmp);
8806 reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8807
8808 nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
8809 /* Determine number of favorable blocks at the start of final deltids */
8810 nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
8811 delstate->deltids);
8812
8813 for (int b = 0; b < nblockgroups; b++)
8814 {
8815 IndexDeleteCounts *group = blockgroups + b;
8816 TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8817
8818 memcpy(reordereddeltids + ncopied, firstdtid,
8819 sizeof(TM_IndexDelete) * group->ntids);
8820 ncopied += group->ntids;
8821 }
8822
8823 /* Copy final grouped and sorted TIDs back into start of caller's array */
8824 memcpy(delstate->deltids, reordereddeltids,
8825 sizeof(TM_IndexDelete) * ncopied);
8826 delstate->ndeltids = ncopied;
8827
8828 pfree(reordereddeltids);
8829 pfree(blockgroups);
8830
8831 return nblocksfavorable;
8832}
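/*
 * Editor's standalone sketch of the power-of-two bucketing used above:
 * counts of 4 or less collapse into one bucket, larger counts round up to
 * the next power of two, so near-equal npromisingtids values compare equal
 * and heap block locality can break the tie instead.
 */
static unsigned int
demo_bucketize(unsigned int npromisingtids)
{
    unsigned int bucket = 4;    /* minimum bucket absorbs noisy low counts */

    while (bucket < npromisingtids)
        bucket *= 2;            /* round up to the next power of two */
    return bucket;
}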
8833
8834/*
8835 * Perform XLogInsert for a heap-visible operation. 'block' is the block
8836 * being marked all-visible, and vm_buffer is the buffer containing the
8837 * corresponding visibility map block. Both should have already been modified
8838 * and dirtied.
8839 *
8840 * snapshotConflictHorizon comes from the largest xmin on the page being
8841 * marked all-visible. REDO routine uses it to generate recovery conflicts.
8842 *
8843 * If checksums or wal_log_hints are enabled, we may also generate a full-page
8844 * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8845 * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8846 * update the heap page's LSN.
8847 */
8848XLogRecPtr
8849log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8850 TransactionId snapshotConflictHorizon, uint8 vmflags)
8851{
8852 xl_heap_visible xlrec;
8853 XLogRecPtr recptr;
8854 uint8 flags;
8855
8856 Assert(BufferIsValid(heap_buffer));
8857 Assert(BufferIsValid(vm_buffer));
8858
8859 xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
8860 xlrec.flags = vmflags;
8861 if (RelationIsAccessibleInLogicalDecoding(rel))
8862 xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
8863 XLogBeginInsert();
8864 XLogRegisterData(&xlrec, SizeOfHeapVisible);
8865
8866 XLogRegisterBuffer(0, vm_buffer, 0);
8867
8868 flags = REGBUF_STANDARD;
8869 if (!XLogHintBitIsNeeded())
8870 flags |= REGBUF_NO_IMAGE;
8871 XLogRegisterBuffer(1, heap_buffer, flags);
8872
8873 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8874
8875 return recptr;
8876}
8877
8878/*
8879 * Perform XLogInsert for a heap-update operation. Caller must already
8880 * have modified the buffer(s) and marked them dirty.
8881 */
8882static XLogRecPtr
8884 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
8885 HeapTuple old_key_tuple,
8886 bool all_visible_cleared, bool new_all_visible_cleared)
8887{
8888 xl_heap_update xlrec;
8889 xl_heap_header xlhdr;
8890 xl_heap_header xlhdr_idx;
8891 uint8 info;
8892 uint16 prefix_suffix[2];
8893 uint16 prefixlen = 0,
8894 suffixlen = 0;
8895 XLogRecPtr recptr;
8896 Page page = BufferGetPage(newbuf);
8897 bool need_tuple_data = RelationIsLogicallyLogged(reln);
8898 bool init;
8899 int bufflags;
8900
8901 /* Caller should not call me on a non-WAL-logged relation */
8902 Assert(RelationNeedsWAL(reln));
8903
8904 XLogBeginInsert();
8905
8906 if (HeapTupleIsHeapOnly(newtup))
8907 info = XLOG_HEAP_HOT_UPDATE;
8908 else
8909 info = XLOG_HEAP_UPDATE;
8910
8911 /*
8912 * If the old and new tuple are on the same page, we only need to log the
8913 * parts of the new tuple that were changed. That saves on the amount of
8914 * WAL we need to write. Currently, we just count any unchanged bytes in
8915 * the beginning and end of the tuple. That's quick to check, and
8916 * perfectly covers the common case that only one field is updated.
8917 *
8918 * We could do this even if the old and new tuple are on different pages,
8919 * but only if we don't make a full-page image of the old page, which is
8920 * difficult to know in advance. Also, if the old tuple is corrupt for
8921 * some reason, it would allow the corruption to propagate to the new page,
8922 * so it seems best to avoid. Under the general assumption that most
8923 * updates tend to create the new tuple version on the same page, there
8924 * isn't much to be gained by doing this across pages anyway.
8925 *
8926 * Skip this if we're taking a full-page image of the new page, as we
8927 * don't include the new tuple in the WAL record in that case. Also
8928 * disable if effective_wal_level='logical', as logical decoding needs to
8929 * be able to read the new tuple in whole from the WAL record alone.
8930 */
8931 if (oldbuf == newbuf && !need_tuple_data &&
8932 !XLogCheckBufferNeedsBackup(newbuf))
8933 {
8934 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8935 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8936 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8937 int newlen = newtup->t_len - newtup->t_data->t_hoff;
8938
8939 /* Check for common prefix between old and new tuple */
8940 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8941 {
8942 if (newp[prefixlen] != oldp[prefixlen])
8943 break;
8944 }
8945
8946 /*
8947 * Storing the length of the prefix takes 2 bytes, so we need to save
8948 * at least 3 bytes or there's no point.
8949 */
8950 if (prefixlen < 3)
8951 prefixlen = 0;
8952
8953 /* Same for suffix */
8954 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
8955 {
8956 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8957 break;
8958 }
8959 if (suffixlen < 3)
8960 suffixlen = 0;
8961 }
8962
8963 /* Prepare main WAL data chain */
8964 xlrec.flags = 0;
8965 if (all_visible_cleared)
8966 xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
8967 if (new_all_visible_cleared)
8968 xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
8969 if (prefixlen > 0)
8970 xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
8971 if (suffixlen > 0)
8972 xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
8973 if (need_tuple_data)
8974 {
8975 xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
8976 if (old_key_tuple)
8977 {
8978 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
8979 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
8980 else
8981 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
8982 }
8983 }
8984
8985 /* If new tuple is the single and first tuple on page... */
8986 if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
8987 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
8988 {
8989 info |= XLOG_HEAP_INIT_PAGE;
8990 init = true;
8991 }
8992 else
8993 init = false;
8994
8995 /* Prepare WAL data for the old page */
8996 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
8997 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
8998 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
8999 oldtup->t_data->t_infomask2);
9000
9001 /* Prepare WAL data for the new page */
9002 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
9003 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
9004
9005 bufflags = REGBUF_STANDARD;
9006 if (init)
9007 bufflags |= REGBUF_WILL_INIT;
9008 if (need_tuple_data)
9009 bufflags |= REGBUF_KEEP_DATA;
9010
9011 XLogRegisterBuffer(0, newbuf, bufflags);
9012 if (oldbuf != newbuf)
9013 XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
9014
9015 XLogRegisterData(&xlrec, SizeOfHeapUpdate);
9016
9017 /*
9018 * Prepare WAL data for the new tuple.
9019 */
9020 if (prefixlen > 0 || suffixlen > 0)
9021 {
9022 if (prefixlen > 0 && suffixlen > 0)
9023 {
9024 prefix_suffix[0] = prefixlen;
9025 prefix_suffix[1] = suffixlen;
9026 XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
9027 }
9028 else if (prefixlen > 0)
9029 {
9030 XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
9031 }
9032 else
9033 {
9034 XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
9035 }
9036 }
9037
9038 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
9039 xlhdr.t_infomask = newtup->t_data->t_infomask;
9040 xlhdr.t_hoff = newtup->t_data->t_hoff;
9041 Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
9042
9043 /*
9044 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9045 *
9046 * The 'data' doesn't include the common prefix or suffix.
9047 */
9048 XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
9049 if (prefixlen == 0)
9050 {
9051 XLogRegisterBufData(0,
9052 (char *) newtup->t_data + SizeofHeapTupleHeader,
9053 newtup->t_len - SizeofHeapTupleHeader - suffixlen);
9054 }
9055 else
9056 {
9057 /*
9058 * Have to write the null bitmap and data after the common prefix as
9059 * two separate rdata entries.
9060 */
9061 /* bitmap [+ padding] [+ oid] */
9062 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9063 {
9064 XLogRegisterBufData(0,
9065 (char *) newtup->t_data + SizeofHeapTupleHeader,
9066 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9067 }
9068
9069 /* data after common prefix */
9070 XLogRegisterBufData(0,
9071 (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
9072 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9073 }
9074
9075 /* We need to log a tuple identity */
9076 if (need_tuple_data && old_key_tuple)
9077 {
9078 /* don't really need this, but it's more comfy to decode */
9079 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9080 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9081 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9082
9083 XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
9084
9085 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
9086 XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
9087 old_key_tuple->t_len - SizeofHeapTupleHeader);
9088 }
9089
9090 /* filtering by origin on a row level is much more efficient */
9091 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
9092
9093 recptr = XLogInsert(RM_HEAP_ID, info);
9094
9095 return recptr;
9096}
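The prefix/suffix compression above is easiest to study in isolation. Below is a minimal, self-contained sketch (not part of heapam.c; delta_encode_lens and delta_decode are illustrative names) of the same computation over two plain byte buffers, including the 3-byte threshold and the rule that the suffix scan must stop before it overlaps the prefix:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/*
 * Compute shared prefix/suffix lengths of the old and new tuple data the
 * same way log_heap_update() does: storing a length takes 2 bytes, so a
 * match shorter than 3 bytes is reported as 0 (not worth logging).
 */
static void
delta_encode_lens(const char *oldp, size_t oldlen,
                  const char *newp, size_t newlen,
                  uint16_t *prefixlen, uint16_t *suffixlen)
{
    size_t      minlen = (oldlen < newlen) ? oldlen : newlen;
    size_t      p = 0,
                s = 0;

    while (p < minlen && oldp[p] == newp[p])
        p++;
    if (p < 3)
        p = 0;

    /* the suffix may not overlap the prefix, hence the minlen - p bound */
    while (s < minlen - p && oldp[oldlen - s - 1] == newp[newlen - s - 1])
        s++;
    if (s < 3)
        s = 0;

    *prefixlen = (uint16_t) p;
    *suffixlen = (uint16_t) s;
}

/* Redo side: rebuild the new data from the old data plus the logged middle. */
static void
delta_decode(const char *oldp, size_t oldlen,
             const char *middle, size_t middlelen,
             uint16_t prefixlen, uint16_t suffixlen, char *dst)
{
    memcpy(dst, oldp, prefixlen);               /* shared prefix  */
    memcpy(dst + prefixlen, middle, middlelen); /* logged bytes   */
    memcpy(dst + prefixlen + middlelen,         /* shared suffix  */
           oldp + oldlen - suffixlen, suffixlen);
}

On replay, redo effectively performs delta_decode(): it copies the prefix and suffix from the old tuple version and the logged middle from the WAL record, which is why the old tuple must remain readable on the same page and must not itself be hidden behind a full-page image.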
9097
9098/*
9099 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9100 *
9101 * This is only used when effective_wal_level is logical, and only for
9102 * catalog tuples.
9103 */
9104static XLogRecPtr
9105log_heap_new_cid(Relation relation, HeapTuple tup)
9106{
9107 xl_heap_new_cid xlrec;
9108
9109 XLogRecPtr recptr;
9110 HeapTupleHeader hdr = tup->t_data;
9111
9112 Assert(ItemPointerIsValid(&tup->t_self));
9113 Assert(tup->t_tableOid != InvalidOid);
9114
9115 xlrec.top_xid = GetTopTransactionId();
9116 xlrec.target_locator = relation->rd_locator;
9117 xlrec.target_tid = tup->t_self;
9118
9119 /*
9120 * If the tuple got inserted & deleted in the same TX we definitely have a
9121 * combo CID, set cmin and cmax.
9122 */
9123 if (hdr->t_infomask & HEAP_COMBOCID)
9124 {
9125 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
9126 Assert(!HeapTupleHeaderXminInvalid(hdr));
9127 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9128 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9129 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9130 }
9131 /* No combo CID, so only cmin or cmax can be set by this TX */
9132 else
9133 {
9134 /*
9135 * Tuple inserted.
9136 *
9137 * We need to check for LOCK ONLY because multixacts might be
9138 * transferred to the new tuple in case of FOR KEY SHARE updates in
9139 * which case there will be an xmax, although the tuple just got
9140 * inserted.
9141 */
9142 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9143 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
9144 {
9145 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
9146 xlrec.cmax = InvalidCommandId;
9147 }
9148 /* Tuple from a different tx updated or deleted. */
9149 else
9150 {
9151 xlrec.cmin = InvalidCommandId;
9152 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
9153 }
9154 xlrec.combocid = InvalidCommandId;
9155 }
9156
9157 /*
9158 * Note that we don't need to register the buffer here, because this
9159 * operation does not modify the page. The insert/update/delete that
9160 * called us certainly did, but that's WAL-logged separately.
9161 */
9162 XLogBeginInsert();
9163 XLogRegisterData(&xlrec, SizeOfHeapNewCid);
9164
9165 /* will be looked at irrespective of origin */
9166
9167 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
9168
9169 return recptr;
9170}
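The branches above reduce to a three-way choice that is worth seeing side by side. Here is a condensed restatement (choose_new_cid_fields is a hypothetical helper; the two infomask tests are folded into booleans, and the typedef mirrors the real CommandId/InvalidCommandId from c.h):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t CommandId;
#define InvalidCommandId ((CommandId) 0xFFFFFFFF)

/*
 * Mirrors the cmin/cmax/combocid selection in log_heap_new_cid():
 * - combo CID: tuple inserted and deleted in the same transaction, so
 *   both cmin and cmax are valid and the raw command id is the combo;
 * - xmax invalid or lock-only: we inserted it, so the raw id is cmin;
 * - otherwise: we updated/deleted another xact's tuple, so it is cmax.
 */
static void
choose_new_cid_fields(bool has_combocid, bool xmax_invalid_or_lock_only,
                      CommandId raw_cmd_id, CommandId cmin, CommandId cmax,
                      CommandId *out_cmin, CommandId *out_cmax,
                      CommandId *out_combocid)
{
    if (has_combocid)
    {
        *out_cmin = cmin;
        *out_cmax = cmax;
        *out_combocid = raw_cmd_id;
    }
    else if (xmax_invalid_or_lock_only)
    {
        *out_cmin = raw_cmd_id;
        *out_cmax = InvalidCommandId;
        *out_combocid = InvalidCommandId;
    }
    else
    {
        *out_cmin = InvalidCommandId;
        *out_cmax = raw_cmd_id;
        *out_combocid = InvalidCommandId;
    }
}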
9171
9172/*
9173 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9174 * the old tuple in an UPDATE or DELETE.
9175 *
9176 * Returns NULL if there's no need to log an identity or if there's no suitable
9177 * key defined.
9178 *
9179 * Pass key_required true if any replica identity columns changed value, or if
9180 * any of them have any external data. Delete must always pass true.
9181 *
9182 * *copy is set to true if the returned tuple is a modified copy rather than
9183 * the same tuple that was passed in.
9184 */
9185static HeapTuple
9186ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
9187 bool *copy)
9188{
9189 TupleDesc desc = RelationGetDescr(relation);
9190 char replident = relation->rd_rel->relreplident;
9191 Bitmapset *idattrs;
9192 HeapTuple key_tuple;
9193 bool nulls[MaxHeapAttributeNumber];
9194 Datum values[MaxHeapAttributeNumber];
9195
9196 *copy = false;
9197
9198 if (!RelationIsLogicallyLogged(relation))
9199 return NULL;
9200
9201 if (replident == REPLICA_IDENTITY_NOTHING)
9202 return NULL;
9203
9204 if (replident == REPLICA_IDENTITY_FULL)
9205 {
9206 /*
9207 * When logging the entire old tuple, it very well could contain
9208 * toasted columns. If so, force them to be inlined.
9209 */
9210 if (HeapTupleHasExternal(tp))
9211 {
9212 *copy = true;
9213 tp = toast_flatten_tuple(tp, desc);
9214 }
9215 return tp;
9216 }
9217
9218 /* if the key isn't required and we're only logging the key, we're done */
9219 if (!key_required)
9220 return NULL;
9221
9222 /* find out the replica identity columns */
9223 idattrs = RelationGetIndexAttrBitmap(relation,
9224 INDEX_ATTR_BITMAP_IDENTITY_KEY);
9225
9226 /*
9227 * If there are no defined replica identity columns, treat as !key_required.
9228 * (This case should not be reachable from heap_update, since that should
9229 * calculate key_required accurately. But heap_delete just passes
9230 * constant true for key_required, so we can hit this case in deletes.)
9231 */
9232 if (bms_is_empty(idattrs))
9233 return NULL;
9234
9235 /*
9236 * Construct a new tuple containing only the replica identity columns,
9237 * with nulls elsewhere. While we're at it, assert that the replica
9238 * identity columns aren't null.
9239 */
9240 heap_deform_tuple(tp, desc, values, nulls);
9241
9242 for (int i = 0; i < desc->natts; i++)
9243 {
9244 if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
9245 idattrs))
9246 Assert(!nulls[i]);
9247 else
9248 nulls[i] = true;
9249 }
9250
9251 key_tuple = heap_form_tuple(desc, values, nulls);
9252 *copy = true;
9253
9254 bms_free(idattrs);
9255
9256 /*
9257 * If the tuple, which at this point contains only the replica identity
9258 * columns, still has toasted columns, force them to be inlined. This is
9259 * somewhat unlikely, since there are limits on the size of indexed
9260 * columns, so we don't duplicate toast_flatten_tuple()'s functionality
9261 * in the above loop over the indexed columns, even if it would be more efficient.
9262 */
9263 if (HeapTupleHasExternal(key_tuple))
9264 {
9265 HeapTuple oldtup = key_tuple;
9266
9267 key_tuple = toast_flatten_tuple(oldtup, desc);
9268 heap_freetuple(oldtup);
9269 }
9270
9271 return key_tuple;
9272}
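Stripped of the tuple manipulation, the function above is a small decision procedure over the relation's replica identity setting. The sketch below restates it (identity_log_choice is an illustrative name; the character codes are the stored pg_class.relreplident values):

#include <stdbool.h>

typedef enum
{
    IDENTITY_LOG_NOTHING,       /* return NULL: no old-tuple data in WAL */
    IDENTITY_LOG_FULL_TUPLE,    /* whole old tuple, toast fields inlined */
    IDENTITY_LOG_KEY_COLUMNS    /* identity columns only, rest set NULL  */
} IdentityLogChoice;

/*
 * Condensed control flow of ExtractReplicaIdentity().  key_required is
 * false only for updates that changed no replica identity column and
 * carry no external identity data; deletes always pass true.
 */
static IdentityLogChoice
identity_log_choice(bool logically_logged, char replident,
                    bool key_required, bool have_identity_columns)
{
    if (!logically_logged)
        return IDENTITY_LOG_NOTHING;
    if (replident == 'n')       /* REPLICA_IDENTITY_NOTHING */
        return IDENTITY_LOG_NOTHING;
    if (replident == 'f')       /* REPLICA_IDENTITY_FULL */
        return IDENTITY_LOG_FULL_TUPLE;
    if (!key_required || !have_identity_columns)
        return IDENTITY_LOG_NOTHING;
    return IDENTITY_LOG_KEY_COLUMNS;    /* 'd' (default) or 'i' (index) */
}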
9273
9274/*
9275 * HeapCheckForSerializableConflictOut
9276 * We are reading a tuple. If it's not visible, there may be a
9277 * rw-conflict out with the inserter. Otherwise, if it is visible to us
9278 * but has been deleted, there may be a rw-conflict out with the deleter.
9279 *
9280 * We will determine the top level xid of the writing transaction with which
9281 * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9282 * for overlap with our own transaction.
9283 *
9284 * This function should be called just about anywhere in heapam.c where a
9285 * tuple has been read. The caller must hold at least a shared lock on the
9286 * buffer, because this function might set hint bits on the tuple. There is
9287 * currently no known reason to call this function from an index AM.
9288 */
9289void
9290HeapCheckForSerializableConflictOut(bool visible, Relation relation,
9291 HeapTuple tuple, Buffer buffer,
9292 Snapshot snapshot)
9293{
9294 TransactionId xid;
9295 HTSV_Result htsvResult;
9296
9297 if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9298 return;
9299
9300 /*
9301 * Check to see whether the tuple has been written to by a concurrent
9302 * transaction, either to create it not visible to us, or to delete it
9303 * while it is visible to us. The "visible" bool indicates whether the
9304 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9305 * is going on with it.
9306 *
9307 * In the event of a concurrently inserted tuple that also happens to have
9308 * been concurrently updated (by a separate transaction), the xmin of the
9309 * tuple will be used -- not the updater's xid.
9310 */
9311 htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
9312 switch (htsvResult)
9313 {
9314 case HEAPTUPLE_LIVE:
9315 if (visible)
9316 return;
9317 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9318 break;
9319 case HEAPTUPLE_RECENTLY_DEAD:
9320 case HEAPTUPLE_DELETE_IN_PROGRESS:
9321 if (visible)
9322 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9323 else
9324 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9325
9326 if (TransactionIdPrecedes(xid, TransactionXmin))
9327 {
9328 /* This is like the HEAPTUPLE_DEAD case */
9329 Assert(!visible);
9330 return;
9331 }
9332 break;
9333 case HEAPTUPLE_INSERT_IN_PROGRESS:
9334 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9335 break;
9336 case HEAPTUPLE_DEAD:
9337 Assert(!visible);
9338 return;
9339 default:
9340
9341 /*
9342 * The only way to get to this default clause is if a new value is
9343 * added to the enum type without adding it to this switch
9344 * statement. That's a bug, so elog.
9345 */
9346 elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9347
9348 /*
9349 * In spite of having all enum values covered and calling elog on
9350 * this default, some compilers think this is a code path which
9351 * allows xid to be used below without initialization. Silence
9352 * that warning.
9353 */
9354 xid = InvalidTransactionId;
9355 }
9356
9357 Assert(TransactionIdIsValid(xid));
9358 Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
9359
9360 /*
9361 * Find top level xid. Bail out if xid is too early to be a conflict, or
9362 * if it's our own xid.
9363 */
9364 if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
9365 return;
9366 xid = SubTransGetTopmostTransaction(xid);
9367 if (TransactionIdPrecedes(xid, TransactionXmin))
9368 return;
9369
9370 CheckForSerializableConflictOut(relation, xid, snapshot);
9371}
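Viewed from a distance, the switch above only decides which writer, if any, to test for a rw-conflict. A compact restatement (conflict_target is a hypothetical name; HTSV_Result and its values are assumed to come from heapam.h, so this only compiles inside the source tree):

#include "access/heapam.h"      /* HTSV_Result */

typedef enum { CHECK_NONE, CHECK_INSERTER, CHECK_DELETER } ConflictTarget;

/*
 * Which transaction's xid HeapCheckForSerializableConflictOut() ends up
 * testing: the inserter's xmin, the deleter's update xid, or nobody.
 * The too-early-xid and own-xid bailouts are applied afterwards.
 */
static ConflictTarget
conflict_target(HTSV_Result htsv, bool visible)
{
    switch (htsv)
    {
        case HEAPTUPLE_LIVE:
            return visible ? CHECK_NONE : CHECK_INSERTER;
        case HEAPTUPLE_RECENTLY_DEAD:
        case HEAPTUPLE_DELETE_IN_PROGRESS:
            return visible ? CHECK_DELETER : CHECK_INSERTER;
        case HEAPTUPLE_INSERT_IN_PROGRESS:
            return CHECK_INSERTER;
        case HEAPTUPLE_DEAD:
            return CHECK_NONE;
    }
    return CHECK_NONE;          /* unreachable; silences compilers */
}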