1/*-------------------------------------------------------------------------
2 *
3 * heapam.c
4 * heap access method code
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/heap/heapam.c
12 *
13 *
14 * INTERFACE ROUTINES
15 * heap_beginscan - begin relation scan
16 * heap_rescan - restart a relation scan
17 * heap_endscan - end relation scan
18 * heap_getnext - retrieve next tuple in scan
19 * heap_fetch - retrieve tuple with given tid
20 * heap_insert - insert tuple into a relation
21 * heap_multi_insert - insert multiple tuples into a relation
22 * heap_delete - delete a tuple from a relation
23 * heap_update - replace a tuple in a relation with another tuple
24 *
25 * NOTES
26 * This file contains the heap_ routines which implement
27 * the POSTGRES heap access method used for all POSTGRES
28 * relations.
29 *
30 *-------------------------------------------------------------------------
31 */
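/*
 * Illustrative sketch (not part of the original file): a minimal serial
 * sequential scan driven directly through the interface routines listed
 * above.  "rel" and "snapshot" stand for a relation the caller has open
 * and a registered snapshot; real callers normally go through the table
 * AM wrappers (table_beginscan() and friends) instead, and the flag
 * combination shown is only a typical assumption.
 *
 *		TableScanDesc scan;
 *		HeapTuple	tuple;
 *
 *		scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
 *							  SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
 *							  SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			... process the visible tuple; it is only safe to use while
 *			the scan holds its buffer pin ...
 *		}
 *		heap_endscan(scan);
 */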
32#include "postgres.h"
33
34#include "access/heapam.h"
35#include "access/heaptoast.h"
36#include "access/hio.h"
37#include "access/multixact.h"
38#include "access/subtrans.h"
39#include "access/syncscan.h"
40#include "access/valid.h"
41#include "access/visibilitymap.h"
42#include "access/xloginsert.h"
43#include "catalog/pg_database.h"
44#include "catalog/pg_database_d.h"
45#include "commands/vacuum.h"
46#include "pgstat.h"
47#include "port/pg_bitutils.h"
48#include "storage/lmgr.h"
49#include "storage/predicate.h"
50#include "storage/proc.h"
51#include "storage/procarray.h"
52#include "utils/datum.h"
54#include "utils/inval.h"
55#include "utils/spccache.h"
56#include "utils/syscache.h"
57
58
65#ifdef USE_ASSERT_CHECKING
67 const ItemPointerData *otid,
70#endif
75 bool *has_external);
76static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
78 bool *have_tuple_lock);
80 BlockNumber block,
81 ScanDirection dir);
83 ScanDirection dir);
93 TransactionId xid,
98 uint16 t_infomask);
100 LockTupleMode lockmode, bool *current_is_member);
102 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
103 int *remaining);
106 bool logLockFailure);
111 bool *copy);
112
113
114/*
115 * This table lists the heavyweight lock mode that corresponds to each tuple
116 * lock mode, as well as one or two corresponding MultiXactStatus values:
117 * .lockstatus to merely lock tuples, and .updstatus to update them. The
118 * latter is set to -1 if the corresponding tuple lock mode does not allow
119 * updating tuples -- see get_mxact_status_for_lock().
120 *
121 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
122 *
123 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
124 * instead.
125 */
126static const struct
127{
132
133{
134 [LockTupleKeyShare] = {
135 .hwlock = AccessShareLock,
136 .lockstatus = MultiXactStatusForKeyShare,
137 /* KeyShare does not allow updating tuples */
138 .updstatus = -1
139 },
140 [LockTupleShare] = {
141 .hwlock = RowShareLock,
142 .lockstatus = MultiXactStatusForShare,
143 /* Share does not allow updating tuples */
144 .updstatus = -1
145 },
146 [LockTupleNoKeyExclusive] = {
147 .hwlock = ExclusiveLock,
148 .lockstatus = MultiXactStatusForNoKeyUpdate,
149 .updstatus = MultiXactStatusNoKeyUpdate
150 },
151 [LockTupleExclusive] = {
152 .hwlock = AccessExclusiveLock,
153 .lockstatus = MultiXactStatusForUpdate,
154 .updstatus = MultiXactStatusUpdate
155 }
157
158/* Get the LOCKMODE for a given MultiXactStatus */
159#define LOCKMODE_from_mxstatus(status) \
160 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
161
162/*
163 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
164 * This is more readable than having every caller translate it to lock.h's
165 * LOCKMODE.
166 */
167#define LockTupleTuplock(rel, tup, mode) \
168 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
169#define UnlockTupleTuplock(rel, tup, mode) \
170 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
171#define ConditionalLockTupleTuplock(rel, tup, mode, log) \
172 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
173
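/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * callers needing the heavyweight per-tuple lock bracket their work with
 * the macros above, e.g.
 *
 *		LockTupleTuplock(relation, &tuple->t_self, LockTupleExclusive);
 *		... examine or modify the tuple ...
 *		UnlockTupleTuplock(relation, &tuple->t_self, LockTupleExclusive);
 *
 * which acquires and releases the tuple lock in the AccessExclusiveLock
 * mode that tupleLockExtraInfo[] assigns to LockTupleExclusive.
 */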
174#ifdef USE_PREFETCH
175/*
176 * heap_index_delete_tuples and index_delete_prefetch_buffer use this
177 * structure to coordinate prefetching activity
178 */
179typedef struct
180{
181 BlockNumber cur_hblkno;
182 int next_item;
183 int ndeltids;
184 TM_IndexDelete *deltids;
185} IndexDeletePrefetchState;
186#endif
187
188/* heap_index_delete_tuples bottom-up index deletion costing constants */
189#define BOTTOMUP_MAX_NBLOCKS 6
190#define BOTTOMUP_TOLERANCE_NBLOCKS 3
191
192/*
193 * heap_index_delete_tuples uses this when determining which heap blocks it
194 * must visit to help its bottom-up index deletion caller
195 */
196typedef struct IndexDeleteCounts
197{
198 int16 npromisingtids; /* Number of "promising" TIDs in group */
199 int16 ntids; /* Number of TIDs in group */
200 int16 ifirsttid; /* Offset to group's first deltid */
201} IndexDeleteCounts;
202
203/*
204 * This table maps tuple lock strength values for each particular
205 * MultiXactStatus value.
206 */
207static const LockTupleMode MultiXactStatusLock[MaxMultiXactStatus + 1] =
208{
209 LockTupleKeyShare, /* ForKeyShare */
210 LockTupleShare, /* ForShare */
211 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
212 LockTupleExclusive, /* ForUpdate */
213 LockTupleNoKeyExclusive, /* NoKeyUpdate */
214 LockTupleExclusive /* Update */
215};
216
217/* Get the LockTupleMode for a given MultiXactStatus */
218#define TUPLOCK_from_mxstatus(status) \
219 (MultiXactStatusLock[(status)])
220
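/*
 * Worked example (illustrative, not part of the original file): for a
 * tuple locked with MultiXactStatusForShare the two mappings above give
 *
 *		TUPLOCK_from_mxstatus(MultiXactStatusForShare) == LockTupleShare
 *		LOCKMODE_from_mxstatus(MultiXactStatusForShare) == RowShareLock
 *
 * i.e. the MultiXactStatus is first translated to a LockTupleMode and
 * then, via tupleLockExtraInfo[], to the heavyweight lock mode to use.
 */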
221/*
222 * Check that we have a valid snapshot if we might need TOAST access.
223 */
224static inline void
225AssertHasSnapshotForToast(Relation rel)
226{
227#ifdef USE_ASSERT_CHECKING
228
229 /* bootstrap mode in particular breaks this rule */
231 return;
232
233 /* if the relation doesn't have a TOAST table, we are good */
234 if (!OidIsValid(rel->rd_rel->reltoastrelid))
235 return;
236
238
239#endif /* USE_ASSERT_CHECKING */
240}
241
242/* ----------------------------------------------------------------
243 * heap support routines
244 * ----------------------------------------------------------------
245 */
246
247/*
248 * Streaming read API callback for parallel sequential scans. Returns the next
249 * block the caller wants from the read stream or InvalidBlockNumber when done.
250 */
251static BlockNumber
253 void *callback_private_data,
254 void *per_buffer_data)
255{
256 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
257
260
261 if (unlikely(!scan->rs_inited))
262 {
263 /* parallel scan */
267 scan->rs_startblock,
268 scan->rs_numblocks);
269
270 /* may return InvalidBlockNumber if there are no more blocks */
274 scan->rs_inited = true;
275 }
276 else
277 {
280 scan->rs_base.rs_parallel);
281 }
282
283 return scan->rs_prefetch_block;
284}
285
286/*
287 * Streaming read API callback for serial sequential and TID range scans.
288 * Returns the next block the caller wants from the read stream or
289 * InvalidBlockNumber when done.
290 */
291static BlockNumber
293 void *callback_private_data,
294 void *per_buffer_data)
295{
296 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
297
298 if (unlikely(!scan->rs_inited))
299 {
301 scan->rs_inited = true;
302 }
303 else
305 scan->rs_prefetch_block,
306 scan->rs_dir);
307
308 return scan->rs_prefetch_block;
309}
310
311/*
312 * Read stream API callback for bitmap heap scans.
313 * Returns the next block the caller wants from the read stream or
314 * InvalidBlockNumber when done.
315 */
316static BlockNumber
318 void *per_buffer_data)
319{
320 TBMIterateResult *tbmres = per_buffer_data;
323 TableScanDesc sscan = &hscan->rs_base;
324
325 for (;;)
326 {
328
329 /* no more entries in the bitmap */
330 if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
331 return InvalidBlockNumber;
332
333 /*
334 * Ignore any claimed entries past what we think is the end of the
335 * relation. It may have been extended after the start of our scan (we
336 * only hold an AccessShareLock, and it could be inserts from this
337 * backend). We don't take this optimization in SERIALIZABLE
338 * isolation though, as we need to examine all invisible tuples
339 * reachable by the index.
340 */
342 tbmres->blockno >= hscan->rs_nblocks)
343 continue;
344
345 return tbmres->blockno;
346 }
347
348 /* not reachable */
349 Assert(false);
350}
351
352/* ----------------
353 * initscan - scan code common to heap_beginscan and heap_rescan
354 * ----------------
355 */
356static void
358{
360 bool allow_strat;
361 bool allow_sync;
362
363 /*
364 * Determine the number of blocks we have to scan.
365 *
366 * It is sufficient to do this once at scan start, since any tuples added
367 * while the scan is in progress will be invisible to my snapshot anyway.
368 * (That is not true when using a non-MVCC snapshot. However, we couldn't
369 * guarantee to return tuples added after scan start anyway, since they
370 * might go into pages we already scanned. To guarantee consistent
371 * results for a non-MVCC snapshot, the caller must hold some higher-level
372 * lock that ensures the interesting tuple(s) won't change.)
373 */
374 if (scan->rs_base.rs_parallel != NULL)
375 {
377 scan->rs_nblocks = bpscan->phs_nblocks;
378 }
379 else
381
382 /*
383 * If the table is large relative to NBuffers, use a bulk-read access
384 * strategy and enable synchronized scanning (see syncscan.c). Although
385 * the thresholds for these features could be different, we make them the
386 * same so that there are only two behaviors to tune rather than four.
387 * (However, some callers need to be able to disable one or both of these
388 * behaviors, independently of the size of the table; also there is a GUC
389 * variable that can disable synchronized scanning.)
390 *
391 * Note that table_block_parallelscan_initialize has a very similar test;
392 * if you change this, consider changing that one, too.
393 */
395 scan->rs_nblocks > NBuffers / 4)
396 {
398 allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
399 }
400 else
401 allow_strat = allow_sync = false;
402
403 if (allow_strat)
404 {
405 /* During a rescan, keep the previous strategy object. */
406 if (scan->rs_strategy == NULL)
408 }
409 else
410 {
411 if (scan->rs_strategy != NULL)
413 scan->rs_strategy = NULL;
414 }
415
416 if (scan->rs_base.rs_parallel != NULL)
417 {
418 /* For parallel scan, believe whatever ParallelTableScanDesc says. */
421 else
423
424 /*
425 * If not rescanning, initialize the startblock. Finding the actual
426 * start location is done in table_block_parallelscan_startblock_init,
427 * based on whether an alternative start location has been set with
428 * heap_setscanlimits, or using the syncscan location, when syncscan
429 * is enabled.
430 */
431 if (!keep_startblock)
433 }
434 else
435 {
436 if (keep_startblock)
437 {
438 /*
439 * When rescanning, we want to keep the previous startblock
440 * setting, so that rewinding a cursor doesn't generate surprising
441 * results. Reset the active syncscan setting, though.
442 */
445 else
447 }
449 {
452 }
453 else
454 {
456 scan->rs_startblock = 0;
457 }
458 }
459
461 scan->rs_inited = false;
462 scan->rs_ctup.t_data = NULL;
464 scan->rs_cbuf = InvalidBuffer;
466 scan->rs_ntuples = 0;
467 scan->rs_cindex = 0;
468
469 /*
470 * Initialize to ForwardScanDirection because it is most common and
471 * because heap scans go forward before going backward (e.g. CURSORs).
472 */
475
476 /* page-at-a-time fields are always invalid when not rs_inited */
477
478 /*
479 * copy the scan key, if appropriate
480 */
481 if (key != NULL && scan->rs_base.rs_nkeys > 0)
482 memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
483
484 /*
485 * Currently, we only have a stats counter for sequential heap scans (but
486 * e.g. for bitmap scans the underlying bitmap index scans will be counted,
487 * and for sample scans we update stats for tuple fetches).
488 */
489 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
491}
492
493/*
494 * heap_setscanlimits - restrict range of a heapscan
495 *
496 * startBlk is the page to start at
497 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
498 */
499void
501{
503
504 Assert(!scan->rs_inited); /* else too late to change */
505 /* else rs_startblock is significant */
507
508 /* Check startBlk is valid (but allow case of zero blocks...) */
509 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
510
511 scan->rs_startblock = startBlk;
512 scan->rs_numblocks = numBlks;
513}
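/*
 * Illustrative sketch (assumed usage, not part of the original file): to
 * scan only blocks 10..14 of a relation, the limits must be installed
 * after heap_beginscan() but before the first tuple is fetched, and the
 * scan must not have SO_ALLOW_SYNC set:
 *
 *		scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
 *							  SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE);
 *		heap_setscanlimits(scan, 10, 5);
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *			...
 *		heap_endscan(scan);
 */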
514
515/*
516 * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
517 * multiple times, with constant arguments for all_visible,
518 * check_serializable.
519 */
521static int
523 Page page, Buffer buffer,
524 BlockNumber block, int lines,
525 bool all_visible, bool check_serializable)
526{
527 Oid relid = RelationGetRelid(scan->rs_base.rs_rd);
528 int ntup = 0;
529 int nvis = 0;
531
532 /* page at a time should have been disabled otherwise */
533 Assert(IsMVCCSnapshot(snapshot));
534
535 /* first find all tuples on the page */
537 {
540
542 continue;
543
544 /*
545 * If the page is not all-visible or we need to check serializability,
546 * maintain enough state to be able to refind the tuple efficiently,
547 * without first needing to re-fetch the item pointer and, via that, the
548 * tuple.
549 */
550 if (!all_visible || check_serializable)
551 {
552 tup = &batchmvcc.tuples[ntup];
553
555 tup->t_len = ItemIdGetLength(lpp);
556 tup->t_tableOid = relid;
557 ItemPointerSet(&(tup->t_self), block, lineoff);
558 }
559
560 /*
561 * If the page is all visible, these fields otherwise won't be
562 * populated in the loop below.
563 */
564 if (all_visible)
565 {
567 {
568 batchmvcc.visible[ntup] = true;
569 }
570 scan->rs_vistuples[ntup] = lineoff;
571 }
572
573 ntup++;
574 }
575
577
578 /*
579 * Unless the page is all visible, test visibility for all tuples in one go.
580 * That is considerably more efficient than calling
581 * HeapTupleSatisfiesMVCC() one-by-one.
582 */
583 if (all_visible)
584 nvis = ntup;
585 else
586 nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
587 ntup,
588 &batchmvcc,
589 scan->rs_vistuples);
590
591 /*
592 * So far we don't have a batch API for testing serializability, so do so
593 * one-by-one.
594 */
596 {
597 for (int i = 0; i < ntup; i++)
598 {
600 scan->rs_base.rs_rd,
601 &batchmvcc.tuples[i],
602 buffer, snapshot);
603 }
604 }
605
606 return nvis;
607}
608
609/*
610 * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
611 *
612 * Preparation currently consists of 1. pruning the scan's rs_cbuf page, and
613 * 2. filling the rs_vistuples[] array with the OffsetNumbers of visible tuples.
614 */
615void
617{
619 Buffer buffer = scan->rs_cbuf;
620 BlockNumber block = scan->rs_cblock;
621 Snapshot snapshot;
622 Page page;
623 int lines;
624 bool all_visible;
626
627 Assert(BufferGetBlockNumber(buffer) == block);
628
629 /* ensure we're not accidentally being used when not in pagemode */
631 snapshot = scan->rs_base.rs_snapshot;
632
633 /*
634 * Prune and repair fragmentation for the whole page, if possible.
635 */
636 heap_page_prune_opt(scan->rs_base.rs_rd, buffer, &scan->rs_vmbuffer);
637
638 /*
639 * We must hold share lock on the buffer content while examining tuple
640 * visibility. Afterwards, however, the tuples we have found to be
641 * visible are guaranteed good as long as we hold the buffer pin.
642 */
644
645 page = BufferGetPage(buffer);
646 lines = PageGetMaxOffsetNumber(page);
647
648 /*
649 * If the all-visible flag indicates that all tuples on the page are
650 * visible to everyone, we can skip the per-tuple visibility tests.
651 *
652 * Note: In hot standby, a tuple that's already visible to all
653 * transactions on the primary might still be invisible to a read-only
654 * transaction in the standby. We partly handle this problem by tracking
655 * the minimum xmin of visible tuples as the cut-off XID while marking a
656 * page all-visible on the primary and WAL log that along with the
657 * visibility map SET operation. In hot standby, we wait for (or abort)
658 * all transactions that potentially may not see one or more tuples on
659 * the page. That's how index-only scans work fine in hot standby. A
660 * crucial difference between index-only scans and heap scans is that the
661 * index-only scan relies completely on the visibility map, whereas a heap
662 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
663 * the page-level flag can be trusted in the same way, because it might
664 * get propagated somehow without being explicitly WAL-logged, e.g. via a
665 * full page write. Until we can prove that beyond doubt, let's check each
666 * tuple for visibility the hard way.
667 */
668 all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
671
672 /*
673 * We call page_collect_tuples() with constant arguments, to get the
674 * compiler to constant fold the constant arguments. Separate calls with
675 * constant arguments, rather than variables, are needed on several
676 * compilers to actually perform constant folding.
677 */
678 if (likely(all_visible))
679 {
681 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
682 block, lines, true, false);
683 else
684 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
685 block, lines, true, true);
686 }
687 else
688 {
690 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
691 block, lines, false, false);
692 else
693 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
694 block, lines, false, true);
695 }
696
698}
699
700/*
701 * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
702 *
703 * Read the next block of the scan relation from the read stream and save it
704 * in the scan descriptor. It is already pinned.
705 */
706static inline void
708{
709 Assert(scan->rs_read_stream);
710
711 /* release previous scan buffer, if any */
712 if (BufferIsValid(scan->rs_cbuf))
713 {
714 ReleaseBuffer(scan->rs_cbuf);
715 scan->rs_cbuf = InvalidBuffer;
716 }
717
718 /*
719 * Be sure to check for interrupts at least once per page. Checks at
720 * higher code levels won't be able to stop a seqscan that encounters many
721 * pages' worth of consecutive dead tuples.
722 */
724
725 /*
726 * If the scan direction is changing, reset the prefetch block to the
727 * current block. Otherwise, we will incorrectly prefetch the blocks
728 * between the prefetch block and the current block again before
729 * prefetching blocks in the new, correct scan direction.
730 */
731 if (unlikely(scan->rs_dir != dir))
732 {
733 scan->rs_prefetch_block = scan->rs_cblock;
735 }
736
737 scan->rs_dir = dir;
738
740 if (BufferIsValid(scan->rs_cbuf))
742}
743
744/*
745 * heapgettup_initial_block - return the first BlockNumber to scan
746 *
747 * Returns InvalidBlockNumber when there are no blocks to scan. This can
748 * occur with empty tables and in parallel scans when parallel workers get all
749 * of the pages before we can get a chance to get our first page.
750 */
753{
754 Assert(!scan->rs_inited);
755 Assert(scan->rs_base.rs_parallel == NULL);
756
757 /* When there are no pages to scan, return InvalidBlockNumber */
758 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
759 return InvalidBlockNumber;
760
761 if (ScanDirectionIsForward(dir))
762 {
763 return scan->rs_startblock;
764 }
765 else
766 {
767 /*
768 * Disable reporting to syncscan logic in a backwards scan; it's not
769 * very likely anyone else is doing the same thing at the same time,
770 * and much more likely that we'll just bollix things for forward
771 * scanners.
772 */
774
775 /*
776 * Start from last page of the scan. Ensure we take into account
777 * rs_numblocks if it's been adjusted by heap_setscanlimits().
778 */
779 if (scan->rs_numblocks != InvalidBlockNumber)
780 return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
781
782 if (scan->rs_startblock > 0)
783 return scan->rs_startblock - 1;
784
785 return scan->rs_nblocks - 1;
786 }
787}
788
789
790/*
791 * heapgettup_start_page - helper function for heapgettup()
792 *
793 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
794 * to the number of tuples on this page. Also set *lineoff to the first
795 * offset to scan with forward scans getting the first offset and backward
796 * getting the final offset on the page.
797 */
798static Page
801{
802 Page page;
803
804 Assert(scan->rs_inited);
806
807 /* Caller is responsible for ensuring buffer is locked if needed */
808 page = BufferGetPage(scan->rs_cbuf);
809
811
812 if (ScanDirectionIsForward(dir))
814 else
816
817 /* lineoff now references the physically previous or next tid */
818 return page;
819}
820
821
822/*
823 * heapgettup_continue_page - helper function for heapgettup()
824 *
825 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
826 * to the number of tuples left to scan on this page. Also set *lineoff to
827 * the next offset to scan according to the ScanDirection in 'dir'.
828 */
829static inline Page
832{
833 Page page;
834
835 Assert(scan->rs_inited);
837
838 /* Caller is responsible for ensuring buffer is locked if needed */
839 page = BufferGetPage(scan->rs_cbuf);
840
841 if (ScanDirectionIsForward(dir))
842 {
844 *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
845 }
846 else
847 {
848 /*
849 * The previous returned tuple may have been vacuumed since the
850 * previous scan when we use a non-MVCC snapshot, so we must
851 * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
852 */
854 *linesleft = *lineoff;
855 }
856
857 /* lineoff now references the physically previous or next tid */
858 return page;
859}
860
861/*
862 * heapgettup_advance_block - helper for heap_fetch_next_buffer()
863 *
864 * Given the current block number, the scan direction, and various information
865 * contained in the scan descriptor, calculate the BlockNumber to scan next
866 * and return it. If there are no further blocks to scan, return
867 * InvalidBlockNumber to indicate this fact to the caller.
868 *
869 * This should not be called to determine the initial block number -- only for
870 * subsequent blocks.
871 *
872 * This also adjusts rs_numblocks when a limit has been imposed by
873 * heap_setscanlimits().
874 */
875static inline BlockNumber
877{
878 Assert(scan->rs_base.rs_parallel == NULL);
879
881 {
882 block++;
883
884 /* wrap back to the start of the heap */
885 if (block >= scan->rs_nblocks)
886 block = 0;
887
888 /*
889 * Report our new scan position for synchronization purposes. We don't
890 * do that when moving backwards, however. That would just mess up any
891 * other forward-moving scanners.
892 *
893 * Note: we do this before checking for end of scan so that the final
894 * state of the position hint is back at the start of the rel. That's
895 * not strictly necessary, but otherwise when you run the same query
896 * multiple times the starting position would shift a little bit
897 * backwards on every invocation, which is confusing. We don't
898 * guarantee any specific ordering in general, though.
899 */
900 if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
901 ss_report_location(scan->rs_base.rs_rd, block);
902
903 /* we're done if we're back at where we started */
904 if (block == scan->rs_startblock)
905 return InvalidBlockNumber;
906
907 /* check if the limit imposed by heap_setscanlimits() is met */
908 if (scan->rs_numblocks != InvalidBlockNumber)
909 {
910 if (--scan->rs_numblocks == 0)
911 return InvalidBlockNumber;
912 }
913
914 return block;
915 }
916 else
917 {
918 /* we're done if the last block is the start position */
919 if (block == scan->rs_startblock)
920 return InvalidBlockNumber;
921
922 /* check if the limit imposed by heap_setscanlimits() is met */
923 if (scan->rs_numblocks != InvalidBlockNumber)
924 {
925 if (--scan->rs_numblocks == 0)
926 return InvalidBlockNumber;
927 }
928
929 /* wrap to the end of the heap when the last page was page 0 */
930 if (block == 0)
931 block = scan->rs_nblocks;
932
933 block--;
934
935 return block;
936 }
937}
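/*
 * Worked example (illustrative, not part of the original file): with
 * rs_nblocks = 8, rs_startblock = 5 and no heap_setscanlimits() limit, a
 * forward scan visits blocks 5, 6, 7, 0, 1, 2, 3, 4 and then gets
 * InvalidBlockNumber once the next candidate wraps back to the start
 * block.  A backward scan from the same start position visits
 * 4, 3, 2, 1, 0, 7, 6, 5 before stopping.
 */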
938
939/* ----------------
940 * heapgettup - fetch next heap tuple
941 *
942 * Initialize the scan if not already done; then advance to the next
943 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
944 * or set scan->rs_ctup.t_data = NULL if no more tuples.
945 *
946 * Note: the reason nkeys/key are passed separately, even though they are
947 * kept in the scan descriptor, is that the caller may not want us to check
948 * the scankeys.
949 *
950 * Note: when we fall off the end of the scan in either direction, we
951 * reset rs_inited. This means that a further request with the same
952 * scan direction will restart the scan, which is a bit odd, but a
953 * request with the opposite scan direction will start a fresh scan
954 * in the proper direction. The latter is required behavior for cursors,
955 * while the former case is generally undefined behavior in Postgres
956 * so we don't care too much.
957 * ----------------
958 */
959static void
961 ScanDirection dir,
962 int nkeys,
963 ScanKey key)
964{
965 HeapTuple tuple = &(scan->rs_ctup);
966 Page page;
968 int linesleft;
969
970 if (likely(scan->rs_inited))
971 {
972 /* continue from previously returned page/tuple */
974 page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
975 goto continue_page;
976 }
977
978 /*
979 * advance the scan until we find a qualifying tuple or run out of stuff
980 * to scan
981 */
982 while (true)
983 {
984 heap_fetch_next_buffer(scan, dir);
985
986 /* did we run out of blocks to scan? */
987 if (!BufferIsValid(scan->rs_cbuf))
988 break;
989
991
993 page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
995
996 /*
997 * Only continue scanning the page while we have lines left.
998 *
999 * Note that this protects us from accessing line pointers past
1000 * PageGetMaxOffsetNumber(); both for forward scans when we resume the
1001 * table scan, and for when we start scanning a new page.
1002 */
1003 for (; linesleft > 0; linesleft--, lineoff += dir)
1004 {
1005 bool visible;
1007
1008 if (!ItemIdIsNormal(lpp))
1009 continue;
1010
1011 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1012 tuple->t_len = ItemIdGetLength(lpp);
1013 ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
1014
1015 visible = HeapTupleSatisfiesVisibility(tuple,
1016 scan->rs_base.rs_snapshot,
1017 scan->rs_cbuf);
1018
1020 tuple, scan->rs_cbuf,
1021 scan->rs_base.rs_snapshot);
1022
1023 /* skip tuples not visible to this snapshot */
1024 if (!visible)
1025 continue;
1026
1027 /* skip any tuples that don't match the scan key */
1028 if (key != NULL &&
1030 nkeys, key))
1031 continue;
1032
1034 scan->rs_coffset = lineoff;
1035 return;
1036 }
1037
1038 /*
1039 * if we get here, it means we've exhausted the items on this page and
1040 * it's time to move to the next.
1041 */
1043 }
1044
1045 /* end of scan */
1046 if (BufferIsValid(scan->rs_cbuf))
1047 ReleaseBuffer(scan->rs_cbuf);
1048
1049 scan->rs_cbuf = InvalidBuffer;
1052 tuple->t_data = NULL;
1053 scan->rs_inited = false;
1054}
1055
1056/* ----------------
1057 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
1058 *
1059 * Same API as heapgettup, but used in page-at-a-time mode
1060 *
1061 * The internal logic is much the same as heapgettup's too, but there are some
1062 * differences: we do not take the buffer content lock (that only needs to
1063 * happen inside heap_prepare_pagescan), and we iterate through just the
1064 * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1065 * that lineindex is 0-based, where the corresponding loop variable lineoff in
1066 * heapgettup is 1-based.
1067 * ----------------
1068 */
1069static void
1071 ScanDirection dir,
1072 int nkeys,
1073 ScanKey key)
1074{
1075 HeapTuple tuple = &(scan->rs_ctup);
1076 Page page;
1079
1080 if (likely(scan->rs_inited))
1081 {
1082 /* continue from previously returned page/tuple */
1083 page = BufferGetPage(scan->rs_cbuf);
1084
1085 lineindex = scan->rs_cindex + dir;
1086 if (ScanDirectionIsForward(dir))
1087 linesleft = scan->rs_ntuples - lineindex;
1088 else
1089 linesleft = scan->rs_cindex;
1090 /* lineindex now references the next or previous visible tid */
1091
1092 goto continue_page;
1093 }
1094
1095 /*
1096 * advance the scan until we find a qualifying tuple or run out of stuff
1097 * to scan
1098 */
1099 while (true)
1100 {
1101 heap_fetch_next_buffer(scan, dir);
1102
1103 /* did we run out of blocks to scan? */
1104 if (!BufferIsValid(scan->rs_cbuf))
1105 break;
1106
1108
1109 /* prune the page and determine visible tuple offsets */
1111 page = BufferGetPage(scan->rs_cbuf);
1112 linesleft = scan->rs_ntuples;
1114
1115 /* block is the same for all tuples, set it once outside the loop */
1116 ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1117
1118 /* lineindex now references the next or previous visible tid */
1120
1121 for (; linesleft > 0; linesleft--, lineindex += dir)
1122 {
1123 ItemId lpp;
1125
1126 Assert(lineindex < scan->rs_ntuples);
1127 lineoff = scan->rs_vistuples[lineindex];
1128 lpp = PageGetItemId(page, lineoff);
1130
1131 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1132 tuple->t_len = ItemIdGetLength(lpp);
1134
1135 /* skip any tuples that don't match the scan key */
1136 if (key != NULL &&
1137 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1138 nkeys, key))
1139 continue;
1140
1141 scan->rs_cindex = lineindex;
1142 return;
1143 }
1144 }
1145
1146 /* end of scan */
1147 if (BufferIsValid(scan->rs_cbuf))
1148 ReleaseBuffer(scan->rs_cbuf);
1149 scan->rs_cbuf = InvalidBuffer;
1152 tuple->t_data = NULL;
1153 scan->rs_inited = false;
1154}
1155
1156
1157/* ----------------------------------------------------------------
1158 * heap access method interface
1159 * ----------------------------------------------------------------
1160 */
1161
1162
1165 int nkeys, ScanKey key,
1166 ParallelTableScanDesc parallel_scan,
1167 uint32 flags)
1168{
1169 HeapScanDesc scan;
1170
1171 /*
1172 * increment relation ref count while scanning relation
1173 *
1174 * This is just to make really sure the relcache entry won't go away while
1175 * the scan has a pointer to it. Caller should be holding the rel open
1176 * anyway, so this is redundant in all normal scenarios...
1177 */
1179
1180 /*
1181 * allocate and initialize scan descriptor
1182 */
1183 if (flags & SO_TYPE_BITMAPSCAN)
1184 {
1186
1187 /*
1188 * Bitmap Heap scans do not have any fields that a normal Heap Scan
1189 * does not have, so no special initializations required here.
1190 */
1191 scan = (HeapScanDesc) bscan;
1192 }
1193 else
1195
1196 scan->rs_base.rs_rd = relation;
1197 scan->rs_base.rs_snapshot = snapshot;
1198 scan->rs_base.rs_nkeys = nkeys;
1199 scan->rs_base.rs_flags = flags;
1200 scan->rs_base.rs_parallel = parallel_scan;
1201 scan->rs_strategy = NULL; /* set in initscan */
1202 scan->rs_cbuf = InvalidBuffer;
1203
1204 /*
1205 * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
1206 */
1207 if (!(snapshot && IsMVCCSnapshot(snapshot)))
1209
1210 /* Check that a historic snapshot is not used for non-catalog tables */
1211 if (snapshot &&
1212 IsHistoricMVCCSnapshot(snapshot) &&
1214 {
1215 ereport(ERROR,
1217 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1218 RelationGetRelationName(relation))));
1219 }
1220
1221 /*
1222 * For seqscan and sample scans in a serializable transaction, acquire a
1223 * predicate lock on the entire relation. This is required not only to
1224 * lock all the matching tuples, but also to conflict with new insertions
1225 * into the table. In an indexscan, we take page locks on the index pages
1226 * covering the range specified in the scan qual, but in a heap scan there
1227 * is nothing more fine-grained to lock. A bitmap scan is a different
1228 * story, there we have already scanned the index and locked the index
1229 * pages covering the predicate. But in that case we still have to lock
1230 * any matching heap tuples. For sample scan we could optimize the locking
1231 * to be at least page-level granularity, but we'd need to add per-tuple
1232 * locking for that.
1233 */
1235 {
1236 /*
1237 * Ensure a missing snapshot is noticed reliably, even if the
1238 * isolation mode means predicate locking isn't performed (and
1239 * therefore the snapshot isn't used here).
1240 */
1241 Assert(snapshot);
1242 PredicateLockRelation(relation, snapshot);
1243 }
1244
1245 /* we only need to set this up once */
1246 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1247
1248 /*
1249 * Allocate memory to keep track of page allocation for parallel workers
1250 * when doing a parallel scan.
1251 */
1252 if (parallel_scan != NULL)
1254 else
1256
1257 /*
1258 * we do this here instead of in initscan() because heap_rescan also calls
1259 * initscan() and we don't want to allocate memory again
1260 */
1261 if (nkeys > 0)
1262 scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
1263 else
1264 scan->rs_base.rs_key = NULL;
1265
1266 initscan(scan, key, false);
1267
1268 scan->rs_read_stream = NULL;
1269
1270 /*
1271 * Set up a read stream for sequential scans and TID range scans. This
1272 * should be done after initscan() because initscan() allocates the
1273 * BufferAccessStrategy object passed to the read stream API.
1274 */
1275 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1277 {
1279
1280 if (scan->rs_base.rs_parallel)
1282 else
1284
1285 /* ---
1286 * It is safe to use batchmode as the only locks taken by `cb`
1287 * are never taken while waiting for IO:
1288 * - SyncScanLock is used in the non-parallel case
1289 * - in the parallel case, only spinlocks and atomics are used
1290 * ---
1291 */
1294 scan->rs_strategy,
1295 scan->rs_base.rs_rd,
1297 cb,
1298 scan,
1299 0);
1300 }
1301 else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1302 {
1305 scan->rs_strategy,
1306 scan->rs_base.rs_rd,
1309 scan,
1310 sizeof(TBMIterateResult));
1311 }
1312
1313 scan->rs_vmbuffer = InvalidBuffer;
1314
1315 return (TableScanDesc) scan;
1316}
1317
1318void
1320 bool allow_strat, bool allow_sync, bool allow_pagemode)
1321{
1323
1324 if (set_params)
1325 {
1326 if (allow_strat)
1328 else
1330
1331 if (allow_sync)
1333 else
1335
1336 if (allow_pagemode && scan->rs_base.rs_snapshot &&
1339 else
1341 }
1342
1343 /*
1344 * unpin scan buffers
1345 */
1346 if (BufferIsValid(scan->rs_cbuf))
1347 {
1348 ReleaseBuffer(scan->rs_cbuf);
1349 scan->rs_cbuf = InvalidBuffer;
1350 }
1351
1352 if (BufferIsValid(scan->rs_vmbuffer))
1353 {
1355 scan->rs_vmbuffer = InvalidBuffer;
1356 }
1357
1358 /*
1359 * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1360 * additional data vs a normal HeapScan
1361 */
1362
1363 /*
1364 * The read stream is reset on rescan. This must be done before
1365 * initscan(), as some state referred to by read_stream_reset() is reset
1366 * in initscan().
1367 */
1368 if (scan->rs_read_stream)
1370
1371 /*
1372 * reinitialize scan descriptor
1373 */
1374 initscan(scan, key, true);
1375}
1376
1377void
1379{
1381
1382 /* Note: no locking manipulations needed */
1383
1384 /*
1385 * unpin scan buffers
1386 */
1387 if (BufferIsValid(scan->rs_cbuf))
1388 ReleaseBuffer(scan->rs_cbuf);
1389
1390 if (BufferIsValid(scan->rs_vmbuffer))
1392
1393 /*
1394 * Must free the read stream before freeing the BufferAccessStrategy.
1395 */
1396 if (scan->rs_read_stream)
1398
1399 /*
1400 * decrement relation reference count and free scan descriptor storage
1401 */
1403
1404 if (scan->rs_base.rs_key)
1405 pfree(scan->rs_base.rs_key);
1406
1407 if (scan->rs_strategy != NULL)
1409
1410 if (scan->rs_parallelworkerdata != NULL)
1412
1413 if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1415
1416 pfree(scan);
1417}
1418
1421{
1423
1424 /*
1425 * This is still widely used directly, without going through table AM, so
1426 * add a safety check. It's possible we should, at a later point,
1427 * downgrade this to an assert. The reason for checking the AM routine,
1428 * rather than the AM oid, is that this allows writing regression tests
1429 * that create another AM reusing the heap handler.
1430 */
1431 if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1432 ereport(ERROR,
1434 errmsg_internal("only heap AM is supported")));
1435
1436 /* Note: no locking manipulations needed */
1437
1439 heapgettup_pagemode(scan, direction,
1440 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1441 else
1442 heapgettup(scan, direction,
1443 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1444
1445 if (scan->rs_ctup.t_data == NULL)
1446 return NULL;
1447
1448 /*
1449 * if we get here it means we have a new current scan tuple, so point to
1450 * the proper return buffer and return the tuple.
1451 */
1452
1454
1455 return &scan->rs_ctup;
1456}
1457
1458bool
1460{
1462
1463 /* Note: no locking manipulations needed */
1464
1465 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1466 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1467 else
1468 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1469
1470 if (scan->rs_ctup.t_data == NULL)
1471 {
1472 ExecClearTuple(slot);
1473 return false;
1474 }
1475
1476 /*
1477 * if we get here it means we have a new current scan tuple, so point to
1478 * the proper return buffer and return the tuple.
1479 */
1480
1482
1483 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1484 scan->rs_cbuf);
1485 return true;
1486}
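/*
 * Illustrative sketch (assumed usage, not part of the original file): the
 * slot-based variant is what executor-facing table AM code uses.  A
 * caller-managed loop could look like
 *
 *		TupleTableSlot *slot = table_slot_create(rel, NULL);
 *
 *		while (heap_getnextslot(scan, ForwardScanDirection, slot))
 *		{
 *			... work with the slot contents ...
 *		}
 *		ExecDropSingleTupleTableSlot(slot);
 */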
1487
1488void
1491{
1497
1498 /*
1499 * For relations without any pages, we can simply leave the TID range
1500 * unset. There will be no tuples to scan, therefore no tuples outside
1501 * the given TID range.
1502 */
1503 if (scan->rs_nblocks == 0)
1504 return;
1505
1506 /*
1507 * Set up some ItemPointers which point to the first and last possible
1508 * tuples in the heap.
1509 */
1512
1513 /*
1514 * If the given maximum TID is below the highest possible TID in the
1515 * relation, then restrict the range to that, otherwise we scan to the end
1516 * of the relation.
1517 */
1520
1521 /*
1522 * If the given minimum TID is above the lowest possible TID in the
1523 * relation, then restrict the range to only scan for TIDs above that.
1524 */
1527
1528 /*
1529 * Check for an empty range and protect against would-be negative results
1530 * from the numBlks calculation below.
1531 */
1533 {
1534 /* Set an empty range of blocks to scan */
1536 return;
1537 }
1538
1539 /*
1540 * Calculate the first block and the number of blocks we must scan. We
1541 * could be more aggressive here and perform some more validation to try
1542 * and further narrow the scope of blocks to scan by checking if the
1543 * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1544 * advance startBlk by one. Likewise, if highestItem has an offset of 0
1545 * we could scan one fewer blocks. However, such an optimization does not
1546 * seem worth troubling over, currently.
1547 */
1549
1552
1553 /* Set the start block and number of blocks to scan */
1555
1556 /* Finally, set the TID range in sscan */
1557 ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1558 ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1559}
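/*
 * Illustrative sketch (assumed usage, not part of the original file): a
 * TID range scan over blocks 0..9, assuming "sscan" was begun with the
 * SO_TYPE_TIDRANGESCAN flag and "slot" is a suitable TupleTableSlot:
 *
 *		ItemPointerData mintid,
 *					maxtid;
 *
 *		ItemPointerSet(&mintid, 0, 1);
 *		ItemPointerSet(&maxtid, 9, MaxOffsetNumber);
 *		heap_set_tidrange(sscan, &mintid, &maxtid);
 *		while (heap_getnextslot_tidrange(sscan, ForwardScanDirection, slot))
 *			...
 */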
1560
1561bool
1563 TupleTableSlot *slot)
1564{
1566 ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1567 ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1568
1569 /* Note: no locking manipulations needed */
1570 for (;;)
1571 {
1572 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1573 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1574 else
1575 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1576
1577 if (scan->rs_ctup.t_data == NULL)
1578 {
1579 ExecClearTuple(slot);
1580 return false;
1581 }
1582
1583 /*
1584 * heap_set_tidrange will have used heap_setscanlimits to limit the
1585 * range of pages we scan to only ones that can contain the TID range
1586 * we're scanning for. Here we must filter out any tuples from these
1587 * pages that are outside of that range.
1588 */
1589 if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1590 {
1591 ExecClearTuple(slot);
1592
1593 /*
1594 * When scanning backwards, the TIDs will be in descending order.
1595 * Future tuples in this direction will be lower still, so we can
1596 * just return false to indicate there will be no more tuples.
1597 */
1598 if (ScanDirectionIsBackward(direction))
1599 return false;
1600
1601 continue;
1602 }
1603
1604 /*
1605 * Likewise for the final page, we must filter out TIDs greater than
1606 * maxtid.
1607 */
1608 if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1609 {
1610 ExecClearTuple(slot);
1611
1612 /*
1613 * When scanning forward, the TIDs will be in ascending order.
1614 * Future tuples in this direction will be higher still, so we can
1615 * just return false to indicate there will be no more tuples.
1616 */
1617 if (ScanDirectionIsForward(direction))
1618 return false;
1619 continue;
1620 }
1621
1622 break;
1623 }
1624
1625 /*
1626 * if we get here it means we have a new current scan tuple, so point to
1627 * the proper return buffer and return the tuple.
1628 */
1630
1631 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1632 return true;
1633}
1634
1635/*
1636 * heap_fetch - retrieve tuple with given tid
1637 *
1638 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1639 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1640 * against the specified snapshot.
1641 *
1642 * If successful (tuple found and passes snapshot time qual), then *userbuf
1643 * is set to the buffer holding the tuple and true is returned. The caller
1644 * must unpin the buffer when done with the tuple.
1645 *
1646 * If the tuple is not found (ie, item number references a deleted slot),
1647 * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1648 * and false is returned.
1649 *
1650 * If the tuple is found but fails the time qual check, then the behavior
1651 * depends on the keep_buf parameter. If keep_buf is false, the results
1652 * are the same as for the tuple-not-found case. If keep_buf is true,
1653 * then tuple->t_data and *userbuf are returned as for the success case,
1654 * and again the caller must unpin the buffer; but false is returned.
1655 *
1656 * heap_fetch does not follow HOT chains: only the exact TID requested will
1657 * be fetched.
1658 *
1659 * It is somewhat inconsistent that we ereport() on invalid block number but
1660 * return false on invalid item number. There are a couple of reasons though.
1661 * One is that the caller can relatively easily check the block number for
1662 * validity, but cannot check the item number without reading the page
1663 * himself. Another is that when we are following a t_ctid link, we can be
1664 * reasonably confident that the page number is valid (since VACUUM shouldn't
1665 * truncate off the destination page without having killed the referencing
1666 * tuple first), but the item number might well not be good.
1667 */
1668bool
1670 Snapshot snapshot,
1671 HeapTuple tuple,
1672 Buffer *userbuf,
1673 bool keep_buf)
1674{
1675 ItemPointer tid = &(tuple->t_self);
1676 ItemId lp;
1677 Buffer buffer;
1678 Page page;
1679 OffsetNumber offnum;
1680 bool valid;
1681
1682 /*
1683 * Fetch and pin the appropriate page of the relation.
1684 */
1685 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1686
1687 /*
1688 * Need share lock on buffer to examine tuple commit status.
1689 */
1691 page = BufferGetPage(buffer);
1692
1693 /*
1694 * We'd better check for out-of-range offnum in case of VACUUM since the
1695 * TID was obtained.
1696 */
1697 offnum = ItemPointerGetOffsetNumber(tid);
1699 {
1700 UnlockReleaseBuffer(buffer);
1702 tuple->t_data = NULL;
1703 return false;
1704 }
1705
1706 /*
1707 * get the item line pointer corresponding to the requested tid
1708 */
1709 lp = PageGetItemId(page, offnum);
1710
1711 /*
1712 * Must check for deleted tuple.
1713 */
1714 if (!ItemIdIsNormal(lp))
1715 {
1716 UnlockReleaseBuffer(buffer);
1718 tuple->t_data = NULL;
1719 return false;
1720 }
1721
1722 /*
1723 * fill in *tuple fields
1724 */
1725 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1726 tuple->t_len = ItemIdGetLength(lp);
1727 tuple->t_tableOid = RelationGetRelid(relation);
1728
1729 /*
1730 * check tuple visibility, then release lock
1731 */
1732 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1733
1734 if (valid)
1735 PredicateLockTID(relation, &(tuple->t_self), snapshot,
1737
1738 HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1739
1741
1742 if (valid)
1743 {
1744 /*
1745 * All checks passed, so return the tuple as valid. Caller is now
1746 * responsible for releasing the buffer.
1747 */
1748 *userbuf = buffer;
1749
1750 return true;
1751 }
1752
1753 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1754 if (keep_buf)
1755 *userbuf = buffer;
1756 else
1757 {
1758 ReleaseBuffer(buffer);
1760 tuple->t_data = NULL;
1761 }
1762
1763 return false;
1764}
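/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * fetching one specific tuple version by TID and releasing the pin when
 * done; "relation", "snapshot" and "tid" are caller-provided.
 *
 *		HeapTupleData tuple;
 *		Buffer		buf;
 *
 *		ItemPointerCopy(tid, &tuple.t_self);
 *		if (heap_fetch(relation, snapshot, &tuple, &buf, false))
 *		{
 *			... use tuple.t_data while the buffer pin is held ...
 *			ReleaseBuffer(buf);
 *		}
 */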
1765
1766/*
1767 * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1768 *
1769 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1770 * of a HOT chain), and buffer is the buffer holding this tuple. We search
1771 * for the first chain member satisfying the given snapshot. If one is
1772 * found, we update *tid to reference that tuple's offset number, and
1773 * return true. If no match, return false without modifying *tid.
1774 *
1775 * heapTuple is a caller-supplied buffer. When a match is found, we return
1776 * the tuple here, in addition to updating *tid. If no match is found, the
1777 * contents of this buffer on return are undefined.
1778 *
1779 * If all_dead is not NULL, we check non-visible tuples to see if they are
1780 * globally dead; *all_dead is set true if all members of the HOT chain
1781 * are vacuumable, false if not.
1782 *
1783 * Unlike heap_fetch, the caller must already have pin and (at least) share
1784 * lock on the buffer; it is still pinned/locked at exit.
1785 */
1786bool
1788 Snapshot snapshot, HeapTuple heapTuple,
1789 bool *all_dead, bool first_call)
1790{
1791 Page page = BufferGetPage(buffer);
1793 BlockNumber blkno;
1794 OffsetNumber offnum;
1795 bool at_chain_start;
1796 bool valid;
1797 bool skip;
1798 GlobalVisState *vistest = NULL;
1799
1800 /* If this is not the first call, previous call returned a (live!) tuple */
1801 if (all_dead)
1803
1804 blkno = ItemPointerGetBlockNumber(tid);
1805 offnum = ItemPointerGetOffsetNumber(tid);
1807 skip = !first_call;
1808
1809 /* XXX: we should assert that a snapshot is pushed or registered */
1811 Assert(BufferGetBlockNumber(buffer) == blkno);
1812
1813 /* Scan through possible multiple members of HOT-chain */
1814 for (;;)
1815 {
1816 ItemId lp;
1817
1818 /* check for bogus TID */
1820 break;
1821
1822 lp = PageGetItemId(page, offnum);
1823
1824 /* check for unused, dead, or redirected items */
1825 if (!ItemIdIsNormal(lp))
1826 {
1827 /* We should only see a redirect at start of chain */
1829 {
1830 /* Follow the redirect */
1831 offnum = ItemIdGetRedirect(lp);
1832 at_chain_start = false;
1833 continue;
1834 }
1835 /* else must be end of chain */
1836 break;
1837 }
1838
1839 /*
1840 * Update heapTuple to point to the element of the HOT chain we're
1841 * currently investigating. Having t_self set correctly is important
1842 * because the SSI checks and the *Satisfies routine for historical
1843 * MVCC snapshots need the correct tid to decide about the visibility.
1844 */
1845 heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1846 heapTuple->t_len = ItemIdGetLength(lp);
1847 heapTuple->t_tableOid = RelationGetRelid(relation);
1848 ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1849
1850 /*
1851 * Shouldn't see a HEAP_ONLY tuple at chain start.
1852 */
1854 break;
1855
1856 /*
1857 * The xmin should match the previous xmax value, else chain is
1858 * broken.
1859 */
1863 break;
1864
1865 /*
1866 * When first_call is true (and thus, skip is initially false) we'll
1867 * return the first tuple we find. But on later passes, heapTuple
1868 * will initially be pointing to the tuple we returned last time.
1869 * Returning it again would be incorrect (and would loop forever), so
1870 * we skip it and return the next match we find.
1871 */
1872 if (!skip)
1873 {
1874 /* If it's visible per the snapshot, we must return it */
1875 valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1877 buffer, snapshot);
1878
1879 if (valid)
1880 {
1881 ItemPointerSetOffsetNumber(tid, offnum);
1882 PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1884 if (all_dead)
1885 *all_dead = false;
1886 return true;
1887 }
1888 }
1889 skip = false;
1890
1891 /*
1892 * If we can't see it, maybe no one else can either. At caller
1893 * request, check whether all chain members are dead to all
1894 * transactions.
1895 *
1896 * Note: if you change the criterion here for what is "dead", fix the
1897 * planner's get_actual_variable_range() function to match.
1898 */
1899 if (all_dead && *all_dead)
1900 {
1901 if (!vistest)
1902 vistest = GlobalVisTestFor(relation);
1903
1904 if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1905 *all_dead = false;
1906 }
1907
1908 /*
1909 * Check to see if HOT chain continues past this tuple; if so fetch
1910 * the next offnum and loop around.
1911 */
1913 {
1914 Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1915 blkno);
1916 offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1917 at_chain_start = false;
1919 }
1920 else
1921 break; /* end of chain */
1922 }
1923
1924 return false;
1925}
1926
1927/*
1928 * heap_get_latest_tid - get the latest tid of a specified tuple
1929 *
1930 * Actually, this gets the latest version that is visible according to the
1931 * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1932 * possibly uncommitted version.
1933 *
1934 * *tid is both an input and an output parameter: it is updated to
1935 * show the latest version of the row. Note that it will not be changed
1936 * if no version of the row passes the snapshot test.
1937 */
1938void
1940 ItemPointer tid)
1941{
1942 Relation relation = sscan->rs_rd;
1943 Snapshot snapshot = sscan->rs_snapshot;
1944 ItemPointerData ctid;
1946
1947 /*
1948 * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1949 * Assume that t_ctid links are valid however - there shouldn't be invalid
1950 * ones in the table.
1951 */
1953
1954 /*
1955 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1956 * need to examine, and *tid is the TID we will return if ctid turns out
1957 * to be bogus.
1958 *
1959 * Note that we will loop until we reach the end of the t_ctid chain.
1960 * Depending on the snapshot passed, there might be at most one visible
1961 * version of the row, but we don't try to optimize for that.
1962 */
1963 ctid = *tid;
1964 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1965 for (;;)
1966 {
1967 Buffer buffer;
1968 Page page;
1969 OffsetNumber offnum;
1970 ItemId lp;
1971 HeapTupleData tp;
1972 bool valid;
1973
1974 /*
1975 * Read, pin, and lock the page.
1976 */
1977 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1979 page = BufferGetPage(buffer);
1980
1981 /*
1982 * Check for bogus item number. This is not treated as an error
1983 * condition because it can happen while following a t_ctid link. We
1984 * just assume that the prior tid is OK and return it unchanged.
1985 */
1986 offnum = ItemPointerGetOffsetNumber(&ctid);
1988 {
1989 UnlockReleaseBuffer(buffer);
1990 break;
1991 }
1992 lp = PageGetItemId(page, offnum);
1993 if (!ItemIdIsNormal(lp))
1994 {
1995 UnlockReleaseBuffer(buffer);
1996 break;
1997 }
1998
1999 /* OK to access the tuple */
2000 tp.t_self = ctid;
2001 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2002 tp.t_len = ItemIdGetLength(lp);
2003 tp.t_tableOid = RelationGetRelid(relation);
2004
2005 /*
2006 * After following a t_ctid link, we might arrive at an unrelated
2007 * tuple. Check for XMIN match.
2008 */
2011 {
2012 UnlockReleaseBuffer(buffer);
2013 break;
2014 }
2015
2016 /*
2017 * Check tuple visibility; if visible, set it as the new result
2018 * candidate.
2019 */
2020 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2021 HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2022 if (valid)
2023 *tid = ctid;
2024
2025 /*
2026 * If there's a valid t_ctid link, follow it, else we're done.
2027 */
2028 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2032 {
2033 UnlockReleaseBuffer(buffer);
2034 break;
2035 }
2036
2037 ctid = tp.t_data->t_ctid;
2039 UnlockReleaseBuffer(buffer);
2040 } /* end of loop */
2041}
2042
2043
2044/*
2045 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
2046 *
2047 * This is called after we have waited for the XMAX transaction to terminate.
2048 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
2049 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
2050 * hint bit if possible --- but beware that that may not yet be possible,
2051 * if the transaction committed asynchronously.
2052 *
2053 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
2054 * even if it commits.
2055 *
2056 * Hence callers should look only at XMAX_INVALID.
2057 *
2058 * Note this is not allowed for tuples whose xmax is a multixact.
2059 */
2060static void
2062{
2065
2067 {
2068 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2071 xid);
2072 else
2075 }
2076}
2077
2078
2079/*
2080 * GetBulkInsertState - prepare status object for a bulk insert
2081 */
2082BulkInsertState
2083GetBulkInsertState(void)
2084{
2085 BulkInsertState bistate;
2086
2087 bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2088 bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2089 bistate->current_buf = InvalidBuffer;
2090 bistate->next_free = InvalidBlockNumber;
2091 bistate->last_free = InvalidBlockNumber;
2092 bistate->already_extended_by = 0;
2093 return bistate;
2094}
2095
2096/*
2097 * FreeBulkInsertState - clean up after finishing a bulk insert
2098 */
2099void
2101{
2102 if (bistate->current_buf != InvalidBuffer)
2103 ReleaseBuffer(bistate->current_buf);
2104 FreeAccessStrategy(bistate->strategy);
2105 pfree(bistate);
2106}
2107
2108/*
2109 * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2110 */
2111void
2113{
2114 if (bistate->current_buf != InvalidBuffer)
2115 ReleaseBuffer(bistate->current_buf);
2116 bistate->current_buf = InvalidBuffer;
2117
2118 /*
2119 * Despite the name, we also reset bulk relation extension state.
2120 * Otherwise we can end up erroring out due to looking for free space in
2121 * ->next_free of one partition, even though ->next_free was set when
2122 * extending another partition. It could obviously also be bad for
2123 * efficiency to look at existing blocks at offsets from another
2124 * partition, even if we don't error out.
2125 */
2126 bistate->next_free = InvalidBlockNumber;
2127 bistate->last_free = InvalidBlockNumber;
2128}
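/*
 * Illustrative sketch (assumed usage, not part of the original file): a
 * bulk load reuses one BulkInsertState across many heap_insert() calls so
 * the same buffer stays pinned and a bulk-write strategy is used:
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *
 *		for each tuple "tup" to load:
 *			heap_insert(rel, tup, GetCurrentCommandId(true), 0, bistate);
 *
 *		FreeBulkInsertState(bistate);
 */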
2129
2130
2131/*
2132 * heap_insert - insert tuple into a heap
2133 *
2134 * The new tuple is stamped with current transaction ID and the specified
2135 * command ID.
2136 *
2137 * See table_tuple_insert for comments about most of the input flags, except
2138 * that this routine directly takes a tuple rather than a slot.
2139 *
2140 * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
2141 * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
2142 * implement table_tuple_insert_speculative().
2143 *
2144 * On return the header fields of *tup are updated to match the stored tuple;
2145 * in particular tup->t_self receives the actual TID where the tuple was
2146 * stored. But note that any toasting of fields within the tuple data is NOT
2147 * reflected into *tup.
2148 */
2149void
2152{
2155 Buffer buffer;
2156 Buffer vmbuffer = InvalidBuffer;
2157 bool all_visible_cleared = false;
2158
2159 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2162
2163 AssertHasSnapshotForToast(relation);
2164
2165 /*
2166 * Fill in tuple header fields and toast the tuple if necessary.
2167 *
2168 * Note: below this point, heaptup is the data we actually intend to store
2169 * into the relation; tup is the caller's original untoasted data.
2170 */
2171 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2172
2173 /*
2174 * Find buffer to insert this tuple into. If the page is all visible,
2175 * this will also pin the requisite visibility map page.
2176 */
2177 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2178 InvalidBuffer, options, bistate,
2179 &vmbuffer, NULL,
2180 0);
2181
2182 /*
2183 * We're about to do the actual insert -- but check for conflict first, to
2184 * avoid possibly having to roll back work we've just done.
2185 *
2186 * This is safe without a recheck as long as there is no possibility of
2187 * another process scanning the page between this check and the insert
2188 * being visible to the scan (i.e., an exclusive buffer content lock is
2189 * continuously held from this point until the tuple insert is visible).
2190 *
2191 * For a heap insert, we only need to check for table-level SSI locks. Our
2192 * new tuple can't possibly conflict with existing tuple locks, and heap
2193 * page locks are only consolidated versions of tuple locks; they do not
2194 * lock "gaps" as index page locks do. So we don't need to specify a
2195 * buffer when making the call, which makes for a faster check.
2196 */
2198
2199 /* NO EREPORT(ERROR) from here till changes are logged */
2200 START_CRIT_SECTION();
2201
2202 RelationPutHeapTuple(relation, buffer, heaptup,
2203 (options & HEAP_INSERT_SPECULATIVE) != 0);
2204
2205 if (PageIsAllVisible(BufferGetPage(buffer)))
2206 {
2207 all_visible_cleared = true;
2209 visibilitymap_clear(relation,
2211 vmbuffer, VISIBILITYMAP_VALID_BITS);
2212 }
2213
2214 /*
2215 * XXX Should we set PageSetPrunable on this page ?
2216 *
2217 * The inserting transaction may eventually abort thus making this tuple
2218 * DEAD and hence available for pruning. Though we don't want to optimize
2219 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2220 * aborted tuple will never be pruned until next vacuum is triggered.
2221 *
2222 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2223 */
2224
2225 MarkBufferDirty(buffer);
2226
2227 /* XLOG stuff */
2228 if (RelationNeedsWAL(relation))
2229 {
2233 Page page = BufferGetPage(buffer);
2234 uint8 info = XLOG_HEAP_INSERT;
2235 int bufflags = 0;
2236
2237 /*
2238 * If this is a catalog, we need to transmit combo CIDs to properly
2239 * decode, so log that as well.
2240 */
2242 log_heap_new_cid(relation, heaptup);
2243
2244 /*
2245 * If this is the single and first tuple on page, we can reinit the
2246 * page instead of restoring the whole thing. Set flag, and hide
2247 * buffer references from XLogInsert.
2248 */
2251 {
2252 info |= XLOG_HEAP_INIT_PAGE;
2254 }
2255
2256 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2257 xlrec.flags = 0;
2263
2264 /*
2265 * For logical decoding, we need the tuple even if we're doing a full
2266 * page write, so make sure it's included even if we take a full-page
2267 * image. (XXX We could alternatively store a pointer into the FPW).
2268 */
2269 if (RelationIsLogicallyLogged(relation) &&
2271 {
2274
2275 if (IsToastRelation(relation))
2277 }
2278
2281
2282 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2283 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2284 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2285
2286 /*
2287 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2288 * write the whole page to the xlog, we don't need to store
2289 * xl_heap_header in the xlog.
2290 */
2293 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2295 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2297
2298 /* filtering by origin on a row level is much more efficient */
2300
2301 recptr = XLogInsert(RM_HEAP_ID, info);
2302
2303 PageSetLSN(page, recptr);
2304 }
2305
2307
2308 UnlockReleaseBuffer(buffer);
2309 if (vmbuffer != InvalidBuffer)
2310 ReleaseBuffer(vmbuffer);
2311
2312 /*
2313 * If tuple is cacheable, mark it for invalidation from the caches in case
2314 * we abort. Note it is OK to do this after releasing the buffer, because
2315 * the heaptup data structure is all in local memory, not in the shared
2316 * buffer.
2317 */
2319
2320 /* Note: speculative insertions are counted too, even if aborted later */
2321 pgstat_count_heap_insert(relation, 1);
2322
2323 /*
2324 * If heaptup is a private copy, release it. Don't forget to copy t_self
2325 * back to the caller's image, too.
2326 */
2327 if (heaptup != tup)
2328 {
2329 tup->t_self = heaptup->t_self;
2331 }
2332}
2333
2334/*
2335 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2336 * tuple header fields and toasts the tuple if necessary. Returns a toasted
2337 * version of the tuple if it was toasted, or the original tuple if not. Note
2338 * that in any case, the header fields are also set in the original tuple.
2339 */
2340static HeapTuple
2341heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2342 CommandId cid, int options)
2343{
2344 /*
2345 * To allow parallel inserts, we need to ensure that they are safe to be
2346 * performed in workers. We have the infrastructure to allow parallel
2347 * inserts in general except for the cases where inserts generate a new
2348 * CommandId (eg. inserts into a table having a foreign key column).
2349 */
2350 if (IsParallelWorker())
2351 ereport(ERROR,
2353 errmsg("cannot insert tuples in a parallel worker")));
2354
2355 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2356 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2357 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2358 HeapTupleHeaderSetXmin(tup->t_data, xid);
2359 if (options & HEAP_INSERT_FROZEN)
2360 HeapTupleHeaderSetXminFrozen(tup->t_data);
2361
2362 HeapTupleHeaderSetCmin(tup->t_data, cid);
2363 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2364 tup->t_tableOid = RelationGetRelid(relation);
2365
2366 /*
2367 * If the new tuple is too big for storage or contains already toasted
2368 * out-of-line attributes from some other relation, invoke the toaster.
2369 */
2370 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2371 relation->rd_rel->relkind != RELKIND_MATVIEW)
2372 {
2373 /* toast table entries should never be recursively toasted */
2374 Assert(!HeapTupleHasExternal(tup));
2375 return tup;
2376 }
2377 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2378 return heap_toast_insert_or_update(relation, tup, NULL, options);
2379 else
2380 return tup;
2381}
2382
2383/*
2384 * Helper for heap_multi_insert() that computes the number of entire pages
2385 * that inserting the remaining heaptuples requires. Used to determine how
2386 * much the relation needs to be extended by.
2387 */
2388static int
2389heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2390{
2391 size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2392 int npages = 1;
2393
2394 for (int i = done; i < ntuples; i++)
2395 {
2396 size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2397
2398 if (page_avail < tup_sz)
2399 {
2400 npages++;
2401 page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2402 }
2403 page_avail -= tup_sz;
2404 }
2405
2406 return npages;
2407}
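/*
 * Editor's note (illustration only): with the default 8 kB block size a heap
 * page offers roughly BLCKSZ - SizeOfPageHeaderData = 8168 usable bytes.
 * Assuming saveFreeSpace = 0 and tuples whose MAXALIGN'd length is 56 bytes,
 * each tuple costs 60 bytes including its line pointer, so about 136 tuples
 * fit per page and heap_multi_insert_pages() would estimate 3 pages for 300
 * remaining tuples.
 */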
2408
2409/*
2410 * heap_multi_insert - insert multiple tuples into a heap
2411 *
2412 * This is like heap_insert(), but inserts multiple tuples in one operation.
2413 * That's faster than calling heap_insert() in a loop, because when multiple
2414 * tuples can be inserted on a single page, we can write just a single WAL
2415 * record covering all of them, and only need to lock/unlock the page once.
2416 *
2417 * Note: this leaks memory into the current memory context. You can create a
2418 * temporary context before calling this, if that's a problem.
2419 */
2420void
2421heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2422 CommandId cid, int options, BulkInsertState bistate)
2423{
2426 int i;
2427 int ndone;
2429 Page page;
2430 Buffer vmbuffer = InvalidBuffer;
2431 bool needwal;
2435 bool starting_with_empty_page = false;
2436 int npages = 0;
2437 int npages_used = 0;
2438
2439 /* currently not needed (thus unsupported) for heap_multi_insert() */
2441
2442 AssertHasSnapshotForToast(relation);
2443
2444 needwal = RelationNeedsWAL(relation);
2447
2448 /* Toast and set header data in all the slots */
2449 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2450 for (i = 0; i < ntuples; i++)
2451 {
2452 HeapTuple tuple;
2453
2454 tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2455 slots[i]->tts_tableOid = RelationGetRelid(relation);
2456 tuple->t_tableOid = slots[i]->tts_tableOid;
2457 heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2458 options);
2459 }
2460
2461 /*
2462 * We're about to do the actual inserts -- but check for conflict first,
2463 * to minimize the possibility of having to roll back work we've just
2464 * done.
2465 *
2466 * A check here does not definitively prevent a serialization anomaly;
2467 * that check MUST be done at least past the point of acquiring an
2468 * exclusive buffer content lock on every buffer that will be affected,
2469 * and MAY be done after all inserts are reflected in the buffers and
2470 * those locks are released; otherwise there is a race condition. Since
2471 * multiple buffers can be locked and unlocked in the loop below, and it
2472 * would not be feasible to identify and lock all of those buffers before
2473 * the loop, we must do a final check at the end.
2474 *
2475 * The check here could be omitted with no loss of correctness; it is
2476 * present strictly as an optimization.
2477 *
2478 * For heap inserts, we only need to check for table-level SSI locks. Our
2479 * new tuples can't possibly conflict with existing tuple locks, and heap
2480 * page locks are only consolidated versions of tuple locks; they do not
2481 * lock "gaps" as index page locks do. So we don't need to specify a
2482 * buffer when making the call, which makes for a faster check.
2483 */
2485
2486 ndone = 0;
2487 while (ndone < ntuples)
2488 {
2489 Buffer buffer;
2490 bool all_visible_cleared = false;
2491 bool all_frozen_set = false;
2492 int nthispage;
2493
2495
2496 /*
2497 * Compute number of pages needed to fit the to-be-inserted tuples in
2498 * the worst case. This will be used to determine how much to extend
2499 * the relation by in RelationGetBufferForTuple(), if needed. If we
2500 * filled a prior page from scratch, we can just update our last
2501 * computation, but if we started with a partially filled page,
2502 * recompute from scratch: the number of potentially required pages can
2503 * vary because the tuples must fit onto the page, because of page
2504 * headers, etc.
2505 */
2506 if (ndone == 0 || !starting_with_empty_page)
2507 {
2508 npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2510 npages_used = 0;
2511 }
2512 else
2513 npages_used++;
2514
2515 /*
2516 * Find buffer where at least the next tuple will fit. If the page is
2517 * all-visible, this will also pin the requisite visibility map page.
2518 *
2519 * Also pin visibility map page if COPY FREEZE inserts tuples into an
2520 * empty page. See all_frozen_set below.
2521 */
2522 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2523 InvalidBuffer, options, bistate,
2524 &vmbuffer, NULL,
2525 npages - npages_used);
2526 page = BufferGetPage(buffer);
2527
2529
2531 {
2532 all_frozen_set = true;
2533 /* Lock the vmbuffer before entering the critical section */
2535 }
2536
2537 /* NO EREPORT(ERROR) from here till changes are logged */
2539
2540 /*
2541 * RelationGetBufferForTuple has ensured that the first tuple fits.
2542 * Put that on the page, and then as many other tuples as fit.
2543 */
2544 RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2545
2546 /*
2547 * For logical decoding we need combo CIDs to properly decode the
2548 * catalog.
2549 */
2550 if (needwal && need_cids)
2551 log_heap_new_cid(relation, heaptuples[ndone]);
2552
2553 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2554 {
2556
2557 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2558 break;
2559
2560 RelationPutHeapTuple(relation, buffer, heaptup, false);
2561
2562 /*
2563 * For logical decoding we need combo CIDs to properly decode the
2564 * catalog.
2565 */
2566 if (needwal && need_cids)
2567 log_heap_new_cid(relation, heaptup);
2568 }
2569
2570 /*
2571 * If the page is all visible, need to clear that, unless we're only
2572 * going to add further frozen rows to it.
2573 *
2574 * If we're only adding already frozen rows to a previously empty
2575 * page, mark it as all-frozen and update the visibility map. We're
2576 * already holding a pin on the vmbuffer.
2577 */
2579 {
2580 all_visible_cleared = true;
2581 PageClearAllVisible(page);
2582 visibilitymap_clear(relation,
2583 BufferGetBlockNumber(buffer),
2584 vmbuffer, VISIBILITYMAP_VALID_BITS);
2585 }
2586 else if (all_frozen_set)
2587 {
2588 PageSetAllVisible(page);
2589 PageClearPrunable(page);
2591 vmbuffer,
2594 relation->rd_locator);
2595 }
2596
2597 /*
2598 * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2599 */
2600
2601 MarkBufferDirty(buffer);
2602
2603 /* XLOG stuff */
2604 if (needwal)
2605 {
2609 char *tupledata;
2610 int totaldatalen;
2611 char *scratchptr = scratch.data;
2612 bool init;
2613 int bufflags = 0;
2614
2615 /*
2616 * If the page was previously empty, we can reinit the page
2617 * instead of restoring the whole thing.
2618 */
2620
2621 /* allocate xl_heap_multi_insert struct from the scratch area */
2624
2625 /*
2626 * Allocate offsets array. Unless we're reinitializing the page,
2627 * in that case the tuples are stored in order starting at
2628 * FirstOffsetNumber and we don't need to store the offsets
2629 * explicitly.
2630 */
2631 if (!init)
2632 scratchptr += nthispage * sizeof(OffsetNumber);
2633
2634 /* the rest of the scratch space is used for tuple data */
2635 tupledata = scratchptr;
2636
2637 /* check that the mutually exclusive flags are not both set */
2639
2640 xlrec->flags = 0;
2643
2644 /*
2645 * We don't have to worry about including a conflict xid in the
2646 * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2647 * visibility rules.
2648 */
2649 if (all_frozen_set)
2651
2652 xlrec->ntuples = nthispage;
2653
2654 /*
2655 * Write out an xl_multi_insert_tuple and the tuple data itself
2656 * for each tuple.
2657 */
2658 for (i = 0; i < nthispage; i++)
2659 {
2661 xl_multi_insert_tuple *tuphdr;
2662 int datalen;
2663
2664 if (!init)
2665 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2666 /* xl_multi_insert_tuple needs two-byte alignment. */
2668 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2669
2670 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2671 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2672 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2673
2674 /* write bitmap [+ padding] [+ oid] + data */
2675 datalen = heaptup->t_len - SizeofHeapTupleHeader;
2677 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2678 datalen);
2679 tuphdr->datalen = datalen;
2680 scratchptr += datalen;
2681 }
2682 totaldatalen = scratchptr - tupledata;
2683 Assert((scratchptr - scratch.data) < BLCKSZ);
2684
2685 if (need_tuple_data)
2687
2688 /*
2689 * Signal that this is the last xl_heap_multi_insert record
2690 * emitted by this call to heap_multi_insert(). Needed for logical
2691 * decoding so it knows when to cleanup temporary data.
2692 */
2693 if (ndone + nthispage == ntuples)
2695
2696 if (init)
2697 {
2698 info |= XLOG_HEAP_INIT_PAGE;
2700 }
2701
2702 /*
2703 * If we're doing logical decoding, include the new tuple data
2704 * even if we take a full-page image of the page.
2705 */
2706 if (need_tuple_data)
2708
2710 XLogRegisterData(xlrec, tupledata - scratch.data);
2712 if (all_frozen_set)
2713 XLogRegisterBuffer(1, vmbuffer, 0);
2714
2715 XLogRegisterBufData(0, tupledata, totaldatalen);
2716
2717 /* filtering by origin on a row level is much more efficient */
2719
2720 recptr = XLogInsert(RM_HEAP2_ID, info);
2721
2722 PageSetLSN(page, recptr);
2723 if (all_frozen_set)
2724 {
2725 Assert(BufferIsDirty(vmbuffer));
2726 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2727 }
2728 }
2729
2731
2732 if (all_frozen_set)
2733 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2734
2735 UnlockReleaseBuffer(buffer);
2736 ndone += nthispage;
2737
2738 /*
2739 * NB: Only release vmbuffer after inserting all tuples - it's fairly
2740 * likely that we'll insert into subsequent heap pages that are likely
2741 * to use the same vm page.
2742 */
2743 }
2744
2745 /* We're done with inserting all tuples, so release the last vmbuffer. */
2746 if (vmbuffer != InvalidBuffer)
2747 ReleaseBuffer(vmbuffer);
2748
2749 /*
2750 * We're done with the actual inserts. Check for conflicts again, to
2751 * ensure that all rw-conflicts in to these inserts are detected. Without
2752 * this final check, a sequential scan of the heap may have locked the
2753 * table after the "before" check, missing one opportunity to detect the
2754 * conflict, and then scanned the table before the new tuples were there,
2755 * missing the other chance to detect the conflict.
2756 *
2757 * For heap inserts, we only need to check for table-level SSI locks. Our
2758 * new tuples can't possibly conflict with existing tuple locks, and heap
2759 * page locks are only consolidated versions of tuple locks; they do not
2760 * lock "gaps" as index page locks do. So we don't need to specify a
2761 * buffer when making the call.
2762 */
2764
2765 /*
2766 * If tuples are cacheable, mark them for invalidation from the caches in
2767 * case we abort. Note it is OK to do this after releasing the buffer,
2768 * because the heaptuples data structure is all in local memory, not in
2769 * the shared buffer.
2770 */
2771 if (IsCatalogRelation(relation))
2772 {
2773 for (i = 0; i < ntuples; i++)
2775 }
2776
2777 /* copy t_self fields back to the caller's slots */
2778 for (i = 0; i < ntuples; i++)
2779 slots[i]->tts_tid = heaptuples[i]->t_self;
2780
2781 pgstat_count_heap_insert(relation, ntuples);
2782}
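/*
 * Editor's illustrative sketch (not part of heapam.c): a minimal caller of
 * heap_multi_insert() built from pre-formed heap tuples.  The function name
 * multi_insert_example and its arguments are hypothetical; the executor slot
 * helpers used here (MakeSingleTupleTableSlot, ExecStoreHeapTuple,
 * ExecDropSingleTupleTableSlot) come from the executor headers, and real
 * callers such as COPY add error handling and index maintenance.
 */
#ifdef NOT_USED
static void
multi_insert_example(Relation rel, HeapTuple *tuples, int ntuples)
{
	TupleTableSlot **slots = palloc(ntuples * sizeof(TupleTableSlot *));
	BulkInsertState bistate = GetBulkInsertState();

	for (int i = 0; i < ntuples; i++)
	{
		slots[i] = MakeSingleTupleTableSlot(RelationGetDescr(rel),
											&TTSOpsHeapTuple);
		ExecStoreHeapTuple(tuples[i], slots[i], false);
	}

	/* One WAL record per filled page instead of one per tuple. */
	heap_multi_insert(rel, slots, ntuples, GetCurrentCommandId(true),
					  0, bistate);

	FreeBulkInsertState(bistate);
	for (int i = 0; i < ntuples; i++)
		ExecDropSingleTupleTableSlot(slots[i]);
	pfree(slots);
}
#endif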
2783
2784/*
2785 * simple_heap_insert - insert a tuple
2786 *
2787 * Currently, this routine differs from heap_insert only in supplying
2788 * a default command ID and not allowing access to the speedup options.
2789 *
2790 * This should be used rather than using heap_insert directly in most places
2791 * where we are modifying system catalogs.
2792 */
2793void
2794simple_heap_insert(Relation relation, HeapTuple tup)
2795{
2796 heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2797}
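/*
 * Editor's note: catalog-modifying code normally reaches this through
 * CatalogTupleInsert() and friends, which call simple_heap_insert() and then
 * update the catalog's indexes as well.
 */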
2798
2799/*
2800 * Given infomask/infomask2, compute the bits that must be saved in the
2801 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2802 * xl_heap_lock_updated WAL records.
2803 *
2804 * See fix_infomask_from_infobits.
2805 */
2806static uint8
2807compute_infobits(uint16 infomask, uint16 infomask2)
2808{
2809 return
2810 ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2811 ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2812 ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2813 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2814 ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2815 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2816 XLHL_KEYS_UPDATED : 0);
2817}
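/*
 * Editor's note (illustration only): for example, an xmax that is a plain
 * exclusive tuple lock (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_LOCK_ONLY set,
 * HEAP_XMAX_IS_MULTI and HEAP_KEYS_UPDATED clear) is condensed by
 * compute_infobits() into XLHL_XMAX_EXCL_LOCK | XLHL_XMAX_LOCK_ONLY, which
 * replay expands again via fix_infomask_from_infobits().
 */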
2818
2819/*
2820 * Given two versions of the same t_infomask for a tuple, compare them and
2821 * return whether the relevant status for a tuple Xmax has changed. This is
2822 * used after a buffer lock has been released and reacquired: we want to ensure
2823 * that the tuple state continues to be the same it was when we previously
2824 * examined it.
2825 *
2826 * Note the Xmax field itself must be compared separately.
2827 */
2828static inline bool
2829xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2830{
2831 const uint16 interesting =
2832 HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2833
2834 if ((new_infomask & interesting) != (old_infomask & interesting))
2835 return true;
2836
2837 return false;
2838}
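/*
 * Editor's note (illustration only): the intended use is to save t_infomask
 * and the raw xmax before releasing the buffer lock and, after re-locking,
 * to restart if either has changed, e.g.:
 *
 *		if (xmax_infomask_changed(tuple->t_data->t_infomask, old_infomask) ||
 *			!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
 *								 old_xmax))
 *			goto restart;
 */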
2839
2840/*
2841 * heap_delete - delete a tuple
2842 *
2843 * See table_tuple_delete() for an explanation of the parameters, except that
2844 * this routine directly takes a tuple rather than a slot.
2845 *
2846 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2847 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2848 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2849 * generated by another transaction).
2850 */
2851TM_Result
2852heap_delete(Relation relation, const ItemPointerData *tid,
2853 CommandId cid, Snapshot crosscheck, bool wait,
2854 TM_FailureData *tmfd, bool changingPart)
2855{
2856 TM_Result result;
2858 ItemId lp;
2859 HeapTupleData tp;
2860 Page page;
2861 BlockNumber block;
2862 Buffer buffer;
2863 Buffer vmbuffer = InvalidBuffer;
2864 TransactionId new_xmax;
2867 bool have_tuple_lock = false;
2868 bool iscombo;
2869 bool all_visible_cleared = false;
2870 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2871 bool old_key_copied = false;
2872
2874
2875 AssertHasSnapshotForToast(relation);
2876
2877 /*
2878 * Forbid this during a parallel operation, lest it allocate a combo CID.
2879 * Other workers might need that combo CID for visibility checks, and we
2880 * have no provision for broadcasting it to them.
2881 */
2882 if (IsInParallelMode())
2883 ereport(ERROR,
2885 errmsg("cannot delete tuples during a parallel operation")));
2886
2887 block = ItemPointerGetBlockNumber(tid);
2888 buffer = ReadBuffer(relation, block);
2889 page = BufferGetPage(buffer);
2890
2891 /*
2892 * Before locking the buffer, pin the visibility map page if it appears to
2893 * be necessary. Since we haven't got the lock yet, someone else might be
2894 * in the middle of changing this, so we'll need to recheck after we have
2895 * the lock.
2896 */
2897 if (PageIsAllVisible(page))
2898 visibilitymap_pin(relation, block, &vmbuffer);
2899
2900 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2901
2902 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2903 Assert(ItemIdIsNormal(lp));
2904
2905 tp.t_tableOid = RelationGetRelid(relation);
2906 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2907 tp.t_len = ItemIdGetLength(lp);
2908 tp.t_self = *tid;
2909
2910l1:
2911
2912 /*
2913 * If we didn't pin the visibility map page and the page has become all
2914 * visible while we were busy locking the buffer, we'll have to unlock and
2915 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2916 * unfortunate, but hopefully shouldn't happen often.
2917 */
2918 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2919 {
2921 visibilitymap_pin(relation, block, &vmbuffer);
2923 }
2924
2925 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2926
2927 if (result == TM_Invisible)
2928 {
2929 UnlockReleaseBuffer(buffer);
2930 ereport(ERROR,
2932 errmsg("attempted to delete invisible tuple")));
2933 }
2934 else if (result == TM_BeingModified && wait)
2935 {
2938
2939 /* must copy state data before unlocking buffer */
2942
2943 /*
2944 * Sleep until concurrent transaction ends -- except when there's a
2945 * single locker and it's our own transaction. Note we don't care
2946 * which lock mode the locker has, because we need the strongest one.
2947 *
2948 * Before sleeping, we need to acquire tuple lock to establish our
2949 * priority for the tuple (see heap_lock_tuple). LockTuple will
2950 * release us when we are next-in-line for the tuple.
2951 *
2952 * If we are forced to "start over" below, we keep the tuple lock;
2953 * this arranges that we stay at the head of the line while rechecking
2954 * tuple state.
2955 */
2957 {
2958 bool current_is_member = false;
2959
2962 {
2964
2965 /*
2966 * Acquire the lock, if necessary (but skip it when we're
2967 * requesting a lock and already have one; avoids deadlock).
2968 */
2969 if (!current_is_member)
2972
2973 /* wait for multixact */
2975 relation, &(tp.t_self), XLTW_Delete,
2976 NULL);
2978
2979 /*
2980 * If xwait had just locked the tuple then some other xact
2981 * could update this tuple before we get to this point. Check
2982 * for xmax change, and start over if so.
2983 *
2984 * We also must start over if we didn't pin the VM page, and
2985 * the page has become all visible.
2986 */
2987 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2990 xwait))
2991 goto l1;
2992 }
2993
2994 /*
2995 * You might think the multixact is necessarily done here, but not
2996 * so: it could have surviving members, namely our own xact or
2997 * other subxacts of this backend. It is legal for us to delete
2998 * the tuple in either case, however (the latter case is
2999 * essentially a situation of upgrading our former shared lock to
3000 * exclusive). We don't bother changing the on-disk hint bits
3001 * since we are about to overwrite the xmax altogether.
3002 */
3003 }
3005 {
3006 /*
3007 * Wait for regular transaction to end; but first, acquire tuple
3008 * lock.
3009 */
3013 XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3015
3016 /*
3017 * xwait is done, but if xwait had just locked the tuple then some
3018 * other xact could update this tuple before we get to this point.
3019 * Check for xmax change, and start over if so.
3020 *
3021 * We also must start over if we didn't pin the VM page, and the
3022 * page has become all visible.
3023 */
3024 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
3027 xwait))
3028 goto l1;
3029
3030 /* Otherwise check if it committed or aborted */
3031 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3032 }
3033
3034 /*
3035 * We may overwrite if previous xmax aborted, or if it committed but
3036 * only locked the tuple without updating it.
3037 */
3038 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3041 result = TM_Ok;
3042 else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
3043 result = TM_Updated;
3044 else
3045 result = TM_Deleted;
3046 }
3047
3048 /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3049 if (result != TM_Ok)
3050 {
3051 Assert(result == TM_SelfModified ||
3052 result == TM_Updated ||
3053 result == TM_Deleted ||
3054 result == TM_BeingModified);
3056 Assert(result != TM_Updated ||
3058 }
3059
3060 if (crosscheck != InvalidSnapshot && result == TM_Ok)
3061 {
3062 /* Perform additional check for transaction-snapshot mode RI updates */
3063 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3064 result = TM_Updated;
3065 }
3066
3067 if (result != TM_Ok)
3068 {
3069 tmfd->ctid = tp.t_data->t_ctid;
3071 if (result == TM_SelfModified)
3073 else
3074 tmfd->cmax = InvalidCommandId;
3075 UnlockReleaseBuffer(buffer);
3076 if (have_tuple_lock)
3078 if (vmbuffer != InvalidBuffer)
3079 ReleaseBuffer(vmbuffer);
3080 return result;
3081 }
3082
3083 /*
3084 * We're about to do the actual delete -- check for conflict first, to
3085 * avoid possibly having to roll back work we've just done.
3086 *
3087 * This is safe without a recheck as long as there is no possibility of
3088 * another process scanning the page between this check and the delete
3089 * being visible to the scan (i.e., an exclusive buffer content lock is
3090 * continuously held from this point until the tuple delete is visible).
3091 */
3093
3094 /* replace cid with a combo CID if necessary */
3096
3097 /*
3098 * Compute replica identity tuple before entering the critical section so
3099 * we don't PANIC upon a memory allocation failure.
3100 */
3101 old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3102
3103 /*
3104 * If this is the first possibly-multixact-able operation in the current
3105 * transaction, set my per-backend OldestMemberMXactId setting. We can be
3106 * certain that the transaction will never become a member of any older
3107 * MultiXactIds than that. (We have to do this even if we end up just
3108 * using our own TransactionId below, since some other backend could
3109 * incorporate our XID into a MultiXact immediately afterwards.)
3110 */
3112
3115 xid, LockTupleExclusive, true,
3116 &new_xmax, &new_infomask, &new_infomask2);
3117
3119
3120 /*
3121 * If this transaction commits, the tuple will become DEAD sooner or
3122 * later. Set flag that this page is a candidate for pruning once our xid
3123 * falls below the OldestXmin horizon. If the transaction finally aborts,
3124 * the subsequent page pruning will be a no-op and the hint will be
3125 * cleared.
3126 */
3127 PageSetPrunable(page, xid);
3128
3129 if (PageIsAllVisible(page))
3130 {
3131 all_visible_cleared = true;
3132 PageClearAllVisible(page);
3133 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3134 vmbuffer, VISIBILITYMAP_VALID_BITS);
3135 }
3136
3137 /* store transaction information of xact deleting the tuple */
3143 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3145 /* Make sure there is no forward chain link in t_ctid */
3146 tp.t_data->t_ctid = tp.t_self;
3147
3148 /* Signal that this is actually a move into another partition */
3149 if (changingPart)
3151
3152 MarkBufferDirty(buffer);
3153
3154 /*
3155 * XLOG stuff
3156 *
3157 * NB: heap_abort_speculative() uses the same xlog record and replay
3158 * routines.
3159 */
3160 if (RelationNeedsWAL(relation))
3161 {
3165
3166 /*
3167 * For logical decode we need combo CIDs to properly decode the
3168 * catalog
3169 */
3171 log_heap_new_cid(relation, &tp);
3172
3173 xlrec.flags = 0;
3176 if (changingPart)
3178 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3179 tp.t_data->t_infomask2);
3181 xlrec.xmax = new_xmax;
3182
3183 if (old_key_tuple != NULL)
3184 {
3185 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3187 else
3189 }
3190
3193
3195
3196 /*
3197 * Log replica identity of the deleted tuple if there is one
3198 */
3199 if (old_key_tuple != NULL)
3200 {
3201 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3202 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3203 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3204
3206 XLogRegisterData((char *) old_key_tuple->t_data
3208 old_key_tuple->t_len
3210 }
3211
3212 /* filtering by origin on a row level is much more efficient */
3214
3216
3217 PageSetLSN(page, recptr);
3218 }
3219
3221
3223
3224 if (vmbuffer != InvalidBuffer)
3225 ReleaseBuffer(vmbuffer);
3226
3227 /*
3228 * If the tuple has toasted out-of-line attributes, we need to delete
3229 * those items too. We have to do this before releasing the buffer
3230 * because we need to look at the contents of the tuple, but it's OK to
3231 * release the content lock on the buffer first.
3232 */
3233 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3234 relation->rd_rel->relkind != RELKIND_MATVIEW)
3235 {
3236 /* toast table entries should never be recursively toasted */
3238 }
3239 else if (HeapTupleHasExternal(&tp))
3240 heap_toast_delete(relation, &tp, false);
3241
3242 /*
3243 * Mark tuple for invalidation from system caches at next command
3244 * boundary. We have to do this before releasing the buffer because we
3245 * need to look at the contents of the tuple.
3246 */
3247 CacheInvalidateHeapTuple(relation, &tp, NULL);
3248
3249 /* Now we can release the buffer */
3250 ReleaseBuffer(buffer);
3251
3252 /*
3253 * Release the lmgr tuple lock, if we had it.
3254 */
3255 if (have_tuple_lock)
3257
3258 pgstat_count_heap_delete(relation);
3259
3262
3263 return TM_Ok;
3264}
3265
3266/*
3267 * simple_heap_delete - delete a tuple
3268 *
3269 * This routine may be used to delete a tuple when concurrent updates of
3270 * the target tuple are not expected (for example, because we have a lock
3271 * on the relation associated with the tuple). Any failure is reported
3272 * via ereport().
3273 */
3274void
3275simple_heap_delete(Relation relation, const ItemPointerData *tid)
3276{
3277 TM_Result result;
3278 TM_FailureData tmfd;
3279
3280 result = heap_delete(relation, tid,
3281 GetCurrentCommandId(true), InvalidSnapshot,
3282 true /* wait for commit */ ,
3283 &tmfd, false /* changingPart */ );
3284 switch (result)
3285 {
3286 case TM_SelfModified:
3287 /* Tuple was already updated in current command? */
3288 elog(ERROR, "tuple already updated by self");
3289 break;
3290
3291 case TM_Ok:
3292 /* done successfully */
3293 break;
3294
3295 case TM_Updated:
3296 elog(ERROR, "tuple concurrently updated");
3297 break;
3298
3299 case TM_Deleted:
3300 elog(ERROR, "tuple concurrently deleted");
3301 break;
3302
3303 default:
3304 elog(ERROR, "unrecognized heap_delete status: %u", result);
3305 break;
3306 }
3307}
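/*
 * Editor's illustrative sketch (not part of heapam.c): unlike
 * simple_heap_delete() above, a caller can inspect the failure data that
 * heap_delete() fills in for the concurrent cases.  The function name
 * delete_with_failure_info and its arguments are hypothetical; real callers
 * (e.g. the executor) re-fetch the successor version via tmfd.ctid and
 * re-evaluate it before deciding what to do next.
 */
#ifdef NOT_USED
static bool
delete_with_failure_info(Relation rel, ItemPointer tid)
{
	TM_FailureData tmfd;
	TM_Result	result;

	result = heap_delete(rel, tid, GetCurrentCommandId(true),
						 InvalidSnapshot, true /* wait */ ,
						 &tmfd, false /* changingPart */ );

	switch (result)
	{
		case TM_Ok:
			return true;
		case TM_Updated:
			/* tmfd.ctid is the successor version, tmfd.xmax the updater */
			elog(LOG, "tuple was concurrently updated by xid %u", tmfd.xmax);
			return false;
		case TM_Deleted:
			elog(LOG, "tuple was concurrently deleted by xid %u", tmfd.xmax);
			return false;
		default:
			elog(ERROR, "unexpected heap_delete result: %u", result);
			return false;		/* keep compiler quiet */
	}
}
#endif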
3308
3309/*
3310 * heap_update - replace a tuple
3311 *
3312 * See table_tuple_update() for an explanation of the parameters, except that
3313 * this routine directly takes a tuple rather than a slot.
3314 *
3315 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3316 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3317 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3318 * generated by another transaction).
3319 */
3320TM_Result
3321heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
3322 CommandId cid, Snapshot crosscheck, bool wait,
3323 TM_FailureData *tmfd, LockTupleMode *lockmode,
3324 TU_UpdateIndexes *update_indexes)
3325{
3326 TM_Result result;
3334 ItemId lp;
3338 bool old_key_copied = false;
3339 Page page,
3340 newpage;
3341 BlockNumber block;
3343 Buffer buffer,
3344 newbuf,
3345 vmbuffer = InvalidBuffer,
3347 bool need_toast;
3349 pagefree;
3350 bool have_tuple_lock = false;
3351 bool iscombo;
3352 bool use_hot_update = false;
3353 bool summarized_update = false;
3354 bool key_intact;
3355 bool all_visible_cleared = false;
3356 bool all_visible_cleared_new = false;
3357 bool checked_lockers;
3358 bool locker_remains;
3359 bool id_has_external = false;
3366
3368
3369 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3372
3373 AssertHasSnapshotForToast(relation);
3374
3375 /*
3376 * Forbid this during a parallel operation, lest it allocate a combo CID.
3377 * Other workers might need that combo CID for visibility checks, and we
3378 * have no provision for broadcasting it to them.
3379 */
3380 if (IsInParallelMode())
3381 ereport(ERROR,
3383 errmsg("cannot update tuples during a parallel operation")));
3384
3385#ifdef USE_ASSERT_CHECKING
3387#endif
3388
3389 /*
3390 * Fetch the list of attributes to be checked for various operations.
3391 *
3392 * For HOT considerations, this is wasted effort if we fail to update or
3393 * have to put the new tuple on a different page. But we must compute the
3394 * list before obtaining buffer lock --- in the worst case, if we are
3395 * doing an update on one of the relevant system catalogs, we could
3396 * deadlock if we try to fetch the list later. In any case, the relcache
3397 * caches the data so this is usually pretty cheap.
3398 *
3399 * We also need columns used by the replica identity and columns that are
3400 * considered the "key" of rows in the table.
3401 *
3402 * Note that we get copies of each bitmap, so we need not worry about
3403 * relcache flush happening midway through.
3404 */
3417
3419 INJECTION_POINT("heap_update-before-pin", NULL);
3420 buffer = ReadBuffer(relation, block);
3421 page = BufferGetPage(buffer);
3422
3423 /*
3424 * Before locking the buffer, pin the visibility map page if it appears to
3425 * be necessary. Since we haven't got the lock yet, someone else might be
3426 * in the middle of changing this, so we'll need to recheck after we have
3427 * the lock.
3428 */
3429 if (PageIsAllVisible(page))
3430 visibilitymap_pin(relation, block, &vmbuffer);
3431
3433
3435
3436 /*
3437 * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3438 * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3439 * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3440 * of which indicates concurrent pruning.
3441 *
3442 * Failing with TM_Updated would be most accurate. However, unlike other
3443 * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3444 * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3445 * does matter to SQL statements UPDATE and MERGE, those SQL statements
3446 * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3447 * TM_Updated and TM_Deleted affects only the wording of error messages.
3448 * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3449 * the specification of when tmfd->ctid is valid. Second, it creates
3450 * error log evidence that we took this branch.
3451 *
3452 * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3453 * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3454 * unrelated row, we'll fail with "duplicate key value violates unique".
3455 * XXX if otid is the live, newer version of the newtup row, we'll discard
3456 * changes originating in versions of this catalog row after the version
3457 * the caller got from syscache. See syscache-update-pruned.spec.
3458 */
3459 if (!ItemIdIsNormal(lp))
3460 {
3462
3463 UnlockReleaseBuffer(buffer);
3465 if (vmbuffer != InvalidBuffer)
3466 ReleaseBuffer(vmbuffer);
3467 tmfd->ctid = *otid;
3468 tmfd->xmax = InvalidTransactionId;
3469 tmfd->cmax = InvalidCommandId;
3471
3476 /* modified_attrs not yet initialized */
3478 return TM_Deleted;
3479 }
3480
3481 /*
3482 * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3483 * properly.
3484 */
3485 oldtup.t_tableOid = RelationGetRelid(relation);
3486 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3487 oldtup.t_len = ItemIdGetLength(lp);
3488 oldtup.t_self = *otid;
3489
3490 /* the new tuple is ready, except for this: */
3491 newtup->t_tableOid = RelationGetRelid(relation);
3492
3493 /*
3494 * Determine columns modified by the update. Additionally, identify
3495 * whether any of the unmodified replica identity key attributes in the
3496 * old tuple is externally stored or not. This is required because for
3497 * such attributes the flattened value won't be WAL logged as part of the
3498 * new tuple so we must include it as part of the old_key_tuple. See
3499 * ExtractReplicaIdentity.
3500 */
3502 id_attrs, &oldtup,
3504
3505 /*
3506 * If we're not updating any "key" column, we can grab a weaker lock type.
3507 * This allows for more concurrency when we are running simultaneously
3508 * with foreign key checks.
3509 *
3510 * Note that if a column gets detoasted while executing the update, but
3511 * the value ends up being the same, this test will fail and we will use
3512 * the stronger lock. This is acceptable; the important case to optimize
3513 * is updates that don't manipulate key columns, not those that
3514 * serendipitously arrive at the same key values.
3515 */
3517 {
3518 *lockmode = LockTupleNoKeyExclusive;
3520 key_intact = true;
3521
3522 /*
3523 * If this is the first possibly-multixact-able operation in the
3524 * current transaction, set my per-backend OldestMemberMXactId
3525 * setting. We can be certain that the transaction will never become a
3526 * member of any older MultiXactIds than that. (We have to do this
3527 * even if we end up just using our own TransactionId below, since
3528 * some other backend could incorporate our XID into a MultiXact
3529 * immediately afterwards.)
3530 */
3532 }
3533 else
3534 {
3535 *lockmode = LockTupleExclusive;
3537 key_intact = false;
3538 }
3539
3540 /*
3541 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3542 * otid may very well point at newtup->t_self, which we will overwrite
3543 * with the new tuple's location, so there's great risk of confusion if we
3544 * use otid anymore.
3545 */
3546
3547l2:
3548 checked_lockers = false;
3549 locker_remains = false;
3550 result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3551
3552 /* see below about the "no wait" case */
3553 Assert(result != TM_BeingModified || wait);
3554
3555 if (result == TM_Invisible)
3556 {
3557 UnlockReleaseBuffer(buffer);
3558 ereport(ERROR,
3560 errmsg("attempted to update invisible tuple")));
3561 }
3562 else if (result == TM_BeingModified && wait)
3563 {
3566 bool can_continue = false;
3567
3568 /*
3569 * XXX note that we don't consider the "no wait" case here. This
3570 * isn't a problem currently because no caller uses that case, but it
3571 * should be fixed if such a caller is introduced. It wasn't a
3572 * problem previously because this code would always wait, but now
3573 * that some tuple locks do not conflict with one of the lock modes we
3574 * use, it is possible that this case is interesting to handle
3575 * specially.
3576 *
3577 * This may cause failures with third-party code that calls
3578 * heap_update directly.
3579 */
3580
3581 /* must copy state data before unlocking buffer */
3583 infomask = oldtup.t_data->t_infomask;
3584
3585 /*
3586 * Now we have to do something about the existing locker. If it's a
3587 * multi, sleep on it; we might be awakened before it is completely
3588 * gone (or even not sleep at all in some cases); we need to preserve
3589 * it as locker, unless it is gone completely.
3590 *
3591 * If it's not a multi, we need to check for sleeping conditions
3592 * before actually going to sleep. If the update doesn't conflict
3593 * with the locks, we just continue without sleeping (but making sure
3594 * it is preserved).
3595 *
3596 * Before sleeping, we need to acquire tuple lock to establish our
3597 * priority for the tuple (see heap_lock_tuple). LockTuple will
3598 * release us when we are next-in-line for the tuple. Note we must
3599 * not acquire the tuple lock until we're sure we're going to sleep;
3600 * otherwise we're open for race conditions with other transactions
3601 * holding the tuple lock which sleep on us.
3602 *
3603 * If we are forced to "start over" below, we keep the tuple lock;
3604 * this arranges that we stay at the head of the line while rechecking
3605 * tuple state.
3606 */
3608 {
3610 int remain;
3611 bool current_is_member = false;
3612
3614 *lockmode, &current_is_member))
3615 {
3617
3618 /*
3619 * Acquire the lock, if necessary (but skip it when we're
3620 * requesting a lock and already have one; avoids deadlock).
3621 */
3622 if (!current_is_member)
3623 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3625
3626 /* wait for multixact */
3628 relation, &oldtup.t_self, XLTW_Update,
3629 &remain);
3630 checked_lockers = true;
3631 locker_remains = remain != 0;
3633
3634 /*
3635 * If xwait had just locked the tuple then some other xact
3636 * could update this tuple before we get to this point. Check
3637 * for xmax change, and start over if so.
3638 */
3639 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3640 infomask) ||
3642 xwait))
3643 goto l2;
3644 }
3645
3646 /*
3647 * Note that the multixact may not be done by now. It could have
3648 * surviving members; our own xact or other subxacts of this
3649 * backend, and also any other concurrent transaction that locked
3650 * the tuple with LockTupleKeyShare if we only got
3651 * LockTupleNoKeyExclusive. If this is the case, we have to be
3652 * careful to mark the updated tuple with the surviving members in
3653 * Xmax.
3654 *
3655 * Note that there could have been another update in the
3656 * MultiXact. In that case, we need to check whether it committed
3657 * or aborted. If it aborted we are safe to update it again;
3658 * otherwise there is an update conflict, and we have to return
3659 * TableTuple{Deleted, Updated} below.
3660 *
3661 * In the LockTupleExclusive case, we still need to preserve the
3662 * surviving members: those would include the tuple locks we had
3663 * before this one, which are important to keep in case this
3664 * subxact aborts.
3665 */
3666 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3668 else
3670
3671 /*
3672 * There was no UPDATE in the MultiXact; or it aborted. No
3673 * TransactionIdIsInProgress() call needed here, since we called
3674 * MultiXactIdWait() above.
3675 */
3678 can_continue = true;
3679 }
3681 {
3682 /*
3683 * The only locker is ourselves; we can avoid grabbing the tuple
3684 * lock here, but must preserve our locking information.
3685 */
3686 checked_lockers = true;
3687 locker_remains = true;
3688 can_continue = true;
3689 }
3691 {
3692 /*
3693 * If it's just a key-share locker, and we're not changing the key
3694 * columns, we don't need to wait for it to end; but we need to
3695 * preserve it as locker.
3696 */
3697 checked_lockers = true;
3698 locker_remains = true;
3699 can_continue = true;
3700 }
3701 else
3702 {
3703 /*
3704 * Wait for regular transaction to end; but first, acquire tuple
3705 * lock.
3706 */
3708 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3710 XactLockTableWait(xwait, relation, &oldtup.t_self,
3711 XLTW_Update);
3712 checked_lockers = true;
3714
3715 /*
3716 * xwait is done, but if xwait had just locked the tuple then some
3717 * other xact could update this tuple before we get to this point.
3718 * Check for xmax change, and start over if so.
3719 */
3720 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3723 goto l2;
3724
3725 /* Otherwise check if it committed or aborted */
3726 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3727 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3728 can_continue = true;
3729 }
3730
3731 if (can_continue)
3732 result = TM_Ok;
3733 else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3734 result = TM_Updated;
3735 else
3736 result = TM_Deleted;
3737 }
3738
3739 /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3740 if (result != TM_Ok)
3741 {
3742 Assert(result == TM_SelfModified ||
3743 result == TM_Updated ||
3744 result == TM_Deleted ||
3745 result == TM_BeingModified);
3746 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3747 Assert(result != TM_Updated ||
3748 !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3749 }
3750
3751 if (crosscheck != InvalidSnapshot && result == TM_Ok)
3752 {
3753 /* Perform additional check for transaction-snapshot mode RI updates */
3755 result = TM_Updated;
3756 }
3757
3758 if (result != TM_Ok)
3759 {
3760 tmfd->ctid = oldtup.t_data->t_ctid;
3761 tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3762 if (result == TM_SelfModified)
3763 tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3764 else
3765 tmfd->cmax = InvalidCommandId;
3766 UnlockReleaseBuffer(buffer);
3767 if (have_tuple_lock)
3768 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3769 if (vmbuffer != InvalidBuffer)
3770 ReleaseBuffer(vmbuffer);
3772
3779 return result;
3780 }
3781
3782 /*
3783 * If we didn't pin the visibility map page and the page has become all
3784 * visible while we were busy locking the buffer, or during some
3785 * subsequent window during which we had it unlocked, we'll have to unlock
3786 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3787 * bit unfortunate, especially since we'll now have to recheck whether the
3788 * tuple has been locked or updated under us, but hopefully it won't
3789 * happen very often.
3790 */
3791 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3792 {
3794 visibilitymap_pin(relation, block, &vmbuffer);
3796 goto l2;
3797 }
3798
3799 /* Fill in transaction status data */
3800
3801 /*
3802 * If the tuple we're updating is locked, we need to preserve the locking
3803 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3804 */
3806 oldtup.t_data->t_infomask,
3807 oldtup.t_data->t_infomask2,
3808 xid, *lockmode, true,
3811
3812 /*
3813 * And also prepare an Xmax value for the new copy of the tuple. If there
3814 * was no xmax previously, or there was one but all lockers are now gone,
3815 * then use InvalidTransactionId; otherwise, get the xmax from the old
3816 * tuple. (In rare cases that might also be InvalidTransactionId and yet
3817 * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3818 */
3819 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3820 HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3823 else
3825
3827 {
3830 }
3831 else
3832 {
3833 /*
3834 * If we found a valid Xmax for the new tuple, then the infomask bits
3835 * to use on the new tuple depend on what was there on the old one.
3836 * Note that since we're doing an update, the only possibility is that
3837 * the lockers had FOR KEY SHARE lock.
3838 */
3839 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3840 {
3843 }
3844 else
3845 {
3848 }
3849 }
3850
3851 /*
3852 * Prepare the new tuple with the appropriate initial values of Xmin and
3853 * Xmax, as well as initial infomask bits as computed above.
3854 */
3855 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3856 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3857 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3859 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3860 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3862
3863 /*
3864 * Replace cid with a combo CID if necessary. Note that we already put
3865 * the plain cid into the new tuple.
3866 */
3868
3869 /*
3870 * If the toaster needs to be activated, OR if the new tuple will not fit
3871 * on the same page as the old, then we need to release the content lock
3872 * (but not the pin!) on the old tuple's buffer while we are off doing
3873 * TOAST and/or table-file-extension work. We must mark the old tuple to
3874 * show that it's locked, else other processes may try to update it
3875 * themselves.
3876 *
3877 * We need to invoke the toaster if there are already any out-of-line
3878 * toasted values present, or if the new tuple is over-threshold.
3879 */
3880 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3881 relation->rd_rel->relkind != RELKIND_MATVIEW)
3882 {
3883 /* toast table entries should never be recursively toasted */
3886 need_toast = false;
3887 }
3888 else
3891 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3892
3894
3895 newtupsize = MAXALIGN(newtup->t_len);
3896
3898 {
3902 bool cleared_all_frozen = false;
3903
3904 /*
3905 * To prevent concurrent sessions from updating the tuple, we have to
3906 * temporarily mark it locked, while we release the page-level lock.
3907 *
3908 * To satisfy the rule that any xid potentially appearing in a buffer
3909 * written out to disk, we unfortunately have to WAL log this
3910 * temporary modification. We can reuse xl_heap_lock for this
3911 * purpose. If we crash/error before following through with the
3912 * actual update, xmax will be of an aborted transaction, allowing
3913 * other sessions to proceed.
3914 */
3915
3916 /*
3917 * Compute xmax / infomask appropriate for locking the tuple. This has
3918 * to be done separately from the combo that's going to be used for
3919 * updating, because the potentially created multixact would otherwise
3920 * be wrong.
3921 */
3923 oldtup.t_data->t_infomask,
3924 oldtup.t_data->t_infomask2,
3925 xid, *lockmode, false,
3928
3930
3932
3933 /* Clear obsolete visibility flags ... */
3934 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3935 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3937 /* ... and store info about transaction updating this tuple */
3940 oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3941 oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3943
3944 /* temporarily make it look not-updated, but locked */
3945 oldtup.t_data->t_ctid = oldtup.t_self;
3946
3947 /*
3948 * Clear all-frozen bit on visibility map if needed. We could
3949 * immediately reset ALL_VISIBLE, but given that the WAL logging
3950 * overhead would be unchanged, that doesn't seem necessarily
3951 * worthwhile.
3952 */
3953 if (PageIsAllVisible(page) &&
3954 visibilitymap_clear(relation, block, vmbuffer,
3956 cleared_all_frozen = true;
3957
3958 MarkBufferDirty(buffer);
3959
3960 if (RelationNeedsWAL(relation))
3961 {
3964
3967
3968 xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3970 xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3971 oldtup.t_data->t_infomask2);
3972 xlrec.flags =
3976 PageSetLSN(page, recptr);
3977 }
3978
3980
3982
3983 /*
3984 * Let the toaster do its thing, if needed.
3985 *
3986 * Note: below this point, heaptup is the data we actually intend to
3987 * store into the relation; newtup is the caller's original untoasted
3988 * data.
3989 */
3990 if (need_toast)
3991 {
3992 /* Note we always use WAL and FSM during updates */
3994 newtupsize = MAXALIGN(heaptup->t_len);
3995 }
3996 else
3997 heaptup = newtup;
3998
3999 /*
4000 * Now, do we need a new page for the tuple, or not? This is a bit
4001 * tricky since someone else could have added tuples to the page while
4002 * we weren't looking. We have to recheck the available space after
4003 * reacquiring the buffer lock. But don't bother to do that if the
4004 * former amount of free space is still not enough; it's unlikely
4005 * there's more free now than before.
4006 *
4007 * What's more, if we need to get a new page, we will need to acquire
4008 * buffer locks on both old and new pages. To avoid deadlock against
4009 * some other backend trying to get the same two locks in the other
4010 * order, we must be consistent about the order we get the locks in.
4011 * We use the rule "lock the lower-numbered page of the relation
4012 * first". To implement this, we must do RelationGetBufferForTuple
4013 * while not holding the lock on the old page, and we must rely on it
4014 * to get the locks on both pages in the correct order.
4015 *
4016 * Another consideration is that we need visibility map page pin(s) if
4017 * we will have to clear the all-visible flag on either page. If we
4018 * call RelationGetBufferForTuple, we rely on it to acquire any such
4019 * pins; but if we don't, we have to handle that here. Hence we need
4020 * a loop.
4021 */
4022 for (;;)
4023 {
4024 if (newtupsize > pagefree)
4025 {
4026 /* It doesn't fit, must use RelationGetBufferForTuple. */
4027 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
4028 buffer, 0, NULL,
4029 &vmbuffer_new, &vmbuffer,
4030 0);
4031 /* We're all done. */
4032 break;
4033 }
4034 /* Acquire VM page pin if needed and we don't have it. */
4035 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
4036 visibilitymap_pin(relation, block, &vmbuffer);
4037 /* Re-acquire the lock on the old tuple's page. */
4039 /* Re-check using the up-to-date free space */
4041 if (newtupsize > pagefree ||
4042 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
4043 {
4044 /*
4045 * Rats, it doesn't fit anymore, or somebody just now set the
4046 * all-visible flag. We must now unlock and loop to avoid
4047 * deadlock. Fortunately, this path should seldom be taken.
4048 */
4050 }
4051 else
4052 {
4053 /* We're all done. */
4054 newbuf = buffer;
4055 break;
4056 }
4057 }
4058 }
4059 else
4060 {
4061 /* No TOAST work needed, and it'll fit on same page */
4062 newbuf = buffer;
4063 heaptup = newtup;
4064 }
4065
4067
4068 /*
4069 * We're about to do the actual update -- check for conflict first, to
4070 * avoid possibly having to roll back work we've just done.
4071 *
4072 * This is safe without a recheck as long as there is no possibility of
4073 * another process scanning the pages between this check and the update
4074 * being visible to the scan (i.e., exclusive buffer content lock(s) are
4075 * continuously held from this point until the tuple update is visible).
4076 *
4077 * For the new tuple the only check needed is at the relation level, but
4078 * since both tuples are in the same relation and the check for oldtup
4079 * will include checking the relation level, there is no benefit to a
4080 * separate check for the new tuple.
4081 */
4082 CheckForSerializableConflictIn(relation, &oldtup.t_self,
4083 BufferGetBlockNumber(buffer));
4084
4085 /*
4086 * At this point newbuf and buffer are both pinned and locked, and newbuf
4087 * has enough space for the new tuple. If they are the same buffer, only
4088 * one pin is held.
4089 */
4090
4091 if (newbuf == buffer)
4092 {
4093 /*
4094 * Since the new tuple is going into the same page, we might be able
4095 * to do a HOT update. Check if any of the index columns have been
4096 * changed.
4097 */
4099 {
4100 use_hot_update = true;
4101
4102 /*
4103 * If none of the columns that are used in hot-blocking indexes
4104 * were updated, we can apply HOT, but we do still need to check
4105 * if we need to update the summarizing indexes, and update those
4106 * indexes if the columns were updated, or we may fail to detect
4107 * e.g. value bound changes in BRIN minmax indexes.
4108 */
4110 summarized_update = true;
4111 }
4112 }
4113 else
4114 {
4115 /* Set a hint that the old page could use prune/defrag */
4116 PageSetFull(page);
4117 }
4118
4119 /*
4120 * Compute replica identity tuple before entering the critical section so
4121 * we don't PANIC upon a memory allocation failure.
4122 * ExtractReplicaIdentity() will return NULL if nothing needs to be
4123 * logged. Pass old key required as true only if the replica identity key
4124 * columns are modified or it has external data.
4125 */
4130
4131 /* NO EREPORT(ERROR) from here till changes are logged */
4133
4134 /*
4135 * If this transaction commits, the old tuple will become DEAD sooner or
4136 * later. Set flag that this page is a candidate for pruning once our xid
4137 * falls below the OldestXmin horizon. If the transaction finally aborts,
4138 * the subsequent page pruning will be a no-op and the hint will be
4139 * cleared.
4140 *
4141 * XXX Should we set hint on newbuf as well? If the transaction aborts,
4142 * there would be a prunable tuple in the newbuf; but for now we choose
4143 * not to optimize for aborts. Note that heap_xlog_update must be kept in
4144 * sync if this decision changes.
4145 */
4146 PageSetPrunable(page, xid);
4147
4148 if (use_hot_update)
4149 {
4150 /* Mark the old tuple as HOT-updated */
4152 /* And mark the new tuple as heap-only */
4154 /* Mark the caller's copy too, in case different from heaptup */
4156 }
4157 else
4158 {
4159 /* Make sure tuples are correctly marked as not-HOT */
4163 }
4164
4165 RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4166
4167
4168 /* Clear obsolete visibility flags, possibly set by ourselves above... */
4169 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4170 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4171 /* ... and store info about transaction updating this tuple */
4174 oldtup.t_data->t_infomask |= infomask_old_tuple;
4175 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4177
4178 /* record address of new tuple in t_ctid of old one */
4179 oldtup.t_data->t_ctid = heaptup->t_self;
4180
4181 /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4182 if (PageIsAllVisible(page))
4183 {
4184 all_visible_cleared = true;
4185 PageClearAllVisible(page);
4186 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4187 vmbuffer, VISIBILITYMAP_VALID_BITS);
4188 }
4189 if (newbuf != buffer && PageIsAllVisible(newpage))
4190 {
4195 }
4196
4197 if (newbuf != buffer)
4199 MarkBufferDirty(buffer);
4200
4201 /* XLOG stuff */
4202 if (RelationNeedsWAL(relation))
4203 {
4205
4206 /*
4207 * For logical decoding we need combo CIDs to properly decode the
4208 * catalog.
4209 */
4211 {
4212 log_heap_new_cid(relation, &oldtup);
4213 log_heap_new_cid(relation, heaptup);
4214 }
4215
4216 recptr = log_heap_update(relation, buffer,
4221 if (newbuf != buffer)
4222 {
4224 }
4225 PageSetLSN(page, recptr);
4226 }
4227
4229
4230 if (newbuf != buffer)
4233
4234 /*
4235 * Mark old tuple for invalidation from system caches at next command
4236 * boundary, and mark the new tuple for invalidation in case we abort. We
4237 * have to do this before releasing the buffer because oldtup is in the
4238 * buffer. (heaptup is all in local memory, but it's necessary to process
4239 * both tuple versions in one call to inval.c so we can avoid redundant
4240 * sinval messages.)
4241 */
4243
4244 /* Now we can release the buffer(s) */
4245 if (newbuf != buffer)
4247 ReleaseBuffer(buffer);
4250 if (BufferIsValid(vmbuffer))
4251 ReleaseBuffer(vmbuffer);
4252
4253 /*
4254 * Release the lmgr tuple lock, if we had it.
4255 */
4256 if (have_tuple_lock)
4257 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4258
4259 pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4260
4261 /*
4262 * If heaptup is a private copy, release it. Don't forget to copy t_self
4263 * back to the caller's image, too.
4264 */
4265 if (heaptup != newtup)
4266 {
4267 newtup->t_self = heaptup->t_self;
4269 }
4270
4271 /*
4272 * If it is a HOT update, the update may still need to update summarized
4273 * indexes, lest we fail to update those summaries and get incorrect
4274 * results (for example, minmax bounds of the block may change with this
4275 * update).
4276 */
4277 if (use_hot_update)
4278 {
4281 else
4283 }
4284 else
4286
4289
4296
4297 return TM_Ok;
4298}
4299
4300#ifdef USE_ASSERT_CHECKING
4301/*
4302 * Confirm adequate lock held during heap_update(), per rules from
4303 * README.tuplock section "Locking to write inplace-updated tables".
4304 */
4305static void
4307 const ItemPointerData *otid,
4309{
4310 /* LOCKTAG_TUPLE acceptable for any catalog */
4311 switch (RelationGetRelid(relation))
4312 {
4313 case RelationRelationId:
4314 case DatabaseRelationId:
4315 {
4317
4319 relation->rd_lockInfo.lockRelId.dbId,
4320 relation->rd_lockInfo.lockRelId.relId,
4324 return;
4325 }
4326 break;
4327 default:
4328 Assert(!IsInplaceUpdateRelation(relation));
4329 return;
4330 }
4331
4332 switch (RelationGetRelid(relation))
4333 {
4334 case RelationRelationId:
4335 {
4336 /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4338 Oid relid = classForm->oid;
4339 Oid dbid;
4340 LOCKTAG tag;
4341
4342 if (IsSharedRelation(relid))
4343 dbid = InvalidOid;
4344 else
4345 dbid = MyDatabaseId;
4346
4347 if (classForm->relkind == RELKIND_INDEX)
4348 {
4349 Relation irel = index_open(relid, AccessShareLock);
4350
4351 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4353 }
4354 else
4355 SET_LOCKTAG_RELATION(tag, dbid, relid);
4356
4357 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4358 !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4359 elog(WARNING,
4360 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4361 NameStr(classForm->relname),
4362 relid,
4363 classForm->relkind,
4366 }
4367 break;
4368 case DatabaseRelationId:
4369 {
4370 /* LOCKTAG_TUPLE required */
4372
4373 elog(WARNING,
4374 "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4375 NameStr(dbForm->datname),
4376 dbForm->oid,
4379 }
4380 break;
4381 }
4382}
4383
4384/*
4385 * Confirm adequate relation lock held, per rules from README.tuplock section
4386 * "Locking to write inplace-updated tables".
4387 */
4388static void
4390{
4392 Oid relid = classForm->oid;
4393 Oid dbid;
4394 LOCKTAG tag;
4395
4396 if (IsSharedRelation(relid))
4397 dbid = InvalidOid;
4398 else
4399 dbid = MyDatabaseId;
4400
4401 if (classForm->relkind == RELKIND_INDEX)
4402 {
4403 Relation irel = index_open(relid, AccessShareLock);
4404
4405 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4407 }
4408 else
4409 SET_LOCKTAG_RELATION(tag, dbid, relid);
4410
4411 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4412 elog(WARNING,
4413 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4414 NameStr(classForm->relname),
4415 relid,
4416 classForm->relkind,
4419}
4420#endif
4421
4422/*
4423 * Check if the specified attribute's values are the same. Subroutine for
4424 * HeapDetermineColumnsInfo.
4425 */
4426static bool
4428 bool isnull1, bool isnull2)
4429{
4430 /*
4431 * If one value is NULL and the other is not, then they are certainly not
4432 * equal
4433 */
4434 if (isnull1 != isnull2)
4435 return false;
4436
4437 /*
4438 * If both are NULL, they can be considered equal.
4439 */
4440 if (isnull1)
4441 return true;
4442
4443 /*
4444 * We do simple binary comparison of the two datums. This may be overly
4445 * strict because there can be multiple binary representations for the
4446 * same logical value. But we should be OK as long as there are no false
4447 * positives. Using a type-specific equality operator is messy because
4448 * there could be multiple notions of equality in different operator
4449 * classes; furthermore, we cannot safely invoke user-defined functions
4450 * while holding exclusive buffer lock.
4451 */
4452 if (attrnum <= 0)
4453 {
4454 /* The only allowed system columns are OIDs, so do this */
4456 }
4457 else
4458 {
4460
4462 att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4463 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4464 }
4465}
4466
4467/*
4468 * Check which columns are being updated.
4469 *
4470 * Given an updated tuple, determine (and return into the output bitmapset),
4471 * from those listed as interesting, the set of columns that changed.
4472 *
4473 * has_external indicates if any of the unmodified attributes (from those
4474 * listed as interesting) of the old tuple is a member of external_cols and is
4475 * stored externally.
4476 */
4477static Bitmapset *
4482 bool *has_external)
4483{
4484 int attidx;
4486 TupleDesc tupdesc = RelationGetDescr(relation);
4487
4488 attidx = -1;
4489 while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4490 {
4491 /* attidx is zero-based, attrnum is the normal attribute number */
4493 Datum value1,
4494 value2;
4495 bool isnull1,
4496 isnull2;
4497
4498 /*
4499 * If it's a whole-tuple reference, say "not equal". It's not really
4500 * worth supporting this case, since it could only succeed after a
4501 * no-op update, which is hardly a case worth optimizing for.
4502 */
4503 if (attrnum == 0)
4504 {
4505 modified = bms_add_member(modified, attidx);
4506 continue;
4507 }
4508
4509 /*
4510 * Likewise, automatically say "not equal" for any system attribute
4511 * other than tableOID; we cannot expect these to be consistent in a
4512 * HOT chain, or even to be set correctly yet in the new tuple.
4513 */
4514 if (attrnum < 0)
4515 {
4516 if (attrnum != TableOidAttributeNumber)
4517 {
4518 modified = bms_add_member(modified, attidx);
4519 continue;
4520 }
4521 }
4522
4523 /*
4524 * Extract the corresponding values. XXX this is pretty inefficient
4525 * if there are many indexed columns. Should we do a single
4526 * heap_deform_tuple call on each tuple, instead? But that doesn't
4527 * work for system columns ...
4528 */
4529 value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4530 value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4531
4532 if (!heap_attr_equals(tupdesc, attrnum, value1,
4533 value2, isnull1, isnull2))
4534 {
4535 modified = bms_add_member(modified, attidx);
4536 continue;
4537 }
4538
4539 /*
4540 * No need to check attributes that can't be stored externally. Note
4541 * that system attributes can't be stored externally.
4542 */
4543 if (attrnum < 0 || isnull1 ||
4544 TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4545 continue;
4546
4547 /*
4548 * Check if the old tuple's attribute is stored externally and is a
4549 * member of external_cols.
4550 */
4553 *has_external = true;
4554 }
4555
4556 return modified;
4557}
4558
4559/*
4560 * simple_heap_update - replace a tuple
4561 *
4562 * This routine may be used to update a tuple when concurrent updates of
4563 * the target tuple are not expected (for example, because we have a lock
4564 * on the relation associated with the tuple). Any failure is reported
4565 * via ereport().
4566 */
4567void
4570{
4571 TM_Result result;
4572 TM_FailureData tmfd;
4573 LockTupleMode lockmode;
4574
4575 result = heap_update(relation, otid, tup,
4577 true /* wait for commit */ ,
4578 &tmfd, &lockmode, update_indexes);
4579 switch (result)
4580 {
4581 case TM_SelfModified:
4582 /* Tuple was already updated in current command? */
4583 elog(ERROR, "tuple already updated by self");
4584 break;
4585
4586 case TM_Ok:
4587 /* done successfully */
4588 break;
4589
4590 case TM_Updated:
4591 elog(ERROR, "tuple concurrently updated");
4592 break;
4593
4594 case TM_Deleted:
4595 elog(ERROR, "tuple concurrently deleted");
4596 break;
4597
4598 default:
4599 elog(ERROR, "unrecognized heap_update status: %u", result);
4600 break;
4601 }
4602}
4603
4604
4605/*
4606 * Return the MultiXactStatus corresponding to the given tuple lock mode.
4607 */
4608static MultiXactStatus
4610{
4611 int retval;
4612
4613 if (is_update)
4614 retval = tupleLockExtraInfo[mode].updstatus;
4615 else
4616 retval = tupleLockExtraInfo[mode].lockstatus;
4617
4618 if (retval == -1)
4619 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4620 is_update ? "true" : "false");
4621
4622 return (MultiXactStatus) retval;
4623}
4624
4625/*
4626 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4627 *
4628 * Note that this acquires a buffer pin, which the caller must release.
4629 *
4630 * Input parameters:
4631 * relation: relation containing tuple (caller must hold suitable lock)
4632 * cid: current command ID (used for visibility test, and stored into
4633 * tuple's cmax if lock is successful)
4634 * mode: indicates if shared or exclusive tuple lock is desired
4635 * wait_policy: what to do if tuple lock is not available
4636 * follow_updates: if true, follow the update chain to also lock descendant
4637 * tuples.
4638 *
4639 * Output parameters:
4640 * *tuple: all fields filled in
4641 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4642 * *tmfd: filled in failure cases (see below)
4643 *
4644 * Function results are the same as the ones for table_tuple_lock().
4645 *
4646 * In the failure cases other than TM_Invisible, the routine fills
4647 * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4648 * if necessary), and t_cmax (the last only for TM_SelfModified,
4649 * since we cannot obtain cmax from a combo CID generated by another
4650 * transaction).
4651 * See comments for struct TM_FailureData for additional info.
4652 *
4653 * See README.tuplock for a thorough explanation of this mechanism.
4654 */
4658 bool follow_updates,
4659 Buffer *buffer, TM_FailureData *tmfd)
4660{
4661 TM_Result result;
4662 ItemPointer tid = &(tuple->t_self);
4663 ItemId lp;
4664 Page page;
4665 Buffer vmbuffer = InvalidBuffer;
4666 BlockNumber block;
4667 TransactionId xid,
4668 xmax;
4672 bool first_time = true;
4673 bool skip_tuple_lock = false;
4674 bool have_tuple_lock = false;
4675 bool cleared_all_frozen = false;
4676
4677 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4678 block = ItemPointerGetBlockNumber(tid);
4679
4680 /*
4681 * Before locking the buffer, pin the visibility map page if it appears to
4682 * be necessary. Since we haven't got the lock yet, someone else might be
4683 * in the middle of changing this, so we'll need to recheck after we have
4684 * the lock.
4685 */
4686 if (PageIsAllVisible(BufferGetPage(*buffer)))
4687 visibilitymap_pin(relation, block, &vmbuffer);
4688
4690
4691 page = BufferGetPage(*buffer);
4694
4695 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4696 tuple->t_len = ItemIdGetLength(lp);
4697 tuple->t_tableOid = RelationGetRelid(relation);
4698
4699l3:
4700 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4701
4702 if (result == TM_Invisible)
4703 {
4704 /*
4705 * This is possible, but only when locking a tuple for ON CONFLICT DO
4706 * SELECT/UPDATE. We return this value here rather than throwing an
4707 * error in order to give that case the opportunity to throw a more
4708 * specific error.
4709 */
4710 result = TM_Invisible;
4711 goto out_locked;
4712 }
4713 else if (result == TM_BeingModified ||
4714 result == TM_Updated ||
4715 result == TM_Deleted)
4716 {
4720 bool require_sleep;
4721 ItemPointerData t_ctid;
4722
4723 /* must copy state data before unlocking buffer */
4725 infomask = tuple->t_data->t_infomask;
4726 infomask2 = tuple->t_data->t_infomask2;
4727 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4728
4730
4731 /*
4732 * If any subtransaction of the current top transaction already holds
4733 * a lock as strong as or stronger than what we're requesting, we
4734 * effectively hold the desired lock already. We *must* succeed
4735 * without trying to take the tuple lock, else we will deadlock
4736 * against anyone wanting to acquire a stronger lock.
4737 *
4738 * Note we only do this the first time we loop on the HTSU result;
4739 * there is no point in testing in subsequent passes, because
4740 * evidently our own transaction cannot have acquired a new lock after
4741 * the first time we checked.
4742 */
4743 if (first_time)
4744 {
4745 first_time = false;
4746
4748 {
4749 int i;
4750 int nmembers;
4751 MultiXactMember *members;
4752
4753 /*
4754 * We don't need to allow old multixacts here; if that had
4755 * been the case, HeapTupleSatisfiesUpdate would have returned
4756 * MayBeUpdated and we wouldn't be here.
4757 */
4758 nmembers =
4759 GetMultiXactIdMembers(xwait, &members, false,
4761
4762 for (i = 0; i < nmembers; i++)
4763 {
4764 /* only consider members of our own transaction */
4765 if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4766 continue;
4767
4768 if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4769 {
4770 pfree(members);
4771 result = TM_Ok;
4772 goto out_unlocked;
4773 }
4774 else
4775 {
4776 /*
4777 * Disable acquisition of the heavyweight tuple lock.
4778 * Otherwise, when promoting a weaker lock, we might
4779 * deadlock with another locker that has acquired the
4780 * heavyweight tuple lock and is waiting for our
4781 * transaction to finish.
4782 *
4783 * Note that in this case we still need to wait for
4784 * the multixact if required, to avoid acquiring
4785 * conflicting locks.
4786 */
4787 skip_tuple_lock = true;
4788 }
4789 }
4790
4791 if (members)
4792 pfree(members);
4793 }
4795 {
4796 switch (mode)
4797 {
4798 case LockTupleKeyShare:
4802 result = TM_Ok;
4803 goto out_unlocked;
4804 case LockTupleShare:
4807 {
4808 result = TM_Ok;
4809 goto out_unlocked;
4810 }
4811 break;
4814 {
4815 result = TM_Ok;
4816 goto out_unlocked;
4817 }
4818 break;
4819 case LockTupleExclusive:
4822 {
4823 result = TM_Ok;
4824 goto out_unlocked;
4825 }
4826 break;
4827 }
4828 }
4829 }
4830
4831 /*
4832 * Initially assume that we will have to wait for the locking
4833 * transaction(s) to finish. We check various cases below in which
4834 * this can be turned off.
4835 */
4836 require_sleep = true;
4837 if (mode == LockTupleKeyShare)
4838 {
4839 /*
4840 * If we're requesting KeyShare, and there's no update present, we
4841 * don't need to wait. Even if there is an update, we can still
4842 * continue if the key hasn't been modified.
4843 *
4844 * However, if there are updates, we need to walk the update chain
4845 * to mark future versions of the row as locked, too. That way,
4846 * if somebody deletes that future version, we're protected
4847 * against the key going away. This locking of future versions
4848 * could block momentarily, if a concurrent transaction is
4849 * deleting a key; or it could return a value to the effect that
4850 * the transaction deleting the key has already committed. So we
4851 * do this before re-locking the buffer; otherwise this would be
4852 * prone to deadlocks.
4853 *
4854 * Note that the TID we're locking was grabbed before we unlocked
4855 * the buffer. For it to change while we're not looking, the
4856 * other properties we're testing for below after re-locking the
4857 * buffer would also change, in which case we would restart this
4858 * loop above.
4859 */
4861 {
4862 bool updated;
4863
4865
4866 /*
4867 * If there are updates, follow the update chain; bail out if
4868 * that cannot be done.
4869 */
4870 if (follow_updates && updated &&
4871 !ItemPointerEquals(&tuple->t_self, &t_ctid))
4872 {
4873 TM_Result res;
4874
4875 res = heap_lock_updated_tuple(relation,
4876 infomask, xwait, &t_ctid,
4878 mode);
4879 if (res != TM_Ok)
4880 {
4881 result = res;
4882 /* recovery code expects to have buffer lock held */
4884 goto failed;
4885 }
4886 }
4887
4889
4890 /*
4891 * Make sure it's still an appropriate lock, else start over.
4892 * Also, if it wasn't updated before we released the lock, but
4893 * is updated now, we start over too; the reason is that we
4894 * now need to follow the update chain to lock the new
4895 * versions.
4896 */
4897 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4898 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4899 !updated))
4900 goto l3;
4901
4902 /* Things look okay, so we can skip sleeping */
4903 require_sleep = false;
4904
4905 /*
4906 * Note we allow Xmax to change here; other updaters/lockers
4907 * could have modified it before we grabbed the buffer lock.
4908 * However, this is not a problem, because with the recheck we
4909 * just did we ensure that they still don't conflict with the
4910 * lock we want.
4911 */
4912 }
4913 }
4914 else if (mode == LockTupleShare)
4915 {
4916 /*
4917 * If we're requesting Share, we can similarly avoid sleeping if
4918 * there's no update and no exclusive lock present.
4919 */
4922 {
4924
4925 /*
4926 * Make sure it's still an appropriate lock, else start over.
4927 * See above about allowing xmax to change.
4928 */
4931 goto l3;
4932 require_sleep = false;
4933 }
4934 }
4935 else if (mode == LockTupleNoKeyExclusive)
4936 {
4937 /*
4938 * If we're requesting NoKeyExclusive, we might also be able to
4939 * avoid sleeping; just ensure that there is no conflicting lock
4940 * already acquired.
4941 */
4943 {
4945 mode, NULL))
4946 {
4947 /*
4948 * No conflict, but if the xmax changed under us in the
4949 * meantime, start over.
4950 */
4954 xwait))
4955 goto l3;
4956
4957 /* otherwise, we're good */
4958 require_sleep = false;
4959 }
4960 }
4962 {
4964
4965 /* if the xmax changed in the meantime, start over */
4968 xwait))
4969 goto l3;
4970 /* otherwise, we're good */
4971 require_sleep = false;
4972 }
4973 }
4974
4975 /*
4976 * As a check independent from those above, we can also avoid sleeping
4977 * if the current transaction is the sole locker of the tuple. Note
4978 * that the strength of the lock already held is irrelevant; this is
4979 * not about recording the lock in Xmax (which will be done regardless
4980 * of this optimization, below). Also, note that the cases where we
4981 * hold a lock stronger than we are requesting are already handled
4982 * above by not doing anything.
4983 *
4984 * Note we only deal with the non-multixact case here; MultiXactIdWait
4985 * is well equipped to deal with this situation on its own.
4986 */
4989 {
4990 /* ... but if the xmax changed in the meantime, start over */
4994 xwait))
4995 goto l3;
4997 require_sleep = false;
4998 }
4999
5000 /*
5001 * Time to sleep on the other transaction/multixact, if necessary.
5002 *
5003 * If the other transaction is an update/delete that's already
5004 * committed, then sleeping cannot possibly do any good: if we're
5005 * required to sleep, get out to raise an error instead.
5006 *
5007 * By here, we either have already acquired the buffer exclusive lock,
5008 * or we must wait for the locking transaction or multixact; so below
5009 * we ensure that we grab buffer lock after the sleep.
5010 */
5011 if (require_sleep && (result == TM_Updated || result == TM_Deleted))
5012 {
5014 goto failed;
5015 }
5016 else if (require_sleep)
5017 {
5018 /*
5019 * Acquire tuple lock to establish our priority for the tuple, or
5020 * die trying. LockTuple will release us when we are next-in-line
5021 * for the tuple. We must do this even if we are share-locking,
5022 * but not if we already have a weaker lock on the tuple.
5023 *
5024 * If we are forced to "start over" below, we keep the tuple lock;
5025 * this arranges that we stay at the head of the line while
5026 * rechecking tuple state.
5027 */
5028 if (!skip_tuple_lock &&
5029 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
5031 {
5032 /*
5033 * This can only happen if wait_policy is Skip and the lock
5034 * couldn't be obtained.
5035 */
5036 result = TM_WouldBlock;
5037 /* recovery code expects to have buffer lock held */
5039 goto failed;
5040 }
5041
5043 {
5045
5046 /* We only ever lock tuples, never update them */
5047 if (status >= MultiXactStatusNoKeyUpdate)
5048 elog(ERROR, "invalid lock mode in heap_lock_tuple");
5049
5050 /* wait for multixact to end, or die trying */
5051 switch (wait_policy)
5052 {
5053 case LockWaitBlock:
5055 relation, &tuple->t_self, XLTW_Lock, NULL);
5056 break;
5057 case LockWaitSkip:
5059 status, infomask, relation,
5060 NULL, false))
5061 {
5062 result = TM_WouldBlock;
5063 /* recovery code expects to have buffer lock held */
5065 goto failed;
5066 }
5067 break;
5068 case LockWaitError:
5070 status, infomask, relation,
5072 ereport(ERROR,
5074 errmsg("could not obtain lock on row in relation \"%s\"",
5075 RelationGetRelationName(relation))));
5076
5077 break;
5078 }
5079
5080 /*
5081 * Of course, the multixact might not be done here: if we're
5082 * requesting a light lock mode, other transactions with light
5083 * locks could still be alive, as well as locks owned by our
5084 * own xact or other subxacts of this backend. We need to
5085 * preserve the surviving MultiXact members. Note that it
5086 * isn't absolutely necessary in the latter case, but doing so
5087 * is simpler.
5088 */
5089 }
5090 else
5091 {
5092 /* wait for regular transaction to end, or die trying */
5093 switch (wait_policy)
5094 {
5095 case LockWaitBlock:
5096 XactLockTableWait(xwait, relation, &tuple->t_self,
5097 XLTW_Lock);
5098 break;
5099 case LockWaitSkip:
5101 {
5102 result = TM_WouldBlock;
5103 /* recovery code expects to have buffer lock held */
5105 goto failed;
5106 }
5107 break;
5108 case LockWaitError:
5110 ereport(ERROR,
5112 errmsg("could not obtain lock on row in relation \"%s\"",
5113 RelationGetRelationName(relation))));
5114 break;
5115 }
5116 }
5117
5118 /* if there are updates, follow the update chain */
5120 !ItemPointerEquals(&tuple->t_self, &t_ctid))
5121 {
5122 TM_Result res;
5123
5124 res = heap_lock_updated_tuple(relation,
5125 infomask, xwait, &t_ctid,
5127 mode);
5128 if (res != TM_Ok)
5129 {
5130 result = res;
5131 /* recovery code expects to have buffer lock held */
5133 goto failed;
5134 }
5135 }
5136
5138
5139 /*
5140 * xwait is done, but if xwait had just locked the tuple then some
5141 * other xact could update this tuple before we get to this point.
5142 * Check for xmax change, and start over if so.
5143 */
5146 xwait))
5147 goto l3;
5148
5150 {
5151 /*
5152 * Otherwise check if it committed or aborted. Note we cannot
5153 * be here if the tuple was only locked by somebody who didn't
5154 * conflict with us; that would have been handled above. So
5155 * that transaction must necessarily be gone by now. But
5156 * don't check for this in the multixact case, because some
5157 * locker transactions might still be running.
5158 */
5159 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5160 }
5161 }
5162
5163 /* By here, we're certain that we hold buffer exclusive lock again */
5164
5165 /*
5166 * We may lock if previous xmax aborted, or if it committed but only
5167 * locked the tuple without updating it; or if we didn't have to wait
5168 * at all for whatever reason.
5169 */
5170 if (!require_sleep ||
5171 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5174 result = TM_Ok;
5175 else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5176 result = TM_Updated;
5177 else
5178 result = TM_Deleted;
5179 }
5180
5181failed:
5182 if (result != TM_Ok)
5183 {
5184 Assert(result == TM_SelfModified || result == TM_Updated ||
5185 result == TM_Deleted || result == TM_WouldBlock);
5186
5187 /*
5188 * When locking a tuple under LockWaitSkip semantics and we fail with
5189 * TM_WouldBlock above, it's possible for concurrent transactions to
5190 * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5191 * this assert is slightly different from the equivalent one in
5192 * heap_delete and heap_update.
5193 */
5194 Assert((result == TM_WouldBlock) ||
5195 !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5196 Assert(result != TM_Updated ||
5197 !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5198 tmfd->ctid = tuple->t_data->t_ctid;
5199 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5200 if (result == TM_SelfModified)
5201 tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5202 else
5203 tmfd->cmax = InvalidCommandId;
5204 goto out_locked;
5205 }
5206
5207 /*
5208 * If we didn't pin the visibility map page and the page has become all
5209 * visible while we were busy locking the buffer, or during some
5210 * subsequent window during which we had it unlocked, we'll have to unlock
5211 * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5212 * unfortunate, especially since we'll now have to recheck whether the
5213 * tuple has been locked or updated under us, but hopefully it won't
5214 * happen very often.
5215 */
5216 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5217 {
5219 visibilitymap_pin(relation, block, &vmbuffer);
5221 goto l3;
5222 }
5223
5224 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5225 old_infomask = tuple->t_data->t_infomask;
5226
5227 /*
5228 * If this is the first possibly-multixact-able operation in the current
5229 * transaction, set my per-backend OldestMemberMXactId setting. We can be
5230 * certain that the transaction will never become a member of any older
5231 * MultiXactIds than that. (We have to do this even if we end up just
5232 * using our own TransactionId below, since some other backend could
5233 * incorporate our XID into a MultiXact immediately afterwards.)
5234 */
5236
5237 /*
5238 * Compute the new xmax and infomask to store into the tuple. Note we do
5239 * not modify the tuple just yet, because that would leave it in the wrong
5240 * state if multixact.c elogs.
5241 */
5243 GetCurrentTransactionId(), mode, false,
5244 &xid, &new_infomask, &new_infomask2);
5245
5247
5248 /*
5249 * Store transaction information of xact locking the tuple.
5250 *
5251 * Note: Cmax is meaningless in this context, so don't set it; this avoids
5252 * possibly generating a useless combo CID. Moreover, if we're locking a
5253 * previously updated tuple, it's important to preserve the Cmax.
5254 *
5255 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5256 * we would break the HOT chain.
5257 */
5260 tuple->t_data->t_infomask |= new_infomask;
5261 tuple->t_data->t_infomask2 |= new_infomask2;
5264 HeapTupleHeaderSetXmax(tuple->t_data, xid);
5265
5266 /*
5267 * Make sure there is no forward chain link in t_ctid. Note that in the
5268 * cases where the tuple has been updated, we must not overwrite t_ctid,
5269 * because it was set by the updater. Moreover, if the tuple has been
5270 * updated, we need to follow the update chain to lock the new versions of
5271 * the tuple as well.
5272 */
5274 tuple->t_data->t_ctid = *tid;
5275
5276 /* Clear only the all-frozen bit on visibility map if needed */
5277 if (PageIsAllVisible(page) &&
5278 visibilitymap_clear(relation, block, vmbuffer,
5280 cleared_all_frozen = true;
5281
5282
5283 MarkBufferDirty(*buffer);
5284
5285 /*
5286 * XLOG stuff. You might think that we don't need an XLOG record because
5287 * there is no state change worth restoring after a crash. You would be
5288 * wrong however: we have just written either a TransactionId or a
5289 * MultiXactId that may never have been seen on disk before, and we need
5290 * to make sure that there are XLOG entries covering those ID numbers.
5291 * Else the same IDs might be re-used after a crash, which would be
5292 * disastrous if this page made it to disk before the crash. Essentially
5293 * we have to enforce the WAL log-before-data rule even in this case.
5294 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5295 * entries for everything anyway.)
5296 */
5297 if (RelationNeedsWAL(relation))
5298 {
5301
5304
5305 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5306 xlrec.xmax = xid;
5307 xlrec.infobits_set = compute_infobits(new_infomask,
5308 tuple->t_data->t_infomask2);
5311
5312 /* we don't decode row locks atm, so no need to log the origin */
5313
5315
5316 PageSetLSN(page, recptr);
5317 }
5318
5320
5321 result = TM_Ok;
5322
5325
5327 if (BufferIsValid(vmbuffer))
5328 ReleaseBuffer(vmbuffer);
5329
5330 /*
5331 * Don't update the visibility map here. Locking a tuple doesn't change
5332 * visibility info.
5333 */
5334
5335 /*
5336 * Now that we have successfully marked the tuple as locked, we can
5337 * release the lmgr tuple lock, if we had it.
5338 */
5339 if (have_tuple_lock)
5340 UnlockTupleTuplock(relation, tid, mode);
5341
5342 return result;
5343}
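
/*
 * A hedged caller sketch for heap_lock_tuple() (hypothetical, not part of
 * heapam.c), assuming the parameter order (relation, tuple, cid, mode,
 * wait_policy, follow_updates, buffer, tmfd): the caller supplies the target
 * TID in tuple->t_self, gets the buffer back pinned but unlocked, and is
 * responsible for dropping that pin whatever the result.
 */
#ifdef NOT_USED
static TM_Result
example_lock_one_row(Relation rel, ItemPointer tid, CommandId cid)
{
	HeapTupleData tuple;
	Buffer		buffer;
	TM_FailureData tmfd;
	TM_Result	result;

	tuple.t_self = *tid;
	result = heap_lock_tuple(rel, &tuple, cid,
							 LockTupleExclusive, LockWaitBlock,
							 false /* follow_updates */ ,
							 &buffer, &tmfd);

	/* pinned but not locked at this point; drop the pin ourselves */
	ReleaseBuffer(buffer);

	/*
	 * On TM_Updated/TM_Deleted/TM_SelfModified, tmfd describes the
	 * conflicting tuple version for the caller to act on.
	 */
	return result;
}
#endif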
5344
5345/*
5346 * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5347 * its normal, Xmax-based tuple lock.
5348 *
5349 * have_tuple_lock is an input and output parameter: on input, it indicates
5350 * whether the lock has previously been acquired (and this function does
5351 * nothing in that case). If this function returns success, have_tuple_lock
5352 * has been flipped to true.
5353 *
5354 * Returns false if it was unable to obtain the lock; this can only happen if
5355 * wait_policy is Skip.
5356 */
5357static bool
5360{
5361 if (*have_tuple_lock)
5362 return true;
5363
5364 switch (wait_policy)
5365 {
5366 case LockWaitBlock:
5367 LockTupleTuplock(relation, tid, mode);
5368 break;
5369
5370 case LockWaitSkip:
5371 if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5372 return false;
5373 break;
5374
5375 case LockWaitError:
5377 ereport(ERROR,
5379 errmsg("could not obtain lock on row in relation \"%s\"",
5380 RelationGetRelationName(relation))));
5381 break;
5382 }
5383 *have_tuple_lock = true;
5384
5385 return true;
5386}
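
/*
 * A hedged sketch (hypothetical, not part of heapam.c) of how the
 * have_tuple_lock flag is meant to be used: it makes repeated acquisition
 * attempts idempotent and records whether an UnlockTupleTuplock() is owed.
 */
#ifdef NOT_USED
static void
example_tuplock_pairing(Relation rel, ItemPointer tid)
{
	bool		have_tuple_lock = false;

	/* With LockWaitBlock this either waits or succeeds; it never fails. */
	(void) heap_acquire_tuplock(rel, tid, LockTupleExclusive, LockWaitBlock,
								&have_tuple_lock);

	/* ... examine or modify the tuple under buffer lock here ... */

	if (have_tuple_lock)
		UnlockTupleTuplock(rel, tid, LockTupleExclusive);
}
#endif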
5387
5388/*
5389 * Given an original set of Xmax and infomask, and a transaction (identified by
5390 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5391 * corresponding infomasks to use on the tuple.
5392 *
5393 * Note that this might have side effects such as creating a new MultiXactId.
5394 *
5395 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5396 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5397 * but it was not running anymore. There is a race condition, which is that the
5398 * MultiXactId may have finished since then, but that uncommon case is handled
5399 * either here, or within MultiXactIdExpand.
5400 *
5401 * There is a similar race condition possible when the old xmax was a regular
5402 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5403 * window, but it's still possible to end up creating an unnecessary
5404 * MultiXactId. Fortunately this is harmless.
5405 */
5406static void
5412{
5413 TransactionId new_xmax;
5416
5418
5419l5:
5420 new_infomask = 0;
5421 new_infomask2 = 0;
5423 {
5424 /*
5425 * No previous locker; we just insert our own TransactionId.
5426 *
5427 * Note that it's critical that this case be the first one checked,
5428 * because there are several blocks below that come back to this one
5429 * to implement certain optimizations; old_infomask might contain
5430 * other dirty bits in those cases, but we don't really care.
5431 */
5432 if (is_update)
5433 {
5434 new_xmax = add_to_xmax;
5435 if (mode == LockTupleExclusive)
5437 }
5438 else
5439 {
5441 switch (mode)
5442 {
5443 case LockTupleKeyShare:
5444 new_xmax = add_to_xmax;
5446 break;
5447 case LockTupleShare:
5448 new_xmax = add_to_xmax;
5450 break;
5452 new_xmax = add_to_xmax;
5454 break;
5455 case LockTupleExclusive:
5456 new_xmax = add_to_xmax;
5459 break;
5460 default:
5461 new_xmax = InvalidTransactionId; /* silence compiler */
5462 elog(ERROR, "invalid lock mode");
5463 }
5464 }
5465 }
5467 {
5469
5470 /*
5471 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5472 * cross-check.
5473 */
5475
5476 /*
5477 * A multixact together with LOCK_ONLY set but neither lock bit set
5478 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5479 * anymore. This check is critical for databases upgraded by
5480 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5481 * that such multis are never passed.
5482 */
5484 {
5487 goto l5;
5488 }
5489
5490 /*
5491 * If the XMAX is already a MultiXactId, then we need to expand it to
5492 * include add_to_xmax; but if all the members were lockers and are
5493 * all gone, we can do away with the IS_MULTI bit and just set
5494 * add_to_xmax as the only locker/updater. If all lockers are gone
5495 * and we have an updater that aborted, we can also do without a
5496 * multi.
5497 *
5498 * The cost of doing GetMultiXactIdMembers would be paid by
5499 * MultiXactIdExpand if we weren't to do this, so this check is not
5500 * incurring extra work anyhow.
5501 */
5503 {
5506 old_infomask)))
5507 {
5508 /*
5509 * Reset these bits and restart; otherwise fall through to
5510 * create a new multi below.
5511 */
5514 goto l5;
5515 }
5516 }
5517
5519
5520 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5521 new_status);
5523 }
5525 {
5526 /*
5527 * It's a committed update, so we need to preserve it as the updater of
5528 * the tuple.
5529 */
5530 MultiXactStatus status;
5532
5534 status = MultiXactStatusUpdate;
5535 else
5537
5539
5540 /*
5541 * since it's not running, it's obviously impossible for the old
5542 * updater to be identical to the current one, so we need not check
5543 * for that case as we do in the block above.
5544 */
5545 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5547 }
5548 else if (TransactionIdIsInProgress(xmax))
5549 {
5550 /*
5551 * If the XMAX is a valid, in-progress TransactionId, then we need to
5552 * create a new MultiXactId that includes both the old locker or
5553 * updater and our own TransactionId.
5554 */
5558
5560 {
5566 {
5569 else
5571 }
5572 else
5573 {
5574 /*
5575 * LOCK_ONLY can be present alone only when a page has been
5576 * upgraded by pg_upgrade. But in that case,
5577 * TransactionIdIsInProgress() should have returned false. We
5578 * assume it's no longer locked in this case.
5579 */
5580 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5583 goto l5;
5584 }
5585 }
5586 else
5587 {
5588 /* it's an update, but which kind? */
5591 else
5593 }
5594
5596
5597 /*
5598 * If the lock to be acquired is for the same TransactionId as the
5599 * existing lock, there's an optimization possible: consider only the
5600 * strongest of both locks as the only one present, and restart.
5601 */
5602 if (xmax == add_to_xmax)
5603 {
5604 /*
5605 * Note that it's not possible for the original tuple to be
5606 * updated: we wouldn't be here because the tuple would have been
5607 * invisible and we wouldn't try to update it. As a subtlety,
5608 * this code can also run when traversing an update chain to lock
5609 * future versions of a tuple. But we wouldn't be here either,
5610 * because the add_to_xmax would be different from the original
5611 * updater.
5612 */
5614
5615 /* acquire the strongest of both */
5616 if (mode < old_mode)
5617 mode = old_mode;
5618 /* mustn't touch is_update */
5619
5621 goto l5;
5622 }
5623
5624 /* otherwise, just fall back to creating a new multixact */
5626 new_xmax = MultiXactIdCreate(xmax, old_status,
5629 }
5632 {
5633 /*
5634 * It's a committed update, so we must preserve it as the updater of the
5635 * tuple.
5636 */
5637 MultiXactStatus status;
5639
5641 status = MultiXactStatusUpdate;
5642 else
5644
5646
5647 /*
5648 * since it's not running, it's obviously impossible for the old
5649 * updater to be identical to the current one, so we need not check
5650 * for that case as we do in the block above.
5651 */
5652 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5654 }
5655 else
5656 {
5657 /*
5658 * Can get here iff the locking/updating transaction was running when
5659 * the infomask was extracted from the tuple, but finished before
5660 * TransactionIdIsInProgress got to run. Deal with it as if there was
5661 * no locker at all in the first place.
5662 */
5664 goto l5;
5665 }
5666
5669 *result_xmax = new_xmax;
5670}
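
/*
 * A hedged sketch of the simplest case above (hypothetical, not part of
 * heapam.c): when the old xmax is invalid and we are merely locking rather
 * than updating, compute_new_xmax_infomask() just records our own XID plus
 * the LOCK_ONLY and lock-strength bits; no MultiXactId is created.
 */
#ifdef NOT_USED
static void
example_compute_lock_xmax(TransactionId my_xid)
{
	TransactionId new_xmax;
	uint16		new_infomask;
	uint16		new_infomask2;

	compute_new_xmax_infomask(InvalidTransactionId, HEAP_XMAX_INVALID, 0,
							  my_xid, LockTupleShare, false /* is_update */ ,
							  &new_xmax, &new_infomask, &new_infomask2);

	Assert(new_xmax == my_xid);
	Assert(new_infomask & HEAP_XMAX_LOCK_ONLY);
}
#endif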
5671
5672/*
5673 * Subroutine for heap_lock_updated_tuple_rec.
5674 *
5675 * Given a hypothetical multixact status held by the transaction identified
5676 * with the given xid, does the current transaction need to wait, fail, or can
5677 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5678 * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5679 * returned. If the lock is already held by the current transaction, return
5680 * TM_SelfModified. In case of a conflict with another transaction, a
5681 * different HeapTupleSatisfiesUpdate return code is returned.
5682 *
5683 * The held status is said to be hypothetical because it might correspond to a
5684 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5685 * way for simplicity of API.
5686 */
5687static TM_Result
5690 bool *needwait)
5691{
5693
5694 *needwait = false;
5696
5697 /*
5698 * Note: we *must* check TransactionIdIsInProgress before
5699 * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5700 * for an explanation.
5701 */
5703 {
5704 /*
5705 * The tuple has already been locked by our own transaction. This is
5706 * very rare but can happen if multiple transactions are trying to
5707 * lock an ancient version of the same tuple.
5708 */
5709 return TM_SelfModified;
5710 }
5711 else if (TransactionIdIsInProgress(xid))
5712 {
5713 /*
5714 * If the locking transaction is running, what we do depends on
5715 * whether the lock modes conflict: if they do, then we must wait for
5716 * it to finish; otherwise we can fall through to lock this tuple
5717 * version without waiting.
5718 */
5721 {
5722 *needwait = true;
5723 }
5724
5725 /*
5726 * If we set needwait above, then this value doesn't matter;
5727 * otherwise, this value signals to caller that it's okay to proceed.
5728 */
5729 return TM_Ok;
5730 }
5731 else if (TransactionIdDidAbort(xid))
5732 return TM_Ok;
5733 else if (TransactionIdDidCommit(xid))
5734 {
5735 /*
5736 * The other transaction committed. If it was only a locker, then the
5737 * lock is completely gone now and we can return success; but if it
5738 * was an update, then what we do depends on whether the two lock
5739 * modes conflict. If they conflict, then we must report error to
5740 * caller. But if they don't, we can fall through to allow the current
5741 * transaction to lock the tuple.
5742 *
5743 * Note: the reason we worry about ISUPDATE here is because as soon as
5744 * a transaction ends, all its locks are gone and meaningless, and
5745 * thus we can ignore them; whereas its updates persist. In the
5746 * TransactionIdIsInProgress case, above, we don't need to check
5747 * because we know the lock is still "alive" and thus a conflict always
5748 * needs to be checked.
5749 */
5750 if (!ISUPDATE_from_mxstatus(status))
5751 return TM_Ok;
5752
5755 {
5756 /* bummer */
5757 if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5758 return TM_Updated;
5759 else
5760 return TM_Deleted;
5761 }
5762
5763 return TM_Ok;
5764 }
5765
5766 /* Not in progress, not aborted, not committed -- must have crashed */
5767 return TM_Ok;
5768}
5769
5770
5771/*
5772 * Recursive part of heap_lock_updated_tuple
5773 *
5774 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5775 * xid with the given mode; if this tuple is updated, recurse to lock the new
5776 * version as well.
5777 */
5778static TM_Result
5780 const ItemPointerData *tid, TransactionId xid,
5782{
5783 TM_Result result;
5786 Buffer buf;
5791 TransactionId xmax,
5792 new_xmax;
5793 bool cleared_all_frozen = false;
5795 Buffer vmbuffer = InvalidBuffer;
5796 BlockNumber block;
5797
5798 ItemPointerCopy(tid, &tupid);
5799
5800 for (;;)
5801 {
5802 new_infomask = 0;
5803 new_xmax = InvalidTransactionId;
5805 ItemPointerCopy(&tupid, &(mytup.t_self));
5806
5807 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5808 {
5809 /*
5810 * if we fail to find the updated version of the tuple, it's
5811 * because it was vacuumed/pruned away after its creator
5812 * transaction aborted. So behave as if we got to the end of the
5813 * chain, and there's no further tuple to lock: return success to
5814 * caller.
5815 */
5816 result = TM_Ok;
5817 goto out_unlocked;
5818 }
5819
5820l4:
5822
5823 /*
5824 * Before locking the buffer, pin the visibility map page if it
5825 * appears to be necessary. Since we haven't got the lock yet,
5826 * someone else might be in the middle of changing this, so we'll need
5827 * to recheck after we have the lock.
5828 */
5830 {
5831 visibilitymap_pin(rel, block, &vmbuffer);
5832 pinned_desired_page = true;
5833 }
5834 else
5835 pinned_desired_page = false;
5836
5838
5839 /*
5840 * If we didn't pin the visibility map page and the page has become
5841 * all visible while we were busy locking the buffer, we'll have to
5842 * unlock and re-lock, to avoid holding the buffer lock across I/O.
5843 * That's a bit unfortunate, but hopefully shouldn't happen often.
5844 *
5845 * Note: in some paths through this function, we will reach here
5846 * holding a pin on a vm page that may or may not be the one matching
5847 * this page. If this page isn't all-visible, we won't use the vm
5848 * page, but we hold onto such a pin till the end of the function.
5849 */
5851 {
5853 visibilitymap_pin(rel, block, &vmbuffer);
5855 }
5856
5857 /*
5858 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5859 * end of the chain, we're done, so return success.
5860 */
5863 priorXmax))
5864 {
5865 result = TM_Ok;
5866 goto out_locked;
5867 }
5868
5869 /*
5870 * Also check Xmin: if this tuple was created by an aborted
5871 * (sub)transaction, then we already locked the last live one in the
5872 * chain, thus we're done, so return success.
5873 */
5875 {
5876 result = TM_Ok;
5877 goto out_locked;
5878 }
5879
5880 old_infomask = mytup.t_data->t_infomask;
5881 old_infomask2 = mytup.t_data->t_infomask2;
5882 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5883
5884 /*
5885 * If this tuple version has been updated or locked by some concurrent
5886 * transaction(s), what we do depends on whether our lock mode
5887 * conflicts with what those other transactions hold, and also on the
5888 * status of them.
5889 */
5891 {
5893 bool needwait;
5894
5897 {
5898 int nmembers;
5899 int i;
5900 MultiXactMember *members;
5901
5902 /*
5903 * We don't need a test for pg_upgrade'd tuples: this is only
5904 * applied to tuples after the first in an update chain. Said
5905 * first tuple in the chain may well be locked-in-9.2-and-
5906 * pg_upgraded, but that one was already locked by our caller,
5907 * not us; and any subsequent ones cannot be because our
5908 * caller must necessarily have obtained a snapshot later than
5909 * the pg_upgrade itself.
5910 */
5911 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5912
5913 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5915 for (i = 0; i < nmembers; i++)
5916 {
5917 result = test_lockmode_for_conflict(members[i].status,
5918 members[i].xid,
5919 mode,
5920 &mytup,
5921 &needwait);
5922
5923 /*
5924 * If the tuple was already locked by ourselves in a
5925 * previous iteration of this (say heap_lock_tuple was
5926 * forced to restart the locking loop because of a change
5927 * in xmax), then we hold the lock already on this tuple
5928 * version and we don't need to do anything; and this is
5929 * not an error condition either. We just need to skip
5930 * this tuple and continue locking the next version in the
5931 * update chain.
5932 */
5933 if (result == TM_SelfModified)
5934 {
5935 pfree(members);
5936 goto next;
5937 }
5938
5939 if (needwait)
5940 {
5942 XactLockTableWait(members[i].xid, rel,
5943 &mytup.t_self,
5945 pfree(members);
5946 goto l4;
5947 }
5948 if (result != TM_Ok)
5949 {
5950 pfree(members);
5951 goto out_locked;
5952 }
5953 }
5954 if (members)
5955 pfree(members);
5956 }
5957 else
5958 {
5959 MultiXactStatus status;
5960
5961 /*
5962 * For a non-multi Xmax, we first need to compute the
5963 * corresponding MultiXactStatus by using the infomask bits.
5964 */
5966 {
5970 status = MultiXactStatusForShare;
5972 {
5974 status = MultiXactStatusForUpdate;
5975 else
5977 }
5978 else
5979 {
5980 /*
5981 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5982 * as share-locked in the old cluster) shouldn't be
5983 * seen in the middle of an update chain.
5984 */
5985 elog(ERROR, "invalid lock status in tuple");
5986 }
5987 }
5988 else
5989 {
5990 /* it's an update, but which kind? */
5992 status = MultiXactStatusUpdate;
5993 else
5995 }
5996
5997 result = test_lockmode_for_conflict(status, rawxmax, mode,
5998 &mytup, &needwait);
5999
6000 /*
6001 * If the tuple was already locked by ourselves in a previous
6002 * iteration of this (say heap_lock_tuple was forced to
6003 * restart the locking loop because of a change in xmax), then
6004 * we hold the lock already on this tuple version and we don't
6005 * need to do anything; and this is not an error condition
6006 * either. We just need to skip this tuple and continue
6007 * locking the next version in the update chain.
6008 */
6009 if (result == TM_SelfModified)
6010 goto next;
6011
6012 if (needwait)
6013 {
6015 XactLockTableWait(rawxmax, rel, &mytup.t_self,
6017 goto l4;
6018 }
6019 if (result != TM_Ok)
6020 {
6021 goto out_locked;
6022 }
6023 }
6024 }
6025
6026 /* compute the new Xmax and infomask values for the tuple ... */
6027 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
6028 xid, mode, false,
6029 &new_xmax, &new_infomask, &new_infomask2);
6030
6032 visibilitymap_clear(rel, block, vmbuffer,
6034 cleared_all_frozen = true;
6035
6037
6038 /* ... and set them */
6039 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
6040 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
6041 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6042 mytup.t_data->t_infomask |= new_infomask;
6043 mytup.t_data->t_infomask2 |= new_infomask2;
6044
6046
6047 /* XLOG stuff */
6048 if (RelationNeedsWAL(rel))
6049 {
6052 Page page = BufferGetPage(buf);
6053
6056
6057 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
6058 xlrec.xmax = new_xmax;
6060 xlrec.flags =
6062
6064
6066
6067 PageSetLSN(page, recptr);
6068 }
6069
6071
6072next:
6073 /* if we find the end of update chain, we're done. */
6074 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
6076 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
6078 {
6079 result = TM_Ok;
6080 goto out_locked;
6081 }
6082
6083 /* tail recursion */
6085 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6087 }
6088
6089 result = TM_Ok;
6090
6093
6095 if (vmbuffer != InvalidBuffer)
6096 ReleaseBuffer(vmbuffer);
6097
6098 return result;
6099}
6100
6101/*
6102 * heap_lock_updated_tuple
6103 * Follow update chain when locking an updated tuple, acquiring locks (row
6104 * marks) on the updated versions.
6105 *
6106 * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
6107 * fields from the initial tuple. We will lock the tuples starting from the
6108 * one that 'prior_ctid' points to. Note: This function does not lock the
6109 * initial tuple itself.
6110 *
6111 * This function doesn't check visibility, it just unconditionally marks the
6112 * tuple(s) as locked. If any tuple in the updated chain is being deleted
6113 * concurrently (or updated with the key being modified), sleep until the
6114 * transaction doing it is finished.
6115 *
6116 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6117 * when we have to wait for other transactions to release them, as opposed to
6118 * what heap_lock_tuple does. The reason is that having more than one
6119 * transaction walking the chain is probably uncommon enough that risk of
6120 * starvation is not likely: one of the preconditions for being here is that
6121 * the snapshot in use predates the update that created this tuple (because we
6122 * started at an earlier version of the tuple), but at the same time such a
6123 * transaction cannot be using repeatable read or serializable isolation
6124 * levels, because that would lead to a serializability failure.
6125 */
6126static TM_Result
6132{
6133 INJECTION_POINT("heap_lock_updated_tuple", NULL);
6134
6135 /*
6136 * If the tuple has moved into another partition (effectively a delete)
6137 * stop here.
6138 */
6140 {
6142
6143 /*
6144 * If this is the first possibly-multixact-able operation in the
6145 * current transaction, set my per-backend OldestMemberMXactId
6146 * setting. We can be certain that the transaction will never become a
6147 * member of any older MultiXactIds than that. (We have to do this
6148 * even if we end up just using our own TransactionId below, since
6149 * some other backend could incorporate our XID into a MultiXact
6150 * immediately afterwards.)
6151 */
6153
6157 }
6158
6159 /* nothing to lock */
6160 return TM_Ok;
6161}
6162
6163/*
6164 * heap_finish_speculative - mark speculative insertion as successful
6165 *
6166 * To successfully finish a speculative insertion we have to clear speculative
6167 * token from tuple. To do so the t_ctid field, which will contain a
6168 * speculative token value, is modified in place to point to the tuple itself,
6169 * which is characteristic of a newly inserted ordinary tuple.
6170 *
6171 * NB: It is not ok to commit without either finishing or aborting a
6172 * speculative insertion. We could treat speculative tuples of committed
6173 * transactions implicitly as completed, but then we would have to be prepared
6174 * to deal with speculative tokens on committed tuples. That wouldn't be
6175 * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6176 * but clearing the token at completion isn't very expensive either.
6177 * An explicit confirmation WAL record also makes logical decoding simpler.
6178 */
6179void
6181{
6182 Buffer buffer;
6183 Page page;
6184 OffsetNumber offnum;
6185 ItemId lp;
6186 HeapTupleHeader htup;
6187
6188 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6190 page = BufferGetPage(buffer);
6191
6192 offnum = ItemPointerGetOffsetNumber(tid);
6194 elog(ERROR, "offnum out of range");
6195 lp = PageGetItemId(page, offnum);
6196 if (!ItemIdIsNormal(lp))
6197 elog(ERROR, "invalid lp");
6198
6199 htup = (HeapTupleHeader) PageGetItem(page, lp);
6200
6201 /* NO EREPORT(ERROR) from here till changes are logged */
6203
6205
6206 MarkBufferDirty(buffer);
6207
6208 /*
6209 * Replace the speculative insertion token with a real t_ctid, pointing to
6210 * itself like it does on regular tuples.
6211 */
6212 htup->t_ctid = *tid;
6213
6214 /* XLOG stuff */
6215 if (RelationNeedsWAL(relation))
6216 {
6219
6221
6223
6224 /* We want the same filtering on this as on a plain insert */
6226
6229
6231
6232 PageSetLSN(page, recptr);
6233 }
6234
6236
6237 UnlockReleaseBuffer(buffer);
6238}
6239
6240/*
6241 * heap_abort_speculative - kill a speculatively inserted tuple
6242 *
6243 * Marks a tuple that was speculatively inserted in the same command as dead,
6244 * by setting its xmin as invalid. That makes it immediately appear as dead
6245 * to all transactions, including our own. In particular, it makes
6246 * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6247 * inserting a duplicate key value won't unnecessarily wait for our whole
6248 * transaction to finish (it'll just wait for our speculative insertion to
6249 * finish).
6250 *
6251 * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6252 * that arise due to a mutual dependency that is not user visible. By
6253 * definition, unprincipled deadlocks cannot be prevented by the user
6254 * reordering lock acquisition in client code, because the implementation level
6255 * lock acquisitions are not under the user's direct control. If speculative
6256 * inserters did not take this precaution, then under high concurrency they
6257 * could deadlock with each other, which would not be acceptable.
6258 *
6259 * This is somewhat redundant with heap_delete, but we prefer to have a
6260 * dedicated routine with stripped down requirements. Note that this is also
6261 * used to delete the TOAST tuples created during speculative insertion.
6262 *
6263 * This routine does not affect logical decoding as it only looks at
6264 * confirmation records.
6265 */
6266void
6268{
6270 ItemId lp;
6271 HeapTupleData tp;
6272 Page page;
6273 BlockNumber block;
6274 Buffer buffer;
6275
6277
6278 block = ItemPointerGetBlockNumber(tid);
6279 buffer = ReadBuffer(relation, block);
6280 page = BufferGetPage(buffer);
6281
6283
6284 /*
6285 * Page can't be all visible, we just inserted into it, and are still
6286 * running.
6287 */
6288 Assert(!PageIsAllVisible(page));
6289
6292
6293 tp.t_tableOid = RelationGetRelid(relation);
6294 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6295 tp.t_len = ItemIdGetLength(lp);
6296 tp.t_self = *tid;
6297
6298 /*
6299 * Sanity check that the tuple really is a speculatively inserted tuple,
6300 * inserted by us.
6301 */
6302 if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6303 elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6304 if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6305 elog(ERROR, "attempted to kill a non-speculative tuple");
6307
6308 /*
6309 * No need to check for serializable conflicts here. There is never a
6310 * need for a combo CID, either. No need to extract replica identity, or
6311 * do anything special with infomask bits.
6312 */
6313
6315
6316 /*
6317 * The tuple will become DEAD immediately. Flag that this page is a
6318 * candidate for pruning by setting xmin to TransactionXmin. While not
6319 * immediately prunable, it is the oldest xid we can cheaply determine
6320 * that's safe against wraparound / being older than the table's
6321 * relfrozenxid. To defend against the unlikely case of a new relation
6322 * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6323 * if so (vacuum can't subsequently move relfrozenxid to beyond
6324 * TransactionXmin, so there's no race here).
6325 */
6327 {
6328 TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6330
6331 if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6332 prune_xid = relfrozenxid;
6333 else
6336 }
6337
6338 /* store transaction information of xact deleting the tuple */
6341
6342 /*
6343 * Set the tuple header xmin to InvalidTransactionId. This makes the
6344 * tuple immediately invisible to everyone. (In particular, to any
6345 * transactions waiting on the speculative token, woken up later.)
6346 */
6348
6349 /* Clear the speculative insertion token too */
6350 tp.t_data->t_ctid = tp.t_self;
6351
6352 MarkBufferDirty(buffer);
6353
6354 /*
6355 * XLOG stuff
6356 *
6357 * The WAL records generated here match heap_delete(). The same recovery
6358 * routines are used.
6359 */
6360 if (RelationNeedsWAL(relation))
6361 {
6364
6366 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6367 tp.t_data->t_infomask2);
6369 xlrec.xmax = xid;
6370
6374
6375 /* No replica identity & replication origin logged */
6376
6378
6379 PageSetLSN(page, recptr);
6380 }
6381
6383
6385
6386 if (HeapTupleHasExternal(&tp))
6387 {
6388 Assert(!IsToastRelation(relation));
6389 heap_toast_delete(relation, &tp, true);
6390 }
6391
6392 /*
6393 * Never need to mark tuple for invalidation, since catalogs don't support
6394 * speculative insertion
6395 */
6396
6397 /* Now we can release the buffer */
6398 ReleaseBuffer(buffer);
6399
6400 /* count deletion, as we counted the insertion too */
6401 pgstat_count_heap_delete(relation);
6402}
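
To make the protocol above concrete, here is a minimal sketch (not part of
heapam.c) of how an ON CONFLICT-style caller drives a speculative insertion at
the heap level. The helper name and the way the conflict flag is obtained are
illustrative; the real driver lives in the executor and normally goes through
the table AM wrappers rather than calling heapam directly.

/* Illustrative sketch only -- the real logic lives in the executor. */
static void
speculative_insert_sketch(Relation rel, HeapTuple tup, CommandId cid,
                          bool index_reported_conflict)
{
    uint32      spec_token;

    /* Take a speculative-insertion token that other backends can wait on. */
    spec_token = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

    /* Stamp the token into the tuple and insert it speculatively. */
    HeapTupleHeaderSetSpeculativeToken(tup->t_data, spec_token);
    heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

    /* ... insert index entries; a unique index may report a conflict ... */

    if (index_reported_conflict)
        heap_abort_speculative(rel, &tup->t_self);  /* "super-delete" */
    else
        heap_finish_speculative(rel, &tup->t_self);

    /* Wake up any waiters that blocked on our speculative token. */
    SpeculativeInsertionLockRelease(GetCurrentTransactionId());
}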
6403
6404/*
6405 * heap_inplace_lock - protect inplace update from concurrent heap_update()
6406 *
6407 * Evaluate whether the tuple's state is compatible with a no-key update.
6408 * Current transaction rowmarks are fine, as is KEY SHARE from any
6409 * transaction. If compatible, return true with the buffer exclusive-locked,
6410 * and the caller must release that by calling
6411 * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6412 * an error. Otherwise, call release_callback(arg), wait for blocking
6413 * transactions to end, and return false.
6414 *
6415 * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6416 * DDL, this doesn't guarantee any particular predicate locking.
6417 *
6418 * heap_delete() is a rarer source of blocking transactions (xwait). We'll
6419 * wait for such a transaction just like for the normal heap_update() case.
6420 * Normal concurrent DROP commands won't cause that, because all inplace
6421 * updaters take some lock that conflicts with DROP. An explicit SQL "DELETE
6422 * FROM pg_class" can cause it. By waiting, if the concurrent transaction
6423 * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
6424 * can find the successor tuple.
6425 *
6426 * Readers of inplace-updated fields expect changes to those fields are
6427 * durable. For example, vac_truncate_clog() reads datfrozenxid from
6428 * pg_database tuples via catalog snapshots. A future snapshot must not
6429 * return a lower datfrozenxid for the same database OID (lower in the
6430 * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6431 * tuple can start while we hold a lock on its buffer. In cases like
6432 * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6433 * to this transaction. ROLLBACK then is one case where it's okay to lose
6434 * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6435 * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6436 * committed tuple.)
6437 *
6438 * In principle, we could avoid waiting by overwriting every tuple in the
6439 * updated tuple chain. Reader expectations permit updating a tuple only if
6440 * it's aborted, is the tail of the chain, or we already updated the tuple
6441 * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6442 * order from tail to head. That would imply either (a) mutating all tuples
6443 * in one critical section or (b) accepting a chance of partial completion.
6444 * Partial completion of a relfrozenxid update would have the weird
6445 * consequence that the table's next VACUUM could see the table's relfrozenxid
6446 * move forward between vacuum_get_cutoffs() and finishing.
6447 */
6448bool
6450 HeapTuple oldtup_ptr, Buffer buffer,
6451 void (*release_callback) (void *), void *arg)
6452{
6453 HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6454 TM_Result result;
6455 bool ret;
6456
6457#ifdef USE_ASSERT_CHECKING
6458 if (RelationGetRelid(relation) == RelationRelationId)
6460#endif
6461
6462 Assert(BufferIsValid(buffer));
6463
6464 /*
6465 * Register shared cache invals if necessary. Other sessions may finish
6466 * inplace updates of this tuple between this step and LockTuple(). Since
6467 * inplace updates don't change cache keys, that's harmless.
6468 *
6469 * While it's tempting to register invals only after confirming we can
6470 * return true, the following obstacle precludes reordering steps that
6471 * way. Registering invals might reach a CatalogCacheInitializeCache()
6472 * that locks "buffer". That would hang indefinitely if running after our
6473 * own LockBuffer(). Hence, we must register invals before LockBuffer().
6474 */
6476
6477 LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6479
6480 /*----------
6481 * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6482 *
6483 * - wait unconditionally
6484 * - already locked tuple above, since inplace needs that unconditionally
6485 * - don't recheck header after wait: simpler to defer to next iteration
6486 * - don't try to continue even if the updater aborts: likewise
6487 * - no crosscheck
6488 */
6490 buffer);
6491
6492 if (result == TM_Invisible)
6493 {
6494 /* no known way this can happen */
6495 ereport(ERROR,
6497 errmsg_internal("attempted to overwrite invisible tuple")));
6498 }
6499 else if (result == TM_SelfModified)
6500 {
6501 /*
6502 * CREATE INDEX might reach this if an expression is silly enough to
6503 * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6504 * statements might get here after a heap_update() of the same row, in
6505 * the absence of an intervening CommandCounterIncrement().
6506 */
6507 ereport(ERROR,
6509 errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6510 }
6511 else if (result == TM_BeingModified)
6512 {
6515
6517 infomask = oldtup.t_data->t_infomask;
6518
6520 {
6523 int remain;
6524
6526 lockmode, NULL))
6527 {
6530 ret = false;
6532 relation, &oldtup.t_self, XLTW_Update,
6533 &remain);
6534 }
6535 else
6536 ret = true;
6537 }
6539 ret = true;
6541 ret = true;
6542 else
6543 {
6546 ret = false;
6547 XactLockTableWait(xwait, relation, &oldtup.t_self,
6548 XLTW_Update);
6549 }
6550 }
6551 else
6552 {
6553 ret = (result == TM_Ok);
6554 if (!ret)
6555 {
6558 }
6559 }
6560
6561 /*
6562 * GetCatalogSnapshot() relies on invalidation messages to know when to
6563 * take a new snapshot. COMMIT of xwait is responsible for sending the
6564 * invalidation. We're not acquiring heavyweight locks sufficient to
6565 * block if not yet sent, so we must take a new snapshot to ensure a later
6566 * attempt has a fair chance. While we don't need this if xwait aborted,
6567 * don't bother optimizing that.
6568 */
6569 if (!ret)
6570 {
6571 UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6574 }
6575 return ret;
6576}
6577
6578/*
6579 * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6580 *
6581 * The tuple cannot change size, and therefore its header fields and null
6582 * bitmap (if any) don't change either.
6583 *
6584 * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6585 */
6586void
6588 HeapTuple oldtup, HeapTuple tuple,
6589 Buffer buffer)
6590{
6591 HeapTupleHeader htup = oldtup->t_data;
6592 uint32 oldlen;
6593 uint32 newlen;
6594 char *dst;
6595 char *src;
6596 int nmsgs = 0;
6598 bool RelcacheInitFileInval = false;
6599
6600 Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6601 oldlen = oldtup->t_len - htup->t_hoff;
6602 newlen = tuple->t_len - tuple->t_data->t_hoff;
6603 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6604 elog(ERROR, "wrong tuple length");
6605
6606 dst = (char *) htup + htup->t_hoff;
6607 src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6608
6609 /* Like RecordTransactionCommit(), log only if needed */
6612 &RelcacheInitFileInval);
6613
6614 /*
6615 * Unlink relcache init files as needed. If unlinking, acquire
6616 * RelCacheInitLock until after associated invalidations. By doing this
6617 * in advance, if we checkpoint and then crash between inplace
6618 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6619 * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6620 * neglect to PANIC on EIO.
6621 */
6623
6624 /*----------
6625 * NO EREPORT(ERROR) from here till changes are complete
6626 *
6627 * Our exclusive buffer lock won't stop a reader having already pinned and
6628 * checked visibility for this tuple. With the usual order of changes
6629 * (i.e. updating the buffer contents before WAL logging), a reader could
6630 * observe our not-yet-persistent update to relfrozenxid and update
6631 * datfrozenxid based on that. A crash in that moment could allow
6632 * datfrozenxid to overtake relfrozenxid:
6633 *
6634 * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6635 * ["R" is a VACUUM tbl]
6636 * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6637 * D: systable_getnext() returns pg_class tuple of tbl
6638 * R: memcpy() into pg_class tuple of tbl
6639 * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6640 * [crash]
6641 * [recovery restores datfrozenxid w/o relfrozenxid]
6642 *
6643 * We avoid that by using a temporary copy of the buffer to hide our
6644 * change from other backends until the change has been WAL-logged. We
6645 * apply our change to the temporary copy and WAL-log it, before modifying
6646 * the real page. That way any action a reader of the in-place-updated
6647 * value takes will be WAL logged after this change.
6648 */
6650
6651 MarkBufferDirty(buffer);
6652
6653 /* XLOG stuff */
6654 if (RelationNeedsWAL(relation))
6655 {
6658 char *origdata = (char *) BufferGetBlock(buffer);
6659 Page page = BufferGetPage(buffer);
6660 uint16 lower = ((PageHeader) page)->pd_lower;
6661 uint16 upper = ((PageHeader) page)->pd_upper;
6663 RelFileLocator rlocator;
6664 ForkNumber forkno;
6665 BlockNumber blkno;
6667
6668 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6669 xlrec.dbId = MyDatabaseId;
6671 xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6672 xlrec.nmsgs = nmsgs;
6673
6676 if (nmsgs != 0)
6678 nmsgs * sizeof(SharedInvalidationMessage));
6679
6680 /* register block matching what buffer will look like after changes */
6685 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6686 Assert(forkno == MAIN_FORKNUM);
6687 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6689 XLogRegisterBufData(0, src, newlen);
6690
6691 /* inplace updates aren't decoded atm, don't log the origin */
6692
6694
6695 PageSetLSN(page, recptr);
6696 }
6697
6698 memcpy(dst, src, newlen);
6699
6701
6702 /*
6703 * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6704 * do this before UnlockTuple().
6705 */
6707
6709 UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6710
6711 AcceptInvalidationMessages(); /* local processing of just-sent inval */
6712
6713 /*
6714 * Queue a transactional inval, for logical decoding and for third-party
6715 * code that might have been relying on it since long before inplace
6716 * update adopted immediate invalidation. See README.tuplock section
6717 * "Reading inplace-updated columns" for logical decoding details.
6718 */
6720 CacheInvalidateHeapTuple(relation, tuple, NULL);
6721}
6722
6723/*
6724 * heap_inplace_unlock - reverse of heap_inplace_lock
6725 */
6726void
6728 HeapTuple oldtup, Buffer buffer)
6729{
6731 UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6733}
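
In practice the lock/update/unlock trio above is driven through the
systable_inplace_update_* wrappers in genam.c, which loop until
heap_inplace_lock() succeeds. A rough caller-side sketch, modelled on how
vac_update_relstats() updates a pg_class row (simplified, with illustrative
names):

/* Illustrative sketch of an inplace update of a pg_class row */
static void
inplace_update_relpages_sketch(Relation pg_class, Oid reloid, int32 num_pages)
{
    ScanKeyData key[1];
    HeapTuple   ctup;
    void       *inplace_state;
    Form_pg_class pgcform;
    bool        dirty = false;

    ScanKeyInit(&key[0], Anum_pg_class_oid, BTEqualStrategyNumber,
                F_OIDEQ, ObjectIdGetDatum(reloid));

    /* Finds the tuple, then retries heap_inplace_lock() until it succeeds */
    systable_inplace_update_begin(pg_class, ClassOidIndexId, true, NULL,
                                  1, key, &ctup, &inplace_state);

    pgcform = (Form_pg_class) GETSTRUCT(ctup);
    if (pgcform->relpages != num_pages)
    {
        pgcform->relpages = num_pages;
        dirty = true;
    }

    /* Either overwrite-and-unlock, or just unlock; never both */
    if (dirty)
        systable_inplace_update_finish(inplace_state, ctup);
    else
        systable_inplace_update_cancel(inplace_state);
}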
6734
6735#define FRM_NOOP 0x0001
6736#define FRM_INVALIDATE_XMAX 0x0002
6737#define FRM_RETURN_IS_XID 0x0004
6738#define FRM_RETURN_IS_MULTI 0x0008
6739#define FRM_MARK_COMMITTED 0x0010
6740
6741/*
6742 * FreezeMultiXactId
6743 * Determine what to do during freezing when a tuple is marked by a
6744 * MultiXactId.
6745 *
6746 * "flags" is an output value; it's used to tell caller what to do on return.
6747 * "pagefrz" is an input/output value, used to manage page level freezing.
6748 *
6749 * Possible values that we can set in "flags":
6750 * FRM_NOOP
6751 * don't do anything -- keep existing Xmax
6752 * FRM_INVALIDATE_XMAX
6753 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6754 * FRM_RETURN_IS_XID
6755 * The Xid return value is a single update Xid to set as xmax.
6756 * FRM_MARK_COMMITTED
6757 * Xmax can be marked as HEAP_XMAX_COMMITTED
6758 * FRM_RETURN_IS_MULTI
6759 * The return value is a new MultiXactId to set as new Xmax.
6760 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6761 *
6762 * Caller delegates control of page freezing to us. In practice we always
6763 * force freezing of caller's page unless FRM_NOOP processing is indicated.
6764 * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6765 * can never be left behind. We freely choose when and how to process each
6766 * Multi, without ever violating the cutoff postconditions for freezing.
6767 *
6768 * It's useful to remove Multis on a proactive timeline (relative to freezing
6769 * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6770 * be cheaper in the short run, for us, since we too can avoid SLRU buffer
6771 * misses through eager processing.
6772 *
6773 * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6774 * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6775 * This can usually be put off, which is often enough to avoid it altogether.
6776 * Allocating new multis during VACUUM should be avoided on general principle;
6777 * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6778 * its own special risks.
6779 *
6780 * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6781 * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6782 *
6783 * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6784 * have already forced page-level freezing, since that might incur the same
6785 * SLRU buffer misses that we specifically intended to avoid by freezing.
6786 */
6787static TransactionId
6789 const struct VacuumCutoffs *cutoffs, uint16 *flags,
6790 HeapPageFreeze *pagefrz)
6791{
6793 MultiXactMember *members;
6794 int nmembers;
6795 bool need_replace;
6796 int nnewmembers;
6798 bool has_lockers;
6800 bool update_committed;
6801 TransactionId FreezePageRelfrozenXid;
6802
6803 *flags = 0;
6804
6805 /* We should only be called in Multis */
6806 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6807
6808 if (!MultiXactIdIsValid(multi) ||
6809 HEAP_LOCKED_UPGRADED(t_infomask))
6810 {
6811 *flags |= FRM_INVALIDATE_XMAX;
6812 pagefrz->freeze_required = true;
6813 return InvalidTransactionId;
6814 }
6815 else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6816 ereport(ERROR,
6818 errmsg_internal("found multixact %u from before relminmxid %u",
6819 multi, cutoffs->relminmxid)));
6820 else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6821 {
6823
6824 /*
6825 * This old multi cannot possibly have members still running, but
6826 * verify just in case. If it was a locker only, it can be removed
6827 * without any further consideration; but if it contained an update,
6828 * we might need to preserve it.
6829 */
6830 if (MultiXactIdIsRunning(multi,
6831 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6832 ereport(ERROR,
6834 errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6835 multi, cutoffs->OldestMxact)));
6836
6837 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6838 {
6839 *flags |= FRM_INVALIDATE_XMAX;
6840 pagefrz->freeze_required = true;
6841 return InvalidTransactionId;
6842 }
6843
6844 /* replace multi with single XID for its updater? */
6845 update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6847 ereport(ERROR,
6849 errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6850 multi, update_xact,
6851 cutoffs->relfrozenxid)));
6852 else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6853 {
6854 /*
6855 * Updater XID has to have aborted (otherwise the tuple would have
6856 * been pruned away instead, since updater XID is < OldestXmin).
6857 * Just remove xmax.
6858 */
6860 ereport(ERROR,
6862 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6863 multi, update_xact,
6864 cutoffs->OldestXmin)));
6865 *flags |= FRM_INVALIDATE_XMAX;
6866 pagefrz->freeze_required = true;
6867 return InvalidTransactionId;
6868 }
6869
6870 /* Have to keep updater XID as new xmax */
6871 *flags |= FRM_RETURN_IS_XID;
6872 pagefrz->freeze_required = true;
6873 return update_xact;
6874 }
6875
6876 /*
6877 * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6878 * need to walk the whole members array to figure out what to do, if
6879 * anything.
6880 */
6881 nmembers =
6882 GetMultiXactIdMembers(multi, &members, false,
6883 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6884 if (nmembers <= 0)
6885 {
6886 /* Nothing worth keeping */
6887 *flags |= FRM_INVALIDATE_XMAX;
6888 pagefrz->freeze_required = true;
6889 return InvalidTransactionId;
6890 }
6891
6892 /*
6893 * The FRM_NOOP case is the only case where we might need to ratchet back
6894 * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6895 * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6896 * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6897 * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6898 * trackers managed by VACUUM being ratcheted back by xmax to the degree
6899 * required to make it safe to leave xmax undisturbed, independent of
6900 * whether or not page freezing is triggered somewhere else.
6901 *
6902 * Our policy is to force freezing in every case other than FRM_NOOP,
6903 * which obviates the need to maintain either set of trackers, anywhere.
6904 * Every other case will reliably execute a freeze plan for xmax that
6905 * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6906 * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6907 * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6908 * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6909 */
6910 need_replace = false;
6911 FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6912 for (int i = 0; i < nmembers; i++)
6913 {
6914 TransactionId xid = members[i].xid;
6915
6916 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6917
6918 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6919 {
6920 /* Can't violate the FreezeLimit postcondition */
6921 need_replace = true;
6922 break;
6923 }
6924 if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6925 FreezePageRelfrozenXid = xid;
6926 }
6927
6928 /* Can't violate the MultiXactCutoff postcondition, either */
6929 if (!need_replace)
6931
6932 if (!need_replace)
6933 {
6934 /*
6935 * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6936 * both together to make it safe to retain this particular multi after
6937 * freezing its page
6938 */
6939 *flags |= FRM_NOOP;
6940 pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6941 if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6942 pagefrz->FreezePageRelminMxid = multi;
6943 pfree(members);
6944 return multi;
6945 }
6946
6947 /*
6948 * Do a more thorough second pass over the multi to figure out which
6949 * member XIDs actually need to be kept. Checking the precise status of
6950 * individual members might even show that we don't need to keep anything.
6951 * That is quite possible even though the Multi must be >= OldestMxact,
6952 * since our second pass only keeps member XIDs when it's truly necessary;
6953 * even member XIDs >= OldestXmin often won't be kept by second pass.
6954 */
6955 nnewmembers = 0;
6957 has_lockers = false;
6959 update_committed = false;
6960
6961 /*
6962 * Determine whether to keep each member xid, or to ignore it instead
6963 */
6964 for (int i = 0; i < nmembers; i++)
6965 {
6966 TransactionId xid = members[i].xid;
6967 MultiXactStatus mstatus = members[i].status;
6968
6969 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6970
6971 if (!ISUPDATE_from_mxstatus(mstatus))
6972 {
6973 /*
6974 * Locker XID (not updater XID). We only keep lockers that are
6975 * still running.
6976 */
6979 {
6980 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6981 ereport(ERROR,
6983 errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6984 multi, xid,
6985 cutoffs->OldestXmin)));
6986 newmembers[nnewmembers++] = members[i];
6987 has_lockers = true;
6988 }
6989
6990 continue;
6991 }
6992
6993 /*
6994 * Updater XID (not locker XID). Should we keep it?
6995 *
6996 * Since the tuple wasn't totally removed when vacuum pruned, the
6997 * update Xid cannot possibly be older than OldestXmin cutoff unless
6998 * the updater XID aborted. If the updater transaction is known
6999 * aborted or crashed then it's okay to ignore it, otherwise not.
7000 *
7001 * In any case the Multi should never contain two updaters, whatever
7002 * their individual commit status. Check for that first, in passing.
7003 */
7005 ereport(ERROR,
7007 errmsg_internal("multixact %u has two or more updating members",
7008 multi),
7009 errdetail_internal("First updater XID=%u second updater XID=%u.",
7010 update_xid, xid)));
7011
7012 /*
7013 * As with all tuple visibility routines, it's critical to test
7014 * TransactionIdIsInProgress before TransactionIdDidCommit, because of
7015 * race conditions explained in detail in heapam_visibility.c.
7016 */
7019 update_xid = xid;
7020 else if (TransactionIdDidCommit(xid))
7021 {
7022 /*
7023 * The transaction committed, so we can tell caller to set
7024 * HEAP_XMAX_COMMITTED. (We can only do this because we know the
7025 * transaction is not running.)
7026 */
7027 update_committed = true;
7028 update_xid = xid;
7029 }
7030 else
7031 {
7032 /*
7033 * Not in progress, not committed -- must be aborted or crashed;
7034 * we can ignore it.
7035 */
7036 continue;
7037 }
7038
7039 /*
7040 * We determined that updater must be kept -- add it to pending new
7041 * members list
7042 */
7043 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
7044 ereport(ERROR,
7046 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
7047 multi, xid, cutoffs->OldestXmin)));
7048 newmembers[nnewmembers++] = members[i];
7049 }
7050
7051 pfree(members);
7052
7053 /*
7054 * Determine what to do with caller's multi based on information gathered
7055 * during our second pass
7056 */
7057 if (nnewmembers == 0)
7058 {
7059 /* Nothing worth keeping */
7060 *flags |= FRM_INVALIDATE_XMAX;
7062 }
7064 {
7065 /*
7066 * If there's a single member and it's an update, pass it back alone
7067 * without creating a new Multi. (XXX we could do this when there's a
7068 * single remaining locker, too, but that would complicate the API too
7069 * much; moreover, the case with the single updater is more
7070 * interesting, because those are longer-lived.)
7071 */
7072 Assert(nnewmembers == 1);
7073 *flags |= FRM_RETURN_IS_XID;
7074 if (update_committed)
7075 *flags |= FRM_MARK_COMMITTED;
7077 }
7078 else
7079 {
7080 /*
7081 * Create a new multixact with the surviving members of the previous
7082 * one, to set as new Xmax in the tuple
7083 */
7085 *flags |= FRM_RETURN_IS_MULTI;
7086 }
7087
7089
7090 pagefrz->freeze_required = true;
7091 return newxmax;
7092}
7093
7094/*
7095 * heap_prepare_freeze_tuple
7096 *
7097 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7098 * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7099 * setup enough state (in the *frz output argument) to enable caller to
7100 * process this tuple as part of freezing its page, and return true. Return
7101 * false if nothing can be changed about the tuple right now.
7102 *
7103 * FreezePageConflictXid is advanced only for xmin/xvac freezing, not for xmax
7104 * changes. We only remove xmax state here when it is lock-only, or when the
7105 * updater XID (including an updater member of a MultiXact) must be aborted;
7106 * otherwise, the tuple would already be removable. Neither case affects
7107 * visibility on a standby.
7108 *
7109 * Also sets *totally_frozen to true if the tuple will be totally frozen once
7110 * caller executes returned freeze plan (or if the tuple was already totally
7111 * frozen by an earlier VACUUM). This indicates that there are no remaining
7112 * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7113 *
7114 * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7115 * tuple that we returned true for, and then execute freezing. Caller must
7116 * initialize pagefrz fields for page as a whole before first call here for
7117 * each heap page.
7118 *
7119 * VACUUM caller decides on whether or not to freeze the page as a whole.
7120 * We'll often prepare freeze plans for a page that caller just discards.
7121 * However, VACUUM doesn't always get to make a choice; it must freeze when
7122 * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7123 * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7124 * that VACUUM always follows that rule.
7125 *
7126 * We sometimes force freezing of xmax MultiXactId values long before it is
7127 * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7128 * It's worth processing MultiXactIds proactively when it is cheap to do so,
7129 * and it's convenient to make that happen by piggy-backing it on the "force
7130 * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7131 * because it is expensive right now (though only when it's still possible to
7132 * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7133 *
7134 * It is assumed that the caller has checked the tuple with
7135 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7136 * (else we should be removing the tuple, not freezing it).
7137 *
7138 * NB: This function has side effects: it might allocate a new MultiXactId.
7139 * It will be set as tuple's new xmax when our *frz output is processed within
7140 * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7141 * then caller had better have an exclusive lock on it already.
7142 */
7143bool
7145 const struct VacuumCutoffs *cutoffs,
7146 HeapPageFreeze *pagefrz,
7148{
7149 bool xmin_already_frozen = false,
7150 xmax_already_frozen = false;
7151 bool freeze_xmin = false,
7152 replace_xvac = false,
7153 replace_xmax = false,
7154 freeze_xmax = false;
7155 TransactionId xid;
7156
7157 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7158 frz->t_infomask2 = tuple->t_infomask2;
7159 frz->t_infomask = tuple->t_infomask;
7160 frz->frzflags = 0;
7161 frz->checkflags = 0;
7162
7163 /*
7164 * Process xmin, while keeping track of whether it's already frozen, or
7165 * will become frozen iff our freeze plan is executed by caller (could be
7166 * neither).
7167 */
7168 xid = HeapTupleHeaderGetXmin(tuple);
7169 if (!TransactionIdIsNormal(xid))
7170 xmin_already_frozen = true;
7171 else
7172 {
7173 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7174 ereport(ERROR,
7176 errmsg_internal("found xmin %u from before relfrozenxid %u",
7177 xid, cutoffs->relfrozenxid)));
7178
7179 /* Will set freeze_xmin flags in freeze plan below */
7181
7182 /* Verify that xmin committed if and when freeze plan is executed */
7183 if (freeze_xmin)
7184 {
7187 pagefrz->FreezePageConflictXid = xid;
7188 }
7189 }
7190
7191 /*
7192 * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7193 * as we support having MOVED_OFF/MOVED_IN tuples in the database
7194 */
7195 xid = HeapTupleHeaderGetXvac(tuple);
7196 if (TransactionIdIsNormal(xid))
7197 {
7199 Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7200
7201 /*
7202 * For Xvac, we always freeze proactively. This allows totally_frozen
7203 * tracking to ignore xvac.
7204 */
7205 replace_xvac = pagefrz->freeze_required = true;
7206
7208 pagefrz->FreezePageConflictXid = xid;
7209
7210 /* Will set replace_xvac flags in freeze plan below */
7211 }
7212
7213 /* Now process xmax */
7214 xid = frz->xmax;
7215 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7216 {
7217 /* Raw xmax is a MultiXactId */
7219 uint16 flags;
7220
7221 /*
7222 * We will either remove xmax completely (in the "freeze_xmax" path),
7223 * process xmax by replacing it (in the "replace_xmax" path), or
7224 * perform no-op xmax processing. The only constraint is that the
7225 * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7226 */
7227 newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7228 &flags, pagefrz);
7229
7230 if (flags & FRM_NOOP)
7231 {
7232 /*
7233 * xmax is a MultiXactId, and nothing about it changes for now.
7234 * This is the only case where 'freeze_required' won't have been
7235 * set for us by FreezeMultiXactId, as well as the only case where
7236 * neither freeze_xmax nor replace_xmax are set (given a multi).
7237 *
7238 * This is a no-op, but the call to FreezeMultiXactId might have
7239 * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7240 * for us (the "freeze page" variants, specifically). That'll
7241 * make it safe for our caller to freeze the page later on, while
7242 * leaving this particular xmax undisturbed.
7243 *
7244 * FreezeMultiXactId is _not_ responsible for the "no freeze"
7245 * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7246 * job. A call to heap_tuple_should_freeze for this same tuple
7247 * will take place below if 'freeze_required' isn't set already.
7248 * (This repeats work from FreezeMultiXactId, but allows "no
7249 * freeze" tracker maintenance to happen in only one place.)
7250 */
7253 }
7254 else if (flags & FRM_RETURN_IS_XID)
7255 {
7256 /*
7257 * xmax will become an updater Xid (original MultiXact's updater
7258 * member Xid will be carried forward as a simple Xid in Xmax).
7259 */
7261
7262 /*
7263 * NB -- some of these transformations are only valid because we
7264 * know the return Xid is a tuple updater (i.e. not merely a
7265 * locker.) Also note that the only reason we don't explicitly
7266 * worry about HEAP_KEYS_UPDATED is because it lives in
7267 * t_infomask2 rather than t_infomask.
7268 */
7269 frz->t_infomask &= ~HEAP_XMAX_BITS;
7270 frz->xmax = newxmax;
7271 if (flags & FRM_MARK_COMMITTED)
7272 frz->t_infomask |= HEAP_XMAX_COMMITTED;
7273 replace_xmax = true;
7274 }
7275 else if (flags & FRM_RETURN_IS_MULTI)
7276 {
7279
7280 /*
7281 * xmax is an old MultiXactId that we have to replace with a new
7282 * MultiXactId, to carry forward two or more original member XIDs.
7283 */
7285
7286 /*
7287 * We can't use GetMultiXactIdHintBits directly on the new multi
7288 * here; that routine initializes the masks to all zeroes, which
7289 * would lose other bits we need. Doing it this way ensures all
7290 * unrelated bits remain untouched.
7291 */
7292 frz->t_infomask &= ~HEAP_XMAX_BITS;
7293 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7295 frz->t_infomask |= newbits;
7296 frz->t_infomask2 |= newbits2;
7297 frz->xmax = newxmax;
7298 replace_xmax = true;
7299 }
7300 else
7301 {
7302 /*
7303 * Freeze plan for tuple "freezes xmax" in the strictest sense:
7304 * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7305 */
7306 Assert(flags & FRM_INVALIDATE_XMAX);
7308
7309 /* Will set freeze_xmax flags in freeze plan below */
7310 freeze_xmax = true;
7311 }
7312
7313 /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7314 Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7315 }
7316 else if (TransactionIdIsNormal(xid))
7317 {
7318 /* Raw xmax is normal XID */
7319 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7320 ereport(ERROR,
7322 errmsg_internal("found xmax %u from before relfrozenxid %u",
7323 xid, cutoffs->relfrozenxid)));
7324
7325 /* Will set freeze_xmax flags in freeze plan below */
7327
7328 /*
7329 * Verify that xmax aborted if and when freeze plan is executed,
7330 * provided it's from an update. (A lock-only xmax can be removed
7331 * independent of this, since the lock is released at xact end.)
7332 */
7334 frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7335 }
7336 else if (!TransactionIdIsValid(xid))
7337 {
7338 /* Raw xmax is InvalidTransactionId XID */
7339 Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7340 xmax_already_frozen = true;
7341 }
7342 else
7343 ereport(ERROR,
7345 errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7346 xid, tuple->t_infomask)));
7347
7348 if (freeze_xmin)
7349 {
7351
7352 frz->t_infomask |= HEAP_XMIN_FROZEN;
7353 }
7354 if (replace_xvac)
7355 {
7356 /*
7357 * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7358 * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7359 * transaction succeeded.
7360 */
7361 Assert(pagefrz->freeze_required);
7362 if (tuple->t_infomask & HEAP_MOVED_OFF)
7363 frz->frzflags |= XLH_INVALID_XVAC;
7364 else
7365 frz->frzflags |= XLH_FREEZE_XVAC;
7366 }
7367 if (replace_xmax)
7368 {
7370 Assert(pagefrz->freeze_required);
7371
7372 /* Already set replace_xmax flags in freeze plan earlier */
7373 }
7374 if (freeze_xmax)
7375 {
7377
7378 frz->xmax = InvalidTransactionId;
7379
7380 /*
7381 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7382 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7383 * Also get rid of the HEAP_KEYS_UPDATED bit.
7384 */
7385 frz->t_infomask &= ~HEAP_XMAX_BITS;
7386 frz->t_infomask |= HEAP_XMAX_INVALID;
7387 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7388 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7389 }
7390
7391 /*
7392 * Determine if this tuple is already totally frozen, or will become
7393 * totally frozen (provided caller executes freeze plans for the page)
7394 */
7397
7398 if (!pagefrz->freeze_required && !(xmin_already_frozen &&
7400 {
7401 /*
7402 * So far no previous tuple from the page made freezing mandatory.
7403 * Does this tuple force caller to freeze the entire page?
7404 */
7405 pagefrz->freeze_required =
7406 heap_tuple_should_freeze(tuple, cutoffs,
7407 &pagefrz->NoFreezePageRelfrozenXid,
7408 &pagefrz->NoFreezePageRelminMxid);
7409 }
7410
7411 /* Tell caller if this tuple has a usable freeze plan set in *frz */
7413}
7414
7415/*
7416 * Perform xmin/xmax XID status sanity checks before actually executing freeze
7417 * plans.
7418 *
7419 * heap_prepare_freeze_tuple doesn't perform these checks directly because
7420 * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7421 * successive VACUUMs that each decide against freezing the same page.
7422 */
7423void
7425 HeapTupleFreeze *tuples, int ntuples)
7426{
7427 Page page = BufferGetPage(buffer);
7428
7429 for (int i = 0; i < ntuples; i++)
7430 {
7431 HeapTupleFreeze *frz = tuples + i;
7432 ItemId itemid = PageGetItemId(page, frz->offset);
7433 HeapTupleHeader htup;
7434
7435 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7436
7437 /* Deliberately avoid relying on tuple hint bits here */
7438 if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7439 {
7441
7443 if (unlikely(!TransactionIdDidCommit(xmin)))
7444 ereport(ERROR,
7446 errmsg_internal("uncommitted xmin %u needs to be frozen",
7447 xmin)));
7448 }
7449
7450 /*
7451 * TransactionIdDidAbort won't work reliably in the presence of XIDs
7452 * left behind by transactions that were in progress during a crash,
7453 * so we can only check that xmax didn't commit
7454 */
7455 if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7456 {
7458
7461 ereport(ERROR,
7463 errmsg_internal("cannot freeze committed xmax %u",
7464 xmax)));
7465 }
7466 }
7467}
7468
7469/*
7470 * Helper which executes freezing of one or more heap tuples on a page on
7471 * behalf of caller. Caller passes an array of tuple plans from
7472 * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7473 * Must be called in a critical section that also marks the buffer dirty and,
7474 * if needed, emits WAL.
7475 */
7476void
7478{
7479 Page page = BufferGetPage(buffer);
7480
7481 for (int i = 0; i < ntuples; i++)
7482 {
7483 HeapTupleFreeze *frz = tuples + i;
7484 ItemId itemid = PageGetItemId(page, frz->offset);
7485 HeapTupleHeader htup;
7486
7487 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7489 }
7490}
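
Taken together, heap_prepare_freeze_tuple(), heap_pre_freeze_checks() and
heap_freeze_prepared_tuples() form a prepare / verify / execute sequence. The
sketch below shows the intended calling pattern in condensed form; VACUUM's
real version also WAL-logs the change and maintains the visibility map, and
the page-level initializations here simply mirror the ones heap_freeze_tuple()
uses.

/* Illustrative sketch only: VACUUM's real code path also emits WAL, etc. */
static void
freeze_page_sketch(Buffer buf, const struct VacuumCutoffs *cutoffs)
{
    Page        page = BufferGetPage(buf);
    OffsetNumber offnum,
                maxoff = PageGetMaxOffsetNumber(page);
    HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
    HeapPageFreeze pagefrz;
    int         nfrozen = 0;

    /* Initialize page-level freeze state once per heap page */
    pagefrz.freeze_required = false;
    pagefrz.FreezePageRelfrozenXid = cutoffs->OldestXmin;
    pagefrz.FreezePageRelminMxid = cutoffs->OldestMxact;
    pagefrz.FreezePageConflictXid = InvalidTransactionId;
    pagefrz.NoFreezePageRelfrozenXid = cutoffs->OldestXmin;
    pagefrz.NoFreezePageRelminMxid = cutoffs->OldestMxact;

    /* Prepare: build a freeze plan for every tuple that has one */
    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
    {
        ItemId      itemid = PageGetItemId(page, offnum);
        bool        totally_frozen;

        if (!ItemIdIsNormal(itemid))
            continue;
        if (heap_prepare_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
                                      cutoffs, &pagefrz,
                                      &frozen[nfrozen], &totally_frozen))
            frozen[nfrozen++].offset = offnum;
    }

    /* Execute -- here only when the cutoffs leave no choice */
    if (nfrozen > 0 && pagefrz.freeze_required)
    {
        heap_pre_freeze_checks(buf, frozen, nfrozen);   /* pg_xact sanity */
        START_CRIT_SECTION();
        MarkBufferDirty(buf);
        heap_freeze_prepared_tuples(buf, frozen, nfrozen);
        /* a WAL-logged caller would insert its freeze/prune record here */
        END_CRIT_SECTION();
    }
}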
7491
7492/*
7493 * heap_freeze_tuple
7494 * Freeze tuple in place, without WAL logging.
7495 *
7496 * Useful for callers like CLUSTER that perform their own WAL logging.
7497 */
7498bool
7500 TransactionId relfrozenxid, TransactionId relminmxid,
7501 TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7502{
7504 bool do_freeze;
7505 bool totally_frozen;
7506 struct VacuumCutoffs cutoffs;
7507 HeapPageFreeze pagefrz;
7508
7509 cutoffs.relfrozenxid = relfrozenxid;
7510 cutoffs.relminmxid = relminmxid;
7511 cutoffs.OldestXmin = FreezeLimit;
7512 cutoffs.OldestMxact = MultiXactCutoff;
7513 cutoffs.FreezeLimit = FreezeLimit;
7515
7516 pagefrz.freeze_required = true;
7517 pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7518 pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7519 pagefrz.FreezePageConflictXid = InvalidTransactionId;
7520 pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7521 pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7522
7523 do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7524 &pagefrz, &frz, &totally_frozen);
7525
7526 /*
7527 * Note that because this is not a WAL-logged operation, we don't need to
7528 * fill in the offset in the freeze record.
7529 */
7530
7531 if (do_freeze)
7533 return do_freeze;
7534}
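
For example, the CLUSTER/VACUUM FULL rewrite path copies each tuple into a new
heap and WAL-logs the new pages itself, so it can freeze the copied tuple with
a call shaped roughly like this (the cutoffs are the caller's own; the helper
is illustrative):

/* Illustrative sketch: a rewriting caller freezing a copied tuple */
static void
freeze_rewritten_tuple_sketch(Relation oldheap, HeapTuple tuple,
                              TransactionId freeze_xid,
                              MultiXactId cutoff_multi)
{
    if (heap_freeze_tuple(tuple->t_data,
                          oldheap->rd_rel->relfrozenxid,
                          oldheap->rd_rel->relminmxid,
                          freeze_xid, cutoff_multi))
    {
        /*
         * The tuple header was rewritten in place; the caller's own WAL for
         * the new page is what makes the change durable.
         */
    }
}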
7535
7536/*
7537 * For a given MultiXactId, return the hint bits that should be set in the
7538 * tuple's infomask.
7539 *
7540 * Normally this should be called for a multixact that was just created, and
7541 * so is on our local cache, so the GetMembers call is fast.
7542 */
7543static void
7546{
7547 int nmembers;
7548 MultiXactMember *members;
7549 int i;
7551 uint16 bits2 = 0;
7552 bool has_update = false;
7554
7555 /*
7556 * We only use this in multis we just created, so they cannot be values
7557 * pre-pg_upgrade.
7558 */
7559 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7560
7561 for (i = 0; i < nmembers; i++)
7562 {
7564
7565 /*
7566 * Remember the strongest lock mode held by any member of the
7567 * multixact.
7568 */
7569 mode = TUPLOCK_from_mxstatus(members[i].status);
7570 if (mode > strongest)
7571 strongest = mode;
7572
7573 /* See what other bits we need */
7574 switch (members[i].status)
7575 {
7579 break;
7580
7583 break;
7584
7586 has_update = true;
7587 break;
7588
7591 has_update = true;
7592 break;
7593 }
7594 }
7595
7598 bits |= HEAP_XMAX_EXCL_LOCK;
7599 else if (strongest == LockTupleShare)
7600 bits |= HEAP_XMAX_SHR_LOCK;
7601 else if (strongest == LockTupleKeyShare)
7602 bits |= HEAP_XMAX_KEYSHR_LOCK;
7603
7604 if (!has_update)
7605 bits |= HEAP_XMAX_LOCK_ONLY;
7606
7607 if (nmembers > 0)
7608 pfree(members);
7609
7610 *new_infomask = bits;
7612}
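
As a worked example (not from the source): for a multixact whose members are a
FOR KEY SHARE locker plus a no-key updater, the strongest member lock mode is
LockTupleNoKeyExclusive and has_update ends up true, so this routine should
return new_infomask = HEAP_XMAX_IS_MULTI | HEAP_XMAX_EXCL_LOCK (with no
HEAP_XMAX_LOCK_ONLY) and new_infomask2 = 0, since HEAP_KEYS_UPDATED is only
set for key-changing updates.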
7613
7614/*
7615 * MultiXactIdGetUpdateXid
7616 *
7617 * Given a multixact Xmax and corresponding infomask, which does not have the
7618 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7619 * transaction.
7620 *
7621 * Caller is expected to check the status of the updating transaction, if
7622 * necessary.
7623 */
7624static TransactionId
7626{
7628 MultiXactMember *members;
7629 int nmembers;
7630
7631 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7632 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7633
7634 /*
7635 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7636 * pre-pg_upgrade.
7637 */
7638 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7639
7640 if (nmembers > 0)
7641 {
7642 int i;
7643
7644 for (i = 0; i < nmembers; i++)
7645 {
7646 /* Ignore lockers */
7647 if (!ISUPDATE_from_mxstatus(members[i].status))
7648 continue;
7649
7650 /* there can be at most one updater */
7652 update_xact = members[i].xid;
7653#ifndef USE_ASSERT_CHECKING
7654
7655 /*
7656 * in an assert-enabled build, walk the whole array to ensure
7657 * there's no other updater.
7658 */
7659 break;
7660#endif
7661 }
7662
7663 pfree(members);
7664 }
7665
7666 return update_xact;
7667}
7668
7669/*
7670 * HeapTupleGetUpdateXid
7671 * As above, but use a HeapTupleHeader
7672 *
7673 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7674 * checking the hint bits.
7675 */
7682
7683/*
7684 * Does the given multixact conflict with the current transaction grabbing a
7685 * tuple lock of the given strength?
7686 *
7687 * The passed infomask pairs up with the given multixact in the tuple header.
7688 *
7689 * If current_is_member is not NULL, it is set to 'true' if the current
7690 * transaction is a member of the given multixact.
7691 */
7692static bool
7694 LockTupleMode lockmode, bool *current_is_member)
7695{
7696 int nmembers;
7697 MultiXactMember *members;
7698 bool result = false;
7699 LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7700
7702 return false;
7703
7704 nmembers = GetMultiXactIdMembers(multi, &members, false,
7706 if (nmembers >= 0)
7707 {
7708 int i;
7709
7710 for (i = 0; i < nmembers; i++)
7711 {
7714
7715 if (result && (current_is_member == NULL || *current_is_member))
7716 break;
7717
7718 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7719
7720 /* ignore members from current xact (but track their presence) */
7721 memxid = members[i].xid;
7723 {
7724 if (current_is_member != NULL)
7725 *current_is_member = true;
7726 continue;
7727 }
7728 else if (result)
7729 continue;
7730
7731 /* ignore members that don't conflict with the lock we want */
7733 continue;
7734
7735 if (ISUPDATE_from_mxstatus(members[i].status))
7736 {
7737 /* ignore aborted updaters */
7739 continue;
7740 }
7741 else
7742 {
7743 /* ignore lockers-only that are no longer in progress */
7745 continue;
7746 }
7747
7748 /*
7749 * Whatever remains are either live lockers that conflict with our
7750 * wanted lock, or updaters that are not aborted. Those conflict
7751 * with what we want. Set up to return true, but keep going to
7752 * look for the current transaction among the multixact members,
7753 * if needed.
7754 */
7755 result = true;
7756 }
7757 pfree(members);
7758 }
7759
7760 return result;
7761}
7762
7763/*
7764 * Do_MultiXactIdWait
7765 * Actual implementation for the two functions below.
7766 *
7767 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7768 * needed to ensure we only sleep on conflicting members, and the infomask is
7769 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7770 * indicates whether to use conditional lock acquisition, to allow callers to
7771 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7772 * context information for error messages. 'remaining', if not NULL, receives
7773 * the number of members that are still running, including any (non-aborted)
7774 * subtransactions of our own transaction. 'logLockFailure' indicates whether
7775 * to log details when a lock acquisition fails with 'nowait' enabled.
7776 *
7777 * We do this by sleeping on each member using XactLockTableWait. Any
7778 * members that belong to the current backend are *not* waited for, however;
7779 * this would not merely be useless but would lead to Assert failure inside
7780 * XactLockTableWait. By the time this returns, it is certain that all
7781 * transactions *of other backends* that were members of the MultiXactId
7782 * that conflict with the requested status are dead (and no new ones can have
7783 * been added, since it is not legal to add members to an existing
7784 * MultiXactId).
7785 *
7786 * But by the time we finish sleeping, someone else may have changed the Xmax
7787 * of the containing tuple, so the caller needs to iterate on us somehow.
7788 *
7789 * Note that in case we return false, the number of remaining members is
7790 * not to be trusted.
7791 */
7792static bool
7794 uint16 infomask, bool nowait,
7795 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7796 int *remaining, bool logLockFailure)
7797{
7798 bool result = true;
7799 MultiXactMember *members;
7800 int nmembers;
7801 int remain = 0;
7802
7803 /* for pre-pg_upgrade tuples, no need to sleep at all */
7804 nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7805 GetMultiXactIdMembers(multi, &members, false,
7807
7808 if (nmembers >= 0)
7809 {
7810 int i;
7811
7812 for (i = 0; i < nmembers; i++)
7813 {
7814 TransactionId memxid = members[i].xid;
7815 MultiXactStatus memstatus = members[i].status;
7816
7818 {
7819 remain++;
7820 continue;
7821 }
7822
7824 LOCKMODE_from_mxstatus(status)))
7825 {
7827 remain++;
7828 continue;
7829 }
7830
7831 /*
7832 * This member conflicts with our multi, so we have to sleep (or
7833 * return failure, if asked to avoid waiting.)
7834 *
7835 * Note that we don't set up an error context callback ourselves,
7836 * but instead we pass the info down to XactLockTableWait. This
7837 * might seem a bit wasteful because the context is set up and
7838 * tore down for each member of the multixact, but in reality it
7839 * should be barely noticeable, and it avoids duplicate code.
7840 */
7841 if (nowait)
7842 {
7844 if (!result)
7845 break;
7846 }
7847 else
7848 XactLockTableWait(memxid, rel, ctid, oper);
7849 }
7850
7851 pfree(members);
7852 }
7853
7854 if (remaining)
7855 *remaining = remain;
7856
7857 return result;
7858}
7859
7860/*
7861 * MultiXactIdWait
7862 * Sleep on a MultiXactId.
7863 *
7864 * By the time we finish sleeping, someone else may have changed the Xmax
7865 * of the containing tuple, so the caller needs to iterate on us somehow.
7866 *
7867 * We return (in *remaining, if not NULL) the number of members that are still
7868 * running, including any (non-aborted) subtransactions of our own transaction.
7869 */
7870static void
7872 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7873 int *remaining)
7874{
7875 (void) Do_MultiXactIdWait(multi, status, infomask, false,
7876 rel, ctid, oper, remaining, false);
7877}
7878
7879/*
7880 * ConditionalMultiXactIdWait
7881 * As above, but only lock if we can get the lock without blocking.
7882 *
7883 * By the time we finish sleeping, someone else may have changed the Xmax
7884 * of the containing tuple, so the caller needs to iterate on us somehow.
7885 *
7886 * If the multixact is now all gone, return true. Returns false if some
7887 * transactions might still be running.
7888 *
7889 * We return (in *remaining, if not NULL) the number of members that are still
7890 * running, including any (non-aborted) subtransactions of our own transaction.
7891 */
7892static bool
7894 uint16 infomask, Relation rel, int *remaining,
7895 bool logLockFailure)
7896{
7897 return Do_MultiXactIdWait(multi, status, infomask, true,
7899}
7900
7901/*
7902 * heap_tuple_needs_eventual_freeze
7903 *
7904 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7905 * will eventually require freezing (if tuple isn't removed by pruning first).
7906 */
7907bool
7909{
7910 TransactionId xid;
7911
7912 /*
7913 * If xmin is a normal transaction ID, this tuple is definitely not
7914 * frozen.
7915 */
7916 xid = HeapTupleHeaderGetXmin(tuple);
7917 if (TransactionIdIsNormal(xid))
7918 return true;
7919
7920 /*
7921 * If xmax is a valid xact or multixact, this tuple is also not frozen.
7922 */
7923 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7924 {
7925 MultiXactId multi;
7926
7927 multi = HeapTupleHeaderGetRawXmax(tuple);
7928 if (MultiXactIdIsValid(multi))
7929 return true;
7930 }
7931 else
7932 {
7933 xid = HeapTupleHeaderGetRawXmax(tuple);
7934 if (TransactionIdIsNormal(xid))
7935 return true;
7936 }
7937
7938 if (tuple->t_infomask & HEAP_MOVED)
7939 {
7940 xid = HeapTupleHeaderGetXvac(tuple);
7941 if (TransactionIdIsNormal(xid))
7942 return true;
7943 }
7944
7945 return false;
7946}
7947
7948/*
7949 * heap_tuple_should_freeze
7950 *
7951 * Return value indicates if heap_prepare_freeze_tuple sibling function would
7952 * (or should) force freezing of the heap page that contains caller's tuple.
7953 * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7954 * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7955 *
7956 * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7957 * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7958 * Our working assumption is that caller won't decide to freeze this tuple.
7959 * It's up to caller to only ratchet back its own top-level trackers after the
7960 * point that it fully commits to not freezing the tuple/page in question.
7961 */
7962bool
7964 const struct VacuumCutoffs *cutoffs,
7965 TransactionId *NoFreezePageRelfrozenXid,
7966 MultiXactId *NoFreezePageRelminMxid)
7967{
7968 TransactionId xid;
7969 MultiXactId multi;
7970 bool freeze = false;
7971
7972 /* First deal with xmin */
7973 xid = HeapTupleHeaderGetXmin(tuple);
7974 if (TransactionIdIsNormal(xid))
7975 {
7977 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7978 *NoFreezePageRelfrozenXid = xid;
7979 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7980 freeze = true;
7981 }
7982
7983 /* Now deal with xmax */
7985 multi = InvalidMultiXactId;
7986 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7987 multi = HeapTupleHeaderGetRawXmax(tuple);
7988 else
7989 xid = HeapTupleHeaderGetRawXmax(tuple);
7990
7991 if (TransactionIdIsNormal(xid))
7992 {
7994 /* xmax is a non-permanent XID */
7995 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7996 *NoFreezePageRelfrozenXid = xid;
7997 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7998 freeze = true;
7999 }
8000 else if (!MultiXactIdIsValid(multi))
8001 {
8002 /* xmax is a permanent XID or invalid MultiXactId/XID */
8003 }
8004 else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
8005 {
8006 /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
8007 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
8008 *NoFreezePageRelminMxid = multi;
8009 /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
8010 freeze = true;
8011 }
8012 else
8013 {
8014 /* xmax is a MultiXactId that may have an updater XID */
8015 MultiXactMember *members;
8016 int nmembers;
8017
8019 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
8020 *NoFreezePageRelminMxid = multi;
8021 if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
8022 freeze = true;
8023
8024 /* need to check whether any member of the mxact is old */
8025 nmembers = GetMultiXactIdMembers(multi, &members, false,
8027
8028 for (int i = 0; i < nmembers; i++)
8029 {
8030 xid = members[i].xid;
8032 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
8033 *NoFreezePageRelfrozenXid = xid;
8034 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
8035 freeze = true;
8036 }
8037 if (nmembers > 0)
8038 pfree(members);
8039 }
8040
8041 if (tuple->t_infomask & HEAP_MOVED)
8042 {
8043 xid = HeapTupleHeaderGetXvac(tuple);
8044 if (TransactionIdIsNormal(xid))
8045 {
8047 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
8048 *NoFreezePageRelfrozenXid = xid;
8049 /* heap_prepare_freeze_tuple forces xvac freezing */
8050 freeze = true;
8051 }
8052 }
8053
8054 return freeze;
8055}
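
A sketch of the intended tracker handshake follows: the caller seeds its own
"no freeze" trackers, lets this routine ratchet them back tuple by tuple, and
folds them into its top-level relfrozenxid/relminmxid trackers only once it
has definitely decided not to freeze the page. Names are illustrative;
VACUUM's real bookkeeping is more involved.

/* Illustrative sketch of the "no freeze" tracker protocol */
static bool
page_would_require_freeze_sketch(Page page, OffsetNumber maxoff,
                                 const struct VacuumCutoffs *cutoffs,
                                 TransactionId *new_relfrozenxid,
                                 MultiXactId *new_relminmxid)
{
    TransactionId no_freeze_xid = *new_relfrozenxid;
    MultiXactId no_freeze_mxid = *new_relminmxid;
    bool        freeze_required = false;

    for (OffsetNumber off = FirstOffsetNumber; off <= maxoff; off++)
    {
        ItemId      itemid = PageGetItemId(page, off);

        if (!ItemIdIsNormal(itemid))
            continue;
        if (heap_tuple_should_freeze((HeapTupleHeader) PageGetItem(page, itemid),
                                     cutoffs, &no_freeze_xid, &no_freeze_mxid))
            freeze_required = true;
    }

    if (!freeze_required)
    {
        /* Committed to "no freeze": now ratchet the caller's trackers back */
        *new_relfrozenxid = no_freeze_xid;
        *new_relminmxid = no_freeze_mxid;
    }
    return freeze_required;
}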
8056
8057/*
8058 * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
8059 * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
8060 * that caller is in the process of physically removing, e.g. via HOT pruning
8061 * or index deletion.
8062 *
8063 * Caller must initialize its value to InvalidTransactionId, which is
8064 * generally interpreted as "definitely no need for a recovery conflict".
8065 * Final value must reflect all heap tuples that caller will physically remove
8066 * (or remove TID references to) via its ongoing pruning/deletion operation.
8067 * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
8068 * caller's WAL record) by REDO routine when it replays caller's operation.
8069 */
8070void
8072 TransactionId *snapshotConflictHorizon)
8073{
8077
8078 if (tuple->t_infomask & HEAP_MOVED)
8079 {
8080 if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
8081 *snapshotConflictHorizon = xvac;
8082 }
8083
8084 /*
8085 * Ignore tuples inserted by an aborted transaction or if the tuple was
8086 * updated/deleted by the inserting transaction.
8087 *
8088 * Look for a committed hint bit, or if no xmin bit is set, check clog.
8089 */
8090 if (HeapTupleHeaderXminCommitted(tuple) ||
8092 {
8093 if (xmax != xmin &&
8094 TransactionIdFollows(xmax, *snapshotConflictHorizon))
8095 *snapshotConflictHorizon = xmax;
8096 }
8097}
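
For example, a pruning-style caller ratchets a single horizon across every
tuple it is about to remove and then stores the final value in its WAL record.
A minimal sketch (the helper and its argument layout are illustrative):

/* Illustrative sketch: accumulate a conflict horizon while removing tuples */
static TransactionId
prune_conflict_horizon_sketch(HeapTupleHeader *victims, int nvictims)
{
    TransactionId snapshotConflictHorizon = InvalidTransactionId;

    for (int i = 0; i < nvictims; i++)
        HeapTupleHeaderAdvanceConflictHorizon(victims[i],
                                              &snapshotConflictHorizon);

    /*
     * The caller puts this value in its prune/delete WAL record; REDO then
     * passes it to ResolveRecoveryConflictWithSnapshot() on standbys.
     */
    return snapshotConflictHorizon;
}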
8098
8099#ifdef USE_PREFETCH
8100/*
8101 * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8102 * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8103 * we can prefetch, and which have already been prefetched; each call to this
8104 * function picks up where the previous call left off.
8105 *
8106 * Note: we expect the deltids array to be sorted in an order that groups TIDs
8107 * by heap block, with all TIDs for each block appearing together in exactly
8108 * one group.
8109 */
8110static void
8113 int prefetch_count)
8114{
8116 int count = 0;
8117 int i;
8118 int ndeltids = prefetch_state->ndeltids;
8119 TM_IndexDelete *deltids = prefetch_state->deltids;
8120
8121 for (i = prefetch_state->next_item;
8122 i < ndeltids && count < prefetch_count;
8123 i++)
8124 {
8125 ItemPointer htid = &deltids[i].tid;
8126
8129 {
8132 count++;
8133 }
8134 }
8135
8136 /*
8137 * Save the prefetch position so that next time we can continue from that
8138 * position.
8139 */
8140 prefetch_state->next_item = i;
8141 prefetch_state->cur_hblkno = cur_hblkno;
8142}
8143#endif
8144
8145/*
8146 * Helper function for heap_index_delete_tuples. Checks for index corruption
8147 * involving an invalid TID in index AM caller's index page.
8148 *
8149 * This is an ideal place for these checks. The index AM must hold a buffer
8150 * lock on the index page containing the TIDs we examine here, so we don't
8151 * have to worry about concurrent VACUUMs at all. We can be sure that the
8152 * index is corrupt when htid points directly to an LP_UNUSED item or
8153 * heap-only tuple, which is not the case during standard index scans.
8154 */
8155static inline void
8157 Page page, OffsetNumber maxoff,
8159{
8161 ItemId iid;
8162
8163 Assert(OffsetNumberIsValid(istatus->idxoffnum));
8164
8165 if (unlikely(indexpagehoffnum > maxoff))
8166 ereport(ERROR,
8168 errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8171 istatus->idxoffnum, delstate->iblknum,
8173
8175 if (unlikely(!ItemIdIsUsed(iid)))
8176 ereport(ERROR,
8178 errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8181 istatus->idxoffnum, delstate->iblknum,
8183
8184 if (ItemIdHasStorage(iid))
8185 {
8186 HeapTupleHeader htup;
8187
8189 htup = (HeapTupleHeader) PageGetItem(page, iid);
8190
8192 ereport(ERROR,
8194 errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8197 istatus->idxoffnum, delstate->iblknum,
8199 }
8200}
8201
8202/*
8203 * heapam implementation of tableam's index_delete_tuples interface.
8204 *
8205 * This helper function is called by index AMs during index tuple deletion.
8206 * See tableam header comments for an explanation of the interface implemented
8207 * here and a general theory of operation. Note that each call here is either
8208 * a simple index deletion call, or a bottom-up index deletion call.
8209 *
8210 * It's possible for this to generate a fair amount of I/O, since we may be
8211 * deleting hundreds of tuples from a single index block. To amortize that
8212 * cost to some degree, this uses prefetching and combines repeat accesses to
8213 * the same heap block.
8214 */
8217{
8218 /* Initial assumption is that earlier pruning took care of conflict */
8219 TransactionId snapshotConflictHorizon = InvalidTransactionId;
8222 Page page = NULL;
8225#ifdef USE_PREFETCH
8228#endif
8230 int finalndeltids = 0,
8231 nblocksaccessed = 0;
8232
8233 /* State that's only used in bottom-up index deletion case */
8234 int nblocksfavorable = 0;
8235 int curtargetfreespace = delstate->bottomupfreespace,
8236 lastfreespace = 0,
8237 actualfreespace = 0;
8238 bool bottomup_final_block = false;
8239
 8240 InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
 8241
8242 /* Sort caller's deltids array by TID for further processing */
 8243 index_delete_sort(delstate);
 8244
8245 /*
8246 * Bottom-up case: resort deltids array in an order attuned to where the
8247 * greatest number of promising TIDs are to be found, and determine how
8248 * many blocks from the start of sorted array should be considered
8249 * favorable. This will also shrink the deltids array in order to
8250 * eliminate completely unfavorable blocks up front.
8251 */
8252 if (delstate->bottomup)
 8253 nblocksfavorable = bottomup_sort_and_shrink(delstate);
 8254
8255#ifdef USE_PREFETCH
8256 /* Initialize prefetch state. */
 8257 prefetch_state.cur_hblkno = InvalidBlockNumber;
 8258 prefetch_state.next_item = 0;
8259 prefetch_state.ndeltids = delstate->ndeltids;
8260 prefetch_state.deltids = delstate->deltids;
8261
8262 /*
8263 * Determine the prefetch distance that we will attempt to maintain.
8264 *
8265 * Since the caller holds a buffer lock somewhere in rel, we'd better make
8266 * sure that isn't a catalog relation before we call code that does
8267 * syscache lookups, to avoid risk of deadlock.
8268 */
8269 if (IsCatalogRelation(rel))
 8270 prefetch_distance = maintenance_io_concurrency;
 8271 else
 8272 prefetch_distance =
 8273 get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
 8274
8275 /* Cap initial prefetch distance for bottom-up deletion caller */
8276 if (delstate->bottomup)
8277 {
 8278 Assert(nblocksfavorable >= 1);
 8279 Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
 8280 prefetch_distance = Min(prefetch_distance, nblocksfavorable);
 8281 }
8282
8283 /* Start prefetching. */
 8284 index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
 8285#endif
8286
8287 /* Iterate over deltids, determine which to delete, check their horizon */
8288 Assert(delstate->ndeltids > 0);
8289 for (int i = 0; i < delstate->ndeltids; i++)
8290 {
8291 TM_IndexDelete *ideltid = &delstate->deltids[i];
8292 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8293 ItemPointer htid = &ideltid->tid;
8294 OffsetNumber offnum;
8295
8296 /*
8297 * Read buffer, and perform required extra steps each time a new block
8298 * is encountered. Avoid refetching if it's the same block as the one
8299 * from the last htid.
8300 */
8301 if (blkno == InvalidBlockNumber ||
 8302 ItemPointerGetBlockNumber(htid) != blkno)
 8303 {
8304 /*
8305 * Consider giving up early for bottom-up index deletion caller
8306 * first. (Only prefetch next-next block afterwards, when it
8307 * becomes clear that we're at least going to access the next
8308 * block in line.)
8309 *
8310 * Sometimes the first block frees so much space for bottom-up
8311 * caller that the deletion process can end without accessing any
8312 * more blocks. It is usually necessary to access 2 or 3 blocks
8313 * per bottom-up deletion operation, though.
8314 */
8315 if (delstate->bottomup)
8316 {
8317 /*
8318 * We often allow caller to delete a few additional items
8319 * whose entries we reached after the point that space target
8320 * from caller was satisfied. The cost of accessing the page
8321 * was already paid at that point, so it made sense to finish
8322 * it off. When that happened, we finalize everything here
8323 * (by finishing off the whole bottom-up deletion operation
8324 * without needlessly paying the cost of accessing any more
8325 * blocks).
8326 */
 8327 if (bottomup_final_block)
 8328 break;
8329
8330 /*
8331 * Give up when we didn't enable our caller to free any
8332 * additional space as a result of processing the page that we
8333 * just finished up with. This rule is the main way in which
8334 * we keep the cost of bottom-up deletion under control.
8335 */
 8336 if (actualfreespace == lastfreespace)
 8337 break;
8338 lastfreespace = actualfreespace; /* for next time */
8339
8340 /*
8341 * Deletion operation (which is bottom-up) will definitely
8342 * access the next block in line. Prepare for that now.
8343 *
8344 * Decay target free space so that we don't hang on for too
8345 * long with a marginal case. (Space target is only truly
8346 * helpful when it allows us to recognize that we don't need
8347 * to access more than 1 or 2 blocks to satisfy caller due to
8348 * agreeable workload characteristics.)
8349 *
8350 * We are a bit more patient when we encounter contiguous
8351 * blocks, though: these are treated as favorable blocks. The
8352 * decay process is only applied when the next block in line
8353 * is not a favorable/contiguous block. This is not an
8354 * exception to the general rule; we still insist on finding
8355 * at least one deletable item per block accessed. See
8356 * bottomup_nblocksfavorable() for full details of the theory
8357 * behind favorable blocks and heap block locality in general.
8358 *
8359 * Note: The first block in line is always treated as a
8360 * favorable block, so the earliest possible point that the
8361 * decay can be applied is just before we access the second
8362 * block in line. The Assert() verifies this for us.
8363 */
 8364 Assert(nblocksaccessed > 0);
 8365 if (nblocksfavorable > 0)
 8366 nblocksfavorable--;
 8367 else
8368 curtargetfreespace /= 2;
8369 }
8370
8371 /* release old buffer */
8372 if (BufferIsValid(buf))
 8373 UnlockReleaseBuffer(buf);
 8374
 8375 blkno = ItemPointerGetBlockNumber(htid);
 8376 buf = ReadBuffer(rel, blkno);
 8377 nblocksaccessed++;
 8378 Assert(!delstate->bottomup ||
 8379 nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
 8380
8381#ifdef USE_PREFETCH
8382
8383 /*
8384 * To maintain the prefetch distance, prefetch one more page for
8385 * each page we read.
8386 */
 8387 index_delete_prefetch_buffer(rel, &prefetch_state, 1);
 8388#endif
8389
 8390 LockBuffer(buf, BUFFER_LOCK_SHARE);
 8391
8392 page = BufferGetPage(buf);
8393 maxoff = PageGetMaxOffsetNumber(page);
8394 }
8395
8396 /*
8397 * In passing, detect index corruption involving an index page with a
8398 * TID that points to a location in the heap that couldn't possibly be
8399 * correct. We only do this with actual TIDs from caller's index page
8400 * (not items reached by traversing through a HOT chain).
8401 */
 8402 index_delete_check_htid(delstate, page, maxoff, htid, istatus);
 8403
8404 if (istatus->knowndeletable)
8405 Assert(!delstate->bottomup && !istatus->promising);
8406 else
8407 {
8408 ItemPointerData tmp = *htid;
 8409 HeapTupleData heapTuple;
 8410
8411 /* Are any tuples from this HOT chain non-vacuumable? */
 8412 if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
 8413 &heapTuple, NULL, true))
8414 continue; /* can't delete entry */
8415
8416 /* Caller will delete, since whole HOT chain is vacuumable */
8417 istatus->knowndeletable = true;
8418
8419 /* Maintain index free space info for bottom-up deletion case */
8420 if (delstate->bottomup)
8421 {
8422 Assert(istatus->freespace > 0);
8423 actualfreespace += istatus->freespace;
 8424 if (actualfreespace >= curtargetfreespace)
 8425 bottomup_final_block = true;
8426 }
8427 }
8428
8429 /*
8430 * Maintain snapshotConflictHorizon value for deletion operation as a
8431 * whole by advancing current value using heap tuple headers. This is
8432 * loosely based on the logic for pruning a HOT chain.
8433 */
 8434 offnum = ItemPointerGetOffsetNumber(htid);
 8435 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8436 for (;;)
8437 {
8438 ItemId lp;
8439 HeapTupleHeader htup;
8440
8441 /* Sanity check (pure paranoia) */
8442 if (offnum < FirstOffsetNumber)
8443 break;
8444
8445 /*
8446 * An offset past the end of page's line pointer array is possible
8447 * when the array was truncated
8448 */
8449 if (offnum > maxoff)
8450 break;
8451
8452 lp = PageGetItemId(page, offnum);
 8453 if (ItemIdIsRedirected(lp))
 8454 {
8455 offnum = ItemIdGetRedirect(lp);
8456 continue;
8457 }
8458
8459 /*
8460 * We'll often encounter LP_DEAD line pointers (especially with an
8461 * entry marked knowndeletable by our caller up front). No heap
8462 * tuple headers get examined for an htid that leads us to an
8463 * LP_DEAD item. This is okay because the earlier pruning
8464 * operation that made the line pointer LP_DEAD in the first place
8465 * must have considered the original tuple header as part of
8466 * generating its own snapshotConflictHorizon value.
8467 *
8468 * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8469 * the same strategy that index vacuuming uses in all cases. Index
8470 * VACUUM WAL records don't even have a snapshotConflictHorizon
8471 * field of their own for this reason.
8472 */
8473 if (!ItemIdIsNormal(lp))
8474 break;
8475
8476 htup = (HeapTupleHeader) PageGetItem(page, lp);
8477
8478 /*
8479 * Check the tuple XMIN against prior XMAX, if any
8480 */
 8481 if (TransactionIdIsValid(priorXmax) &&
 8482 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
 8483 break;
8484
 8485 HeapTupleHeaderAdvanceConflictHorizon(htup,
 8486 &snapshotConflictHorizon);
8487
8488 /*
8489 * If the tuple is not HOT-updated, then we are at the end of this
8490 * HOT-chain. No need to visit later tuples from the same update
8491 * chain (they get their own index entries) -- just move on to
8492 * next htid from index AM caller.
8493 */
8494 if (!HeapTupleHeaderIsHotUpdated(htup))
8495 break;
8496
8497 /* Advance to next HOT chain member */
8498 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8499 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
 8500 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
 8501 }
8502
8503 /* Enable further/final shrinking of deltids for caller */
8504 finalndeltids = i + 1;
8505 }
8506
 8507 UnlockReleaseBuffer(buf);
 8508
8509 /*
8510 * Shrink deltids array to exclude non-deletable entries at the end. This
8511 * is not just a minor optimization. Final deltids array size might be
8512 * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8513 * ndeltids being zero in all cases with zero total deletable entries.
8514 */
8515 Assert(finalndeltids > 0 || delstate->bottomup);
8516 delstate->ndeltids = finalndeltids;
8517
8518 return snapshotConflictHorizon;
8519}
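A hedged sketch of the caller's side of this interface (illustrative only, not part of heapam.c; real index AMs go through table_index_delete_tuples() and do considerably more bookkeeping): it builds a simple deletion request for heap TIDs whose index entries are already known dead, and uses the returned conflict horizon for the index AM's own deletion WAL record. Field names follow TM_IndexDeleteOp, TM_IndexDelete and TM_IndexStatus as used above.

static TransactionId
simple_index_delete_sketch(Relation heapRel, Relation indexRel, Buffer ibuf,
                           ItemPointerData *htids, OffsetNumber *ioffnums,
                           int nitems)
{
    TM_IndexDeleteOp delstate;

    delstate.irel = indexRel;
    delstate.iblknum = BufferGetBlockNumber(ibuf);
    delstate.bottomup = false;          /* simple deletion, not bottom-up */
    delstate.bottomupfreespace = 0;
    delstate.ndeltids = nitems;
    delstate.deltids = palloc(nitems * sizeof(TM_IndexDelete));
    delstate.status = palloc(nitems * sizeof(TM_IndexStatus));

    for (int i = 0; i < nitems; i++)
    {
        delstate.deltids[i].tid = htids[i];
        delstate.deltids[i].id = i;
        delstate.status[i].idxoffnum = ioffnums[i];
        delstate.status[i].knowndeletable = true;   /* entries are LP_DEAD */
        delstate.status[i].promising = false;
        delstate.status[i].freespace = 0;           /* only used by bottom-up */
    }

    /* heapam cross-checks each TID and computes the conflict horizon */
    return heap_index_delete_tuples(heapRel, &delstate);
}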
8520
8521/*
8522 * Specialized inlineable comparison function for index_delete_sort()
8523 */
8524static inline int
 8525index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
 8526{
 8527 ItemPointer tid1 = &deltid1->tid;
 8528 ItemPointer tid2 = &deltid2->tid;
 8529
 8530 {
 8531 BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
 8532 BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
 8533
8534 if (blk1 != blk2)
8535 return (blk1 < blk2) ? -1 : 1;
8536 }
 8537 {
 8538 OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
 8539 OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
 8540
8541 if (pos1 != pos2)
8542 return (pos1 < pos2) ? -1 : 1;
8543 }
8544
8545 Assert(false);
8546
8547 return 0;
8548}
8549
8550/*
8551 * Sort deltids array from delstate by TID. This prepares it for further
8552 * processing by heap_index_delete_tuples().
8553 *
8554 * This operation becomes a noticeable consumer of CPU cycles with some
8555 * workloads, so we go to the trouble of specialization/micro optimization.
8556 * We use shellsort for this because it's easy to specialize, compiles to
8557 * relatively few instructions, and is adaptive to presorted inputs/subsets
8558 * (which are typical here).
8559 */
8560static void
8562{
8563 TM_IndexDelete *deltids = delstate->deltids;
8564 int ndeltids = delstate->ndeltids;
8565
8566 /*
8567 * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8568 *
8569 * This implementation is fast with array sizes up to ~4500. This covers
8570 * all supported BLCKSZ values.
8571 */
8572 const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8573
8574 /* Think carefully before changing anything here -- keep swaps cheap */
8575 StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8576 "element size exceeds 8 bytes");
8577
8578 for (int g = 0; g < lengthof(gaps); g++)
8579 {
8580 for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8581 {
8582 TM_IndexDelete d = deltids[i];
8583 int j = i;
8584
8585 while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8586 {
8587 deltids[j] = deltids[j - hi];
8588 j -= hi;
8589 }
8590 deltids[j] = d;
8591 }
8592 }
8593}
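The same pass structure on a plain int array may make the gap sequence easier to follow. This is an illustrative sketch only, not part of heapam.c: each gap pass is an insertion sort over elements that lie 'gap' positions apart, and the final gap of 1 is an ordinary insertion sort over an almost-sorted array.

static void
shellsort_int_sketch(int *a, int n)
{
    static const int gaps[] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};

    for (int g = 0; g < (int) lengthof(gaps); g++)
    {
        for (int hi = gaps[g], i = hi; i < n; i++)
        {
            int     d = a[i];
            int     j = i;

            /* shift larger elements of this gap-subsequence to the right */
            while (j >= hi && a[j - hi] > d)
            {
                a[j] = a[j - hi];
                j -= hi;
            }
            a[j] = d;
        }
    }
}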
8594
8595/*
8596 * Returns how many blocks should be considered favorable/contiguous for a
8597 * bottom-up index deletion pass. This is a number of heap blocks that starts
8598 * from and includes the first block in line.
8599 *
8600 * There is always at least one favorable block during bottom-up index
8601 * deletion. In the worst case (i.e. with totally random heap blocks) the
8602 * first block in line (the only favorable block) can be thought of as a
8603 * degenerate array of contiguous blocks that consists of a single block.
8604 * heap_index_delete_tuples() will expect this.
8605 *
8606 * Caller passes blockgroups, a description of the final order that deltids
8607 * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8608 * processing. Note that deltids need not actually be sorted just yet (caller
8609 * only passes deltids to us so that we can interpret blockgroups).
8610 *
8611 * You might guess that the existence of contiguous blocks cannot matter much,
8612 * since in general the main factor that determines which blocks we visit is
8613 * the number of promising TIDs, which is a fixed hint from the index AM.
8614 * We're not really targeting the general case, though -- the actual goal is
8615 * to adapt our behavior to a wide variety of naturally occurring conditions.
8616 * The effects of most of the heuristics we apply are only noticeable in the
8617 * aggregate, over time and across many _related_ bottom-up index deletion
8618 * passes.
8619 *
8620 * Deeming certain blocks favorable allows heapam to recognize and adapt to
8621 * workloads where heap blocks visited during bottom-up index deletion can be
8622 * accessed contiguously, in the sense that each newly visited block is the
8623 * neighbor of the block that bottom-up deletion just finished processing (or
8624 * close enough to it). It will likely be cheaper to access more favorable
8625 * blocks sooner rather than later (e.g. in this pass, not across a series of
8626 * related bottom-up passes). Either way it is probably only a matter of time
8627 * (or a matter of further correlated version churn) before all blocks that
8628 * appear together as a single large batch of favorable blocks get accessed by
8629 * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8630 * appear almost constantly or not even once (it all depends on per-index
8631 * workload characteristics).
8632 *
8633 * Note that the blockgroups sort order applies a power-of-two bucketing
8634 * scheme that creates opportunities for contiguous groups of blocks to get
8635 * batched together, at least with workloads that are naturally amenable to
8636 * being driven by heap block locality. This doesn't just enhance the spatial
8637 * locality of bottom-up heap block processing in the obvious way. It also
8638 * enables temporal locality of access, since sorting by heap block number
8639 * naturally tends to make the bottom-up processing order deterministic.
8640 *
8641 * Consider the following example to get a sense of how temporal locality
8642 * might matter: There is a heap relation with several indexes, each of which
8643 * is low to medium cardinality. It is subject to constant non-HOT updates.
8644 * The updates are skewed (in one part of the primary key, perhaps). None of
8645 * the indexes are logically modified by the UPDATE statements (if they were
8646 * then bottom-up index deletion would not be triggered in the first place).
8647 * Naturally, each new round of index tuples (for each heap tuple that gets a
8648 * heap_update() call) will have the same heap TID in each and every index.
8649 * Since these indexes are low cardinality and never get logically modified,
8650 * heapam processing during bottom-up deletion passes will access heap blocks
8651 * in approximately sequential order. Temporal locality of access occurs due
8652 * to bottom-up deletion passes behaving very similarly across each of the
8653 * indexes at any given moment. This keeps the number of buffer misses needed
8654 * to visit heap blocks to a minimum.
8655 */
8656static int
8658 TM_IndexDelete *deltids)
8659{
8660 int64 lastblock = -1;
8661 int nblocksfavorable = 0;
8662
 8663 Assert(nblockgroups >= 1);
 8664 Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
 8665
8666 /*
8667 * We tolerate heap blocks that will be accessed only slightly out of
8668 * physical order. Small blips occur when a pair of almost-contiguous
8669 * blocks happen to fall into different buckets (perhaps due only to a
8670 * small difference in npromisingtids that the bucketing scheme didn't
8671 * quite manage to ignore). We effectively ignore these blips by applying
8672 * a small tolerance. The precise tolerance we use is a little arbitrary,
8673 * but it works well enough in practice.
8674 */
8675 for (int b = 0; b < nblockgroups; b++)
8676 {
8677 IndexDeleteCounts *group = blockgroups + b;
 8678 TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
 8679 BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
 8680
 8681 if (lastblock != -1 &&
 8682 ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
 8683 (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
 8684 break;
 8685
 8686 nblocksfavorable++;
 8687 lastblock = block;
8688 }
8689
 8690 /* Always indicate that there is at least 1 favorable block */
 8691 nblocksfavorable = Max(nblocksfavorable, 1);
 8692
8693 return nblocksfavorable;
8694}
8695
8696/*
8697 * qsort comparison function for bottomup_sort_and_shrink()
8698 */
8699static int
8700bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8701{
 8702 const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
 8703 const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
 8704
8705 /*
8706 * Most significant field is npromisingtids (which we invert the order of
8707 * so as to sort in desc order).
8708 *
8709 * Caller should have already normalized npromisingtids fields into
8710 * power-of-two values (buckets).
8711 */
8712 if (group1->npromisingtids > group2->npromisingtids)
8713 return -1;
8714 if (group1->npromisingtids < group2->npromisingtids)
8715 return 1;
8716
8717 /*
8718 * Tiebreak: desc ntids sort order.
8719 *
8720 * We cannot expect power-of-two values for ntids fields. We should
8721 * behave as if they were already rounded up for us instead.
8722 */
8723 if (group1->ntids != group2->ntids)
8724 {
 8725 uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
 8726 uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
 8727
8728 if (ntids1 > ntids2)
8729 return -1;
8730 if (ntids1 < ntids2)
8731 return 1;
8732 }
8733
8734 /*
8735 * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8736 * block in deltids array) order.
8737 *
8738 * This is equivalent to sorting in ascending heap block number order
8739 * (among otherwise equal subsets of the array). This approach allows us
8740 * to avoid accessing the out-of-line TID. (We rely on the assumption
8741 * that the deltids array was sorted in ascending heap TID order when
8742 * these offsets to the first TID from each heap block group were formed.)
8743 */
8744 if (group1->ifirsttid > group2->ifirsttid)
8745 return 1;
8746 if (group1->ifirsttid < group2->ifirsttid)
8747 return -1;
8748
 8749 pg_unreachable();
 8750
8751 return 0;
8752}
8753
8754/*
8755 * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8756 *
8757 * Sorts deltids array in the order needed for useful processing by bottom-up
8758 * deletion. The array should already be sorted in TID order when we're
8759 * called. The sort process groups heap TIDs from deltids into heap block
8760 * groupings. Earlier/more-promising groups/blocks are usually those that are
8761 * known to have the most "promising" TIDs.
8762 *
8763 * Sets new size of deltids array (ndeltids) in state. deltids will only have
8764 * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8765 * return. This often means that deltids will be shrunk to a small fraction
8766 * of its original size (we eliminate many heap blocks from consideration for
8767 * caller up front).
8768 *
8769 * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8770 * for a definition and full details.
8771 */
8772static int
 8773bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
 8774{
 8775 IndexDeleteCounts *blockgroups;
 8776 TM_IndexDelete *reordereddeltids;
 8777 BlockNumber curblock = InvalidBlockNumber;
 8778 int nblockgroups = 0;
8779 int ncopied = 0;
8780 int nblocksfavorable = 0;
8781
8782 Assert(delstate->bottomup);
8783 Assert(delstate->ndeltids > 0);
8784
 8785 /* Calculate per-heap-block count of TIDs */
 8786 blockgroups = palloc(sizeof(IndexDeleteCounts) * BOTTOMUP_MAX_NBLOCKS);
8787 for (int i = 0; i < delstate->ndeltids; i++)
8788 {
8789 TM_IndexDelete *ideltid = &delstate->deltids[i];
8790 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8791 ItemPointer htid = &ideltid->tid;
8792 bool promising = istatus->promising;
8793
8794 if (curblock != ItemPointerGetBlockNumber(htid))
8795 {
8796 /* New block group */
8797 nblockgroups++;
8798
 8799 Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS ||
 8800 !BlockNumberIsValid(curblock));
8801
8802 curblock = ItemPointerGetBlockNumber(htid);
8803 blockgroups[nblockgroups - 1].ifirsttid = i;
8804 blockgroups[nblockgroups - 1].ntids = 1;
8805 blockgroups[nblockgroups - 1].npromisingtids = 0;
8806 }
8807 else
8808 {
8809 blockgroups[nblockgroups - 1].ntids++;
8810 }
8811
8812 if (promising)
8813 blockgroups[nblockgroups - 1].npromisingtids++;
8814 }
8815
8816 /*
8817 * We're about ready to sort block groups to determine the optimal order
8818 * for visiting heap blocks. But before we do, round the number of
8819 * promising tuples for each block group up to the next power-of-two,
8820 * unless it is very low (less than 4), in which case we round up to 4.
8821 * npromisingtids is far too noisy to trust when choosing between a pair
8822 * of block groups that both have very low values.
8823 *
8824 * This scheme divides heap blocks/block groups into buckets. Each bucket
8825 * contains blocks that have _approximately_ the same number of promising
8826 * TIDs as each other. The goal is to ignore relatively small differences
8827 * in the total number of promising entries, so that the whole process can
8828 * give a little weight to heapam factors (like heap block locality)
8829 * instead. This isn't a trade-off, really -- we have nothing to lose. It
8830 * would be foolish to interpret small differences in npromisingtids
8831 * values as anything more than noise.
8832 *
8833 * We tiebreak on nhtids when sorting block group subsets that have the
8834 * same npromisingtids, but this has the same issues as npromisingtids,
8835 * and so nhtids is subject to the same power-of-two bucketing scheme. The
8836 * only reason that we don't fix nhtids in the same way here too is that
8837 * we'll need accurate nhtids values after the sort. We handle nhtids
8838 * bucketization dynamically instead (in the sort comparator).
8839 *
8840 * See bottomup_nblocksfavorable() for a full explanation of when and how
8841 * heap locality/favorable blocks can significantly influence when and how
8842 * heap blocks are accessed.
8843 */
8844 for (int b = 0; b < nblockgroups; b++)
8845 {
8846 IndexDeleteCounts *group = blockgroups + b;
8847
8848 /* Better off falling back on nhtids with low npromisingtids */
8849 if (group->npromisingtids <= 4)
8850 group->npromisingtids = 4;
8851 else
8852 group->npromisingtids =
 8853 pg_nextpower2_32((uint32) group->npromisingtids);
 8854 }
8855
 8856 /* Sort groups and rearrange caller's deltids array */
 8857 qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
 8858 bottomup_sort_and_shrink_cmp);
 8859 reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
 8860
 8861 nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
 8862 /* Determine number of favorable blocks at the start of final deltids */
 8863 nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
 8864 delstate->deltids);
8865
8866 for (int b = 0; b < nblockgroups; b++)
8867 {
8868 IndexDeleteCounts *group = blockgroups + b;
8869 TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8870
8872 sizeof(TM_IndexDelete) * group->ntids);
8873 ncopied += group->ntids;
8874 }
8875
8876 /* Copy final grouped and sorted TIDs back into start of caller's array */
 8877 memcpy(delstate->deltids, reordereddeltids,
 8878 sizeof(TM_IndexDelete) * ncopied);
8879 delstate->ndeltids = ncopied;
8880
 8881 pfree(reordereddeltids);
 8882 pfree(blockgroups);
 8883
8884 return nblocksfavorable;
8885}
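To make the bucketing concrete, here is an illustrative sketch (not part of heapam.c) of how two block groups with 5 and 7 promising TIDs end up in the same power-of-two bucket, so that the qsort comparator falls through to its ntids tiebreak instead of trusting the noisy 5-versus-7 difference:

static void
bottomup_bucketing_sketch(void)
{
    IndexDeleteCounts groups[2] = {
        {.npromisingtids = 5, .ntids = 30, .ifirsttid = 0},
        {.npromisingtids = 7, .ntids = 12, .ifirsttid = 30},
    };

    /* same rounding rule as bottomup_sort_and_shrink() */
    for (int b = 0; b < 2; b++)
    {
        if (groups[b].npromisingtids <= 4)
            groups[b].npromisingtids = 4;
        else
            groups[b].npromisingtids =
                pg_nextpower2_32((uint32) groups[b].npromisingtids);
    }

    /*
     * Both groups now report 8 promising TIDs, so the comparator decides on
     * the (power-of-two bucketed) ntids values, and then on ifirsttid.
     */
    qsort(groups, 2, sizeof(IndexDeleteCounts),
          bottomup_sort_and_shrink_cmp);
}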
8886
8887/*
8888 * Perform XLogInsert for a heap-update operation. Caller must already
8889 * have modified the buffer(s) and marked them dirty.
8890 */
8891static XLogRecPtr
 8892log_heap_update(Relation reln, Buffer oldbuf,
 8893 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
 8894 HeapTuple old_key_tuple, bool all_visible_cleared,
 8895 bool new_all_visible_cleared)
 8896{
 8897 xl_heap_update xlrec;
 8898 xl_heap_header xlhdr;
 8899 xl_heap_header xlhdr_idx;
 8900 uint8 info;
 8901 uint16 prefix_suffix[2];
 8902 uint16 prefixlen = 0,
 8903 suffixlen = 0;
 8904 XLogRecPtr recptr;
 8905 Page page = BufferGetPage(newbuf);
 8906 bool need_tuple_data = RelationIsLogicallyLogged(reln);
8907 bool init;
8908 int bufflags;
8909
8910 /* Caller should not call me on a non-WAL-logged relation */
 8911 Assert(RelationNeedsWAL(reln));
 8912
 8913 XLogBeginInsert();
 8914
 8915 if (HeapTupleIsHeapOnly(newtup))
 8916 info = XLOG_HEAP_HOT_UPDATE;
8917 else
8918 info = XLOG_HEAP_UPDATE;
8919
8920 /*
8921 * If the old and new tuple are on the same page, we only need to log the
8922 * parts of the new tuple that were changed. That saves on the amount of
8923 * WAL we need to write. Currently, we just count any unchanged bytes in
8924 * the beginning and end of the tuple. That's quick to check, and
8925 * perfectly covers the common case that only one field is updated.
8926 *
8927 * We could do this even if the old and new tuple are on different pages,
8928 * but only if we don't make a full-page image of the old page, which is
8929 * difficult to know in advance. Also, if the old tuple is corrupt for
 8930 * some reason, it would allow the corruption to propagate to the new page,
8931 * so it seems best to avoid. Under the general assumption that most
8932 * updates tend to create the new tuple version on the same page, there
8933 * isn't much to be gained by doing this across pages anyway.
8934 *
8935 * Skip this if we're taking a full-page image of the new page, as we
8936 * don't include the new tuple in the WAL record in that case. Also
8937 * disable if effective_wal_level='logical', as logical decoding needs to
8938 * be able to read the new tuple in whole from the WAL record alone.
8939 */
8940 if (oldbuf == newbuf && !need_tuple_data &&
 8941 !XLogCheckBufferNeedsBackup(newbuf))
 8942 {
8943 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8944 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8945 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8946 int newlen = newtup->t_len - newtup->t_data->t_hoff;
8947
8948 /* Check for common prefix between old and new tuple */
8949 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8950 {
8951 if (newp[prefixlen] != oldp[prefixlen])
8952 break;
8953 }
8954
8955 /*
8956 * Storing the length of the prefix takes 2 bytes, so we need to save
8957 * at least 3 bytes or there's no point.
8958 */
8959 if (prefixlen < 3)
8960 prefixlen = 0;
8961
8962 /* Same for suffix */
 8963 for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
 8964 {
8965 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8966 break;
8967 }
8968 if (suffixlen < 3)
8969 suffixlen = 0;
8970 }
8971
8972 /* Prepare main WAL data chain */
8973 xlrec.flags = 0;
 8974 if (all_visible_cleared)
 8975 xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
 8976 if (new_all_visible_cleared)
 8977 xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
 8978 if (prefixlen > 0)
 8979 xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
 8980 if (suffixlen > 0)
 8981 xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
 8982 if (need_tuple_data)
 8983 {
 8984 xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
 8985 if (old_key_tuple)
 8986 {
 8987 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
 8988 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
 8989 else
 8990 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
 8991 }
8992 }
8993
8994 /* If new tuple is the single and first tuple on page... */
 8995 if (ItemPointerGetOffsetNumber(&newtup->t_self) == FirstOffsetNumber &&
 8996 PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
 8997 {
8998 info |= XLOG_HEAP_INIT_PAGE;
8999 init = true;
9000 }
9001 else
9002 init = false;
9003
9004 /* Prepare WAL data for the old page */
9005 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
9006 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
9007 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
9008 oldtup->t_data->t_infomask2);
9009
9010 /* Prepare WAL data for the new page */
9011 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
9012 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
9013
 9014 bufflags = REGBUF_STANDARD;
 9015 if (init)
 9016 bufflags |= REGBUF_WILL_INIT;
 9017 if (need_tuple_data)
 9018 bufflags |= REGBUF_KEEP_DATA;
 9019
 9020 XLogRegisterBuffer(0, newbuf, bufflags);
 9021 if (oldbuf != newbuf)
 9022 XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
 9023
 9024 XLogRegisterData(&xlrec, SizeOfHeapUpdate);
 9025
9026 /*
9027 * Prepare WAL data for the new tuple.
9028 */
9029 if (prefixlen > 0 || suffixlen > 0)
9030 {
9031 if (prefixlen > 0 && suffixlen > 0)
9032 {
 9033 prefix_suffix[0] = prefixlen;
 9034 prefix_suffix[1] = suffixlen;
 9035 XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
9036 }
9037 else if (prefixlen > 0)
9038 {
9039 XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
9040 }
9041 else
9042 {
9043 XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
9044 }
9045 }
9046
9047 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
9048 xlhdr.t_infomask = newtup->t_data->t_infomask;
9049 xlhdr.t_hoff = newtup->t_data->t_hoff;
 9050 Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
 9051
9052 /*
9053 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9054 *
9055 * The 'data' doesn't include the common prefix or suffix.
9056 */
 9057 XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
 9058 if (prefixlen == 0)
 9059 {
 9060 XLogRegisterBufData(0,
 9061 (char *) newtup->t_data + SizeofHeapTupleHeader,
 9062 newtup->t_len - SizeofHeapTupleHeader - suffixlen);
 9063 }
9064 else
9065 {
9066 /*
9067 * Have to write the null bitmap and data after the common prefix as
9068 * two separate rdata entries.
9069 */
9070 /* bitmap [+ padding] [+ oid] */
9071 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9072 {
 9073 XLogRegisterBufData(0,
 9074 (char *) newtup->t_data + SizeofHeapTupleHeader,
9075 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9076 }
9077
9078 /* data after common prefix */
 9079 XLogRegisterBufData(0,
 9080 (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
9081 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9082 }
9083
9084 /* We need to log a tuple identity */
 9085 if (need_tuple_data && old_key_tuple)
 9086 {
 9087 /* don't really need this, but it's more comfy to decode */
9088 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9089 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9090 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9091
 9092 XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
 9093
9094 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
 9095 XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
 9096 old_key_tuple->t_len - SizeofHeapTupleHeader);
 9097 }
9098
9099 /* filtering by origin on a row level is much more efficient */
 9100 XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
 9101
9102 recptr = XLogInsert(RM_HEAP_ID, info);
9103
9104 return recptr;
9105}
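The prefix/suffix trick above is simple enough to isolate. The following sketch (illustrative only, not part of heapam.c) applies the same rule to two raw byte buffers, including the "at least 3 bytes or it's not worth the 2-byte length field" cutoff, so only the middle span between the shared prefix and suffix would need to go into the WAL record:

static void
update_delta_sketch(const char *oldp, int oldlen,
                    const char *newp, int newlen,
                    uint16 *prefixlen, uint16 *suffixlen)
{
    uint16      p,
                s;

    /* count matching leading bytes */
    for (p = 0; p < Min(oldlen, newlen); p++)
    {
        if (newp[p] != oldp[p])
            break;
    }
    if (p < 3)
        p = 0;

    /* count matching trailing bytes, never overlapping the prefix */
    for (s = 0; s < Min(oldlen, newlen) - p; s++)
    {
        if (newp[newlen - s - 1] != oldp[oldlen - s - 1])
            break;
    }
    if (s < 3)
        s = 0;

    *prefixlen = p;
    *suffixlen = s;
}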
9106
9107/*
9108 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9109 *
9110 * This is only used when effective_wal_level is logical, and only for
9111 * catalog tuples.
9112 */
9113static XLogRecPtr
 9114log_heap_new_cid(Relation relation, HeapTuple tup)
 9115{
 9116 xl_heap_new_cid xlrec;
 9117
 9118 XLogRecPtr recptr;
 9119 HeapTupleHeader hdr = tup->t_data;
9120
9121 Assert(ItemPointerIsValid(&tup->t_self));
9122 Assert(tup->t_tableOid != InvalidOid);
9123
9124 xlrec.top_xid = GetTopTransactionId();
9125 xlrec.target_locator = relation->rd_locator;
9126 xlrec.target_tid = tup->t_self;
9127
9128 /*
9129 * If the tuple got inserted & deleted in the same TX we definitely have a
9130 * combo CID, set cmin and cmax.
9131 */
9132 if (hdr->t_infomask & HEAP_COMBOCID)
9133 {
 9134 Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
 9135 Assert(!HeapTupleHeaderXminInvalid(hdr));
 9136 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9137 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9138 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9139 }
9140 /* No combo CID, so only cmin or cmax can be set by this TX */
9141 else
9142 {
9143 /*
9144 * Tuple inserted.
9145 *
9146 * We need to check for LOCK ONLY because multixacts might be
9147 * transferred to the new tuple in case of FOR KEY SHARE updates in
9148 * which case there will be an xmax, although the tuple just got
9149 * inserted.
9150 */
9151 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
 9152 HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
 9153 {
 9154 xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
 9155 xlrec.cmax = InvalidCommandId;
9156 }
9157 /* Tuple from a different tx updated or deleted. */
9158 else
9159 {
9160 xlrec.cmin = InvalidCommandId;
 9161 xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
 9162 }
9163 xlrec.combocid = InvalidCommandId;
9164 }
9165
9166 /*
9167 * Note that we don't need to register the buffer here, because this
9168 * operation does not modify the page. The insert/update/delete that
9169 * called us certainly did, but that's WAL-logged separately.
9170 */
 9171 XLogBeginInsert();
 9172 XLogRegisterData(&xlrec, SizeOfHeapNewCid);
 9173
9174 /* will be looked at irrespective of origin */
9175
 9176 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
 9177
9178 return recptr;
9179}
9180
9181/*
9182 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9183 * the old tuple in an UPDATE or DELETE.
9184 *
9185 * Returns NULL if there's no need to log an identity or if there's no suitable
9186 * key defined.
9187 *
9188 * Pass key_required true if any replica identity columns changed value, or if
9189 * any of them have any external data. Delete must always pass true.
9190 *
9191 * *copy is set to true if the returned tuple is a modified copy rather than
9192 * the same tuple that was passed in.
9193 */
9194static HeapTuple
 9195ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
 9196 bool *copy)
9197{
9198 TupleDesc desc = RelationGetDescr(relation);
9199 char replident = relation->rd_rel->relreplident;
 9200 Bitmapset *idattrs;
 9201 HeapTuple key_tuple;
 9202 bool nulls[MaxHeapAttributeNumber];
 9203 Datum values[MaxHeapAttributeNumber];
 9204
9205 *copy = false;
9206
9207 if (!RelationIsLogicallyLogged(relation))
9208 return NULL;
9209
9210 if (replident == REPLICA_IDENTITY_NOTHING)
9211 return NULL;
9212
9213 if (replident == REPLICA_IDENTITY_FULL)
9214 {
9215 /*
9216 * When logging the entire old tuple, it very well could contain
9217 * toasted columns. If so, force them to be inlined.
9218 */
9219 if (HeapTupleHasExternal(tp))
9220 {
9221 *copy = true;
9222 tp = toast_flatten_tuple(tp, desc);
9223 }
9224 return tp;
9225 }
9226
9227 /* if the key isn't required and we're only logging the key, we're done */
9228 if (!key_required)
9229 return NULL;
9230
9231 /* find out the replica identity columns */
9234
9235 /*
9236 * If there's no defined replica identity columns, treat as !key_required.
9237 * (This case should not be reachable from heap_update, since that should
9238 * calculate key_required accurately. But heap_delete just passes
9239 * constant true for key_required, so we can hit this case in deletes.)
9240 */
9241 if (bms_is_empty(idattrs))
9242 return NULL;
9243
9244 /*
9245 * Construct a new tuple containing only the replica identity columns,
9246 * with nulls elsewhere. While we're at it, assert that the replica
9247 * identity columns aren't null.
9248 */
9249 heap_deform_tuple(tp, desc, values, nulls);
9250
9251 for (int i = 0; i < desc->natts; i++)
9252 {
 9253 if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
 9254 idattrs))
9255 Assert(!nulls[i]);
9256 else
9257 nulls[i] = true;
9258 }
9259
9260 key_tuple = heap_form_tuple(desc, values, nulls);
9261 *copy = true;
9262
 9263 bms_free(idattrs);
 9264
9265 /*
9266 * If the tuple, which by here only contains indexed columns, still has
9267 * toasted columns, force them to be inlined. This is somewhat unlikely
 9268 * since there are limits on the size of indexed columns, so we don't
 9269 * duplicate toast_flatten_tuple()'s functionality in the above loop over
9270 * the indexed columns, even if it would be more efficient.
9271 */
 9272 if (HeapTupleHasExternal(key_tuple))
 9273 {
 9274 HeapTuple oldtup = key_tuple;
 9275
 9276 key_tuple = toast_flatten_tuple(oldtup, desc);
 9277 heap_freetuple(oldtup);
 9278 }
9279
9280 return key_tuple;
9281}
9282
9283/*
9284 * HeapCheckForSerializableConflictOut
9285 * We are reading a tuple. If it's not visible, there may be a
9286 * rw-conflict out with the inserter. Otherwise, if it is visible to us
9287 * but has been deleted, there may be a rw-conflict out with the deleter.
9288 *
9289 * We will determine the top level xid of the writing transaction with which
9290 * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9291 * for overlap with our own transaction.
9292 *
9293 * This function should be called just about anywhere in heapam.c where a
9294 * tuple has been read. The caller must hold at least a shared lock on the
9295 * buffer, because this function might set hint bits on the tuple. There is
9296 * currently no known reason to call this function from an index AM.
9297 */
9298void
 9299HeapCheckForSerializableConflictOut(bool visible, Relation relation,
 9300 HeapTuple tuple, Buffer buffer,
9301 Snapshot snapshot)
9302{
9303 TransactionId xid;
 9304 HTSV_Result htsvResult;
 9305
9306 if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9307 return;
9308
9309 /*
9310 * Check to see whether the tuple has been written to by a concurrent
9311 * transaction, either to create it not visible to us, or to delete it
9312 * while it is visible to us. The "visible" bool indicates whether the
9313 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9314 * is going on with it.
9315 *
9316 * In the event of a concurrently inserted tuple that also happens to have
9317 * been concurrently updated (by a separate transaction), the xmin of the
9318 * tuple will be used -- not the updater's xid.
9319 */
 9320 htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
 9321 switch (htsvResult)
9322 {
9323 case HEAPTUPLE_LIVE:
9324 if (visible)
9325 return;
9326 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9327 break;
 9328 case HEAPTUPLE_RECENTLY_DEAD:
 9329 case HEAPTUPLE_DELETE_IN_PROGRESS:
 9330 if (visible)
9331 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9332 else
9333 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9334
 9335 if (TransactionIdPrecedes(xid, TransactionXmin))
 9336 {
9337 /* This is like the HEAPTUPLE_DEAD case */
9338 Assert(!visible);
9339 return;
9340 }
9341 break;
 9342 case HEAPTUPLE_INSERT_IN_PROGRESS:
 9343 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9344 break;
9345 case HEAPTUPLE_DEAD:
9346 Assert(!visible);
9347 return;
9348 default:
9349
9350 /*
9351 * The only way to get to this default clause is if a new value is
9352 * added to the enum type without adding it to this switch
9353 * statement. That's a bug, so elog.
9354 */
9355 elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9356
9357 /*
9358 * In spite of having all enum values covered and calling elog on
9359 * this default, some compilers think this is a code path which
9360 * allows xid to be used below without initialization. Silence
9361 * that warning.
9362 */
 9363 xid = InvalidTransactionId;
 9364 }
9365
 9366 Assert(TransactionIdIsValid(xid));
 9367 Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
 9368
9369 /*
9370 * Find top level xid. Bail out if xid is too early to be a conflict, or
9371 * if it's our own xid.
9372 */
 9373 if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
 9374 return;
 9375 xid = SubTransGetTopmostTransaction(xid);
 9376 if (TransactionIdPrecedes(xid, TransactionXmin))
 9377 return;
9378
9379 CheckForSerializableConflictOut(relation, xid, snapshot);
9380}
int16 AttrNumber
Definition attnum.h:21
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1290
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:901
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:575
#define bms_is_empty(a)
Definition bitmapset.h:118
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition block.h:71
static int32 next
Definition blutils.c:225
static Datum values[MAXATTR]
Definition bootstrap.c:188
int Buffer
Definition buf.h:23
#define InvalidBuffer
Definition buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4426
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition bufmgr.c:787
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4447
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3099
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5566
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5583
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3132
int maintenance_io_concurrency
Definition bufmgr.c:207
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition bufmgr.c:879
@ BAS_BULKREAD
Definition bufmgr.h:37
@ BAS_BULKWRITE
Definition bufmgr.h:39
#define RelationGetNumberOfBlocks(reln)
Definition bufmgr.h:309
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:472
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:439
@ BUFFER_LOCK_SHARE
Definition bufmgr.h:212
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:222
@ BUFFER_LOCK_UNLOCK
Definition bufmgr.h:207
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:334
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:423
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:990
PageHeaderData * PageHeader
Definition bufpage.h:199
static bool PageIsAllVisible(const PageData *page)
Definition bufpage.h:455
static void PageClearAllVisible(Page page)
Definition bufpage.h:465
#define SizeOfPageHeaderData
Definition bufpage.h:242
static void PageSetAllVisible(Page page)
Definition bufpage.h:460
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:269
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:379
static void PageSetFull(Page page)
Definition bufpage.h:444
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:417
PageData * Page
Definition bufpage.h:81
#define PageClearPrunable(page)
Definition bufpage.h:486
#define PageSetPrunable(page, xid)
Definition bufpage.h:479
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:397
#define NameStr(name)
Definition c.h:837
#define InvalidCommandId
Definition c.h:755
#define pg_noinline
Definition c.h:315
#define Min(x, y)
Definition c.h:1093
#define likely(x)
Definition c.h:431
#define MAXALIGN(LEN)
Definition c.h:898
uint8_t uint8
Definition c.h:616
#define Assert(condition)
Definition c.h:945
int64_t int64
Definition c.h:615
TransactionId MultiXactId
Definition c.h:748
#define pg_attribute_always_inline
Definition c.h:299
int16_t int16
Definition c.h:613
#define SHORTALIGN(LEN)
Definition c.h:894
uint32 bits32
Definition c.h:627
uint16_t uint16
Definition c.h:617
#define pg_unreachable()
Definition c.h:361
#define unlikely(x)
Definition c.h:432
uint32_t uint32
Definition c.h:618
#define lengthof(array)
Definition c.h:875
#define StaticAssertDecl(condition, errmessage)
Definition c.h:1010
uint32 CommandId
Definition c.h:752
uint32 TransactionId
Definition c.h:738
#define OidIsValid(objectId)
Definition c.h:860
size_t Size
Definition c.h:691
bool IsToastRelation(Relation relation)
Definition catalog.c:206
bool IsCatalogRelation(Relation relation)
Definition catalog.c:104
bool IsSharedRelation(Oid relationId)
Definition catalog.c:304
bool IsInplaceUpdateRelation(Relation relation)
Definition catalog.c:183
CommandId HeapTupleHeaderGetCmin(const HeapTupleHeaderData *tup)
Definition combocid.c:104
void HeapTupleHeaderAdjustCmax(const HeapTupleHeaderData *tup, CommandId *cmax, bool *iscombo)
Definition combocid.c:153
CommandId HeapTupleHeaderGetCmax(const HeapTupleHeaderData *tup)
Definition combocid.c:118
bool datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
Definition datum.c:223
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
int int errdetail_internal(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition freelist.c:461
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition freelist.c:643
int NBuffers
Definition globals.c:142
Oid MyDatabaseTableSpace
Definition globals.c:96
Oid MyDatabaseId
Definition globals.c:94
void simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup, TU_UpdateIndexes *update_indexes)
Definition heapam.c:4568
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition heapam.c:7693
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition heapam.c:9114
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition heapam.c:5407
static TM_Result heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax, const ItemPointerData *tid, TransactionId xid, LockTupleMode mode)
Definition heapam.c:5779
static void heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
Definition heapam.c:707
bool heap_inplace_lock(Relation relation, HeapTuple oldtup_ptr, Buffer buffer, void(*release_callback)(void *), void *arg)
Definition heapam.c:6449
void heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, bits32 options, BulkInsertState bistate)
Definition heapam.c:2421
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf)
Definition heapam.c:1669
#define BOTTOMUP_TOLERANCE_NBLOCKS
Definition heapam.c:190
static BlockNumber heap_scan_stream_read_next_parallel(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition heapam.c:252
int updstatus
Definition heapam.c:130
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
Definition heapam.c:8773
static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition heapam.c:5358
static int heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
Definition heapam.c:2389
static pg_attribute_always_inline int page_collect_tuples(HeapScanDesc scan, Snapshot snapshot, Page page, Buffer buffer, BlockNumber block, int lines, bool all_visible, bool check_serializable)
Definition heapam.c:522
static BlockNumber heap_scan_stream_read_next_serial(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition heapam.c:292
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition heapam.c:7544
void heap_finish_speculative(Relation relation, const ItemPointerData *tid)
Definition heapam.c:6180
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition heapam.c:8071
bool heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition heapam.c:1459
#define LOCKMODE_from_mxstatus(status)
Definition heapam.c:159
void heap_endscan(TableScanDesc sscan)
Definition heapam.c:1378
#define FRM_RETURN_IS_XID
Definition heapam.c:6737
#define TUPLOCK_from_mxstatus(status)
Definition heapam.c:218
void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
Definition heapam.c:1319
void heap_inplace_unlock(Relation relation, HeapTuple oldtup, Buffer buffer)
Definition heapam.c:6727
TM_Result heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition heapam.c:3321
static int index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
Definition heapam.c:8525
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining, bool logLockFailure)
Definition heapam.c:7893
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition heapam.c:7908
TM_Result heap_delete(Relation relation, const ItemPointerData *tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
Definition heapam.c:2852
static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, const struct VacuumCutoffs *cutoffs, uint16 *flags, HeapPageFreeze *pagefrz)
Definition heapam.c:6788
static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, bool *copy)
Definition heapam.c:9195
static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
Definition heapam.c:752
static TM_Result heap_lock_updated_tuple(Relation rel, uint16 prior_infomask, TransactionId prior_raw_xmax, const ItemPointerData *prior_ctid, TransactionId xid, LockTupleMode mode)
Definition heapam.c:6127
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, bits32 options, BulkInsertState bistate)
Definition heapam.c:2150
#define LockTupleTuplock(rel, tup, mode)
Definition heapam.c:167
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition heapam.c:7963
bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId FreezeLimit, TransactionId MultiXactCutoff)
Definition heapam.c:7499
void heap_inplace_update_and_unlock(Relation relation, HeapTuple oldtup, HeapTuple tuple, Buffer buffer)
Definition heapam.c:6587
static BlockNumber heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
Definition heapam.c:876
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition heapam.c:7625
#define BOTTOMUP_MAX_NBLOCKS
Definition heapam.c:189
void ReleaseBulkInsertStatePin(BulkInsertState bistate)
Definition heapam.c:2112
#define FRM_MARK_COMMITTED
Definition heapam.c:6739
#define FRM_NOOP
Definition heapam.c:6735
static void index_delete_check_htid(TM_IndexDeleteOp *delstate, Page page, OffsetNumber maxoff, const ItemPointerData *htid, TM_IndexStatus *istatus)
Definition heapam.c:8156
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition heapam.c:1420
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition heapam.c:1787
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, bits32 options)
Definition heapam.c:2341
int lockstatus
Definition heapam.c:129
void heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition heapam.c:7477
bool heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition heapam.c:1562
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper, int *remaining)
Definition heapam.c:7871
void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid)
Definition heapam.c:1489
void heap_abort_speculative(Relation relation, const ItemPointerData *tid)
Definition heapam.c:6267
static BlockNumber bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data, void *per_buffer_data)
Definition heapam.c:317
TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
Definition heapam.c:1164
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition heapam.c:960
static Page heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
Definition heapam.c:830
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition heapam.c:2807
#define FRM_RETURN_IS_MULTI
Definition heapam.c:6738
LOCKMODE hwlock
Definition heapam.c:128
#define FRM_INVALIDATE_XMAX
Definition heapam.c:6736
static bool heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2, bool isnull1, bool isnull2)
Definition heapam.c:4427
static void index_delete_sort(TM_IndexDeleteOp *delstate)
Definition heapam.c:8561
void heap_prepare_pagescan(TableScanDesc sscan)
Definition heapam.c:616
static Bitmapset * HeapDetermineColumnsInfo(Relation relation, Bitmapset *interesting_cols, Bitmapset *external_cols, HeapTuple oldtup, HeapTuple newtup, bool *has_external)
Definition heapam.c:4478
static const int MultiXactStatusLock[MaxMultiXactStatus+1]
Definition heapam.c:207
void simple_heap_insert(Relation relation, HeapTuple tup)
Definition heapam.c:2794
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition heapam.c:2829
#define UnlockTupleTuplock(rel, tup, mode)
Definition heapam.c:169
static TM_Result test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
Definition heapam.c:5688
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition heapam.c:7144
static void AssertHasSnapshotForToast(Relation rel)
Definition heapam.c:225
void simple_heap_delete(Relation relation, const ItemPointerData *tid)
Definition heapam.c:3275
static const struct @15 tupleLockExtraInfo[]
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
Definition heapam.c:8892
TransactionId HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
Definition heapam.c:7677
TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
Definition heapam.c:8216
#define ConditionalLockTupleTuplock(rel, tup, mode, log)
Definition heapam.c:171
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition heapam.c:357
static int bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
Definition heapam.c:8657
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition heapam.c:1070
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
Definition heapam.c:4656
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition heapam.c:2061
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper, int *remaining, bool logLockFailure)
Definition heapam.c:7793
static int bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
Definition heapam.c:8700
void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid)
Definition heapam.c:1939
void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
Definition heapam.c:500
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition heapam.c:9299
static Page heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
Definition heapam.c:799
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition heapam.c:4609
void heap_pre_freeze_checks(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition heapam.c:7424
BulkInsertState GetBulkInsertState(void)
Definition heapam.c:2083
void FreeBulkInsertState(BulkInsertState bistate)
Definition heapam.c:2100
#define HEAP_INSERT_SPECULATIVE
Definition heapam.h:40
#define HEAP_FREEZE_CHECK_XMAX_ABORTED
Definition heapam.h:151
struct HeapScanDescData * HeapScanDesc
Definition heapam.h:109
HTSV_Result
Definition heapam.h:138
@ HEAPTUPLE_RECENTLY_DEAD
Definition heapam.h:141
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition heapam.h:142
@ HEAPTUPLE_LIVE
Definition heapam.h:140
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition heapam.h:143
@ HEAPTUPLE_DEAD
Definition heapam.h:139
struct BitmapHeapScanDescData * BitmapHeapScanDesc
Definition heapam.h:117
#define HEAP_INSERT_FROZEN
Definition heapam.h:38
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition heapam.h:524
#define HEAP_FREEZE_CHECK_XMIN_COMMITTED
Definition heapam.h:150
#define HEAP_INSERT_NO_LOGICAL
Definition heapam.h:39
struct BulkInsertStateData * BulkInsertState
Definition heapam.h:47
const TableAmRoutine * GetHeapamTableAmRoutine(void)
void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer, uint16 infomask, TransactionId xid)
bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
bool HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
int HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer, int ntups, BatchMVCCState *batchmvcc, OffsetNumber *vistuples_dense)
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define XLH_INSERT_ON_TOAST_RELATION
Definition heapam_xlog.h:76
#define SizeOfHeapMultiInsert
#define XLOG_HEAP2_MULTI_INSERT
Definition heapam_xlog.h:64
#define SizeOfHeapUpdate
#define XLH_INVALID_XVAC
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:87
#define XLOG_HEAP_HOT_UPDATE
Definition heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition heapam_xlog.h:34
#define XLH_INSERT_IS_SPECULATIVE
Definition heapam_xlog.h:74
#define XLH_LOCK_ALL_FROZEN_CLEARED
#define XLH_DELETE_CONTAINS_OLD_KEY
#define XLH_UPDATE_CONTAINS_NEW_TUPLE
Definition heapam_xlog.h:90
#define XLH_INSERT_LAST_IN_MULTI
Definition heapam_xlog.h:73
#define XLH_INSERT_ALL_FROZEN_SET
Definition heapam_xlog.h:79
#define XLH_FREEZE_XVAC
#define XLOG_HEAP_UPDATE
Definition heapam_xlog.h:35
#define XLHL_XMAX_KEYSHR_LOCK
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLH_UPDATE_CONTAINS_OLD_TUPLE
Definition heapam_xlog.h:88
#define SizeOfHeapNewCid
#define SizeOfHeapLockUpdated
#define XLHL_XMAX_IS_MULTI
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:72
#define SizeOfHeapHeader
#define XLH_DELETE_IS_PARTITION_MOVE
#define MinSizeOfHeapInplace
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:85
#define XLHL_XMAX_LOCK_ONLY
#define XLOG_HEAP_INPLACE
Definition heapam_xlog.h:40
#define XLOG_HEAP2_LOCK_UPDATED
Definition heapam_xlog.h:65
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition heapam_xlog.h:91
#define SizeOfMultiInsertTuple
#define XLHL_XMAX_EXCL_LOCK
#define XLOG_HEAP2_NEW_CID
Definition heapam_xlog.h:66
#define XLH_DELETE_CONTAINS_OLD_TUPLE
#define XLOG_HEAP_LOCK
Definition heapam_xlog.h:39
#define XLOG_HEAP_INSERT
Definition heapam_xlog.h:33
#define SizeOfHeapInsert
#define SizeOfHeapDelete
#define XLH_DELETE_IS_SUPER
#define XLH_UPDATE_CONTAINS_OLD_KEY
Definition heapam_xlog.h:89
#define XLHL_KEYS_UPDATED
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition heapam_xlog.h:75
#define XLOG_HEAP_INIT_PAGE
Definition heapam_xlog.h:47
#define SizeOfHeapConfirm
#define SizeOfHeapLock
#define XLOG_HEAP_CONFIRM
Definition heapam_xlog.h:38
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition heaptoast.c:43
HeapTuple heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, int options)
Definition heaptoast.c:96
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition heaptoast.c:350
#define TOAST_TUPLE_THRESHOLD
Definition heaptoast.h:48
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
Definition heaptuple.c:1037
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition heaptuple.c:1266
void heap_freetuple(HeapTuple htup)
Definition heaptuple.c:1384
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition hio.c:35
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other, int num_pages)
Definition hio.c:500
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
#define HEAP_MOVED_OFF
#define HEAP_XMAX_SHR_LOCK
static bool HeapTupleIsHotUpdated(const HeapTupleData *tuple)
#define HEAP_XMIN_FROZEN
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
#define HeapTupleHeaderGetNatts(tup)
static void HeapTupleHeaderSetXminFrozen(HeapTupleHeaderData *tup)
#define SizeofHeapTupleHeader
#define HEAP_KEYS_UPDATED
static bool HEAP_XMAX_IS_SHR_LOCKED(uint16 infomask)
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
static bool HeapTupleHeaderXminInvalid(const HeapTupleHeaderData *tup)
static void HeapTupleClearHotUpdated(const HeapTupleData *tuple)
static bool HeapTupleHasExternal(const HeapTupleData *tuple)
static TransactionId HeapTupleHeaderGetXvac(const HeapTupleHeaderData *tup)
#define HEAP2_XACT_MASK
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
#define HEAP_XMAX_LOCK_ONLY
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
#define HEAP_XMAX_BITS
#define HEAP_LOCK_MASK
static CommandId HeapTupleHeaderGetRawCommandId(const HeapTupleHeaderData *tup)
static TransactionId HeapTupleHeaderGetRawXmax(const HeapTupleHeaderData *tup)
static bool HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
static bool HeapTupleIsHeapOnly(const HeapTupleData *tuple)
#define HEAP_MOVED
static void HeapTupleSetHeapOnly(const HeapTupleData *tuple)
#define HEAP_XMAX_IS_MULTI
static bool HEAP_XMAX_IS_KEYSHR_LOCKED(uint16 infomask)
#define HEAP_XMAX_COMMITTED
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
#define HEAP_COMBOCID
#define HEAP_XACT_MASK
static bool HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
static void HeapTupleSetHotUpdated(const HeapTupleData *tuple)
#define HEAP_XMAX_EXCL_LOCK
static bool HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
#define HEAP_XMAX_INVALID
static TransactionId HeapTupleHeaderGetRawXmin(const HeapTupleHeaderData *tup)
static void * GETSTRUCT(const HeapTupleData *tuple)
static void HeapTupleClearHeapOnly(const HeapTupleData *tuple)
#define MaxHeapAttributeNumber
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
static TransactionId HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
#define MaxHeapTuplesPerPage
static bool HEAP_XMAX_IS_EXCL_LOCKED(uint16 infomask)
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
static bool HEAP_LOCKED_UPGRADED(uint16 infomask)
#define HEAP_UPDATED
#define HEAP_XMAX_KEYSHR_LOCK
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
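The htup_details.h accessors above decode a tuple header's infomask and xmax. A small sketch of the common decision they support, namely which transaction ID to treat as the tuple's current locker or updater; this is a simplification (the real lock and update paths also distinguish lock-only multixacts and may wait on every member), and the helper name is hypothetical.

#include "postgres.h"

#include "access/htup_details.h"
#include "access/transam.h"

/* Hypothetical sketch: pick the XID to wait on for a locked/updated tuple. */
static TransactionId
sketch_tuple_wait_xid(HeapTupleHeader tuple)
{
	uint16		infomask = tuple->t_infomask;

	if (infomask & HEAP_XMAX_INVALID)
		return InvalidTransactionId;	/* nobody holds the tuple */

	if (infomask & HEAP_XMAX_IS_MULTI)
		return HeapTupleHeaderGetUpdateXid(tuple);	/* updater within a multixact */

	return HeapTupleHeaderGetRawXmax(tuple);	/* plain xmax */
}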
#define IsParallelWorker()
Definition parallel.h:62
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:178
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:134
int remaining
Definition informix.c:692
#define INJECTION_POINT(name, arg)
void AcceptInvalidationMessages(void)
Definition inval.c:930
int inplaceGetInvalidationMessages(SharedInvalidationMessage **msgs, bool *RelcacheInitFileInval)
Definition inval.c:1088
void PreInplace_Inval(void)
Definition inval.c:1250
void CacheInvalidateHeapTupleInplace(Relation relation, HeapTuple key_equivalent_tuple)
Definition inval.c:1593
void AtInplace_Inval(void)
Definition inval.c:1263
void ForgetInplace_Inval(void)
Definition inval.c:1286
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition inval.c:1571
int b
Definition isn.c:74
int j
Definition isn.c:78
int i
Definition isn.c:77
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdIsNormal(itemId)
Definition itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition itemid.h:78
#define ItemIdIsUsed(itemId)
Definition itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition itemid.h:106
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
int32 ItemPointerCompare(const ItemPointerData *arg1, const ItemPointerData *arg2)
Definition itemptr.c:51
bool ItemPointerEquals(const ItemPointerData *pointer1, const ItemPointerData *pointer2)
Definition itemptr.c:35
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition itemptr.h:135
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition itemptr.h:184
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition itemptr.h:147
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition itemptr.h:124
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition itemptr.h:197
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition itemptr.h:103
static BlockNumber ItemPointerGetBlockNumberNoCheck(const ItemPointerData *pointer)
Definition itemptr.h:93
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition itemptr.h:172
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition itemptr.h:83
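The itemptr.h helpers above manipulate the (block, offset) pair that identifies a heap tuple. A trivial sketch, with a hypothetical helper name, of the round trip heap code relies on.

#include "postgres.h"

#include "storage/itemptr.h"

/* Hypothetical sketch: build a TID, read it back, then invalidate it. */
static void
sketch_tid_roundtrip(BlockNumber blkno, OffsetNumber offnum)
{
	ItemPointerData tid;

	ItemPointerSet(&tid, blkno, offnum);
	Assert(ItemPointerGetBlockNumber(&tid) == blkno);
	Assert(ItemPointerGetOffsetNumber(&tid) == offnum);

	ItemPointerSetInvalid(&tid);
	Assert(!ItemPointerIsValid(&tid));
}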
void UnlockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:601
bool ConditionalXactLockTableWait(TransactionId xid, bool logLockFailure)
Definition lmgr.c:739
void LockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:562
void XactLockTableWait(TransactionId xid, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper)
Definition lmgr.c:663
XLTW_Oper
Definition lmgr.h:25
@ XLTW_None
Definition lmgr.h:26
@ XLTW_Lock
Definition lmgr.h:29
@ XLTW_Delete
Definition lmgr.h:28
@ XLTW_LockUpdated
Definition lmgr.h:30
@ XLTW_Update
Definition lmgr.h:27
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:644
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:624
bool log_lock_failures
Definition lock.c:55
int LOCKMODE
Definition lockdefs.h:26
#define AccessExclusiveLock
Definition lockdefs.h:43
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
#define InplaceUpdateTupleLock
Definition lockdefs.h:48
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define ExclusiveLock
Definition lockdefs.h:42
#define RowShareLock
Definition lockdefs.h:37
LockWaitPolicy
Definition lockoptions.h:38
@ LockWaitSkip
Definition lockoptions.h:42
@ LockWaitBlock
Definition lockoptions.h:40
@ LockWaitError
Definition lockoptions.h:44
LockTupleMode
Definition lockoptions.h:51
@ LockTupleExclusive
Definition lockoptions.h:59
@ LockTupleNoKeyExclusive
Definition lockoptions.h:57
@ LockTupleShare
Definition lockoptions.h:55
@ LockTupleKeyShare
Definition lockoptions.h:53
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum)
Definition locktag.h:117
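The lmgr/lock entries above are what heap code uses to queue behind a concurrent locker or updater. A minimal sketch of the wait pattern, assuming the updating XID has already been extracted from the tuple; the helper name is hypothetical, the lock mode is hard-coded to ExclusiveLock for brevity (the real code derives it from the LockTupleMode), and conditional waits for LockWaitSkip/LockWaitError are omitted.

#include "postgres.h"

#include "access/htup.h"
#include "storage/lmgr.h"

/*
 * Hypothetical sketch: wait for the transaction modifying a tuple while
 * holding the heavyweight tuple lock, so we keep our place in the queue
 * of waiters rather than starving.
 */
static void
sketch_wait_for_updater(Relation relation, HeapTuple tuple, TransactionId xwait)
{
	LockTuple(relation, &tuple->t_self, ExclusiveLock);
	XactLockTableWait(xwait, relation, &tuple->t_self, XLTW_Update);
	UnlockTuple(relation, &tuple->t_self, ExclusiveLock);
}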
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc(Size size)
Definition mcxt.c:1387
#define IsBootstrapProcessingMode()
Definition miscadmin.h:477
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define IsNormalProcessingMode()
Definition miscadmin.h:479
#define END_CRIT_SECTION()
Definition miscadmin.h:152
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition multixact.c:400
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition multixact.c:2857
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition multixact.c:2871
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition multixact.c:511
void MultiXactIdSetOldestMember(void)
Definition multixact.c:585
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition multixact.c:704
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition multixact.c:347
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool isLockOnly)
Definition multixact.c:1161
#define MultiXactIdIsValid(multi)
Definition multixact.h:29
MultiXactStatus
Definition multixact.h:37
@ MultiXactStatusForShare
Definition multixact.h:39
@ MultiXactStatusForNoKeyUpdate
Definition multixact.h:40
@ MultiXactStatusNoKeyUpdate
Definition multixact.h:43
@ MultiXactStatusUpdate
Definition multixact.h:45
@ MultiXactStatusForUpdate
Definition multixact.h:41
@ MultiXactStatusForKeyShare
Definition multixact.h:38
#define ISUPDATE_from_mxstatus(status)
Definition multixact.h:51
#define InvalidMultiXactId
Definition multixact.h:25
#define MaxMultiXactStatus
Definition multixact.h:48
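The multixact.c entries above expand a MultiXactId xmax into its members. A sketch, with a hypothetical helper name, of walking the member array to see whether any member is an updater rather than a mere locker.

#include "postgres.h"

#include "access/multixact.h"

/* Hypothetical sketch: does this multixact contain an updating member? */
static bool
sketch_multixact_has_updater(MultiXactId multi)
{
	MultiXactMember *members;
	int			nmembers;
	bool		found = false;

	nmembers = GetMultiXactIdMembers(multi, &members, false, false);
	for (int i = 0; i < nmembers; i++)
	{
		if (ISUPDATE_from_mxstatus(members[i].status))
			found = true;
	}
	if (nmembers > 0)
		pfree(members);

	return found;
}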
static char * errmsg
#define InvalidOffsetNumber
Definition off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition off.h:39
#define OffsetNumberNext(offsetNumber)
Definition off.h:52
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
#define OffsetNumberPrev(offsetNumber)
Definition off.h:54
#define MaxOffsetNumber
Definition off.h:28
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition parse_oper.c:373
int16 attlen
#define ERRCODE_DATA_CORRUPTED
static uint32 pg_nextpower2_32(uint32 num)
static PgChecksumMode mode
static const struct exclude_list_item skip[]
FormData_pg_class * Form_pg_class
Definition pg_class.h:160
FormData_pg_database * Form_pg_database
static char buf[DEFAULT_XLOG_SEG_SIZE]
#define pgstat_count_heap_getnext(rel)
Definition pgstat.h:726
#define pgstat_count_heap_scan(rel)
Definition pgstat.h:721
void pgstat_count_heap_update(Relation rel, bool hot, bool newpage)
void pgstat_count_heap_delete(Relation rel)
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
#define qsort(a, b, c, d)
Definition port.h:495
static Oid DatumGetObjectId(Datum X)
Definition postgres.h:242
uint64_t Datum
Definition postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:332
#define InvalidOid
unsigned int Oid
void CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, BlockNumber blkno)
Definition predicate.c:4347
void CheckForSerializableConflictOut(Relation relation, TransactionId xid, Snapshot snapshot)
Definition predicate.c:4034
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition predicate.c:2587
void PredicateLockTID(Relation relation, const ItemPointerData *tid, Snapshot snapshot, TransactionId tuple_xid)
Definition predicate.c:2632
bool CheckForSerializableConflictOutNeeded(Relation relation, Snapshot snapshot)
Definition predicate.c:4002
static int fb(int x)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition procarray.c:4114
bool TransactionIdIsInProgress(TransactionId xid)
Definition procarray.c:1401
void heap_page_prune_opt(Relation relation, Buffer buffer, Buffer *vmbuffer)
Definition pruneheap.c:256
void read_stream_reset(ReadStream *stream)
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
void read_stream_end(ReadStream *stream)
#define READ_STREAM_USE_BATCHING
Definition read_stream.h:64
BlockNumber (*ReadStreamBlockNumberCB)(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition read_stream.h:77
#define READ_STREAM_DEFAULT
Definition read_stream.h:21
#define READ_STREAM_SEQUENTIAL
Definition read_stream.h:36
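The read_stream entries above form the streaming-read API that the sequential scan code drives. A self-contained sketch of the begin/next/end loop with a block-number callback; the struct and function names are hypothetical, and RelationGetNumberOfBlocks/ReleaseBuffer come from the buffer manager rather than the entries listed here.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/read_stream.h"

typedef struct SketchStreamState
{
	BlockNumber next;
	BlockNumber nblocks;
} SketchStreamState;

/* Hypothetical callback: hand out blocks 0 .. nblocks-1 in order. */
static BlockNumber
sketch_next_block(ReadStream *stream, void *callback_private_data,
				  void *per_buffer_data)
{
	SketchStreamState *state = (SketchStreamState *) callback_private_data;

	if (state->next >= state->nblocks)
		return InvalidBlockNumber;	/* end of stream */
	return state->next++;
}

/* Hypothetical sketch: read every block of the main fork through a stream. */
static void
sketch_scan_all_blocks(Relation rel)
{
	SketchStreamState state = {0, RelationGetNumberOfBlocks(rel)};
	ReadStream *stream;
	Buffer		buf;

	stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL, NULL, rel,
										MAIN_FORKNUM, sketch_next_block,
										&state, 0);
	while ((buf = read_stream_next_buffer(stream, NULL)) != InvalidBuffer)
		ReleaseBuffer(buf);		/* real code would examine the page here */
	read_stream_end(stream);
}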
#define RelationGetRelid(relation)
Definition rel.h:514
#define RelationIsLogicallyLogged(relation)
Definition rel.h:710
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition rel.h:389
#define RelationGetDescr(relation)
Definition rel.h:540
#define RelationGetNumberOfAttributes(relation)
Definition rel.h:520
#define RelationGetRelationName(relation)
Definition rel.h:548
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition rel.h:693
#define RelationNeedsWAL(relation)
Definition rel.h:637
#define RelationUsesLocalBuffers(relation)
Definition rel.h:646
#define HEAP_DEFAULT_FILLFACTOR
Definition rel.h:360
void RelationDecrementReferenceCount(Relation rel)
Definition relcache.c:2190
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition relcache.c:5294
void RelationIncrementReferenceCount(Relation rel)
Definition relcache.c:2177
@ INDEX_ATTR_BITMAP_KEY
Definition relcache.h:69
@ INDEX_ATTR_BITMAP_HOT_BLOCKING
Definition relcache.h:72
@ INDEX_ATTR_BITMAP_SUMMARIZED
Definition relcache.h:73
@ INDEX_ATTR_BITMAP_IDENTITY_KEY
Definition relcache.h:71
ForkNumber
Definition relpath.h:56
@ MAIN_FORKNUM
Definition relpath.h:58
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition relscan.h:103
#define ScanDirectionIsForward(direction)
Definition sdir.h:64
#define ScanDirectionIsBackward(direction)
Definition sdir.h:50
ScanDirection
Definition sdir.h:25
@ ForwardScanDirection
Definition sdir.h:28
TransactionId RecentXmin
Definition snapmgr.c:160
void UnregisterSnapshot(Snapshot snapshot)
Definition snapmgr.c:866
TransactionId TransactionXmin
Definition snapmgr.c:159
bool HaveRegisteredOrActiveSnapshot(void)
Definition snapmgr.c:1644
void InvalidateCatalogSnapshot(void)
Definition snapmgr.c:455
#define IsHistoricMVCCSnapshot(snapshot)
Definition snapmgr.h:67
#define SnapshotAny
Definition snapmgr.h:33
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition snapmgr.h:50
#define IsMVCCSnapshot(snapshot)
Definition snapmgr.h:59
#define InvalidSnapshot
Definition snapshot.h:119
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition spccache.c:230
#define init()
BlockNumber last_free
Definition hio.h:49
BufferAccessStrategy strategy
Definition hio.h:31
uint32 already_extended_by
Definition hio.h:50
BlockNumber next_free
Definition hio.h:48
Buffer current_buf
Definition hio.h:32
MultiXactId NoFreezePageRelminMxid
Definition heapam.h:245
TransactionId FreezePageConflictXid
Definition heapam.h:234
TransactionId FreezePageRelfrozenXid
Definition heapam.h:221
bool freeze_required
Definition heapam.h:195
MultiXactId FreezePageRelminMxid
Definition heapam.h:222
TransactionId NoFreezePageRelfrozenXid
Definition heapam.h:244
Buffer rs_vmbuffer
Definition heapam.h:102
BufferAccessStrategy rs_strategy
Definition heapam.h:74
ScanDirection rs_dir
Definition heapam.h:89
uint32 rs_ntuples
Definition heapam.h:106
OffsetNumber rs_coffset
Definition heapam.h:69
Buffer rs_cbuf
Definition heapam.h:71
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition heapam.h:96
BlockNumber rs_startblock
Definition heapam.h:63
HeapTupleData rs_ctup
Definition heapam.h:76
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition heapam.h:107
BlockNumber rs_numblocks
Definition heapam.h:64
BlockNumber rs_nblocks
Definition heapam.h:62
ReadStream * rs_read_stream
Definition heapam.h:79
uint32 rs_cindex
Definition heapam.h:105
BlockNumber rs_prefetch_block
Definition heapam.h:90
BlockNumber rs_cblock
Definition heapam.h:70
TableScanDescData rs_base
Definition heapam.h:59
ItemPointerData t_self
Definition htup.h:65
uint32 t_len
Definition htup.h:64
HeapTupleHeader t_data
Definition htup.h:68
Oid t_tableOid
Definition htup.h:66
TransactionId t_xmin
union HeapTupleHeaderData::@52 t_choice
ItemPointerData t_ctid
HeapTupleFields t_heap
int16 npromisingtids
Definition heapam.c:198
LockRelId lockRelId
Definition rel.h:46
Oid relId
Definition rel.h:40
Oid dbId
Definition rel.h:41
TransactionId xid
Definition multixact.h:57
MultiXactStatus status
Definition multixact.h:58
LockInfoData rd_lockInfo
Definition rel.h:114
Form_pg_index rd_index
Definition rel.h:192
RelFileLocator rd_locator
Definition rel.h:57
Form_pg_class rd_rel
Definition rel.h:111
bool takenDuringRecovery
Definition snapshot.h:180
TransactionId xmax
Definition tableam.h:150
CommandId cmax
Definition tableam.h:151
ItemPointerData ctid
Definition tableam.h:149
ItemPointerData tid
Definition tableam.h:212
Relation rs_rd
Definition relscan.h:35
uint32 rs_flags
Definition relscan.h:63
struct ScanKeyData * rs_key
Definition relscan.h:38
struct SnapshotData * rs_snapshot
Definition relscan.h:36
struct ParallelTableScanDescData * rs_parallel
Definition relscan.h:65
TransactionId FreezeLimit
Definition vacuum.h:288
TransactionId OldestXmin
Definition vacuum.h:278
TransactionId relfrozenxid
Definition vacuum.h:262
MultiXactId relminmxid
Definition vacuum.h:263
MultiXactId MultiXactCutoff
Definition vacuum.h:289
MultiXactId OldestMxact
Definition vacuum.h:279
OffsetNumber offnum
TransactionId SubTransGetTopmostTransaction(TransactionId xid)
Definition subtrans.c:163
void ss_report_location(Relation rel, BlockNumber location)
Definition syncscan.c:289
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
Definition syncscan.c:254
#define FirstLowInvalidHeapAttributeNumber
Definition sysattr.h:27
#define TableOidAttributeNumber
Definition sysattr.h:26
bool RelationSupportsSysCache(Oid relid)
Definition syscache.c:763
void table_block_parallelscan_startblock_init(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan, BlockNumber startblock, BlockNumber numblocks)
Definition tableam.c:451
BlockNumber table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
Definition tableam.c:546
bool synchronize_seqscans
Definition tableam.c:50
@ SO_ALLOW_STRAT
Definition tableam.h:58
@ SO_TYPE_TIDRANGESCAN
Definition tableam.h:53
@ SO_TEMP_SNAPSHOT
Definition tableam.h:65
@ SO_ALLOW_PAGEMODE
Definition tableam.h:62
@ SO_TYPE_SAMPLESCAN
Definition tableam.h:51
@ SO_ALLOW_SYNC
Definition tableam.h:60
@ SO_TYPE_SEQSCAN
Definition tableam.h:49
@ SO_TYPE_BITMAPSCAN
Definition tableam.h:50
TU_UpdateIndexes
Definition tableam.h:111
@ TU_Summarizing
Definition tableam.h:119
@ TU_All
Definition tableam.h:116
@ TU_None
Definition tableam.h:113
TM_Result
Definition tableam.h:73
@ TM_Ok
Definition tableam.h:78
@ TM_BeingModified
Definition tableam.h:100
@ TM_Deleted
Definition tableam.h:93
@ TM_WouldBlock
Definition tableam.h:103
@ TM_Updated
Definition tableam.h:90
@ TM_SelfModified
Definition tableam.h:84
@ TM_Invisible
Definition tableam.h:81
bool tbm_iterate(TBMIterator *iterator, TBMIterateResult *tbmres)
Definition tidbitmap.c:1614
bool TransactionIdDidCommit(TransactionId transactionId)
Definition transam.c:126
bool TransactionIdDidAbort(TransactionId transactionId)
Definition transam.c:188
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition transam.h:297
#define InvalidTransactionId
Definition transam.h:31
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:282
static bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:312
#define TransactionIdEquals(id1, id2)
Definition transam.h:43
#define TransactionIdIsValid(xid)
Definition transam.h:41
#define TransactionIdIsNormal(xid)
Definition transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
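The transam.h/transam.c entries above are the primitives the visibility checks are built from. A sketch of the canonical ordering when classifying a tuple's xmax (our own transaction first, then in-progress, then committed); it simplifies the real visibility routines, which also consult hint bits, and the helper name is hypothetical.

#include "postgres.h"

#include "access/transam.h"
#include "access/xact.h"
#include "storage/procarray.h"

/* Hypothetical sketch: what became of the transaction in a tuple's xmax? */
static const char *
sketch_xmax_fate(TransactionId xmax)
{
	if (!TransactionIdIsValid(xmax))
		return "no updater";
	if (TransactionIdIsCurrentTransactionId(xmax))
		return "updated by our own transaction";
	if (TransactionIdIsInProgress(xmax))
		return "updater still running";
	if (TransactionIdDidCommit(xmax))
		return "updater committed";
	return "updater aborted or crashed";
}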
static CompactAttribute * TupleDescCompactAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:195
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition tuptable.h:476
static bool HeapKeyTest(HeapTuple tuple, TupleDesc tupdesc, int nkeys, ScanKey keys)
Definition valid.h:28
static bool VARATT_IS_EXTERNAL(const void *PTR)
Definition varatt.h:354
void visibilitymap_set(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
TransactionId GetTopTransactionId(void)
Definition xact.c:428
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:443
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition xact.c:943
bool IsInParallelMode(void)
Definition xact.c:1091
TransactionId GetCurrentTransactionId(void)
Definition xact.c:456
CommandId GetCurrentCommandId(bool used)
Definition xact.c:831
#define IsolationIsSerializable()
Definition xact.h:53
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:165
#define XLogStandbyInfoActive()
Definition xlog.h:125
uint64 XLogRecPtr
Definition xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:482
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:413
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:372
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:464
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:317
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:246
void XLogBeginInsert(void)
Definition xloginsert.c:153
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34
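The xloginsert.c entries above are the record-assembly API used for every heap WAL record. A sketch modelled on the delete case, assuming the tuple's buffer is already exclusive-locked; the field values are placeholders, the helper name is hypothetical, and BufferGetPage/PageSetLSN/RM_HEAP_ID come from the buffer, page, and resource-manager headers rather than the entries listed here. The real code runs this inside START_CRIT_SECTION()/END_CRIT_SECTION() and only when RelationNeedsWAL() is true.

#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/htup.h"
#include "access/xloginsert.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* Hypothetical sketch: log a heap DELETE for the tuple at tp->t_self. */
static void
sketch_log_heap_delete(Buffer buffer, HeapTuple tp, TransactionId new_xmax)
{
	xl_heap_delete xlrec;
	XLogRecPtr	recptr;

	xlrec.flags = 0;			/* placeholder; real code sets XLH_DELETE_* bits */
	xlrec.xmax = new_xmax;
	xlrec.offnum = ItemPointerGetOffsetNumber(&tp->t_self);
	xlrec.infobits_set = 0;		/* placeholder; real code encodes lock bits */

	XLogBeginInsert();
	XLogRegisterData(&xlrec, SizeOfHeapDelete);
	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);

	PageSetLSN(BufferGetPage(buffer), recptr);
}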