PostgreSQL Source Code git master
Loading...
Searching...
No Matches
heapam.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * heapam.c
4 * heap access method code
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/heap/heapam.c
12 *
13 *
14 * INTERFACE ROUTINES
15 * heap_beginscan - begin relation scan
16 * heap_rescan - restart a relation scan
17 * heap_endscan - end relation scan
18 * heap_getnext - retrieve next tuple in scan
19 * heap_fetch - retrieve tuple with given tid
20 * heap_insert - insert tuple into a relation
21 * heap_multi_insert - insert multiple tuples into a relation
22 * heap_delete - delete a tuple from a relation
23 * heap_update - replace a tuple in a relation with another tuple
24 *
25 * NOTES
26 * This file contains the heap_ routines which implement
27 * the POSTGRES heap access method used for all POSTGRES
28 * relations.
29 *
30 *-------------------------------------------------------------------------
31 */
32#include "postgres.h"
33
34#include "access/heapam.h"
35#include "access/heaptoast.h"
36#include "access/hio.h"
37#include "access/multixact.h"
38#include "access/subtrans.h"
39#include "access/syncscan.h"
40#include "access/valid.h"
42#include "access/xloginsert.h"
43#include "catalog/pg_database.h"
44#include "catalog/pg_database_d.h"
45#include "commands/vacuum.h"
47#include "pgstat.h"
48#include "port/pg_bitutils.h"
49#include "storage/lmgr.h"
50#include "storage/predicate.h"
51#include "storage/proc.h"
52#include "storage/procarray.h"
53#include "utils/datum.h"
55#include "utils/inval.h"
56#include "utils/spccache.h"
57#include "utils/syscache.h"
58
59
66 bool walLogical);
67#ifdef USE_ASSERT_CHECKING
69 const ItemPointerData *otid,
72#endif
77 bool *has_external);
78static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
80 bool *have_tuple_lock);
82 BlockNumber block,
83 ScanDirection dir);
85 ScanDirection dir);
95 TransactionId xid,
100 uint16 t_infomask);
102 LockTupleMode lockmode, bool *current_is_member);
104 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
105 int *remaining);
108 bool logLockFailure);
113 bool *copy);
114
115
116/*
117 * This table lists the heavyweight lock mode that corresponds to each tuple
118 * lock mode, as well as one or two corresponding MultiXactStatus values:
119 * .lockstatus to merely lock tuples, and .updstatus to update them. The
120 * latter is set to -1 if the corresponding tuple lock mode does not allow
121 * updating tuples -- see get_mxact_status_for_lock().
122 *
123 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
124 *
125 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
126 * instead.
127 */
128static const struct
129{
134
135{
137 .hwlock = AccessShareLock,
138 .lockstatus = MultiXactStatusForKeyShare,
139 /* KeyShare does not allow updating tuples */
140 .updstatus = -1
141 },
142 [LockTupleShare] = {
143 .hwlock = RowShareLock,
144 .lockstatus = MultiXactStatusForShare,
145 /* Share does not allow updating tuples */
146 .updstatus = -1
147 },
149 .hwlock = ExclusiveLock,
150 .lockstatus = MultiXactStatusForNoKeyUpdate,
151 .updstatus = MultiXactStatusNoKeyUpdate
152 },
154 .hwlock = AccessExclusiveLock,
155 .lockstatus = MultiXactStatusForUpdate,
156 .updstatus = MultiXactStatusUpdate
157 }
159
160/* Get the LOCKMODE for a given MultiXactStatus */
161#define LOCKMODE_from_mxstatus(status) \
162 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
163
164/*
165 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
166 * This is more readable than having every caller translate it to lock.h's
167 * LOCKMODE.
168 */
169#define LockTupleTuplock(rel, tup, mode) \
170 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
171#define UnlockTupleTuplock(rel, tup, mode) \
172 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
173#define ConditionalLockTupleTuplock(rel, tup, mode, log) \
174 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
175
176#ifdef USE_PREFETCH
177/*
178 * heap_index_delete_tuples and index_delete_prefetch_buffer use this
179 * structure to coordinate prefetching activity
180 */
181typedef struct
182{
184 int next_item;
185 int ndeltids;
186 TM_IndexDelete *deltids;
188#endif
189
190/* heap_index_delete_tuples bottom-up index deletion costing constants */
191#define BOTTOMUP_MAX_NBLOCKS 6
192#define BOTTOMUP_TOLERANCE_NBLOCKS 3
193
194/*
195 * heap_index_delete_tuples uses this when determining which heap blocks it
196 * must visit to help its bottom-up index deletion caller
197 */
198typedef struct IndexDeleteCounts
199{
200 int16 npromisingtids; /* Number of "promising" TIDs in group */
201 int16 ntids; /* Number of TIDs in group */
202 int16 ifirsttid; /* Offset to group's first deltid */
204
205/*
206 * This table maps tuple lock strength values for each particular
207 * MultiXactStatus value.
208 */
210{
211 LockTupleKeyShare, /* ForKeyShare */
212 LockTupleShare, /* ForShare */
213 LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
214 LockTupleExclusive, /* ForUpdate */
215 LockTupleNoKeyExclusive, /* NoKeyUpdate */
216 LockTupleExclusive /* Update */
217};
218
219/* Get the LockTupleMode for a given MultiXactStatus */
220#define TUPLOCK_from_mxstatus(status) \
221 (MultiXactStatusLock[(status)])
222
223/*
224 * Check that we have a valid snapshot if we might need TOAST access.
225 */
226static inline void
228{
229#ifdef USE_ASSERT_CHECKING
230
231 /* bootstrap mode in particular breaks this rule */
233 return;
234
235 /* if the relation doesn't have a TOAST table, we are good */
236 if (!OidIsValid(rel->rd_rel->reltoastrelid))
237 return;
238
240
241#endif /* USE_ASSERT_CHECKING */
242}
243
244/* ----------------------------------------------------------------
245 * heap support routines
246 * ----------------------------------------------------------------
247 */
248
249/*
250 * Streaming read API callback for parallel sequential scans. Returns the next
251 * block the caller wants from the read stream or InvalidBlockNumber when done.
252 */
253static BlockNumber
255 void *callback_private_data,
256 void *per_buffer_data)
257{
258 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
259
262
263 if (unlikely(!scan->rs_inited))
264 {
265 /* parallel scan */
269 scan->rs_startblock,
270 scan->rs_numblocks);
271
272 /* may return InvalidBlockNumber if there are no more blocks */
276 scan->rs_inited = true;
277 }
278 else
279 {
282 scan->rs_base.rs_parallel);
283 }
284
285 return scan->rs_prefetch_block;
286}
287
288/*
289 * Streaming read API callback for serial sequential and TID range scans.
290 * Returns the next block the caller wants from the read stream or
291 * InvalidBlockNumber when done.
292 */
293static BlockNumber
295 void *callback_private_data,
296 void *per_buffer_data)
297{
298 HeapScanDesc scan = (HeapScanDesc) callback_private_data;
299
300 if (unlikely(!scan->rs_inited))
301 {
303 scan->rs_inited = true;
304 }
305 else
307 scan->rs_prefetch_block,
308 scan->rs_dir);
309
310 return scan->rs_prefetch_block;
311}
312
313/*
314 * Read stream API callback for bitmap heap scans.
315 * Returns the next block the caller wants from the read stream or
316 * InvalidBlockNumber when done.
317 */
318static BlockNumber
320 void *per_buffer_data)
321{
322 TBMIterateResult *tbmres = per_buffer_data;
325 TableScanDesc sscan = &hscan->rs_base;
326
327 for (;;)
328 {
330
331 /* no more entries in the bitmap */
332 if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
333 return InvalidBlockNumber;
334
335 /*
336 * Ignore any claimed entries past what we think is the end of the
337 * relation. It may have been extended after the start of our scan (we
338 * only hold an AccessShareLock, and it could be inserts from this
339 * backend). We don't take this optimization in SERIALIZABLE
340 * isolation though, as we need to examine all invisible tuples
341 * reachable by the index.
342 */
344 tbmres->blockno >= hscan->rs_nblocks)
345 continue;
346
347 return tbmres->blockno;
348 }
349
350 /* not reachable */
351 Assert(false);
352}
353
354/* ----------------
355 * initscan - scan code common to heap_beginscan and heap_rescan
356 * ----------------
357 */
358static void
360{
362 bool allow_strat;
363 bool allow_sync;
364
365 /*
366 * Determine the number of blocks we have to scan.
367 *
368 * It is sufficient to do this once at scan start, since any tuples added
369 * while the scan is in progress will be invisible to my snapshot anyway.
370 * (That is not true when using a non-MVCC snapshot. However, we couldn't
371 * guarantee to return tuples added after scan start anyway, since they
372 * might go into pages we already scanned. To guarantee consistent
373 * results for a non-MVCC snapshot, the caller must hold some higher-level
374 * lock that ensures the interesting tuple(s) won't change.)
375 */
376 if (scan->rs_base.rs_parallel != NULL)
377 {
379 scan->rs_nblocks = bpscan->phs_nblocks;
380 }
381 else
383
384 /*
385 * If the table is large relative to NBuffers, use a bulk-read access
386 * strategy and enable synchronized scanning (see syncscan.c). Although
387 * the thresholds for these features could be different, we make them the
388 * same so that there are only two behaviors to tune rather than four.
389 * (However, some callers need to be able to disable one or both of these
390 * behaviors, independently of the size of the table; also there is a GUC
391 * variable that can disable synchronized scanning.)
392 *
393 * Note that table_block_parallelscan_initialize has a very similar test;
394 * if you change this, consider changing that one, too.
395 */
397 scan->rs_nblocks > NBuffers / 4)
398 {
400 allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
401 }
402 else
403 allow_strat = allow_sync = false;
404
405 if (allow_strat)
406 {
407 /* During a rescan, keep the previous strategy object. */
408 if (scan->rs_strategy == NULL)
410 }
411 else
412 {
413 if (scan->rs_strategy != NULL)
415 scan->rs_strategy = NULL;
416 }
417
418 if (scan->rs_base.rs_parallel != NULL)
419 {
420 /* For parallel scan, believe whatever ParallelTableScanDesc says. */
423 else
425
426 /*
427 * If not rescanning, initialize the startblock. Finding the actual
428 * start location is done in table_block_parallelscan_startblock_init,
429 * based on whether an alternative start location has been set with
430 * heap_setscanlimits, or using the syncscan location, when syncscan
431 * is enabled.
432 */
433 if (!keep_startblock)
435 }
436 else
437 {
438 if (keep_startblock)
439 {
440 /*
441 * When rescanning, we want to keep the previous startblock
442 * setting, so that rewinding a cursor doesn't generate surprising
443 * results. Reset the active syncscan setting, though.
444 */
447 else
449 }
451 {
454 }
455 else
456 {
458 scan->rs_startblock = 0;
459 }
460 }
461
463 scan->rs_inited = false;
464 scan->rs_ctup.t_data = NULL;
466 scan->rs_cbuf = InvalidBuffer;
468 scan->rs_ntuples = 0;
469 scan->rs_cindex = 0;
470
471 /*
472 * Initialize to ForwardScanDirection because it is most common and
473 * because heap scans go forward before going backward (e.g. CURSORs).
474 */
477
478 /* page-at-a-time fields are always invalid when not rs_inited */
479
480 /*
481 * copy the scan key, if appropriate
482 */
483 if (key != NULL && scan->rs_base.rs_nkeys > 0)
484 memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
485
486 /*
487 * Currently, we only have a stats counter for sequential heap scans (but
488 * e.g for bitmap scans the underlying bitmap index scans will be counted,
489 * and for sample scans we update stats for tuple fetches).
490 */
491 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
493}
494
495/*
496 * heap_setscanlimits - restrict range of a heapscan
497 *
498 * startBlk is the page to start at
499 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
500 */
501void
503{
505
506 Assert(!scan->rs_inited); /* else too late to change */
507 /* else rs_startblock is significant */
509
510 /* Check startBlk is valid (but allow case of zero blocks...) */
511 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
512
513 scan->rs_startblock = startBlk;
514 scan->rs_numblocks = numBlks;
515}
516
517/*
518 * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
519 * multiple times, with constant arguments for all_visible,
520 * check_serializable.
521 */
523static int
525 Page page, Buffer buffer,
526 BlockNumber block, int lines,
527 bool all_visible, bool check_serializable)
528{
529 Oid relid = RelationGetRelid(scan->rs_base.rs_rd);
530 int ntup = 0;
531 int nvis = 0;
533
534 /* page at a time should have been disabled otherwise */
535 Assert(IsMVCCSnapshot(snapshot));
536
537 /* first find all tuples on the page */
539 {
542
544 continue;
545
546 /*
547 * If the page is not all-visible or we need to check serializability,
548 * maintain enough state to be able to refind the tuple efficiently,
549 * without again first needing to fetch the item and then via that the
550 * tuple.
551 */
552 if (!all_visible || check_serializable)
553 {
554 tup = &batchmvcc.tuples[ntup];
555
557 tup->t_len = ItemIdGetLength(lpp);
558 tup->t_tableOid = relid;
559 ItemPointerSet(&(tup->t_self), block, lineoff);
560 }
561
562 /*
563 * If the page is all visible, these fields otherwise won't be
564 * populated in loop below.
565 */
566 if (all_visible)
567 {
569 {
570 batchmvcc.visible[ntup] = true;
571 }
572 scan->rs_vistuples[ntup] = lineoff;
573 }
574
575 ntup++;
576 }
577
579
580 /*
581 * Unless the page is all visible, test visibility for all tuples one go.
582 * That is considerably more efficient than calling
583 * HeapTupleSatisfiesMVCC() one-by-one.
584 */
585 if (all_visible)
586 nvis = ntup;
587 else
588 nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
589 ntup,
590 &batchmvcc,
591 scan->rs_vistuples);
592
593 /*
594 * So far we don't have batch API for testing serializabilty, so do so
595 * one-by-one.
596 */
598 {
599 for (int i = 0; i < ntup; i++)
600 {
602 scan->rs_base.rs_rd,
603 &batchmvcc.tuples[i],
604 buffer, snapshot);
605 }
606 }
607
608 return nvis;
609}
610
611/*
612 * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
613 *
614 * Preparation currently consists of 1. prune the scan's rs_cbuf page, and 2.
615 * fill the rs_vistuples[] array with the OffsetNumbers of visible tuples.
616 */
617void
619{
621 Buffer buffer = scan->rs_cbuf;
622 BlockNumber block = scan->rs_cblock;
623 Snapshot snapshot;
624 Page page;
625 int lines;
626 bool all_visible;
628
629 Assert(BufferGetBlockNumber(buffer) == block);
630
631 /* ensure we're not accidentally being used when not in pagemode */
633 snapshot = scan->rs_base.rs_snapshot;
634
635 /*
636 * Prune and repair fragmentation for the whole page, if possible.
637 */
638 heap_page_prune_opt(scan->rs_base.rs_rd, buffer, &scan->rs_vmbuffer,
639 sscan->rs_flags & SO_HINT_REL_READ_ONLY);
640
641 /*
642 * We must hold share lock on the buffer content while examining tuple
643 * visibility. Afterwards, however, the tuples we have found to be
644 * visible are guaranteed good as long as we hold the buffer pin.
645 */
647
648 page = BufferGetPage(buffer);
649 lines = PageGetMaxOffsetNumber(page);
650
651 /*
652 * If the all-visible flag indicates that all tuples on the page are
653 * visible to everyone, we can skip the per-tuple visibility tests.
654 *
655 * Note: In hot standby, a tuple that's already visible to all
656 * transactions on the primary might still be invisible to a read-only
657 * transaction in the standby. We partly handle this problem by tracking
658 * the minimum xmin of visible tuples as the cut-off XID while marking a
659 * page all-visible on the primary and WAL log that along with the
660 * visibility map SET operation. In hot standby, we wait for (or abort)
661 * all transactions that can potentially may not see one or more tuples on
662 * the page. That's how index-only scans work fine in hot standby. A
663 * crucial difference between index-only scans and heap scans is that the
664 * index-only scan completely relies on the visibility map where as heap
665 * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
666 * the page-level flag can be trusted in the same way, because it might
667 * get propagated somehow without being explicitly WAL-logged, e.g. via a
668 * full page write. Until we can prove that beyond doubt, let's check each
669 * tuple for visibility the hard way.
670 */
671 all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
674
675 /*
676 * We call page_collect_tuples() with constant arguments, to get the
677 * compiler to constant fold the constant arguments. Separate calls with
678 * constant arguments, rather than variables, are needed on several
679 * compilers to actually perform constant folding.
680 */
681 if (likely(all_visible))
682 {
684 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
685 block, lines, true, false);
686 else
687 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
688 block, lines, true, true);
689 }
690 else
691 {
693 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
694 block, lines, false, false);
695 else
696 scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
697 block, lines, false, true);
698 }
699
701}
702
703/*
704 * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
705 *
706 * Read the next block of the scan relation from the read stream and save it
707 * in the scan descriptor. It is already pinned.
708 */
709static inline void
711{
712 Assert(scan->rs_read_stream);
713
714 /* release previous scan buffer, if any */
715 if (BufferIsValid(scan->rs_cbuf))
716 {
717 ReleaseBuffer(scan->rs_cbuf);
718 scan->rs_cbuf = InvalidBuffer;
719 }
720
721 /*
722 * Be sure to check for interrupts at least once per page. Checks at
723 * higher code levels won't be able to stop a seqscan that encounters many
724 * pages' worth of consecutive dead tuples.
725 */
727
728 /*
729 * If the scan direction is changing, reset the prefetch block to the
730 * current block. Otherwise, we will incorrectly prefetch the blocks
731 * between the prefetch block and the current block again before
732 * prefetching blocks in the new, correct scan direction.
733 */
734 if (unlikely(scan->rs_dir != dir))
735 {
736 scan->rs_prefetch_block = scan->rs_cblock;
738 }
739
740 scan->rs_dir = dir;
741
743 if (BufferIsValid(scan->rs_cbuf))
745}
746
747/*
748 * heapgettup_initial_block - return the first BlockNumber to scan
749 *
750 * Returns InvalidBlockNumber when there are no blocks to scan. This can
751 * occur with empty tables and in parallel scans when parallel workers get all
752 * of the pages before we can get a chance to get our first page.
753 */
756{
757 Assert(!scan->rs_inited);
758 Assert(scan->rs_base.rs_parallel == NULL);
759
760 /* When there are no pages to scan, return InvalidBlockNumber */
761 if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
762 return InvalidBlockNumber;
763
764 if (ScanDirectionIsForward(dir))
765 {
766 return scan->rs_startblock;
767 }
768 else
769 {
770 /*
771 * Disable reporting to syncscan logic in a backwards scan; it's not
772 * very likely anyone else is doing the same thing at the same time,
773 * and much more likely that we'll just bollix things for forward
774 * scanners.
775 */
777
778 /*
779 * Start from last page of the scan. Ensure we take into account
780 * rs_numblocks if it's been adjusted by heap_setscanlimits().
781 */
782 if (scan->rs_numblocks != InvalidBlockNumber)
783 return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
784
785 if (scan->rs_startblock > 0)
786 return scan->rs_startblock - 1;
787
788 return scan->rs_nblocks - 1;
789 }
790}
791
792
793/*
794 * heapgettup_start_page - helper function for heapgettup()
795 *
796 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
797 * to the number of tuples on this page. Also set *lineoff to the first
798 * offset to scan with forward scans getting the first offset and backward
799 * getting the final offset on the page.
800 */
801static Page
804{
805 Page page;
806
807 Assert(scan->rs_inited);
809
810 /* Caller is responsible for ensuring buffer is locked if needed */
811 page = BufferGetPage(scan->rs_cbuf);
812
814
815 if (ScanDirectionIsForward(dir))
817 else
819
820 /* lineoff now references the physically previous or next tid */
821 return page;
822}
823
824
825/*
826 * heapgettup_continue_page - helper function for heapgettup()
827 *
828 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
829 * to the number of tuples left to scan on this page. Also set *lineoff to
830 * the next offset to scan according to the ScanDirection in 'dir'.
831 */
832static inline Page
835{
836 Page page;
837
838 Assert(scan->rs_inited);
840
841 /* Caller is responsible for ensuring buffer is locked if needed */
842 page = BufferGetPage(scan->rs_cbuf);
843
844 if (ScanDirectionIsForward(dir))
845 {
847 *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
848 }
849 else
850 {
851 /*
852 * The previous returned tuple may have been vacuumed since the
853 * previous scan when we use a non-MVCC snapshot, so we must
854 * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
855 */
857 *linesleft = *lineoff;
858 }
859
860 /* lineoff now references the physically previous or next tid */
861 return page;
862}
863
864/*
865 * heapgettup_advance_block - helper for heap_fetch_next_buffer()
866 *
867 * Given the current block number, the scan direction, and various information
868 * contained in the scan descriptor, calculate the BlockNumber to scan next
869 * and return it. If there are no further blocks to scan, return
870 * InvalidBlockNumber to indicate this fact to the caller.
871 *
872 * This should not be called to determine the initial block number -- only for
873 * subsequent blocks.
874 *
875 * This also adjusts rs_numblocks when a limit has been imposed by
876 * heap_setscanlimits().
877 */
878static inline BlockNumber
880{
881 Assert(scan->rs_base.rs_parallel == NULL);
882
884 {
885 block++;
886
887 /* wrap back to the start of the heap */
888 if (block >= scan->rs_nblocks)
889 block = 0;
890
891 /*
892 * Report our new scan position for synchronization purposes. We don't
893 * do that when moving backwards, however. That would just mess up any
894 * other forward-moving scanners.
895 *
896 * Note: we do this before checking for end of scan so that the final
897 * state of the position hint is back at the start of the rel. That's
898 * not strictly necessary, but otherwise when you run the same query
899 * multiple times the starting position would shift a little bit
900 * backwards on every invocation, which is confusing. We don't
901 * guarantee any specific ordering in general, though.
902 */
903 if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
904 ss_report_location(scan->rs_base.rs_rd, block);
905
906 /* we're done if we're back at where we started */
907 if (block == scan->rs_startblock)
908 return InvalidBlockNumber;
909
910 /* check if the limit imposed by heap_setscanlimits() is met */
911 if (scan->rs_numblocks != InvalidBlockNumber)
912 {
913 if (--scan->rs_numblocks == 0)
914 return InvalidBlockNumber;
915 }
916
917 return block;
918 }
919 else
920 {
921 /* we're done if the last block is the start position */
922 if (block == scan->rs_startblock)
923 return InvalidBlockNumber;
924
925 /* check if the limit imposed by heap_setscanlimits() is met */
926 if (scan->rs_numblocks != InvalidBlockNumber)
927 {
928 if (--scan->rs_numblocks == 0)
929 return InvalidBlockNumber;
930 }
931
932 /* wrap to the end of the heap when the last page was page 0 */
933 if (block == 0)
934 block = scan->rs_nblocks;
935
936 block--;
937
938 return block;
939 }
940}
941
942/* ----------------
943 * heapgettup - fetch next heap tuple
944 *
945 * Initialize the scan if not already done; then advance to the next
946 * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
947 * or set scan->rs_ctup.t_data = NULL if no more tuples.
948 *
949 * Note: the reason nkeys/key are passed separately, even though they are
950 * kept in the scan descriptor, is that the caller may not want us to check
951 * the scankeys.
952 *
953 * Note: when we fall off the end of the scan in either direction, we
954 * reset rs_inited. This means that a further request with the same
955 * scan direction will restart the scan, which is a bit odd, but a
956 * request with the opposite scan direction will start a fresh scan
957 * in the proper direction. The latter is required behavior for cursors,
958 * while the former case is generally undefined behavior in Postgres
959 * so we don't care too much.
960 * ----------------
961 */
962static void
964 ScanDirection dir,
965 int nkeys,
966 ScanKey key)
967{
968 HeapTuple tuple = &(scan->rs_ctup);
969 Page page;
971 int linesleft;
972
973 if (likely(scan->rs_inited))
974 {
975 /* continue from previously returned page/tuple */
977 page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
978 goto continue_page;
979 }
980
981 /*
982 * advance the scan until we find a qualifying tuple or run out of stuff
983 * to scan
984 */
985 while (true)
986 {
987 heap_fetch_next_buffer(scan, dir);
988
989 /* did we run out of blocks to scan? */
990 if (!BufferIsValid(scan->rs_cbuf))
991 break;
992
994
996 page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
998
999 /*
1000 * Only continue scanning the page while we have lines left.
1001 *
1002 * Note that this protects us from accessing line pointers past
1003 * PageGetMaxOffsetNumber(); both for forward scans when we resume the
1004 * table scan, and for when we start scanning a new page.
1005 */
1006 for (; linesleft > 0; linesleft--, lineoff += dir)
1007 {
1008 bool visible;
1010
1011 if (!ItemIdIsNormal(lpp))
1012 continue;
1013
1014 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1015 tuple->t_len = ItemIdGetLength(lpp);
1016 ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
1017
1018 visible = HeapTupleSatisfiesVisibility(tuple,
1019 scan->rs_base.rs_snapshot,
1020 scan->rs_cbuf);
1021
1023 tuple, scan->rs_cbuf,
1024 scan->rs_base.rs_snapshot);
1025
1026 /* skip tuples not visible to this snapshot */
1027 if (!visible)
1028 continue;
1029
1030 /* skip any tuples that don't match the scan key */
1031 if (key != NULL &&
1033 nkeys, key))
1034 continue;
1035
1037 scan->rs_coffset = lineoff;
1038 return;
1039 }
1040
1041 /*
1042 * if we get here, it means we've exhausted the items on this page and
1043 * it's time to move to the next.
1044 */
1046 }
1047
1048 /* end of scan */
1049 if (BufferIsValid(scan->rs_cbuf))
1050 ReleaseBuffer(scan->rs_cbuf);
1051
1052 scan->rs_cbuf = InvalidBuffer;
1055 tuple->t_data = NULL;
1056 scan->rs_inited = false;
1057}
1058
1059/* ----------------
1060 * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
1061 *
1062 * Same API as heapgettup, but used in page-at-a-time mode
1063 *
1064 * The internal logic is much the same as heapgettup's too, but there are some
1065 * differences: we do not take the buffer content lock (that only needs to
1066 * happen inside heap_prepare_pagescan), and we iterate through just the
1067 * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1068 * that lineindex is 0-based, where the corresponding loop variable lineoff in
1069 * heapgettup is 1-based.
1070 * ----------------
1071 */
1072static void
1074 ScanDirection dir,
1075 int nkeys,
1076 ScanKey key)
1077{
1078 HeapTuple tuple = &(scan->rs_ctup);
1079 Page page;
1082
1083 if (likely(scan->rs_inited))
1084 {
1085 /* continue from previously returned page/tuple */
1086 page = BufferGetPage(scan->rs_cbuf);
1087
1088 lineindex = scan->rs_cindex + dir;
1089 if (ScanDirectionIsForward(dir))
1090 linesleft = scan->rs_ntuples - lineindex;
1091 else
1092 linesleft = scan->rs_cindex;
1093 /* lineindex now references the next or previous visible tid */
1094
1095 goto continue_page;
1096 }
1097
1098 /*
1099 * advance the scan until we find a qualifying tuple or run out of stuff
1100 * to scan
1101 */
1102 while (true)
1103 {
1104 heap_fetch_next_buffer(scan, dir);
1105
1106 /* did we run out of blocks to scan? */
1107 if (!BufferIsValid(scan->rs_cbuf))
1108 break;
1109
1111
1112 /* prune the page and determine visible tuple offsets */
1114 page = BufferGetPage(scan->rs_cbuf);
1115 linesleft = scan->rs_ntuples;
1117
1118 /* block is the same for all tuples, set it once outside the loop */
1119 ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1120
1121 /* lineindex now references the next or previous visible tid */
1123
1124 for (; linesleft > 0; linesleft--, lineindex += dir)
1125 {
1126 ItemId lpp;
1128
1129 Assert(lineindex < scan->rs_ntuples);
1130 lineoff = scan->rs_vistuples[lineindex];
1131 lpp = PageGetItemId(page, lineoff);
1133
1134 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1135 tuple->t_len = ItemIdGetLength(lpp);
1137
1138 /* skip any tuples that don't match the scan key */
1139 if (key != NULL &&
1140 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1141 nkeys, key))
1142 continue;
1143
1144 scan->rs_cindex = lineindex;
1145 return;
1146 }
1147 }
1148
1149 /* end of scan */
1150 if (BufferIsValid(scan->rs_cbuf))
1151 ReleaseBuffer(scan->rs_cbuf);
1152 scan->rs_cbuf = InvalidBuffer;
1155 tuple->t_data = NULL;
1156 scan->rs_inited = false;
1157}
1158
1159
1160/* ----------------------------------------------------------------
1161 * heap access method interface
1162 * ----------------------------------------------------------------
1163 */
1164
1165
1168 int nkeys, ScanKey key,
1169 ParallelTableScanDesc parallel_scan,
1170 uint32 flags)
1171{
1172 HeapScanDesc scan;
1173
1174 /*
1175 * increment relation ref count while scanning relation
1176 *
1177 * This is just to make really sure the relcache entry won't go away while
1178 * the scan has a pointer to it. Caller should be holding the rel open
1179 * anyway, so this is redundant in all normal scenarios...
1180 */
1182
1183 /*
1184 * allocate and initialize scan descriptor
1185 */
1186 if (flags & SO_TYPE_BITMAPSCAN)
1187 {
1189
1190 /*
1191 * Bitmap Heap scans do not have any fields that a normal Heap Scan
1192 * does not have, so no special initializations required here.
1193 */
1194 scan = (HeapScanDesc) bscan;
1195 }
1196 else
1198
1199 scan->rs_base.rs_rd = relation;
1200 scan->rs_base.rs_snapshot = snapshot;
1201 scan->rs_base.rs_nkeys = nkeys;
1202 scan->rs_base.rs_flags = flags;
1203 scan->rs_base.rs_parallel = parallel_scan;
1204 scan->rs_base.rs_instrument = NULL;
1205 scan->rs_strategy = NULL; /* set in initscan */
1206 scan->rs_cbuf = InvalidBuffer;
1207
1208 /*
1209 * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1210 */
1211 if (!(snapshot && IsMVCCSnapshot(snapshot)))
1213
1214 /* Check that a historic snapshot is not used for non-catalog tables */
1215 if (snapshot &&
1216 IsHistoricMVCCSnapshot(snapshot) &&
1218 {
1219 ereport(ERROR,
1221 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1222 RelationGetRelationName(relation))));
1223 }
1224
1225 /*
1226 * For seqscan and sample scans in a serializable transaction, acquire a
1227 * predicate lock on the entire relation. This is required not only to
1228 * lock all the matching tuples, but also to conflict with new insertions
1229 * into the table. In an indexscan, we take page locks on the index pages
1230 * covering the range specified in the scan qual, but in a heap scan there
1231 * is nothing more fine-grained to lock. A bitmap scan is a different
1232 * story, there we have already scanned the index and locked the index
1233 * pages covering the predicate. But in that case we still have to lock
1234 * any matching heap tuples. For sample scan we could optimize the locking
1235 * to be at least page-level granularity, but we'd need to add per-tuple
1236 * locking for that.
1237 */
1239 {
1240 /*
1241 * Ensure a missing snapshot is noticed reliably, even if the
1242 * isolation mode means predicate locking isn't performed (and
1243 * therefore the snapshot isn't used here).
1244 */
1245 Assert(snapshot);
1246 PredicateLockRelation(relation, snapshot);
1247 }
1248
1249 /* we only need to set this up once */
1250 scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1251
1252 /*
1253 * Allocate memory to keep track of page allocation for parallel workers
1254 * when doing a parallel scan.
1255 */
1256 if (parallel_scan != NULL)
1258 else
1260
1261 /*
1262 * we do this here instead of in initscan() because heap_rescan also calls
1263 * initscan() and we don't want to allocate memory again
1264 */
1265 if (nkeys > 0)
1266 scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
1267 else
1268 scan->rs_base.rs_key = NULL;
1269
1270 initscan(scan, key, false);
1271
1272 scan->rs_read_stream = NULL;
1273
1274 /*
1275 * Set up a read stream for sequential scans and TID range scans. This
1276 * should be done after initscan() because initscan() allocates the
1277 * BufferAccessStrategy object passed to the read stream API.
1278 */
1279 if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1281 {
1283
1284 if (scan->rs_base.rs_parallel)
1286 else
1288
1289 /* ---
1290 * It is safe to use batchmode as the only locks taken by `cb`
1291 * are never taken while waiting for IO:
1292 * - SyncScanLock is used in the non-parallel case
1293 * - in the parallel case, only spinlocks and atomics are used
1294 * ---
1295 */
1298 scan->rs_strategy,
1299 scan->rs_base.rs_rd,
1301 cb,
1302 scan,
1303 0);
1304 }
1305 else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1306 {
1309 scan->rs_strategy,
1310 scan->rs_base.rs_rd,
1313 scan,
1314 sizeof(TBMIterateResult));
1315 }
1316
1317 /* enable read stream instrumentation */
1318 if ((flags & SO_SCAN_INSTRUMENT) && (scan->rs_read_stream != NULL))
1319 {
1322 &scan->rs_base.rs_instrument->io);
1323 }
1324
1325 scan->rs_vmbuffer = InvalidBuffer;
1326
1327 return (TableScanDesc) scan;
1328}
1329
1330void
1332 bool allow_strat, bool allow_sync, bool allow_pagemode)
1333{
1335
1336 if (set_params)
1337 {
1338 if (allow_strat)
1340 else
1342
1343 if (allow_sync)
1345 else
1347
1348 if (allow_pagemode && scan->rs_base.rs_snapshot &&
1351 else
1353 }
1354
1355 /*
1356 * unpin scan buffers
1357 */
1358 if (BufferIsValid(scan->rs_cbuf))
1359 {
1360 ReleaseBuffer(scan->rs_cbuf);
1361 scan->rs_cbuf = InvalidBuffer;
1362 }
1363
1364 if (BufferIsValid(scan->rs_vmbuffer))
1365 {
1367 scan->rs_vmbuffer = InvalidBuffer;
1368 }
1369
1370 /*
1371 * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1372 * additional data vs a normal HeapScan
1373 */
1374
1375 /*
1376 * The read stream is reset on rescan. This must be done before
1377 * initscan(), as some state referred to by read_stream_reset() is reset
1378 * in initscan().
1379 */
1380 if (scan->rs_read_stream)
1382
1383 /*
1384 * reinitialize scan descriptor
1385 */
1386 initscan(scan, key, true);
1387}
1388
1389void
1391{
1393
1394 /* Note: no locking manipulations needed */
1395
1396 /*
1397 * unpin scan buffers
1398 */
1399 if (BufferIsValid(scan->rs_cbuf))
1400 ReleaseBuffer(scan->rs_cbuf);
1401
1402 if (BufferIsValid(scan->rs_vmbuffer))
1404
1405 /*
1406 * Must free the read stream before freeing the BufferAccessStrategy.
1407 */
1408 if (scan->rs_read_stream)
1410
1411 /*
1412 * decrement relation reference count and free scan descriptor storage
1413 */
1415
1416 if (scan->rs_base.rs_key)
1417 pfree(scan->rs_base.rs_key);
1418
1419 if (scan->rs_strategy != NULL)
1421
1422 if (scan->rs_parallelworkerdata != NULL)
1424
1425 if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1427
1428 if (scan->rs_base.rs_instrument)
1430
1431 pfree(scan);
1432}
1433
1436{
1438
1439 /*
1440 * This is still widely used directly, without going through table AM, so
1441 * add a safety check. It's possible we should, at a later point,
1442 * downgrade this to an assert. The reason for checking the AM routine,
1443 * rather than the AM oid, is that this allows to write regression tests
1444 * that create another AM reusing the heap handler.
1445 */
1446 if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1447 ereport(ERROR,
1449 errmsg_internal("only heap AM is supported")));
1450
1451 /* Note: no locking manipulations needed */
1452
1454 heapgettup_pagemode(scan, direction,
1455 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1456 else
1457 heapgettup(scan, direction,
1458 scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1459
1460 if (scan->rs_ctup.t_data == NULL)
1461 return NULL;
1462
1463 /*
1464 * if we get here it means we have a new current scan tuple, so point to
1465 * the proper return buffer and return the tuple.
1466 */
1467
1469
1470 return &scan->rs_ctup;
1471}
1472
1473bool
1475{
1477
1478 /* Note: no locking manipulations needed */
1479
1480 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1481 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1482 else
1483 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1484
1485 if (scan->rs_ctup.t_data == NULL)
1486 {
1487 ExecClearTuple(slot);
1488 return false;
1489 }
1490
1491 /*
1492 * if we get here it means we have a new current scan tuple, so point to
1493 * the proper return buffer and return the tuple.
1494 */
1495
1497
1498 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1499 scan->rs_cbuf);
1500 return true;
1501}
1502
1503void
1506{
1512
1513 /*
1514 * For relations without any pages, we can simply leave the TID range
1515 * unset. There will be no tuples to scan, therefore no tuples outside
1516 * the given TID range.
1517 */
1518 if (scan->rs_nblocks == 0)
1519 return;
1520
1521 /*
1522 * Set up some ItemPointers which point to the first and last possible
1523 * tuples in the heap.
1524 */
1527
1528 /*
1529 * If the given maximum TID is below the highest possible TID in the
1530 * relation, then restrict the range to that, otherwise we scan to the end
1531 * of the relation.
1532 */
1535
1536 /*
1537 * If the given minimum TID is above the lowest possible TID in the
1538 * relation, then restrict the range to only scan for TIDs above that.
1539 */
1542
1543 /*
1544 * Check for an empty range and protect from would be negative results
1545 * from the numBlks calculation below.
1546 */
1548 {
1549 /* Set an empty range of blocks to scan */
1551 return;
1552 }
1553
1554 /*
1555 * Calculate the first block and the number of blocks we must scan. We
1556 * could be more aggressive here and perform some more validation to try
1557 * and further narrow the scope of blocks to scan by checking if the
1558 * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1559 * advance startBlk by one. Likewise, if highestItem has an offset of 0
1560 * we could scan one fewer blocks. However, such an optimization does not
1561 * seem worth troubling over, currently.
1562 */
1564
1567
1568 /* Set the start block and number of blocks to scan */
1570
1571 /* Finally, set the TID range in sscan */
1572 ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1573 ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1574}
1575
1576bool
1578 TupleTableSlot *slot)
1579{
1581 ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1582 ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1583
1584 /* Note: no locking manipulations needed */
1585 for (;;)
1586 {
1587 if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1588 heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1589 else
1590 heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1591
1592 if (scan->rs_ctup.t_data == NULL)
1593 {
1594 ExecClearTuple(slot);
1595 return false;
1596 }
1597
1598 /*
1599 * heap_set_tidrange will have used heap_setscanlimits to limit the
1600 * range of pages we scan to only ones that can contain the TID range
1601 * we're scanning for. Here we must filter out any tuples from these
1602 * pages that are outside of that range.
1603 */
1604 if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1605 {
1606 ExecClearTuple(slot);
1607
1608 /*
1609 * When scanning backwards, the TIDs will be in descending order.
1610 * Future tuples in this direction will be lower still, so we can
1611 * just return false to indicate there will be no more tuples.
1612 */
1613 if (ScanDirectionIsBackward(direction))
1614 return false;
1615
1616 continue;
1617 }
1618
1619 /*
1620 * Likewise for the final page, we must filter out TIDs greater than
1621 * maxtid.
1622 */
1623 if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1624 {
1625 ExecClearTuple(slot);
1626
1627 /*
1628 * When scanning forward, the TIDs will be in ascending order.
1629 * Future tuples in this direction will be higher still, so we can
1630 * just return false to indicate there will be no more tuples.
1631 */
1632 if (ScanDirectionIsForward(direction))
1633 return false;
1634 continue;
1635 }
1636
1637 break;
1638 }
1639
1640 /*
1641 * if we get here it means we have a new current scan tuple, so point to
1642 * the proper return buffer and return the tuple.
1643 */
1645
1646 ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1647 return true;
1648}
1649
1650/*
1651 * heap_fetch - retrieve tuple with given tid
1652 *
1653 * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1654 * the tuple, fill in the remaining fields of *tuple, and check the tuple
1655 * against the specified snapshot.
1656 *
1657 * If successful (tuple found and passes snapshot time qual), then *userbuf
1658 * is set to the buffer holding the tuple and true is returned. The caller
1659 * must unpin the buffer when done with the tuple.
1660 *
1661 * If the tuple is not found (ie, item number references a deleted slot),
1662 * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1663 * and false is returned.
1664 *
1665 * If the tuple is found but fails the time qual check, then the behavior
1666 * depends on the keep_buf parameter. If keep_buf is false, the results
1667 * are the same as for the tuple-not-found case. If keep_buf is true,
1668 * then tuple->t_data and *userbuf are returned as for the success case,
1669 * and again the caller must unpin the buffer; but false is returned.
1670 *
1671 * heap_fetch does not follow HOT chains: only the exact TID requested will
1672 * be fetched.
1673 *
1674 * It is somewhat inconsistent that we ereport() on invalid block number but
1675 * return false on invalid item number. There are a couple of reasons though.
1676 * One is that the caller can relatively easily check the block number for
1677 * validity, but cannot check the item number without reading the page
1678 * himself. Another is that when we are following a t_ctid link, we can be
1679 * reasonably confident that the page number is valid (since VACUUM shouldn't
1680 * truncate off the destination page without having killed the referencing
1681 * tuple first), but the item number might well not be good.
1682 */
1683bool
1685 Snapshot snapshot,
1686 HeapTuple tuple,
1687 Buffer *userbuf,
1688 bool keep_buf)
1689{
1690 ItemPointer tid = &(tuple->t_self);
1691 ItemId lp;
1692 Buffer buffer;
1693 Page page;
1694 OffsetNumber offnum;
1695 bool valid;
1696
1697 /*
1698 * Fetch and pin the appropriate page of the relation.
1699 */
1700 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1701
1702 /*
1703 * Need share lock on buffer to examine tuple commit status.
1704 */
1706 page = BufferGetPage(buffer);
1707
1708 /*
1709 * We'd better check for out-of-range offnum in case of VACUUM since the
1710 * TID was obtained.
1711 */
1712 offnum = ItemPointerGetOffsetNumber(tid);
1714 {
1715 UnlockReleaseBuffer(buffer);
1717 tuple->t_data = NULL;
1718 return false;
1719 }
1720
1721 /*
1722 * get the item line pointer corresponding to the requested tid
1723 */
1724 lp = PageGetItemId(page, offnum);
1725
1726 /*
1727 * Must check for deleted tuple.
1728 */
1729 if (!ItemIdIsNormal(lp))
1730 {
1731 UnlockReleaseBuffer(buffer);
1733 tuple->t_data = NULL;
1734 return false;
1735 }
1736
1737 /*
1738 * fill in *tuple fields
1739 */
1740 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1741 tuple->t_len = ItemIdGetLength(lp);
1742 tuple->t_tableOid = RelationGetRelid(relation);
1743
1744 /*
1745 * check tuple visibility, then release lock
1746 */
1747 valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1748
1749 if (valid)
1750 PredicateLockTID(relation, &(tuple->t_self), snapshot,
1752
1753 HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1754
1756
1757 if (valid)
1758 {
1759 /*
1760 * All checks passed, so return the tuple as valid. Caller is now
1761 * responsible for releasing the buffer.
1762 */
1763 *userbuf = buffer;
1764
1765 return true;
1766 }
1767
1768 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1769 if (keep_buf)
1770 *userbuf = buffer;
1771 else
1772 {
1773 ReleaseBuffer(buffer);
1775 tuple->t_data = NULL;
1776 }
1777
1778 return false;
1779}
1780
1781/*
1782 * heap_get_latest_tid - get the latest tid of a specified tuple
1783 *
1784 * Actually, this gets the latest version that is visible according to the
1785 * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1786 * possibly uncommitted version.
1787 *
1788 * *tid is both an input and an output parameter: it is updated to
1789 * show the latest version of the row. Note that it will not be changed
1790 * if no version of the row passes the snapshot test.
1791 */
1792void
1794 ItemPointer tid)
1795{
1796 Relation relation = sscan->rs_rd;
1797 Snapshot snapshot = sscan->rs_snapshot;
1798 ItemPointerData ctid;
1800
1801 /*
1802 * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1803 * Assume that t_ctid links are valid however - there shouldn't be invalid
1804 * ones in the table.
1805 */
1807
1808 /*
1809 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1810 * need to examine, and *tid is the TID we will return if ctid turns out
1811 * to be bogus.
1812 *
1813 * Note that we will loop until we reach the end of the t_ctid chain.
1814 * Depending on the snapshot passed, there might be at most one visible
1815 * version of the row, but we don't try to optimize for that.
1816 */
1817 ctid = *tid;
1818 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1819 for (;;)
1820 {
1821 Buffer buffer;
1822 Page page;
1823 OffsetNumber offnum;
1824 ItemId lp;
1825 HeapTupleData tp;
1826 bool valid;
1827
1828 /*
1829 * Read, pin, and lock the page.
1830 */
1831 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1833 page = BufferGetPage(buffer);
1834
1835 /*
1836 * Check for bogus item number. This is not treated as an error
1837 * condition because it can happen while following a t_ctid link. We
1838 * just assume that the prior tid is OK and return it unchanged.
1839 */
1840 offnum = ItemPointerGetOffsetNumber(&ctid);
1842 {
1843 UnlockReleaseBuffer(buffer);
1844 break;
1845 }
1846 lp = PageGetItemId(page, offnum);
1847 if (!ItemIdIsNormal(lp))
1848 {
1849 UnlockReleaseBuffer(buffer);
1850 break;
1851 }
1852
1853 /* OK to access the tuple */
1854 tp.t_self = ctid;
1855 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1856 tp.t_len = ItemIdGetLength(lp);
1857 tp.t_tableOid = RelationGetRelid(relation);
1858
1859 /*
1860 * After following a t_ctid link, we might arrive at an unrelated
1861 * tuple. Check for XMIN match.
1862 */
1865 {
1866 UnlockReleaseBuffer(buffer);
1867 break;
1868 }
1869
1870 /*
1871 * Check tuple visibility; if visible, set it as the new result
1872 * candidate.
1873 */
1874 valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1875 HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1876 if (valid)
1877 *tid = ctid;
1878
1879 /*
1880 * If there's a valid t_ctid link, follow it, else we're done.
1881 */
1882 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1886 {
1887 UnlockReleaseBuffer(buffer);
1888 break;
1889 }
1890
1891 ctid = tp.t_data->t_ctid;
1893 UnlockReleaseBuffer(buffer);
1894 } /* end of loop */
1895}
1896
1897
1898/*
1899 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1900 *
1901 * This is called after we have waited for the XMAX transaction to terminate.
1902 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1903 * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1904 * hint bit if possible --- but beware that that may not yet be possible,
1905 * if the transaction committed asynchronously.
1906 *
1907 * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1908 * even if it commits.
1909 *
1910 * Hence callers should look only at XMAX_INVALID.
1911 *
1912 * Note this is not allowed for tuples whose xmax is a multixact.
1913 */
1914static void
1916{
1919
1921 {
1922 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
1925 xid);
1926 else
1929 }
1930}
1931
1932
1933/*
1934 * GetBulkInsertState - prepare status object for a bulk insert
1935 */
1938{
1939 BulkInsertState bistate;
1940
1943 bistate->current_buf = InvalidBuffer;
1944 bistate->next_free = InvalidBlockNumber;
1945 bistate->last_free = InvalidBlockNumber;
1946 bistate->already_extended_by = 0;
1947 return bistate;
1948}
1949
1950/*
1951 * FreeBulkInsertState - clean up after finishing a bulk insert
1952 */
1953void
1955{
1956 if (bistate->current_buf != InvalidBuffer)
1957 ReleaseBuffer(bistate->current_buf);
1958 FreeAccessStrategy(bistate->strategy);
1959 pfree(bistate);
1960}
1961
1962/*
1963 * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
1964 */
1965void
1967{
1968 if (bistate->current_buf != InvalidBuffer)
1969 ReleaseBuffer(bistate->current_buf);
1970 bistate->current_buf = InvalidBuffer;
1971
1972 /*
1973 * Despite the name, we also reset bulk relation extension state.
1974 * Otherwise we can end up erroring out due to looking for free space in
1975 * ->next_free of one partition, even though ->next_free was set when
1976 * extending another partition. It could obviously also be bad for
1977 * efficiency to look at existing blocks at offsets from another
1978 * partition, even if we don't error out.
1979 */
1980 bistate->next_free = InvalidBlockNumber;
1981 bistate->last_free = InvalidBlockNumber;
1982}
1983
1984
1985/*
1986 * heap_insert - insert tuple into a heap
1987 *
1988 * The new tuple is stamped with current transaction ID and the specified
1989 * command ID.
1990 *
1991 * See table_tuple_insert for comments about most of the input flags, except
1992 * that this routine directly takes a tuple rather than a slot.
1993 *
1994 * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
1995 * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
1996 * implement table_tuple_insert_speculative().
1997 *
1998 * On return the header fields of *tup are updated to match the stored tuple;
1999 * in particular tup->t_self receives the actual TID where the tuple was
2000 * stored. But note that any toasting of fields within the tuple data is NOT
2001 * reflected into *tup.
2002 */
2003void
2006{
2009 Buffer buffer;
2010 Page page;
2011 Buffer vmbuffer = InvalidBuffer;
2012 bool all_visible_cleared = false;
2013
2014 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2017
2018 AssertHasSnapshotForToast(relation);
2019
2020 /*
2021 * Fill in tuple header fields and toast the tuple if necessary.
2022 *
2023 * Note: below this point, heaptup is the data we actually intend to store
2024 * into the relation; tup is the caller's original untoasted data.
2025 */
2026 heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2027
2028 /*
2029 * Find buffer to insert this tuple into. If the page is all visible,
2030 * this will also pin the requisite visibility map page.
2031 */
2032 buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2033 InvalidBuffer, options, bistate,
2034 &vmbuffer, NULL,
2035 0);
2036
2037 page = BufferGetPage(buffer);
2038
2039 /*
2040 * We're about to do the actual insert -- but check for conflict first, to
2041 * avoid possibly having to roll back work we've just done.
2042 *
2043 * This is safe without a recheck as long as there is no possibility of
2044 * another process scanning the page between this check and the insert
2045 * being visible to the scan (i.e., an exclusive buffer content lock is
2046 * continuously held from this point until the tuple insert is visible).
2047 *
2048 * For a heap insert, we only need to check for table-level SSI locks. Our
2049 * new tuple can't possibly conflict with existing tuple locks, and heap
2050 * page locks are only consolidated versions of tuple locks; they do not
2051 * lock "gaps" as index page locks do. So we don't need to specify a
2052 * buffer when making the call, which makes for a faster check.
2053 */
2055
2056 /* NO EREPORT(ERROR) from here till changes are logged */
2058
2059 RelationPutHeapTuple(relation, buffer, heaptup,
2061
2062 if (PageIsAllVisible(page))
2063 {
2064 all_visible_cleared = true;
2065 PageClearAllVisible(page);
2066 visibilitymap_clear(relation,
2068 vmbuffer, VISIBILITYMAP_VALID_BITS);
2069 }
2070
2071 /*
2072 * Set pd_prune_xid to trigger heap_page_prune_and_freeze() once the page
2073 * is full so that we can set the page all-visible in the VM on the next
2074 * page access.
2075 *
2076 * Setting pd_prune_xid is also handy if the inserting transaction
2077 * eventually aborts making this tuple DEAD and hence available for
2078 * pruning. If no other tuple in this page is UPDATEd/DELETEd, the aborted
2079 * tuple would never otherwise be pruned until next vacuum is triggered.
2080 *
2081 * Don't set it if we are in bootstrap mode or we are inserting a frozen
2082 * tuple, as there is no further pruning/freezing needed in those cases.
2083 */
2085 PageSetPrunable(page, xid);
2086
2087 MarkBufferDirty(buffer);
2088
2089 /* XLOG stuff */
2090 if (RelationNeedsWAL(relation))
2091 {
2095 uint8 info = XLOG_HEAP_INSERT;
2096 int bufflags = 0;
2097
2098 /*
2099 * If this is a catalog, we need to transmit combo CIDs to properly
2100 * decode, so log that as well.
2101 */
2103 log_heap_new_cid(relation, heaptup);
2104
2105 /*
2106 * If this is the single and first tuple on page, we can reinit the
2107 * page instead of restoring the whole thing. Set flag, and hide
2108 * buffer references from XLogInsert.
2109 */
2112 {
2113 info |= XLOG_HEAP_INIT_PAGE;
2115 }
2116
2117 xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2118 xlrec.flags = 0;
2124
2125 /*
2126 * For logical decoding, we need the tuple even if we're doing a full
2127 * page write, so make sure it's included even if we take a full-page
2128 * image. (XXX We could alternatively store a pointer into the FPW).
2129 */
2130 if (RelationIsLogicallyLogged(relation) &&
2132 {
2135
2136 if (IsToastRelation(relation))
2138 }
2139
2142
2143 xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2144 xlhdr.t_infomask = heaptup->t_data->t_infomask;
2145 xlhdr.t_hoff = heaptup->t_data->t_hoff;
2146
2147 /*
2148 * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2149 * write the whole page to the xlog, we don't need to store
2150 * xl_heap_header in the xlog.
2151 */
2154 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2156 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2158
2159 /* filtering by origin on a row level is much more efficient */
2161
2162 recptr = XLogInsert(RM_HEAP_ID, info);
2163
2164 PageSetLSN(page, recptr);
2165 }
2166
2168
2169 UnlockReleaseBuffer(buffer);
2170 if (vmbuffer != InvalidBuffer)
2171 ReleaseBuffer(vmbuffer);
2172
2173 /*
2174 * If tuple is cacheable, mark it for invalidation from the caches in case
2175 * we abort. Note it is OK to do this after releasing the buffer, because
2176 * the heaptup data structure is all in local memory, not in the shared
2177 * buffer.
2178 */
2180
2181 /* Note: speculative insertions are counted too, even if aborted later */
2182 pgstat_count_heap_insert(relation, 1);
2183
2184 /*
2185 * If heaptup is a private copy, release it. Don't forget to copy t_self
2186 * back to the caller's image, too.
2187 */
2188 if (heaptup != tup)
2189 {
2190 tup->t_self = heaptup->t_self;
2192 }
2193}
2194
2195/*
2196 * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2197 * tuple header fields and toasts the tuple if necessary. Returns a toasted
2198 * version of the tuple if it was toasted, or the original tuple if not. Note
2199 * that in any case, the header fields are also set in the original tuple.
2200 */
2201static HeapTuple
2204{
2205 /*
2206 * To allow parallel inserts, we need to ensure that they are safe to be
2207 * performed in workers. We have the infrastructure to allow parallel
2208 * inserts in general except for the cases where inserts generate a new
2209 * CommandId (eg. inserts into a table having a foreign key column).
2210 */
2211 if (IsParallelWorker())
2212 ereport(ERROR,
2214 errmsg("cannot insert tuples in a parallel worker")));
2215
2216 tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2217 tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2218 tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2219 HeapTupleHeaderSetXmin(tup->t_data, xid);
2222
2223 HeapTupleHeaderSetCmin(tup->t_data, cid);
2224 HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2225 tup->t_tableOid = RelationGetRelid(relation);
2226
2227 /*
2228 * If the new tuple is too big for storage or contains already toasted
2229 * out-of-line attributes from some other relation, invoke the toaster.
2230 */
2231 if (relation->rd_rel->relkind != RELKIND_RELATION &&
2232 relation->rd_rel->relkind != RELKIND_MATVIEW)
2233 {
2234 /* toast table entries should never be recursively toasted */
2236 return tup;
2237 }
2238 else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2239 return heap_toast_insert_or_update(relation, tup, NULL, options);
2240 else
2241 return tup;
2242}
2243
2244/*
2245 * Helper for heap_multi_insert() that computes the number of entire pages
2246 * that inserting the remaining heaptuples requires. Used to determine how
2247 * much the relation needs to be extended by.
2248 */
2249static int
2251{
2253 int npages = 1;
2254
2255 for (int i = done; i < ntuples; i++)
2256 {
2257 size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2258
2259 if (page_avail < tup_sz)
2260 {
2261 npages++;
2263 }
2264 page_avail -= tup_sz;
2265 }
2266
2267 return npages;
2268}
2269
2270/*
2271 * heap_multi_insert - insert multiple tuples into a heap
2272 *
2273 * This is like heap_insert(), but inserts multiple tuples in one operation.
2274 * That's faster than calling heap_insert() in a loop, because when multiple
2275 * tuples can be inserted on a single page, we can write just a single WAL
2276 * record covering all of them, and only need to lock/unlock the page once.
2277 *
2278 * Note: this leaks memory into the current memory context. You can create a
2279 * temporary context before calling this, if that's a problem.
2280 */
2281void
2282heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2284{
2287 int i;
2288 int ndone;
2290 Page page;
2291 Buffer vmbuffer = InvalidBuffer;
2292 bool needwal;
2296 bool starting_with_empty_page = false;
2297 int npages = 0;
2298 int npages_used = 0;
2299
2300 /* currently not needed (thus unsupported) for heap_multi_insert() */
2302
2303 AssertHasSnapshotForToast(relation);
2304
2305 needwal = RelationNeedsWAL(relation);
2308
2309 /* Toast and set header data in all the slots */
2310 heaptuples = palloc(ntuples * sizeof(HeapTuple));
2311 for (i = 0; i < ntuples; i++)
2312 {
2313 HeapTuple tuple;
2314
2315 tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2316 slots[i]->tts_tableOid = RelationGetRelid(relation);
2317 tuple->t_tableOid = slots[i]->tts_tableOid;
2318 heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2319 options);
2320 }
2321
2322 /*
2323 * We're about to do the actual inserts -- but check for conflict first,
2324 * to minimize the possibility of having to roll back work we've just
2325 * done.
2326 *
2327 * A check here does not definitively prevent a serialization anomaly;
2328 * that check MUST be done at least past the point of acquiring an
2329 * exclusive buffer content lock on every buffer that will be affected,
2330 * and MAY be done after all inserts are reflected in the buffers and
2331 * those locks are released; otherwise there is a race condition. Since
2332 * multiple buffers can be locked and unlocked in the loop below, and it
2333 * would not be feasible to identify and lock all of those buffers before
2334 * the loop, we must do a final check at the end.
2335 *
2336 * The check here could be omitted with no loss of correctness; it is
2337 * present strictly as an optimization.
2338 *
2339 * For heap inserts, we only need to check for table-level SSI locks. Our
2340 * new tuples can't possibly conflict with existing tuple locks, and heap
2341 * page locks are only consolidated versions of tuple locks; they do not
2342 * lock "gaps" as index page locks do. So we don't need to specify a
2343 * buffer when making the call, which makes for a faster check.
2344 */
2346
2347 ndone = 0;
2348 while (ndone < ntuples)
2349 {
2350 Buffer buffer;
2351 bool all_visible_cleared = false;
2352 bool all_frozen_set = false;
2353 int nthispage;
2354
2356
2357 /*
2358 * Compute number of pages needed to fit the to-be-inserted tuples in
2359 * the worst case. This will be used to determine how much to extend
2360 * the relation by in RelationGetBufferForTuple(), if needed. If we
2361 * filled a prior page from scratch, we can just update our last
2362 * computation, but if we started with a partially filled page,
2363 * recompute from scratch, the number of potentially required pages
2364 * can vary due to tuples needing to fit onto the page, page headers
2365 * etc.
2366 */
2367 if (ndone == 0 || !starting_with_empty_page)
2368 {
2369 npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2371 npages_used = 0;
2372 }
2373 else
2374 npages_used++;
2375
2376 /*
2377 * Find buffer where at least the next tuple will fit. If the page is
2378 * all-visible, this will also pin the requisite visibility map page.
2379 *
2380 * Also pin visibility map page if COPY FREEZE inserts tuples into an
2381 * empty page. See all_frozen_set below.
2382 */
2383 buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2384 InvalidBuffer, options, bistate,
2385 &vmbuffer, NULL,
2386 npages - npages_used);
2387 page = BufferGetPage(buffer);
2388
2390
2392 {
2393 all_frozen_set = true;
2394 /* Lock the vmbuffer before entering the critical section */
2396 }
2397
2398 /* NO EREPORT(ERROR) from here till changes are logged */
2400
2401 /*
2402 * RelationGetBufferForTuple has ensured that the first tuple fits.
2403 * Put that on the page, and then as many other tuples as fit.
2404 */
2405 RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2406
2407 /*
2408 * For logical decoding we need combo CIDs to properly decode the
2409 * catalog.
2410 */
2411 if (needwal && need_cids)
2412 log_heap_new_cid(relation, heaptuples[ndone]);
2413
2414 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2415 {
2417
2418 if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2419 break;
2420
2421 RelationPutHeapTuple(relation, buffer, heaptup, false);
2422
2423 /*
2424 * For logical decoding we need combo CIDs to properly decode the
2425 * catalog.
2426 */
2427 if (needwal && need_cids)
2428 log_heap_new_cid(relation, heaptup);
2429 }
2430
2431 /*
2432 * If the page is all visible, need to clear that, unless we're only
2433 * going to add further frozen rows to it.
2434 *
2435 * If we're only adding already frozen rows to a previously empty
2436 * page, mark it as all-frozen and update the visibility map. We're
2437 * already holding a pin on the vmbuffer.
2438 */
2440 {
2441 all_visible_cleared = true;
2442 PageClearAllVisible(page);
2443 visibilitymap_clear(relation,
2444 BufferGetBlockNumber(buffer),
2445 vmbuffer, VISIBILITYMAP_VALID_BITS);
2446 }
2447 else if (all_frozen_set)
2448 {
2449 PageSetAllVisible(page);
2450 PageClearPrunable(page);
2452 vmbuffer,
2455 relation->rd_locator);
2456 }
2457
2458 /*
2459 * Set pd_prune_xid. See heap_insert() for more on why we do this when
2460 * inserting tuples. This only makes sense if we aren't already
2461 * setting the page frozen in the VM and we're not in bootstrap mode.
2462 */
2464 PageSetPrunable(page, xid);
2465
2466 MarkBufferDirty(buffer);
2467
2468 /* XLOG stuff */
2469 if (needwal)
2470 {
2474 char *tupledata;
2475 int totaldatalen;
2476 char *scratchptr = scratch.data;
2477 bool init;
2478 int bufflags = 0;
2479
2480 /*
2481 * If the page was previously empty, we can reinit the page
2482 * instead of restoring the whole thing.
2483 */
2485
2486 /* allocate xl_heap_multi_insert struct from the scratch area */
2489
2490 /*
2491 * Allocate offsets array. Unless we're reinitializing the page,
2492 * in that case the tuples are stored in order starting at
2493 * FirstOffsetNumber and we don't need to store the offsets
2494 * explicitly.
2495 */
2496 if (!init)
2497 scratchptr += nthispage * sizeof(OffsetNumber);
2498
2499 /* the rest of the scratch space is used for tuple data */
2500 tupledata = scratchptr;
2501
2502 /* check that the mutually exclusive flags are not both set */
2504
2505 xlrec->flags = 0;
2508
2509 /*
2510 * We don't have to worry about including a conflict xid in the
2511 * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2512 * visibility rules.
2513 */
2514 if (all_frozen_set)
2516
2517 xlrec->ntuples = nthispage;
2518
2519 /*
2520 * Write out an xl_multi_insert_tuple and the tuple data itself
2521 * for each tuple.
2522 */
2523 for (i = 0; i < nthispage; i++)
2524 {
2526 xl_multi_insert_tuple *tuphdr;
2527 int datalen;
2528
2529 if (!init)
2530 xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2531 /* xl_multi_insert_tuple needs two-byte alignment. */
2533 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2534
2535 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2536 tuphdr->t_infomask = heaptup->t_data->t_infomask;
2537 tuphdr->t_hoff = heaptup->t_data->t_hoff;
2538
2539 /* write bitmap [+ padding] [+ oid] + data */
2540 datalen = heaptup->t_len - SizeofHeapTupleHeader;
2542 (char *) heaptup->t_data + SizeofHeapTupleHeader,
2543 datalen);
2544 tuphdr->datalen = datalen;
2545 scratchptr += datalen;
2546 }
2547 totaldatalen = scratchptr - tupledata;
2548 Assert((scratchptr - scratch.data) < BLCKSZ);
2549
2550 if (need_tuple_data)
2552
2553 /*
2554 * Signal that this is the last xl_heap_multi_insert record
2555 * emitted by this call to heap_multi_insert(). Needed for logical
2556 * decoding so it knows when to cleanup temporary data.
2557 */
2558 if (ndone + nthispage == ntuples)
2560
2561 if (init)
2562 {
2563 info |= XLOG_HEAP_INIT_PAGE;
2565 }
2566
2567 /*
2568 * If we're doing logical decoding, include the new tuple data
2569 * even if we take a full-page image of the page.
2570 */
2571 if (need_tuple_data)
2573
2575 XLogRegisterData(xlrec, tupledata - scratch.data);
2577 if (all_frozen_set)
2578 XLogRegisterBuffer(1, vmbuffer, 0);
2579
2580 XLogRegisterBufData(0, tupledata, totaldatalen);
2581
2582 /* filtering by origin on a row level is much more efficient */
2584
2585 recptr = XLogInsert(RM_HEAP2_ID, info);
2586
2587 PageSetLSN(page, recptr);
2588 if (all_frozen_set)
2589 {
2590 Assert(BufferIsDirty(vmbuffer));
2591 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2592 }
2593 }
2594
2596
2597 if (all_frozen_set)
2598 LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2599
2600 UnlockReleaseBuffer(buffer);
2601 ndone += nthispage;
2602
2603 /*
2604 * NB: Only release vmbuffer after inserting all tuples - it's fairly
2605 * likely that we'll insert into subsequent heap pages that are likely
2606 * to use the same vm page.
2607 */
2608 }
2609
2610 /* We're done with inserting all tuples, so release the last vmbuffer. */
2611 if (vmbuffer != InvalidBuffer)
2612 ReleaseBuffer(vmbuffer);
2613
2614 /*
2615 * We're done with the actual inserts. Check for conflicts again, to
2616 * ensure that all rw-conflicts in to these inserts are detected. Without
2617 * this final check, a sequential scan of the heap may have locked the
2618 * table after the "before" check, missing one opportunity to detect the
2619 * conflict, and then scanned the table before the new tuples were there,
2620 * missing the other chance to detect the conflict.
2621 *
2622 * For heap inserts, we only need to check for table-level SSI locks. Our
2623 * new tuples can't possibly conflict with existing tuple locks, and heap
2624 * page locks are only consolidated versions of tuple locks; they do not
2625 * lock "gaps" as index page locks do. So we don't need to specify a
2626 * buffer when making the call.
2627 */
2629
2630 /*
2631 * If tuples are cacheable, mark them for invalidation from the caches in
2632 * case we abort. Note it is OK to do this after releasing the buffer,
2633 * because the heaptuples data structure is all in local memory, not in
2634 * the shared buffer.
2635 */
2636 if (IsCatalogRelation(relation))
2637 {
2638 for (i = 0; i < ntuples; i++)
2640 }
2641
2642 /* copy t_self fields back to the caller's slots */
2643 for (i = 0; i < ntuples; i++)
2644 slots[i]->tts_tid = heaptuples[i]->t_self;
2645
2646 pgstat_count_heap_insert(relation, ntuples);
2647}
2648
2649/*
2650 * simple_heap_insert - insert a tuple
2651 *
2652 * Currently, this routine differs from heap_insert only in supplying
2653 * a default command ID and not allowing access to the speedup options.
2654 *
2655 * This should be used rather than using heap_insert directly in most places
2656 * where we are modifying system catalogs.
2657 */
2658void
2660{
2661 heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2662}
2663
2664/*
2665 * Given infomask/infomask2, compute the bits that must be saved in the
2666 * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2667 * xl_heap_lock_updated WAL records.
2668 *
2669 * See fix_infomask_from_infobits.
2670 */
2671static uint8
2673{
2674 return
2678 /* note we ignore HEAP_XMAX_SHR_LOCK here */
2680 ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2681 XLHL_KEYS_UPDATED : 0);
2682}
2683
2684/*
2685 * Given two versions of the same t_infomask for a tuple, compare them and
2686 * return whether the relevant status for a tuple Xmax has changed. This is
2687 * used after a buffer lock has been released and reacquired: we want to ensure
2688 * that the tuple state continues to be the same it was when we previously
2689 * examined it.
2690 *
2691 * Note the Xmax field itself must be compared separately.
2692 */
2693static inline bool
2695{
2696 const uint16 interesting =
2698
2699 if ((new_infomask & interesting) != (old_infomask & interesting))
2700 return true;
2701
2702 return false;
2703}
2704
2705/*
2706 * heap_delete - delete a tuple
2707 *
2708 * See table_tuple_delete() for an explanation of the parameters, except that
2709 * this routine directly takes a tuple rather than a slot.
2710 *
2711 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2712 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2713 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2714 * generated by another transaction).
2715 */
2719 bool wait, TM_FailureData *tmfd)
2720{
2723 ItemId lp;
2724 HeapTupleData tp;
2725 Page page;
2726 BlockNumber block;
2727 Buffer buffer;
2728 Buffer vmbuffer = InvalidBuffer;
2729 TransactionId new_xmax;
2734 bool have_tuple_lock = false;
2735 bool iscombo;
2736 bool all_visible_cleared = false;
2737 HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2738 bool old_key_copied = false;
2739
2741
2742 AssertHasSnapshotForToast(relation);
2743
2744 /*
2745 * Forbid this during a parallel operation, lest it allocate a combo CID.
2746 * Other workers might need that combo CID for visibility checks, and we
2747 * have no provision for broadcasting it to them.
2748 */
2749 if (IsInParallelMode())
2750 ereport(ERROR,
2752 errmsg("cannot delete tuples during a parallel operation")));
2753
2754 block = ItemPointerGetBlockNumber(tid);
2755 buffer = ReadBuffer(relation, block);
2756 page = BufferGetPage(buffer);
2757
2758 /*
2759 * Before locking the buffer, pin the visibility map page if it appears to
2760 * be necessary. Since we haven't got the lock yet, someone else might be
2761 * in the middle of changing this, so we'll need to recheck after we have
2762 * the lock.
2763 */
2764 if (PageIsAllVisible(page))
2765 visibilitymap_pin(relation, block, &vmbuffer);
2766
2768
2771
2772 tp.t_tableOid = RelationGetRelid(relation);
2773 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2774 tp.t_len = ItemIdGetLength(lp);
2775 tp.t_self = *tid;
2776
2777l1:
2778
2779 /*
2780 * If we didn't pin the visibility map page and the page has become all
2781 * visible while we were busy locking the buffer, we'll have to unlock and
2782 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2783 * unfortunate, but hopefully shouldn't happen often.
2784 */
2785 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2786 {
2788 visibilitymap_pin(relation, block, &vmbuffer);
2790 }
2791
2792 result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2793
2794 if (result == TM_Invisible)
2795 {
2796 UnlockReleaseBuffer(buffer);
2797 ereport(ERROR,
2799 errmsg("attempted to delete invisible tuple")));
2800 }
2801 else if (result == TM_BeingModified && wait)
2802 {
2805
2806 /* must copy state data before unlocking buffer */
2809
2810 /*
2811 * Sleep until concurrent transaction ends -- except when there's a
2812 * single locker and it's our own transaction. Note we don't care
2813 * which lock mode the locker has, because we need the strongest one.
2814 *
2815 * Before sleeping, we need to acquire tuple lock to establish our
2816 * priority for the tuple (see heap_lock_tuple). LockTuple will
2817 * release us when we are next-in-line for the tuple.
2818 *
2819 * If we are forced to "start over" below, we keep the tuple lock;
2820 * this arranges that we stay at the head of the line while rechecking
2821 * tuple state.
2822 */
2824 {
2825 bool current_is_member = false;
2826
2829 {
2831
2832 /*
2833 * Acquire the lock, if necessary (but skip it when we're
2834 * requesting a lock and already have one; avoids deadlock).
2835 */
2836 if (!current_is_member)
2839
2840 /* wait for multixact */
2842 relation, &(tp.t_self), XLTW_Delete,
2843 NULL);
2845
2846 /*
2847 * If xwait had just locked the tuple then some other xact
2848 * could update this tuple before we get to this point. Check
2849 * for xmax change, and start over if so.
2850 *
2851 * We also must start over if we didn't pin the VM page, and
2852 * the page has become all visible.
2853 */
2854 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2857 xwait))
2858 goto l1;
2859 }
2860
2861 /*
2862 * You might think the multixact is necessarily done here, but not
2863 * so: it could have surviving members, namely our own xact or
2864 * other subxacts of this backend. It is legal for us to delete
2865 * the tuple in either case, however (the latter case is
2866 * essentially a situation of upgrading our former shared lock to
2867 * exclusive). We don't bother changing the on-disk hint bits
2868 * since we are about to overwrite the xmax altogether.
2869 */
2870 }
2872 {
2873 /*
2874 * Wait for regular transaction to end; but first, acquire tuple
2875 * lock.
2876 */
2880 XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2882
2883 /*
2884 * xwait is done, but if xwait had just locked the tuple then some
2885 * other xact could update this tuple before we get to this point.
2886 * Check for xmax change, and start over if so.
2887 *
2888 * We also must start over if we didn't pin the VM page, and the
2889 * page has become all visible.
2890 */
2891 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2894 xwait))
2895 goto l1;
2896
2897 /* Otherwise check if it committed or aborted */
2898 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2899 }
2900
2901 /*
2902 * We may overwrite if previous xmax aborted, or if it committed but
2903 * only locked the tuple without updating it.
2904 */
2905 if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2908 result = TM_Ok;
2909 else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2911 else
2913 }
2914
2915 /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
2916 if (result != TM_Ok)
2917 {
2919 result == TM_Updated ||
2920 result == TM_Deleted ||
2925 }
2926
2928 {
2929 /* Perform additional check for transaction-snapshot mode RI updates */
2930 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2932 }
2933
2934 if (result != TM_Ok)
2935 {
2936 tmfd->ctid = tp.t_data->t_ctid;
2938 if (result == TM_SelfModified)
2940 else
2941 tmfd->cmax = InvalidCommandId;
2942 UnlockReleaseBuffer(buffer);
2943 if (have_tuple_lock)
2945 if (vmbuffer != InvalidBuffer)
2946 ReleaseBuffer(vmbuffer);
2947 return result;
2948 }
2949
2950 /*
2951 * We're about to do the actual delete -- check for conflict first, to
2952 * avoid possibly having to roll back work we've just done.
2953 *
2954 * This is safe without a recheck as long as there is no possibility of
2955 * another process scanning the page between this check and the delete
2956 * being visible to the scan (i.e., an exclusive buffer content lock is
2957 * continuously held from this point until the tuple delete is visible).
2958 */
2960
2961 /* replace cid with a combo CID if necessary */
2963
2964 /*
2965 * Compute replica identity tuple before entering the critical section so
2966 * we don't PANIC upon a memory allocation failure.
2967 */
2969 ExtractReplicaIdentity(relation, &tp, true, &old_key_copied) : NULL;
2970
2971 /*
2972 * If this is the first possibly-multixact-able operation in the current
2973 * transaction, set my per-backend OldestMemberMXactId setting. We can be
2974 * certain that the transaction will never become a member of any older
2975 * MultiXactIds than that. (We have to do this even if we end up just
2976 * using our own TransactionId below, since some other backend could
2977 * incorporate our XID into a MultiXact immediately afterwards.)
2978 */
2980
2983 xid, LockTupleExclusive, true,
2984 &new_xmax, &new_infomask, &new_infomask2);
2985
2987
2988 /*
2989 * If this transaction commits, the tuple will become DEAD sooner or
2990 * later. Set flag that this page is a candidate for pruning once our xid
2991 * falls below the OldestXmin horizon. If the transaction finally aborts,
2992 * the subsequent page pruning will be a no-op and the hint will be
2993 * cleared.
2994 */
2995 PageSetPrunable(page, xid);
2996
2997 if (PageIsAllVisible(page))
2998 {
2999 all_visible_cleared = true;
3000 PageClearAllVisible(page);
3001 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3002 vmbuffer, VISIBILITYMAP_VALID_BITS);
3003 }
3004
3005 /* store transaction information of xact deleting the tuple */
3011 HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3013 /* Make sure there is no forward chain link in t_ctid */
3014 tp.t_data->t_ctid = tp.t_self;
3015
3016 /* Signal that this is actually a move into another partition */
3017 if (changingPart)
3019
3020 MarkBufferDirty(buffer);
3021
3022 /*
3023 * XLOG stuff
3024 *
3025 * NB: heap_abort_speculative() uses the same xlog record and replay
3026 * routines.
3027 */
3028 if (RelationNeedsWAL(relation))
3029 {
3033
3034 /*
3035 * For logical decode we need combo CIDs to properly decode the
3036 * catalog
3037 */
3039 log_heap_new_cid(relation, &tp);
3040
3041 xlrec.flags = 0;
3044 if (changingPart)
3046 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3047 tp.t_data->t_infomask2);
3049 xlrec.xmax = new_xmax;
3050
3051 if (old_key_tuple != NULL)
3052 {
3053 if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3055 else
3057 }
3058
3059 /*
3060 * Mark the change as not-for-logical-decoding if caller requested so.
3061 *
3062 * (This is used for changes that affect relations not visible to
3063 * other transactions, such as the transient table during concurrent
3064 * repack.)
3065 */
3066 if (!walLogical)
3068
3071
3073
3074 /*
3075 * Log replica identity of the deleted tuple if there is one
3076 */
3077 if (old_key_tuple != NULL)
3078 {
3079 xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3080 xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3081 xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3082
3084 XLogRegisterData((char *) old_key_tuple->t_data
3086 old_key_tuple->t_len
3088 }
3089
3090 /* filtering by origin on a row level is much more efficient */
3092
3094
3095 PageSetLSN(page, recptr);
3096 }
3097
3099
3101
3102 if (vmbuffer != InvalidBuffer)
3103 ReleaseBuffer(vmbuffer);
3104
3105 /*
3106 * If the tuple has toasted out-of-line attributes, we need to delete
3107 * those items too. We have to do this before releasing the buffer
3108 * because we need to look at the contents of the tuple, but it's OK to
3109 * release the content lock on the buffer first.
3110 */
3111 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3112 relation->rd_rel->relkind != RELKIND_MATVIEW)
3113 {
3114 /* toast table entries should never be recursively toasted */
3116 }
3117 else if (HeapTupleHasExternal(&tp))
3118 heap_toast_delete(relation, &tp, false);
3119
3120 /*
3121 * Mark tuple for invalidation from system caches at next command
3122 * boundary. We have to do this before releasing the buffer because we
3123 * need to look at the contents of the tuple.
3124 */
3125 CacheInvalidateHeapTuple(relation, &tp, NULL);
3126
3127 /* Now we can release the buffer */
3128 ReleaseBuffer(buffer);
3129
3130 /*
3131 * Release the lmgr tuple lock, if we had it.
3132 */
3133 if (have_tuple_lock)
3135
3136 pgstat_count_heap_delete(relation);
3137
3140
3141 return TM_Ok;
3142}
3143
3144/*
3145 * simple_heap_delete - delete a tuple
3146 *
3147 * This routine may be used to delete a tuple when concurrent updates of
3148 * the target tuple are not expected (for example, because we have a lock
3149 * on the relation associated with the tuple). Any failure is reported
3150 * via ereport().
3151 */
3152void
3154{
3156 TM_FailureData tmfd;
3157
3158 result = heap_delete(relation, tid,
3159 GetCurrentCommandId(true),
3160 0,
3162 true /* wait for commit */ ,
3163 &tmfd);
3164 switch (result)
3165 {
3166 case TM_SelfModified:
3167 /* Tuple was already updated in current command? */
3168 elog(ERROR, "tuple already updated by self");
3169 break;
3170
3171 case TM_Ok:
3172 /* done successfully */
3173 break;
3174
3175 case TM_Updated:
3176 elog(ERROR, "tuple concurrently updated");
3177 break;
3178
3179 case TM_Deleted:
3180 elog(ERROR, "tuple concurrently deleted");
3181 break;
3182
3183 default:
3184 elog(ERROR, "unrecognized heap_delete status: %u", result);
3185 break;
3186 }
3187}
3188
3189/*
3190 * heap_update - replace a tuple
3191 *
3192 * See table_tuple_update() for an explanation of the parameters, except that
3193 * this routine directly takes a tuple rather than a slot.
3194 *
3195 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3196 * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3197 * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3198 * generated by another transaction).
3199 */
3203 TM_FailureData *tmfd, LockTupleMode *lockmode,
3205{
3214 ItemId lp;
3218 bool old_key_copied = false;
3220 Page page,
3221 newpage;
3222 BlockNumber block;
3224 Buffer buffer,
3225 newbuf,
3226 vmbuffer = InvalidBuffer,
3228 bool need_toast;
3230 pagefree;
3231 bool have_tuple_lock = false;
3232 bool iscombo;
3233 bool use_hot_update = false;
3234 bool summarized_update = false;
3235 bool key_intact;
3236 bool all_visible_cleared = false;
3237 bool all_visible_cleared_new = false;
3238 bool checked_lockers;
3239 bool locker_remains;
3240 bool id_has_external = false;
3247
3249
3250 /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3253
3254 AssertHasSnapshotForToast(relation);
3255
3256 /*
3257 * Forbid this during a parallel operation, lest it allocate a combo CID.
3258 * Other workers might need that combo CID for visibility checks, and we
3259 * have no provision for broadcasting it to them.
3260 */
3261 if (IsInParallelMode())
3262 ereport(ERROR,
3264 errmsg("cannot update tuples during a parallel operation")));
3265
3266#ifdef USE_ASSERT_CHECKING
3268#endif
3269
3270 /*
3271 * Fetch the list of attributes to be checked for various operations.
3272 *
3273 * For HOT considerations, this is wasted effort if we fail to update or
3274 * have to put the new tuple on a different page. But we must compute the
3275 * list before obtaining buffer lock --- in the worst case, if we are
3276 * doing an update on one of the relevant system catalogs, we could
3277 * deadlock if we try to fetch the list later. In any case, the relcache
3278 * caches the data so this is usually pretty cheap.
3279 *
3280 * We also need columns used by the replica identity and columns that are
3281 * considered the "key" of rows in the table.
3282 *
3283 * Note that we get copies of each bitmap, so we need not worry about
3284 * relcache flush happening midway through.
3285 */
3298
3300 INJECTION_POINT("heap_update-before-pin", NULL);
3301 buffer = ReadBuffer(relation, block);
3302 page = BufferGetPage(buffer);
3303
3304 /*
3305 * Before locking the buffer, pin the visibility map page if it appears to
3306 * be necessary. Since we haven't got the lock yet, someone else might be
3307 * in the middle of changing this, so we'll need to recheck after we have
3308 * the lock.
3309 */
3310 if (PageIsAllVisible(page))
3311 visibilitymap_pin(relation, block, &vmbuffer);
3312
3314
3316
3317 /*
3318 * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3319 * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3320 * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3321 * of which indicates concurrent pruning.
3322 *
3323 * Failing with TM_Updated would be most accurate. However, unlike other
3324 * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3325 * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3326 * does matter to SQL statements UPDATE and MERGE, those SQL statements
3327 * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3328 * TM_Updated and TM_Deleted affects only the wording of error messages.
3329 * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3330 * the specification of when tmfd->ctid is valid. Second, it creates
3331 * error log evidence that we took this branch.
3332 *
3333 * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3334 * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3335 * unrelated row, we'll fail with "duplicate key value violates unique".
3336 * XXX if otid is the live, newer version of the newtup row, we'll discard
3337 * changes originating in versions of this catalog row after the version
3338 * the caller got from syscache. See syscache-update-pruned.spec.
3339 */
3340 if (!ItemIdIsNormal(lp))
3341 {
3343
3344 UnlockReleaseBuffer(buffer);
3346 if (vmbuffer != InvalidBuffer)
3347 ReleaseBuffer(vmbuffer);
3348 tmfd->ctid = *otid;
3349 tmfd->xmax = InvalidTransactionId;
3350 tmfd->cmax = InvalidCommandId;
3352
3357 /* modified_attrs not yet initialized */
3359 return TM_Deleted;
3360 }
3361
3362 /*
3363 * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3364 * properly.
3365 */
3366 oldtup.t_tableOid = RelationGetRelid(relation);
3367 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3368 oldtup.t_len = ItemIdGetLength(lp);
3369 oldtup.t_self = *otid;
3370
3371 /* the new tuple is ready, except for this: */
3372 newtup->t_tableOid = RelationGetRelid(relation);
3373
3374 /*
3375 * Determine columns modified by the update. Additionally, identify
3376 * whether any of the unmodified replica identity key attributes in the
3377 * old tuple is externally stored or not. This is required because for
3378 * such attributes the flattened value won't be WAL logged as part of the
3379 * new tuple so we must include it as part of the old_key_tuple. See
3380 * ExtractReplicaIdentity.
3381 */
3383 id_attrs, &oldtup,
3385
3386 /*
3387 * If we're not updating any "key" column, we can grab a weaker lock type.
3388 * This allows for more concurrency when we are running simultaneously
3389 * with foreign key checks.
3390 *
3391 * Note that if a column gets detoasted while executing the update, but
3392 * the value ends up being the same, this test will fail and we will use
3393 * the stronger lock. This is acceptable; the important case to optimize
3394 * is updates that don't manipulate key columns, not those that
3395 * serendipitously arrive at the same key values.
3396 */
3398 {
3399 *lockmode = LockTupleNoKeyExclusive;
3401 key_intact = true;
3402
3403 /*
3404 * If this is the first possibly-multixact-able operation in the
3405 * current transaction, set my per-backend OldestMemberMXactId
3406 * setting. We can be certain that the transaction will never become a
3407 * member of any older MultiXactIds than that. (We have to do this
3408 * even if we end up just using our own TransactionId below, since
3409 * some other backend could incorporate our XID into a MultiXact
3410 * immediately afterwards.)
3411 */
3413 }
3414 else
3415 {
3416 *lockmode = LockTupleExclusive;
3418 key_intact = false;
3419 }
3420
3421 /*
3422 * Note: beyond this point, use oldtup not otid to refer to old tuple.
3423 * otid may very well point at newtup->t_self, which we will overwrite
3424 * with the new tuple's location, so there's great risk of confusion if we
3425 * use otid anymore.
3426 */
3427
3428l2:
3429 checked_lockers = false;
3430 locker_remains = false;
3432
3433 /* see below about the "no wait" case */
3434 Assert(result != TM_BeingModified || wait);
3435
3436 if (result == TM_Invisible)
3437 {
3438 UnlockReleaseBuffer(buffer);
3439 ereport(ERROR,
3441 errmsg("attempted to update invisible tuple")));
3442 }
3443 else if (result == TM_BeingModified && wait)
3444 {
3447 bool can_continue = false;
3448
3449 /*
3450 * XXX note that we don't consider the "no wait" case here. This
3451 * isn't a problem currently because no caller uses that case, but it
3452 * should be fixed if such a caller is introduced. It wasn't a
3453 * problem previously because this code would always wait, but now
3454 * that some tuple locks do not conflict with one of the lock modes we
3455 * use, it is possible that this case is interesting to handle
3456 * specially.
3457 *
3458 * This may cause failures with third-party code that calls
3459 * heap_update directly.
3460 */
3461
3462 /* must copy state data before unlocking buffer */
3464 infomask = oldtup.t_data->t_infomask;
3465
3466 /*
3467 * Now we have to do something about the existing locker. If it's a
3468 * multi, sleep on it; we might be awakened before it is completely
3469 * gone (or even not sleep at all in some cases); we need to preserve
3470 * it as locker, unless it is gone completely.
3471 *
3472 * If it's not a multi, we need to check for sleeping conditions
3473 * before actually going to sleep. If the update doesn't conflict
3474 * with the locks, we just continue without sleeping (but making sure
3475 * it is preserved).
3476 *
3477 * Before sleeping, we need to acquire tuple lock to establish our
3478 * priority for the tuple (see heap_lock_tuple). LockTuple will
3479 * release us when we are next-in-line for the tuple. Note we must
3480 * not acquire the tuple lock until we're sure we're going to sleep;
3481 * otherwise we're open for race conditions with other transactions
3482 * holding the tuple lock which sleep on us.
3483 *
3484 * If we are forced to "start over" below, we keep the tuple lock;
3485 * this arranges that we stay at the head of the line while rechecking
3486 * tuple state.
3487 */
3489 {
3491 int remain;
3492 bool current_is_member = false;
3493
3495 *lockmode, &current_is_member))
3496 {
3498
3499 /*
3500 * Acquire the lock, if necessary (but skip it when we're
3501 * requesting a lock and already have one; avoids deadlock).
3502 */
3503 if (!current_is_member)
3504 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3506
3507 /* wait for multixact */
3509 relation, &oldtup.t_self, XLTW_Update,
3510 &remain);
3511 checked_lockers = true;
3512 locker_remains = remain != 0;
3514
3515 /*
3516 * If xwait had just locked the tuple then some other xact
3517 * could update this tuple before we get to this point. Check
3518 * for xmax change, and start over if so.
3519 */
3520 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3521 infomask) ||
3523 xwait))
3524 goto l2;
3525 }
3526
3527 /*
3528 * Note that the multixact may not be done by now. It could have
3529 * surviving members; our own xact or other subxacts of this
3530 * backend, and also any other concurrent transaction that locked
3531 * the tuple with LockTupleKeyShare if we only got
3532 * LockTupleNoKeyExclusive. If this is the case, we have to be
3533 * careful to mark the updated tuple with the surviving members in
3534 * Xmax.
3535 *
3536 * Note that there could have been another update in the
3537 * MultiXact. In that case, we need to check whether it committed
3538 * or aborted. If it aborted we are safe to update it again;
3539 * otherwise there is an update conflict, and we have to return
3540 * TableTuple{Deleted, Updated} below.
3541 *
3542 * In the LockTupleExclusive case, we still need to preserve the
3543 * surviving members: those would include the tuple locks we had
3544 * before this one, which are important to keep in case this
3545 * subxact aborts.
3546 */
3547 if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3549 else
3551
3552 /*
3553 * There was no UPDATE in the MultiXact; or it aborted. No
3554 * TransactionIdIsInProgress() call needed here, since we called
3555 * MultiXactIdWait() above.
3556 */
3559 can_continue = true;
3560 }
3562 {
3563 /*
3564 * The only locker is ourselves; we can avoid grabbing the tuple
3565 * lock here, but must preserve our locking information.
3566 */
3567 checked_lockers = true;
3568 locker_remains = true;
3569 can_continue = true;
3570 }
3572 {
3573 /*
3574 * If it's just a key-share locker, and we're not changing the key
3575 * columns, we don't need to wait for it to end; but we need to
3576 * preserve it as locker.
3577 */
3578 checked_lockers = true;
3579 locker_remains = true;
3580 can_continue = true;
3581 }
3582 else
3583 {
3584 /*
3585 * Wait for regular transaction to end; but first, acquire tuple
3586 * lock.
3587 */
3589 heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3591 XactLockTableWait(xwait, relation, &oldtup.t_self,
3592 XLTW_Update);
3593 checked_lockers = true;
3595
3596 /*
3597 * xwait is done, but if xwait had just locked the tuple then some
3598 * other xact could update this tuple before we get to this point.
3599 * Check for xmax change, and start over if so.
3600 */
3601 if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3604 goto l2;
3605
3606 /* Otherwise check if it committed or aborted */
3607 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3608 if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3609 can_continue = true;
3610 }
3611
3612 if (can_continue)
3613 result = TM_Ok;
3614 else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3616 else
3618 }
3619
3620 /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3621 if (result != TM_Ok)
3622 {
3624 result == TM_Updated ||
3625 result == TM_Deleted ||
3627 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3629 !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3630 }
3631
3633 {
3634 /* Perform additional check for transaction-snapshot mode RI updates */
3637 }
3638
3639 if (result != TM_Ok)
3640 {
3641 tmfd->ctid = oldtup.t_data->t_ctid;
3642 tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3643 if (result == TM_SelfModified)
3644 tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3645 else
3646 tmfd->cmax = InvalidCommandId;
3647 UnlockReleaseBuffer(buffer);
3648 if (have_tuple_lock)
3649 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3650 if (vmbuffer != InvalidBuffer)
3651 ReleaseBuffer(vmbuffer);
3653
3660 return result;
3661 }
3662
3663 /*
3664 * If we didn't pin the visibility map page and the page has become all
3665 * visible while we were busy locking the buffer, or during some
3666 * subsequent window during which we had it unlocked, we'll have to unlock
3667 * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3668 * bit unfortunate, especially since we'll now have to recheck whether the
3669 * tuple has been locked or updated under us, but hopefully it won't
3670 * happen very often.
3671 */
3672 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3673 {
3675 visibilitymap_pin(relation, block, &vmbuffer);
3677 goto l2;
3678 }
3679
3680 /* Fill in transaction status data */
3681
3682 /*
3683 * If the tuple we're updating is locked, we need to preserve the locking
3684 * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3685 */
3687 oldtup.t_data->t_infomask,
3688 oldtup.t_data->t_infomask2,
3689 xid, *lockmode, true,
3692
3693 /*
3694 * And also prepare an Xmax value for the new copy of the tuple. If there
3695 * was no xmax previously, or there was one but all lockers are now gone,
3696 * then use InvalidTransactionId; otherwise, get the xmax from the old
3697 * tuple. (In rare cases that might also be InvalidTransactionId and yet
3698 * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3699 */
3700 if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3701 HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3704 else
3706
3708 {
3711 }
3712 else
3713 {
3714 /*
3715 * If we found a valid Xmax for the new tuple, then the infomask bits
3716 * to use on the new tuple depend on what was there on the old one.
3717 * Note that since we're doing an update, the only possibility is that
3718 * the lockers had FOR KEY SHARE lock.
3719 */
3720 if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3721 {
3724 }
3725 else
3726 {
3729 }
3730 }
3731
3732 /*
3733 * Prepare the new tuple with the appropriate initial values of Xmin and
3734 * Xmax, as well as initial infomask bits as computed above.
3735 */
3736 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3737 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3738 HeapTupleHeaderSetXmin(newtup->t_data, xid);
3740 newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3741 newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3743
3744 /*
3745 * Replace cid with a combo CID if necessary. Note that we already put
3746 * the plain cid into the new tuple.
3747 */
3749
3750 /*
3751 * If the toaster needs to be activated, OR if the new tuple will not fit
3752 * on the same page as the old, then we need to release the content lock
3753 * (but not the pin!) on the old tuple's buffer while we are off doing
3754 * TOAST and/or table-file-extension work. We must mark the old tuple to
3755 * show that it's locked, else other processes may try to update it
3756 * themselves.
3757 *
3758 * We need to invoke the toaster if there are already any out-of-line
3759 * toasted values present, or if the new tuple is over-threshold.
3760 */
3761 if (relation->rd_rel->relkind != RELKIND_RELATION &&
3762 relation->rd_rel->relkind != RELKIND_MATVIEW)
3763 {
3764 /* toast table entries should never be recursively toasted */
3767 need_toast = false;
3768 }
3769 else
3772 newtup->t_len > TOAST_TUPLE_THRESHOLD);
3773
3775
3776 newtupsize = MAXALIGN(newtup->t_len);
3777
3779 {
3783 bool cleared_all_frozen = false;
3784
3785 /*
3786 * To prevent concurrent sessions from updating the tuple, we have to
3787 * temporarily mark it locked, while we release the page-level lock.
3788 *
3789 * To satisfy the rule that any xid potentially appearing in a buffer
3790 * written out to disk, we unfortunately have to WAL log this
3791 * temporary modification. We can reuse xl_heap_lock for this
3792 * purpose. If we crash/error before following through with the
3793 * actual update, xmax will be of an aborted transaction, allowing
3794 * other sessions to proceed.
3795 */
3796
3797 /*
3798 * Compute xmax / infomask appropriate for locking the tuple. This has
3799 * to be done separately from the combo that's going to be used for
3800 * updating, because the potentially created multixact would otherwise
3801 * be wrong.
3802 */
3804 oldtup.t_data->t_infomask,
3805 oldtup.t_data->t_infomask2,
3806 xid, *lockmode, false,
3809
3811
3813
3814 /* Clear obsolete visibility flags ... */
3815 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3816 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3818 /* ... and store info about transaction updating this tuple */
3821 oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3822 oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3824
3825 /* temporarily make it look not-updated, but locked */
3826 oldtup.t_data->t_ctid = oldtup.t_self;
3827
3828 /*
3829 * Clear all-frozen bit on visibility map if needed. We could
3830 * immediately reset ALL_VISIBLE, but given that the WAL logging
3831 * overhead would be unchanged, that doesn't seem necessarily
3832 * worthwhile.
3833 */
3834 if (PageIsAllVisible(page) &&
3835 visibilitymap_clear(relation, block, vmbuffer,
3837 cleared_all_frozen = true;
3838
3839 MarkBufferDirty(buffer);
3840
3841 if (RelationNeedsWAL(relation))
3842 {
3845
3848
3849 xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3851 xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3852 oldtup.t_data->t_infomask2);
3853 xlrec.flags =
3857 PageSetLSN(page, recptr);
3858 }
3859
3861
3863
3864 /*
3865 * Let the toaster do its thing, if needed.
3866 *
3867 * Note: below this point, heaptup is the data we actually intend to
3868 * store into the relation; newtup is the caller's original untoasted
3869 * data.
3870 */
3871 if (need_toast)
3872 {
3873 /* Note we always use WAL and FSM during updates */
3875 newtupsize = MAXALIGN(heaptup->t_len);
3876 }
3877 else
3878 heaptup = newtup;
3879
3880 /*
3881 * Now, do we need a new page for the tuple, or not? This is a bit
3882 * tricky since someone else could have added tuples to the page while
3883 * we weren't looking. We have to recheck the available space after
3884 * reacquiring the buffer lock. But don't bother to do that if the
3885 * former amount of free space is still not enough; it's unlikely
3886 * there's more free now than before.
3887 *
3888 * What's more, if we need to get a new page, we will need to acquire
3889 * buffer locks on both old and new pages. To avoid deadlock against
3890 * some other backend trying to get the same two locks in the other
3891 * order, we must be consistent about the order we get the locks in.
3892 * We use the rule "lock the lower-numbered page of the relation
3893 * first". To implement this, we must do RelationGetBufferForTuple
3894 * while not holding the lock on the old page, and we must rely on it
3895 * to get the locks on both pages in the correct order.
3896 *
3897 * Another consideration is that we need visibility map page pin(s) if
3898 * we will have to clear the all-visible flag on either page. If we
3899 * call RelationGetBufferForTuple, we rely on it to acquire any such
3900 * pins; but if we don't, we have to handle that here. Hence we need
3901 * a loop.
3902 */
3903 for (;;)
3904 {
3905 if (newtupsize > pagefree)
3906 {
3907 /* It doesn't fit, must use RelationGetBufferForTuple. */
3908 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3909 buffer, 0, NULL,
3910 &vmbuffer_new, &vmbuffer,
3911 0);
3912 /* We're all done. */
3913 break;
3914 }
3915 /* Acquire VM page pin if needed and we don't have it. */
3916 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3917 visibilitymap_pin(relation, block, &vmbuffer);
3918 /* Re-acquire the lock on the old tuple's page. */
3920 /* Re-check using the up-to-date free space */
3922 if (newtupsize > pagefree ||
3923 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
3924 {
3925 /*
3926 * Rats, it doesn't fit anymore, or somebody just now set the
3927 * all-visible flag. We must now unlock and loop to avoid
3928 * deadlock. Fortunately, this path should seldom be taken.
3929 */
3931 }
3932 else
3933 {
3934 /* We're all done. */
3935 newbuf = buffer;
3936 break;
3937 }
3938 }
3939 }
3940 else
3941 {
3942 /* No TOAST work needed, and it'll fit on same page */
3943 newbuf = buffer;
3944 heaptup = newtup;
3945 }
3946
3948
3949 /*
3950 * We're about to do the actual update -- check for conflict first, to
3951 * avoid possibly having to roll back work we've just done.
3952 *
3953 * This is safe without a recheck as long as there is no possibility of
3954 * another process scanning the pages between this check and the update
3955 * being visible to the scan (i.e., exclusive buffer content lock(s) are
3956 * continuously held from this point until the tuple update is visible).
3957 *
3958 * For the new tuple the only check needed is at the relation level, but
3959 * since both tuples are in the same relation and the check for oldtup
3960 * will include checking the relation level, there is no benefit to a
3961 * separate check for the new tuple.
3962 */
3963 CheckForSerializableConflictIn(relation, &oldtup.t_self,
3964 BufferGetBlockNumber(buffer));
3965
3966 /*
3967 * At this point newbuf and buffer are both pinned and locked, and newbuf
3968 * has enough space for the new tuple. If they are the same buffer, only
3969 * one pin is held.
3970 */
3971
3972 if (newbuf == buffer)
3973 {
3974 /*
3975 * Since the new tuple is going into the same page, we might be able
3976 * to do a HOT update. Check if any of the index columns have been
3977 * changed.
3978 */
3980 {
3981 use_hot_update = true;
3982
3983 /*
3984 * If none of the columns that are used in hot-blocking indexes
3985 * were updated, we can apply HOT, but we do still need to check
3986 * if we need to update the summarizing indexes, and update those
3987 * indexes if the columns were updated, or we may fail to detect
3988 * e.g. value bound changes in BRIN minmax indexes.
3989 */
3991 summarized_update = true;
3992 }
3993 }
3994 else
3995 {
3996 /* Set a hint that the old page could use prune/defrag */
3997 PageSetFull(page);
3998 }
3999
4000 /*
4001 * Compute replica identity tuple before entering the critical section so
4002 * we don't PANIC upon a memory allocation failure.
4003 * ExtractReplicaIdentity() will return NULL if nothing needs to be
4004 * logged. Pass old key required as true only if the replica identity key
4005 * columns are modified or it has external data.
4006 */
4011
4012 /* NO EREPORT(ERROR) from here till changes are logged */
4014
4015 /*
4016 * If this transaction commits, the old tuple will become DEAD sooner or
4017 * later. Set flag that this page is a candidate for pruning once our xid
4018 * falls below the OldestXmin horizon. If the transaction finally aborts,
4019 * the subsequent page pruning will be a no-op and the hint will be
4020 * cleared.
4021 *
4022 * We set the new page prunable as well. See heap_insert() for more on why
4023 * we do this when inserting tuples.
4024 */
4025 PageSetPrunable(page, xid);
4026 if (newbuf != buffer)
4028
4029 if (use_hot_update)
4030 {
4031 /* Mark the old tuple as HOT-updated */
4033 /* And mark the new tuple as heap-only */
4035 /* Mark the caller's copy too, in case different from heaptup */
4037 }
4038 else
4039 {
4040 /* Make sure tuples are correctly marked as not-HOT */
4044 }
4045
4046 RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4047
4048
4049 /* Clear obsolete visibility flags, possibly set by ourselves above... */
4050 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4051 oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4052 /* ... and store info about transaction updating this tuple */
4055 oldtup.t_data->t_infomask |= infomask_old_tuple;
4056 oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4058
4059 /* record address of new tuple in t_ctid of old one */
4060 oldtup.t_data->t_ctid = heaptup->t_self;
4061
4062 /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4063 if (PageIsAllVisible(page))
4064 {
4065 all_visible_cleared = true;
4066 PageClearAllVisible(page);
4067 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4068 vmbuffer, VISIBILITYMAP_VALID_BITS);
4069 }
4070 if (newbuf != buffer && PageIsAllVisible(newpage))
4071 {
4076 }
4077
4078 if (newbuf != buffer)
4080 MarkBufferDirty(buffer);
4081
4082 /* XLOG stuff */
4083 if (RelationNeedsWAL(relation))
4084 {
4086
4087 /*
4088 * For logical decoding we need combo CIDs to properly decode the
4089 * catalog.
4090 */
4092 {
4093 log_heap_new_cid(relation, &oldtup);
4094 log_heap_new_cid(relation, heaptup);
4095 }
4096
4097 recptr = log_heap_update(relation, buffer,
4102 walLogical);
4103 if (newbuf != buffer)
4104 {
4106 }
4107 PageSetLSN(page, recptr);
4108 }
4109
4111
4112 if (newbuf != buffer)
4115
4116 /*
4117 * Mark old tuple for invalidation from system caches at next command
4118 * boundary, and mark the new tuple for invalidation in case we abort. We
4119 * have to do this before releasing the buffer because oldtup is in the
4120 * buffer. (heaptup is all in local memory, but it's necessary to process
4121 * both tuple versions in one call to inval.c so we can avoid redundant
4122 * sinval messages.)
4123 */
4125
4126 /* Now we can release the buffer(s) */
4127 if (newbuf != buffer)
4129 ReleaseBuffer(buffer);
4132 if (BufferIsValid(vmbuffer))
4133 ReleaseBuffer(vmbuffer);
4134
4135 /*
4136 * Release the lmgr tuple lock, if we had it.
4137 */
4138 if (have_tuple_lock)
4139 UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4140
4141 pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4142
4143 /*
4144 * If heaptup is a private copy, release it. Don't forget to copy t_self
4145 * back to the caller's image, too.
4146 */
4147 if (heaptup != newtup)
4148 {
4149 newtup->t_self = heaptup->t_self;
4151 }
4152
4153 /*
4154 * If it is a HOT update, the update may still need to update summarized
4155 * indexes, lest we fail to update those summaries and get incorrect
4156 * results (for example, minmax bounds of the block may change with this
4157 * update).
4158 */
4159 if (use_hot_update)
4160 {
4163 else
4165 }
4166 else
4168
4171
4178
4179 return TM_Ok;
4180}
4181
4182#ifdef USE_ASSERT_CHECKING
4183/*
4184 * Confirm adequate lock held during heap_update(), per rules from
4185 * README.tuplock section "Locking to write inplace-updated tables".
4186 */
4187static void
4189 const ItemPointerData *otid,
4191{
4192 /* LOCKTAG_TUPLE acceptable for any catalog */
4193 switch (RelationGetRelid(relation))
4194 {
4195 case RelationRelationId:
4196 case DatabaseRelationId:
4197 {
4199
4201 relation->rd_lockInfo.lockRelId.dbId,
4202 relation->rd_lockInfo.lockRelId.relId,
4206 return;
4207 }
4208 break;
4209 default:
4210 Assert(!IsInplaceUpdateRelation(relation));
4211 return;
4212 }
4213
4214 switch (RelationGetRelid(relation))
4215 {
4216 case RelationRelationId:
4217 {
4218 /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4220 Oid relid = classForm->oid;
4221 Oid dbid;
4222 LOCKTAG tag;
4223
4224 if (IsSharedRelation(relid))
4225 dbid = InvalidOid;
4226 else
4227 dbid = MyDatabaseId;
4228
4229 if (classForm->relkind == RELKIND_INDEX)
4230 {
4231 Relation irel = index_open(relid, AccessShareLock);
4232
4233 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4235 }
4236 else
4237 SET_LOCKTAG_RELATION(tag, dbid, relid);
4238
4239 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4240 !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4241 elog(WARNING,
4242 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4243 NameStr(classForm->relname),
4244 relid,
4245 classForm->relkind,
4248 }
4249 break;
4250 case DatabaseRelationId:
4251 {
4252 /* LOCKTAG_TUPLE required */
4254
4255 elog(WARNING,
4256 "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4257 NameStr(dbForm->datname),
4258 dbForm->oid,
4261 }
4262 break;
4263 }
4264}
4265
4266/*
4267 * Confirm adequate relation lock held, per rules from README.tuplock section
4268 * "Locking to write inplace-updated tables".
4269 */
4270static void
4272{
4274 Oid relid = classForm->oid;
4275 Oid dbid;
4276 LOCKTAG tag;
4277
4278 if (IsSharedRelation(relid))
4279 dbid = InvalidOid;
4280 else
4281 dbid = MyDatabaseId;
4282
4283 if (classForm->relkind == RELKIND_INDEX)
4284 {
4285 Relation irel = index_open(relid, AccessShareLock);
4286
4287 SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4289 }
4290 else
4291 SET_LOCKTAG_RELATION(tag, dbid, relid);
4292
4293 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4294 elog(WARNING,
4295 "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4296 NameStr(classForm->relname),
4297 relid,
4298 classForm->relkind,
4301}
4302#endif
4303
4304/*
4305 * Check if the specified attribute's values are the same. Subroutine for
4306 * HeapDetermineColumnsInfo.
4307 */
4308static bool
4310 bool isnull1, bool isnull2)
4311{
4312 /*
4313 * If one value is NULL and other is not, then they are certainly not
4314 * equal
4315 */
4316 if (isnull1 != isnull2)
4317 return false;
4318
4319 /*
4320 * If both are NULL, they can be considered equal.
4321 */
4322 if (isnull1)
4323 return true;
4324
4325 /*
4326 * We do simple binary comparison of the two datums. This may be overly
4327 * strict because there can be multiple binary representations for the
4328 * same logical value. But we should be OK as long as there are no false
4329 * positives. Using a type-specific equality operator is messy because
4330 * there could be multiple notions of equality in different operator
4331 * classes; furthermore, we cannot safely invoke user-defined functions
4332 * while holding exclusive buffer lock.
4333 */
4334 if (attrnum <= 0)
4335 {
4336 /* The only allowed system columns are OIDs, so do this */
4338 }
4339 else
4340 {
4341 CompactAttribute *att;
4342
4344 att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4345 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4346 }
4347}
4348
4349/*
4350 * Check which columns are being updated.
4351 *
4352 * Given an updated tuple, determine (and return into the output bitmapset),
4353 * from those listed as interesting, the set of columns that changed.
4354 *
4355 * has_external indicates if any of the unmodified attributes (from those
4356 * listed as interesting) of the old tuple is a member of external_cols and is
4357 * stored externally.
4358 */
4359static Bitmapset *
4364 bool *has_external)
4365{
4366 int attidx;
4368 TupleDesc tupdesc = RelationGetDescr(relation);
4369
4370 attidx = -1;
4371 while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4372 {
4373 /* attidx is zero-based, attrnum is the normal attribute number */
4375 Datum value1,
4376 value2;
4377 bool isnull1,
4378 isnull2;
4379
4380 /*
4381 * If it's a whole-tuple reference, say "not equal". It's not really
4382 * worth supporting this case, since it could only succeed after a
4383 * no-op update, which is hardly a case worth optimizing for.
4384 */
4385 if (attrnum == 0)
4386 {
4387 modified = bms_add_member(modified, attidx);
4388 continue;
4389 }
4390
4391 /*
4392 * Likewise, automatically say "not equal" for any system attribute
4393 * other than tableOID; we cannot expect these to be consistent in a
4394 * HOT chain, or even to be set correctly yet in the new tuple.
4395 */
4396 if (attrnum < 0)
4397 {
4398 if (attrnum != TableOidAttributeNumber)
4399 {
4400 modified = bms_add_member(modified, attidx);
4401 continue;
4402 }
4403 }
4404
4405 /*
4406 * Extract the corresponding values. XXX this is pretty inefficient
4407 * if there are many indexed columns. Should we do a single
4408 * heap_deform_tuple call on each tuple, instead? But that doesn't
4409 * work for system columns ...
4410 */
4411 value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4412 value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4413
4414 if (!heap_attr_equals(tupdesc, attrnum, value1,
4415 value2, isnull1, isnull2))
4416 {
4417 modified = bms_add_member(modified, attidx);
4418 continue;
4419 }
4420
4421 /*
4422 * No need to check attributes that can't be stored externally. Note
4423 * that system attributes can't be stored externally.
4424 */
4425 if (attrnum < 0 || isnull1 ||
4426 TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4427 continue;
4428
4429 /*
4430 * Check if the old tuple's attribute is stored externally and is a
4431 * member of external_cols.
4432 */
4435 *has_external = true;
4436 }
4437
4438 return modified;
4439}
4440
4441/*
4442 * simple_heap_update - replace a tuple
4443 *
4444 * This routine may be used to update a tuple when concurrent updates of
4445 * the target tuple are not expected (for example, because we have a lock
4446 * on the relation associated with the tuple). Any failure is reported
4447 * via ereport().
4448 */
4449void
4452{
4454 TM_FailureData tmfd;
4455 LockTupleMode lockmode;
4456
4457 result = heap_update(relation, otid, tup,
4458 GetCurrentCommandId(true), 0,
4460 true /* wait for commit */ ,
4461 &tmfd, &lockmode, update_indexes);
4462 switch (result)
4463 {
4464 case TM_SelfModified:
4465 /* Tuple was already updated in current command? */
4466 elog(ERROR, "tuple already updated by self");
4467 break;
4468
4469 case TM_Ok:
4470 /* done successfully */
4471 break;
4472
4473 case TM_Updated:
4474 elog(ERROR, "tuple concurrently updated");
4475 break;
4476
4477 case TM_Deleted:
4478 elog(ERROR, "tuple concurrently deleted");
4479 break;
4480
4481 default:
4482 elog(ERROR, "unrecognized heap_update status: %u", result);
4483 break;
4484 }
4485}
4486
4487
4488/*
4489 * Return the MultiXactStatus corresponding to the given tuple lock mode.
4490 */
4491static MultiXactStatus
4493{
4494 int retval;
4495
4496 if (is_update)
4497 retval = tupleLockExtraInfo[mode].updstatus;
4498 else
4499 retval = tupleLockExtraInfo[mode].lockstatus;
4500
4501 if (retval == -1)
4502 elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4503 is_update ? "true" : "false");
4504
4505 return (MultiXactStatus) retval;
4506}
4507
4508/*
4509 * heap_lock_tuple - lock a tuple in shared or exclusive mode
4510 *
4511 * Note that this acquires a buffer pin, which the caller must release.
4512 *
4513 * Input parameters:
4514 * relation: relation containing tuple (caller must hold suitable lock)
4515 * cid: current command ID (used for visibility test, and stored into
4516 * tuple's cmax if lock is successful)
4517 * mode: indicates if shared or exclusive tuple lock is desired
4518 * wait_policy: what to do if tuple lock is not available
4519 * follow_updates: if true, follow the update chain to also lock descendant
4520 * tuples.
4521 *
4522 * Output parameters:
4523 * *tuple: all fields filled in
4524 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4525 * *tmfd: filled in failure cases (see below)
4526 *
4527 * Function results are the same as the ones for table_tuple_lock().
4528 *
4529 * In the failure cases other than TM_Invisible, the routine fills
4530 * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4531 * if necessary), and t_cmax (the last only for TM_SelfModified,
4532 * since we cannot obtain cmax from a combo CID generated by another
4533 * transaction).
4534 * See comments for struct TM_FailureData for additional info.
4535 *
4536 * See README.tuplock for a thorough explanation of this mechanism.
4537 */
4541 bool follow_updates,
4542 Buffer *buffer, TM_FailureData *tmfd)
4543{
4545 ItemPointer tid = &(tuple->t_self);
4546 ItemId lp;
4547 Page page;
4548 Buffer vmbuffer = InvalidBuffer;
4549 BlockNumber block;
4550 TransactionId xid,
4551 xmax;
4555 bool first_time = true;
4556 bool skip_tuple_lock = false;
4557 bool have_tuple_lock = false;
4558 bool cleared_all_frozen = false;
4559
4560 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4561 block = ItemPointerGetBlockNumber(tid);
4562
4563 /*
4564 * Before locking the buffer, pin the visibility map page if it appears to
4565 * be necessary. Since we haven't got the lock yet, someone else might be
4566 * in the middle of changing this, so we'll need to recheck after we have
4567 * the lock.
4568 */
4569 if (PageIsAllVisible(BufferGetPage(*buffer)))
4570 visibilitymap_pin(relation, block, &vmbuffer);
4571
4573
4574 page = BufferGetPage(*buffer);
4577
4578 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4579 tuple->t_len = ItemIdGetLength(lp);
4580 tuple->t_tableOid = RelationGetRelid(relation);
4581
4582l3:
4583 result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4584
4585 if (result == TM_Invisible)
4586 {
4587 /*
4588 * This is possible, but only when locking a tuple for ON CONFLICT DO
4589 * SELECT/UPDATE. We return this value here rather than throwing an
4590 * error in order to give that case the opportunity to throw a more
4591 * specific error.
4592 */
4594 goto out_locked;
4595 }
4596 else if (result == TM_BeingModified ||
4597 result == TM_Updated ||
4598 result == TM_Deleted)
4599 {
4603 bool require_sleep;
4604 ItemPointerData t_ctid;
4605
4606 /* must copy state data before unlocking buffer */
4608 infomask = tuple->t_data->t_infomask;
4609 infomask2 = tuple->t_data->t_infomask2;
4610 ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4611
4613
4614 /*
4615 * If any subtransaction of the current top transaction already holds
4616 * a lock as strong as or stronger than what we're requesting, we
4617 * effectively hold the desired lock already. We *must* succeed
4618 * without trying to take the tuple lock, else we will deadlock
4619 * against anyone wanting to acquire a stronger lock.
4620 *
4621 * Note we only do this the first time we loop on the HTSU result;
4622 * there is no point in testing in subsequent passes, because
4623 * evidently our own transaction cannot have acquired a new lock after
4624 * the first time we checked.
4625 */
4626 if (first_time)
4627 {
4628 first_time = false;
4629
4631 {
4632 int i;
4633 int nmembers;
4634 MultiXactMember *members;
4635
4636 /*
4637 * We don't need to allow old multixacts here; if that had
4638 * been the case, HeapTupleSatisfiesUpdate would have returned
4639 * MayBeUpdated and we wouldn't be here.
4640 */
4641 nmembers =
4642 GetMultiXactIdMembers(xwait, &members, false,
4644
4645 for (i = 0; i < nmembers; i++)
4646 {
4647 /* only consider members of our own transaction */
4648 if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4649 continue;
4650
4651 if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4652 {
4653 pfree(members);
4654 result = TM_Ok;
4655 goto out_unlocked;
4656 }
4657 else
4658 {
4659 /*
4660 * Disable acquisition of the heavyweight tuple lock.
4661 * Otherwise, when promoting a weaker lock, we might
4662 * deadlock with another locker that has acquired the
4663 * heavyweight tuple lock and is waiting for our
4664 * transaction to finish.
4665 *
4666 * Note that in this case we still need to wait for
4667 * the multixact if required, to avoid acquiring
4668 * conflicting locks.
4669 */
4670 skip_tuple_lock = true;
4671 }
4672 }
4673
4674 if (members)
4675 pfree(members);
4676 }
4678 {
4679 switch (mode)
4680 {
4681 case LockTupleKeyShare:
4685 result = TM_Ok;
4686 goto out_unlocked;
4687 case LockTupleShare:
4690 {
4691 result = TM_Ok;
4692 goto out_unlocked;
4693 }
4694 break;
4697 {
4698 result = TM_Ok;
4699 goto out_unlocked;
4700 }
4701 break;
4702 case LockTupleExclusive:
4705 {
4706 result = TM_Ok;
4707 goto out_unlocked;
4708 }
4709 break;
4710 }
4711 }
4712 }
4713
4714 /*
4715 * Initially assume that we will have to wait for the locking
4716 * transaction(s) to finish. We check various cases below in which
4717 * this can be turned off.
4718 */
4719 require_sleep = true;
4720 if (mode == LockTupleKeyShare)
4721 {
4722 /*
4723 * If we're requesting KeyShare, and there's no update present, we
4724 * don't need to wait. Even if there is an update, we can still
4725 * continue if the key hasn't been modified.
4726 *
4727 * However, if there are updates, we need to walk the update chain
4728 * to mark future versions of the row as locked, too. That way,
4729 * if somebody deletes that future version, we're protected
4730 * against the key going away. This locking of future versions
4731 * could block momentarily, if a concurrent transaction is
4732 * deleting a key; or it could return a value to the effect that
4733 * the transaction deleting the key has already committed. So we
4734 * do this before re-locking the buffer; otherwise this would be
4735 * prone to deadlocks.
4736 *
4737 * Note that the TID we're locking was grabbed before we unlocked
4738 * the buffer. For it to change while we're not looking, the
4739 * other properties we're testing for below after re-locking the
4740 * buffer would also change, in which case we would restart this
4741 * loop above.
4742 */
4744 {
4745 bool updated;
4746
4748
4749 /*
4750 * If there are updates, follow the update chain; bail out if
4751 * that cannot be done.
4752 */
4753 if (follow_updates && updated &&
4754 !ItemPointerEquals(&tuple->t_self, &t_ctid))
4755 {
4756 TM_Result res;
4757
4758 res = heap_lock_updated_tuple(relation,
4759 infomask, xwait, &t_ctid,
4761 mode);
4762 if (res != TM_Ok)
4763 {
4764 result = res;
4765 /* recovery code expects to have buffer lock held */
4767 goto failed;
4768 }
4769 }
4770
4772
4773 /*
4774 * Make sure it's still an appropriate lock, else start over.
4775 * Also, if it wasn't updated before we released the lock, but
4776 * is updated now, we start over too; the reason is that we
4777 * now need to follow the update chain to lock the new
4778 * versions.
4779 */
4780 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4781 ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4782 !updated))
4783 goto l3;
4784
4785 /* Things look okay, so we can skip sleeping */
4786 require_sleep = false;
4787
4788 /*
4789 * Note we allow Xmax to change here; other updaters/lockers
4790 * could have modified it before we grabbed the buffer lock.
4791 * However, this is not a problem, because with the recheck we
4792 * just did we ensure that they still don't conflict with the
4793 * lock we want.
4794 */
4795 }
4796 }
4797 else if (mode == LockTupleShare)
4798 {
4799 /*
4800 * If we're requesting Share, we can similarly avoid sleeping if
4801 * there's no update and no exclusive lock present.
4802 */
4805 {
4807
4808 /*
4809 * Make sure it's still an appropriate lock, else start over.
4810 * See above about allowing xmax to change.
4811 */
4814 goto l3;
4815 require_sleep = false;
4816 }
4817 }
4818 else if (mode == LockTupleNoKeyExclusive)
4819 {
4820 /*
4821 * If we're requesting NoKeyExclusive, we might also be able to
4822 * avoid sleeping; just ensure that there no conflicting lock
4823 * already acquired.
4824 */
4826 {
4828 mode, NULL))
4829 {
4830 /*
4831 * No conflict, but if the xmax changed under us in the
4832 * meantime, start over.
4833 */
4837 xwait))
4838 goto l3;
4839
4840 /* otherwise, we're good */
4841 require_sleep = false;
4842 }
4843 }
4845 {
4847
4848 /* if the xmax changed in the meantime, start over */
4851 xwait))
4852 goto l3;
4853 /* otherwise, we're good */
4854 require_sleep = false;
4855 }
4856 }
4857
4858 /*
4859 * As a check independent from those above, we can also avoid sleeping
4860 * if the current transaction is the sole locker of the tuple. Note
4861 * that the strength of the lock already held is irrelevant; this is
4862 * not about recording the lock in Xmax (which will be done regardless
4863 * of this optimization, below). Also, note that the cases where we
4864 * hold a lock stronger than we are requesting are already handled
4865 * above by not doing anything.
4866 *
4867 * Note we only deal with the non-multixact case here; MultiXactIdWait
4868 * is well equipped to deal with this situation on its own.
4869 */
4872 {
4873 /* ... but if the xmax changed in the meantime, start over */
4877 xwait))
4878 goto l3;
4880 require_sleep = false;
4881 }
4882
4883 /*
4884 * Time to sleep on the other transaction/multixact, if necessary.
4885 *
4886 * If the other transaction is an update/delete that's already
4887 * committed, then sleeping cannot possibly do any good: if we're
4888 * required to sleep, get out to raise an error instead.
4889 *
4890 * By here, we either have already acquired the buffer exclusive lock,
4891 * or we must wait for the locking transaction or multixact; so below
4892 * we ensure that we grab buffer lock after the sleep.
4893 */
4895 {
4897 goto failed;
4898 }
4899 else if (require_sleep)
4900 {
4901 /*
4902 * Acquire tuple lock to establish our priority for the tuple, or
4903 * die trying. LockTuple will release us when we are next-in-line
4904 * for the tuple. We must do this even if we are share-locking,
4905 * but not if we already have a weaker lock on the tuple.
4906 *
4907 * If we are forced to "start over" below, we keep the tuple lock;
4908 * this arranges that we stay at the head of the line while
4909 * rechecking tuple state.
4910 */
4911 if (!skip_tuple_lock &&
4912 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4914 {
4915 /*
4916 * This can only happen if wait_policy is Skip and the lock
4917 * couldn't be obtained.
4918 */
4920 /* recovery code expects to have buffer lock held */
4922 goto failed;
4923 }
4924
4926 {
4928
4929 /* We only ever lock tuples, never update them */
4930 if (status >= MultiXactStatusNoKeyUpdate)
4931 elog(ERROR, "invalid lock mode in heap_lock_tuple");
4932
4933 /* wait for multixact to end, or die trying */
4934 switch (wait_policy)
4935 {
4936 case LockWaitBlock:
4938 relation, &tuple->t_self, XLTW_Lock, NULL);
4939 break;
4940 case LockWaitSkip:
4942 status, infomask, relation,
4943 NULL, false))
4944 {
4946 /* recovery code expects to have buffer lock held */
4948 goto failed;
4949 }
4950 break;
4951 case LockWaitError:
4953 status, infomask, relation,
4955 ereport(ERROR,
4957 errmsg("could not obtain lock on row in relation \"%s\"",
4958 RelationGetRelationName(relation))));
4959
4960 break;
4961 }
4962
4963 /*
4964 * Of course, the multixact might not be done here: if we're
4965 * requesting a light lock mode, other transactions with light
4966 * locks could still be alive, as well as locks owned by our
4967 * own xact or other subxacts of this backend. We need to
4968 * preserve the surviving MultiXact members. Note that it
4969 * isn't absolutely necessary in the latter case, but doing so
4970 * is simpler.
4971 */
4972 }
4973 else
4974 {
4975 /* wait for regular transaction to end, or die trying */
4976 switch (wait_policy)
4977 {
4978 case LockWaitBlock:
4979 XactLockTableWait(xwait, relation, &tuple->t_self,
4980 XLTW_Lock);
4981 break;
4982 case LockWaitSkip:
4984 {
4986 /* recovery code expects to have buffer lock held */
4988 goto failed;
4989 }
4990 break;
4991 case LockWaitError:
4993 ereport(ERROR,
4995 errmsg("could not obtain lock on row in relation \"%s\"",
4996 RelationGetRelationName(relation))));
4997 break;
4998 }
4999 }
5000
5001 /* if there are updates, follow the update chain */
5003 !ItemPointerEquals(&tuple->t_self, &t_ctid))
5004 {
5005 TM_Result res;
5006
5007 res = heap_lock_updated_tuple(relation,
5008 infomask, xwait, &t_ctid,
5010 mode);
5011 if (res != TM_Ok)
5012 {
5013 result = res;
5014 /* recovery code expects to have buffer lock held */
5016 goto failed;
5017 }
5018 }
5019
5021
5022 /*
5023 * xwait is done, but if xwait had just locked the tuple then some
5024 * other xact could update this tuple before we get to this point.
5025 * Check for xmax change, and start over if so.
5026 */
5029 xwait))
5030 goto l3;
5031
5033 {
5034 /*
5035 * Otherwise check if it committed or aborted. Note we cannot
5036 * be here if the tuple was only locked by somebody who didn't
5037 * conflict with us; that would have been handled above. So
5038 * that transaction must necessarily be gone by now. But
5039 * don't check for this in the multixact case, because some
5040 * locker transactions might still be running.
5041 */
5042 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5043 }
5044 }
5045
5046 /* By here, we're certain that we hold buffer exclusive lock again */
5047
5048 /*
5049 * We may lock if previous xmax aborted, or if it committed but only
5050 * locked the tuple without updating it; or if we didn't have to wait
5051 * at all for whatever reason.
5052 */
5053 if (!require_sleep ||
5054 (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5057 result = TM_Ok;
5058 else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5060 else
5062 }
5063
5064failed:
5065 if (result != TM_Ok)
5066 {
5069
5070 /*
5071 * When locking a tuple under LockWaitSkip semantics and we fail with
5072 * TM_WouldBlock above, it's possible for concurrent transactions to
5073 * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5074 * this assert is slightly different from the equivalent one in
5075 * heap_delete and heap_update.
5076 */
5078 !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5080 !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5081 tmfd->ctid = tuple->t_data->t_ctid;
5082 tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5083 if (result == TM_SelfModified)
5084 tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5085 else
5086 tmfd->cmax = InvalidCommandId;
5087 goto out_locked;
5088 }
5089
5090 /*
5091 * If we didn't pin the visibility map page and the page has become all
5092 * visible while we were busy locking the buffer, or during some
5093 * subsequent window during which we had it unlocked, we'll have to unlock
5094 * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5095 * unfortunate, especially since we'll now have to recheck whether the
5096 * tuple has been locked or updated under us, but hopefully it won't
5097 * happen very often.
5098 */
5099 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5100 {
5102 visibilitymap_pin(relation, block, &vmbuffer);
5104 goto l3;
5105 }
5106
5107 xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5108 old_infomask = tuple->t_data->t_infomask;
5109
5110 /*
5111 * If this is the first possibly-multixact-able operation in the current
5112 * transaction, set my per-backend OldestMemberMXactId setting. We can be
5113 * certain that the transaction will never become a member of any older
5114 * MultiXactIds than that. (We have to do this even if we end up just
5115 * using our own TransactionId below, since some other backend could
5116 * incorporate our XID into a MultiXact immediately afterwards.)
5117 */
5119
5120 /*
5121 * Compute the new xmax and infomask to store into the tuple. Note we do
5122 * not modify the tuple just yet, because that would leave it in the wrong
5123 * state if multixact.c elogs.
5124 */
5126 GetCurrentTransactionId(), mode, false,
5127 &xid, &new_infomask, &new_infomask2);
5128
5130
5131 /*
5132 * Store transaction information of xact locking the tuple.
5133 *
5134 * Note: Cmax is meaningless in this context, so don't set it; this avoids
5135 * possibly generating a useless combo CID. Moreover, if we're locking a
5136 * previously updated tuple, it's important to preserve the Cmax.
5137 *
5138 * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5139 * we would break the HOT chain.
5140 */
5143 tuple->t_data->t_infomask |= new_infomask;
5144 tuple->t_data->t_infomask2 |= new_infomask2;
5147 HeapTupleHeaderSetXmax(tuple->t_data, xid);
5148
5149 /*
5150 * Make sure there is no forward chain link in t_ctid. Note that in the
5151 * cases where the tuple has been updated, we must not overwrite t_ctid,
5152 * because it was set by the updater. Moreover, if the tuple has been
5153 * updated, we need to follow the update chain to lock the new versions of
5154 * the tuple as well.
5155 */
5157 tuple->t_data->t_ctid = *tid;
5158
5159 /* Clear only the all-frozen bit on visibility map if needed */
5160 if (PageIsAllVisible(page) &&
5161 visibilitymap_clear(relation, block, vmbuffer,
5163 cleared_all_frozen = true;
5164
5165
5166 MarkBufferDirty(*buffer);
5167
5168 /*
5169 * XLOG stuff. You might think that we don't need an XLOG record because
5170 * there is no state change worth restoring after a crash. You would be
5171 * wrong however: we have just written either a TransactionId or a
5172 * MultiXactId that may never have been seen on disk before, and we need
5173 * to make sure that there are XLOG entries covering those ID numbers.
5174 * Else the same IDs might be re-used after a crash, which would be
5175 * disastrous if this page made it to disk before the crash. Essentially
5176 * we have to enforce the WAL log-before-data rule even in this case.
5177 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5178 * entries for everything anyway.)
5179 */
5180 if (RelationNeedsWAL(relation))
5181 {
5184
5187
5188 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5189 xlrec.xmax = xid;
5190 xlrec.infobits_set = compute_infobits(new_infomask,
5191 tuple->t_data->t_infomask2);
5194
5195 /* we don't decode row locks atm, so no need to log the origin */
5196
5198
5199 PageSetLSN(page, recptr);
5200 }
5201
5203
5204 result = TM_Ok;
5205
5208
5210 if (BufferIsValid(vmbuffer))
5211 ReleaseBuffer(vmbuffer);
5212
5213 /*
5214 * Don't update the visibility map here. Locking a tuple doesn't change
5215 * visibility info.
5216 */
5217
5218 /*
5219 * Now that we have successfully marked the tuple as locked, we can
5220 * release the lmgr tuple lock, if we had it.
5221 */
5222 if (have_tuple_lock)
5223 UnlockTupleTuplock(relation, tid, mode);
5224
5225 return result;
5226}
5227
5228/*
5229 * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5230 * its normal, Xmax-based tuple lock.
5231 *
5232 * have_tuple_lock is an input and output parameter: on input, it indicates
5233 * whether the lock has previously been acquired (and this function does
5234 * nothing in that case). If this function returns success, have_tuple_lock
5235 * has been flipped to true.
5236 *
5237 * Returns false if it was unable to obtain the lock; this can only happen if
5238 * wait_policy is Skip.
5239 */
5240static bool
5243{
5244 if (*have_tuple_lock)
5245 return true;
5246
5247 switch (wait_policy)
5248 {
5249 case LockWaitBlock:
5250 LockTupleTuplock(relation, tid, mode);
5251 break;
5252
5253 case LockWaitSkip:
5254 if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5255 return false;
5256 break;
5257
5258 case LockWaitError:
5260 ereport(ERROR,
5262 errmsg("could not obtain lock on row in relation \"%s\"",
5263 RelationGetRelationName(relation))));
5264 break;
5265 }
5266 *have_tuple_lock = true;
5267
5268 return true;
5269}
5270
5271/*
5272 * Given an original set of Xmax and infomask, and a transaction (identified by
5273 * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5274 * corresponding infomasks to use on the tuple.
5275 *
5276 * Note that this might have side effects such as creating a new MultiXactId.
5277 *
5278 * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5279 * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5280 * but it was not running anymore. There is a race condition, which is that the
5281 * MultiXactId may have finished since then, but that uncommon case is handled
5282 * either here, or within MultiXactIdExpand.
5283 *
5284 * There is a similar race condition possible when the old xmax was a regular
5285 * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5286 * window, but it's still possible to end up creating an unnecessary
5287 * MultiXactId. Fortunately this is harmless.
5288 */
5289static void
5295{
5296 TransactionId new_xmax;
5299
5301
5302l5:
5303 new_infomask = 0;
5304 new_infomask2 = 0;
5306 {
5307 /*
5308 * No previous locker; we just insert our own TransactionId.
5309 *
5310 * Note that it's critical that this case be the first one checked,
5311 * because there are several blocks below that come back to this one
5312 * to implement certain optimizations; old_infomask might contain
5313 * other dirty bits in those cases, but we don't really care.
5314 */
5315 if (is_update)
5316 {
5317 new_xmax = add_to_xmax;
5318 if (mode == LockTupleExclusive)
5320 }
5321 else
5322 {
5324 switch (mode)
5325 {
5326 case LockTupleKeyShare:
5327 new_xmax = add_to_xmax;
5329 break;
5330 case LockTupleShare:
5331 new_xmax = add_to_xmax;
5333 break;
5335 new_xmax = add_to_xmax;
5337 break;
5338 case LockTupleExclusive:
5339 new_xmax = add_to_xmax;
5342 break;
5343 default:
5344 new_xmax = InvalidTransactionId; /* silence compiler */
5345 elog(ERROR, "invalid lock mode");
5346 }
5347 }
5348 }
5350 {
5352
5353 /*
5354 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5355 * cross-check.
5356 */
5358
5359 /*
5360 * A multixact together with LOCK_ONLY set but neither lock bit set
5361 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5362 * anymore. This check is critical for databases upgraded by
5363 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5364 * that such multis are never passed.
5365 */
5367 {
5370 goto l5;
5371 }
5372
5373 /*
5374 * If the XMAX is already a MultiXactId, then we need to expand it to
5375 * include add_to_xmax; but if all the members were lockers and are
5376 * all gone, we can do away with the IS_MULTI bit and just set
5377 * add_to_xmax as the only locker/updater. If all lockers are gone
5378 * and we have an updater that aborted, we can also do without a
5379 * multi.
5380 *
5381 * The cost of doing GetMultiXactIdMembers would be paid by
5382 * MultiXactIdExpand if we weren't to do this, so this check is not
5383 * incurring extra work anyhow.
5384 */
5386 {
5389 old_infomask)))
5390 {
5391 /*
5392 * Reset these bits and restart; otherwise fall through to
5393 * create a new multi below.
5394 */
5397 goto l5;
5398 }
5399 }
5400
5402
5403 new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5404 new_status);
5406 }
5408 {
5409 /*
5410 * It's a committed update, so we need to preserve him as updater of
5411 * the tuple.
5412 */
5413 MultiXactStatus status;
5415
5417 status = MultiXactStatusUpdate;
5418 else
5420
5422
5423 /*
5424 * since it's not running, it's obviously impossible for the old
5425 * updater to be identical to the current one, so we need not check
5426 * for that case as we do in the block above.
5427 */
5428 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5430 }
5431 else if (TransactionIdIsInProgress(xmax))
5432 {
5433 /*
5434 * If the XMAX is a valid, in-progress TransactionId, then we need to
5435 * create a new MultiXactId that includes both the old locker or
5436 * updater and our own TransactionId.
5437 */
5441
5443 {
5449 {
5452 else
5454 }
5455 else
5456 {
5457 /*
5458 * LOCK_ONLY can be present alone only when a page has been
5459 * upgraded by pg_upgrade. But in that case,
5460 * TransactionIdIsInProgress() should have returned false. We
5461 * assume it's no longer locked in this case.
5462 */
5463 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5466 goto l5;
5467 }
5468 }
5469 else
5470 {
5471 /* it's an update, but which kind? */
5474 else
5476 }
5477
5479
5480 /*
5481 * If the lock to be acquired is for the same TransactionId as the
5482 * existing lock, there's an optimization possible: consider only the
5483 * strongest of both locks as the only one present, and restart.
5484 */
5485 if (xmax == add_to_xmax)
5486 {
5487 /*
5488 * Note that it's not possible for the original tuple to be
5489 * updated: we wouldn't be here because the tuple would have been
5490 * invisible and we wouldn't try to update it. As a subtlety,
5491 * this code can also run when traversing an update chain to lock
5492 * future versions of a tuple. But we wouldn't be here either,
5493 * because the add_to_xmax would be different from the original
5494 * updater.
5495 */
5497
5498 /* acquire the strongest of both */
5499 if (mode < old_mode)
5500 mode = old_mode;
5501 /* mustn't touch is_update */
5502
5504 goto l5;
5505 }
5506
5507 /* otherwise, just fall back to creating a new multixact */
5509 new_xmax = MultiXactIdCreate(xmax, old_status,
5512 }
5515 {
5516 /*
5517 * It's a committed update, so we gotta preserve him as updater of the
5518 * tuple.
5519 */
5520 MultiXactStatus status;
5522
5524 status = MultiXactStatusUpdate;
5525 else
5527
5529
5530 /*
5531 * since it's not running, it's obviously impossible for the old
5532 * updater to be identical to the current one, so we need not check
5533 * for that case as we do in the block above.
5534 */
5535 new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5537 }
5538 else
5539 {
5540 /*
5541 * Can get here iff the locking/updating transaction was running when
5542 * the infomask was extracted from the tuple, but finished before
5543 * TransactionIdIsInProgress got to run. Deal with it as if there was
5544 * no locker at all in the first place.
5545 */
5547 goto l5;
5548 }
5549
5552 *result_xmax = new_xmax;
5553}
5554
5555/*
5556 * Subroutine for heap_lock_updated_tuple_rec.
5557 *
5558 * Given a hypothetical multixact status held by the transaction identified
5559 * with the given xid, does the current transaction need to wait, fail, or can
5560 * it continue if it wanted to acquire a lock of the given mode? "needwait"
5561 * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5562 * returned. If the lock is already held by the current transaction, return
5563 * TM_SelfModified. In case of a conflict with another transaction, a
5564 * different HeapTupleSatisfiesUpdate return code is returned.
5565 *
5566 * The held status is said to be hypothetical because it might correspond to a
5567 * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5568 * way for simplicity of API.
5569 */
5570static TM_Result
5573 bool *needwait)
5574{
5576
5577 *needwait = false;
5579
5580 /*
5581 * Note: we *must* check TransactionIdIsInProgress before
5582 * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5583 * for an explanation.
5584 */
5586 {
5587 /*
5588 * The tuple has already been locked by our own transaction. This is
5589 * very rare but can happen if multiple transactions are trying to
5590 * lock an ancient version of the same tuple.
5591 */
5592 return TM_SelfModified;
5593 }
5594 else if (TransactionIdIsInProgress(xid))
5595 {
5596 /*
5597 * If the locking transaction is running, what we do depends on
5598 * whether the lock modes conflict: if they do, then we must wait for
5599 * it to finish; otherwise we can fall through to lock this tuple
5600 * version without waiting.
5601 */
5604 {
5605 *needwait = true;
5606 }
5607
5608 /*
5609 * If we set needwait above, then this value doesn't matter;
5610 * otherwise, this value signals to caller that it's okay to proceed.
5611 */
5612 return TM_Ok;
5613 }
5614 else if (TransactionIdDidAbort(xid))
5615 return TM_Ok;
5616 else if (TransactionIdDidCommit(xid))
5617 {
5618 /*
5619 * The other transaction committed. If it was only a locker, then the
5620 * lock is completely gone now and we can return success; but if it
5621 * was an update, then what we do depends on whether the two lock
5622 * modes conflict. If they conflict, then we must report error to
5623 * caller. But if they don't, we can fall through to allow the current
5624 * transaction to lock the tuple.
5625 *
5626 * Note: the reason we worry about ISUPDATE here is because as soon as
5627 * a transaction ends, all its locks are gone and meaningless, and
5628 * thus we can ignore them; whereas its updates persist. In the
5629 * TransactionIdIsInProgress case, above, we don't need to check
5630 * because we know the lock is still "alive" and thus a conflict needs
5631 * always be checked.
5632 */
5633 if (!ISUPDATE_from_mxstatus(status))
5634 return TM_Ok;
5635
5638 {
5639 /* bummer */
5640 if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5641 return TM_Updated;
5642 else
5643 return TM_Deleted;
5644 }
5645
5646 return TM_Ok;
5647 }
5648
5649 /* Not in progress, not aborted, not committed -- must have crashed */
5650 return TM_Ok;
5651}
5652
5653
5654/*
5655 * Recursive part of heap_lock_updated_tuple
5656 *
5657 * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5658 * xid with the given mode; if this tuple is updated, recurse to lock the new
5659 * version as well.
5660 */
5661static TM_Result
5663 const ItemPointerData *tid, TransactionId xid,
5665{
5669 Buffer buf;
5674 TransactionId xmax,
5675 new_xmax;
5676 bool cleared_all_frozen = false;
5678 Buffer vmbuffer = InvalidBuffer;
5679 BlockNumber block;
5680
5681 ItemPointerCopy(tid, &tupid);
5682
5683 for (;;)
5684 {
5685 new_infomask = 0;
5686 new_xmax = InvalidTransactionId;
5688 ItemPointerCopy(&tupid, &(mytup.t_self));
5689
5690 if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5691 {
5692 /*
5693 * if we fail to find the updated version of the tuple, it's
5694 * because it was vacuumed/pruned away after its creator
5695 * transaction aborted. So behave as if we got to the end of the
5696 * chain, and there's no further tuple to lock: return success to
5697 * caller.
5698 */
5699 result = TM_Ok;
5700 goto out_unlocked;
5701 }
5702
5703l4:
5705
5706 /*
5707 * Before locking the buffer, pin the visibility map page if it
5708 * appears to be necessary. Since we haven't got the lock yet,
5709 * someone else might be in the middle of changing this, so we'll need
5710 * to recheck after we have the lock.
5711 */
5713 {
5714 visibilitymap_pin(rel, block, &vmbuffer);
5715 pinned_desired_page = true;
5716 }
5717 else
5718 pinned_desired_page = false;
5719
5721
5722 /*
5723 * If we didn't pin the visibility map page and the page has become
5724 * all visible while we were busy locking the buffer, we'll have to
5725 * unlock and re-lock, to avoid holding the buffer lock across I/O.
5726 * That's a bit unfortunate, but hopefully shouldn't happen often.
5727 *
5728 * Note: in some paths through this function, we will reach here
5729 * holding a pin on a vm page that may or may not be the one matching
5730 * this page. If this page isn't all-visible, we won't use the vm
5731 * page, but we hold onto such a pin till the end of the function.
5732 */
5734 {
5736 visibilitymap_pin(rel, block, &vmbuffer);
5738 }
5739
5740 /*
5741 * Check the tuple XMIN against prior XMAX, if any. If we reached the
5742 * end of the chain, we're done, so return success.
5743 */
5746 priorXmax))
5747 {
5748 result = TM_Ok;
5749 goto out_locked;
5750 }
5751
5752 /*
5753 * Also check Xmin: if this tuple was created by an aborted
5754 * (sub)transaction, then we already locked the last live one in the
5755 * chain, thus we're done, so return success.
5756 */
5758 {
5759 result = TM_Ok;
5760 goto out_locked;
5761 }
5762
5763 old_infomask = mytup.t_data->t_infomask;
5764 old_infomask2 = mytup.t_data->t_infomask2;
5765 xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5766
5767 /*
5768 * If this tuple version has been updated or locked by some concurrent
5769 * transaction(s), what we do depends on whether our lock mode
5770 * conflicts with what those other transactions hold, and also on the
5771 * status of them.
5772 */
5774 {
5776 bool needwait;
5777
5780 {
5781 int nmembers;
5782 int i;
5783 MultiXactMember *members;
5784
5785 /*
5786 * We don't need a test for pg_upgrade'd tuples: this is only
5787 * applied to tuples after the first in an update chain. Said
5788 * first tuple in the chain may well be locked-in-9.2-and-
5789 * pg_upgraded, but that one was already locked by our caller,
5790 * not us; and any subsequent ones cannot be because our
5791 * caller must necessarily have obtained a snapshot later than
5792 * the pg_upgrade itself.
5793 */
5794 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5795
5796 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5798 for (i = 0; i < nmembers; i++)
5799 {
5800 result = test_lockmode_for_conflict(members[i].status,
5801 members[i].xid,
5802 mode,
5803 &mytup,
5804 &needwait);
5805
5806 /*
5807 * If the tuple was already locked by ourselves in a
5808 * previous iteration of this (say heap_lock_tuple was
5809 * forced to restart the locking loop because of a change
5810 * in xmax), then we hold the lock already on this tuple
5811 * version and we don't need to do anything; and this is
5812 * not an error condition either. We just need to skip
5813 * this tuple and continue locking the next version in the
5814 * update chain.
5815 */
5816 if (result == TM_SelfModified)
5817 {
5818 pfree(members);
5819 goto next;
5820 }
5821
5822 if (needwait)
5823 {
5825 XactLockTableWait(members[i].xid, rel,
5826 &mytup.t_self,
5828 pfree(members);
5829 goto l4;
5830 }
5831 if (result != TM_Ok)
5832 {
5833 pfree(members);
5834 goto out_locked;
5835 }
5836 }
5837 if (members)
5838 pfree(members);
5839 }
5840 else
5841 {
5842 MultiXactStatus status;
5843
5844 /*
5845 * For a non-multi Xmax, we first need to compute the
5846 * corresponding MultiXactStatus by using the infomask bits.
5847 */
5849 {
5853 status = MultiXactStatusForShare;
5855 {
5857 status = MultiXactStatusForUpdate;
5858 else
5860 }
5861 else
5862 {
5863 /*
5864 * LOCK_ONLY present alone (a pg_upgraded tuple marked
5865 * as share-locked in the old cluster) shouldn't be
5866 * seen in the middle of an update chain.
5867 */
5868 elog(ERROR, "invalid lock status in tuple");
5869 }
5870 }
5871 else
5872 {
5873 /* it's an update, but which kind? */
5875 status = MultiXactStatusUpdate;
5876 else
5878 }
5879
5881 &mytup, &needwait);
5882
5883 /*
5884 * If the tuple was already locked by ourselves in a previous
5885 * iteration of this (say heap_lock_tuple was forced to
5886 * restart the locking loop because of a change in xmax), then
5887 * we hold the lock already on this tuple version and we don't
5888 * need to do anything; and this is not an error condition
5889 * either. We just need to skip this tuple and continue
5890 * locking the next version in the update chain.
5891 */
5892 if (result == TM_SelfModified)
5893 goto next;
5894
5895 if (needwait)
5896 {
5898 XactLockTableWait(rawxmax, rel, &mytup.t_self,
5900 goto l4;
5901 }
5902 if (result != TM_Ok)
5903 {
5904 goto out_locked;
5905 }
5906 }
5907 }
5908
5909 /* compute the new Xmax and infomask values for the tuple ... */
5910 compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5911 xid, mode, false,
5912 &new_xmax, &new_infomask, &new_infomask2);
5913
5915 visibilitymap_clear(rel, block, vmbuffer,
5917 cleared_all_frozen = true;
5918
5920
5921 /* ... and set them */
5922 HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5923 mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5924 mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5925 mytup.t_data->t_infomask |= new_infomask;
5926 mytup.t_data->t_infomask2 |= new_infomask2;
5927
5929
5930 /* XLOG stuff */
5931 if (RelationNeedsWAL(rel))
5932 {
5935 Page page = BufferGetPage(buf);
5936
5939
5940 xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5941 xlrec.xmax = new_xmax;
5943 xlrec.flags =
5945
5947
5949
5950 PageSetLSN(page, recptr);
5951 }
5952
5954
5955next:
5956 /* if we find the end of update chain, we're done. */
5957 if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5959 ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5961 {
5962 result = TM_Ok;
5963 goto out_locked;
5964 }
5965
5966 /* tail recursion */
5968 ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
5970 }
5971
5972 result = TM_Ok;
5973
5976
5978 if (vmbuffer != InvalidBuffer)
5979 ReleaseBuffer(vmbuffer);
5980
5981 return result;
5982}
5983
5984/*
5985 * heap_lock_updated_tuple
5986 * Follow update chain when locking an updated tuple, acquiring locks (row
5987 * marks) on the updated versions.
5988 *
5989 * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
5990 * fields from the initial tuple. We will lock the tuples starting from the
5991 * one that 'prior_ctid' points to. Note: This function does not lock the
5992 * initial tuple itself.
5993 *
5994 * This function doesn't check visibility, it just unconditionally marks the
5995 * tuple(s) as locked. If any tuple in the updated chain is being deleted
5996 * concurrently (or updated with the key being modified), sleep until the
5997 * transaction doing it is finished.
5998 *
5999 * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6000 * when we have to wait for other transactions to release them, as opposed to
6001 * what heap_lock_tuple does. The reason is that having more than one
6002 * transaction walking the chain is probably uncommon enough that risk of
6003 * starvation is not likely: one of the preconditions for being here is that
6004 * the snapshot in use predates the update that created this tuple (because we
6005 * started at an earlier version of the tuple), but at the same time such a
6006 * transaction cannot be using repeatable read or serializable isolation
6007 * levels, because that would lead to a serializability failure.
6008 */
6009static TM_Result
6015{
6016 INJECTION_POINT("heap_lock_updated_tuple", NULL);
6017
6018 /*
6019 * If the tuple has moved into another partition (effectively a delete)
6020 * stop here.
6021 */
6023 {
6025
6026 /*
6027 * If this is the first possibly-multixact-able operation in the
6028 * current transaction, set my per-backend OldestMemberMXactId
6029 * setting. We can be certain that the transaction will never become a
6030 * member of any older MultiXactIds than that. (We have to do this
6031 * even if we end up just using our own TransactionId below, since
6032 * some other backend could incorporate our XID into a MultiXact
6033 * immediately afterwards.)
6034 */
6036
6040 }
6041
6042 /* nothing to lock */
6043 return TM_Ok;
6044}
6045
6046/*
6047 * heap_finish_speculative - mark speculative insertion as successful
6048 *
6049 * To successfully finish a speculative insertion we have to clear speculative
6050 * token from tuple. To do so the t_ctid field, which will contain a
6051 * speculative token value, is modified in place to point to the tuple itself,
6052 * which is characteristic of a newly inserted ordinary tuple.
6053 *
6054 * NB: It is not ok to commit without either finishing or aborting a
6055 * speculative insertion. We could treat speculative tuples of committed
6056 * transactions implicitly as completed, but then we would have to be prepared
6057 * to deal with speculative tokens on committed tuples. That wouldn't be
6058 * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6059 * but clearing the token at completion isn't very expensive either.
6060 * An explicit confirmation WAL record also makes logical decoding simpler.
6061 */
6062void
6064{
6065 Buffer buffer;
6066 Page page;
6067 OffsetNumber offnum;
6068 ItemId lp;
6069 HeapTupleHeader htup;
6070
6071 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6073 page = BufferGetPage(buffer);
6074
6075 offnum = ItemPointerGetOffsetNumber(tid);
6077 elog(ERROR, "offnum out of range");
6078 lp = PageGetItemId(page, offnum);
6079 if (!ItemIdIsNormal(lp))
6080 elog(ERROR, "invalid lp");
6081
6082 htup = (HeapTupleHeader) PageGetItem(page, lp);
6083
6084 /* NO EREPORT(ERROR) from here till changes are logged */
6086
6088
6089 MarkBufferDirty(buffer);
6090
6091 /*
6092 * Replace the speculative insertion token with a real t_ctid, pointing to
6093 * itself like it does on regular tuples.
6094 */
6095 htup->t_ctid = *tid;
6096
6097 /* XLOG stuff */
6098 if (RelationNeedsWAL(relation))
6099 {
6102
6104
6106
6107 /* We want the same filtering on this as on a plain insert */
6109
6112
6114
6115 PageSetLSN(page, recptr);
6116 }
6117
6119
6120 UnlockReleaseBuffer(buffer);
6121}
6122
6123/*
6124 * heap_abort_speculative - kill a speculatively inserted tuple
6125 *
6126 * Marks a tuple that was speculatively inserted in the same command as dead,
6127 * by setting its xmin as invalid. That makes it immediately appear as dead
6128 * to all transactions, including our own. In particular, it makes
6129 * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6130 * inserting a duplicate key value won't unnecessarily wait for our whole
6131 * transaction to finish (it'll just wait for our speculative insertion to
6132 * finish).
6133 *
6134 * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6135 * that arise due to a mutual dependency that is not user visible. By
6136 * definition, unprincipled deadlocks cannot be prevented by the user
6137 * reordering lock acquisition in client code, because the implementation level
6138 * lock acquisitions are not under the user's direct control. If speculative
6139 * inserters did not take this precaution, then under high concurrency they
6140 * could deadlock with each other, which would not be acceptable.
6141 *
6142 * This is somewhat redundant with heap_delete, but we prefer to have a
6143 * dedicated routine with stripped down requirements. Note that this is also
6144 * used to delete the TOAST tuples created during speculative insertion.
6145 *
6146 * This routine does not affect logical decoding as it only looks at
6147 * confirmation records.
6148 */
6149void
6151{
6153 ItemId lp;
6154 HeapTupleData tp;
6155 Page page;
6156 BlockNumber block;
6157 Buffer buffer;
6158
6160
6161 block = ItemPointerGetBlockNumber(tid);
6162 buffer = ReadBuffer(relation, block);
6163 page = BufferGetPage(buffer);
6164
6166
6167 /*
6168 * Page can't be all visible, we just inserted into it, and are still
6169 * running.
6170 */
6171 Assert(!PageIsAllVisible(page));
6172
6175
6176 tp.t_tableOid = RelationGetRelid(relation);
6177 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6178 tp.t_len = ItemIdGetLength(lp);
6179 tp.t_self = *tid;
6180
6181 /*
6182 * Sanity check that the tuple really is a speculatively inserted tuple,
6183 * inserted by us.
6184 */
6185 if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6186 elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6187 if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6188 elog(ERROR, "attempted to kill a non-speculative tuple");
6190
6191 /*
6192 * No need to check for serializable conflicts here. There is never a
6193 * need for a combo CID, either. No need to extract replica identity, or
6194 * do anything special with infomask bits.
6195 */
6196
6198
6199 /*
6200 * The tuple will become DEAD immediately. Flag that this page is a
6201 * candidate for pruning by setting xmin to TransactionXmin. While not
6202 * immediately prunable, it is the oldest xid we can cheaply determine
6203 * that's safe against wraparound / being older than the table's
6204 * relfrozenxid. To defend against the unlikely case of a new relation
6205 * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6206 * if so (vacuum can't subsequently move relfrozenxid to beyond
6207 * TransactionXmin, so there's no race here).
6208 */
6210 {
6211 TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6213
6214 if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6215 prune_xid = relfrozenxid;
6216 else
6219 }
6220
6221 /* store transaction information of xact deleting the tuple */
6224
6225 /*
6226 * Set the tuple header xmin to InvalidTransactionId. This makes the
6227 * tuple immediately invisible everyone. (In particular, to any
6228 * transactions waiting on the speculative token, woken up later.)
6229 */
6231
6232 /* Clear the speculative insertion token too */
6233 tp.t_data->t_ctid = tp.t_self;
6234
6235 MarkBufferDirty(buffer);
6236
6237 /*
6238 * XLOG stuff
6239 *
6240 * The WAL records generated here match heap_delete(). The same recovery
6241 * routines are used.
6242 */
6243 if (RelationNeedsWAL(relation))
6244 {
6247
6249 xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6250 tp.t_data->t_infomask2);
6252 xlrec.xmax = xid;
6253
6257
6258 /* No replica identity & replication origin logged */
6259
6261
6262 PageSetLSN(page, recptr);
6263 }
6264
6266
6268
6269 if (HeapTupleHasExternal(&tp))
6270 {
6271 Assert(!IsToastRelation(relation));
6272 heap_toast_delete(relation, &tp, true);
6273 }
6274
6275 /*
6276 * Never need to mark tuple for invalidation, since catalogs don't support
6277 * speculative insertion
6278 */
6279
6280 /* Now we can release the buffer */
6281 ReleaseBuffer(buffer);
6282
6283 /* count deletion, as we counted the insertion too */
6284 pgstat_count_heap_delete(relation);
6285}
6286
6287/*
6288 * heap_inplace_lock - protect inplace update from concurrent heap_update()
6289 *
6290 * Evaluate whether the tuple's state is compatible with a no-key update.
6291 * Current transaction rowmarks are fine, as is KEY SHARE from any
6292 * transaction. If compatible, return true with the buffer exclusive-locked,
6293 * and the caller must release that by calling
6294 * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6295 * an error. Otherwise, call release_callback(arg), wait for blocking
6296 * transactions to end, and return false.
6297 *
6298 * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6299 * DDL, this doesn't guarantee any particular predicate locking.
6300 *
6301 * heap_delete() is a rarer source of blocking transactions (xwait). We'll
6302 * wait for such a transaction just like for the normal heap_update() case.
6303 * Normal concurrent DROP commands won't cause that, because all inplace
6304 * updaters take some lock that conflicts with DROP. An explicit SQL "DELETE
6305 * FROM pg_class" can cause it. By waiting, if the concurrent transaction
6306 * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
6307 * can find the successor tuple.
6308 *
6309 * Readers of inplace-updated fields expect changes to those fields are
6310 * durable. For example, vac_truncate_clog() reads datfrozenxid from
6311 * pg_database tuples via catalog snapshots. A future snapshot must not
6312 * return a lower datfrozenxid for the same database OID (lower in the
6313 * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6314 * tuple can start while we hold a lock on its buffer. In cases like
6315 * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6316 * to this transaction. ROLLBACK then is one case where it's okay to lose
6317 * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6318 * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6319 * committed tuple.)
6320 *
6321 * In principle, we could avoid waiting by overwriting every tuple in the
6322 * updated tuple chain. Reader expectations permit updating a tuple only if
6323 * it's aborted, is the tail of the chain, or we already updated the tuple
6324 * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6325 * order from tail to head. That would imply either (a) mutating all tuples
6326 * in one critical section or (b) accepting a chance of partial completion.
6327 * Partial completion of a relfrozenxid update would have the weird
6328 * consequence that the table's next VACUUM could see the table's relfrozenxid
6329 * move forward between vacuum_get_cutoffs() and finishing.
6330 */
6331bool
6333 HeapTuple oldtup_ptr, Buffer buffer,
6334 void (*release_callback) (void *), void *arg)
6335{
6336 HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6338 bool ret;
6339
6340#ifdef USE_ASSERT_CHECKING
6341 if (RelationGetRelid(relation) == RelationRelationId)
6343#endif
6344
6345 Assert(BufferIsValid(buffer));
6346
6347 /*
6348 * Register shared cache invals if necessary. Other sessions may finish
6349 * inplace updates of this tuple between this step and LockTuple(). Since
6350 * inplace updates don't change cache keys, that's harmless.
6351 *
6352 * While it's tempting to register invals only after confirming we can
6353 * return true, the following obstacle precludes reordering steps that
6354 * way. Registering invals might reach a CatalogCacheInitializeCache()
6355 * that locks "buffer". That would hang indefinitely if running after our
6356 * own LockBuffer(). Hence, we must register invals before LockBuffer().
6357 */
6359
6360 LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6362
6363 /*----------
6364 * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6365 *
6366 * - wait unconditionally
6367 * - already locked tuple above, since inplace needs that unconditionally
6368 * - don't recheck header after wait: simpler to defer to next iteration
6369 * - don't try to continue even if the updater aborts: likewise
6370 * - no crosscheck
6371 */
6373 buffer);
6374
6375 if (result == TM_Invisible)
6376 {
6377 /* no known way this can happen */
6378 ereport(ERROR,
6380 errmsg_internal("attempted to overwrite invisible tuple")));
6381 }
6382 else if (result == TM_SelfModified)
6383 {
6384 /*
6385 * CREATE INDEX might reach this if an expression is silly enough to
6386 * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6387 * statements might get here after a heap_update() of the same row, in
6388 * the absence of an intervening CommandCounterIncrement().
6389 */
6390 ereport(ERROR,
6392 errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6393 }
6394 else if (result == TM_BeingModified)
6395 {
6398
6400 infomask = oldtup.t_data->t_infomask;
6401
6403 {
6406 int remain;
6407
6409 lockmode, NULL))
6410 {
6413 ret = false;
6415 relation, &oldtup.t_self, XLTW_Update,
6416 &remain);
6417 }
6418 else
6419 ret = true;
6420 }
6422 ret = true;
6424 ret = true;
6425 else
6426 {
6429 ret = false;
6430 XactLockTableWait(xwait, relation, &oldtup.t_self,
6431 XLTW_Update);
6432 }
6433 }
6434 else
6435 {
6436 ret = (result == TM_Ok);
6437 if (!ret)
6438 {
6441 }
6442 }
6443
6444 /*
6445 * GetCatalogSnapshot() relies on invalidation messages to know when to
6446 * take a new snapshot. COMMIT of xwait is responsible for sending the
6447 * invalidation. We're not acquiring heavyweight locks sufficient to
6448 * block if not yet sent, so we must take a new snapshot to ensure a later
6449 * attempt has a fair chance. While we don't need this if xwait aborted,
6450 * don't bother optimizing that.
6451 */
6452 if (!ret)
6453 {
6454 UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6457 }
6458 return ret;
6459}
6460
6461/*
6462 * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6463 *
6464 * The tuple cannot change size, and therefore its header fields and null
6465 * bitmap (if any) don't change either.
6466 *
6467 * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6468 */
6469void
6471 HeapTuple oldtup, HeapTuple tuple,
6472 Buffer buffer)
6473{
6474 HeapTupleHeader htup = oldtup->t_data;
6475 uint32 oldlen;
6476 uint32 newlen;
6477 char *dst;
6478 char *src;
6479 int nmsgs = 0;
6481 bool RelcacheInitFileInval = false;
6482
6483 Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6484 oldlen = oldtup->t_len - htup->t_hoff;
6485 newlen = tuple->t_len - tuple->t_data->t_hoff;
6486 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6487 elog(ERROR, "wrong tuple length");
6488
6489 dst = (char *) htup + htup->t_hoff;
6490 src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6491
6492 /* Like RecordTransactionCommit(), log only if needed */
6495 &RelcacheInitFileInval);
6496
6497 /*
6498 * Unlink relcache init files as needed. If unlinking, acquire
6499 * RelCacheInitLock until after associated invalidations. By doing this
6500 * in advance, if we checkpoint and then crash between inplace
6501 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6502 * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6503 * neglect to PANIC on EIO.
6504 */
6506
6507 /*----------
6508 * NO EREPORT(ERROR) from here till changes are complete
6509 *
6510 * Our exclusive buffer lock won't stop a reader having already pinned and
6511 * checked visibility for this tuple. With the usual order of changes
6512 * (i.e. updating the buffer contents before WAL logging), a reader could
6513 * observe our not-yet-persistent update to relfrozenxid and update
6514 * datfrozenxid based on that. A crash in that moment could allow
6515 * datfrozenxid to overtake relfrozenxid:
6516 *
6517 * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6518 * ["R" is a VACUUM tbl]
6519 * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6520 * D: systable_getnext() returns pg_class tuple of tbl
6521 * R: memcpy() into pg_class tuple of tbl
6522 * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6523 * [crash]
6524 * [recovery restores datfrozenxid w/o relfrozenxid]
6525 *
6526 * We avoid that by using a temporary copy of the buffer to hide our
6527 * change from other backends until the change has been WAL-logged. We
6528 * apply our change to the temporary copy and WAL-log it, before modifying
6529 * the real page. That way any action a reader of the in-place-updated
6530 * value takes will be WAL logged after this change.
6531 */
6533
6534 MarkBufferDirty(buffer);
6535
6536 /* XLOG stuff */
6537 if (RelationNeedsWAL(relation))
6538 {
6541 char *origdata = (char *) BufferGetBlock(buffer);
6542 Page page = BufferGetPage(buffer);
6543 uint16 lower = ((PageHeader) page)->pd_lower;
6544 uint16 upper = ((PageHeader) page)->pd_upper;
6546 RelFileLocator rlocator;
6547 ForkNumber forkno;
6548 BlockNumber blkno;
6550
6551 xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6552 xlrec.dbId = MyDatabaseId;
6554 xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6555 xlrec.nmsgs = nmsgs;
6556
6559 if (nmsgs != 0)
6561 nmsgs * sizeof(SharedInvalidationMessage));
6562
6563 /* register block matching what buffer will look like after changes */
6568 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6569 Assert(forkno == MAIN_FORKNUM);
6570 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6572 XLogRegisterBufData(0, src, newlen);
6573
6574 /* inplace updates aren't decoded atm, don't log the origin */
6575
6577
6578 PageSetLSN(page, recptr);
6579 }
6580
6581 memcpy(dst, src, newlen);
6582
6584
6585 /*
6586 * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6587 * do this before UnlockTuple().
6588 */
6590
6592 UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6593
6594 AcceptInvalidationMessages(); /* local processing of just-sent inval */
6595
6596 /*
6597 * Queue a transactional inval, for logical decoding and for third-party
6598 * code that might have been relying on it since long before inplace
6599 * update adopted immediate invalidation. See README.tuplock section
6600 * "Reading inplace-updated columns" for logical decoding details.
6601 */
6603 CacheInvalidateHeapTuple(relation, tuple, NULL);
6604}
6605
6606/*
6607 * heap_inplace_unlock - reverse of heap_inplace_lock
6608 */
6609void
6611 HeapTuple oldtup, Buffer buffer)
6612{
6614 UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6616}
6617
6618#define FRM_NOOP 0x0001
6619#define FRM_INVALIDATE_XMAX 0x0002
6620#define FRM_RETURN_IS_XID 0x0004
6621#define FRM_RETURN_IS_MULTI 0x0008
6622#define FRM_MARK_COMMITTED 0x0010
6623
6624/*
6625 * FreezeMultiXactId
6626 * Determine what to do during freezing when a tuple is marked by a
6627 * MultiXactId.
6628 *
6629 * "flags" is an output value; it's used to tell caller what to do on return.
6630 * "pagefrz" is an input/output value, used to manage page level freezing.
6631 *
6632 * Possible values that we can set in "flags":
6633 * FRM_NOOP
6634 * don't do anything -- keep existing Xmax
6635 * FRM_INVALIDATE_XMAX
6636 * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6637 * FRM_RETURN_IS_XID
6638 * The Xid return value is a single update Xid to set as xmax.
6639 * FRM_MARK_COMMITTED
6640 * Xmax can be marked as HEAP_XMAX_COMMITTED
6641 * FRM_RETURN_IS_MULTI
6642 * The return value is a new MultiXactId to set as new Xmax.
6643 * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6644 *
6645 * Caller delegates control of page freezing to us. In practice we always
6646 * force freezing of caller's page unless FRM_NOOP processing is indicated.
6647 * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6648 * can never be left behind. We freely choose when and how to process each
6649 * Multi, without ever violating the cutoff postconditions for freezing.
6650 *
6651 * It's useful to remove Multis on a proactive timeline (relative to freezing
6652 * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6653 * be cheaper in the short run, for us, since we too can avoid SLRU buffer
6654 * misses through eager processing.
6655 *
6656 * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6657 * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6658 * This can usually be put off, which is usually enough to avoid it altogether.
6659 * Allocating new multis during VACUUM should be avoided on general principle;
6660 * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6661 * its own special risks.
6662 *
6663 * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6664 * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6665 *
6666 * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6667 * have already forced page-level freezing, since that might incur the same
6668 * SLRU buffer misses that we specifically intended to avoid by freezing.
6669 */
6670static TransactionId
6672 const struct VacuumCutoffs *cutoffs, uint16 *flags,
6673 HeapPageFreeze *pagefrz)
6674{
6676 MultiXactMember *members;
6677 int nmembers;
6678 bool need_replace;
6679 int nnewmembers;
6681 bool has_lockers;
6683 bool update_committed;
6684 TransactionId FreezePageRelfrozenXid;
6685
6686 *flags = 0;
6687
6688 /* We should only be called in Multis */
6689 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6690
6691 if (!MultiXactIdIsValid(multi) ||
6692 HEAP_LOCKED_UPGRADED(t_infomask))
6693 {
6694 *flags |= FRM_INVALIDATE_XMAX;
6695 pagefrz->freeze_required = true;
6696 return InvalidTransactionId;
6697 }
6698 else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6699 ereport(ERROR,
6701 errmsg_internal("found multixact %u from before relminmxid %u",
6702 multi, cutoffs->relminmxid)));
6703 else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6704 {
6706
6707 /*
6708 * This old multi cannot possibly have members still running, but
6709 * verify just in case. If it was a locker only, it can be removed
6710 * without any further consideration; but if it contained an update,
6711 * we might need to preserve it.
6712 */
6713 if (MultiXactIdIsRunning(multi,
6714 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6715 ereport(ERROR,
6717 errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6718 multi, cutoffs->OldestMxact)));
6719
6720 if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6721 {
6722 *flags |= FRM_INVALIDATE_XMAX;
6723 pagefrz->freeze_required = true;
6724 return InvalidTransactionId;
6725 }
6726
6727 /* replace multi with single XID for its updater? */
6728 update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6730 ereport(ERROR,
6732 errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6733 multi, update_xact,
6734 cutoffs->relfrozenxid)));
6735 else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6736 {
6737 /*
6738 * Updater XID has to have aborted (otherwise the tuple would have
6739 * been pruned away instead, since updater XID is < OldestXmin).
6740 * Just remove xmax.
6741 */
6743 ereport(ERROR,
6745 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6746 multi, update_xact,
6747 cutoffs->OldestXmin)));
6748 *flags |= FRM_INVALIDATE_XMAX;
6749 pagefrz->freeze_required = true;
6750 return InvalidTransactionId;
6751 }
6752
6753 /* Have to keep updater XID as new xmax */
6754 *flags |= FRM_RETURN_IS_XID;
6755 pagefrz->freeze_required = true;
6756 return update_xact;
6757 }
6758
6759 /*
6760 * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6761 * need to walk the whole members array to figure out what to do, if
6762 * anything.
6763 */
6764 nmembers =
6765 GetMultiXactIdMembers(multi, &members, false,
6766 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6767 if (nmembers <= 0)
6768 {
6769 /* Nothing worth keeping */
6770 *flags |= FRM_INVALIDATE_XMAX;
6771 pagefrz->freeze_required = true;
6772 return InvalidTransactionId;
6773 }
6774
6775 /*
6776 * The FRM_NOOP case is the only case where we might need to ratchet back
6777 * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6778 * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6779 * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6780 * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6781 * trackers managed by VACUUM being ratcheting back by xmax to the degree
6782 * required to make it safe to leave xmax undisturbed, independent of
6783 * whether or not page freezing is triggered somewhere else.
6784 *
6785 * Our policy is to force freezing in every case other than FRM_NOOP,
6786 * which obviates the need to maintain either set of trackers, anywhere.
6787 * Every other case will reliably execute a freeze plan for xmax that
6788 * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6789 * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6790 * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6791 * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6792 */
6793 need_replace = false;
6794 FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6795 for (int i = 0; i < nmembers; i++)
6796 {
6797 TransactionId xid = members[i].xid;
6798
6799 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6800
6801 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6802 {
6803 /* Can't violate the FreezeLimit postcondition */
6804 need_replace = true;
6805 break;
6806 }
6807 if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6808 FreezePageRelfrozenXid = xid;
6809 }
6810
6811 /* Can't violate the MultiXactCutoff postcondition, either */
6812 if (!need_replace)
6814
6815 if (!need_replace)
6816 {
6817 /*
6818 * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6819 * both together to make it safe to retain this particular multi after
6820 * freezing its page
6821 */
6822 *flags |= FRM_NOOP;
6823 pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6824 if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6825 pagefrz->FreezePageRelminMxid = multi;
6826 pfree(members);
6827 return multi;
6828 }
6829
6830 /*
6831 * Do a more thorough second pass over the multi to figure out which
6832 * member XIDs actually need to be kept. Checking the precise status of
6833 * individual members might even show that we don't need to keep anything.
6834 * That is quite possible even though the Multi must be >= OldestMxact,
6835 * since our second pass only keeps member XIDs when it's truly necessary;
6836 * even member XIDs >= OldestXmin often won't be kept by second pass.
6837 */
6838 nnewmembers = 0;
6840 has_lockers = false;
6842 update_committed = false;
6843
6844 /*
6845 * Determine whether to keep each member xid, or to ignore it instead
6846 */
6847 for (int i = 0; i < nmembers; i++)
6848 {
6849 TransactionId xid = members[i].xid;
6850 MultiXactStatus mstatus = members[i].status;
6851
6852 Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6853
6854 if (!ISUPDATE_from_mxstatus(mstatus))
6855 {
6856 /*
6857 * Locker XID (not updater XID). We only keep lockers that are
6858 * still running.
6859 */
6862 {
6863 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6864 ereport(ERROR,
6866 errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6867 multi, xid,
6868 cutoffs->OldestXmin)));
6869 newmembers[nnewmembers++] = members[i];
6870 has_lockers = true;
6871 }
6872
6873 continue;
6874 }
6875
6876 /*
6877 * Updater XID (not locker XID). Should we keep it?
6878 *
6879 * Since the tuple wasn't totally removed when vacuum pruned, the
6880 * update Xid cannot possibly be older than OldestXmin cutoff unless
6881 * the updater XID aborted. If the updater transaction is known
6882 * aborted or crashed then it's okay to ignore it, otherwise not.
6883 *
6884 * In any case the Multi should never contain two updaters, whatever
6885 * their individual commit status. Check for that first, in passing.
6886 */
6888 ereport(ERROR,
6890 errmsg_internal("multixact %u has two or more updating members",
6891 multi),
6892 errdetail_internal("First updater XID=%u second updater XID=%u.",
6893 update_xid, xid)));
6894
6895 /*
6896 * As with all tuple visibility routines, it's critical to test
6897 * TransactionIdIsInProgress before TransactionIdDidCommit, because of
6898 * race conditions explained in detail in heapam_visibility.c.
6899 */
6902 update_xid = xid;
6903 else if (TransactionIdDidCommit(xid))
6904 {
6905 /*
6906 * The transaction committed, so we can tell caller to set
6907 * HEAP_XMAX_COMMITTED. (We can only do this because we know the
6908 * transaction is not running.)
6909 */
6910 update_committed = true;
6911 update_xid = xid;
6912 }
6913 else
6914 {
6915 /*
6916 * Not in progress, not committed -- must be aborted or crashed;
6917 * we can ignore it.
6918 */
6919 continue;
6920 }
6921
6922 /*
6923 * We determined that updater must be kept -- add it to pending new
6924 * members list
6925 */
6926 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6927 ereport(ERROR,
6929 errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6930 multi, xid, cutoffs->OldestXmin)));
6931 newmembers[nnewmembers++] = members[i];
6932 }
6933
6934 pfree(members);
6935
6936 /*
6937 * Determine what to do with caller's multi based on information gathered
6938 * during our second pass
6939 */
6940 if (nnewmembers == 0)
6941 {
6942 /* Nothing worth keeping */
6943 *flags |= FRM_INVALIDATE_XMAX;
6945 }
6947 {
6948 /*
6949 * If there's a single member and it's an update, pass it back alone
6950 * without creating a new Multi. (XXX we could do this when there's a
6951 * single remaining locker, too, but that would complicate the API too
6952 * much; moreover, the case with the single updater is more
6953 * interesting, because those are longer-lived.)
6954 */
6955 Assert(nnewmembers == 1);
6956 *flags |= FRM_RETURN_IS_XID;
6957 if (update_committed)
6958 *flags |= FRM_MARK_COMMITTED;
6960 }
6961 else
6962 {
6963 /*
6964 * Create a new multixact with the surviving members of the previous
6965 * one, to set as new Xmax in the tuple
6966 */
6968 *flags |= FRM_RETURN_IS_MULTI;
6969 }
6970
6972
6973 pagefrz->freeze_required = true;
6974 return newxmax;
6975}
6976
6977/*
6978 * heap_prepare_freeze_tuple
6979 *
6980 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
6981 * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
6982 * setup enough state (in the *frz output argument) to enable caller to
6983 * process this tuple as part of freezing its page, and return true. Return
6984 * false if nothing can be changed about the tuple right now.
6985 *
6986 * FreezePageConflictXid is advanced only for xmin/xvac freezing, not for xmax
6987 * changes. We only remove xmax state here when it is lock-only, or when the
6988 * updater XID (including an updater member of a MultiXact) must be aborted;
6989 * otherwise, the tuple would already be removable. Neither case affects
6990 * visibility on a standby.
6991 *
6992 * Also sets *totally_frozen to true if the tuple will be totally frozen once
6993 * caller executes returned freeze plan (or if the tuple was already totally
6994 * frozen by an earlier VACUUM). This indicates that there are no remaining
6995 * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
6996 *
6997 * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
6998 * tuple that we returned true for, and then execute freezing. Caller must
6999 * initialize pagefrz fields for page as a whole before first call here for
7000 * each heap page.
7001 *
7002 * VACUUM caller decides on whether or not to freeze the page as a whole.
7003 * We'll often prepare freeze plans for a page that caller just discards.
7004 * However, VACUUM doesn't always get to make a choice; it must freeze when
7005 * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7006 * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7007 * that VACUUM always follows that rule.
7008 *
7009 * We sometimes force freezing of xmax MultiXactId values long before it is
7010 * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7011 * It's worth processing MultiXactIds proactively when it is cheap to do so,
7012 * and it's convenient to make that happen by piggy-backing it on the "force
7013 * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7014 * because it is expensive right now (though only when it's still possible to
7015 * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7016 *
7017 * It is assumed that the caller has checked the tuple with
7018 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7019 * (else we should be removing the tuple, not freezing it).
7020 *
7021 * NB: This function has side effects: it might allocate a new MultiXactId.
7022 * It will be set as tuple's new xmax when our *frz output is processed within
7023 * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7024 * then caller had better have an exclusive lock on it already.
7025 */
7026bool
7028 const struct VacuumCutoffs *cutoffs,
7029 HeapPageFreeze *pagefrz,
7031{
7032 bool xmin_already_frozen = false,
7033 xmax_already_frozen = false;
7034 bool freeze_xmin = false,
7035 replace_xvac = false,
7036 replace_xmax = false,
7037 freeze_xmax = false;
7038 TransactionId xid;
7039
7040 frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7041 frz->t_infomask2 = tuple->t_infomask2;
7042 frz->t_infomask = tuple->t_infomask;
7043 frz->frzflags = 0;
7044 frz->checkflags = 0;
7045
7046 /*
7047 * Process xmin, while keeping track of whether it's already frozen, or
7048 * will become frozen iff our freeze plan is executed by caller (could be
7049 * neither).
7050 */
7051 xid = HeapTupleHeaderGetXmin(tuple);
7052 if (!TransactionIdIsNormal(xid))
7053 xmin_already_frozen = true;
7054 else
7055 {
7056 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7057 ereport(ERROR,
7059 errmsg_internal("found xmin %u from before relfrozenxid %u",
7060 xid, cutoffs->relfrozenxid)));
7061
7062 /* Will set freeze_xmin flags in freeze plan below */
7064
7065 /* Verify that xmin committed if and when freeze plan is executed */
7066 if (freeze_xmin)
7067 {
7070 pagefrz->FreezePageConflictXid = xid;
7071 }
7072 }
7073
7074 /*
7075 * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7076 * as we support having MOVED_OFF/MOVED_IN tuples in the database
7077 */
7078 xid = HeapTupleHeaderGetXvac(tuple);
7079 if (TransactionIdIsNormal(xid))
7080 {
7082 Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7083
7084 /*
7085 * For Xvac, we always freeze proactively. This allows totally_frozen
7086 * tracking to ignore xvac.
7087 */
7088 replace_xvac = pagefrz->freeze_required = true;
7089
7091 pagefrz->FreezePageConflictXid = xid;
7092
7093 /* Will set replace_xvac flags in freeze plan below */
7094 }
7095
7096 /* Now process xmax */
7097 xid = frz->xmax;
7098 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7099 {
7100 /* Raw xmax is a MultiXactId */
7102 uint16 flags;
7103
7104 /*
7105 * We will either remove xmax completely (in the "freeze_xmax" path),
7106 * process xmax by replacing it (in the "replace_xmax" path), or
7107 * perform no-op xmax processing. The only constraint is that the
7108 * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7109 */
7110 newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7111 &flags, pagefrz);
7112
7113 if (flags & FRM_NOOP)
7114 {
7115 /*
7116 * xmax is a MultiXactId, and nothing about it changes for now.
7117 * This is the only case where 'freeze_required' won't have been
7118 * set for us by FreezeMultiXactId, as well as the only case where
7119 * neither freeze_xmax nor replace_xmax are set (given a multi).
7120 *
7121 * This is a no-op, but the call to FreezeMultiXactId might have
7122 * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7123 * for us (the "freeze page" variants, specifically). That'll
7124 * make it safe for our caller to freeze the page later on, while
7125 * leaving this particular xmax undisturbed.
7126 *
7127 * FreezeMultiXactId is _not_ responsible for the "no freeze"
7128 * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7129 * job. A call to heap_tuple_should_freeze for this same tuple
7130 * will take place below if 'freeze_required' isn't set already.
7131 * (This repeats work from FreezeMultiXactId, but allows "no
7132 * freeze" tracker maintenance to happen in only one place.)
7133 */
7136 }
7137 else if (flags & FRM_RETURN_IS_XID)
7138 {
7139 /*
7140 * xmax will become an updater Xid (original MultiXact's updater
7141 * member Xid will be carried forward as a simple Xid in Xmax).
7142 */
7144
7145 /*
7146 * NB -- some of these transformations are only valid because we
7147 * know the return Xid is a tuple updater (i.e. not merely a
7148 * locker.) Also note that the only reason we don't explicitly
7149 * worry about HEAP_KEYS_UPDATED is because it lives in
7150 * t_infomask2 rather than t_infomask.
7151 */
7152 frz->t_infomask &= ~HEAP_XMAX_BITS;
7153 frz->xmax = newxmax;
7154 if (flags & FRM_MARK_COMMITTED)
7155 frz->t_infomask |= HEAP_XMAX_COMMITTED;
7156 replace_xmax = true;
7157 }
7158 else if (flags & FRM_RETURN_IS_MULTI)
7159 {
7162
7163 /*
7164 * xmax is an old MultiXactId that we have to replace with a new
7165 * MultiXactId, to carry forward two or more original member XIDs.
7166 */
7168
7169 /*
7170 * We can't use GetMultiXactIdHintBits directly on the new multi
7171 * here; that routine initializes the masks to all zeroes, which
7172 * would lose other bits we need. Doing it this way ensures all
7173 * unrelated bits remain untouched.
7174 */
7175 frz->t_infomask &= ~HEAP_XMAX_BITS;
7176 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7178 frz->t_infomask |= newbits;
7179 frz->t_infomask2 |= newbits2;
7180 frz->xmax = newxmax;
7181 replace_xmax = true;
7182 }
7183 else
7184 {
7185 /*
7186 * Freeze plan for tuple "freezes xmax" in the strictest sense:
7187 * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7188 */
7189 Assert(flags & FRM_INVALIDATE_XMAX);
7191
7192 /* Will set freeze_xmax flags in freeze plan below */
7193 freeze_xmax = true;
7194 }
7195
7196 /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7197 Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7198 }
7199 else if (TransactionIdIsNormal(xid))
7200 {
7201 /* Raw xmax is normal XID */
7202 if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7203 ereport(ERROR,
7205 errmsg_internal("found xmax %u from before relfrozenxid %u",
7206 xid, cutoffs->relfrozenxid)));
7207
7208 /* Will set freeze_xmax flags in freeze plan below */
7210
7211 /*
7212 * Verify that xmax aborted if and when freeze plan is executed,
7213 * provided it's from an update. (A lock-only xmax can be removed
7214 * independent of this, since the lock is released at xact end.)
7215 */
7217 frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7218 }
7219 else if (!TransactionIdIsValid(xid))
7220 {
7221 /* Raw xmax is InvalidTransactionId XID */
7222 Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7223 xmax_already_frozen = true;
7224 }
7225 else
7226 ereport(ERROR,
7228 errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7229 xid, tuple->t_infomask)));
7230
7231 if (freeze_xmin)
7232 {
7234
7235 frz->t_infomask |= HEAP_XMIN_FROZEN;
7236 }
7237 if (replace_xvac)
7238 {
7239 /*
7240 * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7241 * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7242 * transaction succeeded.
7243 */
7244 Assert(pagefrz->freeze_required);
7245 if (tuple->t_infomask & HEAP_MOVED_OFF)
7246 frz->frzflags |= XLH_INVALID_XVAC;
7247 else
7248 frz->frzflags |= XLH_FREEZE_XVAC;
7249 }
7250 if (replace_xmax)
7251 {
7253 Assert(pagefrz->freeze_required);
7254
7255 /* Already set replace_xmax flags in freeze plan earlier */
7256 }
7257 if (freeze_xmax)
7258 {
7260
7261 frz->xmax = InvalidTransactionId;
7262
7263 /*
7264 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7265 * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7266 * Also get rid of the HEAP_KEYS_UPDATED bit.
7267 */
7268 frz->t_infomask &= ~HEAP_XMAX_BITS;
7269 frz->t_infomask |= HEAP_XMAX_INVALID;
7270 frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7271 frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7272 }
7273
7274 /*
7275 * Determine if this tuple is already totally frozen, or will become
7276 * totally frozen (provided caller executes freeze plans for the page)
7277 */
7280
7281 if (!pagefrz->freeze_required && !(xmin_already_frozen &&
7283 {
7284 /*
7285 * So far no previous tuple from the page made freezing mandatory.
7286 * Does this tuple force caller to freeze the entire page?
7287 */
7288 pagefrz->freeze_required =
7289 heap_tuple_should_freeze(tuple, cutoffs,
7290 &pagefrz->NoFreezePageRelfrozenXid,
7291 &pagefrz->NoFreezePageRelminMxid);
7292 }
7293
7294 /* Tell caller if this tuple has a usable freeze plan set in *frz */
7296}
7297
7298/*
7299 * Perform xmin/xmax XID status sanity checks before actually executing freeze
7300 * plans.
7301 *
7302 * heap_prepare_freeze_tuple doesn't perform these checks directly because
7303 * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7304 * successive VACUUMs that each decide against freezing the same page.
7305 */
7306void
7308 HeapTupleFreeze *tuples, int ntuples)
7309{
7310 Page page = BufferGetPage(buffer);
7311
7312 for (int i = 0; i < ntuples; i++)
7313 {
7314 HeapTupleFreeze *frz = tuples + i;
7315 ItemId itemid = PageGetItemId(page, frz->offset);
7316 HeapTupleHeader htup;
7317
7318 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7319
7320 /* Deliberately avoid relying on tuple hint bits here */
7321 if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7322 {
7324
7326 if (unlikely(!TransactionIdDidCommit(xmin)))
7327 ereport(ERROR,
7329 errmsg_internal("uncommitted xmin %u needs to be frozen",
7330 xmin)));
7331 }
7332
7333 /*
7334 * TransactionIdDidAbort won't work reliably in the presence of XIDs
7335 * left behind by transactions that were in progress during a crash,
7336 * so we can only check that xmax didn't commit
7337 */
7338 if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7339 {
7341
7344 ereport(ERROR,
7346 errmsg_internal("cannot freeze committed xmax %u",
7347 xmax)));
7348 }
7349 }
7350}
7351
7352/*
7353 * Helper which executes freezing of one or more heap tuples on a page on
7354 * behalf of caller. Caller passes an array of tuple plans from
7355 * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7356 * Must be called in a critical section that also marks the buffer dirty and,
7357 * if needed, emits WAL.
7358 */
7359void
7361{
7362 Page page = BufferGetPage(buffer);
7363
7364 for (int i = 0; i < ntuples; i++)
7365 {
7366 HeapTupleFreeze *frz = tuples + i;
7367 ItemId itemid = PageGetItemId(page, frz->offset);
7368 HeapTupleHeader htup;
7369
7370 htup = (HeapTupleHeader) PageGetItem(page, itemid);
7372 }
7373}
7374
7375/*
7376 * heap_freeze_tuple
7377 * Freeze tuple in place, without WAL logging.
7378 *
7379 * Useful for callers like CLUSTER that perform their own WAL logging.
7380 */
7381bool
7383 TransactionId relfrozenxid, TransactionId relminmxid,
7384 TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7385{
7387 bool do_freeze;
7388 bool totally_frozen;
7389 struct VacuumCutoffs cutoffs;
7390 HeapPageFreeze pagefrz;
7391
7392 cutoffs.relfrozenxid = relfrozenxid;
7393 cutoffs.relminmxid = relminmxid;
7394 cutoffs.OldestXmin = FreezeLimit;
7395 cutoffs.OldestMxact = MultiXactCutoff;
7396 cutoffs.FreezeLimit = FreezeLimit;
7398
7399 pagefrz.freeze_required = true;
7400 pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7401 pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7402 pagefrz.FreezePageConflictXid = InvalidTransactionId;
7403 pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7404 pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7405
7406 do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7407 &pagefrz, &frz, &totally_frozen);
7408
7409 /*
7410 * Note that because this is not a WAL-logged operation, we don't need to
7411 * fill in the offset in the freeze record.
7412 */
7413
7414 if (do_freeze)
7416 return do_freeze;
7417}
7418
7419/*
7420 * For a given MultiXactId, return the hint bits that should be set in the
7421 * tuple's infomask.
7422 *
7423 * Normally this should be called for a multixact that was just created, and
7424 * so is on our local cache, so the GetMembers call is fast.
7425 */
7426static void
7429{
7430 int nmembers;
7431 MultiXactMember *members;
7432 int i;
7434 uint16 bits2 = 0;
7435 bool has_update = false;
7437
7438 /*
7439 * We only use this in multis we just created, so they cannot be values
7440 * pre-pg_upgrade.
7441 */
7442 nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7443
7444 for (i = 0; i < nmembers; i++)
7445 {
7447
7448 /*
7449 * Remember the strongest lock mode held by any member of the
7450 * multixact.
7451 */
7452 mode = TUPLOCK_from_mxstatus(members[i].status);
7453 if (mode > strongest)
7454 strongest = mode;
7455
7456 /* See what other bits we need */
7457 switch (members[i].status)
7458 {
7462 break;
7463
7466 break;
7467
7469 has_update = true;
7470 break;
7471
7474 has_update = true;
7475 break;
7476 }
7477 }
7478
7481 bits |= HEAP_XMAX_EXCL_LOCK;
7482 else if (strongest == LockTupleShare)
7483 bits |= HEAP_XMAX_SHR_LOCK;
7484 else if (strongest == LockTupleKeyShare)
7485 bits |= HEAP_XMAX_KEYSHR_LOCK;
7486
7487 if (!has_update)
7488 bits |= HEAP_XMAX_LOCK_ONLY;
7489
7490 if (nmembers > 0)
7491 pfree(members);
7492
7493 *new_infomask = bits;
7495}
7496
7497/*
7498 * MultiXactIdGetUpdateXid
7499 *
7500 * Given a multixact Xmax and corresponding infomask, which does not have the
7501 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7502 * transaction.
7503 *
7504 * Caller is expected to check the status of the updating transaction, if
7505 * necessary.
7506 */
7507static TransactionId
7509{
7511 MultiXactMember *members;
7512 int nmembers;
7513
7514 Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7515 Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7516
7517 /*
7518 * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7519 * pre-pg_upgrade.
7520 */
7521 nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7522
7523 if (nmembers > 0)
7524 {
7525 int i;
7526
7527 for (i = 0; i < nmembers; i++)
7528 {
7529 /* Ignore lockers */
7530 if (!ISUPDATE_from_mxstatus(members[i].status))
7531 continue;
7532
7533 /* there can be at most one updater */
7535 update_xact = members[i].xid;
7536#ifndef USE_ASSERT_CHECKING
7537
7538 /*
7539 * in an assert-enabled build, walk the whole array to ensure
7540 * there's no other updater.
7541 */
7542 break;
7543#endif
7544 }
7545
7546 pfree(members);
7547 }
7548
7549 return update_xact;
7550}
7551
7552/*
7553 * HeapTupleGetUpdateXid
7554 * As above, but use a HeapTupleHeader
7555 *
7556 * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7557 * checking the hint bits.
7558 */
7565
7566/*
7567 * Does the given multixact conflict with the current transaction grabbing a
7568 * tuple lock of the given strength?
7569 *
7570 * The passed infomask pairs up with the given multixact in the tuple header.
7571 *
7572 * If current_is_member is not NULL, it is set to 'true' if the current
7573 * transaction is a member of the given multixact.
7574 */
7575static bool
7577 LockTupleMode lockmode, bool *current_is_member)
7578{
7579 int nmembers;
7580 MultiXactMember *members;
7581 bool result = false;
7582 LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7583
7585 return false;
7586
7587 nmembers = GetMultiXactIdMembers(multi, &members, false,
7589 if (nmembers >= 0)
7590 {
7591 int i;
7592
7593 for (i = 0; i < nmembers; i++)
7594 {
7597
7599 break;
7600
7601 memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7602
7603 /* ignore members from current xact (but track their presence) */
7604 memxid = members[i].xid;
7606 {
7607 if (current_is_member != NULL)
7608 *current_is_member = true;
7609 continue;
7610 }
7611 else if (result)
7612 continue;
7613
7614 /* ignore members that don't conflict with the lock we want */
7616 continue;
7617
7618 if (ISUPDATE_from_mxstatus(members[i].status))
7619 {
7620 /* ignore aborted updaters */
7622 continue;
7623 }
7624 else
7625 {
7626 /* ignore lockers-only that are no longer in progress */
7628 continue;
7629 }
7630
7631 /*
7632 * Whatever remains are either live lockers that conflict with our
7633 * wanted lock, and updaters that are not aborted. Those conflict
7634 * with what we want. Set up to return true, but keep going to
7635 * look for the current transaction among the multixact members,
7636 * if needed.
7637 */
7638 result = true;
7639 }
7640 pfree(members);
7641 }
7642
7643 return result;
7644}
7645
7646/*
7647 * Do_MultiXactIdWait
7648 * Actual implementation for the two functions below.
7649 *
7650 * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7651 * needed to ensure we only sleep on conflicting members, and the infomask is
7652 * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7653 * indicates whether to use conditional lock acquisition, to allow callers to
7654 * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7655 * context information for error messages. 'remaining', if not NULL, receives
7656 * the number of members that are still running, including any (non-aborted)
7657 * subtransactions of our own transaction. 'logLockFailure' indicates whether
7658 * to log details when a lock acquisition fails with 'nowait' enabled.
7659 *
7660 * We do this by sleeping on each member using XactLockTableWait. Any
7661 * members that belong to the current backend are *not* waited for, however;
7662 * this would not merely be useless but would lead to Assert failure inside
7663 * XactLockTableWait. By the time this returns, it is certain that all
7664 * transactions *of other backends* that were members of the MultiXactId
7665 * that conflict with the requested status are dead (and no new ones can have
7666 * been added, since it is not legal to add members to an existing
7667 * MultiXactId).
7668 *
7669 * But by the time we finish sleeping, someone else may have changed the Xmax
7670 * of the containing tuple, so the caller needs to iterate on us somehow.
7671 *
7672 * Note that in case we return false, the number of remaining members is
7673 * not to be trusted.
7674 */
7675static bool
7677 uint16 infomask, bool nowait,
7678 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7679 int *remaining, bool logLockFailure)
7680{
7681 bool result = true;
7682 MultiXactMember *members;
7683 int nmembers;
7684 int remain = 0;
7685
7686 /* for pre-pg_upgrade tuples, no need to sleep at all */
7687 nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7688 GetMultiXactIdMembers(multi, &members, false,
7690
7691 if (nmembers >= 0)
7692 {
7693 int i;
7694
7695 for (i = 0; i < nmembers; i++)
7696 {
7697 TransactionId memxid = members[i].xid;
7698 MultiXactStatus memstatus = members[i].status;
7699
7701 {
7702 remain++;
7703 continue;
7704 }
7705
7707 LOCKMODE_from_mxstatus(status)))
7708 {
7710 remain++;
7711 continue;
7712 }
7713
7714 /*
7715 * This member conflicts with our multi, so we have to sleep (or
7716 * return failure, if asked to avoid waiting.)
7717 *
7718 * Note that we don't set up an error context callback ourselves,
7719 * but instead we pass the info down to XactLockTableWait. This
7720 * might seem a bit wasteful because the context is set up and
7721 * tore down for each member of the multixact, but in reality it
7722 * should be barely noticeable, and it avoids duplicate code.
7723 */
7724 if (nowait)
7725 {
7727 if (!result)
7728 break;
7729 }
7730 else
7731 XactLockTableWait(memxid, rel, ctid, oper);
7732 }
7733
7734 pfree(members);
7735 }
7736
7737 if (remaining)
7738 *remaining = remain;
7739
7740 return result;
7741}
7742
7743/*
7744 * MultiXactIdWait
7745 * Sleep on a MultiXactId.
7746 *
7747 * By the time we finish sleeping, someone else may have changed the Xmax
7748 * of the containing tuple, so the caller needs to iterate on us somehow.
7749 *
7750 * We return (in *remaining, if not NULL) the number of members that are still
7751 * running, including any (non-aborted) subtransactions of our own transaction.
7752 */
7753static void
7755 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7756 int *remaining)
7757{
7758 (void) Do_MultiXactIdWait(multi, status, infomask, false,
7759 rel, ctid, oper, remaining, false);
7760}
7761
7762/*
7763 * ConditionalMultiXactIdWait
7764 * As above, but only lock if we can get the lock without blocking.
7765 *
7766 * By the time we finish sleeping, someone else may have changed the Xmax
7767 * of the containing tuple, so the caller needs to iterate on us somehow.
7768 *
7769 * If the multixact is now all gone, return true. Returns false if some
7770 * transactions might still be running.
7771 *
7772 * We return (in *remaining, if not NULL) the number of members that are still
7773 * running, including any (non-aborted) subtransactions of our own transaction.
7774 */
7775static bool
7777 uint16 infomask, Relation rel, int *remaining,
7778 bool logLockFailure)
7779{
7780 return Do_MultiXactIdWait(multi, status, infomask, true,
7782}
7783
7784/*
7785 * heap_tuple_needs_eventual_freeze
7786 *
7787 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7788 * will eventually require freezing (if tuple isn't removed by pruning first).
7789 */
7790bool
7792{
7793 TransactionId xid;
7794
7795 /*
7796 * If xmin is a normal transaction ID, this tuple is definitely not
7797 * frozen.
7798 */
7799 xid = HeapTupleHeaderGetXmin(tuple);
7800 if (TransactionIdIsNormal(xid))
7801 return true;
7802
7803 /*
7804 * If xmax is a valid xact or multixact, this tuple is also not frozen.
7805 */
7806 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7807 {
7808 MultiXactId multi;
7809
7810 multi = HeapTupleHeaderGetRawXmax(tuple);
7811 if (MultiXactIdIsValid(multi))
7812 return true;
7813 }
7814 else
7815 {
7816 xid = HeapTupleHeaderGetRawXmax(tuple);
7817 if (TransactionIdIsNormal(xid))
7818 return true;
7819 }
7820
7821 if (tuple->t_infomask & HEAP_MOVED)
7822 {
7823 xid = HeapTupleHeaderGetXvac(tuple);
7824 if (TransactionIdIsNormal(xid))
7825 return true;
7826 }
7827
7828 return false;
7829}
7830
7831/*
7832 * heap_tuple_should_freeze
7833 *
7834 * Return value indicates if heap_prepare_freeze_tuple sibling function would
7835 * (or should) force freezing of the heap page that contains caller's tuple.
7836 * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7837 * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7838 *
7839 * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7840 * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7841 * Our working assumption is that caller won't decide to freeze this tuple.
7842 * It's up to caller to only ratchet back its own top-level trackers after the
7843 * point that it fully commits to not freezing the tuple/page in question.
7844 */
7845bool
7847 const struct VacuumCutoffs *cutoffs,
7848 TransactionId *NoFreezePageRelfrozenXid,
7849 MultiXactId *NoFreezePageRelminMxid)
7850{
7851 TransactionId xid;
7852 MultiXactId multi;
7853 bool freeze = false;
7854
7855 /* First deal with xmin */
7856 xid = HeapTupleHeaderGetXmin(tuple);
7857 if (TransactionIdIsNormal(xid))
7858 {
7860 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7861 *NoFreezePageRelfrozenXid = xid;
7862 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7863 freeze = true;
7864 }
7865
7866 /* Now deal with xmax */
7868 multi = InvalidMultiXactId;
7869 if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7870 multi = HeapTupleHeaderGetRawXmax(tuple);
7871 else
7872 xid = HeapTupleHeaderGetRawXmax(tuple);
7873
7874 if (TransactionIdIsNormal(xid))
7875 {
7877 /* xmax is a non-permanent XID */
7878 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7879 *NoFreezePageRelfrozenXid = xid;
7880 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7881 freeze = true;
7882 }
7883 else if (!MultiXactIdIsValid(multi))
7884 {
7885 /* xmax is a permanent XID or invalid MultiXactId/XID */
7886 }
7887 else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7888 {
7889 /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
7890 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7891 *NoFreezePageRelminMxid = multi;
7892 /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
7893 freeze = true;
7894 }
7895 else
7896 {
7897 /* xmax is a MultiXactId that may have an updater XID */
7898 MultiXactMember *members;
7899 int nmembers;
7900
7902 if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7903 *NoFreezePageRelminMxid = multi;
7904 if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
7905 freeze = true;
7906
7907 /* need to check whether any member of the mxact is old */
7908 nmembers = GetMultiXactIdMembers(multi, &members, false,
7910
7911 for (int i = 0; i < nmembers; i++)
7912 {
7913 xid = members[i].xid;
7915 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7916 *NoFreezePageRelfrozenXid = xid;
7917 if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7918 freeze = true;
7919 }
7920 if (nmembers > 0)
7921 pfree(members);
7922 }
7923
7924 if (tuple->t_infomask & HEAP_MOVED)
7925 {
7926 xid = HeapTupleHeaderGetXvac(tuple);
7927 if (TransactionIdIsNormal(xid))
7928 {
7930 if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7931 *NoFreezePageRelfrozenXid = xid;
7932 /* heap_prepare_freeze_tuple forces xvac freezing */
7933 freeze = true;
7934 }
7935 }
7936
7937 return freeze;
7938}
7939
7940/*
7941 * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
7942 * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
7943 * that caller is in the process of physically removing, e.g. via HOT pruning
7944 * or index deletion.
7945 *
7946 * Caller must initialize its value to InvalidTransactionId, which is
7947 * generally interpreted as "definitely no need for a recovery conflict".
7948 * Final value must reflect all heap tuples that caller will physically remove
7949 * (or remove TID references to) via its ongoing pruning/deletion operation.
7950 * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
7951 * caller's WAL record) by REDO routine when it replays caller's operation.
7952 */
7953void
7955 TransactionId *snapshotConflictHorizon)
7956{
7960
7961 if (tuple->t_infomask & HEAP_MOVED)
7962 {
7963 if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
7964 *snapshotConflictHorizon = xvac;
7965 }
7966
7967 /*
7968 * Ignore tuples inserted by an aborted transaction or if the tuple was
7969 * updated/deleted by the inserting transaction.
7970 *
7971 * Look for a committed hint bit, or if no xmin bit is set, check clog.
7972 */
7973 if (HeapTupleHeaderXminCommitted(tuple) ||
7975 {
7976 if (xmax != xmin &&
7977 TransactionIdFollows(xmax, *snapshotConflictHorizon))
7978 *snapshotConflictHorizon = xmax;
7979 }
7980}
7981
7982#ifdef USE_PREFETCH
7983/*
7984 * Helper function for heap_index_delete_tuples. Issues prefetch requests for
7985 * prefetch_count buffers. The prefetch_state keeps track of all the buffers
7986 * we can prefetch, and which have already been prefetched; each call to this
7987 * function picks up where the previous call left off.
7988 *
7989 * Note: we expect the deltids array to be sorted in an order that groups TIDs
7990 * by heap block, with all TIDs for each block appearing together in exactly
7991 * one group.
7992 */
7993static void
7996 int prefetch_count)
7997{
7999 int count = 0;
8000 int i;
8001 int ndeltids = prefetch_state->ndeltids;
8002 TM_IndexDelete *deltids = prefetch_state->deltids;
8003
8004 for (i = prefetch_state->next_item;
8005 i < ndeltids && count < prefetch_count;
8006 i++)
8007 {
8008 ItemPointer htid = &deltids[i].tid;
8009
8012 {
8015 count++;
8016 }
8017 }
8018
8019 /*
8020 * Save the prefetch position so that next time we can continue from that
8021 * position.
8022 */
8023 prefetch_state->next_item = i;
8024 prefetch_state->cur_hblkno = cur_hblkno;
8025}
8026#endif
8027
8028/*
8029 * Helper function for heap_index_delete_tuples. Checks for index corruption
8030 * involving an invalid TID in index AM caller's index page.
8031 *
8032 * This is an ideal place for these checks. The index AM must hold a buffer
8033 * lock on the index page containing the TIDs we examine here, so we don't
8034 * have to worry about concurrent VACUUMs at all. We can be sure that the
8035 * index is corrupt when htid points directly to an LP_UNUSED item or
8036 * heap-only tuple, which is not the case during standard index scans.
8037 */
8038static inline void
8040 Page page, OffsetNumber maxoff,
8042{
8044 ItemId iid;
8045
8046 Assert(OffsetNumberIsValid(istatus->idxoffnum));
8047
8048 if (unlikely(indexpagehoffnum > maxoff))
8049 ereport(ERROR,
8051 errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8054 istatus->idxoffnum, delstate->iblknum,
8056
8058 if (unlikely(!ItemIdIsUsed(iid)))
8059 ereport(ERROR,
8061 errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8064 istatus->idxoffnum, delstate->iblknum,
8066
8067 if (ItemIdHasStorage(iid))
8068 {
8069 HeapTupleHeader htup;
8070
8072 htup = (HeapTupleHeader) PageGetItem(page, iid);
8073
8075 ereport(ERROR,
8077 errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8080 istatus->idxoffnum, delstate->iblknum,
8082 }
8083}
8084
8085/*
8086 * heapam implementation of tableam's index_delete_tuples interface.
8087 *
8088 * This helper function is called by index AMs during index tuple deletion.
8089 * See tableam header comments for an explanation of the interface implemented
8090 * here and a general theory of operation. Note that each call here is either
8091 * a simple index deletion call, or a bottom-up index deletion call.
8092 *
8093 * It's possible for this to generate a fair amount of I/O, since we may be
8094 * deleting hundreds of tuples from a single index block. To amortize that
8095 * cost to some degree, this uses prefetching and combines repeat accesses to
8096 * the same heap block.
8097 */
8100{
8101 /* Initial assumption is that earlier pruning took care of conflict */
8102 TransactionId snapshotConflictHorizon = InvalidTransactionId;
8105 Page page = NULL;
8108#ifdef USE_PREFETCH
8111#endif
8113 int finalndeltids = 0,
8114 nblocksaccessed = 0;
8115
8116 /* State that's only used in bottom-up index deletion case */
8117 int nblocksfavorable = 0;
8118 int curtargetfreespace = delstate->bottomupfreespace,
8119 lastfreespace = 0,
8120 actualfreespace = 0;
8121 bool bottomup_final_block = false;
8122
8124
8125 /* Sort caller's deltids array by TID for further processing */
8127
8128 /*
8129 * Bottom-up case: resort deltids array in an order attuned to where the
8130 * greatest number of promising TIDs are to be found, and determine how
8131 * many blocks from the start of sorted array should be considered
8132 * favorable. This will also shrink the deltids array in order to
8133 * eliminate completely unfavorable blocks up front.
8134 */
8135 if (delstate->bottomup)
8137
8138#ifdef USE_PREFETCH
8139 /* Initialize prefetch state. */
8141 prefetch_state.next_item = 0;
8142 prefetch_state.ndeltids = delstate->ndeltids;
8143 prefetch_state.deltids = delstate->deltids;
8144
8145 /*
8146 * Determine the prefetch distance that we will attempt to maintain.
8147 *
8148 * Since the caller holds a buffer lock somewhere in rel, we'd better make
8149 * sure that isn't a catalog relation before we call code that does
8150 * syscache lookups, to avoid risk of deadlock.
8151 */
8152 if (IsCatalogRelation(rel))
8154 else
8157
8158 /* Cap initial prefetch distance for bottom-up deletion caller */
8159 if (delstate->bottomup)
8160 {
8164 }
8165
8166 /* Start prefetching. */
8168#endif
8169
8170 /* Iterate over deltids, determine which to delete, check their horizon */
8171 Assert(delstate->ndeltids > 0);
8172 for (int i = 0; i < delstate->ndeltids; i++)
8173 {
8174 TM_IndexDelete *ideltid = &delstate->deltids[i];
8175 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8176 ItemPointer htid = &ideltid->tid;
8177 OffsetNumber offnum;
8178
8179 /*
8180 * Read buffer, and perform required extra steps each time a new block
8181 * is encountered. Avoid refetching if it's the same block as the one
8182 * from the last htid.
8183 */
8184 if (blkno == InvalidBlockNumber ||
8186 {
8187 /*
8188 * Consider giving up early for bottom-up index deletion caller
8189 * first. (Only prefetch next-next block afterwards, when it
8190 * becomes clear that we're at least going to access the next
8191 * block in line.)
8192 *
8193 * Sometimes the first block frees so much space for bottom-up
8194 * caller that the deletion process can end without accessing any
8195 * more blocks. It is usually necessary to access 2 or 3 blocks
8196 * per bottom-up deletion operation, though.
8197 */
8198 if (delstate->bottomup)
8199 {
8200 /*
8201 * We often allow caller to delete a few additional items
8202 * whose entries we reached after the point that space target
8203 * from caller was satisfied. The cost of accessing the page
8204 * was already paid at that point, so it made sense to finish
8205 * it off. When that happened, we finalize everything here
8206 * (by finishing off the whole bottom-up deletion operation
8207 * without needlessly paying the cost of accessing any more
8208 * blocks).
8209 */
8211 break;
8212
8213 /*
8214 * Give up when we didn't enable our caller to free any
8215 * additional space as a result of processing the page that we
8216 * just finished up with. This rule is the main way in which
8217 * we keep the cost of bottom-up deletion under control.
8218 */
8220 break;
8221 lastfreespace = actualfreespace; /* for next time */
8222
8223 /*
8224 * Deletion operation (which is bottom-up) will definitely
8225 * access the next block in line. Prepare for that now.
8226 *
8227 * Decay target free space so that we don't hang on for too
8228 * long with a marginal case. (Space target is only truly
8229 * helpful when it allows us to recognize that we don't need
8230 * to access more than 1 or 2 blocks to satisfy caller due to
8231 * agreeable workload characteristics.)
8232 *
8233 * We are a bit more patient when we encounter contiguous
8234 * blocks, though: these are treated as favorable blocks. The
8235 * decay process is only applied when the next block in line
8236 * is not a favorable/contiguous block. This is not an
8237 * exception to the general rule; we still insist on finding
8238 * at least one deletable item per block accessed. See
8239 * bottomup_nblocksfavorable() for full details of the theory
8240 * behind favorable blocks and heap block locality in general.
8241 *
8242 * Note: The first block in line is always treated as a
8243 * favorable block, so the earliest possible point that the
8244 * decay can be applied is just before we access the second
8245 * block in line. The Assert() verifies this for us.
8246 */
8248 if (nblocksfavorable > 0)
8250 else
8251 curtargetfreespace /= 2;
8252 }
8253
8254 /* release old buffer */
8255 if (BufferIsValid(buf))
8257
8259 buf = ReadBuffer(rel, blkno);
8261 Assert(!delstate->bottomup ||
8263
8264#ifdef USE_PREFETCH
8265
8266 /*
8267 * To maintain the prefetch distance, prefetch one more page for
8268 * each page we read.
8269 */
8271#endif
8272
8274
8275 page = BufferGetPage(buf);
8276 maxoff = PageGetMaxOffsetNumber(page);
8277 }
8278
8279 /*
8280 * In passing, detect index corruption involving an index page with a
8281 * TID that points to a location in the heap that couldn't possibly be
8282 * correct. We only do this with actual TIDs from caller's index page
8283 * (not items reached by traversing through a HOT chain).
8284 */
8286
8287 if (istatus->knowndeletable)
8288 Assert(!delstate->bottomup && !istatus->promising);
8289 else
8290 {
8291 ItemPointerData tmp = *htid;
8293
8294 /* Are any tuples from this HOT chain non-vacuumable? */
8296 &heapTuple, NULL, true))
8297 continue; /* can't delete entry */
8298
8299 /* Caller will delete, since whole HOT chain is vacuumable */
8300 istatus->knowndeletable = true;
8301
8302 /* Maintain index free space info for bottom-up deletion case */
8303 if (delstate->bottomup)
8304 {
8305 Assert(istatus->freespace > 0);
8306 actualfreespace += istatus->freespace;
8308 bottomup_final_block = true;
8309 }
8310 }
8311
8312 /*
8313 * Maintain snapshotConflictHorizon value for deletion operation as a
8314 * whole by advancing current value using heap tuple headers. This is
8315 * loosely based on the logic for pruning a HOT chain.
8316 */
8318 priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8319 for (;;)
8320 {
8321 ItemId lp;
8322 HeapTupleHeader htup;
8323
8324 /* Sanity check (pure paranoia) */
8325 if (offnum < FirstOffsetNumber)
8326 break;
8327
8328 /*
8329 * An offset past the end of page's line pointer array is possible
8330 * when the array was truncated
8331 */
8332 if (offnum > maxoff)
8333 break;
8334
8335 lp = PageGetItemId(page, offnum);
8337 {
8338 offnum = ItemIdGetRedirect(lp);
8339 continue;
8340 }
8341
8342 /*
8343 * We'll often encounter LP_DEAD line pointers (especially with an
8344 * entry marked knowndeletable by our caller up front). No heap
8345 * tuple headers get examined for an htid that leads us to an
8346 * LP_DEAD item. This is okay because the earlier pruning
8347 * operation that made the line pointer LP_DEAD in the first place
8348 * must have considered the original tuple header as part of
8349 * generating its own snapshotConflictHorizon value.
8350 *
8351 * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8352 * the same strategy that index vacuuming uses in all cases. Index
8353 * VACUUM WAL records don't even have a snapshotConflictHorizon
8354 * field of their own for this reason.
8355 */
8356 if (!ItemIdIsNormal(lp))
8357 break;
8358
8359 htup = (HeapTupleHeader) PageGetItem(page, lp);
8360
8361 /*
8362 * Check the tuple XMIN against prior XMAX, if any
8363 */
8366 break;
8367
8369 &snapshotConflictHorizon);
8370
8371 /*
8372 * If the tuple is not HOT-updated, then we are at the end of this
8373 * HOT-chain. No need to visit later tuples from the same update
8374 * chain (they get their own index entries) -- just move on to
8375 * next htid from index AM caller.
8376 */
8377 if (!HeapTupleHeaderIsHotUpdated(htup))
8378 break;
8379
8380 /* Advance to next HOT chain member */
8381 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8382 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8384 }
8385
8386 /* Enable further/final shrinking of deltids for caller */
8387 finalndeltids = i + 1;
8388 }
8389
8391
8392 /*
8393 * Shrink deltids array to exclude non-deletable entries at the end. This
8394 * is not just a minor optimization. Final deltids array size might be
8395 * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8396 * ndeltids being zero in all cases with zero total deletable entries.
8397 */
8398 Assert(finalndeltids > 0 || delstate->bottomup);
8399 delstate->ndeltids = finalndeltids;
8400
8401 return snapshotConflictHorizon;
8402}
8403
8404/*
8405 * Specialized inlineable comparison function for index_delete_sort()
8406 */
8407static inline int
8409{
8410 ItemPointer tid1 = &deltid1->tid;
8411 ItemPointer tid2 = &deltid2->tid;
8412
8413 {
8416
8417 if (blk1 != blk2)
8418 return (blk1 < blk2) ? -1 : 1;
8419 }
8420 {
8423
8424 if (pos1 != pos2)
8425 return (pos1 < pos2) ? -1 : 1;
8426 }
8427
8428 Assert(false);
8429
8430 return 0;
8431}
8432
8433/*
8434 * Sort deltids array from delstate by TID. This prepares it for further
8435 * processing by heap_index_delete_tuples().
8436 *
8437 * This operation becomes a noticeable consumer of CPU cycles with some
8438 * workloads, so we go to the trouble of specialization/micro optimization.
8439 * We use shellsort for this because it's easy to specialize, compiles to
8440 * relatively few instructions, and is adaptive to presorted inputs/subsets
8441 * (which are typical here).
8442 */
8443static void
8445{
8446 TM_IndexDelete *deltids = delstate->deltids;
8447 int ndeltids = delstate->ndeltids;
8448
8449 /*
8450 * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8451 *
8452 * This implementation is fast with array sizes up to ~4500. This covers
8453 * all supported BLCKSZ values.
8454 */
8455 const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8456
8457 /* Think carefully before changing anything here -- keep swaps cheap */
8458 StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8459 "element size exceeds 8 bytes");
8460
8461 for (int g = 0; g < lengthof(gaps); g++)
8462 {
8463 for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8464 {
8465 TM_IndexDelete d = deltids[i];
8466 int j = i;
8467
8468 while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8469 {
8470 deltids[j] = deltids[j - hi];
8471 j -= hi;
8472 }
8473 deltids[j] = d;
8474 }
8475 }
8476}
8477
8478/*
8479 * Returns how many blocks should be considered favorable/contiguous for a
8480 * bottom-up index deletion pass. This is a number of heap blocks that starts
8481 * from and includes the first block in line.
8482 *
8483 * There is always at least one favorable block during bottom-up index
8484 * deletion. In the worst case (i.e. with totally random heap blocks) the
8485 * first block in line (the only favorable block) can be thought of as a
8486 * degenerate array of contiguous blocks that consists of a single block.
8487 * heap_index_delete_tuples() will expect this.
8488 *
8489 * Caller passes blockgroups, a description of the final order that deltids
8490 * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8491 * processing. Note that deltids need not actually be sorted just yet (caller
8492 * only passes deltids to us so that we can interpret blockgroups).
8493 *
8494 * You might guess that the existence of contiguous blocks cannot matter much,
8495 * since in general the main factor that determines which blocks we visit is
8496 * the number of promising TIDs, which is a fixed hint from the index AM.
8497 * We're not really targeting the general case, though -- the actual goal is
8498 * to adapt our behavior to a wide variety of naturally occurring conditions.
8499 * The effects of most of the heuristics we apply are only noticeable in the
8500 * aggregate, over time and across many _related_ bottom-up index deletion
8501 * passes.
8502 *
8503 * Deeming certain blocks favorable allows heapam to recognize and adapt to
8504 * workloads where heap blocks visited during bottom-up index deletion can be
8505 * accessed contiguously, in the sense that each newly visited block is the
8506 * neighbor of the block that bottom-up deletion just finished processing (or
8507 * close enough to it). It will likely be cheaper to access more favorable
8508 * blocks sooner rather than later (e.g. in this pass, not across a series of
8509 * related bottom-up passes). Either way it is probably only a matter of time
8510 * (or a matter of further correlated version churn) before all blocks that
8511 * appear together as a single large batch of favorable blocks get accessed by
8512 * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8513 * appear almost constantly or not even once (it all depends on per-index
8514 * workload characteristics).
8515 *
8516 * Note that the blockgroups sort order applies a power-of-two bucketing
8517 * scheme that creates opportunities for contiguous groups of blocks to get
8518 * batched together, at least with workloads that are naturally amenable to
8519 * being driven by heap block locality. This doesn't just enhance the spatial
8520 * locality of bottom-up heap block processing in the obvious way. It also
8521 * enables temporal locality of access, since sorting by heap block number
8522 * naturally tends to make the bottom-up processing order deterministic.
8523 *
8524 * Consider the following example to get a sense of how temporal locality
8525 * might matter: There is a heap relation with several indexes, each of which
8526 * is low to medium cardinality. It is subject to constant non-HOT updates.
8527 * The updates are skewed (in one part of the primary key, perhaps). None of
8528 * the indexes are logically modified by the UPDATE statements (if they were
8529 * then bottom-up index deletion would not be triggered in the first place).
8530 * Naturally, each new round of index tuples (for each heap tuple that gets a
8531 * heap_update() call) will have the same heap TID in each and every index.
8532 * Since these indexes are low cardinality and never get logically modified,
8533 * heapam processing during bottom-up deletion passes will access heap blocks
8534 * in approximately sequential order. Temporal locality of access occurs due
8535 * to bottom-up deletion passes behaving very similarly across each of the
8536 * indexes at any given moment. This keeps the number of buffer misses needed
8537 * to visit heap blocks to a minimum.
8538 */
8539static int
8541 TM_IndexDelete *deltids)
8542{
8543 int64 lastblock = -1;
8544 int nblocksfavorable = 0;
8545
8546 Assert(nblockgroups >= 1);
8548
8549 /*
8550 * We tolerate heap blocks that will be accessed only slightly out of
8551 * physical order. Small blips occur when a pair of almost-contiguous
8552 * blocks happen to fall into different buckets (perhaps due only to a
8553 * small difference in npromisingtids that the bucketing scheme didn't
8554 * quite manage to ignore). We effectively ignore these blips by applying
8555 * a small tolerance. The precise tolerance we use is a little arbitrary,
8556 * but it works well enough in practice.
8557 */
8558 for (int b = 0; b < nblockgroups; b++)
8559 {
8560 IndexDeleteCounts *group = blockgroups + b;
8561 TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8563
8564 if (lastblock != -1 &&
8567 break;
8568
8570 lastblock = block;
8571 }
8572
8573 /* Always indicate that there is at least 1 favorable block */
8575
8576 return nblocksfavorable;
8577}
8578
8579/*
8580 * qsort comparison function for bottomup_sort_and_shrink()
8581 */
8582static int
8583bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8584{
8587
8588 /*
8589 * Most significant field is npromisingtids (which we invert the order of
8590 * so as to sort in desc order).
8591 *
8592 * Caller should have already normalized npromisingtids fields into
8593 * power-of-two values (buckets).
8594 */
8595 if (group1->npromisingtids > group2->npromisingtids)
8596 return -1;
8597 if (group1->npromisingtids < group2->npromisingtids)
8598 return 1;
8599
8600 /*
8601 * Tiebreak: desc ntids sort order.
8602 *
8603 * We cannot expect power-of-two values for ntids fields. We should
8604 * behave as if they were already rounded up for us instead.
8605 */
8606 if (group1->ntids != group2->ntids)
8607 {
8610
8611 if (ntids1 > ntids2)
8612 return -1;
8613 if (ntids1 < ntids2)
8614 return 1;
8615 }
8616
8617 /*
8618 * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8619 * block in deltids array) order.
8620 *
8621 * This is equivalent to sorting in ascending heap block number order
8622 * (among otherwise equal subsets of the array). This approach allows us
8623 * to avoid accessing the out-of-line TID. (We rely on the assumption
8624 * that the deltids array was sorted in ascending heap TID order when
8625 * these offsets to the first TID from each heap block group were formed.)
8626 */
8627 if (group1->ifirsttid > group2->ifirsttid)
8628 return 1;
8629 if (group1->ifirsttid < group2->ifirsttid)
8630 return -1;
8631
8633
8634 return 0;
8635}
8636
8637/*
8638 * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8639 *
8640 * Sorts deltids array in the order needed for useful processing by bottom-up
8641 * deletion. The array should already be sorted in TID order when we're
8642 * called. The sort process groups heap TIDs from deltids into heap block
8643 * groupings. Earlier/more-promising groups/blocks are usually those that are
8644 * known to have the most "promising" TIDs.
8645 *
8646 * Sets new size of deltids array (ndeltids) in state. deltids will only have
8647 * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8648 * return. This often means that deltids will be shrunk to a small fraction
8649 * of its original size (we eliminate many heap blocks from consideration for
8650 * caller up front).
8651 *
8652 * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8653 * for a definition and full details.
8654 */
8655static int
8657{
8661 int nblockgroups = 0;
8662 int ncopied = 0;
8663 int nblocksfavorable = 0;
8664
8665 Assert(delstate->bottomup);
8666 Assert(delstate->ndeltids > 0);
8667
8668 /* Calculate per-heap-block count of TIDs */
8670 for (int i = 0; i < delstate->ndeltids; i++)
8671 {
8672 TM_IndexDelete *ideltid = &delstate->deltids[i];
8673 TM_IndexStatus *istatus = delstate->status + ideltid->id;
8674 ItemPointer htid = &ideltid->tid;
8675 bool promising = istatus->promising;
8676
8677 if (curblock != ItemPointerGetBlockNumber(htid))
8678 {
8679 /* New block group */
8680 nblockgroups++;
8681
8683 !BlockNumberIsValid(curblock));
8684
8685 curblock = ItemPointerGetBlockNumber(htid);
8686 blockgroups[nblockgroups - 1].ifirsttid = i;
8687 blockgroups[nblockgroups - 1].ntids = 1;
8688 blockgroups[nblockgroups - 1].npromisingtids = 0;
8689 }
8690 else
8691 {
8692 blockgroups[nblockgroups - 1].ntids++;
8693 }
8694
8695 if (promising)
8696 blockgroups[nblockgroups - 1].npromisingtids++;
8697 }
8698
8699 /*
8700 * We're about ready to sort block groups to determine the optimal order
8701 * for visiting heap blocks. But before we do, round the number of
8702 * promising tuples for each block group up to the next power-of-two,
8703 * unless it is very low (less than 4), in which case we round up to 4.
8704 * npromisingtids is far too noisy to trust when choosing between a pair
8705 * of block groups that both have very low values.
8706 *
8707 * This scheme divides heap blocks/block groups into buckets. Each bucket
8708 * contains blocks that have _approximately_ the same number of promising
8709 * TIDs as each other. The goal is to ignore relatively small differences
8710 * in the total number of promising entries, so that the whole process can
8711 * give a little weight to heapam factors (like heap block locality)
8712 * instead. This isn't a trade-off, really -- we have nothing to lose. It
8713 * would be foolish to interpret small differences in npromisingtids
8714 * values as anything more than noise.
8715 *
8716 * We tiebreak on nhtids when sorting block group subsets that have the
8717 * same npromisingtids, but this has the same issues as npromisingtids,
8718 * and so nhtids is subject to the same power-of-two bucketing scheme. The
8719 * only reason that we don't fix nhtids in the same way here too is that
8720 * we'll need accurate nhtids values after the sort. We handle nhtids
8721 * bucketization dynamically instead (in the sort comparator).
8722 *
8723 * See bottomup_nblocksfavorable() for a full explanation of when and how
8724 * heap locality/favorable blocks can significantly influence when and how
8725 * heap blocks are accessed.
8726 */
8727 for (int b = 0; b < nblockgroups; b++)
8728 {
8729 IndexDeleteCounts *group = blockgroups + b;
8730
8731 /* Better off falling back on nhtids with low npromisingtids */
8732 if (group->npromisingtids <= 4)
8733 group->npromisingtids = 4;
8734 else
8735 group->npromisingtids =
8737 }
8738
8739 /* Sort groups and rearrange caller's deltids array */
8742 reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8743
8745 /* Determine number of favorable blocks at the start of final deltids */
8747 delstate->deltids);
8748
8749 for (int b = 0; b < nblockgroups; b++)
8750 {
8751 IndexDeleteCounts *group = blockgroups + b;
8752 TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8753
8755 sizeof(TM_IndexDelete) * group->ntids);
8756 ncopied += group->ntids;
8757 }
8758
8759 /* Copy final grouped and sorted TIDs back into start of caller's array */
8761 sizeof(TM_IndexDelete) * ncopied);
8762 delstate->ndeltids = ncopied;
8763
8766
8767 return nblocksfavorable;
8768}
8769
8770/*
8771 * Perform XLogInsert for a heap-update operation. Caller must already
8772 * have modified the buffer(s) and marked them dirty.
8773 */
8774static XLogRecPtr
8779 bool walLogical)
8780{
8784 uint8 info;
8786 uint16 prefixlen = 0,
8787 suffixlen = 0;
8789 Page page = BufferGetPage(newbuf);
8791 bool init;
8792 int bufflags;
8793
8794 /* Caller should not call me on a non-WAL-logged relation */
8796
8798
8800 info = XLOG_HEAP_HOT_UPDATE;
8801 else
8802 info = XLOG_HEAP_UPDATE;
8803
8804 /*
8805 * If the old and new tuple are on the same page, we only need to log the
8806 * parts of the new tuple that were changed. That saves on the amount of
8807 * WAL we need to write. Currently, we just count any unchanged bytes in
8808 * the beginning and end of the tuple. That's quick to check, and
8809 * perfectly covers the common case that only one field is updated.
8810 *
8811 * We could do this even if the old and new tuple are on different pages,
8812 * but only if we don't make a full-page image of the old page, which is
8813 * difficult to know in advance. Also, if the old tuple is corrupt for
8814 * some reason, it would allow the corruption to propagate the new page,
8815 * so it seems best to avoid. Under the general assumption that most
8816 * updates tend to create the new tuple version on the same page, there
8817 * isn't much to be gained by doing this across pages anyway.
8818 *
8819 * Skip this if we're taking a full-page image of the new page, as we
8820 * don't include the new tuple in the WAL record in that case. Also
8821 * disable if effective_wal_level='logical', as logical decoding needs to
8822 * be able to read the new tuple in whole from the WAL record alone.
8823 */
8824 if (oldbuf == newbuf && !need_tuple_data &&
8826 {
8827 char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8828 char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8829 int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8830 int newlen = newtup->t_len - newtup->t_data->t_hoff;
8831
8832 /* Check for common prefix between old and new tuple */
8833 for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8834 {
8835 if (newp[prefixlen] != oldp[prefixlen])
8836 break;
8837 }
8838
8839 /*
8840 * Storing the length of the prefix takes 2 bytes, so we need to save
8841 * at least 3 bytes or there's no point.
8842 */
8843 if (prefixlen < 3)
8844 prefixlen = 0;
8845
8846 /* Same for suffix */
8848 {
8849 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8850 break;
8851 }
8852 if (suffixlen < 3)
8853 suffixlen = 0;
8854 }
8855
8856 /* Prepare main WAL data chain */
8857 xlrec.flags = 0;
8862 if (prefixlen > 0)
8864 if (suffixlen > 0)
8866 if (need_tuple_data)
8867 {
8869 if (old_key_tuple)
8870 {
8871 if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
8873 else
8875 }
8876 }
8877
8878 /* If new tuple is the single and first tuple on page... */
8881 {
8882 info |= XLOG_HEAP_INIT_PAGE;
8883 init = true;
8884 }
8885 else
8886 init = false;
8887
8888 /* Prepare WAL data for the old page */
8889 xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
8890 xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
8891 xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
8892 oldtup->t_data->t_infomask2);
8893
8894 /* Prepare WAL data for the new page */
8895 xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
8896 xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
8897
8899 if (init)
8901 if (need_tuple_data)
8903
8905 if (oldbuf != newbuf)
8907
8909
8910 /*
8911 * Prepare WAL data for the new tuple.
8912 */
8913 if (prefixlen > 0 || suffixlen > 0)
8914 {
8915 if (prefixlen > 0 && suffixlen > 0)
8916 {
8919 XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
8920 }
8921 else if (prefixlen > 0)
8922 {
8923 XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
8924 }
8925 else
8926 {
8927 XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
8928 }
8929 }
8930
8931 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
8932 xlhdr.t_infomask = newtup->t_data->t_infomask;
8933 xlhdr.t_hoff = newtup->t_data->t_hoff;
8935
8936 /*
8937 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
8938 *
8939 * The 'data' doesn't include the common prefix or suffix.
8940 */
8942 if (prefixlen == 0)
8943 {
8945 (char *) newtup->t_data + SizeofHeapTupleHeader,
8947 }
8948 else
8949 {
8950 /*
8951 * Have to write the null bitmap and data after the common prefix as
8952 * two separate rdata entries.
8953 */
8954 /* bitmap [+ padding] [+ oid] */
8955 if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
8956 {
8958 (char *) newtup->t_data + SizeofHeapTupleHeader,
8959 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
8960 }
8961
8962 /* data after common prefix */
8964 (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
8965 newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
8966 }
8967
8968 /* We need to log a tuple identity */
8970 {
8971 /* don't really need this, but its more comfy to decode */
8972 xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
8973 xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
8974 xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
8975
8977
8978 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
8981 }
8982
8983 /* filtering by origin on a row level is much more efficient */
8985
8986 recptr = XLogInsert(RM_HEAP_ID, info);
8987
8988 return recptr;
8989}
8990
8991/*
8992 * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
8993 *
8994 * This is only used when effective_wal_level is logical, and only for
8995 * catalog tuples.
8996 */
8997static XLogRecPtr
8999{
9001
9003 HeapTupleHeader hdr = tup->t_data;
9004
9005 Assert(ItemPointerIsValid(&tup->t_self));
9006 Assert(tup->t_tableOid != InvalidOid);
9007
9008 xlrec.top_xid = GetTopTransactionId();
9009 xlrec.target_locator = relation->rd_locator;
9010 xlrec.target_tid = tup->t_self;
9011
9012 /*
9013 * If the tuple got inserted & deleted in the same TX we definitely have a
9014 * combo CID, set cmin and cmax.
9015 */
9016 if (hdr->t_infomask & HEAP_COMBOCID)
9017 {
9020 xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9021 xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9022 xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9023 }
9024 /* No combo CID, so only cmin or cmax can be set by this TX */
9025 else
9026 {
9027 /*
9028 * Tuple inserted.
9029 *
9030 * We need to check for LOCK ONLY because multixacts might be
9031 * transferred to the new tuple in case of FOR KEY SHARE updates in
9032 * which case there will be an xmax, although the tuple just got
9033 * inserted.
9034 */
9035 if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9037 {
9039 xlrec.cmax = InvalidCommandId;
9040 }
9041 /* Tuple from a different tx updated or deleted. */
9042 else
9043 {
9044 xlrec.cmin = InvalidCommandId;
9046 }
9047 xlrec.combocid = InvalidCommandId;
9048 }
9049
9050 /*
9051 * Note that we don't need to register the buffer here, because this
9052 * operation does not modify the page. The insert/update/delete that
9053 * called us certainly did, but that's WAL-logged separately.
9054 */
9057
9058 /* will be looked at irrespective of origin */
9059
9061
9062 return recptr;
9063}
9064
9065/*
9066 * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9067 * the old tuple in an UPDATE or DELETE.
9068 *
9069 * Returns NULL if there's no need to log an identity or if there's no suitable
9070 * key defined.
9071 *
9072 * Pass key_required true if any replica identity columns changed value, or if
9073 * any of them have any external data. Delete must always pass true.
9074 *
9075 * *copy is set to true if the returned tuple is a modified copy rather than
9076 * the same tuple that was passed in.
9077 */
9078static HeapTuple
9080 bool *copy)
9081{
9082 TupleDesc desc = RelationGetDescr(relation);
9083 char replident = relation->rd_rel->relreplident;
9086 bool nulls[MaxHeapAttributeNumber];
9088
9089 *copy = false;
9090
9091 if (!RelationIsLogicallyLogged(relation))
9092 return NULL;
9093
9094 if (replident == REPLICA_IDENTITY_NOTHING)
9095 return NULL;
9096
9097 if (replident == REPLICA_IDENTITY_FULL)
9098 {
9099 /*
9100 * When logging the entire old tuple, it very well could contain
9101 * toasted columns. If so, force them to be inlined.
9102 */
9103 if (HeapTupleHasExternal(tp))
9104 {
9105 *copy = true;
9106 tp = toast_flatten_tuple(tp, desc);
9107 }
9108 return tp;
9109 }
9110
9111 /* if the key isn't required and we're only logging the key, we're done */
9112 if (!key_required)
9113 return NULL;
9114
9115 /* find out the replica identity columns */
9118
9119 /*
9120 * If there's no defined replica identity columns, treat as !key_required.
9121 * (This case should not be reachable from heap_update, since that should
9122 * calculate key_required accurately. But heap_delete just passes
9123 * constant true for key_required, so we can hit this case in deletes.)
9124 */
9125 if (bms_is_empty(idattrs))
9126 return NULL;
9127
9128 /*
9129 * Construct a new tuple containing only the replica identity columns,
9130 * with nulls elsewhere. While we're at it, assert that the replica
9131 * identity columns aren't null.
9132 */
9133 heap_deform_tuple(tp, desc, values, nulls);
9134
9135 for (int i = 0; i < desc->natts; i++)
9136 {
9138 idattrs))
9139 Assert(!nulls[i]);
9140 else
9141 nulls[i] = true;
9142 }
9143
9144 key_tuple = heap_form_tuple(desc, values, nulls);
9145 *copy = true;
9146
9148
9149 /*
9150 * If the tuple, which by here only contains indexed columns, still has
9151 * toasted columns, force them to be inlined. This is somewhat unlikely
9152 * since there's limits on the size of indexed columns, so we don't
9153 * duplicate toast_flatten_tuple()s functionality in the above loop over
9154 * the indexed columns, even if it would be more efficient.
9155 */
9157 {
9159
9162 }
9163
9164 return key_tuple;
9165}
9166
9167/*
9168 * HeapCheckForSerializableConflictOut
9169 * We are reading a tuple. If it's not visible, there may be a
9170 * rw-conflict out with the inserter. Otherwise, if it is visible to us
9171 * but has been deleted, there may be a rw-conflict out with the deleter.
9172 *
9173 * We will determine the top level xid of the writing transaction with which
9174 * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9175 * for overlap with our own transaction.
9176 *
9177 * This function should be called just about anywhere in heapam.c where a
9178 * tuple has been read. The caller must hold at least a shared lock on the
9179 * buffer, because this function might set hint bits on the tuple. There is
9180 * currently no known reason to call this function from an index AM.
9181 */
9182void
9184 HeapTuple tuple, Buffer buffer,
9185 Snapshot snapshot)
9186{
9187 TransactionId xid;
9189
9190 if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9191 return;
9192
9193 /*
9194 * Check to see whether the tuple has been written to by a concurrent
9195 * transaction, either to create it not visible to us, or to delete it
9196 * while it is visible to us. The "visible" bool indicates whether the
9197 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9198 * is going on with it.
9199 *
9200 * In the event of a concurrently inserted tuple that also happens to have
9201 * been concurrently updated (by a separate transaction), the xmin of the
9202 * tuple will be used -- not the updater's xid.
9203 */
9205 switch (htsvResult)
9206 {
9207 case HEAPTUPLE_LIVE:
9208 if (visible)
9209 return;
9210 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9211 break;
9214 if (visible)
9215 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9216 else
9217 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9218
9220 {
9221 /* This is like the HEAPTUPLE_DEAD case */
9222 Assert(!visible);
9223 return;
9224 }
9225 break;
9227 xid = HeapTupleHeaderGetXmin(tuple->t_data);
9228 break;
9229 case HEAPTUPLE_DEAD:
9230 Assert(!visible);
9231 return;
9232 default:
9233
9234 /*
9235 * The only way to get to this default clause is if a new value is
9236 * added to the enum type without adding it to this switch
9237 * statement. That's a bug, so elog.
9238 */
9239 elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9240
9241 /*
9242 * In spite of having all enum values covered and calling elog on
9243 * this default, some compilers think this is a code path which
9244 * allows xid to be used below without initialization. Silence
9245 * that warning.
9246 */
9248 }
9249
9252
9253 /*
9254 * Find top level xid. Bail out if xid is too early to be a conflict, or
9255 * if it's our own xid.
9256 */
9258 return;
9261 return;
9262
9263 CheckForSerializableConflictOut(relation, xid, snapshot);
9264}
int16 AttrNumber
Definition attnum.h:21
int bms_next_member(const Bitmapset *a, int prevbit)
Definition bitmapset.c:1290
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:901
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition bitmapset.c:575
#define bms_is_empty(a)
Definition bitmapset.h:118
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition block.h:71
static int32 next
Definition blutils.c:225
static Datum values[MAXATTR]
Definition bootstrap.c:190
int Buffer
Definition buf.h:23
#define InvalidBuffer
Definition buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4446
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition bufmgr.c:787
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4467
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3114
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5586
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5603
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3147
int maintenance_io_concurrency
Definition bufmgr.c:207
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition bufmgr.c:879
@ BAS_BULKREAD
Definition bufmgr.h:37
@ BAS_BULKWRITE
Definition bufmgr.h:39
#define RelationGetNumberOfBlocks(reln)
Definition bufmgr.h:309
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:468
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:435
@ BUFFER_LOCK_SHARE
Definition bufmgr.h:212
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:222
@ BUFFER_LOCK_UNLOCK
Definition bufmgr.h:207
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:334
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:419
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:1000
PageHeaderData * PageHeader
Definition bufpage.h:199
static bool PageIsAllVisible(const PageData *page)
Definition bufpage.h:454
static void PageClearAllVisible(Page page)
Definition bufpage.h:464
#define SizeOfPageHeaderData
Definition bufpage.h:241
static void PageSetAllVisible(Page page)
Definition bufpage.h:459
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:268
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:378
static void PageSetFull(Page page)
Definition bufpage.h:443
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:416
PageData * Page
Definition bufpage.h:81
#define PageClearPrunable(page)
Definition bufpage.h:485
#define PageSetPrunable(page, xid)
Definition bufpage.h:478
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:396
#define NameStr(name)
Definition c.h:835
#define InvalidCommandId
Definition c.h:753
#define pg_noinline
Definition c.h:321
#define Min(x, y)
Definition c.h:1091
#define pg_attribute_unused()
Definition c.h:149
#define likely(x)
Definition c.h:437
#define MAXALIGN(LEN)
Definition c.h:896
uint8_t uint8
Definition c.h:622
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
TransactionId MultiXactId
Definition c.h:746
#define pg_attribute_always_inline
Definition c.h:305
int16_t int16
Definition c.h:619
#define SHORTALIGN(LEN)
Definition c.h:892
uint16_t uint16
Definition c.h:623
#define pg_unreachable()
Definition c.h:367
#define unlikely(x)
Definition c.h:438
uint32_t uint32
Definition c.h:624
#define lengthof(array)
Definition c.h:873
#define StaticAssertDecl(condition, errmessage)
Definition c.h:1008
uint32 CommandId
Definition c.h:750
uint32 TransactionId
Definition c.h:736
#define OidIsValid(objectId)
Definition c.h:858
size_t Size
Definition c.h:689
bool IsToastRelation(Relation relation)
Definition catalog.c:206
bool IsCatalogRelation(Relation relation)
Definition catalog.c:104
bool IsSharedRelation(Oid relationId)
Definition catalog.c:304
bool IsInplaceUpdateRelation(Relation relation)
Definition catalog.c:183
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
CommandId HeapTupleHeaderGetCmin(const HeapTupleHeaderData *tup)
Definition combocid.c:104
void HeapTupleHeaderAdjustCmax(const HeapTupleHeaderData *tup, CommandId *cmax, bool *iscombo)
Definition combocid.c:153
CommandId HeapTupleHeaderGetCmax(const HeapTupleHeaderData *tup)
Definition combocid.c:118
bool datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
Definition datum.c:223
Datum arg
Definition elog.c:1323
int errcode(int sqlerrcode)
Definition elog.c:875
int int errdetail_internal(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:37
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_object(type)
Definition fe_memutils.h:75
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition freelist.c:426
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition freelist.c:608
int NBuffers
Definition globals.c:144
Oid MyDatabaseTableSpace
Definition globals.c:98
Oid MyDatabaseId
Definition globals.c:96
void simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup, TU_UpdateIndexes *update_indexes)
Definition heapam.c:4450
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition heapam.c:7576
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition heapam.c:8998
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition heapam.c:5290
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, uint32 options)
Definition heapam.c:2202
static TM_Result heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax, const ItemPointerData *tid, TransactionId xid, LockTupleMode mode)
Definition heapam.c:5662
static void heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
Definition heapam.c:710
bool heap_inplace_lock(Relation relation, HeapTuple oldtup_ptr, Buffer buffer, void(*release_callback)(void *), void *arg)
Definition heapam.c:6332
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf)
Definition heapam.c:1684
#define BOTTOMUP_TOLERANCE_NBLOCKS
Definition heapam.c:192
static BlockNumber heap_scan_stream_read_next_parallel(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition heapam.c:254
int updstatus
Definition heapam.c:132
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
Definition heapam.c:8656
static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition heapam.c:5241
static int heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
Definition heapam.c:2250
static pg_attribute_always_inline int page_collect_tuples(HeapScanDesc scan, Snapshot snapshot, Page page, Buffer buffer, BlockNumber block, int lines, bool all_visible, bool check_serializable)
Definition heapam.c:524
static BlockNumber heap_scan_stream_read_next_serial(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition heapam.c:294
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition heapam.c:7427
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, uint32 options, BulkInsertState bistate)
Definition heapam.c:2004
void heap_finish_speculative(Relation relation, const ItemPointerData *tid)
Definition heapam.c:6063
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition heapam.c:7954
bool heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition heapam.c:1474
#define LOCKMODE_from_mxstatus(status)
Definition heapam.c:161
void heap_endscan(TableScanDesc sscan)
Definition heapam.c:1390
#define FRM_RETURN_IS_XID
Definition heapam.c:6620
#define TUPLOCK_from_mxstatus(status)
Definition heapam.c:220
void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
Definition heapam.c:1331
void heap_inplace_unlock(Relation relation, HeapTuple oldtup, Buffer buffer)
Definition heapam.c:6610
static int index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
Definition heapam.c:8408
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining, bool logLockFailure)
Definition heapam.c:7776
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
Definition heapam.c:7791
static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, const struct VacuumCutoffs *cutoffs, uint16 *flags, HeapPageFreeze *pagefrz)
Definition heapam.c:6671
static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, bool *copy)
Definition heapam.c:9079
static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
Definition heapam.c:755
static TM_Result heap_lock_updated_tuple(Relation rel, uint16 prior_infomask, TransactionId prior_raw_xmax, const ItemPointerData *prior_ctid, TransactionId xid, LockTupleMode mode)
Definition heapam.c:6010
#define LockTupleTuplock(rel, tup, mode)
Definition heapam.c:169
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
Definition heapam.c:7846
bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId FreezeLimit, TransactionId MultiXactCutoff)
Definition heapam.c:7382
void heap_inplace_update_and_unlock(Relation relation, HeapTuple oldtup, HeapTuple tuple, Buffer buffer)
Definition heapam.c:6470
static BlockNumber heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
Definition heapam.c:879
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition heapam.c:7508
#define BOTTOMUP_MAX_NBLOCKS
Definition heapam.c:191
void ReleaseBulkInsertStatePin(BulkInsertState bistate)
Definition heapam.c:1966
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared, bool walLogical)
Definition heapam.c:8775
#define FRM_MARK_COMMITTED
Definition heapam.c:6622
#define FRM_NOOP
Definition heapam.c:6618
static void index_delete_check_htid(TM_IndexDeleteOp *delstate, Page page, OffsetNumber maxoff, const ItemPointerData *htid, TM_IndexStatus *istatus)
Definition heapam.c:8039
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
Definition heapam.c:1435
int lockstatus
Definition heapam.c:131
void heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition heapam.c:7360
bool heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
Definition heapam.c:1577
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper, int *remaining)
Definition heapam.c:7754
void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid)
Definition heapam.c:1504
void heap_abort_speculative(Relation relation, const ItemPointerData *tid)
Definition heapam.c:6150
void heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, uint32 options, BulkInsertState bistate)
Definition heapam.c:2282
static BlockNumber bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data, void *per_buffer_data)
Definition heapam.c:319
TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
Definition heapam.c:1167
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition heapam.c:963
static Page heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
Definition heapam.c:833
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition heapam.c:2672
#define FRM_RETURN_IS_MULTI
Definition heapam.c:6621
LOCKMODE hwlock
Definition heapam.c:130
#define FRM_INVALIDATE_XMAX
Definition heapam.c:6619
static bool heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2, bool isnull1, bool isnull2)
Definition heapam.c:4309
static void index_delete_sort(TM_IndexDeleteOp *delstate)
Definition heapam.c:8444
void heap_prepare_pagescan(TableScanDesc sscan)
Definition heapam.c:618
static Bitmapset * HeapDetermineColumnsInfo(Relation relation, Bitmapset *interesting_cols, Bitmapset *external_cols, HeapTuple oldtup, HeapTuple newtup, bool *has_external)
Definition heapam.c:4360
static const int MultiXactStatusLock[MaxMultiXactStatus+1]
Definition heapam.c:209
void simple_heap_insert(Relation relation, HeapTuple tup)
Definition heapam.c:2659
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition heapam.c:2694
#define UnlockTupleTuplock(rel, tup, mode)
Definition heapam.c:171
TM_Result heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup, CommandId cid, uint32 options pg_attribute_unused(), Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
Definition heapam.c:3201
static TM_Result test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
Definition heapam.c:5571
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition heapam.c:7027
static void AssertHasSnapshotForToast(Relation rel)
Definition heapam.c:227
void simple_heap_delete(Relation relation, const ItemPointerData *tid)
Definition heapam.c:3153
static const struct @15 tupleLockExtraInfo[]
TransactionId HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
Definition heapam.c:7560
TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
Definition heapam.c:8099
#define ConditionalLockTupleTuplock(rel, tup, mode, log)
Definition heapam.c:173
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition heapam.c:359
static int bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
Definition heapam.c:8540
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition heapam.c:1073
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
Definition heapam.c:4539
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition heapam.c:1915
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper, int *remaining, bool logLockFailure)
Definition heapam.c:7676
static int bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
Definition heapam.c:8583
void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid)
Definition heapam.c:1793
void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
Definition heapam.c:502
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition heapam.c:9183
static Page heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
Definition heapam.c:802
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition heapam.c:4492
void heap_pre_freeze_checks(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition heapam.c:7307
BulkInsertState GetBulkInsertState(void)
Definition heapam.c:1937
TM_Result heap_delete(Relation relation, const ItemPointerData *tid, CommandId cid, uint32 options, Snapshot crosscheck, bool wait, TM_FailureData *tmfd)
Definition heapam.c:2717
void FreeBulkInsertState(BulkInsertState bistate)
Definition heapam.c:1954
#define HEAP_INSERT_SPECULATIVE
Definition heapam.h:39
#define HEAP_FREEZE_CHECK_XMAX_ABORTED
Definition heapam.h:150
struct HeapScanDescData * HeapScanDesc
Definition heapam.h:107
HTSV_Result
Definition heapam.h:137
@ HEAPTUPLE_RECENTLY_DEAD
Definition heapam.h:140
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition heapam.h:141
@ HEAPTUPLE_LIVE
Definition heapam.h:139
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition heapam.h:142
@ HEAPTUPLE_DEAD
Definition heapam.h:138
struct BitmapHeapScanDescData * BitmapHeapScanDesc
Definition heapam.h:115
#define HEAP_INSERT_FROZEN
Definition heapam.h:37
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition heapam.h:533
#define HEAP_FREEZE_CHECK_XMIN_COMMITTED
Definition heapam.h:149
#define HEAP_INSERT_NO_LOGICAL
Definition heapam.h:38
struct BulkInsertStateData * BulkInsertState
Definition heapam.h:47
const TableAmRoutine * GetHeapamTableAmRoutine(void)
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer, uint16 infomask, TransactionId xid)
bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
int HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer, int ntups, BatchMVCCState *batchmvcc, OffsetNumber *vistuples_dense)
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define XLH_INSERT_ON_TOAST_RELATION
Definition heapam_xlog.h:76
#define SizeOfHeapMultiInsert
#define XLOG_HEAP2_MULTI_INSERT
Definition heapam_xlog.h:64
#define SizeOfHeapUpdate
#define XLH_INVALID_XVAC
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:87
#define XLOG_HEAP_HOT_UPDATE
Definition heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition heapam_xlog.h:34
#define XLH_INSERT_IS_SPECULATIVE
Definition heapam_xlog.h:74
#define XLH_LOCK_ALL_FROZEN_CLEARED
#define XLH_DELETE_CONTAINS_OLD_KEY
#define XLH_UPDATE_CONTAINS_NEW_TUPLE
Definition heapam_xlog.h:90
#define XLH_INSERT_LAST_IN_MULTI
Definition heapam_xlog.h:73
#define XLH_INSERT_ALL_FROZEN_SET
Definition heapam_xlog.h:79
#define XLH_FREEZE_XVAC
#define XLOG_HEAP_UPDATE
Definition heapam_xlog.h:35
#define XLHL_XMAX_KEYSHR_LOCK
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLH_UPDATE_CONTAINS_OLD_TUPLE
Definition heapam_xlog.h:88
#define SizeOfHeapNewCid
#define SizeOfHeapLockUpdated
#define XLHL_XMAX_IS_MULTI
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:72
#define SizeOfHeapHeader
#define XLH_DELETE_IS_PARTITION_MOVE
#define MinSizeOfHeapInplace
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:85
#define XLHL_XMAX_LOCK_ONLY
#define XLOG_HEAP_INPLACE
Definition heapam_xlog.h:40
#define XLOG_HEAP2_LOCK_UPDATED
Definition heapam_xlog.h:65
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition heapam_xlog.h:91
#define SizeOfMultiInsertTuple
#define XLHL_XMAX_EXCL_LOCK
#define XLOG_HEAP2_NEW_CID
Definition heapam_xlog.h:66
#define XLH_DELETE_CONTAINS_OLD_TUPLE
#define XLOG_HEAP_LOCK
Definition heapam_xlog.h:39
#define XLOG_HEAP_INSERT
Definition heapam_xlog.h:33
#define SizeOfHeapInsert
#define SizeOfHeapDelete
#define XLH_DELETE_IS_SUPER
#define XLH_UPDATE_CONTAINS_OLD_KEY
Definition heapam_xlog.h:89
#define XLH_DELETE_NO_LOGICAL
#define XLHL_KEYS_UPDATED
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition heapam_xlog.h:75
#define XLOG_HEAP_INIT_PAGE
Definition heapam_xlog.h:47
#define SizeOfHeapConfirm
#define SizeOfHeapLock
#define XLOG_HEAP_CONFIRM
Definition heapam_xlog.h:38
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition heaptoast.c:43
HeapTuple heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, uint32 options)
Definition heaptoast.c:96
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition heaptoast.c:350
#define TOAST_TUPLE_THRESHOLD
Definition heaptoast.h:48
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
Definition heaptuple.c:1025
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition heaptuple.c:1254
void heap_freetuple(HeapTuple htup)
Definition heaptuple.c:1372
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition hio.c:35
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, uint32 options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other, int num_pages)
Definition hio.c:500
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
#define HEAP_MOVED_OFF
#define HEAP_XMAX_SHR_LOCK
#define HEAP_XMIN_FROZEN
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
#define HeapTupleHeaderGetNatts(tup)
static void HeapTupleHeaderSetXminFrozen(HeapTupleHeaderData *tup)
#define SizeofHeapTupleHeader
#define HEAP_KEYS_UPDATED
static bool HEAP_XMAX_IS_SHR_LOCKED(uint16 infomask)
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
static bool HeapTupleHeaderXminInvalid(const HeapTupleHeaderData *tup)
static void HeapTupleClearHotUpdated(const HeapTupleData *tuple)
static bool HeapTupleHasExternal(const HeapTupleData *tuple)
static TransactionId HeapTupleHeaderGetXvac(const HeapTupleHeaderData *tup)
#define HEAP2_XACT_MASK
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
#define HEAP_XMAX_LOCK_ONLY
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
#define HEAP_XMAX_BITS
#define HEAP_LOCK_MASK
static CommandId HeapTupleHeaderGetRawCommandId(const HeapTupleHeaderData *tup)
static TransactionId HeapTupleHeaderGetRawXmax(const HeapTupleHeaderData *tup)
static bool HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
static bool HeapTupleIsHeapOnly(const HeapTupleData *tuple)
#define HEAP_MOVED
static void HeapTupleSetHeapOnly(const HeapTupleData *tuple)
#define HEAP_XMAX_IS_MULTI
static bool HEAP_XMAX_IS_KEYSHR_LOCKED(uint16 infomask)
#define HEAP_XMAX_COMMITTED
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
#define HEAP_COMBOCID
#define HEAP_XACT_MASK
static bool HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
static void HeapTupleSetHotUpdated(const HeapTupleData *tuple)
#define HEAP_XMAX_EXCL_LOCK
static bool HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
#define HEAP_XMAX_INVALID
static TransactionId HeapTupleHeaderGetRawXmin(const HeapTupleHeaderData *tup)
static void * GETSTRUCT(const HeapTupleData *tuple)
static void HeapTupleClearHeapOnly(const HeapTupleData *tuple)
#define MaxHeapAttributeNumber
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
static TransactionId HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
#define MaxHeapTuplesPerPage
static bool HEAP_XMAX_IS_EXCL_LOCKED(uint16 infomask)
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
static bool HEAP_LOCKED_UPGRADED(uint16 infomask)
#define HEAP_UPDATED
#define HEAP_XMAX_KEYSHR_LOCK
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
#define IsParallelWorker()
Definition parallel.h:62
void index_close(Relation relation, LOCKMODE lockmode)
Definition indexam.c:178
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition indexam.c:134
int remaining
Definition informix.c:692
#define INJECTION_POINT(name, arg)
void AcceptInvalidationMessages(void)
Definition inval.c:930
int inplaceGetInvalidationMessages(SharedInvalidationMessage **msgs, bool *RelcacheInitFileInval)
Definition inval.c:1088
void PreInplace_Inval(void)
Definition inval.c:1250
void CacheInvalidateHeapTupleInplace(Relation relation, HeapTuple key_equivalent_tuple)
Definition inval.c:1593
void AtInplace_Inval(void)
Definition inval.c:1263
void ForgetInplace_Inval(void)
Definition inval.c:1286
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition inval.c:1571
int b
Definition isn.c:74
int j
Definition isn.c:78
int i
Definition isn.c:77
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdIsNormal(itemId)
Definition itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition itemid.h:78
#define ItemIdIsUsed(itemId)
Definition itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition itemid.h:106
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
int32 ItemPointerCompare(const ItemPointerData *arg1, const ItemPointerData *arg2)
Definition itemptr.c:51
bool ItemPointerEquals(const ItemPointerData *pointer1, const ItemPointerData *pointer2)
Definition itemptr.c:35
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition itemptr.h:135
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition itemptr.h:184
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition itemptr.h:147
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition itemptr.h:124
static bool ItemPointerIndicatesMovedPartitions(const ItemPointerData *pointer)
Definition itemptr.h:197
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition itemptr.h:103
static BlockNumber ItemPointerGetBlockNumberNoCheck(const ItemPointerData *pointer)
Definition itemptr.h:93
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition itemptr.h:172
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition itemptr.h:83
void UnlockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:601
bool ConditionalXactLockTableWait(TransactionId xid, bool logLockFailure)
Definition lmgr.c:739
void LockTuple(Relation relation, const ItemPointerData *tid, LOCKMODE lockmode)
Definition lmgr.c:562
void XactLockTableWait(TransactionId xid, Relation rel, const ItemPointerData *ctid, XLTW_Oper oper)
Definition lmgr.c:663
XLTW_Oper
Definition lmgr.h:25
@ XLTW_None
Definition lmgr.h:26
@ XLTW_Lock
Definition lmgr.h:29
@ XLTW_Delete
Definition lmgr.h:28
@ XLTW_LockUpdated
Definition lmgr.h:30
@ XLTW_Update
Definition lmgr.h:27
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:641
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:621
bool log_lock_failures
Definition lock.c:57
int LOCKMODE
Definition lockdefs.h:26
#define AccessExclusiveLock
Definition lockdefs.h:43
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
#define InplaceUpdateTupleLock
Definition lockdefs.h:48
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define ExclusiveLock
Definition lockdefs.h:42
#define RowShareLock
Definition lockdefs.h:37
LockWaitPolicy
Definition lockoptions.h:38
@ LockWaitSkip
Definition lockoptions.h:42
@ LockWaitBlock
Definition lockoptions.h:40
@ LockWaitError
Definition lockoptions.h:44
LockTupleMode
Definition lockoptions.h:51
@ LockTupleExclusive
Definition lockoptions.h:59
@ LockTupleNoKeyExclusive
Definition lockoptions.h:57
@ LockTupleShare
Definition lockoptions.h:55
@ LockTupleKeyShare
Definition lockoptions.h:53
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum)
Definition locktag.h:117
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc(Size size)
Definition mcxt.c:1387
#define IsBootstrapProcessingMode()
Definition miscadmin.h:495
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
#define IsNormalProcessingMode()
Definition miscadmin.h:497
#define END_CRIT_SECTION()
Definition miscadmin.h:154
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition multixact.c:411
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition multixact.c:2865
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
Definition multixact.c:2879
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition multixact.c:522
void MultiXactIdSetOldestMember(void)
Definition multixact.c:596
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition multixact.c:715
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition multixact.c:358
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool isLockOnly)
Definition multixact.c:1172
#define MultiXactIdIsValid(multi)
Definition multixact.h:29
MultiXactStatus
Definition multixact.h:37
@ MultiXactStatusForShare
Definition multixact.h:39
@ MultiXactStatusForNoKeyUpdate
Definition multixact.h:40
@ MultiXactStatusNoKeyUpdate
Definition multixact.h:43
@ MultiXactStatusUpdate
Definition multixact.h:45
@ MultiXactStatusForUpdate
Definition multixact.h:41
@ MultiXactStatusForKeyShare
Definition multixact.h:38
#define ISUPDATE_from_mxstatus(status)
Definition multixact.h:51
#define InvalidMultiXactId
Definition multixact.h:25
#define MaxMultiXactStatus
Definition multixact.h:48
static char * errmsg
#define InvalidOffsetNumber
Definition off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition off.h:39
#define OffsetNumberNext(offsetNumber)
Definition off.h:52
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
#define OffsetNumberPrev(offsetNumber)
Definition off.h:54
#define MaxOffsetNumber
Definition off.h:28
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition parse_oper.c:373
int16 attlen
#define ERRCODE_DATA_CORRUPTED
static uint32 pg_nextpower2_32(uint32 num)
static PgChecksumMode mode
FormData_pg_class * Form_pg_class
Definition pg_class.h:160
END_CATALOG_STRUCT typedef FormData_pg_database * Form_pg_database
static char buf[DEFAULT_XLOG_SEG_SIZE]
#define pgstat_count_heap_getnext(rel)
Definition pgstat.h:722
#define pgstat_count_heap_scan(rel)
Definition pgstat.h:717
void pgstat_count_heap_update(Relation rel, bool hot, bool newpage)
void pgstat_count_heap_delete(Relation rel)
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
#define qsort(a, b, c, d)
Definition port.h:495
static Oid DatumGetObjectId(Datum X)
Definition postgres.h:242
uint64_t Datum
Definition postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition postgres.h:332
#define InvalidOid
unsigned int Oid
void CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, BlockNumber blkno)
Definition predicate.c:4266
void CheckForSerializableConflictOut(Relation relation, TransactionId xid, Snapshot snapshot)
Definition predicate.c:3953
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition predicate.c:2506
void PredicateLockTID(Relation relation, const ItemPointerData *tid, Snapshot snapshot, TransactionId tuple_xid)
Definition predicate.c:2551
bool CheckForSerializableConflictOutNeeded(Relation relation, Snapshot snapshot)
Definition predicate.c:3921
static int fb(int x)
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition procarray.c:4127
bool TransactionIdIsInProgress(TransactionId xid)
Definition procarray.c:1393
void heap_page_prune_opt(Relation relation, Buffer buffer, Buffer *vmbuffer, bool rel_read_only)
Definition pruneheap.c:271
void read_stream_reset(ReadStream *stream)
Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
ReadStream * read_stream_begin_relation(int flags, BufferAccessStrategy strategy, Relation rel, ForkNumber forknum, ReadStreamBlockNumberCB callback, void *callback_private_data, size_t per_buffer_data_size)
void read_stream_enable_stats(ReadStream *stream, IOStats *stats)
void read_stream_end(ReadStream *stream)
#define READ_STREAM_USE_BATCHING
Definition read_stream.h:64
BlockNumber(* ReadStreamBlockNumberCB)(ReadStream *stream, void *callback_private_data, void *per_buffer_data)
Definition read_stream.h:78
#define READ_STREAM_DEFAULT
Definition read_stream.h:21
#define READ_STREAM_SEQUENTIAL
Definition read_stream.h:36
#define RelationGetRelid(relation)
Definition rel.h:516
#define RelationIsLogicallyLogged(relation)
Definition rel.h:712
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition rel.h:391
#define RelationGetDescr(relation)
Definition rel.h:542
#define RelationGetNumberOfAttributes(relation)
Definition rel.h:522
#define RelationGetRelationName(relation)
Definition rel.h:550
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition rel.h:695
#define RelationNeedsWAL(relation)
Definition rel.h:639
#define RelationUsesLocalBuffers(relation)
Definition rel.h:648
#define HEAP_DEFAULT_FILLFACTOR
Definition rel.h:362
void RelationDecrementReferenceCount(Relation rel)
Definition relcache.c:2190
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition relcache.c:5294
void RelationIncrementReferenceCount(Relation rel)
Definition relcache.c:2177
@ INDEX_ATTR_BITMAP_KEY
Definition relcache.h:69
@ INDEX_ATTR_BITMAP_HOT_BLOCKING
Definition relcache.h:72
@ INDEX_ATTR_BITMAP_SUMMARIZED
Definition relcache.h:73
@ INDEX_ATTR_BITMAP_IDENTITY_KEY
Definition relcache.h:71
ForkNumber
Definition relpath.h:56
@ MAIN_FORKNUM
Definition relpath.h:58
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
Definition relscan.h:109
#define ScanDirectionIsForward(direction)
Definition sdir.h:64
#define ScanDirectionIsBackward(direction)
Definition sdir.h:50
ScanDirection
Definition sdir.h:25
@ ForwardScanDirection
Definition sdir.h:28
void UnregisterSnapshot(Snapshot snapshot)
Definition snapmgr.c:866
TransactionId TransactionXmin
Definition snapmgr.c:159
bool HaveRegisteredOrActiveSnapshot(void)
Definition snapmgr.c:1644
void InvalidateCatalogSnapshot(void)
Definition snapmgr.c:455
#define IsHistoricMVCCSnapshot(snapshot)
Definition snapmgr.h:67
#define SnapshotAny
Definition snapmgr.h:33
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition snapmgr.h:50
#define IsMVCCSnapshot(snapshot)
Definition snapmgr.h:59
#define InvalidSnapshot
Definition snapshot.h:119
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition spccache.c:230
#define init()
BlockNumber last_free
Definition hio.h:49
BufferAccessStrategy strategy
Definition hio.h:31
uint32 already_extended_by
Definition hio.h:50
BlockNumber next_free
Definition hio.h:48
Buffer current_buf
Definition hio.h:32
MultiXactId NoFreezePageRelminMxid
Definition heapam.h:244
TransactionId FreezePageConflictXid
Definition heapam.h:233
TransactionId FreezePageRelfrozenXid
Definition heapam.h:220
bool freeze_required
Definition heapam.h:194
MultiXactId FreezePageRelminMxid
Definition heapam.h:221
TransactionId NoFreezePageRelfrozenXid
Definition heapam.h:243
Buffer rs_vmbuffer
Definition heapam.h:100
BufferAccessStrategy rs_strategy
Definition heapam.h:75
ScanDirection rs_dir
Definition heapam.h:90
uint32 rs_ntuples
Definition heapam.h:104
OffsetNumber rs_coffset
Definition heapam.h:70
Buffer rs_cbuf
Definition heapam.h:72
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition heapam.h:97
BlockNumber rs_startblock
Definition heapam.h:64
HeapTupleData rs_ctup
Definition heapam.h:77
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
Definition heapam.h:105
BlockNumber rs_numblocks
Definition heapam.h:65
BlockNumber rs_nblocks
Definition heapam.h:63
ReadStream * rs_read_stream
Definition heapam.h:80
uint32 rs_cindex
Definition heapam.h:103
BlockNumber rs_prefetch_block
Definition heapam.h:91
BlockNumber rs_cblock
Definition heapam.h:71
TableScanDescData rs_base
Definition heapam.h:60
ItemPointerData t_self
Definition htup.h:65
uint32 t_len
Definition htup.h:64
HeapTupleHeader t_data
Definition htup.h:68
Oid t_tableOid
Definition htup.h:66
TransactionId t_xmin
union HeapTupleHeaderData::@54 t_choice
ItemPointerData t_ctid
HeapTupleFields t_heap
int16 npromisingtids
Definition heapam.c:200
LockRelId lockRelId
Definition rel.h:46
Oid relId
Definition rel.h:40
Oid dbId
Definition rel.h:41
TransactionId xid
Definition multixact.h:57
MultiXactStatus status
Definition multixact.h:58
LockInfoData rd_lockInfo
Definition rel.h:114
Form_pg_index rd_index
Definition rel.h:192
RelFileLocator rd_locator
Definition rel.h:57
Form_pg_class rd_rel
Definition rel.h:111
bool takenDuringRecovery
Definition snapshot.h:180
TransactionId xmax
Definition tableam.h:172
CommandId cmax
Definition tableam.h:173
ItemPointerData ctid
Definition tableam.h:171
ItemPointerData tid
Definition tableam.h:234
Relation rs_rd
Definition relscan.h:36
struct TableScanInstrumentation * rs_instrument
Definition relscan.h:72
uint32 rs_flags
Definition relscan.h:64
struct ScanKeyData * rs_key
Definition relscan.h:39
struct SnapshotData * rs_snapshot
Definition relscan.h:37
struct ParallelTableScanDescData * rs_parallel
Definition relscan.h:66
TransactionId FreezeLimit
Definition vacuum.h:288
TransactionId OldestXmin
Definition vacuum.h:278
TransactionId relfrozenxid
Definition vacuum.h:262
MultiXactId relminmxid
Definition vacuum.h:263
MultiXactId MultiXactCutoff
Definition vacuum.h:289
MultiXactId OldestMxact
Definition vacuum.h:279
Definition c.h:776
OffsetNumber offnum
TransactionId SubTransGetTopmostTransaction(TransactionId xid)
Definition subtrans.c:170
void ss_report_location(Relation rel, BlockNumber location)
Definition syncscan.c:287
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
Definition syncscan.c:252
#define FirstLowInvalidHeapAttributeNumber
Definition sysattr.h:27
#define TableOidAttributeNumber
Definition sysattr.h:26
bool RelationSupportsSysCache(Oid relid)
Definition syscache.c:763
void table_block_parallelscan_startblock_init(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan, BlockNumber startblock, BlockNumber numblocks)
Definition tableam.c:453
BlockNumber table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
Definition tableam.c:548
bool synchronize_seqscans
Definition tableam.c:50
@ SO_ALLOW_STRAT
Definition tableam.h:61
@ SO_TYPE_TIDRANGESCAN
Definition tableam.h:56
@ SO_TEMP_SNAPSHOT
Definition tableam.h:68
@ SO_HINT_REL_READ_ONLY
Definition tableam.h:71
@ SO_ALLOW_PAGEMODE
Definition tableam.h:65
@ SO_TYPE_SAMPLESCAN
Definition tableam.h:54
@ SO_ALLOW_SYNC
Definition tableam.h:63
@ SO_TYPE_SEQSCAN
Definition tableam.h:52
@ SO_SCAN_INSTRUMENT
Definition tableam.h:74
@ SO_TYPE_BITMAPSCAN
Definition tableam.h:53
TU_UpdateIndexes
Definition tableam.h:133
@ TU_Summarizing
Definition tableam.h:141
@ TU_All
Definition tableam.h:138
@ TU_None
Definition tableam.h:135
TM_Result
Definition tableam.h:95
@ TM_Ok
Definition tableam.h:100
@ TM_BeingModified
Definition tableam.h:122
@ TM_Deleted
Definition tableam.h:115
@ TM_WouldBlock
Definition tableam.h:125
@ TM_Updated
Definition tableam.h:112
@ TM_SelfModified
Definition tableam.h:106
@ TM_Invisible
Definition tableam.h:103
#define TABLE_DELETE_CHANGING_PARTITION
Definition tableam.h:289
#define TABLE_DELETE_NO_LOGICAL
Definition tableam.h:290
#define TABLE_UPDATE_NO_LOGICAL
Definition tableam.h:293
bool tbm_iterate(TBMIterator *iterator, TBMIterateResult *tbmres)
Definition tidbitmap.c:1614
bool TransactionIdDidCommit(TransactionId transactionId)
Definition transam.c:126
bool TransactionIdDidAbort(TransactionId transactionId)
Definition transam.c:188
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition transam.h:297
#define InvalidTransactionId
Definition transam.h:31
static bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:282
static bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2)
Definition transam.h:312
#define TransactionIdEquals(id1, id2)
Definition transam.h:43
#define TransactionIdIsValid(xid)
Definition transam.h:41
#define TransactionIdIsNormal(xid)
Definition transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
static CompactAttribute * TupleDescCompactAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:195
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition tuptable.h:476
static bool HeapKeyTest(HeapTuple tuple, TupleDesc tupdesc, int nkeys, ScanKey keys)
Definition valid.h:28
static bool VARATT_IS_EXTERNAL(const void *PTR)
Definition varatt.h:354
void visibilitymap_set(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
TransactionId GetTopTransactionId(void)
Definition xact.c:428
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:443
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition xact.c:943
bool IsInParallelMode(void)
Definition xact.c:1119
TransactionId GetCurrentTransactionId(void)
Definition xact.c:456
CommandId GetCurrentCommandId(bool used)
Definition xact.c:831
#define IsolationIsSerializable()
Definition xact.h:53
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:166
#define XLogStandbyInfoActive()
Definition xlog.h:126
uint64 XLogRecPtr
Definition xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:482
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:413
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:372
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:464
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:317
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:246
void XLogBeginInsert(void)
Definition xloginsert.c:153
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34