/*
 * NOTE: doxygen viewer chrome ("Loading...", "Searching...", "No Matches",
 * navigation links) removed; the file proper begins with the header below.
 */
1/*-------------------------------------------------------------------------
2 *
3 * bufpage.c
4 * POSTGRES standard buffer page code.
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/storage/page/bufpage.c
12 *
13 *-------------------------------------------------------------------------
14 */
15#include "postgres.h"
16
17#include "access/htup_details.h"
18#include "access/itup.h"
19#include "access/xlog.h"
20#include "pgstat.h"
21#include "storage/checksum.h"
22#include "utils/memdebug.h"
23#include "utils/memutils.h"
24
25
26/* GUC variable */
28
29
30/* ----------------------------------------------------------------
31 * Page support functions
32 * ----------------------------------------------------------------
33 */
34
35/*
36 * PageInit
37 * Initializes the contents of a page.
38 * Note that we don't calculate an initial checksum here; that's not done
39 * until it's time to write.
40 */
41void
43{
44 PageHeader p = (PageHeader) page;
45
47
50
51 /* Make sure all fields of page are zero, as well as unused space */
52 MemSet(p, 0, pageSize);
53
54 p->pd_flags = 0;
59 /* p->pd_prune_xid = InvalidTransactionId; done by above MemSet */
60}
61
62
63/*
64 * PageIsVerified
65 * Check that the page header and checksum (if any) appear valid.
66 *
67 * This is called when a page has just been read in from disk. The idea is
68 * to cheaply detect trashed pages before we go nuts following bogus line
69 * pointers, testing invalid transaction identifiers, etc.
70 *
71 * It turns out to be necessary to allow zeroed pages here too. Even though
72 * this routine is *not* called when deliberately adding a page to a relation,
73 * there are scenarios in which a zeroed page might be found in a table.
74 * (Example: a backend extends a relation, then crashes before it can write
75 * any WAL entry about the new page. The kernel will already have the
76 * zeroed page in the file, and it will stay that way after restart.) So we
77 * allow zeroed pages here, and are careful that the page access macros
78 * treat such a page as empty and without free space. Eventually, VACUUM
79 * will clean up such a page and make it usable.
80 *
81 * If flag PIV_LOG_WARNING/PIV_LOG_LOG is set, a WARNING/LOG message is logged
82 * in the event of a checksum failure.
83 *
84 * If flag PIV_IGNORE_CHECKSUM_FAILURE is set, checksum failures will cause a
85 * message about the failure to be emitted, but will not cause
86 * PageIsVerified() to return false.
87 *
88 * To allow the caller to report statistics about checksum failures,
89 * *checksum_failure_p can be passed in. Note that there may be checksum
90 * failures even if this function returns true, due to
91 * PIV_IGNORE_CHECKSUM_FAILURE.
92 */
93bool
95{
96 const PageHeaderData *p = (const PageHeaderData *) page;
97 size_t *pagebytes;
98 bool checksum_failure = false;
99 bool header_sane = false;
100 uint16 checksum = 0;
101
103 *checksum_failure_p = false;
104
105 /*
106 * Don't verify page data unless the page passes basic non-zero test
107 */
108 if (!PageIsNew(page))
109 {
110 /*
111 * There shouldn't be any check for interrupt calls happening in this
112 * codepath, but just to be on the safe side we hold interrupts since
113 * if they did happen the data checksum state could change during
114 * verifying checksums, which could lead to incorrect verification
115 * results.
116 */
119 {
120 checksum = pg_checksum_page(page, blkno);
121
122 if (checksum != p->pd_checksum)
123 {
124 checksum_failure = true;
126 *checksum_failure_p = true;
127 }
128 }
130
131 /*
132 * The following checks don't prove the header is correct, only that
133 * it looks sane enough to allow into the buffer pool. Later usage of
134 * the block can still reveal problems, which is why we offer the
135 * checksum option.
136 */
137 if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
138 p->pd_lower <= p->pd_upper &&
139 p->pd_upper <= p->pd_special &&
140 p->pd_special <= BLCKSZ &&
142 header_sane = true;
143
145 return true;
146 }
147
148 /* Check all-zeroes case */
149 pagebytes = (size_t *) page;
150
152 return true;
153
154 /*
155 * Throw a WARNING/LOG, as instructed by PIV_LOG_*, if the checksum fails,
156 * but only after we've checked for the all-zeroes case.
157 */
159 {
160 if ((flags & (PIV_LOG_WARNING | PIV_LOG_LOG)) != 0)
163 errmsg("page verification failed, calculated checksum %u but expected %u%s",
164 checksum, p->pd_checksum,
165 (flags & PIV_ZERO_BUFFERS_ON_ERROR ? ", buffer will be zeroed" : ""))));
166
168 return true;
169 }
170
171 return false;
172}
173
174
175/*
176 * PageAddItemExtended
177 *
178 * Add an item to a page. Return value is the offset at which it was
179 * inserted, or InvalidOffsetNumber if the item is not inserted for any
180 * reason. A WARNING is issued indicating the reason for the refusal.
181 *
182 * offsetNumber must be either InvalidOffsetNumber to specify finding a
183 * free line pointer, or a value between FirstOffsetNumber and one past
184 * the last existing item, to specify using that particular line pointer.
185 *
186 * If offsetNumber is valid and flag PAI_OVERWRITE is set, we just store
187 * the item at the specified offsetNumber, which must be either a
188 * currently-unused line pointer, or one past the last existing item.
189 *
190 * If offsetNumber is valid and flag PAI_OVERWRITE is not set, insert
191 * the item at the specified offsetNumber, moving existing items later
192 * in the array to make room.
193 *
194 * If offsetNumber is not valid, then assign a slot by finding the first
195 * one that is both unused and deallocated.
196 *
197 * If flag PAI_IS_HEAP is set, we enforce that there can't be more than
198 * MaxHeapTuplesPerPage line pointers on the page.
199 *
200 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!
201 */
204 const void *item,
205 Size size,
207 int flags)
208{
209 PageHeader phdr = (PageHeader) page;
211 int lower;
212 int upper;
214 OffsetNumber limit;
215 bool needshuffle = false;
216
217 /*
218 * Be wary about corrupted page pointers
219 */
220 if (phdr->pd_lower < SizeOfPageHeaderData ||
221 phdr->pd_lower > phdr->pd_upper ||
222 phdr->pd_upper > phdr->pd_special ||
223 phdr->pd_special > BLCKSZ)
226 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
227 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
228
229 /*
230 * Select offsetNumber to place the new item at
231 */
233
234 /* was offsetNumber passed in? */
236 {
237 /* yes, check it */
238 if ((flags & PAI_OVERWRITE) != 0)
239 {
240 if (offsetNumber < limit)
241 {
244 {
245 elog(WARNING, "will not overwrite a used ItemId");
246 return InvalidOffsetNumber;
247 }
248 }
249 }
250 else
251 {
252 if (offsetNumber < limit)
253 needshuffle = true; /* need to move existing linp's */
254 }
255 }
256 else
257 {
258 /* offsetNumber was not passed in, so find a free slot */
259 /* if no free slot, we'll put it at limit (1st open slot) */
260 if (PageHasFreeLinePointers(page))
261 {
262 /*
263 * Scan line pointer array to locate a "recyclable" (unused)
264 * ItemId.
265 *
266 * Always use earlier items first. PageTruncateLinePointerArray
267 * can only truncate unused items when they appear as a contiguous
268 * group at the end of the line pointer array.
269 */
271 offsetNumber < limit; /* limit is maxoff+1 */
272 offsetNumber++)
273 {
275
276 /*
277 * We check for no storage as well, just to be paranoid;
278 * unused items should never have storage. Assert() that the
279 * invariant is respected too.
280 */
282
284 break;
285 }
286 if (offsetNumber >= limit)
287 {
288 /* the hint is wrong, so reset it */
290 }
291 }
292 else
293 {
294 /* don't bother searching if hint says there's no free slot */
295 offsetNumber = limit;
296 }
297 }
298
299 /* Reject placing items beyond the first unused line pointer */
300 if (offsetNumber > limit)
301 {
302 elog(WARNING, "specified item offset is too large");
303 return InvalidOffsetNumber;
304 }
305
306 /* Reject placing items beyond heap boundary, if heap */
307 if ((flags & PAI_IS_HEAP) != 0 && offsetNumber > MaxHeapTuplesPerPage)
308 {
309 elog(WARNING, "can't put more than MaxHeapTuplesPerPage items in a heap page");
310 return InvalidOffsetNumber;
311 }
312
313 /*
314 * Compute new lower and upper pointers for page, see if it'll fit.
315 *
316 * Note: do arithmetic as signed ints, to avoid mistakes if, say,
317 * alignedSize > pd_upper.
318 */
319 if (offsetNumber == limit || needshuffle)
320 lower = phdr->pd_lower + sizeof(ItemIdData);
321 else
322 lower = phdr->pd_lower;
323
324 alignedSize = MAXALIGN(size);
325
326 upper = (int) phdr->pd_upper - (int) alignedSize;
327
328 if (lower > upper)
329 return InvalidOffsetNumber;
330
331 /*
332 * OK to insert the item. First, shuffle the existing pointers if needed.
333 */
335
336 if (needshuffle)
337 memmove(itemId + 1, itemId,
338 (limit - offsetNumber) * sizeof(ItemIdData));
339
340 /* set the line pointer */
342
343 /*
344 * Items normally contain no uninitialized bytes. Core bufpage consumers
345 * conform, but this is not a necessary coding rule; a new index AM could
346 * opt to depart from it. However, data type input functions and other
347 * C-language functions that synthesize datums should initialize all
348 * bytes; datumIsEqual() relies on this. Testing here, along with the
349 * similar check in printtup(), helps to catch such mistakes.
350 *
351 * Values of the "name" type retrieved via index-only scans may contain
352 * uninitialized bytes; see comment in btrescan(). Valgrind will report
353 * this as an error, but it is safe to ignore.
354 */
356
357 /* copy the item's data onto the page */
358 memcpy((char *) page + upper, item, size);
359
360 /* adjust page header */
361 phdr->pd_lower = (LocationIndex) lower;
362 phdr->pd_upper = (LocationIndex) upper;
363
364 return offsetNumber;
365}
366
367
368/*
369 * PageGetTempPage
370 * Get a temporary page in local memory for special processing.
371 * The returned page is not initialized at all; caller must do that.
372 */
373Page
375{
377 Page temp;
378
381
382 return temp;
383}
384
385/*
386 * PageGetTempPageCopy
387 * Get a temporary page in local memory for special processing.
388 * The page is initialized by copying the contents of the given page.
389 */
390Page
392{
394 Page temp;
395
398
399 memcpy(temp, page, pageSize);
400
401 return temp;
402}
403
404/*
405 * PageGetTempPageCopySpecial
406 * Get a temporary page in local memory for special processing.
407 * The page is PageInit'd with the same special-space size as the
408 * given page, and the special space is copied from the given page.
409 */
410Page
412{
414 Page temp;
415
418
422 PageGetSpecialSize(page));
423
424 return temp;
425}
426
427/*
428 * PageRestoreTempPage
429 * Copy temporary page back to permanent page after special processing
430 * and release the temporary page.
431 */
432void
442
443/*
444 * Tuple defrag support for PageRepairFragmentation and PageIndexMultiDelete
445 */
446typedef struct itemIdCompactData
447{
448 uint16 offsetindex; /* linp array index */
449 int16 itemoff; /* page offset of item data */
450 uint16 alignedlen; /* MAXALIGN(item data len) */
453
454/*
455 * After removing or marking some line pointers unused, move the tuples to
456 * remove the gaps caused by the removed items and reorder them back into
457 * reverse line pointer order in the page.
458 *
459 * This function can often be fairly hot, so it pays to take some measures to
460 * make it as optimal as possible.
461 *
462 * Callers may pass 'presorted' as true if the 'itemidbase' array is sorted in
463 * descending order of itemoff. When this is true we can just memmove()
464 * tuples towards the end of the page. This is quite a common case as it's
465 * the order that tuples are initially inserted into pages. When we call this
466 * function to defragment the tuples in the page then any new line pointers
467 * added to the page will keep that presorted order, so hitting this case is
468 * still very common for tables that are commonly updated.
469 *
470 * When the 'itemidbase' array is not presorted then we're unable to just
471 * memmove() tuples around freely. Doing so could cause us to overwrite the
472 * memory belonging to a tuple we've not moved yet. In this case, we copy all
473 * the tuples that need to be moved into a temporary buffer. We can then
474 * simply memcpy() out of that temp buffer back into the page at the correct
475 * location. Tuples are copied back into the page in the same order as the
476 * 'itemidbase' array, so we end up reordering the tuples back into reverse
477 * line pointer order. This will increase the chances of hitting the
478 * presorted case the next time around.
479 *
480 * Callers must ensure that nitems is > 0
481 */
482static void
484{
485 PageHeader phdr = (PageHeader) page;
490 int i;
491
492 /* Code within will not work correctly if nitems == 0 */
493 Assert(nitems > 0);
494
495 if (presorted)
496 {
497
498#ifdef USE_ASSERT_CHECKING
499 {
500 /*
501 * Verify we've not gotten any new callers that are incorrectly
502 * passing a true presorted value.
503 */
504 Offset lastoff = phdr->pd_special;
505
506 for (i = 0; i < nitems; i++)
507 {
509
510 Assert(lastoff > itemidptr->itemoff);
511
512 lastoff = itemidptr->itemoff;
513 }
514 }
515#endif /* USE_ASSERT_CHECKING */
516
517 /*
518 * 'itemidbase' is already in the optimal order, i.e, lower item
519 * pointers have a higher offset. This allows us to memmove() the
520 * tuples up to the end of the page without having to worry about
521 * overwriting other tuples that have not been moved yet.
522 *
523 * There's a good chance that there are tuples already right at the
524 * end of the page that we can simply skip over because they're
525 * already in the correct location within the page. We'll do that
526 * first...
527 */
528 upper = phdr->pd_special;
529 i = 0;
530 do
531 {
533 if (upper != itemidptr->itemoff + itemidptr->alignedlen)
534 break;
535 upper -= itemidptr->alignedlen;
536
537 i++;
538 } while (i < nitems);
539
540 /*
541 * Now that we've found the first tuple that needs to be moved, we can
542 * do the tuple compactification. We try and make the least number of
543 * memmove() calls and only call memmove() when there's a gap. When
544 * we see a gap we just move all tuples after the gap up until the
545 * point of the last move operation.
546 */
547 copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
548 for (; i < nitems; i++)
549 {
550 ItemId lp;
551
553 lp = PageGetItemId(page, itemidptr->offsetindex + 1);
554
555 if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
556 {
557 memmove((char *) page + upper,
558 page + copy_head,
560
561 /*
562 * We've now moved all tuples already seen, but not the
563 * current tuple, so we set the copy_tail to the end of this
564 * tuple so it can be moved in another iteration of the loop.
565 */
566 copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
567 }
568 /* shift the target offset down by the length of this tuple */
569 upper -= itemidptr->alignedlen;
570 /* point the copy_head to the start of this tuple */
571 copy_head = itemidptr->itemoff;
572
573 /* update the line pointer to reference the new offset */
574 lp->lp_off = upper;
575 }
576
577 /* move the remaining tuples. */
578 memmove((char *) page + upper,
579 page + copy_head,
581 }
582 else
583 {
585 char *scratchptr = scratch.data;
586
587 /*
588 * Non-presorted case: The tuples in the itemidbase array may be in
589 * any order. So, in order to move these to the end of the page we
590 * must make a temp copy of each tuple that needs to be moved before
591 * we copy them back into the page at the new offset.
592 *
593 * If a large percentage of tuples have been pruned (>75%) then we'll
594 * copy these into the temp buffer tuple-by-tuple, otherwise, we'll
595 * just do a single memcpy() for all tuples that need to be moved.
596 * When so many tuples have been removed there's likely to be a lot of
597 * gaps and it's unlikely that many non-movable tuples remain at the
598 * end of the page.
599 */
600 if (nitems < PageGetMaxOffsetNumber(page) / 4)
601 {
602 i = 0;
603 do
604 {
606 memcpy(scratchptr + itemidptr->itemoff, page + itemidptr->itemoff,
607 itemidptr->alignedlen);
608 i++;
609 } while (i < nitems);
610
611 /* Set things up for the compactification code below */
612 i = 0;
613 itemidptr = &itemidbase[0];
614 upper = phdr->pd_special;
615 }
616 else
617 {
618 upper = phdr->pd_special;
619
620 /*
621 * Many tuples are likely to already be in the correct location.
622 * There's no need to copy these into the temp buffer. Instead
623 * we'll just skip forward in the itemidbase array to the position
624 * that we do need to move tuples from so that the code below just
625 * leaves these ones alone.
626 */
627 i = 0;
628 do
629 {
631 if (upper != itemidptr->itemoff + itemidptr->alignedlen)
632 break;
633 upper -= itemidptr->alignedlen;
634
635 i++;
636 } while (i < nitems);
637
638 /* Copy all tuples that need to be moved into the temp buffer */
639 memcpy(scratchptr + phdr->pd_upper,
640 page + phdr->pd_upper,
641 upper - phdr->pd_upper);
642 }
643
644 /*
645 * Do the tuple compactification. itemidptr is already pointing to
646 * the first tuple that we're going to move. Here we collapse the
647 * memcpy calls for adjacent tuples into a single call. This is done
648 * by delaying the memcpy call until we find a gap that needs to be
649 * closed.
650 */
651 copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
652 for (; i < nitems; i++)
653 {
654 ItemId lp;
655
657 lp = PageGetItemId(page, itemidptr->offsetindex + 1);
658
659 /* copy pending tuples when we detect a gap */
660 if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
661 {
662 memcpy((char *) page + upper,
665
666 /*
667 * We've now copied all tuples already seen, but not the
668 * current tuple, so we set the copy_tail to the end of this
669 * tuple.
670 */
671 copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
672 }
673 /* shift the target offset down by the length of this tuple */
674 upper -= itemidptr->alignedlen;
675 /* point the copy_head to the start of this tuple */
676 copy_head = itemidptr->itemoff;
677
678 /* update the line pointer to reference the new offset */
679 lp->lp_off = upper;
680 }
681
682 /* Copy the remaining chunk */
683 memcpy((char *) page + upper,
686 }
687
688 phdr->pd_upper = upper;
689}
690
691/*
692 * PageRepairFragmentation
693 *
694 * Frees fragmented space on a heap page following pruning.
695 *
696 * This routine is usable for heap pages only, but see PageIndexMultiDelete.
697 *
698 * This routine removes unused line pointers from the end of the line pointer
699 * array. This is possible when dead heap-only tuples get removed by pruning,
700 * especially when there were HOT chains with several tuples each beforehand.
701 *
702 * Caller had better have a full cleanup lock on page's buffer. As a side
703 * effect the page's PD_HAS_FREE_LINES hint bit will be set or unset as
704 * needed. Caller might also need to account for a reduction in the length of
705 * the line pointer array following array truncation.
706 */
707void
709{
710 Offset pd_lower = ((PageHeader) page)->pd_lower;
711 Offset pd_upper = ((PageHeader) page)->pd_upper;
712 Offset pd_special = ((PageHeader) page)->pd_special;
716 ItemId lp;
717 int nline,
718 nstorage,
719 nunused;
721 int i;
722 Size totallen;
723 bool presorted = true; /* For now */
724
725 /*
726 * It's worth the trouble to be more paranoid here than in most places,
727 * because we are about to reshuffle data in (what is usually) a shared
728 * disk buffer. If we aren't careful then corrupted pointers, lengths,
729 * etc could cause us to clobber adjacent disk buffers, spreading the data
730 * loss further. So, check everything.
731 */
732 if (pd_lower < SizeOfPageHeaderData ||
733 pd_lower > pd_upper ||
734 pd_upper > pd_special ||
735 pd_special > BLCKSZ ||
736 pd_special != MAXALIGN(pd_special))
739 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
740 pd_lower, pd_upper, pd_special)));
741
742 /*
743 * Run through the line pointer array and collect data about live items.
744 */
747 nunused = totallen = 0;
748 last_offset = pd_special;
749 for (i = FirstOffsetNumber; i <= nline; i++)
750 {
751 lp = PageGetItemId(page, i);
752 if (ItemIdIsUsed(lp))
753 {
754 if (ItemIdHasStorage(lp))
755 {
756 itemidptr->offsetindex = i - 1;
757 itemidptr->itemoff = ItemIdGetOffset(lp);
758
759 if (last_offset > itemidptr->itemoff)
760 last_offset = itemidptr->itemoff;
761 else
762 presorted = false;
763
764 if (unlikely(itemidptr->itemoff < (int) pd_upper ||
765 itemidptr->itemoff >= (int) pd_special))
768 errmsg("corrupted line pointer: %u",
769 itemidptr->itemoff)));
770 itemidptr->alignedlen = MAXALIGN(ItemIdGetLength(lp));
771 totallen += itemidptr->alignedlen;
772 itemidptr++;
773 }
774
775 finalusedlp = i; /* Could be the final non-LP_UNUSED item */
776 }
777 else
778 {
779 /* Unused entries should have lp_len = 0, but make sure */
782 nunused++;
783 }
784 }
785
787 if (nstorage == 0)
788 {
789 /* Page is completely empty, so just reset it quickly */
790 ((PageHeader) page)->pd_upper = pd_special;
791 }
792 else
793 {
794 /* Need to compact the page the hard way */
795 if (totallen > (Size) (pd_special - pd_lower))
798 errmsg("corrupted item lengths: total %zu, available space %u",
799 totallen, pd_special - pd_lower)));
800
802 }
803
804 if (finalusedlp != nline)
805 {
806 /* The last line pointer is not the last used line pointer */
808
809 Assert(nunused >= nunusedend && nunusedend > 0);
810
811 /* remove trailing unused line pointers from the count */
812 nunused -= nunusedend;
813 /* truncate the line pointer array */
814 ((PageHeader) page)->pd_lower -= (sizeof(ItemIdData) * nunusedend);
815 }
816
817 /* Set hint bit for PageAddItemExtended */
818 if (nunused > 0)
820 else
822}
823
824/*
825 * PageTruncateLinePointerArray
826 *
827 * Removes unused line pointers at the end of the line pointer array.
828 *
829 * This routine is usable for heap pages only. It is called by VACUUM during
830 * its second pass over the heap. We expect at least one LP_UNUSED line
831 * pointer on the page (if VACUUM didn't have an LP_DEAD item on the page that
832 * it just set to LP_UNUSED then it should not call here).
833 *
834 * We avoid truncating the line pointer array to 0 items, if necessary by
835 * leaving behind a single remaining LP_UNUSED item. This is a little
836 * arbitrary, but it seems like a good idea to avoid leaving a PageIsEmpty()
837 * page behind.
838 *
839 * Caller can have either an exclusive lock or a full cleanup lock on page's
840 * buffer. The page's PD_HAS_FREE_LINES hint bit will be set or unset based
841 * on whether or not we leave behind any remaining LP_UNUSED items.
842 */
843void
845{
846 PageHeader phdr = (PageHeader) page;
847 bool countdone = false,
848 sethint = false;
849 int nunusedend = 0;
850
851 /* Scan line pointer array back-to-front */
852 for (int i = PageGetMaxOffsetNumber(page); i >= FirstOffsetNumber; i--)
853 {
854 ItemId lp = PageGetItemId(page, i);
855
856 if (!countdone && i > FirstOffsetNumber)
857 {
858 /*
859 * Still determining which line pointers from the end of the array
860 * will be truncated away. Either count another line pointer as
861 * safe to truncate, or notice that it's not safe to truncate
862 * additional line pointers (stop counting line pointers).
863 */
864 if (!ItemIdIsUsed(lp))
865 nunusedend++;
866 else
867 countdone = true;
868 }
869 else
870 {
871 /*
872 * Once we've stopped counting we still need to figure out if
873 * there are any remaining LP_UNUSED line pointers somewhere more
874 * towards the front of the array.
875 */
876 if (!ItemIdIsUsed(lp))
877 {
878 /*
879 * This is an unused line pointer that we won't be truncating
880 * away -- so there is at least one. Set hint on page.
881 */
882 sethint = true;
883 break;
884 }
885 }
886 }
887
888 if (nunusedend > 0)
889 {
890 phdr->pd_lower -= sizeof(ItemIdData) * nunusedend;
891
892#ifdef CLOBBER_FREED_MEMORY
893 memset((char *) page + phdr->pd_lower, 0x7F,
894 sizeof(ItemIdData) * nunusedend);
895#endif
896 }
897 else
899
900 /* Set hint bit for PageAddItemExtended */
901 if (sethint)
903 else
905}
906
907/*
908 * PageGetFreeSpace
909 * Returns the size of the free (allocatable) space on a page,
910 * reduced by the space needed for a new line pointer.
911 *
912 * Note: this should usually only be used on index pages. Use
913 * PageGetHeapFreeSpace on heap pages.
914 */
915Size
917{
918 const PageHeaderData *phdr = (const PageHeaderData *) page;
919 int space;
920
921 /*
922 * Use signed arithmetic here so that we behave sensibly if pd_lower >
923 * pd_upper.
924 */
925 space = (int) phdr->pd_upper - (int) phdr->pd_lower;
926
927 if (space < (int) sizeof(ItemIdData))
928 return 0;
929 space -= sizeof(ItemIdData);
930
931 return (Size) space;
932}
933
934/*
935 * PageGetFreeSpaceForMultipleTuples
936 * Returns the size of the free (allocatable) space on a page,
937 * reduced by the space needed for multiple new line pointers.
938 *
939 * Note: this should usually only be used on index pages. Use
940 * PageGetHeapFreeSpace on heap pages.
941 */
942Size
944{
945 const PageHeaderData *phdr = (const PageHeaderData *) page;
946 int space;
947
948 /*
949 * Use signed arithmetic here so that we behave sensibly if pd_lower >
950 * pd_upper.
951 */
952 space = (int) phdr->pd_upper - (int) phdr->pd_lower;
953
954 if (space < (int) (ntups * sizeof(ItemIdData)))
955 return 0;
956 space -= ntups * sizeof(ItemIdData);
957
958 return (Size) space;
959}
960
961/*
962 * PageGetExactFreeSpace
963 * Returns the size of the free (allocatable) space on a page,
964 * without any consideration for adding/removing line pointers.
965 */
966Size
968{
969 const PageHeaderData *phdr = (const PageHeaderData *) page;
970 int space;
971
972 /*
973 * Use signed arithmetic here so that we behave sensibly if pd_lower >
974 * pd_upper.
975 */
976 space = (int) phdr->pd_upper - (int) phdr->pd_lower;
977
978 if (space < 0)
979 return 0;
980
981 return (Size) space;
982}
983
984
985/*
986 * PageGetHeapFreeSpace
987 * Returns the size of the free (allocatable) space on a page,
988 * reduced by the space needed for a new line pointer.
989 *
990 * The difference between this and PageGetFreeSpace is that this will return
991 * zero if there are already MaxHeapTuplesPerPage line pointers in the page
992 * and none are free. We use this to enforce that no more than
993 * MaxHeapTuplesPerPage line pointers are created on a heap page. (Although
994 * no more tuples than that could fit anyway, in the presence of redirected
995 * or dead line pointers it'd be possible to have too many line pointers.
996 * To avoid breaking code that assumes MaxHeapTuplesPerPage is a hard limit
997 * on the number of line pointers, we make this extra check.)
998 */
999Size
1001{
1002 Size space;
1003
1004 space = PageGetFreeSpace(page);
1005 if (space > 0)
1006 {
1007 OffsetNumber offnum,
1008 nline;
1009
1010 /*
1011 * Are there already MaxHeapTuplesPerPage line pointers in the page?
1012 */
1015 {
1016 if (PageHasFreeLinePointers(page))
1017 {
1018 /*
1019 * Since this is just a hint, we must confirm that there is
1020 * indeed a free line pointer
1021 */
1022 for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
1023 {
1024 ItemId lp = PageGetItemId(unconstify(PageData *, page), offnum);
1025
1026 if (!ItemIdIsUsed(lp))
1027 break;
1028 }
1029
1030 if (offnum > nline)
1031 {
1032 /*
1033 * The hint is wrong, but we can't clear it here since we
1034 * don't have the ability to mark the page dirty.
1035 */
1036 space = 0;
1037 }
1038 }
1039 else
1040 {
1041 /*
1042 * Although the hint might be wrong, PageAddItem will believe
1043 * it anyway, so we must believe it too.
1044 */
1045 space = 0;
1046 }
1047 }
1048 }
1049 return space;
1050}
1051
1052
1053/*
1054 * PageIndexTupleDelete
1055 *
1056 * This routine does the work of removing a tuple from an index page.
1057 *
1058 * Unlike heap pages, we compact out the line pointer for the removed tuple.
1059 */
1060void
1062{
1063 PageHeader phdr = (PageHeader) page;
1064 char *addr;
1065 ItemId tup;
1066 Size size;
1067 unsigned offset;
1068 int nbytes;
1069 int offidx;
1070 int nline;
1071
1072 /*
1073 * As with PageRepairFragmentation, paranoia seems justified.
1074 */
1075 if (phdr->pd_lower < SizeOfPageHeaderData ||
1076 phdr->pd_lower > phdr->pd_upper ||
1077 phdr->pd_upper > phdr->pd_special ||
1078 phdr->pd_special > BLCKSZ ||
1079 phdr->pd_special != MAXALIGN(phdr->pd_special))
1080 ereport(ERROR,
1082 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1083 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1084
1086 if ((int) offnum <= 0 || (int) offnum > nline)
1087 elog(ERROR, "invalid index offnum: %u", offnum);
1088
1089 /* change offset number to offset index */
1090 offidx = offnum - 1;
1091
1092 tup = PageGetItemId(page, offnum);
1094 size = ItemIdGetLength(tup);
1095 offset = ItemIdGetOffset(tup);
1096
1097 if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
1098 offset != MAXALIGN(offset))
1099 ereport(ERROR,
1101 errmsg("corrupted line pointer: offset = %u, size = %zu",
1102 offset, size)));
1103
1104 /* Amount of space to actually be deleted */
1105 size = MAXALIGN(size);
1106
1107 /*
1108 * First, we want to get rid of the pd_linp entry for the index tuple. We
1109 * copy all subsequent linp's back one slot in the array. We don't use
1110 * PageGetItemId, because we are manipulating the _array_, not individual
1111 * linp's.
1112 */
1113 nbytes = phdr->pd_lower -
1114 ((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
1115
1116 if (nbytes > 0)
1117 memmove(&(phdr->pd_linp[offidx]),
1118 &(phdr->pd_linp[offidx + 1]),
1119 nbytes);
1120
1121 /*
1122 * Now move everything between the old upper bound (beginning of tuple
1123 * space) and the beginning of the deleted tuple forward, so that space in
1124 * the middle of the page is left free. If we've just deleted the tuple
1125 * at the beginning of tuple space, then there's no need to do the copy.
1126 */
1127
1128 /* beginning of tuple space */
1129 addr = (char *) page + phdr->pd_upper;
1130
1131 if (offset > phdr->pd_upper)
1132 memmove(addr + size, addr, offset - phdr->pd_upper);
1133
1134 /* adjust free space boundary pointers */
1135 phdr->pd_upper += size;
1136 phdr->pd_lower -= sizeof(ItemIdData);
1137
1138 /*
1139 * Finally, we need to adjust the linp entries that remain.
1140 *
1141 * Anything that used to be before the deleted tuple's data was moved
1142 * forward by the size of the deleted tuple.
1143 */
1144 if (!PageIsEmpty(page))
1145 {
1146 int i;
1147
1148 nline--; /* there's one less than when we started */
1149 for (i = 1; i <= nline; i++)
1150 {
1151 ItemId ii = PageGetItemId(page, i);
1152
1154 if (ItemIdGetOffset(ii) <= offset)
1155 ii->lp_off += size;
1156 }
1157 }
1158}
1159
1160
1161/*
1162 * PageIndexMultiDelete
1163 *
1164 * This routine handles the case of deleting multiple tuples from an
1165 * index page at once. It is considerably faster than a loop around
1166 * PageIndexTupleDelete ... however, the caller *must* supply the array
1167 * of item numbers to be deleted in item number order!
1168 */
1169void
1171{
1172 PageHeader phdr = (PageHeader) page;
1173 Offset pd_lower = phdr->pd_lower;
1174 Offset pd_upper = phdr->pd_upper;
1175 Offset pd_special = phdr->pd_special;
1180 ItemId lp;
1181 int nline,
1182 nused;
1183 Size totallen;
1184 Size size;
1185 unsigned offset;
1186 int nextitm;
1187 OffsetNumber offnum;
1188 bool presorted = true; /* For now */
1189
1191
1192 /*
1193 * If there aren't very many items to delete, then retail
1194 * PageIndexTupleDelete is the best way. Delete the items in reverse
1195 * order so we don't have to think about adjusting item numbers for
1196 * previous deletions.
1197 *
1198 * TODO: tune the magic number here
1199 */
1200 if (nitems <= 2)
1201 {
1202 while (--nitems >= 0)
1204 return;
1205 }
1206
1207 /*
1208 * As with PageRepairFragmentation, paranoia seems justified.
1209 */
1210 if (pd_lower < SizeOfPageHeaderData ||
1211 pd_lower > pd_upper ||
1212 pd_upper > pd_special ||
1213 pd_special > BLCKSZ ||
1214 pd_special != MAXALIGN(pd_special))
1215 ereport(ERROR,
1217 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1218 pd_lower, pd_upper, pd_special)));
1219
1220 /*
1221 * Scan the line pointer array and build a list of just the ones we are
1222 * going to keep. Notice we do not modify the page yet, since we are
1223 * still validity-checking.
1224 */
1227 totallen = 0;
1228 nused = 0;
1229 nextitm = 0;
1230 last_offset = pd_special;
1231 for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
1232 {
1233 lp = PageGetItemId(page, offnum);
1235 size = ItemIdGetLength(lp);
1236 offset = ItemIdGetOffset(lp);
1237 if (offset < pd_upper ||
1238 (offset + size) > pd_special ||
1239 offset != MAXALIGN(offset))
1240 ereport(ERROR,
1242 errmsg("corrupted line pointer: offset = %u, size = %zu",
1243 offset, size)));
1244
1245 if (nextitm < nitems && offnum == itemnos[nextitm])
1246 {
1247 /* skip item to be deleted */
1248 nextitm++;
1249 }
1250 else
1251 {
1252 itemidptr->offsetindex = nused; /* where it will go */
1253 itemidptr->itemoff = offset;
1254
1255 if (last_offset > itemidptr->itemoff)
1256 last_offset = itemidptr->itemoff;
1257 else
1258 presorted = false;
1259
1260 itemidptr->alignedlen = MAXALIGN(size);
1261 totallen += itemidptr->alignedlen;
1262 newitemids[nused] = *lp;
1263 itemidptr++;
1264 nused++;
1265 }
1266 }
1267
1268 /* this will catch invalid or out-of-order itemnos[] */
1269 if (nextitm != nitems)
1270 elog(ERROR, "incorrect index offsets supplied");
1271
1272 if (totallen > (Size) (pd_special - pd_lower))
1273 ereport(ERROR,
1275 errmsg("corrupted item lengths: total %zu, available space %u",
1276 totallen, pd_special - pd_lower)));
1277
1278 /*
1279 * Looks good. Overwrite the line pointers with the copy, from which we've
1280 * removed all the unused items.
1281 */
1282 memcpy(phdr->pd_linp, newitemids, nused * sizeof(ItemIdData));
1283 phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
1284
1285 /* and compactify the tuple data */
1286 if (nused > 0)
1287 compactify_tuples(itemidbase, nused, page, presorted);
1288 else
1289 phdr->pd_upper = pd_special;
1290}
1291
1292
1293/*
1294 * PageIndexTupleDeleteNoCompact
1295 *
1296 * Remove the specified tuple from an index page, but set its line pointer
1297 * to "unused" instead of compacting it out, except that it can be removed
1298 * if it's the last line pointer on the page.
1299 *
1300 * This is used for index AMs that require that existing TIDs of live tuples
1301 * remain unchanged, and are willing to allow unused line pointers instead.
1302 */
1303void
1305{
1306 PageHeader phdr = (PageHeader) page;
1307 char *addr;
1308 ItemId tup;
1309 Size size;
1310 unsigned offset;
1311 int nline;
1312
1313 /*
1314 * As with PageRepairFragmentation, paranoia seems justified.
1315 */
1316 if (phdr->pd_lower < SizeOfPageHeaderData ||
1317 phdr->pd_lower > phdr->pd_upper ||
1318 phdr->pd_upper > phdr->pd_special ||
1319 phdr->pd_special > BLCKSZ ||
1320 phdr->pd_special != MAXALIGN(phdr->pd_special))
1321 ereport(ERROR,
1323 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1324 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1325
1327 if ((int) offnum <= 0 || (int) offnum > nline)
1328 elog(ERROR, "invalid index offnum: %u", offnum);
1329
1330 tup = PageGetItemId(page, offnum);
1332 size = ItemIdGetLength(tup);
1333 offset = ItemIdGetOffset(tup);
1334
1335 if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
1336 offset != MAXALIGN(offset))
1337 ereport(ERROR,
1339 errmsg("corrupted line pointer: offset = %u, size = %zu",
1340 offset, size)));
1341
1342 /* Amount of space to actually be deleted */
1343 size = MAXALIGN(size);
1344
1345 /*
1346 * Either set the line pointer to "unused", or zap it if it's the last
1347 * one. (Note: it's possible that the next-to-last one(s) are already
1348 * unused, but we do not trouble to try to compact them out if so.)
1349 */
1350 if ((int) offnum < nline)
1352 else
1353 {
1354 phdr->pd_lower -= sizeof(ItemIdData);
1355 nline--; /* there's one less than when we started */
1356 }
1357
1358 /*
1359 * Now move everything between the old upper bound (beginning of tuple
1360 * space) and the beginning of the deleted tuple forward, so that space in
1361 * the middle of the page is left free. If we've just deleted the tuple
1362 * at the beginning of tuple space, then there's no need to do the copy.
1363 */
1364
1365 /* beginning of tuple space */
1366 addr = (char *) page + phdr->pd_upper;
1367
1368 if (offset > phdr->pd_upper)
1369 memmove(addr + size, addr, offset - phdr->pd_upper);
1370
1371 /* adjust free space boundary pointer */
1372 phdr->pd_upper += size;
1373
1374 /*
1375 * Finally, we need to adjust the linp entries that remain.
1376 *
1377 * Anything that used to be before the deleted tuple's data was moved
1378 * forward by the size of the deleted tuple.
1379 */
1380 if (!PageIsEmpty(page))
1381 {
1382 int i;
1383
1384 for (i = 1; i <= nline; i++)
1385 {
1386 ItemId ii = PageGetItemId(page, i);
1387
1388 if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1389 ii->lp_off += size;
1390 }
1391 }
1392}
1393
1394
1395/*
1396 * PageIndexTupleOverwrite
1397 *
1398 * Replace a specified tuple on an index page.
1399 *
1400 * The new tuple is placed exactly where the old one had been, shifting
1401 * other tuples' data up or down as needed to keep the page compacted.
1402 * This is better than deleting and reinserting the tuple, because it
1403 * avoids any data shifting when the tuple size doesn't change; and
1404 * even when it does, we avoid moving the line pointers around.
1405 * This could be used by an index AM that doesn't want to unset the
1406 * LP_DEAD bit when it happens to be set. It could conceivably also be
1407 * used by an index AM that cares about the physical order of tuples as
1408 * well as their logical/ItemId order.
1409 *
1410 * If there's insufficient space for the new tuple, return false. Other
1411 * errors represent data-corruption problems, so we just elog.
1412 */
1413bool
1415 const void *newtup, Size newsize)
1416{
1417 PageHeader phdr = (PageHeader) page;
1418 ItemId tupid;
1419 int oldsize;
1420 unsigned offset;
1422 int size_diff;
1423 int itemcount;
1424
1425 /*
1426 * As with PageRepairFragmentation, paranoia seems justified.
1427 */
1428 if (phdr->pd_lower < SizeOfPageHeaderData ||
1429 phdr->pd_lower > phdr->pd_upper ||
1430 phdr->pd_upper > phdr->pd_special ||
1431 phdr->pd_special > BLCKSZ ||
1432 phdr->pd_special != MAXALIGN(phdr->pd_special))
1433 ereport(ERROR,
1435 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1436 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1437
1439 if ((int) offnum <= 0 || (int) offnum > itemcount)
1440 elog(ERROR, "invalid index offnum: %u", offnum);
1441
1442 tupid = PageGetItemId(page, offnum);
1445 offset = ItemIdGetOffset(tupid);
1446
1447 if (offset < phdr->pd_upper || (offset + oldsize) > phdr->pd_special ||
1448 offset != MAXALIGN(offset))
1449 ereport(ERROR,
1451 errmsg("corrupted line pointer: offset = %u, size = %d",
1452 offset, oldsize)));
1453
1454 /*
1455 * Determine actual change in space requirement, check for page overflow.
1456 */
1459 if (alignednewsize > oldsize + (phdr->pd_upper - phdr->pd_lower))
1460 return false;
1461
1462 /*
1463 * Relocate existing data and update line pointers, unless the new tuple
1464 * is the same size as the old (after alignment), in which case there's
1465 * nothing to do. Notice that what we have to relocate is data before the
1466 * target tuple, not data after, so it's convenient to express size_diff
1467 * as the amount by which the tuple's size is decreasing, making it the
1468 * delta to add to pd_upper and affected line pointers.
1469 */
1471 if (size_diff != 0)
1472 {
1473 char *addr = (char *) page + phdr->pd_upper;
1474 int i;
1475
1476 /* relocate all tuple data before the target tuple */
1477 memmove(addr + size_diff, addr, offset - phdr->pd_upper);
1478
1479 /* adjust free space boundary pointer */
1480 phdr->pd_upper += size_diff;
1481
1482 /* adjust affected line pointers too */
1483 for (i = FirstOffsetNumber; i <= itemcount; i++)
1484 {
1485 ItemId ii = PageGetItemId(page, i);
1486
1487 /* Allow items without storage; currently only BRIN needs that */
1488 if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1489 ii->lp_off += size_diff;
1490 }
1491 }
1492
1493 /* Update the item's tuple length without changing its lp_flags field */
1494 tupid->lp_off = offset + size_diff;
1495 tupid->lp_len = newsize;
1496
1497 /* Copy new tuple data onto page */
1499
1500 return true;
1501}
1502
1503
1504/*
1505 * Set checksum on a page.
1506 *
1507 * If the page is in shared buffers, it needs to be locked in at least
1508 * share-exclusive mode.
1509 *
1510 * If checksums are disabled, or if the page is not initialized, just
1511 * return. Otherwise compute and set the checksum.
1512 *
1513 * In the past this needed to be done on a copy of the page, due to the
1514 * possibility of e.g., hint bits being set concurrently. However, this is not
1515 * necessary anymore as hint bits won't be set while IO is going on.
1516 */
1517void
1519{
1521 /* If we don't need a checksum, just return */
1522 if (PageIsNew(page) || !DataChecksumsNeedWrite())
1523 {
1525 return;
1526 }
1527
1528 ((PageHeader) page)->pd_checksum = pg_checksum_page(page, blkno);
1530}
uint32 BlockNumber
Definition block.h:31
Size PageGetFreeSpace(const PageData *page)
Definition bufpage.c:916
static void compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorted)
Definition bufpage.c:483
bool ignore_checksum_failure
Definition bufpage.c:27
void PageRestoreTempPage(Page tempPage, Page oldPage)
Definition bufpage.c:433
Size PageGetFreeSpaceForMultipleTuples(const PageData *page, int ntups)
Definition bufpage.c:943
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:1000
void PageSetChecksum(Page page, BlockNumber blkno)
Definition bufpage.c:1518
itemIdCompactData * itemIdCompact
Definition bufpage.c:452
Page PageGetTempPage(const PageData *page)
Definition bufpage.c:374
Page PageGetTempPageCopy(const PageData *page)
Definition bufpage.c:391
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition bufpage.c:1170
bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum, const void *newtup, Size newsize)
Definition bufpage.c:1414
OffsetNumber PageAddItemExtended(Page page, const void *item, Size size, OffsetNumber offsetNumber, int flags)
Definition bufpage.c:203
void PageIndexTupleDelete(Page page, OffsetNumber offnum)
Definition bufpage.c:1061
void PageRepairFragmentation(Page page)
Definition bufpage.c:708
Size PageGetExactFreeSpace(const PageData *page)
Definition bufpage.c:967
void PageTruncateLinePointerArray(Page page)
Definition bufpage.c:844
void PageInit(Page page, Size pageSize, Size specialSize)
Definition bufpage.c:42
bool PageIsVerified(PageData *page, BlockNumber blkno, int flags, bool *checksum_failure_p)
Definition bufpage.c:94
Page PageGetTempPageCopySpecial(const PageData *page)
Definition bufpage.c:411
void PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offnum)
Definition bufpage.c:1304
#define PD_VALID_FLAG_BITS
Definition bufpage.h:217
static bool PageIsEmpty(const PageData *page)
Definition bufpage.h:248
#define PIV_LOG_LOG
Definition bufpage.h:500
PageHeaderData * PageHeader
Definition bufpage.h:199
static void PageSetHasFreeLinePointers(Page page)
Definition bufpage.h:427
static uint16 PageGetSpecialSize(const PageData *page)
Definition bufpage.h:341
#define PageGetSpecialPointer(page)
Definition bufpage.h:363
char PageData
Definition bufpage.h:80
#define PIV_ZERO_BUFFERS_ON_ERROR
Definition bufpage.h:502
static Size PageGetPageSize(const PageData *page)
Definition bufpage.h:301
#define PIV_LOG_WARNING
Definition bufpage.h:499
static bool PageIsNew(const PageData *page)
Definition bufpage.h:258
#define SizeOfPageHeaderData
Definition bufpage.h:241
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:268
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:378
static bool PageHasFreeLinePointers(const PageData *page)
Definition bufpage.h:422
#define PG_PAGE_LAYOUT_VERSION
Definition bufpage.h:231
static void PageClearHasFreeLinePointers(Page page)
Definition bufpage.h:432
PageData * Page
Definition bufpage.h:81
uint16 LocationIndex
Definition bufpage.h:90
static void PageSetPageSizeAndVersion(Page page, Size size, uint8 version)
Definition bufpage.h:324
#define PIV_IGNORE_CHECKSUM_FAILURE
Definition bufpage.h:501
#define PAI_IS_HEAP
Definition bufpage.h:496
#define PAI_OVERWRITE
Definition bufpage.h:495
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:396
#define unconstify(underlying_type, expr)
Definition c.h:1325
#define MAXALIGN(LEN)
Definition c.h:896
#define Assert(condition)
Definition c.h:943
int16_t int16
Definition c.h:619
uint16_t uint16
Definition c.h:623
#define unlikely(x)
Definition c.h:438
#define MemSet(start, val, len)
Definition c.h:1107
signed int Offset
Definition c.h:708
size_t Size
Definition c.h:689
uint16 pg_checksum_page(char *page, BlockNumber blkno)
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int errcode(int sqlerrcode)
Definition elog.c:874
#define LOG
Definition elog.h:32
#define WARNING
Definition elog.h:37
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
#define MaxHeapTuplesPerPage
#define nitems(x)
Definition indent.h:31
int i
Definition isn.c:77
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdGetOffset(itemId)
Definition itemid.h:65
#define ItemIdSetNormal(itemId, off, len)
Definition itemid.h:140
#define ItemIdIsUsed(itemId)
Definition itemid.h:92
#define ItemIdSetUnused(itemId)
Definition itemid.h:128
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
#define MaxIndexTuplesPerPage
Definition itup.h:181
void pfree(void *pointer)
Definition mcxt.c:1616
void * palloc(Size size)
Definition mcxt.c:1387
#define VALGRIND_CHECK_MEM_IS_DEFINED(addr, size)
Definition memdebug.h:23
static bool pg_memory_is_all_zeros(const void *ptr, size_t len)
Definition memutils.h:219
#define RESUME_INTERRUPTS()
Definition miscadmin.h:138
#define HOLD_INTERRUPTS()
Definition miscadmin.h:136
static char * errmsg
#define InvalidOffsetNumber
Definition off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition off.h:39
#define OffsetNumberNext(offsetNumber)
Definition off.h:52
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
static bool checksum_failure
#define ERRCODE_DATA_CORRUPTED
static int fb(int x)
unsigned lp_off
Definition itemid.h:27
char data[BLCKSZ]
Definition c.h:1204
LocationIndex pd_special
Definition bufpage.h:193
LocationIndex pd_upper
Definition bufpage.h:192
uint16 pd_flags
Definition bufpage.h:190
uint16 pd_checksum
Definition bufpage.h:189
LocationIndex pd_lower
Definition bufpage.h:191
ItemIdData pd_linp[FLEXIBLE_ARRAY_MEMBER]
Definition bufpage.h:196
uint16 offsetindex
Definition bufpage.c:448
uint16 alignedlen
Definition bufpage.c:450
bool DataChecksumsNeedVerify(void)
Definition xlog.c:4706
bool DataChecksumsNeedWrite(void)
Definition xlog.c:4678