PostgreSQL Source Code (git master)
bufpage.c File Reference
#include "postgres.h"
#include "access/htup_details.h"
#include "access/itup.h"
#include "access/xlog.h"
#include "pgstat.h"
#include "storage/checksum.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"

Data Structures

struct  itemIdCompactData
 

Typedefs

typedef struct itemIdCompactData itemIdCompactData
 
typedef itemIdCompactData * itemIdCompact
 

Functions

void PageInit (Page page, Size pageSize, Size specialSize)
 
bool PageIsVerifiedExtended (Page page, BlockNumber blkno, int flags)
 
OffsetNumber PageAddItemExtended (Page page, Item item, Size size, OffsetNumber offsetNumber, int flags)
 
Page PageGetTempPage (Page page)
 
Page PageGetTempPageCopy (Page page)
 
Page PageGetTempPageCopySpecial (Page page)
 
void PageRestoreTempPage (Page tempPage, Page oldPage)
 
static void compactify_tuples (itemIdCompact itemidbase, int nitems, Page page, bool presorted)
 
void PageRepairFragmentation (Page page)
 
void PageTruncateLinePointerArray (Page page)
 
Size PageGetFreeSpace (Page page)
 
Size PageGetFreeSpaceForMultipleTuples (Page page, int ntups)
 
Size PageGetExactFreeSpace (Page page)
 
Size PageGetHeapFreeSpace (Page page)
 
void PageIndexTupleDelete (Page page, OffsetNumber offnum)
 
void PageIndexMultiDelete (Page page, OffsetNumber *itemnos, int nitems)
 
void PageIndexTupleDeleteNoCompact (Page page, OffsetNumber offnum)
 
bool PageIndexTupleOverwrite (Page page, OffsetNumber offnum, Item newtup, Size newsize)
 
char * PageSetChecksumCopy (Page page, BlockNumber blkno)
 
void PageSetChecksumInplace (Page page, BlockNumber blkno)
 

Variables

bool ignore_checksum_failure = false
 

Typedef Documentation

◆ itemIdCompact

Definition at line 432 of file bufpage.c.

◆ itemIdCompactData

Function Documentation

◆ compactify_tuples()

static void compactify_tuples ( itemIdCompact  itemidbase,
int  nitems,
Page  page,
bool  presorted 
)
static

Definition at line 463 of file bufpage.c.

464{
465 PageHeader phdr = (PageHeader) page;
466 Offset upper;
467 Offset copy_tail;
468 Offset copy_head;
469 itemIdCompact itemidptr;
470 int i;
471
472 /* Code within will not work correctly if nitems == 0 */
473 Assert(nitems > 0);
474
475 if (presorted)
476 {
477
478#ifdef USE_ASSERT_CHECKING
479 {
480 /*
481 * Verify we've not gotten any new callers that are incorrectly
482 * passing a true presorted value.
483 */
484 Offset lastoff = phdr->pd_special;
485
486 for (i = 0; i < nitems; i++)
487 {
488 itemidptr = &itemidbase[i];
489
490 Assert(lastoff > itemidptr->itemoff);
491
492 lastoff = itemidptr->itemoff;
493 }
494 }
495#endif /* USE_ASSERT_CHECKING */
496
497 /*
498 * 'itemidbase' is already in the optimal order, i.e, lower item
499 * pointers have a higher offset. This allows us to memmove() the
500 * tuples up to the end of the page without having to worry about
501 * overwriting other tuples that have not been moved yet.
502 *
503 * There's a good chance that there are tuples already right at the
504 * end of the page that we can simply skip over because they're
505 * already in the correct location within the page. We'll do that
506 * first...
507 */
508 upper = phdr->pd_special;
509 i = 0;
510 do
511 {
512 itemidptr = &itemidbase[i];
513 if (upper != itemidptr->itemoff + itemidptr->alignedlen)
514 break;
515 upper -= itemidptr->alignedlen;
516
517 i++;
518 } while (i < nitems);
519
520 /*
521 * Now that we've found the first tuple that needs to be moved, we can
522 * do the tuple compactification. We try and make the least number of
523 * memmove() calls and only call memmove() when there's a gap. When
524 * we see a gap we just move all tuples after the gap up until the
525 * point of the last move operation.
526 */
527 copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
528 for (; i < nitems; i++)
529 {
530 ItemId lp;
531
532 itemidptr = &itemidbase[i];
533 lp = PageGetItemId(page, itemidptr->offsetindex + 1);
534
535 if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
536 {
537 memmove((char *) page + upper,
538 page + copy_head,
539 copy_tail - copy_head);
540
541 /*
542 * We've now moved all tuples already seen, but not the
543 * current tuple, so we set the copy_tail to the end of this
544 * tuple so it can be moved in another iteration of the loop.
545 */
546 copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
547 }
548 /* shift the target offset down by the length of this tuple */
549 upper -= itemidptr->alignedlen;
550 /* point the copy_head to the start of this tuple */
551 copy_head = itemidptr->itemoff;
552
553 /* update the line pointer to reference the new offset */
554 lp->lp_off = upper;
555 }
556
557 /* move the remaining tuples. */
558 memmove((char *) page + upper,
559 page + copy_head,
560 copy_tail - copy_head);
561 }
562 else
563 {
564 PGAlignedBlock scratch;
565 char *scratchptr = scratch.data;
566
567 /*
568 * Non-presorted case: The tuples in the itemidbase array may be in
569 * any order. So, in order to move these to the end of the page we
570 * must make a temp copy of each tuple that needs to be moved before
571 * we copy them back into the page at the new offset.
572 *
573 * If a large percentage of tuples have been pruned (>75%) then we'll
574 * copy these into the temp buffer tuple-by-tuple, otherwise, we'll
575 * just do a single memcpy() for all tuples that need to be moved.
576 * When so many tuples have been removed there's likely to be a lot of
577 * gaps and it's unlikely that many non-movable tuples remain at the
578 * end of the page.
579 */
580 if (nitems < PageGetMaxOffsetNumber(page) / 4)
581 {
582 i = 0;
583 do
584 {
585 itemidptr = &itemidbase[i];
586 memcpy(scratchptr + itemidptr->itemoff, page + itemidptr->itemoff,
587 itemidptr->alignedlen);
588 i++;
589 } while (i < nitems);
590
591 /* Set things up for the compactification code below */
592 i = 0;
593 itemidptr = &itemidbase[0];
594 upper = phdr->pd_special;
595 }
596 else
597 {
598 upper = phdr->pd_special;
599
600 /*
601 * Many tuples are likely to already be in the correct location.
602 * There's no need to copy these into the temp buffer. Instead
603 * we'll just skip forward in the itemidbase array to the position
604 * that we do need to move tuples from so that the code below just
605 * leaves these ones alone.
606 */
607 i = 0;
608 do
609 {
610 itemidptr = &itemidbase[i];
611 if (upper != itemidptr->itemoff + itemidptr->alignedlen)
612 break;
613 upper -= itemidptr->alignedlen;
614
615 i++;
616 } while (i < nitems);
617
618 /* Copy all tuples that need to be moved into the temp buffer */
619 memcpy(scratchptr + phdr->pd_upper,
620 page + phdr->pd_upper,
621 upper - phdr->pd_upper);
622 }
623
624 /*
625 * Do the tuple compactification. itemidptr is already pointing to
626 * the first tuple that we're going to move. Here we collapse the
627 * memcpy calls for adjacent tuples into a single call. This is done
628 * by delaying the memcpy call until we find a gap that needs to be
629 * closed.
630 */
631 copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
632 for (; i < nitems; i++)
633 {
634 ItemId lp;
635
636 itemidptr = &itemidbase[i];
637 lp = PageGetItemId(page, itemidptr->offsetindex + 1);
638
639 /* copy pending tuples when we detect a gap */
640 if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
641 {
642 memcpy((char *) page + upper,
643 scratchptr + copy_head,
644 copy_tail - copy_head);
645
646 /*
647 * We've now copied all tuples already seen, but not the
648 * current tuple, so we set the copy_tail to the end of this
649 * tuple.
650 */
651 copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
652 }
653 /* shift the target offset down by the length of this tuple */
654 upper -= itemidptr->alignedlen;
655 /* point the copy_head to the start of this tuple */
656 copy_head = itemidptr->itemoff;
657
658 /* update the line pointer to reference the new offset */
659 lp->lp_off = upper;
660 }
661
662 /* Copy the remaining chunk */
663 memcpy((char *) page + upper,
664 scratchptr + copy_head,
665 copy_tail - copy_head);
666 }
667
668 phdr->pd_upper = upper;
669}
PageHeaderData * PageHeader
Definition: bufpage.h:173
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:372
#define Assert(condition)
Definition: c.h:812
signed int Offset
Definition: c.h:578
#define nitems(x)
Definition: indent.h:31
int i
Definition: isn.c:72
Datum upper(PG_FUNCTION_ARGS)
Definition: oracle_compat.c:80
unsigned lp_off
Definition: itemid.h:27
LocationIndex pd_special
Definition: bufpage.h:167
LocationIndex pd_upper
Definition: bufpage.h:166
uint16 offsetindex
Definition: bufpage.c:428
uint16 alignedlen
Definition: bufpage.c:430
char data[BLCKSZ]
Definition: c.h:1073

References itemIdCompactData::alignedlen, Assert, PGAlignedBlock::data, i, itemIdCompactData::itemoff, ItemIdData::lp_off, nitems, itemIdCompactData::offsetindex, PageGetItemId(), PageGetMaxOffsetNumber(), PageHeaderData::pd_special, PageHeaderData::pd_upper, and upper().

Referenced by PageIndexMultiDelete(), and PageRepairFragmentation().

◆ PageAddItemExtended()

OffsetNumber PageAddItemExtended ( Page  page,
Item  item,
Size  size,
OffsetNumber  offsetNumber,
int  flags 
)

Definition at line 183 of file bufpage.c.

188{
189 PageHeader phdr = (PageHeader) page;
190 Size alignedSize;
191 int lower;
192 int upper;
193 ItemId itemId;
194 OffsetNumber limit;
195 bool needshuffle = false;
196
197 /*
198 * Be wary about corrupted page pointers
199 */
200 if (phdr->pd_lower < SizeOfPageHeaderData ||
201 phdr->pd_lower > phdr->pd_upper ||
202 phdr->pd_upper > phdr->pd_special ||
203 phdr->pd_special > BLCKSZ)
204 ereport(PANIC,
205 (errcode(ERRCODE_DATA_CORRUPTED),
206 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
207 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
208
209 /*
210 * Select offsetNumber to place the new item at
211 */
212 limit = OffsetNumberNext(PageGetMaxOffsetNumber(page));
213
214 /* was offsetNumber passed in? */
215 if (OffsetNumberIsValid(offsetNumber))
216 {
217 /* yes, check it */
218 if ((flags & PAI_OVERWRITE) != 0)
219 {
220 if (offsetNumber < limit)
221 {
222 itemId = PageGetItemId(page, offsetNumber);
223 if (ItemIdIsUsed(itemId) || ItemIdHasStorage(itemId))
224 {
225 elog(WARNING, "will not overwrite a used ItemId");
226 return InvalidOffsetNumber;
227 }
228 }
229 }
230 else
231 {
232 if (offsetNumber < limit)
233 needshuffle = true; /* need to move existing linp's */
234 }
235 }
236 else
237 {
238 /* offsetNumber was not passed in, so find a free slot */
239 /* if no free slot, we'll put it at limit (1st open slot) */
240 if (PageHasFreeLinePointers(page))
241 {
242 /*
243 * Scan line pointer array to locate a "recyclable" (unused)
244 * ItemId.
245 *
246 * Always use earlier items first. PageTruncateLinePointerArray
247 * can only truncate unused items when they appear as a contiguous
248 * group at the end of the line pointer array.
249 */
250 for (offsetNumber = FirstOffsetNumber;
251 offsetNumber < limit; /* limit is maxoff+1 */
252 offsetNumber++)
253 {
254 itemId = PageGetItemId(page, offsetNumber);
255
256 /*
257 * We check for no storage as well, just to be paranoid;
258 * unused items should never have storage. Assert() that the
259 * invariant is respected too.
260 */
261 Assert(ItemIdIsUsed(itemId) || !ItemIdHasStorage(itemId));
262
263 if (!ItemIdIsUsed(itemId) && !ItemIdHasStorage(itemId))
264 break;
265 }
266 if (offsetNumber >= limit)
267 {
268 /* the hint is wrong, so reset it */
269 PageClearHasFreeLinePointers(page);
270 }
271 }
272 else
273 {
274 /* don't bother searching if hint says there's no free slot */
275 offsetNumber = limit;
276 }
277 }
278
279 /* Reject placing items beyond the first unused line pointer */
280 if (offsetNumber > limit)
281 {
282 elog(WARNING, "specified item offset is too large");
283 return InvalidOffsetNumber;
284 }
285
286 /* Reject placing items beyond heap boundary, if heap */
287 if ((flags & PAI_IS_HEAP) != 0 && offsetNumber > MaxHeapTuplesPerPage)
288 {
289 elog(WARNING, "can't put more than MaxHeapTuplesPerPage items in a heap page");
290 return InvalidOffsetNumber;
291 }
292
293 /*
294 * Compute new lower and upper pointers for page, see if it'll fit.
295 *
296 * Note: do arithmetic as signed ints, to avoid mistakes if, say,
297 * alignedSize > pd_upper.
298 */
299 if (offsetNumber == limit || needshuffle)
300 lower = phdr->pd_lower + sizeof(ItemIdData);
301 else
302 lower = phdr->pd_lower;
303
304 alignedSize = MAXALIGN(size);
305
306 upper = (int) phdr->pd_upper - (int) alignedSize;
307
308 if (lower > upper)
309 return InvalidOffsetNumber;
310
311 /*
312 * OK to insert the item. First, shuffle the existing pointers if needed.
313 */
314 itemId = PageGetItemId(page, offsetNumber);
315
316 if (needshuffle)
317 memmove(itemId + 1, itemId,
318 (limit - offsetNumber) * sizeof(ItemIdData));
319
320 /* set the line pointer */
321 ItemIdSetNormal(itemId, upper, size);
322
323 /*
324 * Items normally contain no uninitialized bytes. Core bufpage consumers
325 * conform, but this is not a necessary coding rule; a new index AM could
326 * opt to depart from it. However, data type input functions and other
327 * C-language functions that synthesize datums should initialize all
328 * bytes; datumIsEqual() relies on this. Testing here, along with the
329 * similar check in printtup(), helps to catch such mistakes.
330 *
331 * Values of the "name" type retrieved via index-only scans may contain
332 * uninitialized bytes; see comment in btrescan(). Valgrind will report
333 * this as an error, but it is safe to ignore.
334 */
335 VALGRIND_CHECK_MEM_IS_DEFINED(item, size);
336
337 /* copy the item's data onto the page */
338 memcpy((char *) page + upper, item, size);
339
340 /* adjust page header */
341 phdr->pd_lower = (LocationIndex) lower;
342 phdr->pd_upper = (LocationIndex) upper;
343
344 return offsetNumber;
345}
#define SizeOfPageHeaderData
Definition: bufpage.h:216
static void PageClearHasFreeLinePointers(Page page)
Definition: bufpage.h:407
static bool PageHasFreeLinePointers(Page page)
Definition: bufpage.h:397
uint16 LocationIndex
Definition: bufpage.h:90
#define PAI_IS_HEAP
Definition: bufpage.h:465
#define PAI_OVERWRITE
Definition: bufpage.h:464
#define MAXALIGN(LEN)
Definition: c.h:765
size_t Size
Definition: c.h:559
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define WARNING
Definition: elog.h:36
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
#define MaxHeapTuplesPerPage
Definition: htup_details.h:572
struct ItemIdData ItemIdData
#define ItemIdSetNormal(itemId, off, len)
Definition: itemid.h:140
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
#define VALGRIND_CHECK_MEM_IS_DEFINED(addr, size)
Definition: memdebug.h:23
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
Datum lower(PG_FUNCTION_ARGS)
Definition: oracle_compat.c:49
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:41
static pg_noinline void Size size
Definition: slab.c:607
LocationIndex pd_lower
Definition: bufpage.h:165

References Assert, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), FirstOffsetNumber, InvalidOffsetNumber, ItemIdHasStorage, ItemIdIsUsed, ItemIdSetNormal, lower(), MAXALIGN, MaxHeapTuplesPerPage, OffsetNumberIsValid, OffsetNumberNext, PageClearHasFreeLinePointers(), PageGetItemId(), PageGetMaxOffsetNumber(), PageHasFreeLinePointers(), PAI_IS_HEAP, PAI_OVERWRITE, PANIC, PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, size, SizeOfPageHeaderData, upper(), VALGRIND_CHECK_MEM_IS_DEFINED, and WARNING.
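
As a usage illustration (not code from the PostgreSQL tree), the sketch below adds two items to a private scratch page: the first call passes InvalidOffsetNumber so that PageAddItemExtended() picks the first recyclable or new line pointer itself, while the second forces a specific offset with PAI_IS_HEAP, shuffling later line pointers as needed. The DemoTuple struct and the helper name are assumptions made for this example; real callers work on exclusively locked shared buffers and handle WAL logging themselves.

#include "postgres.h"
#include "storage/bufpage.h"

/* hypothetical fixed-size payload used only by this sketch */
typedef struct DemoTuple
{
    int32       key;
    int32       value;
} DemoTuple;

static void
demo_add_items(void)
{
    PGAlignedBlock buf;         /* properly aligned scratch block */
    Page        page = (Page) buf.data;
    DemoTuple   tup = {1, 100};
    OffsetNumber off;

    PageInit(page, BLCKSZ, 0);  /* no special space needed here */

    /* Let bufpage.c pick the slot: pass InvalidOffsetNumber. */
    off = PageAddItemExtended(page, (Item) &tup, sizeof(DemoTuple),
                              InvalidOffsetNumber, 0);
    if (off == InvalidOffsetNumber)
        elog(ERROR, "could not add first demo item");

    /* Force the new item into slot 1; the existing line pointer moves up. */
    tup.key = 2;
    off = PageAddItemExtended(page, (Item) &tup, sizeof(DemoTuple),
                              FirstOffsetNumber, PAI_IS_HEAP);
    if (off == InvalidOffsetNumber)
        elog(ERROR, "could not add second demo item");
}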

◆ PageGetExactFreeSpace()

Size PageGetExactFreeSpace ( Page  page)

Definition at line 947 of file bufpage.c.

948{
949 int space;
950
951 /*
952 * Use signed arithmetic here so that we behave sensibly if pd_lower >
953 * pd_upper.
954 */
955 space = (int) ((PageHeader) page)->pd_upper -
956 (int) ((PageHeader) page)->pd_lower;
957
958 if (space < 0)
959 return 0;
960
961 return (Size) space;
962}

Referenced by _bt_bottomupdel_pass(), _bt_dedup_pass(), _bt_findsplitloc(), allocNewBuffer(), brin_can_do_samepage_update(), doPickSplit(), GetHashPageStats(), ginHeapTupleFastInsert(), pgstat_heap(), pgstat_index_page(), pgstatindex_impl(), spgAddNodeAction(), SpGistGetBuffer(), SpGistPageAddNewItem(), SpGistSetLastUsedPage(), statapprox_heap(), and writeListPage().

◆ PageGetFreeSpace()

Size PageGetFreeSpace ( Page  page)

Definition at line 896 of file bufpage.c.

897{
898 int space;
899
900 /*
901 * Use signed arithmetic here so that we behave sensibly if pd_lower >
902 * pd_upper.
903 */
904 space = (int) ((PageHeader) page)->pd_upper -
905 (int) ((PageHeader) page)->pd_lower;
906
907 if (space < (int) sizeof(ItemIdData))
908 return 0;
909 space -= sizeof(ItemIdData);
910
911 return (Size) space;
912}

Referenced by _bt_buildadd(), _bt_delete_or_dedup_one_page(), _bt_findinsertloc(), _bt_insertonpg(), _bt_search_insert(), _hash_doinsert(), br_page_get_freespace(), entryIsEnoughSpace(), GetBTPageStatistics(), GetHashPageStatistics(), gist_indexsortbuild_levelstate_add(), gistnospace(), heap_xlog_visible(), PageGetHeapFreeSpace(), and terminate_brin_buildstate().

◆ PageGetFreeSpaceForMultipleTuples()

Size PageGetFreeSpaceForMultipleTuples ( Page  page,
int  ntups 
)

Definition at line 923 of file bufpage.c.

924{
925 int space;
926
927 /*
928 * Use signed arithmetic here so that we behave sensibly if pd_lower >
929 * pd_upper.
930 */
931 space = (int) ((PageHeader) page)->pd_upper -
932 (int) ((PageHeader) page)->pd_lower;
933
934 if (space < (int) (ntups * sizeof(ItemIdData)))
935 return 0;
936 space -= ntups * sizeof(ItemIdData);
937
938 return (Size) space;
939}

Referenced by _hash_splitbucket(), and _hash_squeezebucket().
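
To make the distinction between the three free-space functions concrete, here is an illustrative fragment (the helper name and calling convention are assumptions, not PostgreSQL code): PageGetExactFreeSpace() reports the raw gap between pd_lower and pd_upper, PageGetFreeSpace() additionally reserves room for one new line pointer, and PageGetFreeSpaceForMultipleTuples() reserves room for ntups of them.

#include "postgres.h"
#include "storage/bufpage.h"

/*
 * Would 'ntups' new tuples of 'tuplen' bytes each, plus their line
 * pointers, fit on 'page'?  Purely illustrative.
 */
static bool
demo_tuples_would_fit(Page page, int ntups, Size tuplen)
{
    Size        needed = ntups * MAXALIGN(tuplen);

    /* raw gap between pd_lower and pd_upper, no line-pointer allowance */
    elog(DEBUG1, "exact free space: %zu", PageGetExactFreeSpace(page));

    /* same, minus room for one new line pointer */
    elog(DEBUG1, "free space for one new tuple: %zu", PageGetFreeSpace(page));

    /* same, minus room for 'ntups' new line pointers */
    return PageGetFreeSpaceForMultipleTuples(page, ntups) >= needed;
}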

◆ PageGetHeapFreeSpace()

Size PageGetHeapFreeSpace ( Page  page)

Definition at line 980 of file bufpage.c.

981{
982 Size space;
983
984 space = PageGetFreeSpace(page);
985 if (space > 0)
986 {
987 OffsetNumber offnum,
988 nline;
989
990 /*
991 * Are there already MaxHeapTuplesPerPage line pointers in the page?
992 */
993 nline = PageGetMaxOffsetNumber(page);
994 if (nline >= MaxHeapTuplesPerPage)
995 {
996 if (PageHasFreeLinePointers(page))
997 {
998 /*
999 * Since this is just a hint, we must confirm that there is
1000 * indeed a free line pointer
1001 */
1002 for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
1003 {
1004 ItemId lp = PageGetItemId(page, offnum);
1005
1006 if (!ItemIdIsUsed(lp))
1007 break;
1008 }
1009
1010 if (offnum > nline)
1011 {
1012 /*
1013 * The hint is wrong, but we can't clear it here since we
1014 * don't have the ability to mark the page dirty.
1015 */
1016 space = 0;
1017 }
1018 }
1019 else
1020 {
1021 /*
1022 * Although the hint might be wrong, PageAddItem will believe
1023 * it anyway, so we must believe it too.
1024 */
1025 space = 0;
1026 }
1027 }
1028 }
1029 return space;
1030}
Size PageGetFreeSpace(Page page)
Definition: bufpage.c:896

References FirstOffsetNumber, ItemIdIsUsed, MaxHeapTuplesPerPage, OffsetNumberNext, PageGetFreeSpace(), PageGetItemId(), PageGetMaxOffsetNumber(), and PageHasFreeLinePointers().

Referenced by heap_multi_insert(), heap_page_prune_opt(), heap_update(), heap_xlog_insert(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), raw_heap_insert(), and RelationGetBufferForTuple().
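
As a short illustration of why heap code uses this function rather than PageGetFreeSpace(): the return value is already forced to zero when the page has MaxHeapTuplesPerPage line pointers and none of them can be recycled, so a single comparison suffices. The helper name below is an assumption for this sketch.

#include "postgres.h"
#include "storage/bufpage.h"

/*
 * Can a heap tuple of 'len' bytes (not yet MAXALIGN'd) be placed on
 * 'page'?  No separate check against MaxHeapTuplesPerPage is needed,
 * because PageGetHeapFreeSpace() folds that limit into its result.
 */
static bool
demo_heap_tuple_fits(Page page, Size len)
{
    return MAXALIGN(len) <= PageGetHeapFreeSpace(page);
}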

◆ PageGetTempPage()

Page PageGetTempPage ( Page  page)

Definition at line 354 of file bufpage.c.

355{
356 Size pageSize;
357 Page temp;
358
359 pageSize = PageGetPageSize(page);
360 temp = (Page) palloc(pageSize);
361
362 return temp;
363}
Pointer Page
Definition: bufpage.h:81
static Size PageGetPageSize(Page page)
Definition: bufpage.h:276
void * palloc(Size size)
Definition: mcxt.c:1317

References PageGetPageSize(), and palloc().

Referenced by _bt_split(), dataSplitPageInternal(), and ginPlaceToPage().

◆ PageGetTempPageCopy()

Page PageGetTempPageCopy ( Page  page)

Definition at line 371 of file bufpage.c.

372{
373 Size pageSize;
374 Page temp;
375
376 pageSize = PageGetPageSize(page);
377 temp = (Page) palloc(pageSize);
378
379 memcpy(temp, page, pageSize);
380
381 return temp;
382}

References PageGetPageSize(), and palloc().

Referenced by entrySplitPage(), and ginVacuumEntryPage().

◆ PageGetTempPageCopySpecial()

Page PageGetTempPageCopySpecial ( Page  page)

Definition at line 391 of file bufpage.c.

392{
393 Size pageSize;
394 Page temp;
395
396 pageSize = PageGetPageSize(page);
397 temp = (Page) palloc(pageSize);
398
399 PageInit(temp, pageSize, PageGetSpecialSize(page));
400 memcpy(PageGetSpecialPointer(temp),
401 PageGetSpecialPointer(page),
402 PageGetSpecialSize(page));
403
404 return temp;
405}
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:42
static char * PageGetSpecialPointer(Page page)
Definition: bufpage.h:339
static uint16 PageGetSpecialSize(Page page)
Definition: bufpage.h:316

References PageGetPageSize(), PageGetSpecialPointer(), PageGetSpecialSize(), PageInit(), and palloc().

Referenced by _bt_dedup_pass(), btree_xlog_dedup(), btree_xlog_split(), and gistplacetopage().
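
The temp-page functions are normally used together in the pattern sketched below (an illustration only; keep_item() and the helper name are assumptions): rebuild a page image in a palloc'd copy that carries the same special space, then install it over the original with PageRestoreTempPage() once the rebuild has succeeded. Real callers such as _bt_dedup_pass() additionally hold an exclusive buffer lock and WAL-log the change.

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

/* hypothetical filter deciding which items survive the rewrite */
static bool
keep_item(Item item, Size len)
{
    return true;                /* placeholder predicate */
}

/* Rebuild 'page', keeping only the items accepted by keep_item(). */
static void
demo_rewrite_page(Page page)
{
    Page        temp = PageGetTempPageCopySpecial(page);
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
    OffsetNumber off;

    for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
    {
        ItemId      itemid = PageGetItemId(page, off);
        Item        item;
        Size        itemsz;

        if (!ItemIdIsUsed(itemid))
            continue;           /* skip unused line pointers */

        item = PageGetItem(page, itemid);
        itemsz = ItemIdGetLength(itemid);

        if (!keep_item(item, itemsz))
            continue;

        if (PageAddItemExtended(temp, item, itemsz,
                                InvalidOffsetNumber, 0) == InvalidOffsetNumber)
            elog(ERROR, "could not add item to temporary page");
    }

    /* Copy the rebuilt image back over 'page' and pfree the temp page. */
    PageRestoreTempPage(temp, page);
}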

◆ PageIndexMultiDelete()

void PageIndexMultiDelete ( Page  page,
OffsetNumber itemnos,
int  nitems 
)

Definition at line 1150 of file bufpage.c.

1151{
1152 PageHeader phdr = (PageHeader) page;
1153 Offset pd_lower = phdr->pd_lower;
1154 Offset pd_upper = phdr->pd_upper;
1155 Offset pd_special = phdr->pd_special;
1156 Offset last_offset;
1157 itemIdCompactData itemidbase[MaxIndexTuplesPerPage];
1158 ItemIdData newitemids[MaxIndexTuplesPerPage];
1159 itemIdCompact itemidptr;
1160 ItemId lp;
1161 int nline,
1162 nused;
1163 Size totallen;
1164 Size size;
1165 unsigned offset;
1166 int nextitm;
1167 OffsetNumber offnum;
1168 bool presorted = true; /* For now */
1169
1170 Assert(nitems <= MaxIndexTuplesPerPage);
1171
1172 /*
1173 * If there aren't very many items to delete, then retail
1174 * PageIndexTupleDelete is the best way. Delete the items in reverse
1175 * order so we don't have to think about adjusting item numbers for
1176 * previous deletions.
1177 *
1178 * TODO: tune the magic number here
1179 */
1180 if (nitems <= 2)
1181 {
1182 while (--nitems >= 0)
1183 PageIndexTupleDelete(page, itemnos[nitems]);
1184 return;
1185 }
1186
1187 /*
1188 * As with PageRepairFragmentation, paranoia seems justified.
1189 */
1190 if (pd_lower < SizeOfPageHeaderData ||
1191 pd_lower > pd_upper ||
1192 pd_upper > pd_special ||
1193 pd_special > BLCKSZ ||
1194 pd_special != MAXALIGN(pd_special))
1195 ereport(ERROR,
1196 (errcode(ERRCODE_DATA_CORRUPTED),
1197 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1198 pd_lower, pd_upper, pd_special)));
1199
1200 /*
1201 * Scan the line pointer array and build a list of just the ones we are
1202 * going to keep. Notice we do not modify the page yet, since we are
1203 * still validity-checking.
1204 */
1205 nline = PageGetMaxOffsetNumber(page);
1206 itemidptr = itemidbase;
1207 totallen = 0;
1208 nused = 0;
1209 nextitm = 0;
1210 last_offset = pd_special;
1211 for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
1212 {
1213 lp = PageGetItemId(page, offnum);
1214 Assert(ItemIdHasStorage(lp));
1215 size = ItemIdGetLength(lp);
1216 offset = ItemIdGetOffset(lp);
1217 if (offset < pd_upper ||
1218 (offset + size) > pd_special ||
1219 offset != MAXALIGN(offset))
1220 ereport(ERROR,
1221 (errcode(ERRCODE_DATA_CORRUPTED),
1222 errmsg("corrupted line pointer: offset = %u, size = %u",
1223 offset, (unsigned int) size)));
1224
1225 if (nextitm < nitems && offnum == itemnos[nextitm])
1226 {
1227 /* skip item to be deleted */
1228 nextitm++;
1229 }
1230 else
1231 {
1232 itemidptr->offsetindex = nused; /* where it will go */
1233 itemidptr->itemoff = offset;
1234
1235 if (last_offset > itemidptr->itemoff)
1236 last_offset = itemidptr->itemoff;
1237 else
1238 presorted = false;
1239
1240 itemidptr->alignedlen = MAXALIGN(size);
1241 totallen += itemidptr->alignedlen;
1242 newitemids[nused] = *lp;
1243 itemidptr++;
1244 nused++;
1245 }
1246 }
1247
1248 /* this will catch invalid or out-of-order itemnos[] */
1249 if (nextitm != nitems)
1250 elog(ERROR, "incorrect index offsets supplied");
1251
1252 if (totallen > (Size) (pd_special - pd_lower))
1253 ereport(ERROR,
1254 (errcode(ERRCODE_DATA_CORRUPTED),
1255 errmsg("corrupted item lengths: total %u, available space %u",
1256 (unsigned int) totallen, pd_special - pd_lower)));
1257
1258 /*
1259 * Looks good. Overwrite the line pointers with the copy, from which we've
1260 * removed all the unused items.
1261 */
1262 memcpy(phdr->pd_linp, newitemids, nused * sizeof(ItemIdData));
1263 phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
1264
1265 /* and compactify the tuple data */
1266 if (nused > 0)
1267 compactify_tuples(itemidbase, nused, page, presorted);
1268 else
1269 phdr->pd_upper = pd_special;
1270}
static void compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorted)
Definition: bufpage.c:463
void PageIndexTupleDelete(Page page, OffsetNumber offnum)
Definition: bufpage.c:1041
#define ERROR
Definition: elog.h:39
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdGetOffset(itemId)
Definition: itemid.h:65
#define MaxIndexTuplesPerPage
Definition: itup.h:167
ItemIdData pd_linp[FLEXIBLE_ARRAY_MEMBER]
Definition: bufpage.h:170

References itemIdCompactData::alignedlen, Assert, compactify_tuples(), elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, FirstOffsetNumber, ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, itemIdCompactData::itemoff, MAXALIGN, MaxIndexTuplesPerPage, nitems, itemIdCompactData::offsetindex, OffsetNumberNext, PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexTupleDelete(), PageHeaderData::pd_linp, PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, size, and SizeOfPageHeaderData.

Referenced by _bt_delitems_delete(), _bt_delitems_vacuum(), _hash_squeezebucket(), _hash_vacuum_one_page(), btree_xlog_delete(), btree_xlog_vacuum(), gistprunepage(), gistRedoDeleteRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), spgPageIndexMultiDelete(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), vacuumLeafRoot(), and vacuumRedirectAndPlaceholder().
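
A typical calling pattern for PageIndexMultiDelete() (an illustrative sketch; the deletable[] array and the helper name are assumptions): collect the offsets of the tuples to remove in ascending order, which the function requires, and make one call rather than repeated PageIndexTupleDelete() calls.

#include "postgres.h"
#include "access/itup.h"
#include "storage/bufpage.h"

/* Delete every tuple whose (1-based) offset is flagged in 'deletable'. */
static void
demo_delete_marked(Page page, const bool *deletable)
{
    OffsetNumber deloffs[MaxIndexTuplesPerPage];
    int         ndeletable = 0;
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
    OffsetNumber off;

    /* the forward scan keeps the offsets in the required ascending order */
    for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
    {
        if (deletable[off - 1])
            deloffs[ndeletable++] = off;
    }

    if (ndeletable > 0)
        PageIndexMultiDelete(page, deloffs, ndeletable);
}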

◆ PageIndexTupleDelete()

void PageIndexTupleDelete ( Page  page,
OffsetNumber  offnum 
)

Definition at line 1041 of file bufpage.c.

1042{
1043 PageHeader phdr = (PageHeader) page;
1044 char *addr;
1045 ItemId tup;
1046 Size size;
1047 unsigned offset;
1048 int nbytes;
1049 int offidx;
1050 int nline;
1051
1052 /*
1053 * As with PageRepairFragmentation, paranoia seems justified.
1054 */
1055 if (phdr->pd_lower < SizeOfPageHeaderData ||
1056 phdr->pd_lower > phdr->pd_upper ||
1057 phdr->pd_upper > phdr->pd_special ||
1058 phdr->pd_special > BLCKSZ ||
1059 phdr->pd_special != MAXALIGN(phdr->pd_special))
1060 ereport(ERROR,
1061 (errcode(ERRCODE_DATA_CORRUPTED),
1062 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1063 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1064
1065 nline = PageGetMaxOffsetNumber(page);
1066 if ((int) offnum <= 0 || (int) offnum > nline)
1067 elog(ERROR, "invalid index offnum: %u", offnum);
1068
1069 /* change offset number to offset index */
1070 offidx = offnum - 1;
1071
1072 tup = PageGetItemId(page, offnum);
1073 Assert(ItemIdHasStorage(tup));
1074 size = ItemIdGetLength(tup);
1075 offset = ItemIdGetOffset(tup);
1076
1077 if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
1078 offset != MAXALIGN(offset))
1079 ereport(ERROR,
1080 (errcode(ERRCODE_DATA_CORRUPTED),
1081 errmsg("corrupted line pointer: offset = %u, size = %u",
1082 offset, (unsigned int) size)));
1083
1084 /* Amount of space to actually be deleted */
1085 size = MAXALIGN(size);
1086
1087 /*
1088 * First, we want to get rid of the pd_linp entry for the index tuple. We
1089 * copy all subsequent linp's back one slot in the array. We don't use
1090 * PageGetItemId, because we are manipulating the _array_, not individual
1091 * linp's.
1092 */
1093 nbytes = phdr->pd_lower -
1094 ((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
1095
1096 if (nbytes > 0)
1097 memmove((char *) &(phdr->pd_linp[offidx]),
1098 (char *) &(phdr->pd_linp[offidx + 1]),
1099 nbytes);
1100
1101 /*
1102 * Now move everything between the old upper bound (beginning of tuple
1103 * space) and the beginning of the deleted tuple forward, so that space in
1104 * the middle of the page is left free. If we've just deleted the tuple
1105 * at the beginning of tuple space, then there's no need to do the copy.
1106 */
1107
1108 /* beginning of tuple space */
1109 addr = (char *) page + phdr->pd_upper;
1110
1111 if (offset > phdr->pd_upper)
1112 memmove(addr + size, addr, offset - phdr->pd_upper);
1113
1114 /* adjust free space boundary pointers */
1115 phdr->pd_upper += size;
1116 phdr->pd_lower -= sizeof(ItemIdData);
1117
1118 /*
1119 * Finally, we need to adjust the linp entries that remain.
1120 *
1121 * Anything that used to be before the deleted tuple's data was moved
1122 * forward by the size of the deleted tuple.
1123 */
1124 if (!PageIsEmpty(page))
1125 {
1126 int i;
1127
1128 nline--; /* there's one less than when we started */
1129 for (i = 1; i <= nline; i++)
1130 {
1131 ItemId ii = PageGetItemId(page, i);
1132
1133 Assert(ItemIdHasStorage(ii));
1134 if (ItemIdGetOffset(ii) <= offset)
1135 ii->lp_off += size;
1136 }
1137 }
1138}
static bool PageIsEmpty(Page page)
Definition: bufpage.h:223
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76

References Assert, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, i, if(), ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdData::lp_off, MAXALIGN, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageHeaderData::pd_linp, PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, size, and SizeOfPageHeaderData.

Referenced by _bt_mark_page_halfdead(), addLeafTuple(), addOrReplaceTuple(), btree_xlog_mark_page_halfdead(), entryPreparePage(), ginRedoInsertEntry(), ginVacuumEntryPage(), gistdeletepage(), gistplacetopage(), gistRedoPageDelete(), PageIndexMultiDelete(), spgAddNodeAction(), SpGistPageAddNewItem(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoSplitTuple(), and spgSplitNodeAction().

◆ PageIndexTupleDeleteNoCompact()

void PageIndexTupleDeleteNoCompact ( Page  page,
OffsetNumber  offnum 
)

Definition at line 1284 of file bufpage.c.

1285{
1286 PageHeader phdr = (PageHeader) page;
1287 char *addr;
1288 ItemId tup;
1289 Size size;
1290 unsigned offset;
1291 int nline;
1292
1293 /*
1294 * As with PageRepairFragmentation, paranoia seems justified.
1295 */
1296 if (phdr->pd_lower < SizeOfPageHeaderData ||
1297 phdr->pd_lower > phdr->pd_upper ||
1298 phdr->pd_upper > phdr->pd_special ||
1299 phdr->pd_special > BLCKSZ ||
1300 phdr->pd_special != MAXALIGN(phdr->pd_special))
1301 ereport(ERROR,
1302 (errcode(ERRCODE_DATA_CORRUPTED),
1303 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1304 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1305
1306 nline = PageGetMaxOffsetNumber(page);
1307 if ((int) offnum <= 0 || (int) offnum > nline)
1308 elog(ERROR, "invalid index offnum: %u", offnum);
1309
1310 tup = PageGetItemId(page, offnum);
1311 Assert(ItemIdHasStorage(tup));
1312 size = ItemIdGetLength(tup);
1313 offset = ItemIdGetOffset(tup);
1314
1315 if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
1316 offset != MAXALIGN(offset))
1317 ereport(ERROR,
1318 (errcode(ERRCODE_DATA_CORRUPTED),
1319 errmsg("corrupted line pointer: offset = %u, size = %u",
1320 offset, (unsigned int) size)));
1321
1322 /* Amount of space to actually be deleted */
1323 size = MAXALIGN(size);
1324
1325 /*
1326 * Either set the line pointer to "unused", or zap it if it's the last
1327 * one. (Note: it's possible that the next-to-last one(s) are already
1328 * unused, but we do not trouble to try to compact them out if so.)
1329 */
1330 if ((int) offnum < nline)
1331 ItemIdSetUnused(tup);
1332 else
1333 {
1334 phdr->pd_lower -= sizeof(ItemIdData);
1335 nline--; /* there's one less than when we started */
1336 }
1337
1338 /*
1339 * Now move everything between the old upper bound (beginning of tuple
1340 * space) and the beginning of the deleted tuple forward, so that space in
1341 * the middle of the page is left free. If we've just deleted the tuple
1342 * at the beginning of tuple space, then there's no need to do the copy.
1343 */
1344
1345 /* beginning of tuple space */
1346 addr = (char *) page + phdr->pd_upper;
1347
1348 if (offset > phdr->pd_upper)
1349 memmove(addr + size, addr, offset - phdr->pd_upper);
1350
1351 /* adjust free space boundary pointer */
1352 phdr->pd_upper += size;
1353
1354 /*
1355 * Finally, we need to adjust the linp entries that remain.
1356 *
1357 * Anything that used to be before the deleted tuple's data was moved
1358 * forward by the size of the deleted tuple.
1359 */
1360 if (!PageIsEmpty(page))
1361 {
1362 int i;
1363
1364 for (i = 1; i <= nline; i++)
1365 {
1366 ItemId ii = PageGetItemId(page, i);
1367
1368 if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1369 ii->lp_off += size;
1370 }
1371 }
1372}
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128

References Assert, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, i, if(), ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdSetUnused, ItemIdData::lp_off, MAXALIGN, PageGetItemId(), PageGetMaxOffsetNumber(), PageIsEmpty(), PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, size, and SizeOfPageHeaderData.

Referenced by brin_doupdate(), brin_xlog_desummarize_page(), brin_xlog_update(), and brinRevmapDesummarizeRange().

◆ PageIndexTupleOverwrite()

bool PageIndexTupleOverwrite ( Page  page,
OffsetNumber  offnum,
Item  newtup,
Size  newsize 
)

Definition at line 1394 of file bufpage.c.

1396{
1397 PageHeader phdr = (PageHeader) page;
1398 ItemId tupid;
1399 int oldsize;
1400 unsigned offset;
1401 Size alignednewsize;
1402 int size_diff;
1403 int itemcount;
1404
1405 /*
1406 * As with PageRepairFragmentation, paranoia seems justified.
1407 */
1408 if (phdr->pd_lower < SizeOfPageHeaderData ||
1409 phdr->pd_lower > phdr->pd_upper ||
1410 phdr->pd_upper > phdr->pd_special ||
1411 phdr->pd_special > BLCKSZ ||
1412 phdr->pd_special != MAXALIGN(phdr->pd_special))
1413 ereport(ERROR,
1414 (errcode(ERRCODE_DATA_CORRUPTED),
1415 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1416 phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1417
1418 itemcount = PageGetMaxOffsetNumber(page);
1419 if ((int) offnum <= 0 || (int) offnum > itemcount)
1420 elog(ERROR, "invalid index offnum: %u", offnum);
1421
1422 tupid = PageGetItemId(page, offnum);
1423 Assert(ItemIdHasStorage(tupid));
1424 oldsize = ItemIdGetLength(tupid);
1425 offset = ItemIdGetOffset(tupid);
1426
1427 if (offset < phdr->pd_upper || (offset + oldsize) > phdr->pd_special ||
1428 offset != MAXALIGN(offset))
1429 ereport(ERROR,
1430 (errcode(ERRCODE_DATA_CORRUPTED),
1431 errmsg("corrupted line pointer: offset = %u, size = %u",
1432 offset, (unsigned int) oldsize)));
1433
1434 /*
1435 * Determine actual change in space requirement, check for page overflow.
1436 */
1437 oldsize = MAXALIGN(oldsize);
1438 alignednewsize = MAXALIGN(newsize);
1439 if (alignednewsize > oldsize + (phdr->pd_upper - phdr->pd_lower))
1440 return false;
1441
1442 /*
1443 * Relocate existing data and update line pointers, unless the new tuple
1444 * is the same size as the old (after alignment), in which case there's
1445 * nothing to do. Notice that what we have to relocate is data before the
1446 * target tuple, not data after, so it's convenient to express size_diff
1447 * as the amount by which the tuple's size is decreasing, making it the
1448 * delta to add to pd_upper and affected line pointers.
1449 */
1450 size_diff = oldsize - (int) alignednewsize;
1451 if (size_diff != 0)
1452 {
1453 char *addr = (char *) page + phdr->pd_upper;
1454 int i;
1455
1456 /* relocate all tuple data before the target tuple */
1457 memmove(addr + size_diff, addr, offset - phdr->pd_upper);
1458
1459 /* adjust free space boundary pointer */
1460 phdr->pd_upper += size_diff;
1461
1462 /* adjust affected line pointers too */
1463 for (i = FirstOffsetNumber; i <= itemcount; i++)
1464 {
1465 ItemId ii = PageGetItemId(page, i);
1466
1467 /* Allow items without storage; currently only BRIN needs that */
1468 if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1469 ii->lp_off += size_diff;
1470 }
1471 }
1472
1473 /* Update the item's tuple length without changing its lp_flags field */
1474 tupid->lp_off = offset + size_diff;
1475 tupid->lp_len = newsize;
1476
1477 /* Copy new tuple data onto page */
1478 memcpy(PageGetItem(page, tupid), newtup, newsize);
1479
1480 return true;
1481}
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:354
unsigned lp_len
Definition: itemid.h:29

References Assert, elog, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, FirstOffsetNumber, i, ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdData::lp_len, ItemIdData::lp_off, MAXALIGN, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, and SizeOfPageHeaderData.

Referenced by _bt_buildadd(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_mark_page_halfdead(), brin_doupdate(), brin_xlog_samepage_update(), btree_xlog_updates(), gistplacetopage(), and gistRedoPageUpdateRecord().
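
A small usage sketch (assumed helper name, not PostgreSQL code): because PageIndexTupleOverwrite() reclaims the old tuple's space before checking whether the replacement fits, and keeps the existing line pointer's lp_flags, it is the cheapest way to update a tuple in place; on failure the caller has to create room by other means, for example a page split.

#include "postgres.h"
#include "storage/bufpage.h"

/*
 * Try to replace the tuple at 'offnum' with 'newtup' (newsize bytes).
 * Returns true if the in-place overwrite succeeded; false means the
 * replacement is too large for the page even after reclaiming the old
 * tuple's space, and the caller must make room some other way.
 */
static bool
demo_replace_tuple(Page page, OffsetNumber offnum, Item newtup, Size newsize)
{
    if (!PageIndexTupleOverwrite(page, offnum, newtup, newsize))
        return false;

    /* lp_off and lp_len were updated; the line pointer's lp_flags kept */
    return true;
}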

◆ PageInit()

void PageInit ( Page  page,
Size  pageSize,
Size  specialSize 
)

Definition at line 42 of file bufpage.c.

43{
44 PageHeader p = (PageHeader) page;
45
46 specialSize = MAXALIGN(specialSize);
47
48 Assert(pageSize == BLCKSZ);
49 Assert(pageSize > specialSize + SizeOfPageHeaderData);
50
51 /* Make sure all fields of page are zero, as well as unused space */
52 MemSet(p, 0, pageSize);
53
54 p->pd_flags = 0;
55 p->pd_lower = SizeOfPageHeaderData;
56 p->pd_upper = pageSize - specialSize;
57 p->pd_special = pageSize - specialSize;
58 PageSetPageSizeAndVersion(page, pageSize, PG_PAGE_LAYOUT_VERSION);
59 /* p->pd_prune_xid = InvalidTransactionId; done by above MemSet */
60}
#define PG_PAGE_LAYOUT_VERSION
Definition: bufpage.h:205
static void PageSetPageSizeAndVersion(Page page, Size size, uint8 version)
Definition: bufpage.h:299
#define MemSet(start, val, len)
Definition: c.h:974
uint16 pd_flags
Definition: bufpage.h:164

References Assert, MAXALIGN, MemSet, PageSetPageSizeAndVersion(), PageHeaderData::pd_flags, PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, PG_PAGE_LAYOUT_VERSION, and SizeOfPageHeaderData.

Referenced by _bt_pageinit(), _hash_pageinit(), BloomInitPage(), brin_page_init(), fill_seq_fork_with_data(), fsm_readbuf(), GinInitPage(), gistinitpage(), heap_xlog_insert(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), PageGetTempPageCopySpecial(), raw_heap_insert(), RelationAddBlocks(), RelationGetBufferForTuple(), seq_redo(), SpGistInitPage(), vm_readbuf(), and XLogRecordPageWithFreeSpace().
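
For illustration, the fragment below initializes a page the way an access method's page-init helper would (DemoPageOpaqueData and the helper name are assumptions for this sketch; the functions themselves are the ones documented on this page). PageInit() MAXALIGNs the special-space size itself and zeroes the whole block first.

#include "postgres.h"
#include "storage/bufpage.h"

/* hypothetical per-page opaque data kept in the special space */
typedef struct DemoPageOpaqueData
{
    BlockNumber next_blkno;     /* right sibling, for example */
    uint16      flags;
} DemoPageOpaqueData;

static void
demo_page_init(Page page)
{
    DemoPageOpaqueData *opaque;

    PageInit(page, BLCKSZ, sizeof(DemoPageOpaqueData));

    opaque = (DemoPageOpaqueData *) PageGetSpecialPointer(page);
    opaque->next_blkno = InvalidBlockNumber;
    opaque->flags = 0;
}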

◆ PageIsVerifiedExtended()

bool PageIsVerifiedExtended ( Page  page,
BlockNumber  blkno,
int  flags 
)

Definition at line 88 of file bufpage.c.

89{
90 PageHeader p = (PageHeader) page;
91 size_t *pagebytes;
92 bool checksum_failure = false;
93 bool header_sane = false;
94 uint16 checksum = 0;
95
96 /*
97 * Don't verify page data unless the page passes basic non-zero test
98 */
99 if (!PageIsNew(page))
100 {
101 if (DataChecksumsEnabled())
102 {
103 checksum = pg_checksum_page((char *) page, blkno);
104
105 if (checksum != p->pd_checksum)
106 checksum_failure = true;
107 }
108
109 /*
110 * The following checks don't prove the header is correct, only that
111 * it looks sane enough to allow into the buffer pool. Later usage of
112 * the block can still reveal problems, which is why we offer the
113 * checksum option.
114 */
115 if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
116 p->pd_lower <= p->pd_upper &&
117 p->pd_upper <= p->pd_special &&
118 p->pd_special <= BLCKSZ &&
119 p->pd_special == MAXALIGN(p->pd_special))
120 header_sane = true;
121
122 if (header_sane && !checksum_failure)
123 return true;
124 }
125
126 /* Check all-zeroes case */
127 pagebytes = (size_t *) page;
128
129 if (pg_memory_is_all_zeros(pagebytes, BLCKSZ))
130 return true;
131
132 /*
133 * Throw a WARNING if the checksum fails, but only after we've checked for
134 * the all-zeroes case.
135 */
136 if (checksum_failure)
137 {
138 if ((flags & PIV_LOG_WARNING) != 0)
139 ereport(WARNING,
140 (errcode(ERRCODE_DATA_CORRUPTED),
141 errmsg("page verification failed, calculated checksum %u but expected %u",
142 checksum, p->pd_checksum)));
143
144 if ((flags & PIV_REPORT_STAT) != 0)
145 pgstat_report_checksum_failure();
146
147 if (header_sane && ignore_checksum_failure)
148 return true;
149 }
150
151 return false;
152}
bool ignore_checksum_failure
Definition: bufpage.c:27
#define PD_VALID_FLAG_BITS
Definition: bufpage.h:191
#define PIV_LOG_WARNING
Definition: bufpage.h:468
static bool PageIsNew(Page page)
Definition: bufpage.h:233
#define PIV_REPORT_STAT
Definition: bufpage.h:469
uint16_t uint16
Definition: c.h:484
uint16 pg_checksum_page(char *page, BlockNumber blkno)
static bool pg_memory_is_all_zeros(const void *ptr, size_t len)
Definition: memutils.h:219
static bool checksum_failure
void pgstat_report_checksum_failure(void)
uint16 pd_checksum
Definition: bufpage.h:163
bool DataChecksumsEnabled(void)
Definition: xlog.c:4588

References checksum_failure, DataChecksumsEnabled(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ignore_checksum_failure, MAXALIGN, PageIsNew(), PageHeaderData::pd_checksum, PageHeaderData::pd_flags, PageHeaderData::pd_lower, PageHeaderData::pd_special, PageHeaderData::pd_upper, PD_VALID_FLAG_BITS, pg_checksum_page(), pg_memory_is_all_zeros(), pgstat_report_checksum_failure(), PIV_LOG_WARNING, PIV_REPORT_STAT, and WARNING.

Referenced by RelationCopyStorage(), and WaitReadBuffers().
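
A hedged usage sketch (the helper name and error text are assumptions): callers run this check on a block they have just read from disk, asking for a WARNING and a checksum-failure statistics report, and decide themselves how hard to fail.

#include "postgres.h"
#include "storage/bufpage.h"

/* Reject a freshly read block unless it passes the page sanity checks. */
static void
demo_verify_block(Page page, BlockNumber blkno, const char *path)
{
    if (!PageIsVerifiedExtended(page, blkno,
                                PIV_LOG_WARNING | PIV_REPORT_STAT))
        ereport(ERROR,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg("invalid page in block %u of file \"%s\"",
                        blkno, path)));
}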

◆ PageRepairFragmentation()

void PageRepairFragmentation ( Page  page)

Definition at line 688 of file bufpage.c.

689{
690 Offset pd_lower = ((PageHeader) page)->pd_lower;
691 Offset pd_upper = ((PageHeader) page)->pd_upper;
692 Offset pd_special = ((PageHeader) page)->pd_special;
693 Offset last_offset;
694 itemIdCompactData itemidbase[MaxHeapTuplesPerPage];
695 itemIdCompact itemidptr;
696 ItemId lp;
697 int nline,
698 nstorage,
699 nunused;
700 OffsetNumber finalusedlp = InvalidOffsetNumber;
701 int i;
702 Size totallen;
703 bool presorted = true; /* For now */
704
705 /*
706 * It's worth the trouble to be more paranoid here than in most places,
707 * because we are about to reshuffle data in (what is usually) a shared
708 * disk buffer. If we aren't careful then corrupted pointers, lengths,
709 * etc could cause us to clobber adjacent disk buffers, spreading the data
710 * loss further. So, check everything.
711 */
712 if (pd_lower < SizeOfPageHeaderData ||
713 pd_lower > pd_upper ||
714 pd_upper > pd_special ||
715 pd_special > BLCKSZ ||
716 pd_special != MAXALIGN(pd_special))
717 ereport(ERROR,
718 (errcode(ERRCODE_DATA_CORRUPTED),
719 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
720 pd_lower, pd_upper, pd_special)));
721
722 /*
723 * Run through the line pointer array and collect data about live items.
724 */
725 nline = PageGetMaxOffsetNumber(page);
726 itemidptr = itemidbase;
727 nunused = totallen = 0;
728 last_offset = pd_special;
729 for (i = FirstOffsetNumber; i <= nline; i++)
730 {
731 lp = PageGetItemId(page, i);
732 if (ItemIdIsUsed(lp))
733 {
734 if (ItemIdHasStorage(lp))
735 {
736 itemidptr->offsetindex = i - 1;
737 itemidptr->itemoff = ItemIdGetOffset(lp);
738
739 if (last_offset > itemidptr->itemoff)
740 last_offset = itemidptr->itemoff;
741 else
742 presorted = false;
743
744 if (unlikely(itemidptr->itemoff < (int) pd_upper ||
745 itemidptr->itemoff >= (int) pd_special))
746 ereport(ERROR,
747 (errcode(ERRCODE_DATA_CORRUPTED),
748 errmsg("corrupted line pointer: %u",
749 itemidptr->itemoff)));
750 itemidptr->alignedlen = MAXALIGN(ItemIdGetLength(lp));
751 totallen += itemidptr->alignedlen;
752 itemidptr++;
753 }
754
755 finalusedlp = i; /* Could be the final non-LP_UNUSED item */
756 }
757 else
758 {
759 /* Unused entries should have lp_len = 0, but make sure */
760 Assert(!ItemIdHasStorage(lp));
761 ItemIdSetUnused(lp);
762 nunused++;
763 }
764 }
765
766 nstorage = itemidptr - itemidbase;
767 if (nstorage == 0)
768 {
769 /* Page is completely empty, so just reset it quickly */
770 ((PageHeader) page)->pd_upper = pd_special;
771 }
772 else
773 {
774 /* Need to compact the page the hard way */
775 if (totallen > (Size) (pd_special - pd_lower))
776 ereport(ERROR,
777 (errcode(ERRCODE_DATA_CORRUPTED),
778 errmsg("corrupted item lengths: total %u, available space %u",
779 (unsigned int) totallen, pd_special - pd_lower)));
780
781 compactify_tuples(itemidbase, nstorage, page, presorted);
782 }
783
784 if (finalusedlp != nline)
785 {
786 /* The last line pointer is not the last used line pointer */
787 int nunusedend = nline - finalusedlp;
788
789 Assert(nunused >= nunusedend && nunusedend > 0);
790
791 /* remove trailing unused line pointers from the count */
792 nunused -= nunusedend;
793 /* truncate the line pointer array */
794 ((PageHeader) page)->pd_lower -= (sizeof(ItemIdData) * nunusedend);
795 }
796
797 /* Set hint bit for PageAddItemExtended */
798 if (nunused > 0)
799 PageSetHasFreeLinePointers(page);
800 else
801 PageClearHasFreeLinePointers(page);
802}
static void PageSetHasFreeLinePointers(Page page)
Definition: bufpage.h:402
#define unlikely(x)
Definition: c.h:330

References itemIdCompactData::alignedlen, Assert, compactify_tuples(), ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, FirstOffsetNumber, i, InvalidOffsetNumber, ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdIsUsed, ItemIdSetUnused, itemIdCompactData::itemoff, MAXALIGN, MaxHeapTuplesPerPage, itemIdCompactData::offsetindex, PageClearHasFreeLinePointers(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetHasFreeLinePointers(), SizeOfPageHeaderData, and unlikely.

Referenced by heap_page_prune_execute().

◆ PageRestoreTempPage()

void PageRestoreTempPage ( Page  tempPage,
Page  oldPage 
)

Definition at line 413 of file bufpage.c.

414{
415 Size pageSize;
416
417 pageSize = PageGetPageSize(tempPage);
418 memcpy((char *) oldPage, (char *) tempPage, pageSize);
419
420 pfree(tempPage);
421}
void pfree(void *pointer)
Definition: mcxt.c:1521

References PageGetPageSize(), and pfree().

Referenced by _bt_dedup_pass(), _bt_split(), btree_xlog_dedup(), btree_xlog_split(), createPostingTree(), ginbulkdelete(), and gistplacetopage().

◆ PageSetChecksumCopy()

char * PageSetChecksumCopy ( Page  page,
BlockNumber  blkno 
)

Definition at line 1499 of file bufpage.c.

1500{
1501 static char *pageCopy = NULL;
1502
1503 /* If we don't need a checksum, just return the passed-in data */
1504 if (PageIsNew(page) || !DataChecksumsEnabled())
1505 return (char *) page;
1506
1507 /*
1508 * We allocate the copy space once and use it over on each subsequent
1509 * call. The point of palloc'ing here, rather than having a static char
1510 * array, is first to ensure adequate alignment for the checksumming code
1511 * and second to avoid wasting space in processes that never call this.
1512 */
1513 if (pageCopy == NULL)
1514 pageCopy = MemoryContextAllocAligned(TopMemoryContext,
1515 BLCKSZ,
1516 PG_IO_ALIGN_SIZE,
1517 0);
1518
1519 memcpy(pageCopy, (char *) page, BLCKSZ);
1520 ((PageHeader) pageCopy)->pd_checksum = pg_checksum_page(pageCopy, blkno);
1521 return pageCopy;
1522}
void * MemoryContextAllocAligned(MemoryContext context, Size size, Size alignto, int flags)
Definition: mcxt.c:1409
MemoryContext TopMemoryContext
Definition: mcxt.c:149
#define PG_IO_ALIGN_SIZE

References DataChecksumsEnabled(), MemoryContextAllocAligned(), PageIsNew(), pg_checksum_page(), PG_IO_ALIGN_SIZE, and TopMemoryContext.

Referenced by FlushBuffer().

◆ PageSetChecksumInplace()

void PageSetChecksumInplace ( Page  page,
BlockNumber  blkno 
)

Definition at line 1531 of file bufpage.c.

1532{
1533 /* If we don't need a checksum, just return */
1534 if (PageIsNew(page) || !DataChecksumsEnabled())
1535 return;
1536
1537 ((PageHeader) page)->pd_checksum = pg_checksum_page((char *) page, blkno);
1538}

References DataChecksumsEnabled(), PageIsNew(), and pg_checksum_page().

Referenced by _hash_alloc_buckets(), FlushRelationBuffers(), GetLocalVictimBuffer(), and smgr_bulk_flush().
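
As a usage note with a minimal sketch (assumed helper name): PageSetChecksumInplace() is only safe for a page image that no other backend can modify concurrently, such as a local buffer or a private copy being bulk-written; pages flushed from shared buffers go through PageSetChecksumCopy() instead, so that concurrent hint-bit setters cannot invalidate the checksum while the page is being written.

#include "postgres.h"
#include "storage/bufpage.h"

/* Stamp a privately owned page image with its checksum before writing. */
static void
demo_checksum_before_write(Page page, BlockNumber blkno)
{
    PageSetChecksumInplace(page, blkno);

    /* ... hand the page image to the storage manager (smgr) for writing ... */
}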

◆ PageTruncateLinePointerArray()

void PageTruncateLinePointerArray ( Page  page)

Definition at line 824 of file bufpage.c.

825{
826 PageHeader phdr = (PageHeader) page;
827 bool countdone = false,
828 sethint = false;
829 int nunusedend = 0;
830
831 /* Scan line pointer array back-to-front */
832 for (int i = PageGetMaxOffsetNumber(page); i >= FirstOffsetNumber; i--)
833 {
834 ItemId lp = PageGetItemId(page, i);
835
836 if (!countdone && i > FirstOffsetNumber)
837 {
838 /*
839 * Still determining which line pointers from the end of the array
840 * will be truncated away. Either count another line pointer as
841 * safe to truncate, or notice that it's not safe to truncate
842 * additional line pointers (stop counting line pointers).
843 */
844 if (!ItemIdIsUsed(lp))
845 nunusedend++;
846 else
847 countdone = true;
848 }
849 else
850 {
851 /*
852 * Once we've stopped counting we still need to figure out if
853 * there are any remaining LP_UNUSED line pointers somewhere more
854 * towards the front of the array.
855 */
856 if (!ItemIdIsUsed(lp))
857 {
858 /*
859 * This is an unused line pointer that we won't be truncating
860 * away -- so there is at least one. Set hint on page.
861 */
862 sethint = true;
863 break;
864 }
865 }
866 }
867
868 if (nunusedend > 0)
869 {
870 phdr->pd_lower -= sizeof(ItemIdData) * nunusedend;
871
872#ifdef CLOBBER_FREED_MEMORY
873 memset((char *) page + phdr->pd_lower, 0x7F,
874 sizeof(ItemIdData) * nunusedend);
875#endif
876 }
877 else
878 Assert(sethint);
879
880 /* Set hint bit for PageAddItemExtended */
881 if (sethint)
882 PageSetHasFreeLinePointers(page);
883 else
884 PageClearHasFreeLinePointers(page);
885}

References Assert, FirstOffsetNumber, i, ItemIdIsUsed, PageClearHasFreeLinePointers(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetHasFreeLinePointers(), and PageHeaderData::pd_lower.

Referenced by heap_page_prune_execute(), and lazy_vacuum_heap_page().

Variable Documentation

◆ ignore_checksum_failure

bool ignore_checksum_failure = false

Definition at line 27 of file bufpage.c.

Referenced by PageIsVerifiedExtended().