bufpage.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufpage.c
4  * POSTGRES standard buffer page code.
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/page/bufpage.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/htup_details.h"
18 #include "access/itup.h"
19 #include "access/xlog.h"
20 #include "pgstat.h"
21 #include "storage/checksum.h"
22 #include "utils/memdebug.h"
23 #include "utils/memutils.h"
24 
25 
26 /* GUC variable */
27 bool ignore_checksum_failure = false;
28 
29 
30 /* ----------------------------------------------------------------
31  * Page support functions
32  * ----------------------------------------------------------------
33  */
34 
35 /*
36  * PageInit
37  * Initializes the contents of a page.
38  * Note that we don't calculate an initial checksum here; that's not done
39  * until it's time to write.
40  */
41 void
42 PageInit(Page page, Size pageSize, Size specialSize)
43 {
44  PageHeader p = (PageHeader) page;
45 
46  specialSize = MAXALIGN(specialSize);
47 
48  Assert(pageSize == BLCKSZ);
49  Assert(pageSize > specialSize + SizeOfPageHeaderData);
50 
51  /* Make sure all fields of page are zero, as well as unused space */
52  MemSet(p, 0, pageSize);
53 
54  p->pd_flags = 0;
55  p->pd_lower = SizeOfPageHeaderData;
56  p->pd_upper = pageSize - specialSize;
57  p->pd_special = pageSize - specialSize;
58  PageSetPageSizeAndVersion(page, pageSize, PG_PAGE_LAYOUT_VERSION);
59  /* p->pd_prune_xid = InvalidTransactionId; done by above MemSet */
60 }
61 
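Usage sketch (not part of bufpage.c): an index AM typically initializes a
freshly allocated block this way, reserving special space for its own opaque
struct. "DemoOpaqueData" and "demo_make_page" are hypothetical names.

#include "postgres.h"
#include "storage/bufpage.h"

typedef struct DemoOpaqueData	/* hypothetical AM-specific data */
{
	uint16		flags;
} DemoOpaqueData;

static Page
demo_make_page(void)
{
	Page		page = (Page) palloc(BLCKSZ);

	/* PageInit zeroes the block and MAXALIGNs the special size itself */
	PageInit(page, BLCKSZ, sizeof(DemoOpaqueData));
	return page;
}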
62 
63 /*
64  * PageIsVerified
65  * Check that the page header and checksum (if any) appear valid.
66  *
67  * This is called when a page has just been read in from disk. The idea is
68  * to cheaply detect trashed pages before we go nuts following bogus line
69  * pointers, testing invalid transaction identifiers, etc.
70  *
71  * It turns out to be necessary to allow zeroed pages here too. Even though
72  * this routine is *not* called when deliberately adding a page to a relation,
73  * there are scenarios in which a zeroed page might be found in a table.
74  * (Example: a backend extends a relation, then crashes before it can write
75  * any WAL entry about the new page. The kernel will already have the
76  * zeroed page in the file, and it will stay that way after restart.) So we
77  * allow zeroed pages here, and are careful that the page access macros
78  * treat such a page as empty and without free space. Eventually, VACUUM
79  * will clean up such a page and make it usable.
80  */
81 bool
82 PageIsVerified(Page page, BlockNumber blkno)
83 {
84  PageHeader p = (PageHeader) page;
85  size_t *pagebytes;
86  int i;
87  bool checksum_failure = false;
88  bool header_sane = false;
89  bool all_zeroes = false;
90  uint16 checksum = 0;
91 
92  /*
93  * Don't verify page data unless the page passes basic non-zero test
94  */
95  if (!PageIsNew(page))
96  {
97  if (DataChecksumsEnabled())
98  {
99  checksum = pg_checksum_page((char *) page, blkno);
100 
101  if (checksum != p->pd_checksum)
102  checksum_failure = true;
103  }
104 
105  /*
106  * The following checks don't prove the header is correct, only that
107  * it looks sane enough to allow into the buffer pool. Later usage of
108  * the block can still reveal problems, which is why we offer the
109  * checksum option.
110  */
111  if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
112  p->pd_lower <= p->pd_upper &&
113  p->pd_upper <= p->pd_special &&
114  p->pd_special <= BLCKSZ &&
115  p->pd_special == MAXALIGN(p->pd_special))
116  header_sane = true;
117 
118  if (header_sane && !checksum_failure)
119  return true;
120  }
121 
122  /* Check all-zeroes case */
123  all_zeroes = true;
124  pagebytes = (size_t *) page;
125  for (i = 0; i < (BLCKSZ / sizeof(size_t)); i++)
126  {
127  if (pagebytes[i] != 0)
128  {
129  all_zeroes = false;
130  break;
131  }
132  }
133 
134  if (all_zeroes)
135  return true;
136 
137  /*
138  * Throw a WARNING if the checksum fails, but only after we've checked for
139  * the all-zeroes case.
140  */
141  if (checksum_failure)
142  {
143  ereport(WARNING,
144  (errcode(ERRCODE_DATA_CORRUPTED),
145  errmsg("page verification failed, calculated checksum %u but expected %u",
146  checksum, p->pd_checksum)));
147 
148  pgstat_report_checksum_failure();
149 
150  if (header_sane && ignore_checksum_failure)
151  return true;
152  }
153 
154  return false;
155 }
156 
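Usage sketch (modeled loosely on the buffer manager's read path, simplified):
verify a block image just read from disk and reject it on failure. The
"demo_check_page" wrapper is hypothetical.

#include "postgres.h"
#include "storage/bufpage.h"

static void
demo_check_page(Page page, BlockNumber blkno)
{
	if (!PageIsVerified(page, blkno))
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("invalid page in block %u", blkno)));
}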
157 
158 /*
159  * PageAddItemExtended
160  *
161  * Add an item to a page. Return value is the offset at which it was
162  * inserted, or InvalidOffsetNumber if the item is not inserted for any
163  * reason. A WARNING is issued indicating the reason for the refusal.
164  *
165  * offsetNumber must be either InvalidOffsetNumber to specify finding a
166  * free line pointer, or a value between FirstOffsetNumber and one past
167  * the last existing item, to specify using that particular line pointer.
168  *
169  * If offsetNumber is valid and flag PAI_OVERWRITE is set, we just store
170  * the item at the specified offsetNumber, which must be either a
171  * currently-unused line pointer, or one past the last existing item.
172  *
173  * If offsetNumber is valid and flag PAI_OVERWRITE is not set, insert
174  * the item at the specified offsetNumber, moving existing items later
175  * in the array to make room.
176  *
177  * If offsetNumber is not valid, then assign a slot by finding the first
178  * one that is both unused and deallocated.
179  *
180  * If flag PAI_IS_HEAP is set, we enforce that there can't be more than
181  * MaxHeapTuplesPerPage line pointers on the page.
182  *
183  * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!
184  */
185 OffsetNumber
186 PageAddItemExtended(Page page,
187  Item item,
188  Size size,
189  OffsetNumber offsetNumber,
190  int flags)
191 {
192  PageHeader phdr = (PageHeader) page;
193  Size alignedSize;
194  int lower;
195  int upper;
196  ItemId itemId;
197  OffsetNumber limit;
198  bool needshuffle = false;
199 
200  /*
201  * Be wary about corrupted page pointers
202  */
203  if (phdr->pd_lower < SizeOfPageHeaderData ||
204  phdr->pd_lower > phdr->pd_upper ||
205  phdr->pd_upper > phdr->pd_special ||
206  phdr->pd_special > BLCKSZ)
207  ereport(PANIC,
208  (errcode(ERRCODE_DATA_CORRUPTED),
209  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
210  phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
211 
212  /*
213  * Select offsetNumber to place the new item at
214  */
215  limit = OffsetNumberNext(PageGetMaxOffsetNumber(page));
216 
217  /* was offsetNumber passed in? */
218  if (OffsetNumberIsValid(offsetNumber))
219  {
220  /* yes, check it */
221  if ((flags & PAI_OVERWRITE) != 0)
222  {
223  if (offsetNumber < limit)
224  {
225  itemId = PageGetItemId(phdr, offsetNumber);
226  if (ItemIdIsUsed(itemId) || ItemIdHasStorage(itemId))
227  {
228  elog(WARNING, "will not overwrite a used ItemId");
229  return InvalidOffsetNumber;
230  }
231  }
232  }
233  else
234  {
235  if (offsetNumber < limit)
236  needshuffle = true; /* need to move existing linp's */
237  }
238  }
239  else
240  {
241  /* offsetNumber was not passed in, so find a free slot */
242  /* if no free slot, we'll put it at limit (1st open slot) */
243  if (PageHasFreeLinePointers(phdr))
244  {
245  /*
246  * Look for "recyclable" (unused) ItemId. We check for no storage
247  * as well, just to be paranoid --- unused items should never have
248  * storage.
249  */
250  for (offsetNumber = 1; offsetNumber < limit; offsetNumber++)
251  {
252  itemId = PageGetItemId(phdr, offsetNumber);
253  if (!ItemIdIsUsed(itemId) && !ItemIdHasStorage(itemId))
254  break;
255  }
256  if (offsetNumber >= limit)
257  {
258  /* the hint is wrong, so reset it */
259  PageClearHasFreeLinePointers(phdr);
260  }
261  }
262  else
263  {
264  /* don't bother searching if hint says there's no free slot */
265  offsetNumber = limit;
266  }
267  }
268 
269  /* Reject placing items beyond the first unused line pointer */
270  if (offsetNumber > limit)
271  {
272  elog(WARNING, "specified item offset is too large");
273  return InvalidOffsetNumber;
274  }
275 
276  /* Reject placing items beyond heap boundary, if heap */
277  if ((flags & PAI_IS_HEAP) != 0 && offsetNumber > MaxHeapTuplesPerPage)
278  {
279  elog(WARNING, "can't put more than MaxHeapTuplesPerPage items in a heap page");
280  return InvalidOffsetNumber;
281  }
282 
283  /*
284  * Compute new lower and upper pointers for page, see if it'll fit.
285  *
286  * Note: do arithmetic as signed ints, to avoid mistakes if, say,
287  * alignedSize > pd_upper.
288  */
289  if (offsetNumber == limit || needshuffle)
290  lower = phdr->pd_lower + sizeof(ItemIdData);
291  else
292  lower = phdr->pd_lower;
293 
294  alignedSize = MAXALIGN(size);
295 
296  upper = (int) phdr->pd_upper - (int) alignedSize;
297 
298  if (lower > upper)
299  return InvalidOffsetNumber;
300 
301  /*
302  * OK to insert the item. First, shuffle the existing pointers if needed.
303  */
304  itemId = PageGetItemId(phdr, offsetNumber);
305 
306  if (needshuffle)
307  memmove(itemId + 1, itemId,
308  (limit - offsetNumber) * sizeof(ItemIdData));
309 
310  /* set the line pointer */
311  ItemIdSetNormal(itemId, upper, size);
312 
313  /*
314  * Items normally contain no uninitialized bytes. Core bufpage consumers
315  * conform, but this is not a necessary coding rule; a new index AM could
316  * opt to depart from it. However, data type input functions and other
317  * C-language functions that synthesize datums should initialize all
318  * bytes; datumIsEqual() relies on this. Testing here, along with the
319  * similar check in printtup(), helps to catch such mistakes.
320  *
321  * Values of the "name" type retrieved via index-only scans may contain
322  * uninitialized bytes; see comment in btrescan(). Valgrind will report
323  * this as an error, but it is safe to ignore.
324  */
325  VALGRIND_CHECK_MEM_IS_DEFINED(item, size);
326 
327  /* copy the item's data onto the page */
328  memcpy((char *) page + upper, item, size);
329 
330  /* adjust page header */
331  phdr->pd_lower = (LocationIndex) lower;
332  phdr->pd_upper = (LocationIndex) upper;
333 
334  return offsetNumber;
335 }
336 
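Usage sketch (hypothetical "demo_append_item" helper): let the function pick a
free line pointer by passing InvalidOffsetNumber and no flags; callers that
need overwrite or heap semantics pass PAI_OVERWRITE or PAI_IS_HEAP instead.

#include "postgres.h"
#include "storage/bufpage.h"

static OffsetNumber
demo_append_item(Page page, Item item, Size size)
{
	OffsetNumber off;

	/* InvalidOffsetNumber => find the first usable line pointer */
	off = PageAddItemExtended(page, item, size, InvalidOffsetNumber, 0);
	if (off == InvalidOffsetNumber)
		elog(ERROR, "failed to add item to page");
	return off;
}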
337 
338 /*
339  * PageGetTempPage
340  * Get a temporary page in local memory for special processing.
341  * The returned page is not initialized at all; caller must do that.
342  */
343 Page
344 PageGetTempPage(Page page)
345 {
346  Size pageSize;
347  Page temp;
348 
349  pageSize = PageGetPageSize(page);
350  temp = (Page) palloc(pageSize);
351 
352  return temp;
353 }
354 
355 /*
356  * PageGetTempPageCopy
357  * Get a temporary page in local memory for special processing.
358  * The page is initialized by copying the contents of the given page.
359  */
360 Page
361 PageGetTempPageCopy(Page page)
362 {
363  Size pageSize;
364  Page temp;
365 
366  pageSize = PageGetPageSize(page);
367  temp = (Page) palloc(pageSize);
368 
369  memcpy(temp, page, pageSize);
370 
371  return temp;
372 }
373 
374 /*
375  * PageGetTempPageCopySpecial
376  * Get a temporary page in local memory for special processing.
377  * The page is PageInit'd with the same special-space size as the
378  * given page, and the special space is copied from the given page.
379  */
380 Page
381 PageGetTempPageCopySpecial(Page page)
382 {
383  Size pageSize;
384  Page temp;
385 
386  pageSize = PageGetPageSize(page);
387  temp = (Page) palloc(pageSize);
388 
389  PageInit(temp, pageSize, PageGetSpecialSize(page));
390  memcpy(PageGetSpecialPointer(temp),
391  PageGetSpecialPointer(page),
392  PageGetSpecialSize(page));
393 
394  return temp;
395 }
396 
397 /*
398  * PageRestoreTempPage
399  * Copy temporary page back to permanent page after special processing
400  * and release the temporary page.
401  */
402 void
403 PageRestoreTempPage(Page tempPage, Page oldPage)
404 {
405  Size pageSize;
406 
407  pageSize = PageGetPageSize(tempPage);
408  memcpy((char *) oldPage, (char *) tempPage, pageSize);
409 
410  pfree(tempPage);
411 }
412 
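The three PageGetTempPage* helpers above combine with PageRestoreTempPage in a
common rewrite pattern, sketched here with a hypothetical "demo_rebuild_page"
(real callers, e.g. index page splits, also add WAL logging and locking):

#include "postgres.h"
#include "storage/bufpage.h"

static void
demo_rebuild_page(Page page)
{
	Page		temp = PageGetTempPageCopySpecial(page);

	/* ... re-add the surviving tuples to 'temp' here ... */

	/* copies temp back over 'page', then pfrees temp */
	PageRestoreTempPage(temp, page);
}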
413 /*
414  * Tuple defrag support for PageRepairFragmentation and PageIndexMultiDelete
415  */
416 typedef struct itemIdCompactData
417 {
418  uint16 offsetindex; /* linp array index */
419  int16 itemoff; /* page offset of item data */
420  uint16 alignedlen; /* MAXALIGN(item data len) */
421 } itemIdCompactData;
422 typedef itemIdCompactData *itemIdCompact;
423 
424 /*
425  * After removing or marking some line pointers unused, move the tuples to
426  * remove the gaps caused by the removed items and reorder them back into
427  * reverse line pointer order in the page.
428  *
429  * This function can often be fairly hot, so it pays to take some measures to
430  * make it as optimal as possible.
431  *
432  * Callers may pass 'presorted' as true if the 'itemidbase' array is sorted in
433  * descending order of itemoff. When this is true we can just memmove()
434  * tuples towards the end of the page. This is quite a common case as it's
435  * the order that tuples are initially inserted into pages. When we call this
436  * function to defragment the tuples in the page then any new line pointers
437  * added to the page will keep that presorted order, so hitting this case is
438  * still very common for tables that are commonly updated.
439  *
440  * When the 'itemidbase' array is not presorted then we're unable to just
441  * memmove() tuples around freely. Doing so could cause us to overwrite the
442  * memory belonging to a tuple we've not moved yet. In this case, we copy all
443  * the tuples that need to be moved into a temporary buffer. We can then
444  * simply memcpy() out of that temp buffer back into the page at the correct
445  * location. Tuples are copied back into the page in the same order as the
446  * 'itemidbase' array, so we end up reordering the tuples back into reverse
447  * line pointer order. This will increase the chances of hitting the
448  * presorted case the next time around.
449  *
450  * Callers must ensure that nitems is > 0
451  */
452 static void
453 compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorted)
454 {
455  PageHeader phdr = (PageHeader) page;
456  Offset upper;
457  Offset copy_tail;
458  Offset copy_head;
459  itemIdCompact itemidptr;
460  int i;
461 
462  /* Code within will not work correctly if nitems == 0 */
463  Assert(nitems > 0);
464 
465  if (presorted)
466  {
467 
468 #ifdef USE_ASSERT_CHECKING
469  {
470  /*
471  * Verify we've not gotten any new callers that are incorrectly
472  * passing a true presorted value.
473  */
474  Offset lastoff = phdr->pd_special;
475 
476  for (i = 0; i < nitems; i++)
477  {
478  itemidptr = &itemidbase[i];
479 
480  Assert(lastoff > itemidptr->itemoff);
481 
482  lastoff = itemidptr->itemoff;
483  }
484  }
485 #endif /* USE_ASSERT_CHECKING */
486 
487  /*
488  * 'itemidbase' is already in the optimal order, i.e, lower item
489  * pointers have a higher offset. This allows us to memmove() the
490  * tuples up to the end of the page without having to worry about
491  * overwriting other tuples that have not been moved yet.
492  *
493  * There's a good chance that there are tuples already right at the
494  * end of the page that we can simply skip over because they're
495  * already in the correct location within the page. We'll do that
496  * first...
497  */
498  upper = phdr->pd_special;
499  i = 0;
500  do
501  {
502  itemidptr = &itemidbase[i];
503  if (upper != itemidptr->itemoff + itemidptr->alignedlen)
504  break;
505  upper -= itemidptr->alignedlen;
506 
507  i++;
508  } while (i < nitems);
509 
510  /*
511  * Now that we've found the first tuple that needs to be moved, we can
512  * do the tuple compactification. We try and make the least number of
513  * memmove() calls and only call memmove() when there's a gap. When
514  * we see a gap we just move all tuples after the gap up until the
515  * point of the last move operation.
516  */
517  copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
518  for (; i < nitems; i++)
519  {
520  ItemId lp;
521 
522  itemidptr = &itemidbase[i];
523  lp = PageGetItemId(page, itemidptr->offsetindex + 1);
524 
525  if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
526  {
527  memmove((char *) page + upper,
528  page + copy_head,
529  copy_tail - copy_head);
530 
531  /*
532  * We've now moved all tuples already seen, but not the
533  * current tuple, so we set the copy_tail to the end of this
534  * tuple so it can be moved in another iteration of the loop.
535  */
536  copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
537  }
538  /* shift the target offset down by the length of this tuple */
539  upper -= itemidptr->alignedlen;
540  /* point the copy_head to the start of this tuple */
541  copy_head = itemidptr->itemoff;
542 
543  /* update the line pointer to reference the new offset */
544  lp->lp_off = upper;
545 
546  }
547 
548  /* move the remaining tuples. */
549  memmove((char *) page + upper,
550  page + copy_head,
551  copy_tail - copy_head);
552  }
553  else
554  {
555  PGAlignedBlock scratch;
556  char *scratchptr = scratch.data;
557 
558  /*
559  * Non-presorted case: The tuples in the itemidbase array may be in
560  * any order. So, in order to move these to the end of the page we
561  * must make a temp copy of each tuple that needs to be moved before
562  * we copy them back into the page at the new offset.
563  *
564  * If a large percentage of tuples have been pruned (>75%) then we'll
565  * copy these into the temp buffer tuple-by-tuple, otherwise, we'll
566  * just do a single memcpy() for all tuples that need to be moved.
567  * When so many tuples have been removed there's likely to be a lot of
568  * gaps and it's unlikely that many non-movable tuples remain at the
569  * end of the page.
570  */
571  if (nitems < PageGetMaxOffsetNumber(page) / 4)
572  {
573  i = 0;
574  do
575  {
576  itemidptr = &itemidbase[i];
577  memcpy(scratchptr + itemidptr->itemoff, page + itemidptr->itemoff,
578  itemidptr->alignedlen);
579  i++;
580  } while (i < nitems);
581 
582  /* Set things up for the compactification code below */
583  i = 0;
584  itemidptr = &itemidbase[0];
585  upper = phdr->pd_special;
586  }
587  else
588  {
589  upper = phdr->pd_special;
590 
591  /*
592  * Many tuples are likely to already be in the correct location.
593  * There's no need to copy these into the temp buffer. Instead
594  * we'll just skip forward in the itemidbase array to the position
595  * that we do need to move tuples from so that the code below just
596  * leaves these ones alone.
597  */
598  i = 0;
599  do
600  {
601  itemidptr = &itemidbase[i];
602  if (upper != itemidptr->itemoff + itemidptr->alignedlen)
603  break;
604  upper -= itemidptr->alignedlen;
605 
606  i++;
607  } while (i < nitems);
608 
609  /* Copy all tuples that need to be moved into the temp buffer */
610  memcpy(scratchptr + phdr->pd_upper,
611  page + phdr->pd_upper,
612  upper - phdr->pd_upper);
613  }
614 
615  /*
616  * Do the tuple compactification. itemidptr is already pointing to
617  * the first tuple that we're going to move. Here we collapse the
618  * memcpy calls for adjacent tuples into a single call. This is done
619  * by delaying the memcpy call until we find a gap that needs to be
620  * closed.
621  */
622  copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen;
623  for (; i < nitems; i++)
624  {
625  ItemId lp;
626 
627  itemidptr = &itemidbase[i];
628  lp = PageGetItemId(page, itemidptr->offsetindex + 1);
629 
630  /* copy pending tuples when we detect a gap */
631  if (copy_head != itemidptr->itemoff + itemidptr->alignedlen)
632  {
633  memcpy((char *) page + upper,
634  scratchptr + copy_head,
635  copy_tail - copy_head);
636 
637  /*
638  * We've now copied all tuples already seen, but not the
639  * current tuple, so we set the copy_tail to the end of this
640  * tuple.
641  */
642  copy_tail = itemidptr->itemoff + itemidptr->alignedlen;
643  }
644  /* shift the target offset down by the length of this tuple */
645  upper -= itemidptr->alignedlen;
646  /* point the copy_head to the start of this tuple */
647  copy_head = itemidptr->itemoff;
648 
649  /* update the line pointer to reference the new offset */
650  lp->lp_off = upper;
651 
652  }
653 
654  /* Copy the remaining chunk */
655  memcpy((char *) page + upper,
656  scratchptr + copy_head,
657  copy_tail - copy_head);
658  }
659 
660  phdr->pd_upper = upper;
661 }
662 
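A worked example of the presorted path, with made-up numbers: let pd_special =
8192 and itemidbase hold (itemoff, alignedlen) entries (8100, 92), (8000, 60),
(7940, 60). The first tuple already ends at 8192, so the skip loop leaves it in
place and sets upper = 8100. The second ends at 8060, not 8100, so compaction
starts there with copy_head = copy_tail = 8060. The third tuple ends at 8000,
exactly where the second begins, so no gap is seen between them; both are moved
together by the single final memmove() of 120 bytes from offset 7940 to 7980,
closing the one 40-byte gap. Adjacent tuples thus cost one memmove(), not one
per tuple.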
663 /*
664  * PageRepairFragmentation
665  *
666  * Frees fragmented space on a page.
667  * It doesn't remove unused line pointers! Please don't change this.
668  *
669  * This routine is usable for heap pages only, but see PageIndexMultiDelete.
670  *
671  * As a side effect, the page's PD_HAS_FREE_LINES hint bit is updated.
672  */
673 void
674 PageRepairFragmentation(Page page)
675 {
676  Offset pd_lower = ((PageHeader) page)->pd_lower;
677  Offset pd_upper = ((PageHeader) page)->pd_upper;
678  Offset pd_special = ((PageHeader) page)->pd_special;
679  Offset last_offset;
680  itemIdCompactData itemidbase[MaxHeapTuplesPerPage];
681  itemIdCompact itemidptr;
682  ItemId lp;
683  int nline,
684  nstorage,
685  nunused;
686  int i;
687  Size totallen;
688  bool presorted = true; /* For now */
689 
690  /*
691  * It's worth the trouble to be more paranoid here than in most places,
692  * because we are about to reshuffle data in (what is usually) a shared
693  * disk buffer. If we aren't careful then corrupted pointers, lengths,
694  * etc could cause us to clobber adjacent disk buffers, spreading the data
695  * loss further. So, check everything.
696  */
697  if (pd_lower < SizeOfPageHeaderData ||
698  pd_lower > pd_upper ||
699  pd_upper > pd_special ||
700  pd_special > BLCKSZ ||
701  pd_special != MAXALIGN(pd_special))
702  ereport(ERROR,
703  (errcode(ERRCODE_DATA_CORRUPTED),
704  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
705  pd_lower, pd_upper, pd_special)));
706 
707  /*
708  * Run through the line pointer array and collect data about live items.
709  */
710  nline = PageGetMaxOffsetNumber(page);
711  itemidptr = itemidbase;
712  nunused = totallen = 0;
713  last_offset = pd_special;
714  for (i = FirstOffsetNumber; i <= nline; i++)
715  {
716  lp = PageGetItemId(page, i);
717  if (ItemIdIsUsed(lp))
718  {
719  if (ItemIdHasStorage(lp))
720  {
721  itemidptr->offsetindex = i - 1;
722  itemidptr->itemoff = ItemIdGetOffset(lp);
723 
724  if (last_offset > itemidptr->itemoff)
725  last_offset = itemidptr->itemoff;
726  else
727  presorted = false;
728 
729  if (unlikely(itemidptr->itemoff < (int) pd_upper ||
730  itemidptr->itemoff >= (int) pd_special))
731  ereport(ERROR,
732  (errcode(ERRCODE_DATA_CORRUPTED),
733  errmsg("corrupted line pointer: %u",
734  itemidptr->itemoff)));
735  itemidptr->alignedlen = MAXALIGN(ItemIdGetLength(lp));
736  totallen += itemidptr->alignedlen;
737  itemidptr++;
738  }
739  }
740  else
741  {
742  /* Unused entries should have lp_len = 0, but make sure */
743  ItemIdSetUnused(lp);
744  nunused++;
745  }
746  }
747 
748  nstorage = itemidptr - itemidbase;
749  if (nstorage == 0)
750  {
751  /* Page is completely empty, so just reset it quickly */
752  ((PageHeader) page)->pd_upper = pd_special;
753  }
754  else
755  {
756  /* Need to compact the page the hard way */
757  if (totallen > (Size) (pd_special - pd_lower))
758  ereport(ERROR,
759  (errcode(ERRCODE_DATA_CORRUPTED),
760  errmsg("corrupted item lengths: total %u, available space %u",
761  (unsigned int) totallen, pd_special - pd_lower)));
762 
763  compactify_tuples(itemidbase, nstorage, page, presorted);
764  }
765 
766  /* Set hint bit for PageAddItem */
767  if (nunused > 0)
768  PageSetHasFreeLinePointers(page);
769  else
770  PageClearHasFreeLinePointers(page);
771 }
772 
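Call-site sketch (heavily simplified; the hypothetical "demo_defragment"
assumes the caller holds an exclusive content lock on the buffer, and real
callers such as heap pruning also emit WAL inside a critical section):

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

static void
demo_defragment(Buffer buffer)
{
	Page		page = BufferGetPage(buffer);

	PageRepairFragmentation(page);
	MarkBufferDirty(buffer);
}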
773 /*
774  * PageGetFreeSpace
775  * Returns the size of the free (allocatable) space on a page,
776  * reduced by the space needed for a new line pointer.
777  *
778  * Note: this should usually only be used on index pages. Use
779  * PageGetHeapFreeSpace on heap pages.
780  */
781 Size
782 PageGetFreeSpace(Page page)
783 {
784  int space;
785 
786  /*
787  * Use signed arithmetic here so that we behave sensibly if pd_lower >
788  * pd_upper.
789  */
790  space = (int) ((PageHeader) page)->pd_upper -
791  (int) ((PageHeader) page)->pd_lower;
792 
793  if (space < (int) sizeof(ItemIdData))
794  return 0;
795  space -= sizeof(ItemIdData);
796 
797  return (Size) space;
798 }
799 
800 /*
801  * PageGetFreeSpaceForMultipleTuples
802  * Returns the size of the free (allocatable) space on a page,
803  * reduced by the space needed for multiple new line pointers.
804  *
805  * Note: this should usually only be used on index pages. Use
806  * PageGetHeapFreeSpace on heap pages.
807  */
808 Size
809 PageGetFreeSpaceForMultipleTuples(Page page, int ntups)
810 {
811  int space;
812 
813  /*
814  * Use signed arithmetic here so that we behave sensibly if pd_lower >
815  * pd_upper.
816  */
817  space = (int) ((PageHeader) page)->pd_upper -
818  (int) ((PageHeader) page)->pd_lower;
819 
820  if (space < (int) (ntups * sizeof(ItemIdData)))
821  return 0;
822  space -= ntups * sizeof(ItemIdData);
823 
824  return (Size) space;
825 }
826 
827 /*
828  * PageGetExactFreeSpace
829  * Returns the size of the free (allocatable) space on a page,
830  * without any consideration for adding/removing line pointers.
831  */
832 Size
833 PageGetExactFreeSpace(Page page)
834 {
835  int space;
836 
837  /*
838  * Use signed arithmetic here so that we behave sensibly if pd_lower >
839  * pd_upper.
840  */
841  space = (int) ((PageHeader) page)->pd_upper -
842  (int) ((PageHeader) page)->pd_lower;
843 
844  if (space < 0)
845  return 0;
846 
847  return (Size) space;
848 }
849 
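The three free-space functions above differ only in the line-pointer allowance
they subtract, as this hypothetical "demo_free_space" sketch shows:

#include "postgres.h"
#include "storage/bufpage.h"

static void
demo_free_space(Page page)
{
	Size		one = PageGetFreeSpace(page);	/* minus 1 ItemIdData */
	Size		many = PageGetFreeSpaceForMultipleTuples(page, 3);	/* minus 3 */
	Size		raw = PageGetExactFreeSpace(page);	/* no allowance */

	Assert(raw >= one && one >= many);
}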
850 
851 /*
852  * PageGetHeapFreeSpace
853  * Returns the size of the free (allocatable) space on a page,
854  * reduced by the space needed for a new line pointer.
855  *
856  * The difference between this and PageGetFreeSpace is that this will return
857  * zero if there are already MaxHeapTuplesPerPage line pointers in the page
858  * and none are free. We use this to enforce that no more than
859  * MaxHeapTuplesPerPage line pointers are created on a heap page. (Although
860  * no more tuples than that could fit anyway, in the presence of redirected
861  * or dead line pointers it'd be possible to have too many line pointers.
862  * To avoid breaking code that assumes MaxHeapTuplesPerPage is a hard limit
863  * on the number of line pointers, we make this extra check.)
864  */
865 Size
866 PageGetHeapFreeSpace(Page page)
867 {
868  Size space;
869 
870  space = PageGetFreeSpace(page);
871  if (space > 0)
872  {
873  OffsetNumber offnum,
874  nline;
875 
876  /*
877  * Are there already MaxHeapTuplesPerPage line pointers in the page?
878  */
879  nline = PageGetMaxOffsetNumber(page);
880  if (nline >= MaxHeapTuplesPerPage)
881  {
882  if (PageHasFreeLinePointers(page))
883  {
884  /*
885  * Since this is just a hint, we must confirm that there is
886  * indeed a free line pointer
887  */
888  for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
889  {
890  ItemId lp = PageGetItemId(page, offnum);
891 
892  if (!ItemIdIsUsed(lp))
893  break;
894  }
895 
896  if (offnum > nline)
897  {
898  /*
899  * The hint is wrong, but we can't clear it here since we
900  * don't have the ability to mark the page dirty.
901  */
902  space = 0;
903  }
904  }
905  else
906  {
907  /*
908  * Although the hint might be wrong, PageAddItem will believe
909  * it anyway, so we must believe it too.
910  */
911  space = 0;
912  }
913  }
914  }
915  return space;
916 }
917 
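Fit-test sketch (hypothetical "demo_tuple_fits"): a heap tuple of 'len' bytes
fits only if its MAXALIGN'd length is available; the line pointer itself is
already accounted for by PageGetHeapFreeSpace.

#include "postgres.h"
#include "storage/bufpage.h"

static bool
demo_tuple_fits(Page page, Size len)
{
	return PageGetHeapFreeSpace(page) >= MAXALIGN(len);
}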
918 
919 /*
920  * PageIndexTupleDelete
921  *
922  * This routine does the work of removing a tuple from an index page.
923  *
924  * Unlike heap pages, we compact out the line pointer for the removed tuple.
925  */
926 void
927 PageIndexTupleDelete(Page page, OffsetNumber offnum)
928 {
929  PageHeader phdr = (PageHeader) page;
930  char *addr;
931  ItemId tup;
932  Size size;
933  unsigned offset;
934  int nbytes;
935  int offidx;
936  int nline;
937 
938  /*
939  * As with PageRepairFragmentation, paranoia seems justified.
940  */
941  if (phdr->pd_lower < SizeOfPageHeaderData ||
942  phdr->pd_lower > phdr->pd_upper ||
943  phdr->pd_upper > phdr->pd_special ||
944  phdr->pd_special > BLCKSZ ||
945  phdr->pd_special != MAXALIGN(phdr->pd_special))
946  ereport(ERROR,
947  (errcode(ERRCODE_DATA_CORRUPTED),
948  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
949  phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
950 
951  nline = PageGetMaxOffsetNumber(page);
952  if ((int) offnum <= 0 || (int) offnum > nline)
953  elog(ERROR, "invalid index offnum: %u", offnum);
954 
955  /* change offset number to offset index */
956  offidx = offnum - 1;
957 
958  tup = PageGetItemId(page, offnum);
959  Assert(ItemIdHasStorage(tup));
960  size = ItemIdGetLength(tup);
961  offset = ItemIdGetOffset(tup);
962 
963  if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
964  offset != MAXALIGN(offset))
965  ereport(ERROR,
966  (errcode(ERRCODE_DATA_CORRUPTED),
967  errmsg("corrupted line pointer: offset = %u, size = %u",
968  offset, (unsigned int) size)));
969 
970  /* Amount of space to actually be deleted */
971  size = MAXALIGN(size);
972 
973  /*
974  * First, we want to get rid of the pd_linp entry for the index tuple. We
975  * copy all subsequent linp's back one slot in the array. We don't use
976  * PageGetItemId, because we are manipulating the _array_, not individual
977  * linp's.
978  */
979  nbytes = phdr->pd_lower -
980  ((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
981 
982  if (nbytes > 0)
983  memmove((char *) &(phdr->pd_linp[offidx]),
984  (char *) &(phdr->pd_linp[offidx + 1]),
985  nbytes);
986 
987  /*
988  * Now move everything between the old upper bound (beginning of tuple
989  * space) and the beginning of the deleted tuple forward, so that space in
990  * the middle of the page is left free. If we've just deleted the tuple
991  * at the beginning of tuple space, then there's no need to do the copy.
992  */
993 
994  /* beginning of tuple space */
995  addr = (char *) page + phdr->pd_upper;
996 
997  if (offset > phdr->pd_upper)
998  memmove(addr + size, addr, offset - phdr->pd_upper);
999 
1000  /* adjust free space boundary pointers */
1001  phdr->pd_upper += size;
1002  phdr->pd_lower -= sizeof(ItemIdData);
1003 
1004  /*
1005  * Finally, we need to adjust the linp entries that remain.
1006  *
1007  * Anything that used to be before the deleted tuple's data was moved
1008  * forward by the size of the deleted tuple.
1009  */
1010  if (!PageIsEmpty(page))
1011  {
1012  int i;
1013 
1014  nline--; /* there's one less than when we started */
1015  for (i = 1; i <= nline; i++)
1016  {
1017  ItemId ii = PageGetItemId(phdr, i);
1018 
1019  Assert(ItemIdHasStorage(ii));
1020  if (ItemIdGetOffset(ii) <= offset)
1021  ii->lp_off += size;
1022  }
1023  }
1024 }
1025 
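Usage sketch: because deletion compacts the line pointer array, deleting
several offsets one-by-one must run back-to-front so the remaining offsets
stay valid, as in this hypothetical "demo_delete_two" (compare the nitems <= 2
fast path in PageIndexMultiDelete below):

#include "postgres.h"
#include "storage/bufpage.h"

static void
demo_delete_two(Page page, OffsetNumber a, OffsetNumber b)
{
	Assert(a < b);
	/* delete from the back so the earlier offset stays valid */
	PageIndexTupleDelete(page, b);
	PageIndexTupleDelete(page, a);
}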
1026 
1027 /*
1028  * PageIndexMultiDelete
1029  *
1030  * This routine handles the case of deleting multiple tuples from an
1031  * index page at once. It is considerably faster than a loop around
1032  * PageIndexTupleDelete ... however, the caller *must* supply the array
1033  * of item numbers to be deleted in item number order!
1034  */
1035 void
1036 PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
1037 {
1038  PageHeader phdr = (PageHeader) page;
1039  Offset pd_lower = phdr->pd_lower;
1040  Offset pd_upper = phdr->pd_upper;
1041  Offset pd_special = phdr->pd_special;
1042  Offset last_offset;
1043  itemIdCompactData itemidbase[MaxIndexTuplesPerPage];
1044  ItemIdData newitemids[MaxIndexTuplesPerPage];
1045  itemIdCompact itemidptr;
1046  ItemId lp;
1047  int nline,
1048  nused;
1049  Size totallen;
1050  Size size;
1051  unsigned offset;
1052  int nextitm;
1053  OffsetNumber offnum;
1054  bool presorted = true; /* For now */
1055 
1056  Assert(nitems <= MaxIndexTuplesPerPage);
1057 
1058  /*
1059  * If there aren't very many items to delete, then retail
1060  * PageIndexTupleDelete is the best way. Delete the items in reverse
1061  * order so we don't have to think about adjusting item numbers for
1062  * previous deletions.
1063  *
1064  * TODO: tune the magic number here
1065  */
1066  if (nitems <= 2)
1067  {
1068  while (--nitems >= 0)
1069  PageIndexTupleDelete(page, itemnos[nitems]);
1070  return;
1071  }
1072 
1073  /*
1074  * As with PageRepairFragmentation, paranoia seems justified.
1075  */
1076  if (pd_lower < SizeOfPageHeaderData ||
1077  pd_lower > pd_upper ||
1078  pd_upper > pd_special ||
1079  pd_special > BLCKSZ ||
1080  pd_special != MAXALIGN(pd_special))
1081  ereport(ERROR,
1082  (errcode(ERRCODE_DATA_CORRUPTED),
1083  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1084  pd_lower, pd_upper, pd_special)));
1085 
1086  /*
1087  * Scan the line pointer array and build a list of just the ones we are
1088  * going to keep. Notice we do not modify the page yet, since we are
1089  * still validity-checking.
1090  */
1091  nline = PageGetMaxOffsetNumber(page);
1092  itemidptr = itemidbase;
1093  totallen = 0;
1094  nused = 0;
1095  nextitm = 0;
1096  last_offset = pd_special;
1097  for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
1098  {
1099  lp = PageGetItemId(page, offnum);
1100  Assert(ItemIdHasStorage(lp));
1101  size = ItemIdGetLength(lp);
1102  offset = ItemIdGetOffset(lp);
1103  if (offset < pd_upper ||
1104  (offset + size) > pd_special ||
1105  offset != MAXALIGN(offset))
1106  ereport(ERROR,
1107  (errcode(ERRCODE_DATA_CORRUPTED),
1108  errmsg("corrupted line pointer: offset = %u, size = %u",
1109  offset, (unsigned int) size)));
1110 
1111  if (nextitm < nitems && offnum == itemnos[nextitm])
1112  {
1113  /* skip item to be deleted */
1114  nextitm++;
1115  }
1116  else
1117  {
1118  itemidptr->offsetindex = nused; /* where it will go */
1119  itemidptr->itemoff = offset;
1120 
1121  if (last_offset > itemidptr->itemoff)
1122  last_offset = itemidptr->itemoff;
1123  else
1124  presorted = false;
1125 
1126  itemidptr->alignedlen = MAXALIGN(size);
1127  totallen += itemidptr->alignedlen;
1128  newitemids[nused] = *lp;
1129  itemidptr++;
1130  nused++;
1131  }
1132  }
1133 
1134  /* this will catch invalid or out-of-order itemnos[] */
1135  if (nextitm != nitems)
1136  elog(ERROR, "incorrect index offsets supplied");
1137 
1138  if (totallen > (Size) (pd_special - pd_lower))
1139  ereport(ERROR,
1140  (errcode(ERRCODE_DATA_CORRUPTED),
1141  errmsg("corrupted item lengths: total %u, available space %u",
1142  (unsigned int) totallen, pd_special - pd_lower)));
1143 
1144  /*
1145  * Looks good. Overwrite the line pointers with the copy, from which we've
1146  * removed all the unused items.
1147  */
1148  memcpy(phdr->pd_linp, newitemids, nused * sizeof(ItemIdData));
1149  phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
1150 
1151  /* and compactify the tuple data */
1152  if (nused > 0)
1153  compactify_tuples(itemidbase, nused, page, presorted);
1154  else
1155  phdr->pd_upper = pd_special;
1156 }
1157 
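Batch-delete sketch (modeled loosely on index vacuuming; "demo_delete_dead" is
hypothetical): collect offsets in ascending order, which is exactly the order
PageIndexMultiDelete requires.

#include "postgres.h"
#include "access/itup.h"
#include "storage/bufpage.h"

static void
demo_delete_dead(Page page)
{
	OffsetNumber deletable[MaxIndexTuplesPerPage];
	int			ndeletable = 0;
	OffsetNumber off,
				maxoff = PageGetMaxOffsetNumber(page);

	for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
	{
		ItemId		lp = PageGetItemId(page, off);

		if (ItemIdIsDead(lp))	/* e.g. LP_DEAD hint set by scans */
			deletable[ndeletable++] = off;
	}

	if (ndeletable > 0)
		PageIndexMultiDelete(page, deletable, ndeletable);
}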
1158 
1159 /*
1160  * PageIndexTupleDeleteNoCompact
1161  *
1162  * Remove the specified tuple from an index page, but set its line pointer
1163  * to "unused" instead of compacting it out, except that it can be removed
1164  * if it's the last line pointer on the page.
1165  *
1166  * This is used for index AMs that require that existing TIDs of live tuples
1167  * remain unchanged, and are willing to allow unused line pointers instead.
1168  */
1169 void
1170 PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offnum)
1171 {
1172  PageHeader phdr = (PageHeader) page;
1173  char *addr;
1174  ItemId tup;
1175  Size size;
1176  unsigned offset;
1177  int nline;
1178 
1179  /*
1180  * As with PageRepairFragmentation, paranoia seems justified.
1181  */
1182  if (phdr->pd_lower < SizeOfPageHeaderData ||
1183  phdr->pd_lower > phdr->pd_upper ||
1184  phdr->pd_upper > phdr->pd_special ||
1185  phdr->pd_special > BLCKSZ ||
1186  phdr->pd_special != MAXALIGN(phdr->pd_special))
1187  ereport(ERROR,
1188  (errcode(ERRCODE_DATA_CORRUPTED),
1189  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1190  phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1191 
1192  nline = PageGetMaxOffsetNumber(page);
1193  if ((int) offnum <= 0 || (int) offnum > nline)
1194  elog(ERROR, "invalid index offnum: %u", offnum);
1195 
1196  tup = PageGetItemId(page, offnum);
1197  Assert(ItemIdHasStorage(tup));
1198  size = ItemIdGetLength(tup);
1199  offset = ItemIdGetOffset(tup);
1200 
1201  if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
1202  offset != MAXALIGN(offset))
1203  ereport(ERROR,
1204  (errcode(ERRCODE_DATA_CORRUPTED),
1205  errmsg("corrupted line pointer: offset = %u, size = %u",
1206  offset, (unsigned int) size)));
1207 
1208  /* Amount of space to actually be deleted */
1209  size = MAXALIGN(size);
1210 
1211  /*
1212  * Either set the line pointer to "unused", or zap it if it's the last
1213  * one. (Note: it's possible that the next-to-last one(s) are already
1214  * unused, but we do not trouble to try to compact them out if so.)
1215  */
1216  if ((int) offnum < nline)
1217  ItemIdSetUnused(tup);
1218  else
1219  {
1220  phdr->pd_lower -= sizeof(ItemIdData);
1221  nline--; /* there's one less than when we started */
1222  }
1223 
1224  /*
1225  * Now move everything between the old upper bound (beginning of tuple
1226  * space) and the beginning of the deleted tuple forward, so that space in
1227  * the middle of the page is left free. If we've just deleted the tuple
1228  * at the beginning of tuple space, then there's no need to do the copy.
1229  */
1230 
1231  /* beginning of tuple space */
1232  addr = (char *) page + phdr->pd_upper;
1233 
1234  if (offset > phdr->pd_upper)
1235  memmove(addr + size, addr, offset - phdr->pd_upper);
1236 
1237  /* adjust free space boundary pointer */
1238  phdr->pd_upper += size;
1239 
1240  /*
1241  * Finally, we need to adjust the linp entries that remain.
1242  *
1243  * Anything that used to be before the deleted tuple's data was moved
1244  * forward by the size of the deleted tuple.
1245  */
1246  if (!PageIsEmpty(page))
1247  {
1248  int i;
1249 
1250  for (i = 1; i <= nline; i++)
1251  {
1252  ItemId ii = PageGetItemId(phdr, i);
1253 
1254  if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1255  ii->lp_off += size;
1256  }
1257  }
1258 }
1259 
1260 
1261 /*
1262  * PageIndexTupleOverwrite
1263  *
1264  * Replace a specified tuple on an index page.
1265  *
1266  * The new tuple is placed exactly where the old one had been, shifting
1267  * other tuples' data up or down as needed to keep the page compacted.
1268  * This is better than deleting and reinserting the tuple, because it
1269  * avoids any data shifting when the tuple size doesn't change; and
1270  * even when it does, we avoid moving the line pointers around.
1271  * This could be used by an index AM that doesn't want to unset the
1272  * LP_DEAD bit when it happens to be set. It could conceivably also be
1273  * used by an index AM that cares about the physical order of tuples as
1274  * well as their logical/ItemId order.
1275  *
1276  * If there's insufficient space for the new tuple, return false. Other
1277  * errors represent data-corruption problems, so we just elog.
1278  */
1279 bool
1280 PageIndexTupleOverwrite(Page page, OffsetNumber offnum,
1281  Item newtup, Size newsize)
1282 {
1283  PageHeader phdr = (PageHeader) page;
1284  ItemId tupid;
1285  int oldsize;
1286  unsigned offset;
1287  Size alignednewsize;
1288  int size_diff;
1289  int itemcount;
1290 
1291  /*
1292  * As with PageRepairFragmentation, paranoia seems justified.
1293  */
1294  if (phdr->pd_lower < SizeOfPageHeaderData ||
1295  phdr->pd_lower > phdr->pd_upper ||
1296  phdr->pd_upper > phdr->pd_special ||
1297  phdr->pd_special > BLCKSZ ||
1298  phdr->pd_special != MAXALIGN(phdr->pd_special))
1299  ereport(ERROR,
1300  (errcode(ERRCODE_DATA_CORRUPTED),
1301  errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
1302  phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
1303 
1304  itemcount = PageGetMaxOffsetNumber(page);
1305  if ((int) offnum <= 0 || (int) offnum > itemcount)
1306  elog(ERROR, "invalid index offnum: %u", offnum);
1307 
1308  tupid = PageGetItemId(page, offnum);
1309  Assert(ItemIdHasStorage(tupid));
1310  oldsize = ItemIdGetLength(tupid);
1311  offset = ItemIdGetOffset(tupid);
1312 
1313  if (offset < phdr->pd_upper || (offset + oldsize) > phdr->pd_special ||
1314  offset != MAXALIGN(offset))
1315  ereport(ERROR,
1316  (errcode(ERRCODE_DATA_CORRUPTED),
1317  errmsg("corrupted line pointer: offset = %u, size = %u",
1318  offset, (unsigned int) oldsize)));
1319 
1320  /*
1321  * Determine actual change in space requirement, check for page overflow.
1322  */
1323  oldsize = MAXALIGN(oldsize);
1324  alignednewsize = MAXALIGN(newsize);
1325  if (alignednewsize > oldsize + (phdr->pd_upper - phdr->pd_lower))
1326  return false;
1327 
1328  /*
1329  * Relocate existing data and update line pointers, unless the new tuple
1330  * is the same size as the old (after alignment), in which case there's
1331  * nothing to do. Notice that what we have to relocate is data before the
1332  * target tuple, not data after, so it's convenient to express size_diff
1333  * as the amount by which the tuple's size is decreasing, making it the
1334  * delta to add to pd_upper and affected line pointers.
1335  */
1336  size_diff = oldsize - (int) alignednewsize;
1337  if (size_diff != 0)
1338  {
1339  char *addr = (char *) page + phdr->pd_upper;
1340  int i;
1341 
1342  /* relocate all tuple data before the target tuple */
1343  memmove(addr + size_diff, addr, offset - phdr->pd_upper);
1344 
1345  /* adjust free space boundary pointer */
1346  phdr->pd_upper += size_diff;
1347 
1348  /* adjust affected line pointers too */
1349  for (i = FirstOffsetNumber; i <= itemcount; i++)
1350  {
1351  ItemId ii = PageGetItemId(phdr, i);
1352 
1353  /* Allow items without storage; currently only BRIN needs that */
1354  if (ItemIdHasStorage(ii) && ItemIdGetOffset(ii) <= offset)
1355  ii->lp_off += size_diff;
1356  }
1357  }
1358 
1359  /* Update the item's tuple length without changing its lp_flags field */
1360  tupid->lp_off = offset + size_diff;
1361  tupid->lp_len = newsize;
1362 
1363  /* Copy new tuple data onto page */
1364  memcpy(PageGetItem(page, tupid), newtup, newsize);
1365 
1366  return true;
1367 }
1368 
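Replacement sketch (hypothetical "demo_replace"): overwrite in place so the
tuple keeps its TID; a false return means the caller must make room first.

#include "postgres.h"
#include "storage/bufpage.h"

static void
demo_replace(Page page, OffsetNumber offnum, Item newtup, Size newsize)
{
	if (!PageIndexTupleOverwrite(page, offnum, newtup, newsize))
		elog(ERROR, "failed to overwrite tuple on page");
}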
1369 
1370 /*
1371  * Set checksum for a page in shared buffers.
1372  *
1373  * If checksums are disabled, or if the page is not initialized, just return
1374  * the input. Otherwise, we must make a copy of the page before calculating
1375  * the checksum, to prevent concurrent modifications (e.g. setting hint bits)
1376  * from making the final checksum invalid. It doesn't matter if we include or
1377  * exclude hints during the copy, as long as we write a valid page and
1378  * associated checksum.
1379  *
1380  * Returns a pointer to the block-sized data that needs to be written. Uses
1381  * statically-allocated memory, so the caller must immediately write the
1382  * returned page and not refer to it again.
1383  */
1384 char *
1385 PageSetChecksumCopy(Page page, BlockNumber blkno)
1386 {
1387  static char *pageCopy = NULL;
1388 
1389  /* If we don't need a checksum, just return the passed-in data */
1390  if (PageIsNew(page) || !DataChecksumsEnabled())
1391  return (char *) page;
1392 
1393  /*
1394  * We allocate the copy space once and use it over on each subsequent
1395  * call. The point of palloc'ing here, rather than having a static char
1396  * array, is first to ensure adequate alignment for the checksumming code
1397  * and second to avoid wasting space in processes that never call this.
1398  */
1399  if (pageCopy == NULL)
1400  pageCopy = MemoryContextAlloc(TopMemoryContext, BLCKSZ);
1401 
1402  memcpy(pageCopy, (char *) page, BLCKSZ);
1403  ((PageHeader) pageCopy)->pd_checksum = pg_checksum_page(pageCopy, blkno);
1404  return pageCopy;
1405 }
1406 
1407 /*
1408  * Set checksum for a page in private memory.
1409  *
1410  * This must only be used when we know that no other process can be modifying
1411  * the page buffer.
1412  */
1413 void
1414 PageSetChecksumInplace(Page page, BlockNumber blkno)
1415 {
1416  /* If we don't need a checksum, just return */
1417  if (PageIsNew(page) || !DataChecksumsEnabled())
1418  return;
1419 
1420  ((PageHeader) page)->pd_checksum = pg_checksum_page((char *) page, blkno);
1421 }
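Write-path sketch contrasting the two entry points (hypothetical
"demo_write_page"; assumes the PostgreSQL 13-era smgrwrite() signature):
shared buffers must be checksummed on a copy, while private page images can be
checksummed in place.

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/smgr.h"

static void
demo_write_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
				Page page, bool page_is_shared)
{
	if (page_is_shared)
	{
		/* static buffer is reused, so write it before the next call */
		char	   *towrite = PageSetChecksumCopy(page, blkno);

		smgrwrite(reln, forknum, blkno, towrite, false);
	}
	else
	{
		PageSetChecksumInplace(page, blkno);
		smgrwrite(reln, forknum, blkno, (char *) page, false);
	}
}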