PostgreSQL Source Code (git master)
nbtdedup.c
1 /*-------------------------------------------------------------------------
2  *
3  * nbtdedup.c
4  * Deduplicate items in Postgres btrees.
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/access/nbtree/nbtdedup.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/nbtree.h"
18 #include "access/nbtxlog.h"
19 #include "miscadmin.h"
20 #include "utils/rel.h"
21 
22 static bool _bt_do_singleval(Relation rel, Page page, BTDedupState state,
23  OffsetNumber minoff, IndexTuple newitem);
24 static void _bt_singleval_fillfactor(Page page, BTDedupState state,
25  Size newitemsz);
26 #ifdef USE_ASSERT_CHECKING
27 static bool _bt_posting_valid(IndexTuple posting);
28 #endif
29 
30 /*
31  * Deduplicate items on a leaf page. The page will have to be split by caller
32  * if we cannot successfully free at least newitemsz (we also need space for
33  * newitem's line pointer, which isn't included in caller's newitemsz).
34  *
35  * The general approach taken here is to perform as much deduplication as
36  * possible to free as much space as possible. Note, however, that "single
37  * value" strategy is sometimes used for !checkingunique callers, in which
38  * case deduplication will leave a few tuples untouched at the end of the
39  * page. The general idea is to prepare the page for an anticipated page
40  * split that uses nbtsplitloc.c's "single value" strategy to determine a
41  * split point. (There is no reason to deduplicate items that will end up on
42  * the right half of the page after the anticipated page split; better to
43  * handle those if and when the anticipated right half page gets its own
44  * deduplication pass, following further inserts of duplicates.)
45  *
46  * This function should be called during insertion, when the page doesn't have
47  * enough space to fit an incoming newitem. If the BTP_HAS_GARBAGE page flag
48  * was set, caller should have removed any LP_DEAD items by calling
49  * _bt_vacuum_one_page() before calling here. We may still have to kill
50  * LP_DEAD items here when the page's BTP_HAS_GARBAGE hint is falsely unset,
51  * but that should be rare. Also, _bt_vacuum_one_page() won't unset the
52  * BTP_HAS_GARBAGE flag when it finds no LP_DEAD items, so a successful
53  * deduplication pass will always clear it, just to keep things tidy.
54  */
55 void
56 _bt_dedup_one_page(Relation rel, Buffer buf, Relation heapRel,
57  IndexTuple newitem, Size newitemsz, bool checkingunique)
58 {
59  OffsetNumber offnum,
60  minoff,
61  maxoff;
62  Page page = BufferGetPage(buf);
63  BTPageOpaque opaque;
64  Page newpage;
65  OffsetNumber deletable[MaxIndexTuplesPerPage];
66  BTDedupState state;
67  int ndeletable = 0;
68  Size pagesaving = 0;
69  bool singlevalstrat = false;
70  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
71 
72  /*
73  * We can't assume that there are no LP_DEAD items. For one thing, VACUUM
74  * will clear the BTP_HAS_GARBAGE hint without reliably removing items
75  * that are marked LP_DEAD. We don't want to unnecessarily unset LP_DEAD
76  * bits when deduplicating items. Allowing it would be correct, though
77  * wasteful.
78  */
79  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
80  minoff = P_FIRSTDATAKEY(opaque);
81  maxoff = PageGetMaxOffsetNumber(page);
82  for (offnum = minoff;
83  offnum <= maxoff;
84  offnum = OffsetNumberNext(offnum))
85  {
86  ItemId itemid = PageGetItemId(page, offnum);
87 
88  if (ItemIdIsDead(itemid))
89  deletable[ndeletable++] = offnum;
90  }
91 
92  if (ndeletable > 0)
93  {
94  _bt_delitems_delete(rel, buf, deletable, ndeletable, heapRel);
95 
96  /*
97  * Return when a split will be avoided. This is equivalent to
98  * avoiding a split using the usual _bt_vacuum_one_page() path.
99  */
100  if (PageGetFreeSpace(page) >= newitemsz)
101  return;
102 
103  /*
104  * Reconsider number of items on page, in case _bt_delitems_delete()
105  * managed to delete an item or two
106  */
107  minoff = P_FIRSTDATAKEY(opaque);
108  maxoff = PageGetMaxOffsetNumber(page);
109  }
110 
111  /* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
112  newitemsz += sizeof(ItemIdData);
113 
114  /*
115  * By here, it's clear that deduplication will definitely be attempted.
116  * Initialize deduplication state.
117  *
118  * It would be possible for maxpostingsize (limit on posting list tuple
119  * size) to be set to one third of the page. However, it seems like a
120  * good idea to limit the size of posting lists to one sixth of a page.
121  * That ought to leave us with a good split point when pages full of
122  * duplicates can be split several times.
123  */
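 /*
  * Illustrative arithmetic (editor's sketch, not from the original source):
  * with the default 8KB block size, BTMaxItemSize(page) works out to roughly
  * 2700 bytes, so the Min() below starts maxpostingsize at roughly 1350
  * bytes -- about one sixth of the page, as described above.
  */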
124  state = (BTDedupState) palloc(sizeof(BTDedupStateData));
125  state->deduplicate = true;
126  state->nmaxitems = 0;
127  state->maxpostingsize = Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK);
128  /* Metadata about base tuple of current pending posting list */
129  state->base = NULL;
130  state->baseoff = InvalidOffsetNumber;
131  state->basetupsize = 0;
132  /* Metadata about current pending posting list TIDs */
133  state->htids = palloc(state->maxpostingsize);
134  state->nhtids = 0;
135  state->nitems = 0;
136  /* Size of all physical tuples to be replaced by pending posting list */
137  state->phystupsize = 0;
138  /* nintervals should be initialized to zero */
139  state->nintervals = 0;
140 
141  /* Determine if "single value" strategy should be used */
142  if (!checkingunique)
143  singlevalstrat = _bt_do_singleval(rel, page, state, minoff, newitem);
144 
145  /*
146  * Deduplicate items from page, and write them to newpage.
147  *
148  * Copy the original page's LSN into newpage copy. This will become the
149  * updated version of the page. We need this because XLogInsert will
150  * examine the LSN and possibly dump it in a page image.
151  */
152  newpage = PageGetTempPageCopySpecial(page);
153  PageSetLSN(newpage, PageGetLSN(page));
154 
155  /* Copy high key, if any */
156  if (!P_RIGHTMOST(opaque))
157  {
158  ItemId hitemid = PageGetItemId(page, P_HIKEY);
159  Size hitemsz = ItemIdGetLength(hitemid);
160  IndexTuple hitem = (IndexTuple) PageGetItem(page, hitemid);
161 
162  if (PageAddItem(newpage, (Item) hitem, hitemsz, P_HIKEY,
163  false, false) == InvalidOffsetNumber)
164  elog(ERROR, "deduplication failed to add highkey");
165  }
166 
167  for (offnum = minoff;
168  offnum <= maxoff;
169  offnum = OffsetNumberNext(offnum))
170  {
171  ItemId itemid = PageGetItemId(page, offnum);
172  IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
173 
174  Assert(!ItemIdIsDead(itemid));
175 
176  if (offnum == minoff)
177  {
178  /*
179  * No previous/base tuple for the data item -- use the data item
180  * as base tuple of pending posting list
181  */
182  _bt_dedup_start_pending(state, itup, offnum);
183  }
184  else if (state->deduplicate &&
185  _bt_keep_natts_fast(rel, state->base, itup) > nkeyatts &&
186  _bt_dedup_save_htid(state, itup))
187  {
188  /*
189  * Tuple is equal to base tuple of pending posting list. Heap
190  * TID(s) for itup have been saved in state.
191  */
192  }
193  else
194  {
195  /*
196  * Tuple is not equal to pending posting list tuple, or
197  * _bt_dedup_save_htid() opted to not merge current item into
198  * pending posting list for some other reason (e.g., adding more
199  * TIDs would have caused posting list to exceed current
200  * maxpostingsize).
201  *
202  * If state contains pending posting list with more than one item,
203  * form new posting tuple, and actually update the page. Else
204  * reset the state and move on without modifying the page.
205  */
206  pagesaving += _bt_dedup_finish_pending(newpage, state);
207 
208  if (singlevalstrat)
209  {
210  /*
211  * Single value strategy's extra steps.
212  *
213  * Lower maxpostingsize for sixth and final large posting list
214  * tuple at the point where 5 maxpostingsize-capped tuples
215  * have either been formed or observed.
216  *
217  * When a sixth maxpostingsize-capped item is formed/observed,
218  * stop merging together tuples altogether. The few tuples
219  * that remain at the end of the page won't be merged together
220  * at all (at least not until after a future page split takes
221  * place).
222  */
223  if (state->nmaxitems == 5)
224  _bt_singleval_fillfactor(page, state, newitemsz);
225  else if (state->nmaxitems == 6)
226  {
227  state->deduplicate = false;
228  singlevalstrat = false; /* won't be back here */
229  }
230  }
231 
232  /* itup starts new pending posting list */
233  _bt_dedup_start_pending(state, itup, offnum);
234  }
235  }
236 
237  /* Handle the last item */
238  pagesaving += _bt_dedup_finish_pending(newpage, state);
239 
240  /*
241  * If no items suitable for deduplication were found, newpage must be
242  * exactly the same as the original page, so just return from function.
243  *
 244  * We could determine whether or not to proceed on the basis of the space
245  * savings being sufficient to avoid an immediate page split instead. We
246  * don't do that because there is some small value in nbtsplitloc.c always
247  * operating against a page that is fully deduplicated (apart from
248  * newitem). Besides, most of the cost has already been paid.
249  */
250  if (state->nintervals == 0)
251  {
252  /* cannot leak memory here */
253  pfree(newpage);
254  pfree(state->htids);
255  pfree(state);
256  return;
257  }
258 
259  /*
260  * By here, it's clear that deduplication will definitely go ahead.
261  *
262  * Clear the BTP_HAS_GARBAGE page flag in the unlikely event that it is
263  * still falsely set, just to keep things tidy. (We can't rely on
264  * _bt_vacuum_one_page() having done this already, and we can't rely on a
265  * page split or VACUUM getting to it in the near future.)
266  */
267  if (P_HAS_GARBAGE(opaque))
268  {
269  BTPageOpaque nopaque = (BTPageOpaque) PageGetSpecialPointer(newpage);
270 
271  nopaque->btpo_flags &= ~BTP_HAS_GARBAGE;
272  }
273 
 274  START_CRIT_SECTION();
 275 
276  PageRestoreTempPage(newpage, page);
277  MarkBufferDirty(buf);
278 
279  /* XLOG stuff */
280  if (RelationNeedsWAL(rel))
281  {
282  XLogRecPtr recptr;
283  xl_btree_dedup xlrec_dedup;
284 
285  xlrec_dedup.nintervals = state->nintervals;
286 
287  XLogBeginInsert();
 288  XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
 289  XLogRegisterData((char *) &xlrec_dedup, SizeOfBtreeDedup);
290 
291  /*
292  * The intervals array is not in the buffer, but pretend that it is.
293  * When XLogInsert stores the whole buffer, the array need not be
294  * stored too.
295  */
296  XLogRegisterBufData(0, (char *) state->intervals,
297  state->nintervals * sizeof(BTDedupInterval));
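  /*
   * Illustration (not from the original source): each BTDedupInterval is
   * just a (baseoff, nitems) pair.  During REDO, an interval such as
   * {baseoff = 10, nitems = 3} tells replay to merge the three adjacent
   * items at offsets 10..12 of the registered page into a single posting
   * list tuple, mirroring what _bt_dedup_finish_pending() did here.
   */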
298 
299  recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DEDUP);
300 
301  PageSetLSN(page, recptr);
302  }
303 
 304  END_CRIT_SECTION();
 305 
306  /* Local space accounting should agree with page accounting */
307  Assert(pagesaving < newitemsz || PageGetExactFreeSpace(page) >= newitemsz);
308 
309  /* cannot leak memory here */
310  pfree(state->htids);
311  pfree(state);
312 }
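/*
 * Illustrative caller sketch (not part of nbtdedup.c): the insert path in
 * nbtinsert.c invokes a deduplication pass roughly as below, and only for
 * indexes whose opclasses support deduplication.  The function and variable
 * names here are hypothetical.
 */
#ifdef NOT_USED
static void
example_dedup_call(Relation rel, Relation heapRel, BTInsertState insertstate,
				   bool checkingunique)
{
	/* Only attempt deduplication when the index supports it */
	if (BTGetDeduplicateItems(rel) && insertstate->itup_key->allequalimage)
		_bt_dedup_one_page(rel, insertstate->buf, heapRel, insertstate->itup,
						   insertstate->itemsz, checkingunique);
}
#endif							/* NOT_USED */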
313 
314 /*
315  * Create a new pending posting list tuple based on caller's base tuple.
316  *
317  * Every tuple processed by deduplication either becomes the base tuple for a
318  * posting list, or gets its heap TID(s) accepted into a pending posting list.
319  * A tuple that starts out as the base tuple for a posting list will only
320  * actually be rewritten within _bt_dedup_finish_pending() when it turns out
321  * that there are duplicates that can be merged into the base tuple.
322  */
323 void
324 _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
 325  OffsetNumber baseoff)
326 {
327  Assert(state->nhtids == 0);
328  Assert(state->nitems == 0);
329  Assert(!BTreeTupleIsPivot(base));
330 
331  /*
332  * Copy heap TID(s) from new base tuple for new candidate posting list
333  * into working state's array
334  */
335  if (!BTreeTupleIsPosting(base))
336  {
337  memcpy(state->htids, &base->t_tid, sizeof(ItemPointerData));
338  state->nhtids = 1;
339  state->basetupsize = IndexTupleSize(base);
340  }
341  else
342  {
343  int nposting;
344 
345  nposting = BTreeTupleGetNPosting(base);
346  memcpy(state->htids, BTreeTupleGetPosting(base),
347  sizeof(ItemPointerData) * nposting);
348  state->nhtids = nposting;
349  /* basetupsize should not include existing posting list */
 350  state->basetupsize = BTreeTupleGetPostingOffset(base);
 351  }
352 
353  /*
354  * Save new base tuple itself -- it'll be needed if we actually create a
355  * new posting list from new pending posting list.
356  *
357  * Must maintain physical size of all existing tuples (including line
358  * pointer overhead) so that we can calculate space savings on page.
359  */
360  state->nitems = 1;
361  state->base = base;
362  state->baseoff = baseoff;
363  state->phystupsize = MAXALIGN(IndexTupleSize(base)) + sizeof(ItemIdData);
364  /* Also save baseoff in pending state for interval */
365  state->intervals[state->nintervals].baseoff = state->baseoff;
366 }
367 
368 /*
369  * Save itup heap TID(s) into pending posting list where possible.
370  *
371  * Returns bool indicating if the pending posting list managed by state now
372  * includes itup's heap TID(s).
373  */
374 bool
375 _bt_dedup_save_htid(BTDedupState state, IndexTuple itup)
 376 {
377  int nhtids;
378  ItemPointer htids;
379  Size mergedtupsz;
380 
381  Assert(!BTreeTupleIsPivot(itup));
382 
383  if (!BTreeTupleIsPosting(itup))
384  {
385  nhtids = 1;
386  htids = &itup->t_tid;
387  }
388  else
389  {
390  nhtids = BTreeTupleGetNPosting(itup);
391  htids = BTreeTupleGetPosting(itup);
392  }
393 
394  /*
395  * Don't append (have caller finish pending posting list as-is) if
396  * appending heap TID(s) from itup would put us over maxpostingsize limit.
397  *
398  * This calculation needs to match the code used within _bt_form_posting()
399  * for new posting list tuples.
400  */
401  mergedtupsz = MAXALIGN(state->basetupsize +
402  (state->nhtids + nhtids) * sizeof(ItemPointerData));
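 /*
  * Worked example (illustrative, not from the original source): merging a
  * tuple that carries a single heap TID into a pending list whose base
  * tuple has an 8-byte key (basetupsize = 16, including the 8-byte
  * IndexTupleData header) and two saved TIDs gives
  * mergedtupsz = MAXALIGN(16 + 3 * 6) = 40 bytes.
  */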
403 
404  if (mergedtupsz > state->maxpostingsize)
405  {
406  /*
407  * Count this as an oversized item for single value strategy, though
408  * only when there are 50 TIDs in the final posting list tuple. This
409  * limit (which is fairly arbitrary) avoids confusion about how many
410  * 1/6 of a page tuples have been encountered/created by the current
411  * deduplication pass.
412  *
413  * Note: We deliberately don't consider which deduplication pass
414  * merged together tuples to create this item (could be a previous
415  * deduplication pass, or current pass). See _bt_do_singleval()
416  * comments.
417  */
418  if (state->nhtids > 50)
419  state->nmaxitems++;
420 
421  return false;
422  }
423 
424  /*
425  * Save heap TIDs to pending posting list tuple -- itup can be merged into
426  * pending posting list
427  */
428  state->nitems++;
429  memcpy(state->htids + state->nhtids, htids,
430  sizeof(ItemPointerData) * nhtids);
431  state->nhtids += nhtids;
432  state->phystupsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
433 
434  return true;
435 }
436 
437 /*
438  * Finalize pending posting list tuple, and add it to the page. Final tuple
439  * is based on saved base tuple, and saved list of heap TIDs.
440  *
441  * Returns space saving from deduplicating to make a new posting list tuple.
442  * Note that this includes line pointer overhead. This is zero in the case
443  * where no deduplication was possible.
444  */
445 Size
446 _bt_dedup_finish_pending(Page newpage, BTDedupState state)
 447 {
448  OffsetNumber tupoff;
449  Size tuplesz;
450  Size spacesaving;
451 
452  Assert(state->nitems > 0);
453  Assert(state->nitems <= state->nhtids);
454  Assert(state->intervals[state->nintervals].baseoff == state->baseoff);
455 
456  tupoff = OffsetNumberNext(PageGetMaxOffsetNumber(newpage));
457  if (state->nitems == 1)
458  {
459  /* Use original, unchanged base tuple */
460  tuplesz = IndexTupleSize(state->base);
461  if (PageAddItem(newpage, (Item) state->base, tuplesz, tupoff,
462  false, false) == InvalidOffsetNumber)
463  elog(ERROR, "deduplication failed to add tuple to page");
464 
465  spacesaving = 0;
466  }
467  else
468  {
469  IndexTuple final;
470 
471  /* Form a tuple with a posting list */
472  final = _bt_form_posting(state->base, state->htids, state->nhtids);
473  tuplesz = IndexTupleSize(final);
474  Assert(tuplesz <= state->maxpostingsize);
475 
476  /* Save final number of items for posting list */
477  state->intervals[state->nintervals].nitems = state->nitems;
478 
479  Assert(tuplesz == MAXALIGN(IndexTupleSize(final)));
480  if (PageAddItem(newpage, (Item) final, tuplesz, tupoff, false,
481  false) == InvalidOffsetNumber)
482  elog(ERROR, "deduplication failed to add tuple to page");
483 
484  pfree(final);
485  spacesaving = state->phystupsize - (tuplesz + sizeof(ItemIdData));
486  /* Increment nintervals, since we wrote a new posting list tuple */
487  state->nintervals++;
488  Assert(spacesaving > 0 && spacesaving < BLCKSZ);
489  }
490 
491  /* Reset state for next pending posting list */
492  state->nhtids = 0;
493  state->nitems = 0;
494  state->phystupsize = 0;
495 
496  return spacesaving;
497 }
498 
499 /*
500  * Determine if page non-pivot tuples (data items) are all duplicates of the
501  * same value -- if they are, deduplication's "single value" strategy should
502  * be applied. The general goal of this strategy is to ensure that
503  * nbtsplitloc.c (which uses its own single value strategy) will find a useful
504  * split point as further duplicates are inserted, and successive rightmost
505  * page splits occur among pages that store the same duplicate value. When
506  * the page finally splits, it should end up BTREE_SINGLEVAL_FILLFACTOR% full,
507  * just like it would if deduplication were disabled.
508  *
509  * We expect that affected workloads will require _several_ single value
510  * strategy deduplication passes (over a page that only stores duplicates)
511  * before the page is finally split. The first deduplication pass should only
512  * find regular non-pivot tuples. Later deduplication passes will find
513  * existing maxpostingsize-capped posting list tuples, which must be skipped
514  * over. The penultimate pass is generally the first pass that actually
515  * reaches _bt_singleval_fillfactor(), and so will deliberately leave behind a
516  * few untouched non-pivot tuples. The final deduplication pass won't free
517  * any space -- it will skip over everything without merging anything (it
518  * retraces the steps of the penultimate pass).
519  *
520  * Fortunately, having several passes isn't too expensive. Each pass (after
521  * the first pass) won't spend many cycles on the large posting list tuples
522  * left by previous passes. Each pass will find a large contiguous group of
523  * smaller duplicate tuples to merge together at the end of the page.
524  *
525  * Note: We deliberately don't bother checking if the high key is a distinct
526  * value (prior to the TID tiebreaker column) before proceeding, unlike
527  * nbtsplitloc.c. Its single value strategy only gets applied on the
528  * rightmost page of duplicates of the same value (other leaf pages full of
529  * duplicates will get a simple 50:50 page split instead of splitting towards
530  * the end of the page). There is little point in making the same distinction
531  * here.
532  */
533 static bool
534 _bt_do_singleval(Relation rel, Page page, BTDedupState state,
 535  OffsetNumber minoff, IndexTuple newitem)
536 {
537  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
538  ItemId itemid;
539  IndexTuple itup;
540 
541  itemid = PageGetItemId(page, minoff);
542  itup = (IndexTuple) PageGetItem(page, itemid);
543 
544  if (_bt_keep_natts_fast(rel, newitem, itup) > nkeyatts)
545  {
546  itemid = PageGetItemId(page, PageGetMaxOffsetNumber(page));
547  itup = (IndexTuple) PageGetItem(page, itemid);
548 
549  if (_bt_keep_natts_fast(rel, newitem, itup) > nkeyatts)
550  return true;
551  }
552 
553  return false;
554 }
555 
556 /*
557  * Lower maxpostingsize when using "single value" strategy, to avoid a sixth
558  * and final maxpostingsize-capped tuple. The sixth and final posting list
559  * tuple will end up somewhat smaller than the first five. (Note: The first
560  * five tuples could actually just be very large duplicate tuples that
561  * couldn't be merged together at all. Deduplication will simply not modify
562  * the page when that happens.)
563  *
564  * When there are six posting lists on the page (after current deduplication
565  * pass goes on to create/observe a sixth very large tuple), caller should end
566  * its deduplication pass. It isn't useful to try to deduplicate items that
567  * are supposed to end up on the new right sibling page following the
568  * anticipated page split. A future deduplication pass of future right
569  * sibling page might take care of it. (This is why the first single value
570  * strategy deduplication pass for a given leaf page will generally find only
571  * plain non-pivot tuples -- see _bt_do_singleval() comments.)
572  */
573 static void
574 _bt_singleval_fillfactor(Page page, BTDedupState state, Size newitemsz)
 575 {
576  Size leftfree;
577  int reduction;
578 
579  /* This calculation needs to match nbtsplitloc.c */
580  leftfree = PageGetPageSize(page) - SizeOfPageHeaderData -
581  MAXALIGN(sizeof(BTPageOpaqueData));
582  /* Subtract size of new high key (includes pivot heap TID space) */
583  leftfree -= newitemsz + MAXALIGN(sizeof(ItemPointerData));
584 
585  /*
586  * Reduce maxpostingsize by an amount equal to target free space on left
587  * half of page
588  */
589  reduction = leftfree * ((100 - BTREE_SINGLEVAL_FILLFACTOR) / 100.0);
590  if (state->maxpostingsize > reduction)
591  state->maxpostingsize -= reduction;
592  else
593  state->maxpostingsize = 0;
594 }
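/*
 * Illustrative arithmetic (editor's sketch, not from the original source):
 * with the default BTREE_SINGLEVAL_FILLFACTOR of 96 and an 8KB page, the
 * reduction computed above is about 4% of leftfree, i.e. a few hundred
 * bytes, so the sixth posting list tuple ends up noticeably smaller than
 * the five maxpostingsize-capped tuples before it.
 */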
595 
596 /*
597  * Build a posting list tuple based on caller's "base" index tuple and list of
598  * heap TIDs. When nhtids == 1, builds a standard non-pivot tuple without a
599  * posting list. (Posting list tuples can never have a single heap TID, partly
600  * because that ensures that deduplication always reduces final MAXALIGN()'d
601  * size of entire tuple.)
602  *
603  * Convention is that posting list starts at a MAXALIGN()'d offset (rather
604  * than a SHORTALIGN()'d offset), in line with the approach taken when
605  * appending a heap TID to new pivot tuple/high key during suffix truncation.
606  * This sometimes wastes a little space that was only needed as alignment
607  * padding in the original tuple. Following this convention simplifies the
608  * space accounting used when deduplicating a page (the same convention
609  * simplifies the accounting for choosing a point to split a page at).
610  *
611  * Note: Caller's "htids" array must be unique and already in ascending TID
612  * order. Any existing heap TIDs from "base" won't automatically appear in
613  * returned posting list tuple (they must be included in htids array.)
614  */
615 IndexTuple
616 _bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
617 {
618  uint32 keysize,
619  newsize;
620  IndexTuple itup;
621 
622  if (BTreeTupleIsPosting(base))
623  keysize = BTreeTupleGetPostingOffset(base);
624  else
625  keysize = IndexTupleSize(base);
626 
627  Assert(!BTreeTupleIsPivot(base));
628  Assert(nhtids > 0 && nhtids <= PG_UINT16_MAX);
629  Assert(keysize == MAXALIGN(keysize));
630 
631  /* Determine final size of new tuple */
632  if (nhtids > 1)
633  newsize = MAXALIGN(keysize +
634  nhtids * sizeof(ItemPointerData));
635  else
636  newsize = keysize;
637 
638  Assert(newsize <= INDEX_SIZE_MASK);
639  Assert(newsize == MAXALIGN(newsize));
640 
641  /* Allocate memory using palloc0() (matches index_form_tuple()) */
642  itup = palloc0(newsize);
643  memcpy(itup, base, keysize);
644  itup->t_info &= ~INDEX_SIZE_MASK;
645  itup->t_info |= newsize;
646  if (nhtids > 1)
647  {
648  /* Form posting list tuple */
649  BTreeTupleSetPosting(itup, nhtids, keysize);
650  memcpy(BTreeTupleGetPosting(itup), htids,
651  sizeof(ItemPointerData) * nhtids);
652  Assert(_bt_posting_valid(itup));
653  }
654  else
655  {
656  /* Form standard non-pivot tuple */
657  itup->t_info &= ~INDEX_ALT_TID_MASK;
658  ItemPointerCopy(htids, &itup->t_tid);
 659  Assert(ItemPointerIsValid(&itup->t_tid));
 660  }
661 
662  return itup;
663 }
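/*
 * Illustrative usage (hypothetical, not part of nbtdedup.c): forming a
 * posting list tuple from two equal-keyed non-pivot tuples "dup1" and
 * "dup2", whose heap TIDs are already in ascending order.
 */
#ifdef NOT_USED
static IndexTuple
example_form_posting(IndexTuple dup1, IndexTuple dup2)
{
	ItemPointerData htids[2];

	Assert(ItemPointerCompare(&dup1->t_tid, &dup2->t_tid) < 0);
	ItemPointerCopy(&dup1->t_tid, &htids[0]);
	ItemPointerCopy(&dup2->t_tid, &htids[1]);

	/* dup1 is the base tuple; its own TID must appear in the htids array */
	return _bt_form_posting(dup1, htids, 2);
}
#endif							/* NOT_USED */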
664 
665 /*
666  * Generate a replacement tuple by "updating" a posting list tuple so that it
667  * no longer has TIDs that need to be deleted.
668  *
669  * Used by VACUUM. Caller's vacposting argument points to the existing
670  * posting list tuple to be updated.
671  *
672  * On return, caller's vacposting argument will point to final "updated"
673  * tuple, which will be palloc()'d in caller's memory context.
674  */
675 void
676 _bt_update_posting(BTVacuumPosting vacposting)
 677 {
678  IndexTuple origtuple = vacposting->itup;
679  uint32 keysize,
680  newsize;
681  IndexTuple itup;
682  int nhtids;
683  int ui,
684  d;
685  ItemPointer htids;
686 
687  nhtids = BTreeTupleGetNPosting(origtuple) - vacposting->ndeletedtids;
688 
689  Assert(_bt_posting_valid(origtuple));
690  Assert(nhtids > 0 && nhtids < BTreeTupleGetNPosting(origtuple));
691 
692  /*
693  * Determine final size of new tuple.
694  *
695  * This calculation needs to match the code used within _bt_form_posting()
696  * for new posting list tuples. We avoid calling _bt_form_posting() here
697  * to save ourselves a second memory allocation for a htids workspace.
698  */
699  keysize = BTreeTupleGetPostingOffset(origtuple);
700  if (nhtids > 1)
701  newsize = MAXALIGN(keysize +
702  nhtids * sizeof(ItemPointerData));
703  else
704  newsize = keysize;
705 
706  Assert(newsize <= INDEX_SIZE_MASK);
707  Assert(newsize == MAXALIGN(newsize));
708 
709  /* Allocate memory using palloc0() (matches index_form_tuple()) */
710  itup = palloc0(newsize);
711  memcpy(itup, origtuple, keysize);
712  itup->t_info &= ~INDEX_SIZE_MASK;
713  itup->t_info |= newsize;
714 
715  if (nhtids > 1)
716  {
717  /* Form posting list tuple */
718  BTreeTupleSetPosting(itup, nhtids, keysize);
719  htids = BTreeTupleGetPosting(itup);
720  }
721  else
722  {
723  /* Form standard non-pivot tuple */
724  itup->t_info &= ~INDEX_ALT_TID_MASK;
725  htids = &itup->t_tid;
726  }
727 
728  ui = 0;
729  d = 0;
730  for (int i = 0; i < BTreeTupleGetNPosting(origtuple); i++)
731  {
732  if (d < vacposting->ndeletedtids && vacposting->deletetids[d] == i)
733  {
734  d++;
735  continue;
736  }
737  htids[ui++] = *BTreeTupleGetPostingN(origtuple, i);
738  }
739  Assert(ui == nhtids);
740  Assert(d == vacposting->ndeletedtids);
741  Assert(nhtids == 1 || _bt_posting_valid(itup));
742  Assert(nhtids > 1 || ItemPointerIsValid(&itup->t_tid));
743 
744  /* vacposting arg's itup will now point to updated version */
745  vacposting->itup = itup;
746 }
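/*
 * Worked example (illustrative, not from the original source): if origtuple
 * holds four heap TIDs and vacposting->ndeletedtids is 2 with
 * deletetids = {0, 2}, the loop above copies only the TIDs at positions 1
 * and 3, leaving a two-TID posting list tuple in the same heap TID order.
 */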
747 
748 /*
749  * Prepare for a posting list split by swapping heap TID in newitem with heap
750  * TID from original posting list (the 'oposting' heap TID located at offset
751  * 'postingoff'). Modifies newitem, so caller should pass their own private
752  * copy that can safely be modified.
753  *
754  * Returns new posting list tuple, which is palloc()'d in caller's context.
755  * This is guaranteed to be the same size as 'oposting'. Modified newitem is
756  * what caller actually inserts. (This happens inside the same critical
757  * section that performs an in-place update of old posting list using new
758  * posting list returned here.)
759  *
760  * While the keys from newitem and oposting must be opclass equal, and must
761  * generate identical output when run through the underlying type's output
762  * function, it doesn't follow that their representations match exactly.
763  * Caller must avoid assuming that there can't be representational differences
764  * that make datums from oposting bigger or smaller than the corresponding
765  * datums from newitem. For example, differences in TOAST input state might
766  * break a faulty assumption about tuple size (the executor is entitled to
767  * apply TOAST compression based on its own criteria). It also seems possible
768  * that further representational variation will be introduced in the future,
769  * in order to support nbtree features like page-level prefix compression.
770  *
771  * See nbtree/README for details on the design of posting list splits.
772  */
773 IndexTuple
774 _bt_swap_posting(IndexTuple newitem, IndexTuple oposting, int postingoff)
775 {
776  int nhtids;
777  char *replacepos;
778  char *replaceposright;
779  Size nmovebytes;
780  IndexTuple nposting;
781 
782  nhtids = BTreeTupleGetNPosting(oposting);
783  Assert(_bt_posting_valid(oposting));
784  Assert(postingoff > 0 && postingoff < nhtids);
785 
786  /*
787  * Move item pointers in posting list to make a gap for the new item's
788  * heap TID. We shift TIDs one place to the right, losing original
789  * rightmost TID. (nmovebytes must not include TIDs to the left of
790  * postingoff, nor the existing rightmost/max TID that gets overwritten.)
791  */
792  nposting = CopyIndexTuple(oposting);
793  replacepos = (char *) BTreeTupleGetPostingN(nposting, postingoff);
794  replaceposright = (char *) BTreeTupleGetPostingN(nposting, postingoff + 1);
795  nmovebytes = (nhtids - postingoff - 1) * sizeof(ItemPointerData);
796  memmove(replaceposright, replacepos, nmovebytes);
797 
798  /* Fill the gap at postingoff with TID of new item (original new TID) */
799  Assert(!BTreeTupleIsPivot(newitem) && !BTreeTupleIsPosting(newitem));
800  ItemPointerCopy(&newitem->t_tid, (ItemPointer) replacepos);
801 
802  /* Now copy oposting's rightmost/max TID into new item (final new TID) */
803  ItemPointerCopy(BTreeTupleGetMaxHeapTID(oposting), &newitem->t_tid);
804 
 805  Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(nposting),
 806  BTreeTupleGetHeapTID(newitem)) < 0);
807  Assert(_bt_posting_valid(nposting));
808 
809  return nposting;
810 }
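/*
 * Worked example (illustrative, not from the original source): if oposting
 * holds heap TIDs (10,1), (10,2) and (10,4), and newitem arrives with TID
 * (10,3), caller determines postingoff = 2.  The returned nposting then
 * holds (10,1), (10,2) and (10,3), while newitem is rewritten to carry
 * (10,4).  Both tuples remain in heap TID order, and nposting is exactly
 * the same size as oposting.
 */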
811 
812 /*
813  * Verify posting list invariants for "posting", which must be a posting list
814  * tuple. Used within assertions.
815  */
816 #ifdef USE_ASSERT_CHECKING
817 static bool
818 _bt_posting_valid(IndexTuple posting)
819 {
820  ItemPointerData last;
821  ItemPointer htid;
822 
823  if (!BTreeTupleIsPosting(posting) || BTreeTupleGetNPosting(posting) < 2)
824  return false;
825 
826  /* Remember first heap TID for loop */
827  ItemPointerCopy(BTreeTupleGetHeapTID(posting), &last);
828  if (!ItemPointerIsValid(&last))
829  return false;
830 
831  /* Iterate, starting from second TID */
832  for (int i = 1; i < BTreeTupleGetNPosting(posting); i++)
833  {
834  htid = BTreeTupleGetPostingN(posting, i);
835 
836  if (!ItemPointerIsValid(htid))
837  return false;
838  if (ItemPointerCompare(htid, &last) <= 0)
839  return false;
840  ItemPointerCopy(htid, &last);
841  }
842 
843  return true;
844 }
845 #endif