PostgreSQL Source Code (git master)
nbtdedup.c
1 /*-------------------------------------------------------------------------
2  *
3  * nbtdedup.c
4  * Deduplicate or bottom-up delete items in Postgres btrees.
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/access/nbtree/nbtdedup.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/nbtree.h"
18 #include "access/nbtxlog.h"
19 #include "miscadmin.h"
20 #include "utils/rel.h"
21 
22 static void _bt_bottomupdel_finish_pending(Page page, BTDedupState state,
23  TM_IndexDeleteOp *delstate);
24 static bool _bt_do_singleval(Relation rel, Page page, BTDedupState state,
25  OffsetNumber minoff, IndexTuple newitem);
26 static void _bt_singleval_fillfactor(Page page, BTDedupState state,
27  Size newitemsz);
28 #ifdef USE_ASSERT_CHECKING
29 static bool _bt_posting_valid(IndexTuple posting);
30 #endif
31 
32 /*
33  * Perform a deduplication pass.
34  *
35  * The general approach taken here is to perform as much deduplication as
36  * possible to free as much space as possible. Note, however, that "single
37  * value" strategy is sometimes used for !checkingunique callers, in which
38  * case deduplication will leave a few tuples untouched at the end of the
39  * page. The general idea is to prepare the page for an anticipated page
40  * split that uses nbtsplitloc.c's "single value" strategy to determine a
41  * split point. (There is no reason to deduplicate items that will end up on
42  * the right half of the page after the anticipated page split; better to
43  * handle those if and when the anticipated right half page gets its own
44  * deduplication pass, following further inserts of duplicates.)
45  *
46  * The page will have to be split if we cannot successfully free at least
47  * newitemsz (we also need space for newitem's line pointer, which isn't
48  * included in caller's newitemsz).
49  *
50  * Note: Caller should have already deleted all existing items with their
51  * LP_DEAD bits set.
52  */
53 void
54 _bt_dedup_pass(Relation rel, Buffer buf, Relation heapRel, IndexTuple newitem,
55  Size newitemsz, bool checkingunique)
56 {
57  OffsetNumber offnum,
58  minoff,
59  maxoff;
60  Page page = BufferGetPage(buf);
61  BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
62  Page newpage;
63  BTDedupState state;
64  Size pagesaving = 0;
65  bool singlevalstrat = false;
66  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
67 
68  /* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
69  newitemsz += sizeof(ItemIdData);
70 
71  /*
72  * Initialize deduplication state.
73  *
74  * It would be possible for maxpostingsize (limit on posting list tuple
75  * size) to be set to one third of the page. However, it seems like a
76  * good idea to limit the size of posting lists to one sixth of a page.
77  * That ought to leave us with a good split point when pages full of
78  * duplicates can be split several times.
79  */
80  state = (BTDedupState) palloc(sizeof(BTDedupStateData));
81  state->deduplicate = true;
82  state->nmaxitems = 0;
83  state->maxpostingsize = Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK);
84  /* Metadata about base tuple of current pending posting list */
85  state->base = NULL;
86  state->baseoff = InvalidOffsetNumber;
87  state->basetupsize = 0;
88  /* Metadata about current pending posting list TIDs */
89  state->htids = palloc(state->maxpostingsize);
90  state->nhtids = 0;
91  state->nitems = 0;
92  /* Size of all physical tuples to be replaced by pending posting list */
93  state->phystupsize = 0;
94  /* nintervals should be initialized to zero */
95  state->nintervals = 0;
96 
97  minoff = P_FIRSTDATAKEY(opaque);
98  maxoff = PageGetMaxOffsetNumber(page);
99 
100  /* Determine if "single value" strategy should be used */
101  if (!checkingunique)
102  singlevalstrat = _bt_do_singleval(rel, page, state, minoff, newitem);
103 
104  /*
105  * Deduplicate items from page, and write them to newpage.
106  *
107  * Copy the original page's LSN into newpage copy. This will become the
108  * updated version of the page. We need this because XLogInsert will
109  * examine the LSN and possibly dump it in a page image.
110  */
111  newpage = PageGetTempPageCopySpecial(page);
112  PageSetLSN(newpage, PageGetLSN(page));
113 
114  /* Copy high key, if any */
115  if (!P_RIGHTMOST(opaque))
116  {
117  ItemId hitemid = PageGetItemId(page, P_HIKEY);
118  Size hitemsz = ItemIdGetLength(hitemid);
119  IndexTuple hitem = (IndexTuple) PageGetItem(page, hitemid);
120 
121  if (PageAddItem(newpage, (Item) hitem, hitemsz, P_HIKEY,
122  false, false) == InvalidOffsetNumber)
123  elog(ERROR, "deduplication failed to add highkey");
124  }
125 
126  for (offnum = minoff;
127  offnum <= maxoff;
128  offnum = OffsetNumberNext(offnum))
129  {
130  ItemId itemid = PageGetItemId(page, offnum);
131  IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
132 
133  Assert(!ItemIdIsDead(itemid));
134 
135  if (offnum == minoff)
136  {
137  /*
138  * No previous/base tuple for the data item -- use the data item
139  * as base tuple of pending posting list
140  */
141  _bt_dedup_start_pending(state, itup, offnum);
142  }
143  else if (state->deduplicate &&
144  _bt_keep_natts_fast(rel, state->base, itup) > nkeyatts &&
145  _bt_dedup_save_htid(state, itup))
146  {
147  /*
148  * Tuple is equal to base tuple of pending posting list. Heap
149  * TID(s) for itup have been saved in state.
150  */
151  }
152  else
153  {
154  /*
155  * Tuple is not equal to pending posting list tuple, or
156  * _bt_dedup_save_htid() opted to not merge current item into
157  * pending posting list for some other reason (e.g., adding more
158  * TIDs would have caused posting list to exceed current
159  * maxpostingsize).
160  *
161  * If state contains pending posting list with more than one item,
162  * form new posting tuple, and actually update the page. Else
163  * reset the state and move on without modifying the page.
164  */
165  pagesaving += _bt_dedup_finish_pending(newpage, state);
166 
167  if (singlevalstrat)
168  {
169  /*
170  * Single value strategy's extra steps.
171  *
172  * Lower maxpostingsize for sixth and final large posting list
173  * tuple at the point where 5 maxpostingsize-capped tuples
174  * have either been formed or observed.
175  *
176  * When a sixth maxpostingsize-capped item is formed/observed,
177  * stop merging together tuples altogether. The few tuples
178  * that remain at the end of the page won't be merged together
179  * at all (at least not until after a future page split takes
180  * place).
181  */
182  if (state->nmaxitems == 5)
183  _bt_singleval_fillfactor(page, state, newitemsz);
184  else if (state->nmaxitems == 6)
185  {
186  state->deduplicate = false;
187  singlevalstrat = false; /* won't be back here */
188  }
189  }
190 
191  /* itup starts new pending posting list */
192  _bt_dedup_start_pending(state, itup, offnum);
193  }
194  }
195 
196  /* Handle the last item */
197  pagesaving += _bt_dedup_finish_pending(newpage, state);
198 
199  /*
200  * If no items suitable for deduplication were found, newpage must be
201  * exactly the same as the original page, so just return from function.
202  *
203  * We could determine whether or not to proceed on the basis of the space
204  * savings being sufficient to avoid an immediate page split instead. We
205  * don't do that because there is some small value in nbtsplitloc.c always
206  * operating against a page that is fully deduplicated (apart from
207  * newitem). Besides, most of the cost has already been paid.
208  */
209  if (state->nintervals == 0)
210  {
211  /* cannot leak memory here */
212  pfree(newpage);
213  pfree(state->htids);
214  pfree(state);
215  return;
216  }
217 
218  /*
219  * By here, it's clear that deduplication will definitely go ahead.
220  *
221  * Clear the BTP_HAS_GARBAGE page flag. The index must be a heapkeyspace
222  * index, and as such we'll never pay attention to BTP_HAS_GARBAGE anyway.
223  * But keep things tidy.
224  */
225  if (P_HAS_GARBAGE(opaque))
226  {
227  BTPageOpaque nopaque = (BTPageOpaque) PageGetSpecialPointer(newpage);
228 
229  nopaque->btpo_flags &= ~BTP_HAS_GARBAGE;
230  }
231 
232  START_CRIT_SECTION();
233 
234  PageRestoreTempPage(newpage, page);
235  MarkBufferDirty(buf);
236 
237  /* XLOG stuff */
238  if (RelationNeedsWAL(rel))
239  {
240  XLogRecPtr recptr;
241  xl_btree_dedup xlrec_dedup;
242 
243  xlrec_dedup.nintervals = state->nintervals;
244 
245  XLogBeginInsert();
246  XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
247  XLogRegisterData((char *) &xlrec_dedup, SizeOfBtreeDedup);
248 
249  /*
250  * The intervals array is not in the buffer, but pretend that it is.
251  * When XLogInsert stores the whole buffer, the array need not be
252  * stored too.
253  */
254  XLogRegisterBufData(0, (char *) state->intervals,
255  state->nintervals * sizeof(BTDedupInterval));
256 
257  recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DEDUP);
258 
259  PageSetLSN(page, recptr);
260  }
261 
262  END_CRIT_SECTION();
263 
264  /* Local space accounting should agree with page accounting */
265  Assert(pagesaving < newitemsz || PageGetExactFreeSpace(page) >= newitemsz);
266 
267  /* cannot leak memory here */
268  pfree(state->htids);
269  pfree(state);
270 }
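
/*
 * Illustrative sketch, not part of the original nbtdedup.c: it restates the
 * maxpostingsize arithmetic used by _bt_dedup_pass() above.  BTMaxItemSize()
 * works out to roughly a third of the page, so halving it caps posting list
 * tuples at roughly a sixth of a page (a little under 1.4kB with the default
 * 8kB BLCKSZ), which is what the initialization comment refers to.  The
 * helper name is hypothetical.
 */
#ifdef NOT_USED
static Size
sketch_max_posting_size(Page page)
{
	/* Same expression as the assignment in _bt_dedup_pass() */
	return Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK);
}
#endif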
271 
272 /*
273  * Perform bottom-up index deletion pass.
274  *
275  * See if duplicate index tuples (plus certain nearby tuples) are eligible to
276  * be deleted via bottom-up index deletion. The high level goal here is to
277  * entirely prevent "unnecessary" page splits caused by MVCC version churn
278  * from UPDATEs (when the UPDATEs don't logically modify any of the columns
279  * covered by the 'rel' index). This is qualitative, not quantitative -- we
280  * do not particularly care about once-off opportunities to delete many index
281  * tuples together.
282  *
283  * See nbtree/README for details on the design of nbtree bottom-up deletion.
284  * See access/tableam.h for a description of how we're expected to cooperate
285  * with the tableam.
286  *
287  * Returns true on success, in which case caller can assume page split will be
288  * avoided for a reasonable amount of time. Returns false when caller should
289  * deduplicate the page (if possible at all).
290  *
291  * Note: Occasionally we return true despite failing to delete enough items to
292  * avoid a split. This makes caller skip deduplication and go split the page
293  * right away. Our return value is always just advisory information.
294  *
295  * Note: Caller should have already deleted all existing items with their
296  * LP_DEAD bits set.
297  */
298 bool
299 _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel,
300  Size newitemsz)
301 {
302  OffsetNumber offnum,
303  minoff,
304  maxoff;
305  Page page = BufferGetPage(buf);
306  BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
307  BTDedupState state;
308  TM_IndexDeleteOp delstate;
309  bool neverdedup;
310  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
311 
312  /* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
313  newitemsz += sizeof(ItemIdData);
314 
315  /* Initialize deduplication state */
316  state = (BTDedupState) palloc(sizeof(BTDedupStateData));
317  state->deduplicate = true;
318  state->nmaxitems = 0;
319  state->maxpostingsize = BLCKSZ; /* We're not really deduplicating */
320  state->base = NULL;
321  state->baseoff = InvalidOffsetNumber;
322  state->basetupsize = 0;
323  state->htids = palloc(state->maxpostingsize);
324  state->nhtids = 0;
325  state->nitems = 0;
326  state->phystupsize = 0;
327  state->nintervals = 0;
328 
329  /*
330  * Initialize tableam state that describes bottom-up index deletion
331  * operation.
332  *
333  * We'll go on to ask the tableam to search for TIDs whose index tuples we
334  * can safely delete. The tableam will search until our leaf page space
335  * target is satisfied, or until the cost of continuing with the tableam
336  * operation seems too high. It focuses its efforts on TIDs associated
337  * with duplicate index tuples that we mark "promising".
338  *
339  * This space target is a little arbitrary. The tableam must be able to
340  * keep the costs and benefits in balance. We provide the tableam with
341  * exhaustive information about what might work, without directly
342  * concerning ourselves with avoiding work during the tableam call. Our
343  * role in costing the bottom-up deletion process is strictly advisory.
344  */
345  delstate.bottomup = true;
346  delstate.bottomupfreespace = Max(BLCKSZ / 16, newitemsz);
347  delstate.ndeltids = 0;
348  delstate.deltids = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexDelete));
349  delstate.status = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexStatus));
350 
351  minoff = P_FIRSTDATAKEY(opaque);
352  maxoff = PageGetMaxOffsetNumber(page);
353  for (offnum = minoff;
354  offnum <= maxoff;
355  offnum = OffsetNumberNext(offnum))
356  {
357  ItemId itemid = PageGetItemId(page, offnum);
358  IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
359 
360  Assert(!ItemIdIsDead(itemid));
361 
362  if (offnum == minoff)
363  {
364  /* itup starts first pending interval */
365  _bt_dedup_start_pending(state, itup, offnum);
366  }
367  else if (_bt_keep_natts_fast(rel, state->base, itup) > nkeyatts &&
368  _bt_dedup_save_htid(state, itup))
369  {
370  /* Tuple is equal; just added its TIDs to pending interval */
371  }
372  else
373  {
374  /* Finalize interval -- move its TIDs to delete state */
375  _bt_bottomupdel_finish_pending(page, state, &delstate);
376 
377  /* itup starts new pending interval */
378  _bt_dedup_start_pending(state, itup, offnum);
379  }
380  }
381  /* Finalize final interval -- move its TIDs to delete state */
382  _bt_bottomupdel_finish_pending(page, state, &delstate);
383 
384  /*
385  * We don't give up now in the event of having few (or even zero)
386  * promising tuples for the tableam because it's not up to us as the index
387  * AM to manage costs (note that the tableam might have heuristics of its
388  * own that work out what to do). We should at least avoid having our
389  * caller do a useless deduplication pass after we return in the event of
390  * zero promising tuples, though.
391  */
392  neverdedup = false;
393  if (state->nintervals == 0)
394  neverdedup = true;
395 
396  pfree(state->htids);
397  pfree(state);
398 
399  /* Ask tableam which TIDs are deletable, then physically delete them */
400  _bt_delitems_delete_check(rel, buf, heapRel, &delstate);
401 
402  pfree(delstate.deltids);
403  pfree(delstate.status);
404 
405  /* Report "success" to caller unconditionally to avoid deduplication */
406  if (neverdedup)
407  return true;
408 
409  /* Don't dedup when we won't end up back here any time soon anyway */
410  return PageGetExactFreeSpace(page) >= Max(BLCKSZ / 24, newitemsz);
411 }
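
/*
 * Illustrative sketch, not part of the original nbtdedup.c: the two free
 * space thresholds used by _bt_bottomupdel_pass() above.  The tableam is
 * asked to free at least Max(BLCKSZ / 16, newitemsz) bytes, while a
 * deduplication pass is only recommended to the caller (by returning false)
 * when less than Max(BLCKSZ / 24, newitemsz) is free afterwards.  The helper
 * below is hypothetical and only restates that arithmetic.
 */
#ifdef NOT_USED
static void
sketch_bottomup_thresholds(Size newitemsz)
{
	Size		freetarget = Max(BLCKSZ / 16, newitemsz);	/* BLCKSZ / 16 == 512 at 8kB */
	Size		dedupfloor = Max(BLCKSZ / 24, newitemsz);	/* BLCKSZ / 24 == 341 at 8kB */

	/* The target given to the tableam is never below the dedup threshold */
	Assert(freetarget >= dedupfloor);
}
#endif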
412 
413 /*
414  * Create a new pending posting list tuple based on caller's base tuple.
415  *
416  * Every tuple processed by deduplication either becomes the base tuple for a
417  * posting list, or gets its heap TID(s) accepted into a pending posting list.
418  * A tuple that starts out as the base tuple for a posting list will only
419  * actually be rewritten within _bt_dedup_finish_pending() when it turns out
420  * that there are duplicates that can be merged into the base tuple.
421  */
422 void
423 _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
424  OffsetNumber baseoff)
425 {
426  Assert(state->nhtids == 0);
427  Assert(state->nitems == 0);
428  Assert(!BTreeTupleIsPivot(base));
429 
430  /*
431  * Copy heap TID(s) from new base tuple for new candidate posting list
432  * into working state's array
433  */
434  if (!BTreeTupleIsPosting(base))
435  {
436  memcpy(state->htids, &base->t_tid, sizeof(ItemPointerData));
437  state->nhtids = 1;
438  state->basetupsize = IndexTupleSize(base);
439  }
440  else
441  {
442  int nposting;
443 
444  nposting = BTreeTupleGetNPosting(base);
445  memcpy(state->htids, BTreeTupleGetPosting(base),
446  sizeof(ItemPointerData) * nposting);
447  state->nhtids = nposting;
448  /* basetupsize should not include existing posting list */
449  state->basetupsize = BTreeTupleGetPostingOffset(base);
450  }
451 
452  /*
453  * Save new base tuple itself -- it'll be needed if we actually create a
454  * new posting list from new pending posting list.
455  *
456  * Must maintain physical size of all existing tuples (including line
457  * pointer overhead) so that we can calculate space savings on page.
458  */
459  state->nitems = 1;
460  state->base = base;
461  state->baseoff = baseoff;
462  state->phystupsize = MAXALIGN(IndexTupleSize(base)) + sizeof(ItemIdData);
463  /* Also save baseoff in pending state for interval */
464  state->intervals[state->nintervals].baseoff = state->baseoff;
465 }
466 
467 /*
468  * Save itup heap TID(s) into pending posting list where possible.
469  *
470  * Returns bool indicating if the pending posting list managed by state now
471  * includes itup's heap TID(s).
472  */
473 bool
474 _bt_dedup_save_htid(BTDedupState state, IndexTuple itup)
475 {
476  int nhtids;
477  ItemPointer htids;
478  Size mergedtupsz;
479 
480  Assert(!BTreeTupleIsPivot(itup));
481 
482  if (!BTreeTupleIsPosting(itup))
483  {
484  nhtids = 1;
485  htids = &itup->t_tid;
486  }
487  else
488  {
489  nhtids = BTreeTupleGetNPosting(itup);
490  htids = BTreeTupleGetPosting(itup);
491  }
492 
493  /*
494  * Don't append (have caller finish pending posting list as-is) if
495  * appending heap TID(s) from itup would put us over maxpostingsize limit.
496  *
497  * This calculation needs to match the code used within _bt_form_posting()
498  * for new posting list tuples.
499  */
500  mergedtupsz = MAXALIGN(state->basetupsize +
501  (state->nhtids + nhtids) * sizeof(ItemPointerData));
502 
503  if (mergedtupsz > state->maxpostingsize)
504  {
505  /*
506  * Count this as an oversized item for single value strategy, though
507  * only when there are 50 TIDs in the final posting list tuple. This
508  * limit (which is fairly arbitrary) avoids confusion about how many
509  * 1/6 of a page tuples have been encountered/created by the current
510  * deduplication pass.
511  *
512  * Note: We deliberately don't consider which deduplication pass
513  * merged together tuples to create this item (could be a previous
514  * deduplication pass, or current pass). See _bt_do_singleval()
515  * comments.
516  */
517  if (state->nhtids > 50)
518  state->nmaxitems++;
519 
520  return false;
521  }
522 
523  /*
524  * Save heap TIDs to pending posting list tuple -- itup can be merged into
525  * pending posting list
526  */
527  state->nitems++;
528  memcpy(state->htids + state->nhtids, htids,
529  sizeof(ItemPointerData) * nhtids);
530  state->nhtids += nhtids;
531  state->phystupsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
532 
533  return true;
534 }
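
/*
 * Illustrative sketch, not part of the original nbtdedup.c: the size check
 * in _bt_dedup_save_htid() above must agree with the sizing rule in
 * _bt_form_posting(): the key portion of the base tuple plus one
 * ItemPointerData per heap TID, MAXALIGN()'d as a whole.  The hypothetical
 * helper below restates that calculation in isolation.
 */
#ifdef NOT_USED
static bool
sketch_posting_fits(BTDedupState state, int extrahtids)
{
	Size		mergedtupsz;

	/* Same formula as _bt_dedup_save_htid() and _bt_form_posting() */
	mergedtupsz = MAXALIGN(state->basetupsize +
						   (state->nhtids + extrahtids) * sizeof(ItemPointerData));

	return mergedtupsz <= state->maxpostingsize;
}
#endif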
535 
536 /*
537  * Finalize pending posting list tuple, and add it to the page. Final tuple
538  * is based on saved base tuple, and saved list of heap TIDs.
539  *
540  * Returns space saving from deduplicating to make a new posting list tuple.
541  * Note that this includes line pointer overhead. This is zero in the case
542  * where no deduplication was possible.
543  */
544 Size
545 _bt_dedup_finish_pending(Page newpage, BTDedupState state)
546 {
547  OffsetNumber tupoff;
548  Size tuplesz;
549  Size spacesaving;
550 
551  Assert(state->nitems > 0);
552  Assert(state->nitems <= state->nhtids);
553  Assert(state->intervals[state->nintervals].baseoff == state->baseoff);
554 
555  tupoff = OffsetNumberNext(PageGetMaxOffsetNumber(newpage));
556  if (state->nitems == 1)
557  {
558  /* Use original, unchanged base tuple */
559  tuplesz = IndexTupleSize(state->base);
560  if (PageAddItem(newpage, (Item) state->base, tuplesz, tupoff,
561  false, false) == InvalidOffsetNumber)
562  elog(ERROR, "deduplication failed to add tuple to page");
563 
564  spacesaving = 0;
565  }
566  else
567  {
568  IndexTuple final;
569 
570  /* Form a tuple with a posting list */
571  final = _bt_form_posting(state->base, state->htids, state->nhtids);
572  tuplesz = IndexTupleSize(final);
573  Assert(tuplesz <= state->maxpostingsize);
574 
575  /* Save final number of items for posting list */
576  state->intervals[state->nintervals].nitems = state->nitems;
577 
578  Assert(tuplesz == MAXALIGN(IndexTupleSize(final)));
579  if (PageAddItem(newpage, (Item) final, tuplesz, tupoff, false,
580  false) == InvalidOffsetNumber)
581  elog(ERROR, "deduplication failed to add tuple to page");
582 
583  pfree(final);
584  spacesaving = state->phystupsize - (tuplesz + sizeof(ItemIdData));
585  /* Increment nintervals, since we wrote a new posting list tuple */
586  state->nintervals++;
587  Assert(spacesaving > 0 && spacesaving < BLCKSZ);
588  }
589 
590  /* Reset state for next pending posting list */
591  state->nhtids = 0;
592  state->nitems = 0;
593  state->phystupsize = 0;
594 
595  return spacesaving;
596 }
597 
598 /*
599  * Finalize interval during bottom-up index deletion.
600  *
601  * During a bottom-up pass we expect that TIDs will be recorded in dedup state
602  * first, and then get moved over to delstate (in variable-sized batches) by
603  * calling here. Call here happens when the number of TIDs in a dedup
604  * interval is known, and interval gets finalized (i.e. when caller sees next
605  * tuple on the page is not a duplicate, or when caller runs out of tuples to
606  * process from leaf page).
607  *
608  * This is where bottom-up deletion determines and remembers which entries are
609  * duplicates. This will be important information to the tableam delete
610  * infrastructure later on. Plain index tuple duplicates are marked
611  * "promising" here, per tableam contract.
612  *
613  * Our approach to marking entries whose TIDs come from posting lists is more
614  * complicated. Posting lists can only be formed by a deduplication pass (or
615  * during an index build), so recent version churn affecting the pointed-to
616  * logical rows is not particularly likely. We may still give a weak signal
617  * about posting list tuples' entries (by marking just one of its TIDs/entries
618  * promising), though this is only a possibility in the event of further
619  * duplicate index tuples in final interval that covers posting list tuple (as
620  * in the plain tuple case). A weak signal/hint will be useful to the tableam
621  * when it has no stronger signal to go with for the deletion operation as a
622  * whole.
623  *
624  * The heuristics we use work well in practice because we only need to give
625  * the tableam the right _general_ idea about where to look. Garbage tends to
626  * naturally get concentrated in relatively few table blocks with workloads
627  * that bottom-up deletion targets. The tableam cannot possibly rank all
628  * available table blocks sensibly based on the hints we provide, but that's
629  * okay -- only the extremes matter. The tableam just needs to be able to
630  * predict which few table blocks will have the most tuples that are safe to
631  * delete for each deletion operation, with low variance across related
632  * deletion operations.
633  */
634 static void
635 _bt_bottomupdel_finish_pending(Page page, BTDedupState state,
636  TM_IndexDeleteOp *delstate)
637 {
638  bool dupinterval = (state->nitems > 1);
639 
640  Assert(state->nitems > 0);
641  Assert(state->nitems <= state->nhtids);
642  Assert(state->intervals[state->nintervals].baseoff == state->baseoff);
643 
644  for (int i = 0; i < state->nitems; i++)
645  {
646  OffsetNumber offnum = state->baseoff + i;
647  ItemId itemid = PageGetItemId(page, offnum);
648  IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
649  TM_IndexDelete *ideltid = &delstate->deltids[delstate->ndeltids];
650  TM_IndexStatus *istatus = &delstate->status[delstate->ndeltids];
651 
652  if (!BTreeTupleIsPosting(itup))
653  {
654  /* Simple case: A plain non-pivot tuple */
655  ideltid->tid = itup->t_tid;
656  ideltid->id = delstate->ndeltids;
657  istatus->idxoffnum = offnum;
658  istatus->knowndeletable = false; /* for now */
659  istatus->promising = dupinterval; /* simple rule */
660  istatus->freespace = ItemIdGetLength(itemid) + sizeof(ItemIdData);
661 
662  delstate->ndeltids++;
663  }
664  else
665  {
666  /*
667  * Complicated case: A posting list tuple.
668  *
669  * We make the conservative assumption that there can only be at
670  * most one affected logical row per posting list tuple. There
671  * will be at most one promising entry in deltids to represent
672  * this presumed lone logical row. Note that this isn't even
673  * considered unless the posting list tuple is also in an interval
674  * of duplicates -- this complicated rule is just a variant of the
675  * simple rule used to decide if plain index tuples are promising.
676  */
677  int nitem = BTreeTupleGetNPosting(itup);
678  bool firstpromising = false;
679  bool lastpromising = false;
680 
681  Assert(_bt_posting_valid(itup));
682 
683  if (dupinterval)
684  {
685  /*
686  * Complicated rule: either the first or last TID in the
687  * posting list gets marked promising (if any at all)
688  */
689  BlockNumber minblocklist,
690  midblocklist,
691  maxblocklist;
692  ItemPointer mintid,
693  midtid,
694  maxtid;
695 
696  mintid = BTreeTupleGetHeapTID(itup);
697  midtid = BTreeTupleGetPostingN(itup, nitem / 2);
698  maxtid = BTreeTupleGetMaxHeapTID(itup);
699  minblocklist = ItemPointerGetBlockNumber(mintid);
700  midblocklist = ItemPointerGetBlockNumber(midtid);
701  maxblocklist = ItemPointerGetBlockNumber(maxtid);
702 
703  /* Only entry with predominant table block can be promising */
704  firstpromising = (minblocklist == midblocklist);
705  lastpromising = (!firstpromising &&
706  midblocklist == maxblocklist);
707  }
708 
709  for (int p = 0; p < nitem; p++)
710  {
711  ItemPointer htid = BTreeTupleGetPostingN(itup, p);
712 
713  ideltid->tid = *htid;
714  ideltid->id = delstate->ndeltids;
715  istatus->idxoffnum = offnum;
716  istatus->knowndeletable = false; /* for now */
717  istatus->promising = false;
718  if ((firstpromising && p == 0) ||
719  (lastpromising && p == nitem - 1))
720  istatus->promising = true;
721  istatus->freespace = sizeof(ItemPointerData); /* at worst */
722 
723  ideltid++;
724  istatus++;
725  delstate->ndeltids++;
726  }
727  }
728  }
729 
730  if (dupinterval)
731  {
732  state->intervals[state->nintervals].nitems = state->nitems;
733  state->nintervals++;
734  }
735 
736  /* Reset state for next interval */
737  state->nhtids = 0;
738  state->nitems = 0;
739  state->phystupsize = 0;
740 }
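
/*
 * Illustrative sketch, not part of the original nbtdedup.c: the rule used by
 * _bt_bottomupdel_finish_pending() above to pick at most one promising TID
 * from a posting list tuple, restated as a hypothetical predicate.  A TID is
 * only promising when the tuple sits in an interval of duplicates, and then
 * only the first or last TID qualifies, depending on which end shares its
 * table block with the posting list's middle TID.
 */
#ifdef NOT_USED
static bool
sketch_posting_entry_promising(IndexTuple itup, int p, bool dupinterval)
{
	int			nitem = BTreeTupleGetNPosting(itup);
	BlockNumber minblk,
				midblk,
				maxblk;

	if (!dupinterval)
		return false;

	minblk = ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(itup));
	midblk = ItemPointerGetBlockNumber(BTreeTupleGetPostingN(itup, nitem / 2));
	maxblk = ItemPointerGetBlockNumber(BTreeTupleGetMaxHeapTID(itup));

	if (minblk == midblk)
		return p == 0;			/* only the first TID can be promising */
	if (midblk == maxblk)
		return p == nitem - 1;	/* only the last TID can be promising */
	return false;
}
#endif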
741 
742 /*
743  * Determine if page non-pivot tuples (data items) are all duplicates of the
744  * same value -- if they are, deduplication's "single value" strategy should
745  * be applied. The general goal of this strategy is to ensure that
746  * nbtsplitloc.c (which uses its own single value strategy) will find a useful
747  * split point as further duplicates are inserted, and successive rightmost
748  * page splits occur among pages that store the same duplicate value. When
749  * the page finally splits, it should end up BTREE_SINGLEVAL_FILLFACTOR% full,
750  * just like it would if deduplication were disabled.
751  *
752  * We expect that affected workloads will require _several_ single value
753  * strategy deduplication passes (over a page that only stores duplicates)
754  * before the page is finally split. The first deduplication pass should only
755  * find regular non-pivot tuples. Later deduplication passes will find
756  * existing maxpostingsize-capped posting list tuples, which must be skipped
757  * over. The penultimate pass is generally the first pass that actually
758  * reaches _bt_singleval_fillfactor(), and so will deliberately leave behind a
759  * few untouched non-pivot tuples. The final deduplication pass won't free
760  * any space -- it will skip over everything without merging anything (it
761  * retraces the steps of the penultimate pass).
762  *
763  * Fortunately, having several passes isn't too expensive. Each pass (after
764  * the first pass) won't spend many cycles on the large posting list tuples
765  * left by previous passes. Each pass will find a large contiguous group of
766  * smaller duplicate tuples to merge together at the end of the page.
767  *
768  * Note: We deliberately don't bother checking if the high key is a distinct
769  * value (prior to the TID tiebreaker column) before proceeding, unlike
770  * nbtsplitloc.c. Its single value strategy only gets applied on the
771  * rightmost page of duplicates of the same value (other leaf pages full of
772  * duplicates will get a simple 50:50 page split instead of splitting towards
773  * the end of the page). There is little point in making the same distinction
774  * here.
775  */
776 static bool
777 _bt_do_singleval(Relation rel, Page page, BTDedupState state,
778  OffsetNumber minoff, IndexTuple newitem)
779 {
780  int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
781  ItemId itemid;
782  IndexTuple itup;
783 
784  itemid = PageGetItemId(page, minoff);
785  itup = (IndexTuple) PageGetItem(page, itemid);
786 
787  if (_bt_keep_natts_fast(rel, newitem, itup) > nkeyatts)
788  {
789  itemid = PageGetItemId(page, PageGetMaxOffsetNumber(page));
790  itup = (IndexTuple) PageGetItem(page, itemid);
791 
792  if (_bt_keep_natts_fast(rel, newitem, itup) > nkeyatts)
793  return true;
794  }
795 
796  return false;
797 }
798 
799 /*
800  * Lower maxpostingsize when using "single value" strategy, to avoid a sixth
801  * and final maxpostingsize-capped tuple. The sixth and final posting list
802  * tuple will end up somewhat smaller than the first five. (Note: The first
803  * five tuples could actually just be very large duplicate tuples that
804  * couldn't be merged together at all. Deduplication will simply not modify
805  * the page when that happens.)
806  *
807  * When there are six posting lists on the page (after current deduplication
808  * pass goes on to create/observe a sixth very large tuple), caller should end
809  * its deduplication pass. It isn't useful to try to deduplicate items that
810  * are supposed to end up on the new right sibling page following the
811  * anticipated page split. A future deduplication pass of future right
812  * sibling page might take care of it. (This is why the first single value
813  * strategy deduplication pass for a given leaf page will generally find only
814  * plain non-pivot tuples -- see _bt_do_singleval() comments.)
815  */
816 static void
817 _bt_singleval_fillfactor(Page page, BTDedupState state, Size newitemsz)
818 {
819  Size leftfree;
820  int reduction;
821 
822  /* This calculation needs to match nbtsplitloc.c */
823  leftfree = PageGetPageSize(page) - SizeOfPageHeaderData -
824  MAXALIGN(sizeof(BTPageOpaqueData));
825  /* Subtract size of new high key (includes pivot heap TID space) */
826  leftfree -= newitemsz + MAXALIGN(sizeof(ItemPointerData));
827 
828  /*
829  * Reduce maxpostingsize by an amount equal to target free space on left
830  * half of page
831  */
832  reduction = leftfree * ((100 - BTREE_SINGLEVAL_FILLFACTOR) / 100.0);
833  if (state->maxpostingsize > reduction)
834  state->maxpostingsize -= reduction;
835  else
836  state->maxpostingsize = 0;
837 }
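
/*
 * Illustrative sketch, not part of the original nbtdedup.c: worked example
 * of the reduction applied by _bt_singleval_fillfactor() above, assuming the
 * default 8192-byte BLCKSZ.  Page overheads plus the new high key leave
 * leftfree at a little under 8kB, and with BTREE_SINGLEVAL_FILLFACTOR at 96
 * the reduction is 4% of that, i.e. roughly 300 bytes shaved off
 * maxpostingsize for the sixth and final posting list tuple.  The helper
 * name is hypothetical.
 */
#ifdef NOT_USED
static int
sketch_fillfactor_reduction(Size leftfree)
{
	/* e.g. leftfree = 8000 gives a reduction of about 320 bytes */
	return (int) (leftfree * ((100 - BTREE_SINGLEVAL_FILLFACTOR) / 100.0));
}
#endif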
838 
839 /*
840  * Build a posting list tuple based on caller's "base" index tuple and list of
841  * heap TIDs. When nhtids == 1, builds a standard non-pivot tuple without a
842  * posting list. (Posting list tuples can never have a single heap TID, partly
843  * because that ensures that deduplication always reduces final MAXALIGN()'d
844  * size of entire tuple.)
845  *
846  * Convention is that posting list starts at a MAXALIGN()'d offset (rather
847  * than a SHORTALIGN()'d offset), in line with the approach taken when
848  * appending a heap TID to new pivot tuple/high key during suffix truncation.
849  * This sometimes wastes a little space that was only needed as alignment
850  * padding in the original tuple. Following this convention simplifies the
851  * space accounting used when deduplicating a page (the same convention
852  * simplifies the accounting for choosing a point to split a page at).
853  *
854  * Note: Caller's "htids" array must be unique and already in ascending TID
855  * order. Any existing heap TIDs from "base" won't automatically appear in
856  * returned posting list tuple (they must be included in htids array.)
857  */
858 IndexTuple
859 _bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
860 {
861  uint32 keysize,
862  newsize;
863  IndexTuple itup;
864 
865  if (BTreeTupleIsPosting(base))
866  keysize = BTreeTupleGetPostingOffset(base);
867  else
868  keysize = IndexTupleSize(base);
869 
870  Assert(!BTreeTupleIsPivot(base));
871  Assert(nhtids > 0 && nhtids <= PG_UINT16_MAX);
872  Assert(keysize == MAXALIGN(keysize));
873 
874  /* Determine final size of new tuple */
875  if (nhtids > 1)
876  newsize = MAXALIGN(keysize +
877  nhtids * sizeof(ItemPointerData));
878  else
879  newsize = keysize;
880 
881  Assert(newsize <= INDEX_SIZE_MASK);
882  Assert(newsize == MAXALIGN(newsize));
883 
884  /* Allocate memory using palloc0() (matches index_form_tuple()) */
885  itup = palloc0(newsize);
886  memcpy(itup, base, keysize);
887  itup->t_info &= ~INDEX_SIZE_MASK;
888  itup->t_info |= newsize;
889  if (nhtids > 1)
890  {
891  /* Form posting list tuple */
892  BTreeTupleSetPosting(itup, nhtids, keysize);
893  memcpy(BTreeTupleGetPosting(itup), htids,
894  sizeof(ItemPointerData) * nhtids);
895  Assert(_bt_posting_valid(itup));
896  }
897  else
898  {
899  /* Form standard non-pivot tuple */
900  itup->t_info &= ~INDEX_ALT_TID_MASK;
901  ItemPointerCopy(htids, &itup->t_tid);
902  Assert(ItemPointerIsValid(&itup->t_tid));
903  }
904 
905  return itup;
906 }
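
/*
 * Illustrative sketch, not part of the original nbtdedup.c: a minimal caller
 * of _bt_form_posting().  The heap TID array must be duplicate-free and
 * sorted in ascending order, and must include any TIDs carried by the base
 * tuple itself, since they are not added automatically.  The wrapper name is
 * hypothetical.
 */
#ifdef NOT_USED
static IndexTuple
sketch_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
{
	IndexTuple	posting;

	/* Mirror the ordering requirement checked by _bt_posting_valid() */
	for (int i = 1; i < nhtids; i++)
		Assert(ItemPointerCompare(&htids[i - 1], &htids[i]) < 0);

	posting = _bt_form_posting(base, htids, nhtids);

	/* With a single TID the result is a plain non-pivot tuple */
	Assert(nhtids == 1 || BTreeTupleIsPosting(posting));

	return posting;
}
#endif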
907 
908 /*
909  * Generate a replacement tuple by "updating" a posting list tuple so that it
910  * no longer has TIDs that need to be deleted.
911  *
912  * Used by both VACUUM and index deletion. Caller's vacposting argument
913  * points to the existing posting list tuple to be updated.
914  *
915  * On return, caller's vacposting argument will point to final "updated"
916  * tuple, which will be palloc()'d in caller's memory context.
917  */
918 void
919 _bt_update_posting(BTVacuumPosting vacposting)
920 {
921  IndexTuple origtuple = vacposting->itup;
922  uint32 keysize,
923  newsize;
924  IndexTuple itup;
925  int nhtids;
926  int ui,
927  d;
928  ItemPointer htids;
929 
930  nhtids = BTreeTupleGetNPosting(origtuple) - vacposting->ndeletedtids;
931 
932  Assert(_bt_posting_valid(origtuple));
933  Assert(nhtids > 0 && nhtids < BTreeTupleGetNPosting(origtuple));
934 
935  /*
936  * Determine final size of new tuple.
937  *
938  * This calculation needs to match the code used within _bt_form_posting()
939  * for new posting list tuples. We avoid calling _bt_form_posting() here
940  * to save ourselves a second memory allocation for a htids workspace.
941  */
942  keysize = BTreeTupleGetPostingOffset(origtuple);
943  if (nhtids > 1)
944  newsize = MAXALIGN(keysize +
945  nhtids * sizeof(ItemPointerData));
946  else
947  newsize = keysize;
948 
949  Assert(newsize <= INDEX_SIZE_MASK);
950  Assert(newsize == MAXALIGN(newsize));
951 
952  /* Allocate memory using palloc0() (matches index_form_tuple()) */
953  itup = palloc0(newsize);
954  memcpy(itup, origtuple, keysize);
955  itup->t_info &= ~INDEX_SIZE_MASK;
956  itup->t_info |= newsize;
957 
958  if (nhtids > 1)
959  {
960  /* Form posting list tuple */
961  BTreeTupleSetPosting(itup, nhtids, keysize);
962  htids = BTreeTupleGetPosting(itup);
963  }
964  else
965  {
966  /* Form standard non-pivot tuple */
967  itup->t_info &= ~INDEX_ALT_TID_MASK;
968  htids = &itup->t_tid;
969  }
970 
971  ui = 0;
972  d = 0;
973  for (int i = 0; i < BTreeTupleGetNPosting(origtuple); i++)
974  {
975  if (d < vacposting->ndeletedtids && vacposting->deletetids[d] == i)
976  {
977  d++;
978  continue;
979  }
980  htids[ui++] = *BTreeTupleGetPostingN(origtuple, i);
981  }
982  Assert(ui == nhtids);
983  Assert(d == vacposting->ndeletedtids);
984  Assert(nhtids == 1 || _bt_posting_valid(itup));
985  Assert(nhtids > 1 || ItemPointerIsValid(&itup->t_tid));
986 
987  /* vacposting arg's itup will now point to updated version */
988  vacposting->itup = itup;
989 }
990 
991 /*
992  * Prepare for a posting list split by swapping heap TID in newitem with heap
993  * TID from original posting list (the 'oposting' heap TID located at offset
994  * 'postingoff'). Modifies newitem, so caller should pass their own private
995  * copy that can safely be modified.
996  *
997  * Returns new posting list tuple, which is palloc()'d in caller's context.
998  * This is guaranteed to be the same size as 'oposting'. Modified newitem is
999  * what caller actually inserts. (This happens inside the same critical
1000  * section that performs an in-place update of old posting list using new
1001  * posting list returned here.)
1002  *
1003  * While the keys from newitem and oposting must be opclass equal, and must
1004  * generate identical output when run through the underlying type's output
1005  * function, it doesn't follow that their representations match exactly.
1006  * Caller must avoid assuming that there can't be representational differences
1007  * that make datums from oposting bigger or smaller than the corresponding
1008  * datums from newitem. For example, differences in TOAST input state might
1009  * break a faulty assumption about tuple size (the executor is entitled to
1010  * apply TOAST compression based on its own criteria). It also seems possible
1011  * that further representational variation will be introduced in the future,
1012  * in order to support nbtree features like page-level prefix compression.
1013  *
1014  * See nbtree/README for details on the design of posting list splits.
1015  */
1016 IndexTuple
1017 _bt_swap_posting(IndexTuple newitem, IndexTuple oposting, int postingoff)
1018 {
1019  int nhtids;
1020  char *replacepos;
1021  char *replaceposright;
1022  Size nmovebytes;
1023  IndexTuple nposting;
1024 
1025  nhtids = BTreeTupleGetNPosting(oposting);
1026  Assert(_bt_posting_valid(oposting));
1027 
1028  /*
1029  * The postingoff argument originated as a _bt_binsrch_posting() return
1030  * value. It will be 0 in the event of corruption that makes a leaf page
1031  * contain a non-pivot tuple that's somehow identical to newitem (no two
1032  * non-pivot tuples should ever have the same TID). This has been known
1033  * to happen in the field from time to time.
1034  *
1035  * Perform a basic sanity check to catch this case now.
1036  */
1037  if (!(postingoff > 0 && postingoff < nhtids))
1038  elog(ERROR, "posting list tuple with %d items cannot be split at offset %d",
1039  nhtids, postingoff);
1040 
1041  /*
1042  * Move item pointers in posting list to make a gap for the new item's
1043  * heap TID. We shift TIDs one place to the right, losing original
1044  * rightmost TID. (nmovebytes must not include TIDs to the left of
1045  * postingoff, nor the existing rightmost/max TID that gets overwritten.)
1046  */
1047  nposting = CopyIndexTuple(oposting);
1048  replacepos = (char *) BTreeTupleGetPostingN(nposting, postingoff);
1049  replaceposright = (char *) BTreeTupleGetPostingN(nposting, postingoff + 1);
1050  nmovebytes = (nhtids - postingoff - 1) * sizeof(ItemPointerData);
1051  memmove(replaceposright, replacepos, nmovebytes);
1052 
1053  /* Fill the gap at postingoff with TID of new item (original new TID) */
1054  Assert(!BTreeTupleIsPivot(newitem) && !BTreeTupleIsPosting(newitem));
1055  ItemPointerCopy(&newitem->t_tid, (ItemPointer) replacepos);
1056 
1057  /* Now copy oposting's rightmost/max TID into new item (final new TID) */
1058  ItemPointerCopy(BTreeTupleGetMaxHeapTID(oposting), &newitem->t_tid);
1059 
1059 
1060  Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(nposting),
1061  BTreeTupleGetHeapTID(newitem)) < 0);
1062  Assert(_bt_posting_valid(nposting));
1063 
1064  return nposting;
1065 }
1066 
1067 /*
1068  * Verify posting list invariants for "posting", which must be a posting list
1069  * tuple. Used within assertions.
1070  */
1071 #ifdef USE_ASSERT_CHECKING
1072 static bool
1073 _bt_posting_valid(IndexTuple posting)
1074 {
1075  ItemPointerData last;
1076  ItemPointer htid;
1077 
1078  if (!BTreeTupleIsPosting(posting) || BTreeTupleGetNPosting(posting) < 2)
1079  return false;
1080 
1081  /* Remember first heap TID for loop */
1082  ItemPointerCopy(BTreeTupleGetHeapTID(posting), &last);
1083  if (!ItemPointerIsValid(&last))
1084  return false;
1085 
1086  /* Iterate, starting from second TID */
1087  for (int i = 1; i < BTreeTupleGetNPosting(posting); i++)
1088  {
1089  htid = BTreeTupleGetPostingN(posting, i);
1090 
1091  if (!ItemPointerIsValid(htid))
1092  return false;
1093  if (ItemPointerCompare(htid, &last) <= 0)
1094  return false;
1095  ItemPointerCopy(htid, &last);
1096  }
1097 
1098  return true;
1099 }
1100 #endif