1 /*-------------------------------------------------------------------------
2  *
3  * nbtree.h
4  * header file for postgres btree access method implementation.
5  *
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/access/nbtree.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef NBTREE_H
15 #define NBTREE_H
16 
17 #include "access/amapi.h"
18 #include "access/itup.h"
19 #include "access/sdir.h"
20 #include "access/xlogreader.h"
21 #include "catalog/pg_am_d.h"
22 #include "catalog/pg_index.h"
23 #include "lib/stringinfo.h"
24 #include "storage/bufmgr.h"
25 #include "storage/shm_toc.h"
26 
27 /* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
28 typedef uint16 BTCycleId;
29 
30 /*
31  * BTPageOpaqueData -- At the end of every page, we store a pointer
32  * to both siblings in the tree. This is used to do forward/backward
33  * index scans. The next-page link is also critical for recovery when
34  * a search has navigated to the wrong page due to concurrent page splits
35  * or deletions; see src/backend/access/nbtree/README for more info.
36  *
37  * In addition, we store the page's btree level (counting upwards from
38  * zero at a leaf page) as well as some flag bits indicating the page type
39  * and status. If the page is deleted, we replace the level with the
40  * next-transaction-ID value indicating when it is safe to reclaim the page.
41  *
42  * We also store a "vacuum cycle ID". When a page is split while VACUUM is
43  * processing the index, a nonzero value associated with the VACUUM run is
44  * stored into both halves of the split page. (If VACUUM is not running,
45  * both pages receive zero cycleids.) This allows VACUUM to detect whether
46  * a page was split since it started, with a small probability of false match
47  * if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
48  * ago. Also, during a split, the BTP_SPLIT_END flag is cleared in the left
49  * (original) page, and set in the right page, but only if the next page
50  * to its right has a different cycleid.
51  *
52  * NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
53  * instead.
54  */
55 
56 typedef struct BTPageOpaqueData
57 {
58  BlockNumber btpo_prev; /* left sibling, or P_NONE if leftmost */
59  BlockNumber btpo_next; /* right sibling, or P_NONE if rightmost */
60  union
61  {
62  uint32 level; /* tree level --- zero for leaf pages */
63  TransactionId xact; /* next transaction ID, if deleted */
64  } btpo;
65  uint16 btpo_flags; /* flag bits, see below */
66  BTCycleId btpo_cycleid; /* vacuum cycle ID of latest split */
67 } BTPageOpaqueData;
68 
69 typedef BTPageOpaqueData *BTPageOpaque;
70 
71 /* Bits defined in btpo_flags */
72 #define BTP_LEAF (1 << 0) /* leaf page, i.e. not internal page */
73 #define BTP_ROOT (1 << 1) /* root page (has no parent) */
74 #define BTP_DELETED (1 << 2) /* page has been deleted from tree */
75 #define BTP_META (1 << 3) /* meta-page */
76 #define BTP_HALF_DEAD (1 << 4) /* empty, but still in tree */
77 #define BTP_SPLIT_END (1 << 5) /* rightmost page of split group */
78 #define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples */
79 #define BTP_INCOMPLETE_SPLIT (1 << 7) /* right sibling's downlink is missing */
80 
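/*
 * A minimal classification sketch (not part of the original header; the
 * helper name classify_bt_page() is hypothetical) showing how these flag
 * bits and the btpo union are typically read together:
 */
static inline const char *
classify_bt_page(Page page)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	if (opaque->btpo_flags & BTP_META)
		return "meta";
	if (opaque->btpo_flags & BTP_DELETED)
		return "deleted";		/* btpo.xact holds next-XID, not a level */
	if (opaque->btpo_flags & BTP_LEAF)
		return "leaf";			/* btpo.level == 0, per the NOTE above */
	return "internal";			/* btpo.level counts up from the leaf level */
}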
81 /*
82  * The max allowed value of a cycle ID is a bit less than 64K. This is
83  * for convenience of pg_filedump and similar utilities: we want to use
84  * the last 2 bytes of special space as an index type indicator, and
85  * restricting cycle ID lets btree use that space for vacuum cycle IDs
86  * while still allowing index type to be identified.
87  */
88 #define MAX_BT_CYCLE_ID 0xFF7F
89 
90 
91 /*
92  * The Meta page is always the first page in the btree index.
93  * Its primary purpose is to point to the location of the btree root page.
94  * We also point to the "fast" root, which is the current effective root;
95  * see README for discussion.
96  */
97 
98 typedef struct BTMetaPageData
99 {
100  uint32 btm_magic; /* should contain BTREE_MAGIC */
101  uint32 btm_version; /* nbtree version (always <= BTREE_VERSION) */
102  BlockNumber btm_root; /* current root location */
103  uint32 btm_level; /* tree level of the root page */
104  BlockNumber btm_fastroot; /* current "fast" root location */
105  uint32 btm_fastlevel; /* tree level of the "fast" root page */
106  /* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */
107  TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among all deleted
108  * pages */
109  float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples
110  * during last cleanup */
111  bool btm_allequalimage; /* are all columns "equalimage"? */
112 } BTMetaPageData;
113 
114 #define BTPageGetMeta(p) \
115  ((BTMetaPageData *) PageGetContents(p))
116 
117 /*
118  * The current Btree version is 4. That's what you'll get when you create
119  * a new index.
120  *
121  * Btree version 3 was used in PostgreSQL v11. It is mostly the same as
122  * version 4, but heap TIDs were not part of the keyspace. Index tuples
123  * with duplicate keys could be stored in any order. We continue to
124  * support reading and writing Btree versions 2 and 3, so that they don't
125  * need to be immediately re-indexed at pg_upgrade. In order to get the
126  * new heapkeyspace semantics, however, a REINDEX is needed.
127  *
128  * Deduplication is safe to use when the btm_allequalimage field is set to
129  * true. It's safe to read the btm_allequalimage field on version 3, but
130  * only version 4 indexes make use of deduplication. Even version 4
131  * indexes created on PostgreSQL v12 will need a REINDEX to make use of
132  * deduplication, though, since there is no other way to set
133  * btm_allequalimage to true (pg_upgrade hasn't been taught to set the
134  * metapage field).
135  *
136  * Btree version 2 is mostly the same as version 3. There are two new
137  * fields in the metapage that were introduced in version 3. A version 2
138  * metapage will be automatically upgraded to version 3 on the first
139  * insert to it. INCLUDE indexes cannot use version 2.
140  */
141 #define BTREE_METAPAGE 0 /* first page is meta */
142 #define BTREE_MAGIC 0x053162 /* magic number in metapage */
143 #define BTREE_VERSION 4 /* current version number */
144 #define BTREE_MIN_VERSION 2 /* minimum supported version */
145 #define BTREE_NOVAC_VERSION 3 /* version with all meta fields set */
146 
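/*
 * A usage sketch, assuming the caller already holds a pin and lock on the
 * metapage (hypothetical helper; see _bt_getroot() in nbtpage.c for the
 * real descent logic):
 */
static inline BlockNumber
sketch_get_fastroot(Page metapg)
{
	BTMetaPageData *metad = BTPageGetMeta(metapg);

	Assert(metad->btm_magic == BTREE_MAGIC);
	Assert(metad->btm_version >= BTREE_MIN_VERSION &&
		   metad->btm_version <= BTREE_VERSION);

	/* descend from the "fast" root rather than the true root */
	return metad->btm_fastroot;
}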
147 /*
148  * Maximum size of a btree index entry, including its tuple header.
149  *
150  * We actually need to be able to fit three items on every page,
151  * so restrict any one item to 1/3 the per-page available space.
152  *
153  * There are rare cases where _bt_truncate() will need to enlarge
154  * a heap index tuple to make space for a tiebreaker heap TID
155  * attribute, which we account for here.
156  */
157 #define BTMaxItemSize(page) \
158  MAXALIGN_DOWN((PageGetPageSize(page) - \
159  MAXALIGN(SizeOfPageHeaderData + \
160  3*sizeof(ItemIdData) + \
161  3*sizeof(ItemPointerData)) - \
162  MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
163 #define BTMaxItemSizeNoHeapTid(page) \
164  MAXALIGN_DOWN((PageGetPageSize(page) - \
165  MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
166  MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
167 
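/*
 * Worked example, assuming the default BLCKSZ of 8192 and 8-byte MAXALIGN
 * (numbers are not from the original header): the page header plus three
 * line pointers plus three reserved heap-TID tiebreakers MAXALIGN to 56
 * bytes, and the special space to 16 bytes, so BTMaxItemSize comes out to
 * MAXALIGN_DOWN((8192 - 56 - 16) / 3) = 2704 bytes.  Without the reserved
 * TID space the figure is MAXALIGN_DOWN((8192 - 40 - 16) / 3) = 2712 bytes.
 * These are the limits quoted by "index row size exceeds ..." errors for
 * version 4 and version 3 indexes respectively.
 */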
168 /*
169  * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs
170  * that may be stored on a btree leaf page. It is used to size the
171  * per-page temporary buffers used by index scans.
172  *
173  * Note: we don't bother considering per-tuple overheads here to keep
174  * things simple (value is based on how many elements a single array of
175  * heap TIDs must have to fill the space between the page header and
176  * special area). The value is slightly higher (i.e. more conservative)
177  * than necessary as a result, which is considered acceptable.
178  */
179 #define MaxTIDsPerBTreePage \
180  (int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \
181  sizeof(ItemPointerData))
182 
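/*
 * Continuing the worked example for the default BLCKSZ of 8192 (not from
 * the original header): (8192 - 24 - 16) / sizeof(ItemPointerData) =
 * 8152 / 6 = 1358, so scans never need room for more than 1358 TIDs per
 * leaf page.
 */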
183 /*
184  * The leaf-page fillfactor defaults to 90% but is user-adjustable.
185  * For pages above the leaf level, we use a fixed 70% fillfactor.
186  * The fillfactor is applied during index build and when splitting
187  * a rightmost page; when splitting non-rightmost pages we try to
188  * divide the data equally. When splitting a page that's entirely
189  * filled with a single value (duplicates), the effective leaf-page
190  * fillfactor is 96%, regardless of whether the page is a rightmost
191  * page.
192  */
193 #define BTREE_MIN_FILLFACTOR 10
194 #define BTREE_DEFAULT_FILLFACTOR 90
195 #define BTREE_NONLEAF_FILLFACTOR 70
196 #define BTREE_SINGLEVAL_FILLFACTOR 96
197 
198 /*
199  * In general, the btree code tries to localize its knowledge about
200  * page layout to a couple of routines. However, we need a special
201  * value to indicate "no page number" in those places where we expect
202  * page numbers. We can use zero for this because we never need to
203  * make a pointer to the metadata page.
204  */
205 
206 #define P_NONE 0
207 
208 /*
209  * Macros to test whether a page is leftmost or rightmost on its tree level,
210  * as well as other state info kept in the opaque data.
211  */
212 #define P_LEFTMOST(opaque) ((opaque)->btpo_prev == P_NONE)
213 #define P_RIGHTMOST(opaque) ((opaque)->btpo_next == P_NONE)
214 #define P_ISLEAF(opaque) (((opaque)->btpo_flags & BTP_LEAF) != 0)
215 #define P_ISROOT(opaque) (((opaque)->btpo_flags & BTP_ROOT) != 0)
216 #define P_ISDELETED(opaque) (((opaque)->btpo_flags & BTP_DELETED) != 0)
217 #define P_ISMETA(opaque) (((opaque)->btpo_flags & BTP_META) != 0)
218 #define P_ISHALFDEAD(opaque) (((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
219 #define P_IGNORE(opaque) (((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
220 #define P_HAS_GARBAGE(opaque) (((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
221 #define P_INCOMPLETE_SPLIT(opaque) (((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
222 
223 /*
224  * Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
225  * page. The high key is not a tuple that is used to visit the heap. It is
226  * a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
227  * The high key on a page is required to be greater than or equal to any
228  * other key that appears on the page. If we find ourselves trying to
229  * insert a key that is strictly > high key, we know we need to move right
230  * (this should only happen if the page was split since we examined the
231  * parent page).
232  *
233  * Our insertion algorithm guarantees that we can use the initial least key
234  * on our right sibling as the high key. Once a page is created, its high
235  * key changes only if the page is split.
236  *
237  * On a non-rightmost page, the high key lives in item 1 and data items
238  * start in item 2. Rightmost pages have no high key, so we store data
239  * items beginning in item 1.
240  */
241 
242 #define P_HIKEY ((OffsetNumber) 1)
243 #define P_FIRSTKEY ((OffsetNumber) 2)
244 #define P_FIRSTDATAKEY(opaque) (P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
245 
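/*
 * A minimal page-scan sketch using these macros (hypothetical helper, not
 * part of the original header; cf. _bt_readpage() in nbtsearch.c):
 */
static inline void
sketch_walk_data_items(Page page)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	OffsetNumber off;

	/* P_FIRSTDATAKEY skips the high key on non-rightmost pages */
	for (off = P_FIRSTDATAKEY(opaque); off <= maxoff; off = OffsetNumberNext(off))
	{
		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));

		(void) itup;			/* ... examine one data item ... */
	}
}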
246 /*
247  * Notes on B-Tree tuple format, and key and non-key attributes:
248  *
249  * INCLUDE B-Tree indexes have non-key attributes. These are extra
250  * attributes that may be returned by index-only scans, but do not influence
251  * the order of items in the index (formally, non-key attributes are not
252  * considered to be part of the key space). Non-key attributes are only
253  * present in leaf index tuples whose item pointers actually point to heap
254  * tuples (non-pivot tuples). _bt_check_natts() enforces the rules
255  * described here.
256  *
257  * Non-pivot tuple format (plain/non-posting variant):
258  *
259  * t_tid | t_info | key values | INCLUDE columns, if any
260  *
261  * t_tid points to the heap TID, which is a tiebreaker key column as of
262  * BTREE_VERSION 4.
263  *
264  * Non-pivot tuples complement pivot tuples, which only have key columns.
265  * The sole purpose of pivot tuples is to represent how the key space is
266  * separated. In general, any B-Tree index that has more than one level
267  * (i.e. any index that does not just consist of a metapage and a single
268  * leaf root page) must have some number of pivot tuples, since pivot
269  * tuples are used for traversing the tree. Suffix truncation can omit
270  * trailing key columns when a new pivot is formed, which makes minus
271  * infinity their logical value. Since BTREE_VERSION 4 indexes treat heap
272  * TID as a trailing key column that ensures that all index tuples are
273  * physically unique, it is necessary to represent heap TID as a trailing
274  * key column in pivot tuples, though very often this can be truncated
275  * away, just like any other key column. (Actually, the heap TID is
276  * omitted rather than truncated, since its representation is different to
277  * the non-pivot representation.)
278  *
279  * Pivot tuple format:
280  *
281  * t_tid | t_info | key values | [heap TID]
282  *
283  * We store the number of columns present inside pivot tuples by abusing
284  * their t_tid offset field, since pivot tuples never need to store a real
285  * offset (downlinks only need to store a block number in t_tid). The
286  * offset field only stores the number of columns/attributes when the
287  * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
288  * TID column sometimes stored in pivot tuples -- that's represented by
289  * the presence of BT_PIVOT_HEAP_TID_ATTR. The INDEX_ALT_TID_MASK bit in
290  * t_info is always set on BTREE_VERSION 4 pivot tuples, since
291  * BTreeTupleIsPivot() must work reliably on heapkeyspace versions.
292  *
293  * In version 3 indexes, the INDEX_ALT_TID_MASK flag might not be set in
294  * pivot tuples. In that case, the number of key columns is implicitly
295  * the same as the number of key columns in the index. It is not usually
296  * set on version 2 indexes, which predate the introduction of INCLUDE
297  * indexes. (Only explicitly truncated pivot tuples explicitly represent
298  * the number of key columns on versions 2 and 3, whereas all pivot tuples
299  * are formed using truncation on version 4. A version 2 index will have
300  * it set for an internal page negative infinity item iff an internal
301  * page split occurred after upgrade to Postgres 11+.)
302  *
303  * The 12 least significant offset bits from t_tid are used to represent
304  * the number of columns in INDEX_ALT_TID_MASK tuples, leaving 4 status
305  * bits (BT_RESERVED_OFFSET_MASK bits), 3 of which are reserved for
306  * future use. BT_OFFSET_MASK should be large enough to store any number
307  * of columns/attributes <= INDEX_MAX_KEYS.
308  *
309  * Sometimes non-pivot tuples also use a representation that repurposes
310  * t_tid to store metadata rather than a TID. PostgreSQL v13 introduced a
311  * new non-pivot tuple format to support deduplication: posting list
312  * tuples. Deduplication merges together multiple equal non-pivot tuples
313  * into a logically equivalent, space efficient representation. A posting
314  * list is an array of ItemPointerData elements. Non-pivot tuples are
315  * merged together to form posting list tuples lazily, at the point where
316  * we'd otherwise have to split a leaf page.
317  *
318  * Posting tuple format (alternative non-pivot tuple representation):
319  *
320  * t_tid | t_info | key values | posting list (TID array)
321  *
322  * Posting list tuples are recognized as such by having the
323  * INDEX_ALT_TID_MASK status bit set in t_info and the BT_IS_POSTING status
324  * bit set in t_tid. These flags redefine the content of the posting
325  * tuple's t_tid to store an offset to the posting list, as well as the
326  * total number of posting list array elements.
327  *
328  * The 12 least significant offset bits from t_tid are used to represent
329  * the number of posting items present in the tuple, leaving 4 status
330  * bits (BT_RESERVED_OFFSET_MASK bits), 3 of which are reserved for
331  * future use. Like any non-pivot tuple, the number of columns stored is
332  * always implicitly the total number in the index (in practice there can
333  * never be non-key columns stored, since deduplication is not supported
334  * with INCLUDE indexes). BT_OFFSET_MASK should be large enough to store
335  * any number of posting list TIDs that might be present in a tuple (since
336  * tuple size is subject to the INDEX_SIZE_MASK limit).
337  *
338  * Note well: The macros that deal with the number of attributes in tuples
339  * assume that a tuple with INDEX_ALT_TID_MASK set must be a pivot tuple or
340  * non-pivot posting tuple, and that a tuple without INDEX_ALT_TID_MASK set
341  * must be a non-pivot tuple (or must have the same number of attributes as
342  * the index has generally in the case of !heapkeyspace indexes).
343  */
344 #define INDEX_ALT_TID_MASK INDEX_AM_RESERVED_BIT
345 
346 /* Item pointer offset bits */
347 #define BT_RESERVED_OFFSET_MASK 0xF000
348 #define BT_OFFSET_MASK 0x0FFF
349 #define BT_PIVOT_HEAP_TID_ATTR 0x1000
350 #define BT_IS_POSTING 0x2000
351 
352 /*
353  * Note: BTreeTupleIsPivot() can have false negatives (but not false
354  * positives) when used with !heapkeyspace indexes
355  */
356 static inline bool
357 BTreeTupleIsPivot(IndexTuple itup)
358 {
359  if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
360  return false;
361  /* absence of BT_IS_POSTING in offset number indicates pivot tuple */
362  if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) != 0)
363  return false;
364 
365  return true;
366 }
367 
368 static inline bool
369 BTreeTupleIsPosting(IndexTuple itup)
370 {
371  if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
372  return false;
373  /* presence of BT_IS_POSTING in offset number indicates posting tuple */
374  if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) == 0)
375  return false;
376 
377  return true;
378 }
379 
380 static inline void
381 BTreeTupleSetPosting(IndexTuple itup, int nhtids, int postingoffset)
382 {
383  Assert(nhtids > 1 && (nhtids & BT_OFFSET_MASK) == nhtids);
384  Assert((size_t) postingoffset == MAXALIGN(postingoffset));
385  Assert(postingoffset < INDEX_SIZE_MASK);
386 
387  itup->t_info |= INDEX_ALT_TID_MASK;
388  ItemPointerSetOffsetNumber(&itup->t_tid, (nhtids | BT_IS_POSTING));
389  ItemPointerSetBlockNumber(&itup->t_tid, postingoffset);
390 }
391 
392 static inline uint16
393 BTreeTupleGetNPosting(IndexTuple posting)
394 {
395  OffsetNumber existing;
396 
397  Assert(BTreeTupleIsPosting(posting));
398 
399  existing = ItemPointerGetOffsetNumberNoCheck(&posting->t_tid);
400  return (existing & BT_OFFSET_MASK);
401 }
402 
403 static inline uint32
404 BTreeTupleGetPostingOffset(IndexTuple posting)
405 {
406  Assert(BTreeTupleIsPosting(posting));
407 
408  return ItemPointerGetBlockNumberNoCheck(&posting->t_tid);
409 }
410 
411 static inline ItemPointer
412 BTreeTupleGetPosting(IndexTuple posting)
413 {
414  return (ItemPointer) ((char *) posting +
415  BTreeTupleGetPostingOffset(posting));
416 }
417 
418 static inline ItemPointer
419 BTreeTupleGetPostingN(IndexTuple posting, int n)
420 {
421  return BTreeTupleGetPosting(posting) + n;
422 }
423 
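/*
 * Sketch (hypothetical helper, not in the original header): visiting every
 * heap TID carried by a non-pivot tuple, whether it is a plain tuple with
 * a single t_tid or a posting list tuple:
 */
static inline void
sketch_visit_heap_tids(IndexTuple itup)
{
	if (BTreeTupleIsPosting(itup))
	{
		uint16		nposting = BTreeTupleGetNPosting(itup);

		for (uint16 i = 0; i < nposting; i++)
		{
			ItemPointer htid = BTreeTupleGetPostingN(itup, i);

			(void) htid;		/* ... process one TID ... */
		}
	}
	else
		(void) &itup->t_tid;	/* plain tuple: its only heap TID */
}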
424 /*
425  * Get/set downlink block number in pivot tuple.
426  *
427  * Note: Cannot assert that tuple is a pivot tuple. If we did so then
428  * !heapkeyspace indexes would exhibit false positive assertion failures.
429  */
430 static inline BlockNumber
431 BTreeTupleGetDownLink(IndexTuple pivot)
432 {
433  return ItemPointerGetBlockNumberNoCheck(&pivot->t_tid);
434 }
435 
436 static inline void
437 BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
438 {
439  ItemPointerSetBlockNumber(&pivot->t_tid, blkno);
440 }
441 
442 /*
443  * Get number of attributes within tuple.
444  *
445  * Note that this does not include an implicit tiebreaker heap TID
446  * attribute, if any. Note also that the number of key attributes must be
447  * explicitly represented in all heapkeyspace pivot tuples.
448  *
449  * Note: This is defined as a macro rather than an inline function to
450  * avoid including rel.h.
451  */
452 #define BTreeTupleGetNAtts(itup, rel) \
453  ( \
454  (BTreeTupleIsPivot(itup)) ? \
455  ( \
456  ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_OFFSET_MASK \
457  ) \
458  : \
459  IndexRelationGetNumberOfAttributes(rel) \
460  )
461 
462 /*
463  * Set number of attributes in tuple, making it into a pivot tuple
464  */
465 static inline void
466 BTreeTupleSetNAtts(IndexTuple itup, int natts)
467 {
468  Assert(natts <= INDEX_MAX_KEYS);
469 
470  itup->t_info |= INDEX_ALT_TID_MASK;
471  /* BT_IS_POSTING bit may be unset -- tuple always becomes a pivot tuple */
472  ItemPointerSetOffsetNumber(&itup->t_tid, natts);
473  Assert(BTreeTupleIsPivot(itup));
474 }
475 
476 /*
477  * Set the bit indicating heap TID attribute present in pivot tuple
478  */
479 static inline void
480 BTreeTupleSetAltHeapTID(IndexTuple pivot)
481 {
482  OffsetNumber existing;
483 
484  Assert(BTreeTupleIsPivot(pivot));
485 
486  existing = ItemPointerGetOffsetNumberNoCheck(&pivot->t_tid);
487  ItemPointerSetOffsetNumber(&pivot->t_tid,
488  existing | BT_PIVOT_HEAP_TID_ATTR);
489 }
490 
491 /*
492  * Get/set leaf page's "top parent" link from its high key. Used during page
493  * deletion.
494  *
495  * Note: Cannot assert that tuple is a pivot tuple. If we did so then
496  * !heapkeyspace indexes would exhibit false positive assertion failures.
497  */
498 static inline BlockNumber
499 BTreeTupleGetTopParent(IndexTuple leafhikey)
500 {
501  return ItemPointerGetBlockNumberNoCheck(&leafhikey->t_tid);
502 }
503 
504 static inline void
505 BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
506 {
507  ItemPointerSetBlockNumber(&leafhikey->t_tid, blkno);
508  BTreeTupleSetNAtts(leafhikey, 0);
509 }
510 
511 /*
512  * Get tiebreaker heap TID attribute, if any.
513  *
514  * This returns the first/lowest heap TID in the case of a posting list tuple.
515  */
516 static inline ItemPointer
517 BTreeTupleGetHeapTID(IndexTuple itup)
518 {
519  if (BTreeTupleIsPivot(itup))
520  {
521  /* Pivot tuple heap TID representation? */
522  if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
523  BT_PIVOT_HEAP_TID_ATTR) != 0)
524  return (ItemPointer) ((char *) itup + IndexTupleSize(itup) -
525  sizeof(ItemPointerData));
526 
527  /* Heap TID attribute was truncated */
528  return NULL;
529  }
530  else if (BTreeTupleIsPosting(itup))
531  return BTreeTupleGetPosting(itup);
532 
533  return &itup->t_tid;
534 }
535 
536 /*
537  * Get maximum heap TID attribute, which could be the only TID in the case of
538  * a non-pivot tuple that does not have a posting list.
539  *
540  * Works with non-pivot tuples only.
541  */
542 static inline ItemPointer
543 BTreeTupleGetMaxHeapTID(IndexTuple itup)
544 {
545  Assert(!BTreeTupleIsPivot(itup));
546 
547  if (BTreeTupleIsPosting(itup))
548  {
549  uint16 nposting = BTreeTupleGetNPosting(itup);
550 
551  return BTreeTupleGetPostingN(itup, nposting - 1);
552  }
553 
554  return &itup->t_tid;
555 }
556 
557 /*
558  * Operator strategy numbers for B-tree have been moved to access/stratnum.h,
559  * because many places need to use them in ScanKeyInit() calls.
560  *
561  * The strategy numbers are chosen so that we can commute them by
562  * subtraction, thus:
563  */
564 #define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
565 
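/*
 * For example, with the strategy numbers from access/stratnum.h
 * (BTLessStrategyNumber = 1 through BTGreaterStrategyNumber = 5):
 *
 *     BTCommuteStrategyNumber(BTLessStrategyNumber) == BTGreaterStrategyNumber
 *     BTCommuteStrategyNumber(BTEqualStrategyNumber) == BTEqualStrategyNumber
 *
 * i.e. "a < b" commutes to "b > a", while equality is its own commutator.
 */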
566 /*
567  * When a new operator class is declared, we require that the user
568  * supply us with an amproc procedure (BTORDER_PROC) for determining
569  * whether, for two keys a and b, a < b, a = b, or a > b. This routine
570  * must return < 0, 0, > 0, respectively, in these three cases.
571  *
572  * To facilitate accelerated sorting, an operator class may choose to
573  * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
574  * src/include/utils/sortsupport.h.
575  *
576  * To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
577  * an operator class may choose to offer a third amproc procedure
578  * (BTINRANGE_PROC), independently of whether it offers sortsupport.
579  * For full details, see doc/src/sgml/btree.sgml.
580  *
581  * To facilitate B-Tree deduplication, an operator class may choose to
582  * offer a fourth amproc procedure (BTEQUALIMAGE_PROC). For full details,
583  * see doc/src/sgml/btree.sgml.
584  */
585 
586 #define BTORDER_PROC 1
587 #define BTSORTSUPPORT_PROC 2
588 #define BTINRANGE_PROC 3
589 #define BTEQUALIMAGE_PROC 4
590 #define BTOPTIONS_PROC 5
591 #define BTNProcs 5
592 
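/*
 * A lookup/invocation sketch (hypothetical helper, not in the original
 * header): how BTORDER_PROC is typically fetched and called for one
 * attribute, using the standard fmgr machinery:
 */
static inline int32
sketch_call_btorder_proc(Relation rel, AttrNumber attno, Oid collation,
						 Datum a, Datum b)
{
	FmgrInfo   *procinfo = index_getprocinfo(rel, attno, BTORDER_PROC);

	/* returns < 0, 0, or > 0, per the contract described above */
	return DatumGetInt32(FunctionCall2Coll(procinfo, collation, a, b));
}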
593 /*
594  * We need to be able to tell the difference between read and write
595  * requests for pages, in order to do locking correctly.
596  */
597 
598 #define BT_READ BUFFER_LOCK_SHARE
599 #define BT_WRITE BUFFER_LOCK_EXCLUSIVE
600 
601 /*
602  * BTStackData -- As we descend a tree, we push the location of pivot
603  * tuples whose downlink we are about to follow onto a private stack. If
604  * we split a leaf, we use this stack to walk back up the tree and insert
605  * data into its parent page at the correct location. We also have to
606  * recursively insert into the grandparent page if and when the parent page
607  * splits. Our private stack can become stale due to concurrent page
608  * splits and page deletions, but it should never give us an irredeemably
609  * bad picture.
610  */
611 typedef struct BTStackData
612 {
613  BlockNumber bts_blkno;
614  OffsetNumber bts_offset;
615  struct BTStackData *bts_parent;
616 } BTStackData;
617 
618 typedef BTStackData *BTStack;
619 
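/*
 * The parent links make freeing the stack a simple walk, essentially what
 * _bt_freestack() in nbtutils.c does (fragment shown for illustration):
 *
 *     while (stack != NULL)
 *     {
 *         BTStack parent = stack->bts_parent;
 *
 *         pfree(stack);
 *         stack = parent;
 *     }
 */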
620 /*
621  * BTScanInsertData is the btree-private state needed to find an initial
622  * position for an indexscan, or to insert new tuples -- an "insertion
623  * scankey" (not to be confused with a search scankey). It's used to descend
624  * a B-Tree using _bt_search.
625  *
626  * heapkeyspace indicates if we expect all keys in the index to be physically
627  * unique because heap TID is used as a tiebreaker attribute, and if index may
628  * have truncated key attributes in pivot tuples. This is actually a property
629  * of the index relation itself (not an indexscan). heapkeyspace indexes are
630  * indexes whose version is >= 4. It's convenient to keep this close
631  * by, rather than accessing the metapage repeatedly.
632  *
633  * allequalimage is set to indicate that deduplication is safe for the index.
634  * This is also a property of the index relation rather than an indexscan.
635  *
636  * anynullkeys indicates if any of the keys had NULL value when scankey was
637  * built from index tuple (note that already-truncated tuple key attributes
638  * set NULL as a placeholder key value, which also affects value of
639  * anynullkeys). This is a convenience for unique index non-pivot tuple
640  * insertion, which usually temporarily unsets scantid, but must not when
641  * anynullkeys is true. Value generally matches non-pivot tuple's HasNulls
642  * bit, but may not when inserting into an INCLUDE index (tuple header value
643  * is affected by the NULL-ness of both key and non-key attributes).
644  *
645  * When nextkey is false (the usual case), _bt_search and _bt_binsrch will
646  * locate the first item >= scankey. When nextkey is true, they will locate
647  * the first item > scan key.
648  *
649  * pivotsearch is set to true by callers that want to re-find a leaf page
650  * using a scankey built from a leaf page's high key. Most callers set this
651  * to false.
652  *
653  * scantid is the heap TID that is used as a final tiebreaker attribute. It
654  * is set to NULL when the index scan doesn't need to find a position for a
655  * specific physical tuple. Must be set when inserting new tuples into
656  * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
657  * in one exact position (it's never set with !heapkeyspace indexes, though).
658  * Despite the representational difference, nbtree search code considers
659  * scantid to be just another insertion scankey attribute.
660  *
661  * scankeys is an array of scan key entries for attributes that are compared
662  * before scantid (user-visible attributes). keysz is the size of the array.
663  * During insertion, there must be a scan key for every attribute, but when
664  * starting a regular index scan some can be omitted. The array is used as a
665  * flexible array member, though it's sized in a way that makes it possible to
666  * use stack allocations. See nbtree/README for full details.
667  */
668 typedef struct BTScanInsertData
669 {
670  bool heapkeyspace;
671  bool allequalimage;
672  bool anynullkeys;
673  bool nextkey;
674  bool pivotsearch;
675  ItemPointer scantid; /* tiebreaker for scankeys */
676  int keysz; /* Size of scankeys array */
677  ScanKeyData scankeys[INDEX_MAX_KEYS]; /* Must appear last */
678 } BTScanInsertData;
679 
680 typedef BTScanInsertData *BTScanInsert;
681 
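/*
 * Usage sketch (hypothetical fragment, not from the original header):
 * insertion builds an insertion scankey from the new tuple and descends
 * with it, much as _bt_doinsert() in nbtinsert.c does.  _bt_mkscankey()
 * and _bt_search() are declared later in this file:
 *
 *     BTScanInsert itup_key = _bt_mkscankey(rel, itup);
 *     Buffer       buf;
 *     BTStack      stack = _bt_search(rel, itup_key, &buf, BT_WRITE, NULL);
 *
 * For a unique-index insertion, itup_key->scantid is temporarily set to
 * NULL while checking for conflicts, then restored for the final
 * positioning step (unless anynullkeys is true, per the comments above).
 */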
682 /*
683  * BTInsertStateData is a working area used during insertion.
684  *
685  * This is filled in after descending the tree to the first leaf page the new
686  * tuple might belong on. Tracks the current position while performing
687  * the uniqueness check, before we have determined exactly which page to
688  * insert to.
689  *
690  * (This should be private to nbtinsert.c, but it's also used by
691  * _bt_binsrch_insert)
692  */
693 typedef struct BTInsertStateData
694 {
695  IndexTuple itup; /* Item we're inserting */
696  Size itemsz; /* Size of itup -- should be MAXALIGN()'d */
697  BTScanInsert itup_key; /* Insertion scankey */
698 
699  /* Buffer containing leaf page we're likely to insert itup on */
700  Buffer buf;
701 
702  /*
703  * Cache of bounds within the current buffer. Only used for insertions
704  * where _bt_check_unique is called. See _bt_binsrch_insert and
705  * _bt_findinsertloc for details.
706  */
707  bool bounds_valid;
708  OffsetNumber low;
709  OffsetNumber stricthigh;
710 
711  /*
712  * if _bt_binsrch_insert found the location inside existing posting list,
713  * save the position inside the list. -1 sentinel value indicates overlap
714  * with an existing posting list tuple that has its LP_DEAD bit set.
715  */
716  int postingoff;
717 } BTInsertStateData;
718 
719 typedef BTInsertStateData *BTInsertState;
720 
721 /*
722  * State used to represent an individual pending tuple during
723  * deduplication.
724  */
725 typedef struct BTDedupInterval
726 {
727  OffsetNumber baseoff;
728  uint16 nitems;
729 } BTDedupInterval;
730 
731 /*
732  * BTDedupStateData is a working area used during deduplication.
733  *
734  * The status info fields track the state of a whole-page deduplication pass.
735  * State about the current pending posting list is also tracked.
736  *
737  * A pending posting list is comprised of a contiguous group of equal items
738  * from the page, starting from page offset number 'baseoff'. This is the
739  * offset number of the "base" tuple for the new posting list. 'nitems' is the
740  * current total number of existing items from the page that will be merged to
741  * make a new posting list tuple, including the base tuple item. (Existing
742  * items may themselves be posting list tuples, or regular non-pivot tuples.)
743  *
744  * The total size of the existing tuples to be freed when pending posting list
745  * is processed gets tracked by 'phystupsize'. This information allows
746  * deduplication to calculate the space saving for each new posting list
747  * tuple, and for the entire pass over the page as a whole.
748  */
749 typedef struct BTDedupStateData
750 {
751  /* Deduplication status info for entire pass over page */
752  bool deduplicate; /* Still deduplicating page? */
753  Size maxpostingsize; /* Limit on size of final tuple */
754 
755  /* Metadata about base tuple of current pending posting list */
756  IndexTuple base; /* Use to form new posting list */
757  OffsetNumber baseoff; /* page offset of base */
758  Size basetupsize; /* base size without original posting list */
759 
760  /* Other metadata about pending posting list */
761  ItemPointer htids; /* Heap TIDs in pending posting list */
762  int nhtids; /* Number of heap TIDs in htids array */
763  int nitems; /* Number of existing tuples/line pointers */
764  Size phystupsize; /* Includes line pointer overhead */
765 
766  /*
767  * Array of tuples to go on new version of the page. Contains one entry
768  * for each group of consecutive items. Note that existing tuples that
769  * will not become posting list tuples do not appear in the array (they
770  * are implicitly unchanged by deduplication pass).
771  */
772  int nintervals; /* current size of intervals array */
773  BTDedupInterval intervals[MaxIndexTuplesPerPage];
774 } BTDedupStateData;
775 
776 typedef BTDedupStateData *BTDedupState;
777 
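/*
 * Sketch of the intended calling pattern (hypothetical fragment; see
 * _bt_dedup_one_page() in nbtdedup.c for the real loop).  Tuples are fed
 * in page order; when an item cannot join the pending interval, the
 * pending posting list is emitted and a new interval starts with that
 * item as its base:
 *
 *     if (state->nitems == 0)
 *         _bt_dedup_start_pending(state, itup, offnum);
 *     else if (!_bt_dedup_save_htid(state, itup))
 *     {
 *         _bt_dedup_finish_pending(newpage, state);
 *         _bt_dedup_start_pending(state, itup, offnum);
 *     }
 */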
778 /*
779  * BTVacuumPostingData is state that represents how to VACUUM a posting list
780  * tuple when some (though not all) of its TIDs are to be deleted.
781  *
782  * Convention is that itup field is the original posting list tuple on input,
783  * and palloc()'d final tuple used to overwrite existing tuple on output.
784  */
785 typedef struct BTVacuumPostingData
786 {
787  /* Tuple that will be/was updated */
788  IndexTuple itup;
789  OffsetNumber updatedoffset;
790 
791  /* State needed to describe final itup in WAL */
792  uint16 ndeletedtids;
793  uint16 deletetids[FLEXIBLE_ARRAY_MEMBER];
794 } BTVacuumPostingData;
795 
796 typedef BTVacuumPostingData *BTVacuumPosting;
797 
798 /*
799  * BTScanOpaqueData is the btree-private state needed for an indexscan.
800  * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
801  * details of the preprocessing), information about the current location
802  * of the scan, and information about the marked location, if any. (We use
803  * BTScanPosData to represent the data needed for each of current and marked
804  * locations.) In addition we can remember some known-killed index entries
805  * that must be marked before we can move off the current page.
806  *
807  * Index scans work a page at a time: we pin and read-lock the page, identify
808  * all the matching items on the page and save them in BTScanPosData, then
809  * release the read-lock while returning the items to the caller for
810  * processing. This approach minimizes lock/unlock traffic. Note that we
811  * keep the pin on the index page until the caller is done with all the items
812  * (this is needed for VACUUM synchronization, see nbtree/README). When we
813  * are ready to step to the next page, if the caller has told us any of the
814  * items were killed, we re-lock the page to mark them killed, then unlock.
815  * Finally we drop the pin and step to the next page in the appropriate
816  * direction.
817  *
818  * If we are doing an index-only scan, we save the entire IndexTuple for each
819  * matched item, otherwise only its heap TID and offset. The IndexTuples go
820  * into a separate workspace array; each BTScanPosItem stores its tuple's
821  * offset within that array. Posting list tuples store a "base" tuple once,
822  * allowing the same key to be returned for each TID in the posting list
823  * tuple.
824  */
825 
826 typedef struct BTScanPosItem /* what we remember about each match */
827 {
828  ItemPointerData heapTid; /* TID of referenced heap item */
829  OffsetNumber indexOffset; /* index item's location within page */
830  LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */
831 } BTScanPosItem;
832 
833 typedef struct BTScanPosData
834 {
835  Buffer buf; /* if valid, the buffer is pinned */
836 
837  XLogRecPtr lsn; /* pos in the WAL stream when page was read */
838  BlockNumber currPage; /* page referenced by items array */
839  BlockNumber nextPage; /* page's right link when we scanned it */
840 
841  /*
842  * moreLeft and moreRight track whether we think there may be matching
843  * index entries to the left and right of the current page, respectively.
844  * We can clear the appropriate one of these flags when _bt_checkkeys()
845  * returns continuescan = false.
846  */
847  bool moreLeft;
848  bool moreRight;
849 
850  /*
851  * If we are doing an index-only scan, nextTupleOffset is the first free
852  * location in the associated tuple storage workspace.
853  */
854  int nextTupleOffset;
855 
856  /*
857  * The items array is always ordered in index order (ie, increasing
858  * indexoffset). When scanning backwards it is convenient to fill the
859  * array back-to-front, so we start at the last slot and fill downwards.
860  * Hence we need both a first-valid-entry and a last-valid-entry counter.
861  * itemIndex is a cursor showing which entry was last returned to caller.
862  */
863  int firstItem; /* first valid index in items[] */
864  int lastItem; /* last valid index in items[] */
865  int itemIndex; /* current index in items[] */
866 
867  BTScanPosItem items[MaxTIDsPerBTreePage]; /* MUST BE LAST */
868 } BTScanPosData;
869 
869 
870 typedef BTScanPosData *BTScanPos;
871 
872 #define BTScanPosIsPinned(scanpos) \
873 ( \
874  AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
875  !BufferIsValid((scanpos).buf)), \
876  BufferIsValid((scanpos).buf) \
877 )
878 #define BTScanPosUnpin(scanpos) \
879  do { \
880  ReleaseBuffer((scanpos).buf); \
881  (scanpos).buf = InvalidBuffer; \
882  } while (0)
883 #define BTScanPosUnpinIfPinned(scanpos) \
884  do { \
885  if (BTScanPosIsPinned(scanpos)) \
886  BTScanPosUnpin(scanpos); \
887  } while (0)
888 
889 #define BTScanPosIsValid(scanpos) \
890 ( \
891  AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
892  !BufferIsValid((scanpos).buf)), \
893  BlockNumberIsValid((scanpos).currPage) \
894 )
895 #define BTScanPosInvalidate(scanpos) \
896  do { \
897  (scanpos).currPage = InvalidBlockNumber; \
898  (scanpos).nextPage = InvalidBlockNumber; \
899  (scanpos).buf = InvalidBuffer; \
900  (scanpos).lsn = InvalidXLogRecPtr; \
901  (scanpos).nextTupleOffset = 0; \
902  } while (0);
903 
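/*
 * Release sketch (hypothetical helper, not in the original header): drop
 * the pin if one is still held, then mark the position invalid so that
 * BTScanPosIsValid() reports false:
 */
static inline void
sketch_release_scanpos(BTScanPosData *sp)
{
	BTScanPosUnpinIfPinned(*sp);
	BTScanPosInvalidate(*sp);
}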
904 /* We need one of these for each equality-type SK_SEARCHARRAY scan key */
905 typedef struct BTArrayKeyInfo
906 {
907  int scan_key; /* index of associated key in arrayKeyData */
908  int cur_elem; /* index of current element in elem_values */
909  int mark_elem; /* index of marked element in elem_values */
910  int num_elems; /* number of elems in current array value */
911  Datum *elem_values; /* array of num_elems Datums */
912 } BTArrayKeyInfo;
913 
914 typedef struct BTScanOpaqueData
915 {
916  /* these fields are set by _bt_preprocess_keys(): */
917  bool qual_ok; /* false if qual can never be satisfied */
918  int numberOfKeys; /* number of preprocessed scan keys */
919  ScanKey keyData; /* array of preprocessed scan keys */
920 
921  /* workspace for SK_SEARCHARRAY support */
922  ScanKey arrayKeyData; /* modified copy of scan->keyData */
923  int numArrayKeys; /* number of equality-type array keys (-1 if
924  * there are any unsatisfiable array keys) */
925  int arrayKeyCount; /* count indicating number of array scan keys
926  * processed */
927  BTArrayKeyInfo *arrayKeys; /* info about each equality-type array key */
928  MemoryContext arrayContext; /* scan-lifespan context for array data */
929 
930  /* info about killed items if any (killedItems is NULL if never used) */
931  int *killedItems; /* currPos.items indexes of killed items */
932  int numKilled; /* number of currently stored items */
933 
934  /*
935  * If we are doing an index-only scan, these are the tuple storage
936  * workspaces for the currPos and markPos respectively. Each is of size
937  * BLCKSZ, so it can hold as much as a full page's worth of tuples.
938  */
939  char *currTuples; /* tuple storage for currPos */
940  char *markTuples; /* tuple storage for markPos */
941 
942  /*
943  * If the marked position is on the same page as current position, we
944  * don't use markPos, but just keep the marked itemIndex in markItemIndex
945  * (all the rest of currPos is valid for the mark position). Hence, to
946  * determine if there is a mark, first look at markItemIndex, then at
947  * markPos.
948  */
949  int markItemIndex; /* itemIndex, or -1 if not valid */
950 
951  /* keep these last in struct for efficiency */
952  BTScanPosData currPos; /* current position data */
953  BTScanPosData markPos; /* marked position, if any */
954 } BTScanOpaqueData;
955 
956 typedef BTScanOpaqueData *BTScanOpaque;
957 
958 /*
959  * We use some private sk_flags bits in preprocessed scan keys. We're allowed
960  * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
961  * index's indoption[] array entry for the index attribute.
962  */
963 #define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
964 #define SK_BT_REQBKWD 0x00020000 /* required to continue backward scan */
965 #define SK_BT_INDOPTION_SHIFT 24 /* must clear the above bits */
966 #define SK_BT_DESC (INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
967 #define SK_BT_NULLS_FIRST (INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)
968 
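/*
 * Decoding sketch (fragment, not from the original header): given a
 * preprocessed scan key skey taken from BTScanOpaqueData.keyData[], the
 * per-column ordering options are recovered as:
 *
 *     bool  desc        = (skey->sk_flags & SK_BT_DESC) != 0;
 *     bool  nulls_first = (skey->sk_flags & SK_BT_NULLS_FIRST) != 0;
 */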
969 typedef struct BTOptions
970 {
971  int32 varlena_header_; /* varlena header (do not touch directly!) */
972  int fillfactor; /* page fill factor in percent (0..100) */
973  /* fraction of newly inserted tuples needed to trigger index cleanup */
974  float8 vacuum_cleanup_index_scale_factor;
975  bool deduplicate_items; /* Try to deduplicate items? */
976 } BTOptions;
977 
978 #define BTGetFillFactor(relation) \
979  (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
980  relation->rd_rel->relam == BTREE_AM_OID), \
981  (relation)->rd_options ? \
982  ((BTOptions *) (relation)->rd_options)->fillfactor : \
983  BTREE_DEFAULT_FILLFACTOR)
984 #define BTGetTargetPageFreeSpace(relation) \
985  (BLCKSZ * (100 - BTGetFillFactor(relation)) / 100)
986 #define BTGetDeduplicateItems(relation) \
987  (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
988  relation->rd_rel->relam == BTREE_AM_OID), \
989  ((relation)->rd_options ? \
990  ((BTOptions *) (relation)->rd_options)->deduplicate_items : true))
991 
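/*
 * Worked example, assuming BLCKSZ = 8192 (numbers not from the original
 * header): with the default leaf fillfactor of 90,
 * BTGetTargetPageFreeSpace() is 8192 * (100 - 90) / 100 = 819 bytes, i.e.
 * roughly 10% of each leaf page is left free during CREATE INDEX and
 * rightmost page splits.
 */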
992 /*
993  * Constant definition for progress reporting. Phase numbers must match
994  * btbuildphasename.
995  */
996 /* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
997 #define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN 2
998 #define PROGRESS_BTREE_PHASE_PERFORMSORT_1 3
999 #define PROGRESS_BTREE_PHASE_PERFORMSORT_2 4
1000 #define PROGRESS_BTREE_PHASE_LEAF_LOAD 5
1001 
1002 /*
1003  * external entry points for btree, in nbtree.c
1004  */
1005 extern void btbuildempty(Relation index);
1006 extern bool btinsert(Relation rel, Datum *values, bool *isnull,
1007  ItemPointer ht_ctid, Relation heapRel,
1008  IndexUniqueCheck checkUnique,
1009  struct IndexInfo *indexInfo);
1010 extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
1011 extern Size btestimateparallelscan(void);
1012 extern void btinitparallelscan(void *target);
1013 extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
1014 extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
1015 extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
1016  ScanKey orderbys, int norderbys);
1017 extern void btparallelrescan(IndexScanDesc scan);
1018 extern void btendscan(IndexScanDesc scan);
1019 extern void btmarkpos(IndexScanDesc scan);
1020 extern void btrestrpos(IndexScanDesc scan);
1021 extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
1022  IndexBulkDeleteResult *stats,
1023  IndexBulkDeleteCallback callback,
1024  void *callback_state);
1025 extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
1026  IndexBulkDeleteResult *stats);
1027 extern bool btcanreturn(Relation index, int attno);
1028 
1029 /*
1030  * prototypes for internal functions in nbtree.c
1031  */
1032 extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno);
1033 extern void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page);
1034 extern void _bt_parallel_done(IndexScanDesc scan);
1035 extern void _bt_parallel_advance_array_keys(IndexScanDesc scan);
1036 
1037 /*
1038  * prototypes for functions in nbtdedup.c
1039  */
1040 extern void _bt_dedup_one_page(Relation rel, Buffer buf, Relation heapRel,
1041  IndexTuple newitem, Size newitemsz,
1042  bool checkingunique);
1043 extern void _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
1044  OffsetNumber baseoff);
1045 extern bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup);
1046 extern Size _bt_dedup_finish_pending(Page newpage, BTDedupState state);
1047 extern IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids,
1048  int nhtids);
1049 extern void _bt_update_posting(BTVacuumPosting vacposting);
1050 extern IndexTuple _bt_swap_posting(IndexTuple newitem, IndexTuple oposting,
1051  int postingoff);
1052 
1053 /*
1054  * prototypes for functions in nbtinsert.c
1055  */
1056 extern bool _bt_doinsert(Relation rel, IndexTuple itup,
1057  IndexUniqueCheck checkUnique, Relation heapRel);
1058 extern void _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack);
1059 extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child);
1060 
1061 /*
1062  * prototypes for functions in nbtsplitloc.c
1063  */
1064 extern OffsetNumber _bt_findsplitloc(Relation rel, Page page,
1065  OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
1066  bool *newitemonleft);
1067 
1068 /*
1069  * prototypes for functions in nbtpage.c
1070  */
1071 extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
1072  bool allequalimage);
1073 extern void _bt_update_meta_cleanup_info(Relation rel,
1074  TransactionId oldestBtpoXact, float8 numHeapTuples);
1075 extern void _bt_upgrademetapage(Page page);
1076 extern Buffer _bt_getroot(Relation rel, int access);
1077 extern Buffer _bt_gettrueroot(Relation rel);
1078 extern int _bt_getrootheight(Relation rel);
1079 extern void _bt_metaversion(Relation rel, bool *heapkeyspace,
1080  bool *allequalimage);
1081 extern void _bt_checkpage(Relation rel, Buffer buf);
1082 extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
1083 extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
1084  BlockNumber blkno, int access);
1085 extern void _bt_relbuf(Relation rel, Buffer buf);
1086 extern void _bt_pageinit(Page page, Size size);
1087 extern bool _bt_page_recyclable(Page page);
1088 extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
1089  OffsetNumber *deletable, int ndeletable,
1090  BTVacuumPosting *updatable, int nupdatable);
1091 extern void _bt_delitems_delete(Relation rel, Buffer buf,
1092  OffsetNumber *deletable, int ndeletable,
1093  Relation heapRel);
1094 extern int _bt_pagedel(Relation rel, Buffer buf);
1095 
1096 /*
1097  * prototypes for functions in nbtsearch.c
1098  */
1099 extern BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP,
1100  int access, Snapshot snapshot);
1101 extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf,
1102  bool forupdate, BTStack stack, int access, Snapshot snapshot);
1103 extern OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate);
1104 extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum);
1105 extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
1106 extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
1107 extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
1108  Snapshot snapshot);
1109 
1110 /*
1111  * prototypes for functions in nbtutils.c
1112  */
1113 extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
1114 extern void _bt_freestack(BTStack stack);
1115 extern void _bt_preprocess_array_keys(IndexScanDesc scan);
1116 extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
1117 extern bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir);
1118 extern void _bt_mark_array_keys(IndexScanDesc scan);
1119 extern void _bt_restore_array_keys(IndexScanDesc scan);
1120 extern void _bt_preprocess_keys(IndexScanDesc scan);
1121 extern bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
1122  int tupnatts, ScanDirection dir, bool *continuescan);
1123 extern void _bt_killitems(IndexScanDesc scan);
1124 extern BTCycleId _bt_vacuum_cycleid(Relation rel);
1125 extern BTCycleId _bt_start_vacuum(Relation rel);
1126 extern void _bt_end_vacuum(Relation rel);
1127 extern void _bt_end_vacuum_callback(int code, Datum arg);
1128 extern Size BTreeShmemSize(void);
1129 extern void BTreeShmemInit(void);
1130 extern bytea *btoptions(Datum reloptions, bool validate);
1131 extern bool btproperty(Oid index_oid, int attno,
1132  IndexAMProperty prop, const char *propname,
1133  bool *res, bool *isnull);
1134 extern char *btbuildphasename(int64 phasenum);
1135 extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
1136  IndexTuple firstright, BTScanInsert itup_key);
1137 extern int _bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
1138  IndexTuple firstright);
1139 extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
1140  OffsetNumber offnum);
1141 extern void _bt_check_third_page(Relation rel, Relation heap,
1142  bool needheaptidspace, Page page, IndexTuple newtup);
1143 extern bool _bt_allequalimage(Relation rel, bool debugmessage);
1144 
1145 /*
1146  * prototypes for functions in nbtvalidate.c
1147  */
1148 extern bool btvalidate(Oid opclassoid);
1149 
1150 /*
1151  * prototypes for functions in nbtsort.c
1152  */
1153 extern IndexBuildResult *btbuild(Relation heap, Relation index,
1154  struct IndexInfo *indexInfo);
1155 extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);
1156 
1157 #endif /* NBTREE_H */