PostgreSQL Source Code (git master)
nbtree.h
1 /*-------------------------------------------------------------------------
2  *
3  * nbtree.h
4  * header file for postgres btree access method implementation.
5  *
6  *
7  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/access/nbtree.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef NBTREE_H
15 #define NBTREE_H
16 
17 #include "access/amapi.h"
18 #include "access/itup.h"
19 #include "access/sdir.h"
20 #include "access/xlogreader.h"
21 #include "catalog/pg_index.h"
22 #include "lib/stringinfo.h"
23 #include "storage/bufmgr.h"
24 #include "storage/shm_toc.h"
25 
26 /* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
27 typedef uint16 BTCycleId;
28 
29 /*
30  * BTPageOpaqueData -- At the end of every page, we store pointers
31  * to both siblings in the tree. This is used to do forward/backward
32  * index scans. The next-page link is also critical for recovery when
33  * a search has navigated to the wrong page due to concurrent page splits
34  * or deletions; see src/backend/access/nbtree/README for more info.
35  *
36  * In addition, we store the page's btree level (counting upwards from
37  * zero at a leaf page) as well as some flag bits indicating the page type
38  * and status. If the page is deleted, we replace the level with the
39  * next-transaction-ID value indicating when it is safe to reclaim the page.
40  *
41  * We also store a "vacuum cycle ID". When a page is split while VACUUM is
42  * processing the index, a nonzero value associated with the VACUUM run is
43  * stored into both halves of the split page. (If VACUUM is not running,
44  * both pages receive zero cycleids.) This allows VACUUM to detect whether
45  * a page was split since it started, with a small probability of false match
46  * if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
47  * ago. Also, during a split, the BTP_SPLIT_END flag is cleared in the left
48  * (original) page, and set in the right page, but only if the next page
49  * to its right has a different cycleid.
50  *
51  * NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
52  * instead.
53  */
54 
55 typedef struct BTPageOpaqueData
56 {
57  BlockNumber btpo_prev; /* left sibling, or P_NONE if leftmost */
58  BlockNumber btpo_next; /* right sibling, or P_NONE if rightmost */
59  union
60  {
61  uint32 level; /* tree level --- zero for leaf pages */
62  TransactionId xact; /* next transaction ID, if deleted */
63  } btpo;
64  uint16 btpo_flags; /* flag bits, see below */
65  BTCycleId btpo_cycleid; /* vacuum cycle ID of latest split */
66 } BTPageOpaqueData;
67 
68 typedef BTPageOpaqueData *BTPageOpaque;
69 
70 /* Bits defined in btpo_flags */
71 #define BTP_LEAF (1 << 0) /* leaf page, i.e. not internal page */
72 #define BTP_ROOT (1 << 1) /* root page (has no parent) */
73 #define BTP_DELETED (1 << 2) /* page has been deleted from tree */
74 #define BTP_META (1 << 3) /* meta-page */
75 #define BTP_HALF_DEAD (1 << 4) /* empty, but still in tree */
76 #define BTP_SPLIT_END (1 << 5) /* rightmost page of split group */
77 #define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples */
78 #define BTP_INCOMPLETE_SPLIT (1 << 7) /* right sibling's downlink is missing */
79 
80 /*
81  * The max allowed value of a cycle ID is a bit less than 64K. This is
82  * for convenience of pg_filedump and similar utilities: we want to use
83  * the last 2 bytes of special space as an index type indicator, and
84  * restricting cycle ID lets btree use that space for vacuum cycle IDs
85  * while still allowing index type to be identified.
86  */
87 #define MAX_BT_CYCLE_ID 0xFF7F
88 
89 
90 /*
91  * The Meta page is always the first page in the btree index.
92  * Its primary purpose is to point to the location of the btree root page.
93  * We also point to the "fast" root, which is the current effective root;
94  * see README for discussion.
95  */
96 
97 typedef struct BTMetaPageData
98 {
99  uint32 btm_magic; /* should contain BTREE_MAGIC */
100  uint32 btm_version; /* nbtree version (always <= BTREE_VERSION) */
101  BlockNumber btm_root; /* current root location */
102  uint32 btm_level; /* tree level of the root page */
103  BlockNumber btm_fastroot; /* current "fast" root location */
104  uint32 btm_fastlevel; /* tree level of the "fast" root page */
105  /* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */
106  TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among all deleted
107  * pages */
108  float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples
109  * during last cleanup */
110 } BTMetaPageData;
111 
112 #define BTPageGetMeta(p) \
113  ((BTMetaPageData *) PageGetContents(p))
114 
115 /*
116  * The current Btree version is 4. That's what you'll get when you create
117  * a new index.
118  *
119  * Btree version 3 was used in PostgreSQL v11. It is mostly the same as
120  * version 4, but heap TIDs were not part of the keyspace. Index tuples
121  * with duplicate keys could be stored in any order. We continue to
122  * support reading and writing Btree versions 2 and 3, so that they don't
123  * need to be immediately re-indexed at pg_upgrade. In order to get the
124  * new heapkeyspace semantics, however, a REINDEX is needed.
125  *
126  * Btree version 2 is mostly the same as version 3. There are two new
127  * fields in the metapage that were introduced in version 3. A version 2
128  * metapage will be automatically upgraded to version 3 on the first
129  * insert to it. INCLUDE indexes cannot use version 2.
130  */
131 #define BTREE_METAPAGE 0 /* first page is meta */
132 #define BTREE_MAGIC 0x053162 /* magic number in metapage */
133 #define BTREE_VERSION 4 /* current version number */
134 #define BTREE_MIN_VERSION 2 /* minimal supported version number */
135 #define BTREE_NOVAC_VERSION 3 /* minimal version with all meta fields */
136 
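/*
 * A minimal sketch of how the metapage fields above are read through
 * BTPageGetMeta(); real code goes through _bt_getroot().  Assumes "metapg"
 * is the already-read metapage; the function name is illustrative only.
 */
static inline BlockNumber
example_metapage_fastroot(Page metapg)
{
	BTMetaPageData *metad = BTPageGetMeta(metapg);

	Assert(metad->btm_magic == BTREE_MAGIC);
	Assert(metad->btm_version >= BTREE_MIN_VERSION &&
		   metad->btm_version <= BTREE_VERSION);

	/* "fast" root block; P_NONE when the index is completely empty */
	return metad->btm_fastroot;
}
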
137 /*
138  * Maximum size of a btree index entry, including its tuple header.
139  *
140  * We actually need to be able to fit three items on every page,
141  * so restrict any one item to 1/3 the per-page available space.
142  *
143  * There are rare cases where _bt_truncate() will need to enlarge
144  * a heap index tuple to make space for a tiebreaker heap TID
145  * attribute, which we account for here.
146  */
147 #define BTMaxItemSize(page) \
148  MAXALIGN_DOWN((PageGetPageSize(page) - \
149  MAXALIGN(SizeOfPageHeaderData + \
150  3*sizeof(ItemIdData) + \
151  3*sizeof(ItemPointerData)) - \
152  MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
153 #define BTMaxItemSizeNoHeapTid(page) \
154  MAXALIGN_DOWN((PageGetPageSize(page) - \
155  MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
156  MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
157 
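/*
 * A minimal sketch of the size check performed before accepting a new leaf
 * tuple; the authoritative check lives in _bt_check_third_page().  Assumes
 * "itemsz" is already MAXALIGN()'d; the function name is illustrative only.
 */
static inline bool
example_fits_on_leaf_page(Page page, Size itemsz, bool needheaptidspace)
{
	if (needheaptidspace)
		return itemsz <= BTMaxItemSize(page);

	/* pg_upgrade'd (!heapkeyspace) indexes get the slightly larger limit */
	return itemsz <= BTMaxItemSizeNoHeapTid(page);
}
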
158 /*
159  * The leaf-page fillfactor defaults to 90% but is user-adjustable.
160  * For pages above the leaf level, we use a fixed 70% fillfactor.
161  * The fillfactor is applied during index build and when splitting
162  * a rightmost page; when splitting non-rightmost pages we try to
163  * divide the data equally. When splitting a page that's entirely
164  * filled with a single value (duplicates), the effective leaf-page
165  * fillfactor is 96%, regardless of whether the page is a rightmost
166  * page.
167  */
168 #define BTREE_MIN_FILLFACTOR 10
169 #define BTREE_DEFAULT_FILLFACTOR 90
170 #define BTREE_NONLEAF_FILLFACTOR 70
171 #define BTREE_SINGLEVAL_FILLFACTOR 96
172 
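/*
 * A minimal sketch of how a fillfactor translates into a free-space target
 * when packing a page during index build or a rightmost split; nbtsort.c and
 * nbtsplitloc.c hold the authoritative logic, and the function name here is
 * illustrative only.
 */
static inline Size
example_fillfactor_free_target(int fillfactor)
{
	/* leave (100 - fillfactor)% of each block unused for future insertions */
	return (Size) (BLCKSZ * (100 - fillfactor) / 100);
}
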
173 /*
174  * In general, the btree code tries to localize its knowledge about
175  * page layout to a couple of routines. However, we need a special
176  * value to indicate "no page number" in those places where we expect
177  * page numbers. We can use zero for this because we never need to
178  * make a pointer to the metadata page.
179  */
180 
181 #define P_NONE 0
182 
183 /*
184  * Macros to test whether a page is leftmost or rightmost on its tree level,
185  * as well as other state info kept in the opaque data.
186  */
187 #define P_LEFTMOST(opaque) ((opaque)->btpo_prev == P_NONE)
188 #define P_RIGHTMOST(opaque) ((opaque)->btpo_next == P_NONE)
189 #define P_ISLEAF(opaque) (((opaque)->btpo_flags & BTP_LEAF) != 0)
190 #define P_ISROOT(opaque) (((opaque)->btpo_flags & BTP_ROOT) != 0)
191 #define P_ISDELETED(opaque) (((opaque)->btpo_flags & BTP_DELETED) != 0)
192 #define P_ISMETA(opaque) (((opaque)->btpo_flags & BTP_META) != 0)
193 #define P_ISHALFDEAD(opaque) (((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
194 #define P_IGNORE(opaque) (((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
195 #define P_HAS_GARBAGE(opaque) (((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
196 #define P_INCOMPLETE_SPLIT(opaque) (((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
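
/*
 * A minimal sketch of how the flags and macros above are consulted for a
 * page that has just been read.  Assumes "page" is pinned and locked; the
 * function name is illustrative only.
 */
static inline bool
example_page_is_live_leaf(Page page)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	/* half-dead and deleted pages are skipped, as P_IGNORE() callers do */
	if (P_IGNORE(opaque))
		return false;

	/* the BTP_LEAF bit and level == 0 are equivalent tests (see NOTE above) */
	Assert(P_ISLEAF(opaque) == (opaque->btpo.level == 0));

	return P_ISLEAF(opaque);
}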
197 
198 /*
199  * Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
200  * page. The high key is not a tuple that is used to visit the heap. It is
201  * a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
202  * The high key on a page is required to be greater than or equal to any
203  * other key that appears on the page. If we find ourselves trying to
204  * insert a key that is strictly > high key, we know we need to move right
205  * (this should only happen if the page was split since we examined the
206  * parent page).
207  *
208  * Our insertion algorithm guarantees that we can use the initial least key
209  * on our right sibling as the high key. Once a page is created, its high
210  * key changes only if the page is split.
211  *
212  * On a non-rightmost page, the high key lives in item 1 and data items
213  * start in item 2. Rightmost pages have no high key, so we store data
214  * items beginning in item 1.
215  */
216 
217 #define P_HIKEY ((OffsetNumber) 1)
218 #define P_FIRSTKEY ((OffsetNumber) 2)
219 #define P_FIRSTDATAKEY(opaque) (P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
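
/*
 * A minimal sketch of walking a page's data items while skipping the high
 * key via P_FIRSTDATAKEY().  Assumes "page" is read-locked; the function
 * name is illustrative only.
 */
static inline void
example_walk_data_items(Page page)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	OffsetNumber offnum;

	for (offnum = P_FIRSTDATAKEY(opaque);
		 offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		IndexTuple	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));

		/* on a leaf page, itup->t_tid of a non-pivot tuple points into the heap */
		(void) itup;
	}
}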
220 
221 /*
222  * Notes on B-Tree tuple format, and key and non-key attributes:
223  *
224  * INCLUDE B-Tree indexes have non-key attributes. These are extra
225  * attributes that may be returned by index-only scans, but do not influence
226  * the order of items in the index (formally, non-key attributes are not
227  * considered to be part of the key space). Non-key attributes are only
228  * present in leaf index tuples whose item pointers actually point to heap
229  * tuples (non-pivot tuples). _bt_check_natts() enforces the rules
230  * described here.
231  *
232  * Non-pivot tuple format:
233  *
234  * t_tid | t_info | key values | INCLUDE columns, if any
235  *
236  * t_tid points to the heap TID, which is a tiebreaker key column as of
237  * BTREE_VERSION 4. Currently, the INDEX_ALT_TID_MASK status bit is never
238  * set for non-pivot tuples.
239  *
240  * All other types of index tuples ("pivot" tuples) only have key columns,
241  * since pivot tuples only exist to represent how the key space is
242  * separated. In general, any B-Tree index that has more than one level
243  * (i.e. any index that does not just consist of a metapage and a single
244  * leaf root page) must have some number of pivot tuples, since pivot
245  * tuples are used for traversing the tree. Suffix truncation can omit
246  * trailing key columns when a new pivot is formed, which makes minus
247  * infinity their logical value. Since BTREE_VERSION 4 indexes treat heap
248  * TID as a trailing key column that ensures that all index tuples are
249  * physically unique, it is necessary to represent heap TID as a trailing
250  * key column in pivot tuples, though very often this can be truncated
251  * away, just like any other key column. (Actually, the heap TID is
252  * omitted rather than truncated, since its representation is different to
253  * the non-pivot representation.)
254  *
255  * Pivot tuple format:
256  *
257  * t_tid | t_info | key values | [heap TID]
258  *
259  * We store the number of columns present inside pivot tuples by abusing
260  * their t_tid offset field, since pivot tuples never need to store a real
261  * offset (downlinks only need to store a block number in t_tid). The
262  * offset field only stores the number of columns/attributes when the
263  * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
264  * TID column sometimes stored in pivot tuples -- that's represented by
265  * the presence of BT_HEAP_TID_ATTR. The INDEX_ALT_TID_MASK bit in t_info
266  * is always set on BTREE_VERSION 4. BT_HEAP_TID_ATTR can only be set on
267  * BTREE_VERSION 4.
268  *
269  * In version 3 indexes, the INDEX_ALT_TID_MASK flag might not be set in
270  * pivot tuples. In that case, the number of key columns is implicitly
271  * the same as the number of key columns in the index. It is not usually
272  * set on version 2 indexes, which predate the introduction of INCLUDE
273  * indexes. (On versions 2 and 3, only explicitly truncated pivot tuples
274  * explicitly represent the number of key columns, whereas on version 4
275  * all pivot tuples are formed using truncation. A version 2 index will
276  * only have it set on an internal page's negative infinity item if an
277  * internal page split occurred after upgrading to Postgres 11+.)
278  *
279  * The 12 least significant offset bits from t_tid are used to represent
280  * the number of columns in INDEX_ALT_TID_MASK tuples, leaving 4 status
281  * bits (BT_RESERVED_OFFSET_MASK bits), 3 of which are reserved for
282  * future use. BT_N_KEYS_OFFSET_MASK should be large enough to store any
283  * number of columns/attributes <= INDEX_MAX_KEYS.
284  *
285  * Note well: The macros that deal with the number of attributes in tuples
286  * assume that a tuple with INDEX_ALT_TID_MASK set must be a pivot tuple,
287  * and that a tuple without INDEX_ALT_TID_MASK set must be a non-pivot
288  * tuple (or must have the same number of attributes as the index has
289  * generally in the case of !heapkeyspace indexes). They will need to be
290  * updated if non-pivot tuples ever get taught to use INDEX_ALT_TID_MASK
291  * for something else.
292  */
293 #define INDEX_ALT_TID_MASK INDEX_AM_RESERVED_BIT
294 
295 /* Item pointer offset bits */
296 #define BT_RESERVED_OFFSET_MASK 0xF000
297 #define BT_N_KEYS_OFFSET_MASK 0x0FFF
298 #define BT_HEAP_TID_ATTR 0x1000
299 
300 /* Get/set downlink block number */
301 #define BTreeInnerTupleGetDownLink(itup) \
302  ItemPointerGetBlockNumberNoCheck(&((itup)->t_tid))
303 #define BTreeInnerTupleSetDownLink(itup, blkno) \
304  ItemPointerSetBlockNumber(&((itup)->t_tid), (blkno))
305 
306 /*
307  * Get/set leaf page highkey's link. During the second phase of deletion, the
308  * target leaf page's high key may point to an ancestor page (at all other
309  * times, the leaf level high key's link is not used). See the nbtree README
310  * for full details.
311  */
312 #define BTreeTupleGetTopParent(itup) \
313  ItemPointerGetBlockNumberNoCheck(&((itup)->t_tid))
314 #define BTreeTupleSetTopParent(itup, blkno) \
315  do { \
316  ItemPointerSetBlockNumber(&((itup)->t_tid), (blkno)); \
317  BTreeTupleSetNAtts((itup), 0); \
318  } while(0)
319 
320 /*
321  * Get/set number of attributes within B-tree index tuple.
322  *
323  * Note that this does not include an implicit tiebreaker heap TID
324  * attribute, if any. Note also that the number of key attributes must be
325  * explicitly represented in all heapkeyspace pivot tuples.
326  */
327 #define BTreeTupleGetNAtts(itup, rel) \
328  ( \
329  (itup)->t_info & INDEX_ALT_TID_MASK ? \
330  ( \
331  ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_N_KEYS_OFFSET_MASK \
332  ) \
333  : \
334  IndexRelationGetNumberOfAttributes(rel) \
335  )
336 #define BTreeTupleSetNAtts(itup, n) \
337  do { \
338  (itup)->t_info |= INDEX_ALT_TID_MASK; \
339  ItemPointerSetOffsetNumber(&(itup)->t_tid, (n) & BT_N_KEYS_OFFSET_MASK); \
340  } while(0)
341 
342 /*
343  * Get tiebreaker heap TID attribute, if any. Macro works with both pivot
344  * and non-pivot tuples, despite differences in how heap TID is represented.
345  */
346 #define BTreeTupleGetHeapTID(itup) \
347  ( \
348  (itup)->t_info & INDEX_ALT_TID_MASK && \
349  (ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_HEAP_TID_ATTR) != 0 ? \
350  ( \
351  (ItemPointer) (((char *) (itup) + IndexTupleSize(itup)) - \
352  sizeof(ItemPointerData)) \
353  ) \
354  : (itup)->t_info & INDEX_ALT_TID_MASK ? NULL : (ItemPointer) &((itup)->t_tid) \
355  )
356 /*
357  * Set the heap TID attribute for a tuple that uses the INDEX_ALT_TID_MASK
358  * representation (currently limited to pivot tuples)
359  */
360 #define BTreeTupleSetAltHeapTID(itup) \
361  do { \
362  Assert((itup)->t_info & INDEX_ALT_TID_MASK); \
363  ItemPointerSetOffsetNumber(&(itup)->t_tid, \
364  ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) | BT_HEAP_TID_ATTR); \
365  } while(0)
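
/*
 * A minimal sketch of decoding a pivot tuple with the macros above; assumes
 * "itup" is a pivot tuple from a heapkeyspace (version 4) index, and the
 * function name is illustrative only.
 */
static inline void
example_describe_pivot(Relation rel, IndexTuple itup)
{
	int			natts = BTreeTupleGetNAtts(itup, rel);
	ItemPointer htid = BTreeTupleGetHeapTID(itup);

	/* truncated suffix attributes read as "minus infinity" during descent */
	elog(DEBUG2, "pivot keeps %d key attribute(s); heap TID %s",
		 natts, htid ? "present" : "truncated away");
}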
366 
367 /*
368  * Operator strategy numbers for B-tree have been moved to access/stratnum.h,
369  * because many places need to use them in ScanKeyInit() calls.
370  *
371  * The strategy numbers are chosen so that we can commute them by
372  * subtraction, thus:
373  */
374 #define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
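
/*
 * A minimal sketch of the commutation rule: with the stratnum.h numbering
 * (1 = <, 2 = <=, 3 = =, 4 = >=, 5 = >), BTCommuteStrategyNumber() maps a
 * strategy to 6 - strategy, so "<" commutes to ">" and vice versa.  The
 * function name is illustrative only.
 */
static inline StrategyNumber
example_commuted_less(void)
{
	/* 5 + 1 - 1 == 5, i.e. BTGreaterStrategyNumber */
	return BTCommuteStrategyNumber(BTLessStrategyNumber);
}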
375 
376 /*
377  * When a new operator class is declared, we require that the user
378  * supply us with an amproc procedure (BTORDER_PROC) for determining
379  * whether, for two keys a and b, a < b, a = b, or a > b. This routine
380  * must return < 0, 0, > 0, respectively, in these three cases.
381  *
382  * To facilitate accelerated sorting, an operator class may choose to
383  * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
384  * src/include/utils/sortsupport.h.
385  *
386  * To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
387  * an operator class may choose to offer a third amproc procedure
388  * (BTINRANGE_PROC), independently of whether it offers sortsupport.
389  * For full details, see doc/src/sgml/btree.sgml.
390  */
391 
392 #define BTORDER_PROC 1
393 #define BTSORTSUPPORT_PROC 2
394 #define BTINRANGE_PROC 3
395 #define BTNProcs 3
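
/*
 * A minimal sketch of the comparison semantics a BTORDER_PROC must provide
 * (a real support function is exposed through the fmgr V1 interface and
 * registered for the opclass); the element type and function name here are
 * illustrative only.
 */
static inline int
example_btorder_semantics(int32 a, int32 b)
{
	if (a < b)
		return -1;				/* a sorts before b */
	if (a > b)
		return 1;				/* a sorts after b */
	return 0;					/* a and b are equal */
}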
396 
397 /*
398  * We need to be able to tell the difference between read and write
399  * requests for pages, in order to do locking correctly.
400  */
401 
402 #define BT_READ BUFFER_LOCK_SHARE
403 #define BT_WRITE BUFFER_LOCK_EXCLUSIVE
404 
405 /*
406  * BTStackData -- As we descend a tree, we push the location of pivot
407  * tuples whose downlink we are about to follow onto a private stack. If
408  * we split a leaf, we use this stack to walk back up the tree and insert
409  * data into its parent page at the correct location. We also have to
410  * recursively insert into the grandparent page if and when the parent page
411  * splits. Our private stack can become stale due to concurrent page
412  * splits and page deletions, but it should never give us an irredeemably
413  * bad picture.
414  */
415 typedef struct BTStackData
416 {
417  BlockNumber bts_blkno;
418  OffsetNumber bts_offset;
419  struct BTStackData *bts_parent;
420 } BTStackData;
421 
422 typedef BTStackData *BTStack;
423 
424 /*
425  * BTScanInsertData is the btree-private state needed to find an initial
426  * position for an indexscan, or to insert new tuples -- an "insertion
427  * scankey" (not to be confused with a search scankey). It's used to descend
428  * a B-Tree using _bt_search.
429  *
430  * heapkeyspace indicates if we expect all keys in the index to be physically
431  * unique because heap TID is used as a tiebreaker attribute, and whether the index may
432  * have truncated key attributes in pivot tuples. This is actually a property
433  * of the index relation itself (not an indexscan). heapkeyspace indexes are
434  * indexes whose version is >= version 4. It's convenient to keep this close
435  * by, rather than accessing the metapage repeatedly.
436  *
437  * anynullkeys indicates if any of the keys had NULL value when scankey was
438  * built from index tuple (note that already-truncated tuple key attributes
439  * set NULL as a placeholder key value, which also affects value of
440  * anynullkeys). This is a convenience for unique index non-pivot tuple
441  * insertion, which usually temporarily unsets scantid, but should not do so
442  * when anynullkeys is true. The value generally matches the non-pivot tuple's HasNulls
443  * bit, but may not when inserting into an INCLUDE index (tuple header value
444  * is affected by the NULL-ness of both key and non-key attributes).
445  *
446  * When nextkey is false (the usual case), _bt_search and _bt_binsrch will
447  * locate the first item >= scankey. When nextkey is true, they will locate
448  * the first item > scan key.
449  *
450  * pivotsearch is set to true by callers that want to re-find a leaf page
451  * using a scankey built from a leaf page's high key. Most callers set this
452  * to false.
453  *
454  * scantid is the heap TID that is used as a final tiebreaker attribute. It
455  * is set to NULL when an index scan doesn't need to find a position for a
456  * specific physical tuple. Must be set when inserting new tuples into
457  * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
458  * in one exact position (it's never set with !heapkeyspace indexes, though).
459  * Despite the representational difference, nbtree search code considers
460  * scantid to be just another insertion scankey attribute.
461  *
462  * scankeys is an array of scan key entries for attributes that are compared
463  * before scantid (user-visible attributes). keysz is the size of the array.
464  * During insertion, there must be a scan key for every attribute, but when
465  * starting a regular index scan some can be omitted. The array is used as a
466  * flexible array member, though it's sized in a way that makes it possible to
467  * use stack allocations. See nbtree/README for full details.
468  */
469 typedef struct BTScanInsertData
470 {
471  bool heapkeyspace;
472  bool anynullkeys;
473  bool nextkey;
474  bool pivotsearch;
475  ItemPointer scantid; /* tiebreaker for scankeys */
476  int keysz; /* Size of scankeys array */
477  ScanKeyData scankeys[INDEX_MAX_KEYS]; /* Must appear last */
478 } BTScanInsertData;
479 
480 typedef BTScanInsertData *BTScanInsert;
481 
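/*
 * A minimal sketch of how an insertion scankey is used to descend to a leaf
 * page; real insertion (_bt_doinsert) adds a fast path, uniqueness checks and
 * write locking.  The functions used here are declared further down in this
 * file; the function name is illustrative only.
 */
static inline void
example_descend_to_leaf(Relation rel, IndexTuple itup, Snapshot snapshot)
{
	BTScanInsert key = _bt_mkscankey(rel, itup);
	BTStack		stack;
	Buffer		buf = InvalidBuffer;

	/* key->scantid was set from itup's heap TID for heapkeyspace indexes */
	stack = _bt_search(rel, key, &buf, BT_READ, snapshot);

	/* ... examine the read-locked leaf page in "buf" here ... */

	if (BufferIsValid(buf))
		_bt_relbuf(rel, buf);
	_bt_freestack(stack);
	pfree(key);
}
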
482 /*
483  * BTInsertStateData is a working area used during insertion.
484  *
485  * This is filled in after descending the tree to the first leaf page the new
486  * tuple might belong on. Tracks the current position while performing a
487  * uniqueness check, before we have determined which exact page to insert
488  * to.
489  *
490  * (This should be private to nbtinsert.c, but it's also used by
491  * _bt_binsrch_insert)
492  */
493 typedef struct BTInsertStateData
494 {
495  IndexTuple itup; /* Item we're inserting */
496  Size itemsz; /* Size of itup -- should be MAXALIGN()'d */
497  BTScanInsert itup_key; /* Insertion scankey */
498 
499  /* Buffer containing leaf page we're likely to insert itup on */
500  Buffer buf;
501 
502  /*
503  * Cache of bounds within the current buffer. Only used for insertions
504  * where _bt_check_unique is called. See _bt_binsrch_insert and
505  * _bt_findinsertloc for details.
506  */
507  bool bounds_valid;
508  OffsetNumber low;
509  OffsetNumber stricthigh;
510 } BTInsertStateData;
511 
512 typedef BTInsertStateData *BTInsertState;
513 
514 /*
515  * BTScanOpaqueData is the btree-private state needed for an indexscan.
516  * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
517  * details of the preprocessing), information about the current location
518  * of the scan, and information about the marked location, if any. (We use
519  * BTScanPosData to represent the data needed for each of current and marked
520  * locations.) In addition we can remember some known-killed index entries
521  * that must be marked before we can move off the current page.
522  *
523  * Index scans work a page at a time: we pin and read-lock the page, identify
524  * all the matching items on the page and save them in BTScanPosData, then
525  * release the read-lock while returning the items to the caller for
526  * processing. This approach minimizes lock/unlock traffic. Note that we
527  * keep the pin on the index page until the caller is done with all the items
528  * (this is needed for VACUUM synchronization, see nbtree/README). When we
529  * are ready to step to the next page, if the caller has told us any of the
530  * items were killed, we re-lock the page to mark them killed, then unlock.
531  * Finally we drop the pin and step to the next page in the appropriate
532  * direction.
533  *
534  * If we are doing an index-only scan, we save the entire IndexTuple for each
535  * matched item, otherwise only its heap TID and offset. The IndexTuples go
536  * into a separate workspace array; each BTScanPosItem stores its tuple's
537  * offset within that array.
538  */
539 
540 typedef struct BTScanPosItem /* what we remember about each match */
541 {
542  ItemPointerData heapTid; /* TID of referenced heap item */
543  OffsetNumber indexOffset; /* index item's location within page */
544  LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */
545 } BTScanPosItem;
546 
547 typedef struct BTScanPosData
548 {
549  Buffer buf; /* if valid, the buffer is pinned */
550 
551  XLogRecPtr lsn; /* pos in the WAL stream when page was read */
552  BlockNumber currPage; /* page referenced by items array */
553  BlockNumber nextPage; /* page's right link when we scanned it */
554 
555  /*
556  * moreLeft and moreRight track whether we think there may be matching
557  * index entries to the left and right of the current page, respectively.
558  * We can clear the appropriate one of these flags when _bt_checkkeys()
559  * returns continuescan = false.
560  */
561  bool moreLeft;
562  bool moreRight;
563 
564  /*
565  * If we are doing an index-only scan, nextTupleOffset is the first free
566  * location in the associated tuple storage workspace.
567  */
568  int nextTupleOffset;
569 
570  /*
571  * The items array is always ordered in index order (ie, increasing
572  * indexoffset). When scanning backwards it is convenient to fill the
573  * array back-to-front, so we start at the last slot and fill downwards.
574  * Hence we need both a first-valid-entry and a last-valid-entry counter.
575  * itemIndex is a cursor showing which entry was last returned to caller.
576  */
577  int firstItem; /* first valid index in items[] */
578  int lastItem; /* last valid index in items[] */
579  int itemIndex; /* current index in items[] */
580 
581  BTScanPosItem items[MaxIndexTuplesPerPage]; /* MUST BE LAST */
582 } BTScanPosData;
583 
584 typedef BTScanPosData *BTScanPos;
585 
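/*
 * A minimal sketch of consuming the items saved in a BTScanPosData during a
 * forward scan, after the page's matches have been collected and the read
 * lock (but not the pin) has been released; the function name is
 * illustrative only.
 */
static inline bool
example_next_saved_item(BTScanPos pos, ItemPointer heapTid)
{
	if (pos->itemIndex > pos->lastItem)
		return false;			/* current page exhausted; step right next */

	*heapTid = pos->items[pos->itemIndex].heapTid;
	pos->itemIndex++;
	return true;
}
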
586 #define BTScanPosIsPinned(scanpos) \
587 ( \
588  AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
589  !BufferIsValid((scanpos).buf)), \
590  BufferIsValid((scanpos).buf) \
591 )
592 #define BTScanPosUnpin(scanpos) \
593  do { \
594  ReleaseBuffer((scanpos).buf); \
595  (scanpos).buf = InvalidBuffer; \
596  } while (0)
597 #define BTScanPosUnpinIfPinned(scanpos) \
598  do { \
599  if (BTScanPosIsPinned(scanpos)) \
600  BTScanPosUnpin(scanpos); \
601  } while (0)
602 
603 #define BTScanPosIsValid(scanpos) \
604 ( \
605  AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
606  !BufferIsValid((scanpos).buf)), \
607  BlockNumberIsValid((scanpos).currPage) \
608 )
609 #define BTScanPosInvalidate(scanpos) \
610  do { \
611  (scanpos).currPage = InvalidBlockNumber; \
612  (scanpos).nextPage = InvalidBlockNumber; \
613  (scanpos).buf = InvalidBuffer; \
614  (scanpos).lsn = InvalidXLogRecPtr; \
615  (scanpos).nextTupleOffset = 0; \
616  } while (0)
617 
618 /* We need one of these for each equality-type SK_SEARCHARRAY scan key */
619 typedef struct BTArrayKeyInfo
620 {
621  int scan_key; /* index of associated key in arrayKeyData */
622  int cur_elem; /* index of current element in elem_values */
623  int mark_elem; /* index of marked element in elem_values */
624  int num_elems; /* number of elems in current array value */
625  Datum *elem_values; /* array of num_elems Datums */
626 } BTArrayKeyInfo;
627 
628 typedef struct BTScanOpaqueData
629 {
630  /* these fields are set by _bt_preprocess_keys(): */
631  bool qual_ok; /* false if qual can never be satisfied */
632  int numberOfKeys; /* number of preprocessed scan keys */
633  ScanKey keyData; /* array of preprocessed scan keys */
634 
635  /* workspace for SK_SEARCHARRAY support */
636  ScanKey arrayKeyData; /* modified copy of scan->keyData */
637  int numArrayKeys; /* number of equality-type array keys (-1 if
638  * there are any unsatisfiable array keys) */
639  int arrayKeyCount; /* count indicating number of array scan keys
640  * processed */
641  BTArrayKeyInfo *arrayKeys; /* info about each equality-type array key */
642  MemoryContext arrayContext; /* scan-lifespan context for array data */
643 
644  /* info about killed items if any (killedItems is NULL if never used) */
645  int *killedItems; /* currPos.items indexes of killed items */
646  int numKilled; /* number of currently stored items */
647 
648  /*
649  * If we are doing an index-only scan, these are the tuple storage
650  * workspaces for the currPos and markPos respectively. Each is of size
651  * BLCKSZ, so it can hold as much as a full page's worth of tuples.
652  */
653  char *currTuples; /* tuple storage for currPos */
654  char *markTuples; /* tuple storage for markPos */
655 
656  /*
657  * If the marked position is on the same page as current position, we
658  * don't use markPos, but just keep the marked itemIndex in markItemIndex
659  * (all the rest of currPos is valid for the mark position). Hence, to
660  * determine if there is a mark, first look at markItemIndex, then at
661  * markPos.
662  */
663  int markItemIndex; /* itemIndex, or -1 if not valid */
664 
665  /* keep these last in struct for efficiency */
666  BTScanPosData currPos; /* current position data */
667  BTScanPosData markPos; /* marked position, if any */
668 } BTScanOpaqueData;
669 
670 typedef BTScanOpaqueData *BTScanOpaque;
671 
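/*
 * A minimal sketch of the mark/restore rule described above: consult
 * markItemIndex before falling back to markPos.  Assumes "so" is a scan's
 * opaque state; the function name is illustrative only.
 */
static inline bool
example_scan_has_mark(BTScanOpaque so)
{
	if (so->markItemIndex >= 0)
		return true;			/* mark is on the page held in currPos */
	return BTScanPosIsValid(so->markPos);
}
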
672 /*
673  * We use some private sk_flags bits in preprocessed scan keys. We're allowed
674  * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
675  * index's indoption[] array entry for the index attribute.
676  */
677 #define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
678 #define SK_BT_REQBKWD 0x00020000 /* required to continue backward scan */
679 #define SK_BT_INDOPTION_SHIFT 24 /* must clear the above bits */
680 #define SK_BT_DESC (INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
681 #define SK_BT_NULLS_FIRST (INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)
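
/*
 * A minimal sketch of how the direction-related flags above are consulted,
 * in the style of _bt_checkkeys(); assumes "skey" points at an entry of a
 * BTScanOpaque's keyData array, and the function name is illustrative only.
 */
static inline bool
example_key_required_for_dir(ScanKey skey, ScanDirection dir)
{
	if (ScanDirectionIsForward(dir))
		return (skey->sk_flags & SK_BT_REQFWD) != 0;
	else
		return (skey->sk_flags & SK_BT_REQBKWD) != 0;
}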
682 
683 /*
684  * Constant definitions for progress reporting. Phase numbers must match
685  * btbuildphasename.
686  */
687 /* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
688 #define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN 2
689 #define PROGRESS_BTREE_PHASE_PERFORMSORT_1 3
690 #define PROGRESS_BTREE_PHASE_PERFORMSORT_2 4
691 #define PROGRESS_BTREE_PHASE_LEAF_LOAD 5
692 
693 /*
694  * external entry points for btree, in nbtree.c
695  */
696 extern void btbuildempty(Relation index);
697 extern bool btinsert(Relation rel, Datum *values, bool *isnull,
698  ItemPointer ht_ctid, Relation heapRel,
699  IndexUniqueCheck checkUnique,
700  struct IndexInfo *indexInfo);
701 extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
702 extern Size btestimateparallelscan(void);
703 extern void btinitparallelscan(void *target);
704 extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
705 extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
706 extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
707  ScanKey orderbys, int norderbys);
708 extern void btparallelrescan(IndexScanDesc scan);
709 extern void btendscan(IndexScanDesc scan);
710 extern void btmarkpos(IndexScanDesc scan);
711 extern void btrestrpos(IndexScanDesc scan);
712 extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
713  IndexBulkDeleteResult *stats,
714  IndexBulkDeleteCallback callback,
715  void *callback_state);
716 extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
717  IndexBulkDeleteResult *stats);
718 extern bool btcanreturn(Relation index, int attno);
719 
720 /*
721  * prototypes for internal functions in nbtree.c
722  */
723 extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno);
724 extern void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page);
725 extern void _bt_parallel_done(IndexScanDesc scan);
726 extern void _bt_parallel_advance_array_keys(IndexScanDesc scan);
727 
728 /*
729  * prototypes for functions in nbtinsert.c
730  */
731 extern bool _bt_doinsert(Relation rel, IndexTuple itup,
732  IndexUniqueCheck checkUnique, Relation heapRel);
733 extern void _bt_finish_split(Relation rel, Buffer bbuf, BTStack stack);
734 extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child);
735 
736 /*
737  * prototypes for functions in nbtsplitloc.c
738  */
739 extern OffsetNumber _bt_findsplitloc(Relation rel, Page page,
740  OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
741  bool *newitemonleft);
742 
743 /*
744  * prototypes for functions in nbtpage.c
745  */
746 extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level);
747 extern void _bt_update_meta_cleanup_info(Relation rel,
748  TransactionId oldestBtpoXact, float8 numHeapTuples);
749 extern void _bt_upgrademetapage(Page page);
750 extern Buffer _bt_getroot(Relation rel, int access);
751 extern Buffer _bt_gettrueroot(Relation rel);
752 extern int _bt_getrootheight(Relation rel);
753 extern bool _bt_heapkeyspace(Relation rel);
754 extern void _bt_checkpage(Relation rel, Buffer buf);
755 extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
756 extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
757  BlockNumber blkno, int access);
758 extern void _bt_relbuf(Relation rel, Buffer buf);
759 extern void _bt_pageinit(Page page, Size size);
760 extern bool _bt_page_recyclable(Page page);
761 extern void _bt_delitems_delete(Relation rel, Buffer buf,
762  OffsetNumber *itemnos, int nitems, Relation heapRel);
763 extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
764  OffsetNumber *itemnos, int nitems,
765  BlockNumber lastBlockVacuumed);
766 extern int _bt_pagedel(Relation rel, Buffer buf);
767 
768 /*
769  * prototypes for functions in nbtsearch.c
770  */
771 extern BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP,
772  int access, Snapshot snapshot);
773 extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf,
774  bool forupdate, BTStack stack, int access, Snapshot snapshot);
775 extern OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate);
776 extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum);
777 extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
778 extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
779 extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
780  Snapshot snapshot);
781 
782 /*
783  * prototypes for functions in nbtutils.c
784  */
785 extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
786 extern void _bt_freestack(BTStack stack);
787 extern void _bt_preprocess_array_keys(IndexScanDesc scan);
788 extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
789 extern bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir);
790 extern void _bt_mark_array_keys(IndexScanDesc scan);
791 extern void _bt_restore_array_keys(IndexScanDesc scan);
792 extern void _bt_preprocess_keys(IndexScanDesc scan);
793 extern bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
794  int tupnatts, ScanDirection dir, bool *continuescan);
795 extern void _bt_killitems(IndexScanDesc scan);
796 extern BTCycleId _bt_vacuum_cycleid(Relation rel);
797 extern BTCycleId _bt_start_vacuum(Relation rel);
798 extern void _bt_end_vacuum(Relation rel);
799 extern void _bt_end_vacuum_callback(int code, Datum arg);
800 extern Size BTreeShmemSize(void);
801 extern void BTreeShmemInit(void);
802 extern bytea *btoptions(Datum reloptions, bool validate);
803 extern bool btproperty(Oid index_oid, int attno,
804  IndexAMProperty prop, const char *propname,
805  bool *res, bool *isnull);
806 extern char *btbuildphasename(int64 phasenum);
807 extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
808  IndexTuple firstright, BTScanInsert itup_key);
809 extern int _bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
810  IndexTuple firstright);
811 extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
812  OffsetNumber offnum);
813 extern void _bt_check_third_page(Relation rel, Relation heap,
814  bool needheaptidspace, Page page, IndexTuple newtup);
815 
816 /*
817  * prototypes for functions in nbtvalidate.c
818  */
819 extern bool btvalidate(Oid opclassoid);
820 
821 /*
822  * prototypes for functions in nbtsort.c
823  */
824 extern IndexBuildResult *btbuild(Relation heap, Relation index,
825  struct IndexInfo *indexInfo);
826 extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);
827 
828 #endif /* NBTREE_H */
Definition: nbtpage.c:488