PostgreSQL Source Code  git master
gist.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * gist.c
4  * interface routines for the postgres GiST index access method.
5  *
6  *
7  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * IDENTIFICATION
11  * src/backend/access/gist/gist.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/gist_private.h"
18 #include "access/gistscan.h"
19 #include "access/xloginsert.h"
20 #include "catalog/pg_collation.h"
21 #include "commands/vacuum.h"
22 #include "miscadmin.h"
23 #include "nodes/execnodes.h"
24 #include "storage/lmgr.h"
25 #include "storage/predicate.h"
26 #include "utils/builtins.h"
27 #include "utils/index_selfuncs.h"
28 #include "utils/memutils.h"
29 #include "utils/rel.h"
30 
31 /* non-export function prototypes */
32 static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
34  GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
36  GISTSTATE *giststate,
37  IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
39  bool unlockbuf, bool unlockleftchild);
41  GISTSTATE *giststate, List *splitinfo, bool unlockbuf);
42 static void gistprunepage(Relation rel, Page page, Buffer buffer,
43  Relation heapRel);
44 
45 
/*
 * Prepend a freshly zeroed SplitedPageLayout node to the list 'd'.
 * The new node becomes the list head, with an invalid block number and
 * invalid buffer; the previous head is linked in as its successor.
 * Multi-statement macro wrapped in do { } while(0) so it behaves as a
 * single statement at call sites.
 */
#define ROTATEDIST(d) do { \
	SplitedPageLayout *tmp=(SplitedPageLayout*)palloc0(sizeof(SplitedPageLayout)); \
	tmp->block.blkno = InvalidBlockNumber; \
	tmp->buffer = InvalidBuffer; \
	tmp->next = (d); \
	(d)=tmp; \
} while(0)
53 
54 
55 /*
56  * GiST handler function: return IndexAmRoutine with access method parameters
57  * and callbacks.
58  */
59 Datum
61 {
63 
64  amroutine->amstrategies = 0;
65  amroutine->amsupport = GISTNProcs;
66  amroutine->amoptsprocnum = GIST_OPTIONS_PROC;
67  amroutine->amcanorder = false;
68  amroutine->amcanorderbyop = true;
69  amroutine->amcanbackward = false;
70  amroutine->amcanunique = false;
71  amroutine->amcanmulticol = true;
72  amroutine->amoptionalkey = true;
73  amroutine->amsearcharray = false;
74  amroutine->amsearchnulls = true;
75  amroutine->amstorage = true;
76  amroutine->amclusterable = true;
77  amroutine->ampredlocks = true;
78  amroutine->amcanparallel = false;
79  amroutine->amcaninclude = true;
80  amroutine->amusemaintenanceworkmem = false;
81  amroutine->amhotblocking = true;
82  amroutine->amparallelvacuumoptions =
84  amroutine->amkeytype = InvalidOid;
85 
86  amroutine->ambuild = gistbuild;
87  amroutine->ambuildempty = gistbuildempty;
88  amroutine->aminsert = gistinsert;
89  amroutine->ambulkdelete = gistbulkdelete;
91  amroutine->amcanreturn = gistcanreturn;
92  amroutine->amcostestimate = gistcostestimate;
93  amroutine->amoptions = gistoptions;
94  amroutine->amproperty = gistproperty;
95  amroutine->ambuildphasename = NULL;
96  amroutine->amvalidate = gistvalidate;
98  amroutine->ambeginscan = gistbeginscan;
99  amroutine->amrescan = gistrescan;
100  amroutine->amgettuple = gistgettuple;
101  amroutine->amgetbitmap = gistgetbitmap;
102  amroutine->amendscan = gistendscan;
103  amroutine->ammarkpos = NULL;
104  amroutine->amrestrpos = NULL;
105  amroutine->amestimateparallelscan = NULL;
106  amroutine->aminitparallelscan = NULL;
107  amroutine->amparallelrescan = NULL;
108 
109  PG_RETURN_POINTER(amroutine);
110 }
111 
112 /*
113  * Create and return a temporary memory context for use by GiST. We
114  * _always_ invoke user-provided methods in a temporary memory
115  * context, so that memory leaks in those functions cannot cause
116  * problems. Also, we use some additional temporary contexts in the
117  * GiST code itself, to avoid the need to do some awkward manual
118  * memory management.
119  */
122 {
124  "GiST temporary context",
126 }
127 
128 /*
129  * gistbuildempty() -- build an empty gist index in the initialization fork
130  */
131 void
133 {
134  Buffer buffer;
135 
136  /* Initialize the root page */
139 
140  /* Initialize and xlog buffer */
142  GISTInitBuffer(buffer, F_LEAF);
143  MarkBufferDirty(buffer);
144  log_newpage_buffer(buffer, true);
146 
147  /* Unlock and release the buffer */
148  UnlockReleaseBuffer(buffer);
149 }
150 
151 /*
152  * gistinsert -- wrapper for GiST tuple insertion.
153  *
154  * This is the public interface routine for tuple insertion in GiSTs.
155  * It doesn't do any work; just locks the relation and passes the buck.
156  */
157 bool
158 gistinsert(Relation r, Datum *values, bool *isnull,
159  ItemPointer ht_ctid, Relation heapRel,
160  IndexUniqueCheck checkUnique,
161  bool indexUnchanged,
162  IndexInfo *indexInfo)
163 {
164  GISTSTATE *giststate = (GISTSTATE *) indexInfo->ii_AmCache;
165  IndexTuple itup;
166  MemoryContext oldCxt;
167 
168  /* Initialize GISTSTATE cache if first call in this statement */
169  if (giststate == NULL)
170  {
171  oldCxt = MemoryContextSwitchTo(indexInfo->ii_Context);
172  giststate = initGISTstate(r);
173  giststate->tempCxt = createTempGistContext();
174  indexInfo->ii_AmCache = (void *) giststate;
175  MemoryContextSwitchTo(oldCxt);
176  }
177 
178  oldCxt = MemoryContextSwitchTo(giststate->tempCxt);
179 
180  itup = gistFormTuple(giststate, r,
181  values, isnull, true /* size is currently bogus */ );
182  itup->t_tid = *ht_ctid;
183 
184  gistdoinsert(r, itup, 0, giststate, heapRel, false);
185 
186  /* cleanup */
187  MemoryContextSwitchTo(oldCxt);
188  MemoryContextReset(giststate->tempCxt);
189 
190  return false;
191 }
192 
193 
194 /*
195  * Place tuples from 'itup' to 'buffer'. If 'oldoffnum' is valid, the tuple
196  * at that offset is atomically removed along with inserting the new tuples.
197  * This is used to replace a tuple with a new one.
198  *
199  * If 'leftchildbuf' is valid, we're inserting the downlink for the page
200  * to the right of 'leftchildbuf', or updating the downlink for 'leftchildbuf'.
201  * F_FOLLOW_RIGHT flag on 'leftchildbuf' is cleared and NSN is set.
202  *
203  * If 'markfollowright' is true and the page is split, the left child is
204  * marked with F_FOLLOW_RIGHT flag. That is the normal case. During buffered
205  * index build, however, there is no concurrent access and the page splitting
206  * is done in a slightly simpler fashion, and false is passed.
207  *
208  * If there is not enough room on the page, it is split. All the split
209  * pages are kept pinned and locked and returned in *splitinfo, the caller
210  * is responsible for inserting the downlinks for them. However, if
211  * 'buffer' is the root page and it needs to be split, gistplacetopage()
212  * performs the split as one atomic operation, and *splitinfo is set to NIL.
213  * In that case, we continue to hold the root page locked, and the child
214  * pages are released; note that new tuple(s) are *not* on the root page
215  * but in one of the new child pages.
216  *
217  * If 'newblkno' is not NULL, returns the block number of page the first
218  * new/updated tuple was inserted to. Usually it's the given page, but could
219  * be its right sibling if the page was split.
220  *
221  * Returns 'true' if the page was split, 'false' otherwise.
222  */
223 bool
224 gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
225  Buffer buffer,
226  IndexTuple *itup, int ntup, OffsetNumber oldoffnum,
227  BlockNumber *newblkno,
228  Buffer leftchildbuf,
229  List **splitinfo,
230  bool markfollowright,
231  Relation heapRel,
232  bool is_build)
233 {
234  BlockNumber blkno = BufferGetBlockNumber(buffer);
235  Page page = BufferGetPage(buffer);
236  bool is_leaf = (GistPageIsLeaf(page)) ? true : false;
237  XLogRecPtr recptr;
238  int i;
239  bool is_split;
240 
241  /*
242  * Refuse to modify a page that's incompletely split. This should not
243  * happen because we finish any incomplete splits while we walk down the
244  * tree. However, it's remotely possible that another concurrent inserter
245  * splits a parent page, and errors out before completing the split. We
246  * will just throw an error in that case, and leave any split we had in
247  * progress unfinished too. The next insert that comes along will clean up
248  * the mess.
249  */
250  if (GistFollowRight(page))
251  elog(ERROR, "concurrent GiST page split was incomplete");
252 
253  /* should never try to insert to a deleted page */
254  Assert(!GistPageIsDeleted(page));
255 
256  *splitinfo = NIL;
257 
258  /*
259  * if isupdate, remove old key: This node's key has been modified, either
260  * because a child split occurred or because we needed to adjust our key
261  * for an insert in a child node. Therefore, remove the old version of
262  * this node's key.
263  *
264  * for WAL replay, in the non-split case we handle this by setting up a
265  * one-element todelete array; in the split case, it's handled implicitly
266  * because the tuple vector passed to gistSplit won't include this tuple.
267  */
268  is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
269 
270  /*
271  * If leaf page is full, try at first to delete dead tuples. And then
272  * check again.
273  */
274  if (is_split && GistPageIsLeaf(page) && GistPageHasGarbage(page))
275  {
276  gistprunepage(rel, page, buffer, heapRel);
277  is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
278  }
279 
280  if (is_split)
281  {
282  /* no space for insertion */
283  IndexTuple *itvec;
284  int tlen;
285  SplitedPageLayout *dist = NULL,
286  *ptr;
287  BlockNumber oldrlink = InvalidBlockNumber;
288  GistNSN oldnsn = 0;
289  SplitedPageLayout rootpg;
290  bool is_rootsplit;
291  int npage;
292 
293  is_rootsplit = (blkno == GIST_ROOT_BLKNO);
294 
295  /*
296  * Form index tuples vector to split. If we're replacing an old tuple,
297  * remove the old version from the vector.
298  */
299  itvec = gistextractpage(page, &tlen);
300  if (OffsetNumberIsValid(oldoffnum))
301  {
302  /* on inner page we should remove old tuple */
303  int pos = oldoffnum - FirstOffsetNumber;
304 
305  tlen--;
306  if (pos != tlen)
307  memmove(itvec + pos, itvec + pos + 1, sizeof(IndexTuple) * (tlen - pos));
308  }
309  itvec = gistjoinvector(itvec, &tlen, itup, ntup);
310  dist = gistSplit(rel, page, itvec, tlen, giststate);
311 
312  /*
313  * Check that split didn't produce too many pages.
314  */
315  npage = 0;
316  for (ptr = dist; ptr; ptr = ptr->next)
317  npage++;
318  /* in a root split, we'll add one more page to the list below */
319  if (is_rootsplit)
320  npage++;
321  if (npage > GIST_MAX_SPLIT_PAGES)
322  elog(ERROR, "GiST page split into too many halves (%d, maximum %d)",
323  npage, GIST_MAX_SPLIT_PAGES);
324 
325  /*
326  * Set up pages to work with. Allocate new buffers for all but the
327  * leftmost page. The original page becomes the new leftmost page, and
328  * is just replaced with the new contents.
329  *
330  * For a root-split, allocate new buffers for all child pages, the
331  * original page is overwritten with new root page containing
332  * downlinks to the new child pages.
333  */
334  ptr = dist;
335  if (!is_rootsplit)
336  {
337  /* save old rightlink and NSN */
338  oldrlink = GistPageGetOpaque(page)->rightlink;
339  oldnsn = GistPageGetNSN(page);
340 
341  dist->buffer = buffer;
342  dist->block.blkno = BufferGetBlockNumber(buffer);
344 
345  /* clean all flags except F_LEAF */
346  GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
347 
348  ptr = ptr->next;
349  }
350  for (; ptr; ptr = ptr->next)
351  {
352  /* Allocate new page */
353  ptr->buffer = gistNewBuffer(rel);
354  GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
355  ptr->page = BufferGetPage(ptr->buffer);
356  ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
358  BufferGetBlockNumber(buffer),
359  BufferGetBlockNumber(ptr->buffer));
360  }
361 
362  /*
363  * Now that we know which blocks the new pages go to, set up downlink
364  * tuples to point to them.
365  */
366  for (ptr = dist; ptr; ptr = ptr->next)
367  {
368  ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
369  GistTupleSetValid(ptr->itup);
370  }
371 
372  /*
373  * If this is a root split, we construct the new root page with the
374  * downlinks here directly, instead of requiring the caller to insert
375  * them. Add the new root page to the list along with the child pages.
376  */
377  if (is_rootsplit)
378  {
379  IndexTuple *downlinks;
380  int ndownlinks = 0;
381  int i;
382 
383  rootpg.buffer = buffer;
385  GistPageGetOpaque(rootpg.page)->flags = 0;
386 
387  /* Prepare a vector of all the downlinks */
388  for (ptr = dist; ptr; ptr = ptr->next)
389  ndownlinks++;
390  downlinks = palloc(sizeof(IndexTuple) * ndownlinks);
391  for (i = 0, ptr = dist; ptr; ptr = ptr->next)
392  downlinks[i++] = ptr->itup;
393 
394  rootpg.block.blkno = GIST_ROOT_BLKNO;
395  rootpg.block.num = ndownlinks;
396  rootpg.list = gistfillitupvec(downlinks, ndownlinks,
397  &(rootpg.lenlist));
398  rootpg.itup = NULL;
399 
400  rootpg.next = dist;
401  dist = &rootpg;
402  }
403  else
404  {
405  /* Prepare split-info to be returned to caller */
406  for (ptr = dist; ptr; ptr = ptr->next)
407  {
409 
410  si->buf = ptr->buffer;
411  si->downlink = ptr->itup;
412  *splitinfo = lappend(*splitinfo, si);
413  }
414  }
415 
416  /*
417  * Fill all pages. All the pages are new, ie. freshly allocated empty
418  * pages, or a temporary copy of the old page.
419  */
420  for (ptr = dist; ptr; ptr = ptr->next)
421  {
422  char *data = (char *) (ptr->list);
423 
424  for (i = 0; i < ptr->block.num; i++)
425  {
426  IndexTuple thistup = (IndexTuple) data;
427 
428  if (PageAddItem(ptr->page, (Item) data, IndexTupleSize(thistup), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
429  elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(rel));
430 
431  /*
432  * If this is the first inserted/updated tuple, let the caller
433  * know which page it landed on.
434  */
435  if (newblkno && ItemPointerEquals(&thistup->t_tid, &(*itup)->t_tid))
436  *newblkno = ptr->block.blkno;
437 
438  data += IndexTupleSize(thistup);
439  }
440 
441  /* Set up rightlinks */
442  if (ptr->next && ptr->block.blkno != GIST_ROOT_BLKNO)
443  GistPageGetOpaque(ptr->page)->rightlink =
444  ptr->next->block.blkno;
445  else
446  GistPageGetOpaque(ptr->page)->rightlink = oldrlink;
447 
448  /*
449  * Mark the all but the right-most page with the follow-right
450  * flag. It will be cleared as soon as the downlink is inserted
451  * into the parent, but this ensures that if we error out before
452  * that, the index is still consistent. (in buffering build mode,
453  * any error will abort the index build anyway, so this is not
454  * needed.)
455  */
456  if (ptr->next && !is_rootsplit && markfollowright)
457  GistMarkFollowRight(ptr->page);
458  else
459  GistClearFollowRight(ptr->page);
460 
461  /*
462  * Copy the NSN of the original page to all pages. The
463  * F_FOLLOW_RIGHT flags ensure that scans will follow the
464  * rightlinks until the downlinks are inserted.
465  */
466  GistPageSetNSN(ptr->page, oldnsn);
467  }
468 
469  /*
470  * gistXLogSplit() needs to WAL log a lot of pages, prepare WAL
471  * insertion for that. NB: The number of pages and data segments
472  * specified here must match the calculations in gistXLogSplit()!
473  */
474  if (!is_build && RelationNeedsWAL(rel))
475  XLogEnsureRecordSpace(npage, 1 + npage * 2);
476 
478 
479  /*
480  * Must mark buffers dirty before XLogInsert, even though we'll still
481  * be changing their opaque fields below.
482  */
483  for (ptr = dist; ptr; ptr = ptr->next)
484  MarkBufferDirty(ptr->buffer);
485  if (BufferIsValid(leftchildbuf))
486  MarkBufferDirty(leftchildbuf);
487 
488  /*
489  * The first page in the chain was a temporary working copy meant to
490  * replace the old page. Copy it over the old page.
491  */
493  dist->page = BufferGetPage(dist->buffer);
494 
495  /*
496  * Write the WAL record.
497  *
498  * If we're building a new index, however, we don't WAL-log changes
499  * yet. The LSN-NSN interlock between parent and child requires that
500  * LSNs never move backwards, so set the LSNs to a value that's
501  * smaller than any real or fake unlogged LSN that might be generated
502  * later. (There can't be any concurrent scans during index build, so
503  * we don't need to be able to detect concurrent splits yet.)
504  */
505  if (is_build)
506  recptr = GistBuildLSN;
507  else
508  {
509  if (RelationNeedsWAL(rel))
510  recptr = gistXLogSplit(is_leaf,
511  dist, oldrlink, oldnsn, leftchildbuf,
512  markfollowright);
513  else
514  recptr = gistGetFakeLSN(rel);
515  }
516 
517  for (ptr = dist; ptr; ptr = ptr->next)
518  PageSetLSN(ptr->page, recptr);
519 
520  /*
521  * Return the new child buffers to the caller.
522  *
523  * If this was a root split, we've already inserted the downlink
524  * pointers, in the form of a new root page. Therefore we can release
525  * all the new buffers, and keep just the root page locked.
526  */
527  if (is_rootsplit)
528  {
529  for (ptr = dist->next; ptr; ptr = ptr->next)
530  UnlockReleaseBuffer(ptr->buffer);
531  }
532  }
533  else
534  {
535  /*
536  * Enough space. We always get here if ntup==0.
537  */
539 
540  /*
541  * Delete old tuple if any, then insert new tuple(s) if any. If
542  * possible, use the fast path of PageIndexTupleOverwrite.
543  */
544  if (OffsetNumberIsValid(oldoffnum))
545  {
546  if (ntup == 1)
547  {
548  /* One-for-one replacement, so use PageIndexTupleOverwrite */
549  if (!PageIndexTupleOverwrite(page, oldoffnum, (Item) *itup,
550  IndexTupleSize(*itup)))
551  elog(ERROR, "failed to add item to index page in \"%s\"",
553  }
554  else
555  {
556  /* Delete old, then append new tuple(s) to page */
557  PageIndexTupleDelete(page, oldoffnum);
558  gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
559  }
560  }
561  else
562  {
563  /* Just append new tuples at the end of the page */
564  gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
565  }
566 
567  MarkBufferDirty(buffer);
568 
569  if (BufferIsValid(leftchildbuf))
570  MarkBufferDirty(leftchildbuf);
571 
572  if (is_build)
573  recptr = GistBuildLSN;
574  else
575  {
576  if (RelationNeedsWAL(rel))
577  {
578  OffsetNumber ndeloffs = 0,
579  deloffs[1];
580 
581  if (OffsetNumberIsValid(oldoffnum))
582  {
583  deloffs[0] = oldoffnum;
584  ndeloffs = 1;
585  }
586 
587  recptr = gistXLogUpdate(buffer,
588  deloffs, ndeloffs, itup, ntup,
589  leftchildbuf);
590  }
591  else
592  recptr = gistGetFakeLSN(rel);
593  }
594  PageSetLSN(page, recptr);
595 
596  if (newblkno)
597  *newblkno = blkno;
598  }
599 
600  /*
601  * If we inserted the downlink for a child page, set NSN and clear
602  * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
603  * follow the rightlink if and only if they looked at the parent page
604  * before we inserted the downlink.
605  *
606  * Note that we do this *after* writing the WAL record. That means that
607  * the possible full page image in the WAL record does not include these
608  * changes, and they must be replayed even if the page is restored from
609  * the full page image. There's a chicken-and-egg problem: if we updated
610  * the child pages first, we wouldn't know the recptr of the WAL record
611  * we're about to write.
612  */
613  if (BufferIsValid(leftchildbuf))
614  {
615  Page leftpg = BufferGetPage(leftchildbuf);
616 
617  GistPageSetNSN(leftpg, recptr);
618  GistClearFollowRight(leftpg);
619 
620  PageSetLSN(leftpg, recptr);
621  }
622 
624 
625  return is_split;
626 }
627 
628 /*
629  * Workhouse routine for doing insertion into a GiST index. Note that
630  * this routine assumes it is invoked in a short-lived memory context,
631  * so it does not bother releasing palloc'd allocations.
632  */
633 void
635  GISTSTATE *giststate, Relation heapRel, bool is_build)
636 {
637  ItemId iid;
638  IndexTuple idxtuple;
639  GISTInsertStack firststack;
640  GISTInsertStack *stack;
642  bool xlocked = false;
643 
644  memset(&state, 0, sizeof(GISTInsertState));
645  state.freespace = freespace;
646  state.r = r;
647  state.heapRel = heapRel;
648  state.is_build = is_build;
649 
650  /* Start from the root */
651  firststack.blkno = GIST_ROOT_BLKNO;
652  firststack.lsn = 0;
653  firststack.retry_from_parent = false;
654  firststack.parent = NULL;
655  firststack.downlinkoffnum = InvalidOffsetNumber;
656  state.stack = stack = &firststack;
657 
658  /*
659  * Walk down along the path of smallest penalty, updating the parent
660  * pointers with the key we're inserting as we go. If we crash in the
661  * middle, the tree is consistent, although the possible parent updates
662  * were a waste.
663  */
664  for (;;)
665  {
666  /*
667  * If we split an internal page while descending the tree, we have to
668  * retry at the parent. (Normally, the LSN-NSN interlock below would
669  * also catch this and cause us to retry. But LSNs are not updated
670  * during index build.)
671  */
672  while (stack->retry_from_parent)
673  {
674  if (xlocked)
675  LockBuffer(stack->buffer, GIST_UNLOCK);
676  xlocked = false;
677  ReleaseBuffer(stack->buffer);
678  state.stack = stack = stack->parent;
679  }
680 
681  if (XLogRecPtrIsInvalid(stack->lsn))
682  stack->buffer = ReadBuffer(state.r, stack->blkno);
683 
684  /*
685  * Be optimistic and grab shared lock first. Swap it for an exclusive
686  * lock later if we need to update the page.
687  */
688  if (!xlocked)
689  {
690  LockBuffer(stack->buffer, GIST_SHARE);
691  gistcheckpage(state.r, stack->buffer);
692  }
693 
694  stack->page = (Page) BufferGetPage(stack->buffer);
695  stack->lsn = xlocked ?
696  PageGetLSN(stack->page) : BufferGetLSNAtomic(stack->buffer);
698 
699  /*
700  * If this page was split but the downlink was never inserted to the
701  * parent because the inserting backend crashed before doing that, fix
702  * that now.
703  */
704  if (GistFollowRight(stack->page))
705  {
706  if (!xlocked)
707  {
708  LockBuffer(stack->buffer, GIST_UNLOCK);
710  xlocked = true;
711  /* someone might've completed the split when we unlocked */
712  if (!GistFollowRight(stack->page))
713  continue;
714  }
715  gistfixsplit(&state, giststate);
716 
717  UnlockReleaseBuffer(stack->buffer);
718  xlocked = false;
719  state.stack = stack = stack->parent;
720  continue;
721  }
722 
723  if ((stack->blkno != GIST_ROOT_BLKNO &&
724  stack->parent->lsn < GistPageGetNSN(stack->page)) ||
725  GistPageIsDeleted(stack->page))
726  {
727  /*
728  * Concurrent split or page deletion detected. There's no
729  * guarantee that the downlink for this page is consistent with
730  * the tuple we're inserting anymore, so go back to parent and
731  * rechoose the best child.
732  */
733  UnlockReleaseBuffer(stack->buffer);
734  xlocked = false;
735  state.stack = stack = stack->parent;
736  continue;
737  }
738 
739  if (!GistPageIsLeaf(stack->page))
740  {
741  /*
742  * This is an internal page so continue to walk down the tree.
743  * Find the child node that has the minimum insertion penalty.
744  */
745  BlockNumber childblkno;
746  IndexTuple newtup;
747  GISTInsertStack *item;
748  OffsetNumber downlinkoffnum;
749 
750  downlinkoffnum = gistchoose(state.r, stack->page, itup, giststate);
751  iid = PageGetItemId(stack->page, downlinkoffnum);
752  idxtuple = (IndexTuple) PageGetItem(stack->page, iid);
753  childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
754 
755  /*
756  * Check that it's not a leftover invalid tuple from pre-9.1
757  */
758  if (GistTupleIsInvalid(idxtuple))
759  ereport(ERROR,
760  (errmsg("index \"%s\" contains an inner tuple marked as invalid",
762  errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),
763  errhint("Please REINDEX it.")));
764 
765  /*
766  * Check that the key representing the target child node is
767  * consistent with the key we're inserting. Update it if it's not.
768  */
769  newtup = gistgetadjusted(state.r, idxtuple, itup, giststate);
770  if (newtup)
771  {
772  /*
773  * Swap shared lock for an exclusive one. Beware, the page may
774  * change while we unlock/lock the page...
775  */
776  if (!xlocked)
777  {
778  LockBuffer(stack->buffer, GIST_UNLOCK);
780  xlocked = true;
781  stack->page = (Page) BufferGetPage(stack->buffer);
782 
783  if (PageGetLSN(stack->page) != stack->lsn)
784  {
785  /* the page was changed while we unlocked it, retry */
786  continue;
787  }
788  }
789 
790  /*
791  * Update the tuple.
792  *
793  * We still hold the lock after gistinserttuple(), but it
794  * might have to split the page to make the updated tuple fit.
795  * In that case the updated tuple might migrate to the other
796  * half of the split, so we have to go back to the parent and
797  * descend back to the half that's a better fit for the new
798  * tuple.
799  */
800  if (gistinserttuple(&state, stack, giststate, newtup,
801  downlinkoffnum))
802  {
803  /*
804  * If this was a root split, the root page continues to be
805  * the parent and the updated tuple went to one of the
806  * child pages, so we just need to retry from the root
807  * page.
808  */
809  if (stack->blkno != GIST_ROOT_BLKNO)
810  {
811  UnlockReleaseBuffer(stack->buffer);
812  xlocked = false;
813  state.stack = stack = stack->parent;
814  }
815  continue;
816  }
817  }
818  LockBuffer(stack->buffer, GIST_UNLOCK);
819  xlocked = false;
820 
821  /* descend to the chosen child */
822  item = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
823  item->blkno = childblkno;
824  item->parent = stack;
825  item->downlinkoffnum = downlinkoffnum;
826  state.stack = stack = item;
827  }
828  else
829  {
830  /*
831  * Leaf page. Insert the new key. We've already updated all the
832  * parents on the way down, but we might have to split the page if
833  * it doesn't fit. gistinserttuple() will take care of that.
834  */
835 
836  /*
837  * Swap shared lock for an exclusive one. Be careful, the page may
838  * change while we unlock/lock the page...
839  */
840  if (!xlocked)
841  {
842  LockBuffer(stack->buffer, GIST_UNLOCK);
844  xlocked = true;
845  stack->page = (Page) BufferGetPage(stack->buffer);
846  stack->lsn = PageGetLSN(stack->page);
847 
848  if (stack->blkno == GIST_ROOT_BLKNO)
849  {
850  /*
851  * the only page that can become inner instead of leaf is
852  * the root page, so for root we should recheck it
853  */
854  if (!GistPageIsLeaf(stack->page))
855  {
856  /*
857  * very rare situation: during unlock/lock index with
858  * number of pages = 1 was increased
859  */
860  LockBuffer(stack->buffer, GIST_UNLOCK);
861  xlocked = false;
862  continue;
863  }
864 
865  /*
866  * we don't need to check root split, because checking
867  * leaf/inner is enough to recognize split for root
868  */
869  }
870  else if ((GistFollowRight(stack->page) ||
871  stack->parent->lsn < GistPageGetNSN(stack->page)) ||
872  GistPageIsDeleted(stack->page))
873  {
874  /*
875  * The page was split or deleted while we momentarily
876  * unlocked the page. Go back to parent.
877  */
878  UnlockReleaseBuffer(stack->buffer);
879  xlocked = false;
880  state.stack = stack = stack->parent;
881  continue;
882  }
883  }
884 
885  /* now state.stack->(page, buffer and blkno) points to leaf page */
886 
887  gistinserttuple(&state, stack, giststate, itup,
889  LockBuffer(stack->buffer, GIST_UNLOCK);
890 
891  /* Release any pins we might still hold before exiting */
892  for (; stack; stack = stack->parent)
893  ReleaseBuffer(stack->buffer);
894  break;
895  }
896  }
897 }
898 
899 /*
900  * Traverse the tree to find path from root page to specified "child" block.
901  *
902  * returns a new insertion stack, starting from the parent of "child", up
903  * to the root. *downlinkoffnum is set to the offset of the downlink in the
904  * direct parent of child.
905  *
906  * To prevent deadlocks, this should lock only one page at a time.
907  */
908 static GISTInsertStack *
909 gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
910 {
911  Page page;
912  Buffer buffer;
913  OffsetNumber i,
914  maxoff;
915  ItemId iid;
916  IndexTuple idxtuple;
917  List *fifo;
918  GISTInsertStack *top,
919  *ptr;
920  BlockNumber blkno;
921 
922  top = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
923  top->blkno = GIST_ROOT_BLKNO;
925 
926  fifo = list_make1(top);
927  while (fifo != NIL)
928  {
929  /* Get next page to visit */
930  top = linitial(fifo);
931  fifo = list_delete_first(fifo);
932 
933  buffer = ReadBuffer(r, top->blkno);
934  LockBuffer(buffer, GIST_SHARE);
935  gistcheckpage(r, buffer);
936  page = (Page) BufferGetPage(buffer);
937 
938  if (GistPageIsLeaf(page))
939  {
940  /*
941  * Because we scan the index top-down, all the rest of the pages
942  * in the queue must be leaf pages as well.
943  */
944  UnlockReleaseBuffer(buffer);
945  break;
946  }
947 
948  /* currently, internal pages are never deleted */
949  Assert(!GistPageIsDeleted(page));
950 
951  top->lsn = BufferGetLSNAtomic(buffer);
952 
953  /*
954  * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a
955  * downlink. This should not normally happen..
956  */
957  if (GistFollowRight(page))
958  elog(ERROR, "concurrent GiST page split was incomplete");
959 
960  if (top->parent && top->parent->lsn < GistPageGetNSN(page) &&
961  GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
962  {
963  /*
964  * Page was split while we looked elsewhere. We didn't see the
965  * downlink to the right page when we scanned the parent, so add
966  * it to the queue now.
967  *
968  * Put the right page ahead of the queue, so that we visit it
969  * next. That's important, because if this is the lowest internal
970  * level, just above leaves, we might already have queued up some
971  * leaf pages, and we assume that there can't be any non-leaf
972  * pages behind leaf pages.
973  */
974  ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
975  ptr->blkno = GistPageGetOpaque(page)->rightlink;
977  ptr->parent = top->parent;
978 
979  fifo = lcons(ptr, fifo);
980  }
981 
982  maxoff = PageGetMaxOffsetNumber(page);
983 
984  for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
985  {
986  iid = PageGetItemId(page, i);
987  idxtuple = (IndexTuple) PageGetItem(page, iid);
988  blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
989  if (blkno == child)
990  {
991  /* Found it! */
992  UnlockReleaseBuffer(buffer);
993  *downlinkoffnum = i;
994  return top;
995  }
996  else
997  {
998  /* Append this child to the list of pages to visit later */
999  ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
1000  ptr->blkno = blkno;
1001  ptr->downlinkoffnum = i;
1002  ptr->parent = top;
1003 
1004  fifo = lappend(fifo, ptr);
1005  }
1006  }
1007 
1008  UnlockReleaseBuffer(buffer);
1009  }
1010 
1011  elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
1012  RelationGetRelationName(r), child);
1013  return NULL; /* keep compiler quiet */
1014 }
1015 
1016 /*
1017  * Updates the stack so that child->parent is the correct parent of the
1018  * child. child->parent must be exclusively locked on entry, and will
1019  * remain so at exit, but it might not be the same page anymore.
1020  */
1021 static void
/*
 * NOTE(review): the doxygen extraction dropped source line 1022 here --
 * the function signature, which per the file-scope prototype is:
 *     gistFindCorrectParent(Relation r, GISTInsertStack *child)
 */
1023 {
1024  GISTInsertStack *parent = child->parent;
1025 
1026  gistcheckpage(r, parent->buffer);
1027  parent->page = (Page) BufferGetPage(parent->buffer);
1028 
1029  /* here we don't need to distinguish between split and page update */
1030  if (child->downlinkoffnum == InvalidOffsetNumber ||
1031  parent->lsn != PageGetLSN(parent->page))
1032  {
1033  /* parent is changed, look child in right links until found */
1034  OffsetNumber i,
1035  maxoff;
1036  ItemId iid;
1037  IndexTuple idxtuple;
1038  GISTInsertStack *ptr;
1039 
1040  while (true)
1041  {
1042  maxoff = PageGetMaxOffsetNumber(parent->page);
1043  for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
1044  {
1045  iid = PageGetItemId(parent->page, i);
1046  idxtuple = (IndexTuple) PageGetItem(parent->page, iid);
1047  if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == child->blkno)
1048  {
1049  /* yes!!, found */
1050  child->downlinkoffnum = i;
1051  return;
1052  }
1053  }
1054 
1055  parent->blkno = GistPageGetOpaque(parent->page)->rightlink;
1056  UnlockReleaseBuffer(parent->buffer);
1057  if (parent->blkno == InvalidBlockNumber)
1058  {
1059  /*
1060  * End of chain and still didn't find parent. It's a very-very
1061  * rare situation when root splitted.
1062  */
1063  break;
1064  }
1065  parent->buffer = ReadBuffer(r, parent->blkno);
1066  LockBuffer(parent->buffer, GIST_EXCLUSIVE)
1067  gistcheckpage(r, parent->buffer);
1068  parent->page = (Page) BufferGetPage(parent->buffer);
1069  }
1070 
1071  /*
1072  * awful!!, we need search tree to find parent ... , but before we
1073  * should release all old parent
1074  */
1075 
1076  ptr = child->parent->parent; /* child->parent already released
1077  * above */
1078  while (ptr)
1079  {
1080  ReleaseBuffer(ptr->buffer);
1081  ptr = ptr->parent;
1082  }
1083 
1084  /* ok, find new path */
1085  ptr = parent = gistFindPath(r, child->blkno, &child->downlinkoffnum);
1086 
1087  /* read all buffers as expected by caller */
1088  /* note we don't lock them or gistcheckpage them here! */
1089  while (ptr)
1090  {
1091  ptr->buffer = ReadBuffer(r, ptr->blkno);
1092  ptr->page = (Page) BufferGetPage(ptr->buffer);
1093  ptr = ptr->parent;
1094  }
1095 
1096  /* install new chain of parents to stack */
1097  child->parent = parent;
1098 
1099  /* make recursive call to normal processing */
/*
 * NOTE(review): source line 1100 is missing from this extraction.
 * Presumably it re-acquires GIST_EXCLUSIVE on the new parent buffer
 * before recursing (the contract above says child->parent stays
 * exclusively locked) -- confirm against upstream gist.c.
 */
1101  gistFindCorrectParent(r, child);
1102  }
1103 }
1104 
1105 /*
1106  * Form a downlink pointer for the page in 'buf'.
1107  *
1108  * The downlink is the union (via gistgetadjusted) of every tuple on the
1109  * page; for an empty page the parent's existing downlink is copied instead.
1110  */
1108 static IndexTuple
/*
 * NOTE(review): source line 1109 was dropped by the extraction -- the
 * first half of the signature, per the file's reference listing:
 *     gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
 */
1110  GISTInsertStack *stack)
1111 {
1112  Page page = BufferGetPage(buf);
1113  OffsetNumber maxoff;
1114  OffsetNumber offset;
1115  IndexTuple downlink = NULL;
1116 
1117  maxoff = PageGetMaxOffsetNumber(page);
1118  for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
1119  {
1120  IndexTuple ituple = (IndexTuple)
1121  PageGetItem(page, PageGetItemId(page, offset));
1122 
1123  if (downlink == NULL)
1124  downlink = CopyIndexTuple(ituple);
1125  else
1126  {
1127  IndexTuple newdownlink;
1128 
1129  newdownlink = gistgetadjusted(rel, downlink, ituple,
1130  giststate);
1131  if (newdownlink)
1132  downlink = newdownlink;
1133  }
1134  }
1135 
1136  /*
1137  * If the page is completely empty, we can't form a meaningful downlink
1138  * for it. But we have to insert a downlink for the page. Any key will do,
1139  * as long as its consistent with the downlink of parent page, so that we
1140  * can legally insert it to the parent. A minimal one that matches as few
1141  * scans as possible would be best, to keep scans from doing useless work,
1142  * but we don't know how to construct that. So we just use the downlink of
1143  * the original page that was split - that's as far from optimal as it can
1144  * get but will do..
1145  */
1146  if (!downlink)
1147  {
1148  ItemId iid;
1149 
/*
 * NOTE(review): source line 1150 is missing here. Since line 1155 below
 * releases the parent lock with GIST_UNLOCK, line 1150 presumably takes
 * GIST_EXCLUSIVE on stack->parent->buffer -- confirm against upstream.
 */
1151  gistFindCorrectParent(rel, stack);
1152  iid = PageGetItemId(stack->parent->page, stack->downlinkoffnum);
1153  downlink = (IndexTuple) PageGetItem(stack->parent->page, iid);
1154  downlink = CopyIndexTuple(downlink);
1155  LockBuffer(stack->parent->buffer, GIST_UNLOCK);
1156  }
1157 
/*
 * NOTE(review): source line 1158 is missing. It presumably points the
 * downlink's t_tid at this page, i.e. something like
 * ItemPointerSetBlockNumber(&(downlink->t_tid), BufferGetBlockNumber(buf));
 * (both helpers appear in this file's reference list) -- confirm upstream.
 */
1159  GistTupleSetValid(downlink);
1160 
1161  return downlink;
1162 }
1163 
1164 
1165 /*
1166  * Complete the incomplete split of state->stack->page.
1167  */
1168 static void
/*
 * NOTE(review): source line 1169 (the signature) was dropped by the
 * extraction; per the file's reference listing it is:
 *     gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
 */
1170 {
1171  GISTInsertStack *stack = state->stack;
1172  Buffer buf;
1173  Page page;
1174  List *splitinfo = NIL;
1175 
1176  ereport(LOG,
1177  (errmsg("fixing incomplete split in index \"%s\", block %u",
1178  RelationGetRelationName(state->r), stack->blkno)));
1179 
1180  Assert(GistFollowRight(stack->page));
1182 
1183  buf = stack->buffer;
1184 
1185  /*
1186  * Read the chain of split pages, following the rightlinks. Construct a
1187  * downlink tuple for each page.
1188  */
1189  for (;;)
1190  {
/*
 * NOTE(review): source line 1191 is missing -- the declaration of 'si'
 * used below, presumably
 *     GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
 * -- confirm against upstream gist.c.
 */
1192  IndexTuple downlink;
1193 
1194  page = BufferGetPage(buf);
1195 
1196  /* Form the new downlink tuples to insert to parent */
1197  downlink = gistformdownlink(state->r, buf, giststate, stack);
1198 
1199  si->buf = buf;
1200  si->downlink = downlink;
1201 
1202  splitinfo = lappend(splitinfo, si);
1203 
1204  if (GistFollowRight(page))
1205  {
1206  /* lock next page */
1207  buf = ReadBuffer(state->r, GistPageGetOpaque(page)->rightlink);
/*
 * NOTE(review): source line 1208 is missing -- presumably
 * LockBuffer(buf, GIST_EXCLUSIVE); matching the "lock next page"
 * comment above -- confirm against upstream.
 */
1209  }
1210  else
1211  break;
1212  }
1213 
1214  /* Insert the downlinks */
1215  gistfinishsplit(state, stack, giststate, splitinfo, false);
1216 }
1217 
1218 /*
1219  * Insert or replace a tuple in stack->buffer. If 'oldoffnum' is valid, the
1220  * tuple at 'oldoffnum' is replaced, otherwise the tuple is inserted as new.
1221  * 'stack' represents the path from the root to the page being updated.
1222  *
1223  * The caller must hold an exclusive lock on stack->buffer. The lock is still
1224  * held on return, but the page might not contain the inserted tuple if the
1225  * page was split. The function returns true if the page was split, false
1226  * otherwise.
1227  */
1228 static bool
/*
 * NOTE(review): source line 1229 (first line of the signature) was dropped
 * by the extraction; per the file-scope prototype it is:
 *     gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
 */
1230  GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
1231 {
/* Thin convenience wrapper: single tuple, no child buffers, keep locks. */
1232  return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
1233  InvalidBuffer, InvalidBuffer, false, false);
1234 }
1235 
1236 /* ----------------
1237  * An extended workhorse version of gistinserttuple(). This version allows
1238  * inserting multiple tuples, or replacing a single tuple with multiple tuples.
1239  * This is used to recursively update the downlinks in the parent when a page
1240  * is split.
1241  *
1242  * If leftchild and rightchild are valid, we're inserting/replacing the
1243  * downlink for rightchild, and leftchild is its left sibling. We clear the
1244  * F_FOLLOW_RIGHT flag and update NSN on leftchild, atomically with the
1245  * insertion of the downlink.
1246  *
1247  * To avoid holding locks for longer than necessary, when recursing up the
1248  * tree to update the parents, the locking is a bit peculiar here. On entry,
1249  * the caller must hold an exclusive lock on stack->buffer, as well as
1250  * leftchild and rightchild if given. On return:
1251  *
1252  * - Lock on stack->buffer is released, if 'unlockbuf' is true. The page is
1253  * always kept pinned, however.
1254  * - Lock on 'leftchild' is released, if 'unlockleftchild' is true. The page
1255  * is kept pinned.
1256  * - Lock and pin on 'rightchild' are always released.
1257  *
1258  * Returns 'true' if the page had to be split. Note that if the page was
1259  * split, the inserted/updated tuples might've been inserted to a right
1260  * sibling of stack->buffer instead of stack->buffer itself.
1261  */
1262 static bool
/*
 * NOTE(review): source lines 1263 and 1266 were dropped by the extraction.
 * Per the file-scope prototype the full signature is:
 *     gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 *                      GISTSTATE *giststate,
 *                      IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
 *                      Buffer leftchild, Buffer rightchild,
 *                      bool unlockbuf, bool unlockleftchild)
 */
1264  GISTSTATE *giststate,
1265  IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
1267  bool unlockbuf, bool unlockleftchild)
1268 {
1269  List *splitinfo;
1270  bool is_split;
1271 
1272  /*
1273  * Check for any rw conflicts (in serializable isolation level) just
1274  * before we intend to modify the page
1275  */
/*
 * NOTE(review): source line 1276 is missing -- per the comment above it is
 * the CheckForSerializableConflictIn(...) call (declared at file scope) --
 * confirm the exact arguments against upstream.
 */
1277 
1278  /* Insert the tuple(s) to the page, splitting the page if necessary */
1279  is_split = gistplacetopage(state->r, state->freespace, giststate,
1280  stack->buffer,
1281  tuples, ntup,
1282  oldoffnum, NULL,
1283  leftchild,
1284  &splitinfo,
1285  true,
1286  state->heapRel,
1287  state->is_build);
1288 
1289  /*
1290  * Before recursing up in case the page was split, release locks on the
1291  * child pages. We don't need to keep them locked when updating the
1292  * parent.
1293  */
/*
 * NOTE(review): source lines 1294-1295 are missing -- per the header
 * comment ("Lock and pin on 'rightchild' are always released") they
 * presumably read:
 *     if (BufferIsValid(rightchild))
 *         UnlockReleaseBuffer(rightchild);
 * -- confirm against upstream.
 */
1296  if (BufferIsValid(leftchild) && unlockleftchild)
/*
 * NOTE(review): source line 1297 (the body of the 'if' above) is missing
 * -- presumably LockBuffer(leftchild, GIST_UNLOCK); (lock released, pin
 * kept, per the header comment) -- confirm against upstream.
 */
1298 
1299  /*
1300  * If we had to split, insert/update the downlinks in the parent. If the
1301  * caller requested us to release the lock on stack->buffer, tell
1302  * gistfinishsplit() to do that as soon as it's safe to do so. If we
1303  * didn't have to split, release it ourselves.
1304  */
1305  if (splitinfo)
1306  gistfinishsplit(state, stack, giststate, splitinfo, unlockbuf);
1307  else if (unlockbuf)
1308  LockBuffer(stack->buffer, GIST_UNLOCK);
1309 
1310  return is_split;
1311 }
1312 
1313 /*
1314  * Finish an incomplete split by inserting/updating the downlinks in parent
1315  * page. 'splitinfo' contains all the child pages involved in the split,
1316  * from left-to-right.
1317  *
1318  * On entry, the caller must hold a lock on stack->buffer and all the child
1319  * pages in 'splitinfo'. If 'unlockbuf' is true, the lock on stack->buffer is
1320  * released on return. The child pages are always unlocked and unpinned.
1321  */
1322 static void
/*
 * NOTE(review): source line 1323 (first line of the signature) was dropped
 * by the extraction; per the file-scope prototype it is:
 *     gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
 */
1324  GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
1325 {
1326  GISTPageSplitInfo *right;
1327  GISTPageSplitInfo *left;
1328  IndexTuple tuples[2];
1329 
1330  /* A split always contains at least two halves */
1331  Assert(list_length(splitinfo) >= 2);
1332 
1333  /*
1334  * We need to insert downlinks for each new page, and update the downlink
1335  * for the original (leftmost) page in the split. Begin at the rightmost
1336  * page, inserting one downlink at a time until there's only two pages
1337  * left. Finally insert the downlink for the last new page and update the
1338  * downlink for the original page as one operation.
1339  */
1341 
1342  /*
1343  * Insert downlinks for the siblings from right to left, until there are
1344  * only two siblings left.
1345  */
1346  for (int pos = list_length(splitinfo) - 1; pos > 1; pos--)
1347  {
1348  right = (GISTPageSplitInfo *) list_nth(splitinfo, pos);
1349  left = (GISTPageSplitInfo *) list_nth(splitinfo, pos - 1);
1350 
1351  gistFindCorrectParent(state->r, stack);
1352  if (gistinserttuples(state, stack->parent, giststate,
1353  &right->downlink, 1,
/*
 * NOTE(review): source line 1354 (the 'oldoffnum' argument) is missing --
 * presumably InvalidOffsetNumber, since these downlinks are inserted as
 * new rather than replacing an existing one -- confirm against upstream.
 */
1355  left->buf, right->buf, false, false))
1356  {
1357  /*
1358  * If the parent page was split, the existing downlink might have
1359  * moved.
1360  */
/*
 * NOTE(review): source line 1361 (the body of this branch) is missing --
 * presumably it invalidates stack->downlinkoffnum so that the next
 * gistFindCorrectParent() call re-searches -- confirm against upstream.
 */
1362  }
1363  /* gistinserttuples() released the lock on right->buf. */
1364  }
1365 
1366  right = (GISTPageSplitInfo *) lsecond(splitinfo);
1367  left = (GISTPageSplitInfo *) linitial(splitinfo);
1368 
1369  /*
1370  * Finally insert downlink for the remaining right page and update the
1371  * downlink for the original page to not contain the tuples that were
1372  * moved to the new pages.
1373  */
1374  tuples[0] = left->downlink;
1375  tuples[1] = right->downlink;
1376  gistFindCorrectParent(state->r, stack);
1377  if (gistinserttuples(state, stack->parent, giststate,
1378  tuples, 2,
1379  stack->downlinkoffnum,
1380  left->buf, right->buf,
1381  true, /* Unlock parent */
1382  unlockbuf /* Unlock stack->buffer if caller wants
1383  * that */
1384  ))
1385  {
1386  /*
1387  * If the parent page was split, the downlink might have moved.
1388  */
/*
 * NOTE(review): source line 1389 (the body of this branch) is missing --
 * presumably the same downlinkoffnum invalidation as in the loop above --
 * confirm against upstream.
 */
1390  }
1391 
1392  Assert(left->buf == stack->buffer);
1393 
1394  /*
1395  * If we split the page because we had to adjust the downlink on an
1396  * internal page, while descending the tree for inserting a new tuple,
1397  * then this might no longer be the correct page for the new tuple. The
1398  * downlink to this page might not cover the new tuple anymore, it might
1399  * need to go to the newly-created right sibling instead. Tell the caller
1400  * to walk back up the stack, to re-check at the parent which page to
1401  * insert to.
1402  *
1403  * Normally, the LSN-NSN interlock during the tree descend would also
1404  * detect that a concurrent split happened (by ourselves), and cause us to
1405  * retry at the parent. But that mechanism doesn't work during index
1406  * build, because we don't do WAL-logging, and don't update LSNs, during
1407  * index build.
1408  */
1409  stack->retry_from_parent = true;
1410 }
1411 
1412 /*
1413  * gistSplit -- split a page in the tree and fill struct
1414  * used for XLOG and real writes buffers. Function is recursive, ie
1415  * it will split page until keys will fit in every page.
1416  */
/*
 * NOTE(review): source lines 1417-1418 (return type and first line of the
 * signature) were dropped by the extraction; per the file's reference
 * listing the signature is:
 *     SplitedPageLayout *
 *     gistSplit(Relation r,
 */
1419  Page page,
1420  IndexTuple *itup, /* contains compressed entry */
1421  int len,
1422  GISTSTATE *giststate)
1423 {
1424  IndexTuple *lvectup,
1425  *rvectup;
1426  GistSplitVector v;
1427  int i;
1428  SplitedPageLayout *res = NULL;
1429 
1430  /* this should never recurse very deeply, but better safe than sorry */
/*
 * NOTE(review): source line 1431 is missing -- per the comment above it is
 * presumably check_stack_depth(); (declared in this file's references) --
 * confirm against upstream.
 */
1432 
1433  /* there's no point in splitting an empty page */
1434  Assert(len > 0);
1435 
1436  /*
1437  * If a single tuple doesn't fit on a page, no amount of splitting will
1438  * help.
1439  */
1440  if (len == 1)
1441  ereport(ERROR,
1442  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
1443  errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
1444  IndexTupleSize(itup[0]), GiSTPageSize,
/*
 * NOTE(review): source line 1445 is missing -- the closing arguments of
 * the ereport(), presumably RelationGetRelationName(r)) plus closing
 * parentheses (it supplies the "%s" index name) -- confirm upstream.
 */
1446 
1447  memset(v.spl_lisnull, true,
1448  sizeof(bool) * giststate->nonLeafTupdesc->natts);
1449  memset(v.spl_risnull, true,
1450  sizeof(bool) * giststate->nonLeafTupdesc->natts);
1451  gistSplitByKey(r, page, itup, len, giststate, &v, 0);
1452 
1453  /* form left and right vector */
1454  lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
1455  rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
1456 
1457  for (i = 0; i < v.splitVector.spl_nleft; i++)
1458  lvectup[i] = itup[v.splitVector.spl_left[i] - 1];
1459 
1460  for (i = 0; i < v.splitVector.spl_nright; i++)
1461  rvectup[i] = itup[v.splitVector.spl_right[i] - 1];
1462 
1463  /* finalize splitting (may need another split) */
1464  if (!gistfitpage(rvectup, v.splitVector.spl_nright))
1465  {
1466  res = gistSplit(r, page, rvectup, v.splitVector.spl_nright, giststate);
1467  }
1468  else
1469  {
1470  ROTATEDIST(res);
1471  res->block.num = v.splitVector.spl_nright;
1472  res->list = gistfillitupvec(rvectup, v.splitVector.spl_nright, &(res->lenlist));
1473  res->itup = gistFormTuple(giststate, r, v.spl_rattr, v.spl_risnull, false);
1474  }
1475 
1476  if (!gistfitpage(lvectup, v.splitVector.spl_nleft))
1477  {
1478  SplitedPageLayout *resptr,
1479  *subres;
1480 
1481  resptr = subres = gistSplit(r, page, lvectup, v.splitVector.spl_nleft, giststate);
1482 
1483  /* install on list's tail */
1484  while (resptr->next)
1485  resptr = resptr->next;
1486 
1487  resptr->next = res;
1488  res = subres;
1489  }
1490  else
1491  {
1492  ROTATEDIST(res);
1493  res->block.num = v.splitVector.spl_nleft;
1494  res->list = gistfillitupvec(lvectup, v.splitVector.spl_nleft, &(res->lenlist));
1495  res->itup = gistFormTuple(giststate, r, v.spl_lattr, v.spl_lisnull, false);
1496  }
1497 
1498  return res;
1499 }
1500 
1501 /*
1502  * Create a GISTSTATE and fill it with information about the index
1503  */
1504 GISTSTATE *
/*
 * NOTE(review): source line 1505 (the signature) was dropped by the
 * extraction; per the file's reference listing it is:
 *     initGISTstate(Relation index)
 */
1506 {
1507  GISTSTATE *giststate;
1508  MemoryContext scanCxt;
1509  MemoryContext oldCxt;
1510  int i;
1511 
1512  /* safety check to protect fixed-size arrays in GISTSTATE */
1513  if (index->rd_att->natts > INDEX_MAX_KEYS)
1514  elog(ERROR, "numberOfAttributes %d > %d",
1515  index->rd_att->natts, INDEX_MAX_KEYS);
1516 
1517  /* Create the memory context that will hold the GISTSTATE */
/*
 * NOTE(review): source lines 1518 and 1520 are missing around the context
 * name below -- presumably the AllocSetContextCreate(CurrentMemoryContext,
 * ..., ALLOCSET_DEFAULT_SIZES) call assigning scanCxt (both identifiers
 * appear in this file's reference list) -- confirm against upstream.
 */
1519  "GiST scan context",
1521  oldCxt = MemoryContextSwitchTo(scanCxt);
1522 
1523  /* Create and fill in the GISTSTATE */
1524  giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
1525 
1526  giststate->scanCxt = scanCxt;
1527  giststate->tempCxt = scanCxt; /* caller must change this if needed */
1528  giststate->leafTupdesc = index->rd_att;
1529 
1530  /*
1531  * The truncated tupdesc for non-leaf index tuples, which doesn't contain
1532  * the INCLUDE attributes.
1533  *
1534  * It is used to form tuples during tuple adjustment and page split.
1535  * B-tree creates shortened tuple descriptor for every truncated tuple,
1536  * because it is doing this less often: it does not have to form truncated
1537  * tuples during page split. Also, B-tree is not adjusting tuples on
1538  * internal pages the way GiST does.
1539  */
1540  giststate->nonLeafTupdesc = CreateTupleDescCopyConstr(index->rd_att);
1541  giststate->nonLeafTupdesc->natts =
/*
 * NOTE(review): source lines 1542 and 1544 are missing -- the value
 * assigned here and the loop header opening the '{' below. Given the
 * second loop's "for (; i < index->rd_att->natts; i++)" over INCLUDE
 * attributes, both presumably use the index's number of KEY attributes
 * and the loop iterates i over the key columns. Also note: throughout
 * this loop the extraction dropped the index_getprocinfo(...) argument
 * lines (with the GIST_*_PROC support numbers) and the index_getprocid()
 * tests guarding the optional support functions -- confirm upstream.
 */
1543 
1545  {
1546  fmgr_info_copy(&(giststate->consistentFn[i]),
1548  scanCxt);
1549  fmgr_info_copy(&(giststate->unionFn[i]),
1551  scanCxt);
1552 
1553  /* opclasses are not required to provide a Compress method */
1555  fmgr_info_copy(&(giststate->compressFn[i]),
1557  scanCxt);
1558  else
1559  giststate->compressFn[i].fn_oid = InvalidOid;
1560 
1561  /* opclasses are not required to provide a Decompress method */
1563  fmgr_info_copy(&(giststate->decompressFn[i]),
1565  scanCxt);
1566  else
1567  giststate->decompressFn[i].fn_oid = InvalidOid;
1568 
1569  fmgr_info_copy(&(giststate->penaltyFn[i]),
1571  scanCxt);
1572  fmgr_info_copy(&(giststate->picksplitFn[i]),
1574  scanCxt);
1575  fmgr_info_copy(&(giststate->equalFn[i]),
1577  scanCxt);
1578 
1579  /* opclasses are not required to provide a Distance method */
1581  fmgr_info_copy(&(giststate->distanceFn[i]),
1583  scanCxt);
1584  else
1585  giststate->distanceFn[i].fn_oid = InvalidOid;
1586 
1587  /* opclasses are not required to provide a Fetch method */
1589  fmgr_info_copy(&(giststate->fetchFn[i]),
1591  scanCxt);
1592  else
1593  giststate->fetchFn[i].fn_oid = InvalidOid;
1594 
1595  /*
1596  * If the index column has a specified collation, we should honor that
1597  * while doing comparisons. However, we may have a collatable storage
1598  * type for a noncollatable indexed data type. If there's no index
1599  * collation then specify default collation in case the support
1600  * functions need collation. This is harmless if the support
1601  * functions don't care about collation, so we just do it
1602  * unconditionally. (We could alternatively call get_typcollation,
1603  * but that seems like expensive overkill --- there aren't going to be
1604  * any cases where a GiST storage type has a nondefault collation.)
1605  */
1606  if (OidIsValid(index->rd_indcollation[i]))
1607  giststate->supportCollation[i] = index->rd_indcollation[i];
1608  else
1609  giststate->supportCollation[i] = DEFAULT_COLLATION_OID;
1610  }
1611 
1612  /* No opclass information for INCLUDE attributes */
1613  for (; i < index->rd_att->natts; i++)
1614  {
1615  giststate->consistentFn[i].fn_oid = InvalidOid;
1616  giststate->unionFn[i].fn_oid = InvalidOid;
1617  giststate->compressFn[i].fn_oid = InvalidOid;
1618  giststate->decompressFn[i].fn_oid = InvalidOid;
1619  giststate->penaltyFn[i].fn_oid = InvalidOid;
1620  giststate->picksplitFn[i].fn_oid = InvalidOid;
1621  giststate->equalFn[i].fn_oid = InvalidOid;
1622  giststate->distanceFn[i].fn_oid = InvalidOid;
1623  giststate->fetchFn[i].fn_oid = InvalidOid;
1624  giststate->supportCollation[i] = InvalidOid;
1625  }
1626 
1627  MemoryContextSwitchTo(oldCxt);
1628 
1629  return giststate;
1630 }
1631 
1632 void
/*
 * NOTE(review): source line 1633 (the signature) was dropped by the
 * extraction; per the file's reference listing it is:
 *     freeGISTstate(GISTSTATE *giststate)
 * Releases everything initGISTstate() allocated, since it all lives in
 * the scanCxt memory context.
 */
1634 {
1635  /* It's sufficient to delete the scanCxt */
1636  MemoryContextDelete(giststate->scanCxt);
1637 }
1638 
1639 /*
1640  * gistprunepage() -- try to remove LP_DEAD items from the given page.
1641  * Function assumes that buffer is exclusively locked.
1642  */
1643 static void
1644 gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel)
1645 {
/*
 * NOTE(review): source line 1646 is missing -- the declaration of the
 * 'deletable' array used below, presumably
 *     OffsetNumber deletable[MaxIndexTuplesPerPage];
 * (the macro appears in this file's reference list) -- confirm upstream.
 */
1647  int ndeletable = 0;
1648  OffsetNumber offnum,
1649  maxoff;
1650 
1651  Assert(GistPageIsLeaf(page));
1652 
1653  /*
1654  * Scan over all items to see which ones need to be deleted according to
1655  * LP_DEAD flags.
1656  */
1657  maxoff = PageGetMaxOffsetNumber(page);
1658  for (offnum = FirstOffsetNumber;
1659  offnum <= maxoff;
1660  offnum = OffsetNumberNext(offnum))
1661  {
1662  ItemId itemId = PageGetItemId(page, offnum);
1663 
1664  if (ItemIdIsDead(itemId))
1665  deletable[ndeletable++] = offnum;
1666  }
1667 
1668  if (ndeletable > 0)
1669  {
1670  TransactionId latestRemovedXid = InvalidTransactionId;
1671 
/*
 * NOTE(review): source line 1672 is missing -- a condition guarding the
 * xid-horizon computation below, presumably tied to WAL/standby needs
 * (the horizon is only used by the gistXLogDelete() call further down) --
 * confirm against upstream.
 */
1673  latestRemovedXid =
1674  index_compute_xid_horizon_for_tuples(rel, heapRel, buffer,
1675  deletable, ndeletable);
1676 
/*
 * NOTE(review): source line 1677 is missing -- presumably
 * START_CRIT_SECTION(); which pairs with the END_CRIT_SECTION() below --
 * confirm against upstream.
 */
1678 
1679  PageIndexMultiDelete(page, deletable, ndeletable);
1680 
1681  /*
1682  * Mark the page as not containing any LP_DEAD items. This is not
1683  * certainly true (there might be some that have recently been marked,
1684  * but weren't included in our target-item list), but it will almost
1685  * always be true and it doesn't seem worth an additional page scan to
1686  * check it. Remember that F_HAS_GARBAGE is only a hint anyway.
1687  */
/*
 * NOTE(review): source line 1688 is missing -- per the comment above it is
 * presumably GistClearPageHasGarbage(page); (macro listed in this file's
 * references) -- confirm against upstream.
 */
1689 
1690  MarkBufferDirty(buffer);
1691 
1692  /* XLOG stuff */
1693  if (RelationNeedsWAL(rel))
1694  {
1695  XLogRecPtr recptr;
1696 
1697  recptr = gistXLogDelete(buffer,
1698  deletable, ndeletable,
1699  latestRemovedXid);
1700 
1701  PageSetLSN(page, recptr);
1702  }
1703  else
1704  PageSetLSN(page, gistGetFakeLSN(rel));
1705 
1706  END_CRIT_SECTION();
1707  }
1708 
1709  /*
1710  * Note: if we didn't find any LP_DEAD items, then the page's
1711  * F_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
1712  * separate write to clear it, however. We will clear it when we split
1713  * the page.
1714  */
1715 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static Datum values[MAXATTR]
Definition: bootstrap.c:156
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2755
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3915
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:3004
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3938
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1573
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4156
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:749
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:702
#define P_NEW
Definition: bufmgr.h:91
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
@ RBM_NORMAL
Definition: bufmgr.h:39
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void PageRestoreTempPage(Page tempPage, Page oldPage)
Definition: bufpage.c:424
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1161
Page PageGetTempPageCopySpecial(Page page)
Definition: bufpage.c:402
bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum, Item newtup, Size newsize)
Definition: bufpage.c:1405
void PageIndexTupleDelete(Page page, OffsetNumber offnum)
Definition: bufpage.c:1052
Pointer Page
Definition: bufpage.h:78
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:356
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:234
#define PageGetItem(page, itemId)
Definition: bufpage.h:339
#define PageSetLSN(page, lsn)
Definition: bufpage.h:367
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition: bufpage.h:415
#define PageGetLSN(page)
Definition: bufpage.h:365
uint32 TransactionId
Definition: c.h:587
#define OidIsValid(objectId)
Definition: c.h:710
size_t Size
Definition: c.h:540
int errdetail(const char *fmt,...)
Definition: elog.c:1037
int errhint(const char *fmt,...)
Definition: elog.c:1151
int errcode(int sqlerrcode)
Definition: elog.c:693
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define LOG
Definition: elog.h:25
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define ereport(elevel,...)
Definition: elog.h:143
void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo, MemoryContext destcxt)
Definition: fmgr.c:594
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:361
#define PG_FUNCTION_ARGS
Definition: fmgr.h:193
#define rightchild(x)
Definition: fsmpage.c:30
#define leftchild(x)
Definition: fsmpage.c:29
TransactionId index_compute_xid_horizon_for_tuples(Relation irel, Relation hrel, Buffer ibuf, OffsetNumber *itemnos, int nitems)
Definition: genam.c:293
IndexUniqueCheck
Definition: genam.h:115
void gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate, Relation heapRel, bool is_build)
Definition: gist.c:634
static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
Definition: gist.c:1169
static IndexTuple gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, GISTInsertStack *stack)
Definition: gist.c:1109
static void gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel)
Definition: gist.c:1644
bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, Buffer buffer, IndexTuple *itup, int ntup, OffsetNumber oldoffnum, BlockNumber *newblkno, Buffer leftchildbuf, List **splitinfo, bool markfollowright, Relation heapRel, bool is_build)
Definition: gist.c:224
bool gistinsert(Relation r, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
Definition: gist.c:158
SplitedPageLayout * gistSplit(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate)
Definition: gist.c:1418
GISTSTATE * initGISTstate(Relation index)
Definition: gist.c:1505
void gistbuildempty(Relation index)
Definition: gist.c:132
static GISTInsertStack * gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
Definition: gist.c:909
static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, IndexTuple *tuples, int ntup, OffsetNumber oldoffnum, Buffer leftchild, Buffer rightchild, bool unlockbuf, bool unlockleftchild)
Definition: gist.c:1263
MemoryContext createTempGistContext(void)
Definition: gist.c:121
void freeGISTstate(GISTSTATE *giststate)
Definition: gist.c:1633
static void gistFindCorrectParent(Relation r, GISTInsertStack *child)
Definition: gist.c:1022
static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
Definition: gist.c:1229
#define ROTATEDIST(d)
Definition: gist.c:46
static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
Definition: gist.c:1323
Datum gisthandler(PG_FUNCTION_ARGS)
Definition: gist.c:60
#define GIST_DECOMPRESS_PROC
Definition: gist.h:33
#define GIST_PICKSPLIT_PROC
Definition: gist.h:35
#define GistMarkFollowRight(page)
Definition: gist.h:181
#define F_LEAF
Definition: gist.h:46
#define GIST_CONSISTENT_PROC
Definition: gist.h:30
#define GistClearFollowRight(page)
Definition: gist.h:182
#define GIST_UNION_PROC
Definition: gist.h:31
#define GIST_FETCH_PROC
Definition: gist.h:38
#define GIST_COMPRESS_PROC
Definition: gist.h:32
#define GISTNProcs
Definition: gist.h:41
#define GistClearPageHasGarbage(page)
Definition: gist.h:178
#define GIST_PENALTY_PROC
Definition: gist.h:34
#define GistPageIsLeaf(page)
Definition: gist.h:167
#define GistFollowRight(page)
Definition: gist.h:180
#define GIST_OPTIONS_PROC
Definition: gist.h:39
#define GIST_DISTANCE_PROC
Definition: gist.h:37
#define GistPageSetNSN(page, val)
Definition: gist.h:185
#define GistPageIsDeleted(page)
Definition: gist.h:170
#define GistPageGetOpaque(page)
Definition: gist.h:165
#define GIST_EQUAL_PROC
Definition: gist.h:36
#define GistPageHasGarbage(page)
Definition: gist.h:176
#define GistPageGetNSN(page)
Definition: gist.h:184
XLogRecPtr GistNSN
Definition: gist.h:60
#define GistBuildLSN
Definition: gist.h:67
#define GIST_MAX_SPLIT_PAGES
Definition: gist_private.h:39
#define GistTupleSetValid(itup)
Definition: gist_private.h:289
#define GIST_UNLOCK
Definition: gist_private.h:44
#define GIST_ROOT_BLKNO
Definition: gist_private.h:262
#define GIST_EXCLUSIVE
Definition: gist_private.h:43
#define GiSTPageSize
Definition: gist_private.h:475
#define GistTupleIsInvalid(itup)
Definition: gist_private.h:288
#define GIST_SHARE
Definition: gist_private.h:42
IndexBuildResult * gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
Definition: gistbuild.c:182
bool gistgettuple(IndexScanDesc scan, ScanDirection dir)
Definition: gistget.c:614
int64 gistgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: gistget.c:745
bool gistcanreturn(Relation index, int attno)
Definition: gistget.c:795
IndexScanDesc gistbeginscan(Relation r, int nkeys, int norderbys)
Definition: gistscan.c:74
void gistendscan(IndexScanDesc scan)
Definition: gistscan.c:349
void gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, ScanKey orderbys, int norderbys)
Definition: gistscan.c:127
void gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate, GistSplitVector *v, int attno)
Definition: gistsplit.c:623
IndexTuple gistFormTuple(GISTSTATE *giststate, Relation r, Datum *attdata, bool *isnull, bool isleaf)
Definition: gistutil.c:575
bool gistproperty(Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull)
Definition: gistutil.c:942
Buffer gistNewBuffer(Relation r)
Definition: gistutil.c:824
bool gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size freespace)
Definition: gistutil.c:59
void gistfillbuffer(Page page, IndexTuple *itup, int len, OffsetNumber off)
Definition: gistutil.c:34
IndexTuple * gistextractpage(Page page, int *len)
Definition: gistutil.c:95
bool gistfitpage(IndexTuple *itvec, int len)
Definition: gistutil.c:79
IndexTuple gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
Definition: gistutil.c:316
OffsetNumber gistchoose(Relation r, Page p, IndexTuple it, GISTSTATE *giststate)
Definition: gistutil.c:374
XLogRecPtr gistGetFakeLSN(Relation rel)
Definition: gistutil.c:1025
IndexTuple * gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
Definition: gistutil.c:114
bytea * gistoptions(Datum reloptions, bool validate)
Definition: gistutil.c:921
void GISTInitBuffer(Buffer b, uint32 f)
Definition: gistutil.c:773
IndexTupleData * gistfillitupvec(IndexTuple *vec, int veclen, int *memlen)
Definition: gistutil.c:127
void gistcheckpage(Relation rel, Buffer buf)
Definition: gistutil.c:785
IndexBulkDeleteResult * gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: gistvacuum.c:59
IndexBulkDeleteResult * gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: gistvacuum.c:75
void gistadjustmembers(Oid opfamilyoid, Oid opclassoid, List *operators, List *functions)
Definition: gistvalidate.c:291
bool gistvalidate(Oid opclassoid)
Definition: gistvalidate.c:34
XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, TransactionId latestRemovedXid)
Definition: gistxlog.c:673
XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, IndexTuple *itup, int ituplen, Buffer leftchildbuf)
Definition: gistxlog.c:632
XLogRecPtr gistXLogSplit(bool page_is_leaf, SplitedPageLayout *dist, BlockNumber origrlink, GistNSN orignsn, Buffer leftchildbuf, bool markfollowright)
Definition: gistxlog.c:500
FmgrInfo * index_getprocinfo(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:803
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:769
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:528
int i
Definition: isn.c:73
Pointer Item
Definition: item.h:17
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define ItemPointerSetBlockNumber(pointer, blockNumber)
Definition: itemptr.h:138
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
#define MaxIndexTuplesPerPage
Definition: itup.h:144
Assert(fmt[strlen(fmt) - 1] !='\n')
List * lappend(List *list, void *datum)
Definition: list.c:336
List * list_delete_first(List *list)
Definition: list.c:902
List * lcons(void *datum, List *list)
Definition: list.c:474
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:143
void * palloc0(Size size)
Definition: mcxt.c:1099
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:218
void * palloc(Size size)
Definition: mcxt.c:1068
#define AllocSetContextCreate
Definition: memutils.h:173
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:197
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
#define makeNode(_type_)
Definition: nodes.h:621
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define INDEX_MAX_KEYS
const void size_t len
const void * data
static int list_length(const List *l)
Definition: pg_list.h:149
#define NIL
Definition: pg_list.h:65
#define list_make1(x1)
Definition: pg_list.h:206
#define linitial(l)
Definition: pg_list.h:174
#define lsecond(l)
Definition: pg_list.h:179
static void * list_nth(const List *list, int n)
Definition: pg_list.h:278
static char * buf
Definition: pg_test_fsync.c:67
void check_stack_depth(void)
Definition: postgres.c:3500
uintptr_t Datum
Definition: postgres.h:411
#define InvalidOid
Definition: postgres_ext.h:36
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3169
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4448
#define RelationGetRelationName(relation)
Definition: rel.h:523
#define RelationNeedsWAL(relation)
Definition: rel.h:613
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:508
@ INIT_FORKNUM
Definition: relpath.h:46
void gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:6989
Oid fn_oid
Definition: fmgr.h:59
BlockNumber blkno
Definition: gist_private.h:210
OffsetNumber downlinkoffnum
Definition: gist_private.h:228
struct GISTInsertStack * parent
Definition: gist_private.h:231
IndexTuple downlink
Definition: gist_private.h:422
FmgrInfo fetchFn[INDEX_MAX_KEYS]
Definition: gist_private.h:94
TupleDesc leafTupdesc
Definition: gist_private.h:80
TupleDesc nonLeafTupdesc
Definition: gist_private.h:81
FmgrInfo penaltyFn[INDEX_MAX_KEYS]
Definition: gist_private.h:90
MemoryContext tempCxt
Definition: gist_private.h:78
Oid supportCollation[INDEX_MAX_KEYS]
Definition: gist_private.h:97
FmgrInfo distanceFn[INDEX_MAX_KEYS]
Definition: gist_private.h:93
FmgrInfo consistentFn[INDEX_MAX_KEYS]
Definition: gist_private.h:86
MemoryContext scanCxt
Definition: gist_private.h:77
FmgrInfo decompressFn[INDEX_MAX_KEYS]
Definition: gist_private.h:89
FmgrInfo compressFn[INDEX_MAX_KEYS]
Definition: gist_private.h:88
FmgrInfo equalFn[INDEX_MAX_KEYS]
Definition: gist_private.h:92
FmgrInfo unionFn[INDEX_MAX_KEYS]
Definition: gist_private.h:87
FmgrInfo picksplitFn[INDEX_MAX_KEYS]
Definition: gist_private.h:91
int spl_nleft
Definition: gist.h:141
OffsetNumber * spl_right
Definition: gist.h:145
int spl_nright
Definition: gist.h:146
OffsetNumber * spl_left
Definition: gist.h:140
GIST_SPLITVEC splitVector
Definition: gist_private.h:237
Datum spl_lattr[INDEX_MAX_KEYS]
Definition: gist_private.h:239
bool spl_lisnull[INDEX_MAX_KEYS]
Definition: gist_private.h:241
Datum spl_rattr[INDEX_MAX_KEYS]
Definition: gist_private.h:243
bool spl_risnull[INDEX_MAX_KEYS]
Definition: gist_private.h:245
ambuildphasename_function ambuildphasename
Definition: amapi.h:270
ambuildempty_function ambuildempty
Definition: amapi.h:262
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:265
bool amclusterable
Definition: amapi.h:238
amoptions_function amoptions
Definition: amapi.h:268
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:282
amrestrpos_function amrestrpos
Definition: amapi.h:279
aminsert_function aminsert
Definition: amapi.h:263
amendscan_function amendscan
Definition: amapi.h:277
uint16 amoptsprocnum
Definition: amapi.h:218
amparallelrescan_function amparallelrescan
Definition: amapi.h:284
Oid amkeytype
Definition: amapi.h:252
bool ampredlocks
Definition: amapi.h:240
uint16 amsupport
Definition: amapi.h:216
amcostestimate_function amcostestimate
Definition: amapi.h:267
bool amcanorderbyop
Definition: amapi.h:222
amadjustmembers_function amadjustmembers
Definition: amapi.h:272
ambuild_function ambuild
Definition: amapi.h:261
bool amstorage
Definition: amapi.h:236
uint16 amstrategies
Definition: amapi.h:214
bool amoptionalkey
Definition: amapi.h:230
amgettuple_function amgettuple
Definition: amapi.h:275
amcanreturn_function amcanreturn
Definition: amapi.h:266
bool amcanunique
Definition: amapi.h:226
amgetbitmap_function amgetbitmap
Definition: amapi.h:276
amproperty_function amproperty
Definition: amapi.h:269
ambulkdelete_function ambulkdelete
Definition: amapi.h:264
bool amsearcharray
Definition: amapi.h:232
amvalidate_function amvalidate
Definition: amapi.h:271
ammarkpos_function ammarkpos
Definition: amapi.h:278
bool amcanmulticol
Definition: amapi.h:228
bool amusemaintenanceworkmem
Definition: amapi.h:246
ambeginscan_function ambeginscan
Definition: amapi.h:273
bool amcanparallel
Definition: amapi.h:242
amrescan_function amrescan
Definition: amapi.h:274
bool amcanorder
Definition: amapi.h:220
bool amhotblocking
Definition: amapi.h:248
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:283
uint8 amparallelvacuumoptions
Definition: amapi.h:250
bool amcanbackward
Definition: amapi.h:224
bool amcaninclude
Definition: amapi.h:244
bool amsearchnulls
Definition: amapi.h:234
void * ii_AmCache
Definition: execnodes.h:184
MemoryContext ii_Context
Definition: execnodes.h:185
ItemPointerData t_tid
Definition: itup.h:37
Definition: pg_list.h:51
gistxlogPage block
Definition: gist_private.h:193
IndexTupleData * list
Definition: gist_private.h:194
struct SplitedPageLayout * next
Definition: gist_private.h:200
BlockNumber blkno
Definition: gist_private.h:186
Definition: type.h:90
Definition: regguts.h:318
#define InvalidTransactionId
Definition: transam.h:31
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:151
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:47
#define VACUUM_OPTION_PARALLEL_COND_CLEANUP
Definition: vacuum.h:54
#define XLogStandbyInfoActive()
Definition: xlog.h:118
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1177
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition: xloginsert.c:176