gist.c
1 /*-------------------------------------------------------------------------
2  *
3  * gist.c
4  * interface routines for the postgres GiST index access method.
5  *
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * IDENTIFICATION
11  * src/backend/access/gist/gist.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/gist_private.h"
18 #include "access/gistscan.h"
19 #include "catalog/pg_collation.h"
20 #include "commands/vacuum.h"
21 #include "miscadmin.h"
22 #include "nodes/execnodes.h"
23 #include "storage/lmgr.h"
24 #include "storage/predicate.h"
25 #include "utils/builtins.h"
26 #include "utils/index_selfuncs.h"
27 #include "utils/memutils.h"
28 #include "utils/rel.h"
29 
30 /* non-export function prototypes */
31 static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
32 static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
33  GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
34 static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
35  GISTSTATE *giststate,
36  IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
37  Buffer leftchild, Buffer rightchild,
38  bool unlockbuf, bool unlockleftchild);
39 static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
40  GISTSTATE *giststate, List *splitinfo, bool unlockbuf);
41 static void gistprunepage(Relation rel, Page page, Buffer buffer,
42  Relation heapRel);
43 
44 
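/*
 * ROTATEDIST prepends a freshly zeroed SplitedPageLayout node to the list
 * 'd'; gistSplit() below uses it to build up the chain of pages produced
 * by a split.
 */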
45 #define ROTATEDIST(d) do { \
46  SplitedPageLayout *tmp=(SplitedPageLayout*)palloc0(sizeof(SplitedPageLayout)); \
47  tmp->block.blkno = InvalidBlockNumber; \
48  tmp->buffer = InvalidBuffer; \
49  tmp->next = (d); \
50  (d)=tmp; \
51 } while(0)
52 
53 
54 /*
55  * GiST handler function: return IndexAmRoutine with access method parameters
56  * and callbacks.
57  */
58 Datum
 59 gisthandler(PG_FUNCTION_ARGS)
 60 {
 61  IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);
 62 
63  amroutine->amstrategies = 0;
64  amroutine->amsupport = GISTNProcs;
65  amroutine->amcanorder = false;
66  amroutine->amcanorderbyop = true;
67  amroutine->amcanbackward = false;
68  amroutine->amcanunique = false;
69  amroutine->amcanmulticol = true;
70  amroutine->amoptionalkey = true;
71  amroutine->amsearcharray = false;
72  amroutine->amsearchnulls = true;
73  amroutine->amstorage = true;
74  amroutine->amclusterable = true;
75  amroutine->ampredlocks = true;
76  amroutine->amcanparallel = false;
77  amroutine->amcaninclude = true;
78  amroutine->amusemaintenanceworkmem = false;
79  amroutine->amparallelvacuumoptions =
 80  VACUUM_OPTION_PARALLEL_BULKDEL | VACUUM_OPTION_PARALLEL_COND_CLEANUP;
 81  amroutine->amkeytype = InvalidOid;
82 
83  amroutine->ambuild = gistbuild;
84  amroutine->ambuildempty = gistbuildempty;
85  amroutine->aminsert = gistinsert;
86  amroutine->ambulkdelete = gistbulkdelete;
 87  amroutine->amvacuumcleanup = gistvacuumcleanup;
 88  amroutine->amcanreturn = gistcanreturn;
89  amroutine->amcostestimate = gistcostestimate;
90  amroutine->amoptions = gistoptions;
91  amroutine->amproperty = gistproperty;
92  amroutine->ambuildphasename = NULL;
93  amroutine->amvalidate = gistvalidate;
94  amroutine->ambeginscan = gistbeginscan;
95  amroutine->amrescan = gistrescan;
96  amroutine->amgettuple = gistgettuple;
97  amroutine->amgetbitmap = gistgetbitmap;
98  amroutine->amendscan = gistendscan;
99  amroutine->ammarkpos = NULL;
100  amroutine->amrestrpos = NULL;
101  amroutine->amestimateparallelscan = NULL;
102  amroutine->aminitparallelscan = NULL;
103  amroutine->amparallelrescan = NULL;
104 
105  PG_RETURN_POINTER(amroutine);
106 }
107 
108 /*
109  * Create and return a temporary memory context for use by GiST. We
110  * _always_ invoke user-provided methods in a temporary memory
111  * context, so that memory leaks in those functions cannot cause
112  * problems. Also, we use some additional temporary contexts in the
113  * GiST code itself, to avoid the need to do some awkward manual
114  * memory management.
115  */
 116 MemoryContext
 117 createTempGistContext(void)
 118 {
 119  return AllocSetContextCreate(CurrentMemoryContext,
 120  "GiST temporary context",
 121  ALLOCSET_DEFAULT_SIZES);
 122 }
123 
124 /*
125  * gistbuildempty() -- build an empty gist index in the initialization fork
126  */
127 void
 128 gistbuildempty(Relation index)
 129 {
130  Buffer buffer;
131 
132  /* Initialize the root page */
133  buffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
 134  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 135 
 136  /* Initialize and xlog buffer */
 137  START_CRIT_SECTION();
 138  GISTInitBuffer(buffer, F_LEAF);
 139  MarkBufferDirty(buffer);
 140  log_newpage_buffer(buffer, true);
 141  END_CRIT_SECTION();
 142 
143  /* Unlock and release the buffer */
144  UnlockReleaseBuffer(buffer);
145 }
146 
147 /*
148  * gistinsert -- wrapper for GiST tuple insertion.
149  *
150  * This is the public interface routine for tuple insertion in GiSTs.
151  * It doesn't do any work; just locks the relation and passes the buck.
152  */
153 bool
154 gistinsert(Relation r, Datum *values, bool *isnull,
155  ItemPointer ht_ctid, Relation heapRel,
156  IndexUniqueCheck checkUnique,
157  IndexInfo *indexInfo)
158 {
159  GISTSTATE *giststate = (GISTSTATE *) indexInfo->ii_AmCache;
160  IndexTuple itup;
161  MemoryContext oldCxt;
162 
163  /* Initialize GISTSTATE cache if first call in this statement */
164  if (giststate == NULL)
165  {
166  oldCxt = MemoryContextSwitchTo(indexInfo->ii_Context);
167  giststate = initGISTstate(r);
168  giststate->tempCxt = createTempGistContext();
169  indexInfo->ii_AmCache = (void *) giststate;
170  MemoryContextSwitchTo(oldCxt);
171  }
172 
173  oldCxt = MemoryContextSwitchTo(giststate->tempCxt);
174 
175  itup = gistFormTuple(giststate, r,
176  values, isnull, true /* size is currently bogus */ );
177  itup->t_tid = *ht_ctid;
178 
179  gistdoinsert(r, itup, 0, giststate, heapRel, false);
180 
181  /* cleanup */
182  MemoryContextSwitchTo(oldCxt);
183  MemoryContextReset(giststate->tempCxt);
184 
185  return false;
186 }
187 
188 
189 /*
190  * Place tuples from 'itup' to 'buffer'. If 'oldoffnum' is valid, the tuple
191  * at that offset is atomically removed along with inserting the new tuples.
192  * This is used to replace a tuple with a new one.
193  *
194  * If 'leftchildbuf' is valid, we're inserting the downlink for the page
195  * to the right of 'leftchildbuf', or updating the downlink for 'leftchildbuf'.
196  * F_FOLLOW_RIGHT flag on 'leftchildbuf' is cleared and NSN is set.
197  *
198  * If 'markfollowright' is true and the page is split, the left child is
199  * marked with F_FOLLOW_RIGHT flag. That is the normal case. During buffered
200  * index build, however, there is no concurrent access and the page splitting
201  * is done in a slightly simpler fashion, and false is passed.
202  *
203  * If there is not enough room on the page, it is split. All the split
204  * pages are kept pinned and locked and returned in *splitinfo, the caller
205  * is responsible for inserting the downlinks for them. However, if
206  * 'buffer' is the root page and it needs to be split, gistplacetopage()
207  * performs the split as one atomic operation, and *splitinfo is set to NIL.
208  * In that case, we continue to hold the root page locked, and the child
209  * pages are released; note that new tuple(s) are *not* on the root page
210  * but in one of the new child pages.
211  *
212  * If 'newblkno' is not NULL, returns the block number of page the first
213  * new/updated tuple was inserted to. Usually it's the given page, but could
214  * be its right sibling if the page was split.
215  *
216  * Returns 'true' if the page was split, 'false' otherwise.
217  */
218 bool
219 gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
220  Buffer buffer,
221  IndexTuple *itup, int ntup, OffsetNumber oldoffnum,
222  BlockNumber *newblkno,
223  Buffer leftchildbuf,
224  List **splitinfo,
225  bool markfollowright,
226  Relation heapRel,
227  bool is_build)
228 {
 229  BlockNumber blkno = BufferGetBlockNumber(buffer);
 230  Page page = BufferGetPage(buffer);
231  bool is_leaf = (GistPageIsLeaf(page)) ? true : false;
232  XLogRecPtr recptr;
233  int i;
234  bool is_split;
235 
236  /*
237  * Refuse to modify a page that's incompletely split. This should not
238  * happen because we finish any incomplete splits while we walk down the
239  * tree. However, it's remotely possible that another concurrent inserter
240  * splits a parent page, and errors out before completing the split. We
241  * will just throw an error in that case, and leave any split we had in
242  * progress unfinished too. The next insert that comes along will clean up
243  * the mess.
244  */
245  if (GistFollowRight(page))
246  elog(ERROR, "concurrent GiST page split was incomplete");
247 
248  *splitinfo = NIL;
249 
250  /*
251  * if isupdate, remove old key: This node's key has been modified, either
252  * because a child split occurred or because we needed to adjust our key
253  * for an insert in a child node. Therefore, remove the old version of
254  * this node's key.
255  *
256  * for WAL replay, in the non-split case we handle this by setting up a
257  * one-element todelete array; in the split case, it's handled implicitly
258  * because the tuple vector passed to gistSplit won't include this tuple.
259  */
260  is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
261 
262  /*
263  * If leaf page is full, try at first to delete dead tuples. And then
264  * check again.
265  */
266  if (is_split && GistPageIsLeaf(page) && GistPageHasGarbage(page))
267  {
268  gistprunepage(rel, page, buffer, heapRel);
269  is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
270  }
271 
272  if (is_split)
273  {
274  /* no space for insertion */
275  IndexTuple *itvec;
276  int tlen;
277  SplitedPageLayout *dist = NULL,
278  *ptr;
279  BlockNumber oldrlink = InvalidBlockNumber;
280  GistNSN oldnsn = 0;
281  SplitedPageLayout rootpg;
282  bool is_rootsplit;
283  int npage;
284 
285  is_rootsplit = (blkno == GIST_ROOT_BLKNO);
286 
287  /*
288  * Form index tuples vector to split. If we're replacing an old tuple,
289  * remove the old version from the vector.
290  */
291  itvec = gistextractpage(page, &tlen);
292  if (OffsetNumberIsValid(oldoffnum))
293  {
294  /* on inner page we should remove old tuple */
295  int pos = oldoffnum - FirstOffsetNumber;
296 
297  tlen--;
298  if (pos != tlen)
299  memmove(itvec + pos, itvec + pos + 1, sizeof(IndexTuple) * (tlen - pos));
300  }
301  itvec = gistjoinvector(itvec, &tlen, itup, ntup);
302  dist = gistSplit(rel, page, itvec, tlen, giststate);
303 
304  /*
305  * Check that split didn't produce too many pages.
306  */
307  npage = 0;
308  for (ptr = dist; ptr; ptr = ptr->next)
309  npage++;
310  /* in a root split, we'll add one more page to the list below */
311  if (is_rootsplit)
312  npage++;
313  if (npage > GIST_MAX_SPLIT_PAGES)
314  elog(ERROR, "GiST page split into too many halves (%d, maximum %d)",
315  npage, GIST_MAX_SPLIT_PAGES);
316 
317  /*
318  * Set up pages to work with. Allocate new buffers for all but the
319  * leftmost page. The original page becomes the new leftmost page, and
320  * is just replaced with the new contents.
321  *
322  * For a root-split, allocate new buffers for all child pages, the
323  * original page is overwritten with new root page containing
324  * downlinks to the new child pages.
325  */
326  ptr = dist;
327  if (!is_rootsplit)
328  {
329  /* save old rightlink and NSN */
330  oldrlink = GistPageGetOpaque(page)->rightlink;
331  oldnsn = GistPageGetNSN(page);
332 
333  dist->buffer = buffer;
334  dist->block.blkno = BufferGetBlockNumber(buffer);
 335  dist->page = PageGetTempPageCopySpecial(page);
 336 
337  /* clean all flags except F_LEAF */
338  GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
339 
340  ptr = ptr->next;
341  }
342  for (; ptr; ptr = ptr->next)
343  {
344  /* Allocate new page */
345  ptr->buffer = gistNewBuffer(rel);
346  GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
347  ptr->page = BufferGetPage(ptr->buffer);
348  ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
 349  PredicateLockPageSplit(rel,
 350  BufferGetBlockNumber(buffer),
351  BufferGetBlockNumber(ptr->buffer));
352  }
353 
354  /*
355  * Now that we know which blocks the new pages go to, set up downlink
356  * tuples to point to them.
357  */
358  for (ptr = dist; ptr; ptr = ptr->next)
359  {
360  ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
361  GistTupleSetValid(ptr->itup);
362  }
363 
364  /*
365  * If this is a root split, we construct the new root page with the
366  * downlinks here directly, instead of requiring the caller to insert
367  * them. Add the new root page to the list along with the child pages.
368  */
369  if (is_rootsplit)
370  {
371  IndexTuple *downlinks;
372  int ndownlinks = 0;
373  int i;
374 
375  rootpg.buffer = buffer;
 376  rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer));
 377  GistPageGetOpaque(rootpg.page)->flags = 0;
378 
379  /* Prepare a vector of all the downlinks */
380  for (ptr = dist; ptr; ptr = ptr->next)
381  ndownlinks++;
382  downlinks = palloc(sizeof(IndexTuple) * ndownlinks);
383  for (i = 0, ptr = dist; ptr; ptr = ptr->next)
384  downlinks[i++] = ptr->itup;
385 
386  rootpg.block.blkno = GIST_ROOT_BLKNO;
387  rootpg.block.num = ndownlinks;
388  rootpg.list = gistfillitupvec(downlinks, ndownlinks,
389  &(rootpg.lenlist));
390  rootpg.itup = NULL;
391 
392  rootpg.next = dist;
393  dist = &rootpg;
394  }
395  else
396  {
397  /* Prepare split-info to be returned to caller */
398  for (ptr = dist; ptr; ptr = ptr->next)
399  {
 400  GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
 401 
402  si->buf = ptr->buffer;
403  si->downlink = ptr->itup;
404  *splitinfo = lappend(*splitinfo, si);
405  }
406  }
407 
408  /*
409  * Fill all pages. All the pages are new, ie. freshly allocated empty
410  * pages, or a temporary copy of the old page.
411  */
412  for (ptr = dist; ptr; ptr = ptr->next)
413  {
414  char *data = (char *) (ptr->list);
415 
416  for (i = 0; i < ptr->block.num; i++)
417  {
418  IndexTuple thistup = (IndexTuple) data;
419 
420  if (PageAddItem(ptr->page, (Item) data, IndexTupleSize(thistup), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
421  elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(rel));
422 
423  /*
424  * If this is the first inserted/updated tuple, let the caller
425  * know which page it landed on.
426  */
427  if (newblkno && ItemPointerEquals(&thistup->t_tid, &(*itup)->t_tid))
428  *newblkno = ptr->block.blkno;
429 
430  data += IndexTupleSize(thistup);
431  }
432 
433  /* Set up rightlinks */
434  if (ptr->next && ptr->block.blkno != GIST_ROOT_BLKNO)
435  GistPageGetOpaque(ptr->page)->rightlink =
436  ptr->next->block.blkno;
437  else
438  GistPageGetOpaque(ptr->page)->rightlink = oldrlink;
439 
440  /*
441  * Mark the all but the right-most page with the follow-right
442  * flag. It will be cleared as soon as the downlink is inserted
443  * into the parent, but this ensures that if we error out before
444  * that, the index is still consistent. (in buffering build mode,
445  * any error will abort the index build anyway, so this is not
446  * needed.)
447  */
448  if (ptr->next && !is_rootsplit && markfollowright)
449  GistMarkFollowRight(ptr->page);
450  else
451  GistClearFollowRight(ptr->page);
452 
453  /*
454  * Copy the NSN of the original page to all pages. The
455  * F_FOLLOW_RIGHT flags ensure that scans will follow the
456  * rightlinks until the downlinks are inserted.
457  */
458  GistPageSetNSN(ptr->page, oldnsn);
459  }
460 
461  /*
462  * gistXLogSplit() needs to WAL log a lot of pages, prepare WAL
463  * insertion for that. NB: The number of pages and data segments
464  * specified here must match the calculations in gistXLogSplit()!
465  */
466  if (!is_build && RelationNeedsWAL(rel))
467  XLogEnsureRecordSpace(npage, 1 + npage * 2);
468 
 469  START_CRIT_SECTION();
 470 
471  /*
472  * Must mark buffers dirty before XLogInsert, even though we'll still
473  * be changing their opaque fields below.
474  */
475  for (ptr = dist; ptr; ptr = ptr->next)
476  MarkBufferDirty(ptr->buffer);
477  if (BufferIsValid(leftchildbuf))
478  MarkBufferDirty(leftchildbuf);
479 
480  /*
481  * The first page in the chain was a temporary working copy meant to
482  * replace the old page. Copy it over the old page.
483  */
 484  PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
 485  dist->page = BufferGetPage(dist->buffer);
486 
487  /*
488  * Write the WAL record.
489  *
490  * If we're building a new index, however, we don't WAL-log changes
491  * yet. The LSN-NSN interlock between parent and child requires that
492  * LSNs never move backwards, so set the LSNs to a value that's
493  * smaller than any real or fake unlogged LSN that might be generated
494  * later. (There can't be any concurrent scans during index build, so
495  * we don't need to be able to detect concurrent splits yet.)
496  */
497  if (is_build)
498  recptr = GistBuildLSN;
499  else
500  {
501  if (RelationNeedsWAL(rel))
502  recptr = gistXLogSplit(is_leaf,
503  dist, oldrlink, oldnsn, leftchildbuf,
504  markfollowright);
505  else
506  recptr = gistGetFakeLSN(rel);
507  }
508 
509  for (ptr = dist; ptr; ptr = ptr->next)
510  PageSetLSN(ptr->page, recptr);
511 
512  /*
513  * Return the new child buffers to the caller.
514  *
515  * If this was a root split, we've already inserted the downlink
516  * pointers, in the form of a new root page. Therefore we can release
517  * all the new buffers, and keep just the root page locked.
518  */
519  if (is_rootsplit)
520  {
521  for (ptr = dist->next; ptr; ptr = ptr->next)
522  UnlockReleaseBuffer(ptr->buffer);
523  }
524  }
525  else
526  {
527  /*
528  * Enough space. We always get here if ntup==0.
529  */
 530  START_CRIT_SECTION();
 531 
532  /*
533  * Delete old tuple if any, then insert new tuple(s) if any. If
534  * possible, use the fast path of PageIndexTupleOverwrite.
535  */
536  if (OffsetNumberIsValid(oldoffnum))
537  {
538  if (ntup == 1)
539  {
540  /* One-for-one replacement, so use PageIndexTupleOverwrite */
541  if (!PageIndexTupleOverwrite(page, oldoffnum, (Item) *itup,
542  IndexTupleSize(*itup)))
543  elog(ERROR, "failed to add item to index page in \"%s\"",
 544  RelationGetRelationName(rel));
 545  }
546  else
547  {
548  /* Delete old, then append new tuple(s) to page */
549  PageIndexTupleDelete(page, oldoffnum);
550  gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
551  }
552  }
553  else
554  {
555  /* Just append new tuples at the end of the page */
556  gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
557  }
558 
559  MarkBufferDirty(buffer);
560 
561  if (BufferIsValid(leftchildbuf))
562  MarkBufferDirty(leftchildbuf);
563 
564  if (is_build)
565  recptr = GistBuildLSN;
566  else
567  {
568  if (RelationNeedsWAL(rel))
569  {
570  OffsetNumber ndeloffs = 0,
571  deloffs[1];
572 
573  if (OffsetNumberIsValid(oldoffnum))
574  {
575  deloffs[0] = oldoffnum;
576  ndeloffs = 1;
577  }
578 
579  recptr = gistXLogUpdate(buffer,
580  deloffs, ndeloffs, itup, ntup,
581  leftchildbuf);
582  }
583  else
584  recptr = gistGetFakeLSN(rel);
585  }
586  PageSetLSN(page, recptr);
587 
588  if (newblkno)
589  *newblkno = blkno;
590  }
591 
592  /*
593  * If we inserted the downlink for a child page, set NSN and clear
594  * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
595  * follow the rightlink if and only if they looked at the parent page
596  * before we inserted the downlink.
597  *
598  * Note that we do this *after* writing the WAL record. That means that
599  * the possible full page image in the WAL record does not include these
600  * changes, and they must be replayed even if the page is restored from
601  * the full page image. There's a chicken-and-egg problem: if we updated
602  * the child pages first, we wouldn't know the recptr of the WAL record
603  * we're about to write.
604  */
605  if (BufferIsValid(leftchildbuf))
606  {
607  Page leftpg = BufferGetPage(leftchildbuf);
608 
609  GistPageSetNSN(leftpg, recptr);
610  GistClearFollowRight(leftpg);
611 
612  PageSetLSN(leftpg, recptr);
613  }
614 
 615  END_CRIT_SECTION();
 616 
617  return is_split;
618 }
619 
620 /*
 621  * Workhorse routine for doing insertion into a GiST index. Note that
622  * this routine assumes it is invoked in a short-lived memory context,
623  * so it does not bother releasing palloc'd allocations.
624  */
625 void
 626 gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 627  GISTSTATE *giststate, Relation heapRel, bool is_build)
628 {
629  ItemId iid;
630  IndexTuple idxtuple;
631  GISTInsertStack firststack;
632  GISTInsertStack *stack;
 633  GISTInsertState state;
 634  bool xlocked = false;
635 
636  memset(&state, 0, sizeof(GISTInsertState));
637  state.freespace = freespace;
638  state.r = r;
639  state.heapRel = heapRel;
640  state.is_build = is_build;
641 
642  /* Start from the root */
643  firststack.blkno = GIST_ROOT_BLKNO;
644  firststack.lsn = 0;
645  firststack.retry_from_parent = false;
646  firststack.parent = NULL;
647  firststack.downlinkoffnum = InvalidOffsetNumber;
648  state.stack = stack = &firststack;
649 
650  /*
651  * Walk down along the path of smallest penalty, updating the parent
652  * pointers with the key we're inserting as we go. If we crash in the
653  * middle, the tree is consistent, although the possible parent updates
654  * were a waste.
655  */
656  for (;;)
657  {
658  /*
659  * If we split an internal page while descending the tree, we have to
660  * retry at the parent. (Normally, the LSN-NSN interlock below would
661  * also catch this and cause us to retry. But LSNs are not updated
662  * during index build.)
663  */
664  while (stack->retry_from_parent)
665  {
666  if (xlocked)
667  LockBuffer(stack->buffer, GIST_UNLOCK);
668  xlocked = false;
669  ReleaseBuffer(stack->buffer);
670  state.stack = stack = stack->parent;
671  }
672 
673  if (XLogRecPtrIsInvalid(stack->lsn))
674  stack->buffer = ReadBuffer(state.r, stack->blkno);
675 
676  /*
677  * Be optimistic and grab shared lock first. Swap it for an exclusive
678  * lock later if we need to update the page.
679  */
680  if (!xlocked)
681  {
682  LockBuffer(stack->buffer, GIST_SHARE);
683  gistcheckpage(state.r, stack->buffer);
684  }
685 
686  stack->page = (Page) BufferGetPage(stack->buffer);
687  stack->lsn = xlocked ?
688  PageGetLSN(stack->page) : BufferGetLSNAtomic(stack->buffer);
689  Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn));
690 
691  /*
692  * If this page was split but the downlink was never inserted to the
693  * parent because the inserting backend crashed before doing that, fix
694  * that now.
695  */
696  if (GistFollowRight(stack->page))
697  {
698  if (!xlocked)
699  {
700  LockBuffer(stack->buffer, GIST_UNLOCK);
 701  LockBuffer(stack->buffer, GIST_EXCLUSIVE);
 702  xlocked = true;
703  /* someone might've completed the split when we unlocked */
704  if (!GistFollowRight(stack->page))
705  continue;
706  }
707  gistfixsplit(&state, giststate);
708 
709  UnlockReleaseBuffer(stack->buffer);
710  xlocked = false;
711  state.stack = stack = stack->parent;
712  continue;
713  }
714 
715  if ((stack->blkno != GIST_ROOT_BLKNO &&
716  stack->parent->lsn < GistPageGetNSN(stack->page)) ||
717  GistPageIsDeleted(stack->page))
718  {
719  /*
720  * Concurrent split or page deletion detected. There's no
721  * guarantee that the downlink for this page is consistent with
722  * the tuple we're inserting anymore, so go back to parent and
723  * rechoose the best child.
724  */
725  UnlockReleaseBuffer(stack->buffer);
726  xlocked = false;
727  state.stack = stack = stack->parent;
728  continue;
729  }
730 
731  if (!GistPageIsLeaf(stack->page))
732  {
733  /*
734  * This is an internal page so continue to walk down the tree.
735  * Find the child node that has the minimum insertion penalty.
736  */
737  BlockNumber childblkno;
738  IndexTuple newtup;
739  GISTInsertStack *item;
740  OffsetNumber downlinkoffnum;
741 
742  downlinkoffnum = gistchoose(state.r, stack->page, itup, giststate);
743  iid = PageGetItemId(stack->page, downlinkoffnum);
744  idxtuple = (IndexTuple) PageGetItem(stack->page, iid);
745  childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
746 
747  /*
748  * Check that it's not a leftover invalid tuple from pre-9.1
749  */
750  if (GistTupleIsInvalid(idxtuple))
751  ereport(ERROR,
752  (errmsg("index \"%s\" contains an inner tuple marked as invalid",
 753  RelationGetRelationName(state.r)),
 754  errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),
755  errhint("Please REINDEX it.")));
756 
757  /*
758  * Check that the key representing the target child node is
759  * consistent with the key we're inserting. Update it if it's not.
760  */
761  newtup = gistgetadjusted(state.r, idxtuple, itup, giststate);
762  if (newtup)
763  {
764  /*
765  * Swap shared lock for an exclusive one. Beware, the page may
766  * change while we unlock/lock the page...
767  */
768  if (!xlocked)
769  {
770  LockBuffer(stack->buffer, GIST_UNLOCK);
 771  LockBuffer(stack->buffer, GIST_EXCLUSIVE);
 772  xlocked = true;
773  stack->page = (Page) BufferGetPage(stack->buffer);
774 
775  if (PageGetLSN(stack->page) != stack->lsn)
776  {
777  /* the page was changed while we unlocked it, retry */
778  continue;
779  }
780  }
781 
782  /*
783  * Update the tuple.
784  *
785  * We still hold the lock after gistinserttuple(), but it
786  * might have to split the page to make the updated tuple fit.
787  * In that case the updated tuple might migrate to the other
788  * half of the split, so we have to go back to the parent and
789  * descend back to the half that's a better fit for the new
790  * tuple.
791  */
792  if (gistinserttuple(&state, stack, giststate, newtup,
793  downlinkoffnum))
794  {
795  /*
796  * If this was a root split, the root page continues to be
797  * the parent and the updated tuple went to one of the
798  * child pages, so we just need to retry from the root
799  * page.
800  */
801  if (stack->blkno != GIST_ROOT_BLKNO)
802  {
803  UnlockReleaseBuffer(stack->buffer);
804  xlocked = false;
805  state.stack = stack = stack->parent;
806  }
807  continue;
808  }
809  }
810  LockBuffer(stack->buffer, GIST_UNLOCK);
811  xlocked = false;
812 
813  /* descend to the chosen child */
814  item = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
815  item->blkno = childblkno;
816  item->parent = stack;
817  item->downlinkoffnum = downlinkoffnum;
818  state.stack = stack = item;
819  }
820  else
821  {
822  /*
823  * Leaf page. Insert the new key. We've already updated all the
824  * parents on the way down, but we might have to split the page if
825  * it doesn't fit. gistinserttuple() will take care of that.
826  */
827 
828  /*
829  * Swap shared lock for an exclusive one. Be careful, the page may
830  * change while we unlock/lock the page...
831  */
832  if (!xlocked)
833  {
834  LockBuffer(stack->buffer, GIST_UNLOCK);
 835  LockBuffer(stack->buffer, GIST_EXCLUSIVE);
 836  xlocked = true;
837  stack->page = (Page) BufferGetPage(stack->buffer);
838  stack->lsn = PageGetLSN(stack->page);
839 
840  if (stack->blkno == GIST_ROOT_BLKNO)
841  {
842  /*
843  * the only page that can become inner instead of leaf is
844  * the root page, so for root we should recheck it
845  */
846  if (!GistPageIsLeaf(stack->page))
847  {
848  /*
 849  * very rare situation: while the page was unlocked, the
 850  * one-page index grew, so the root is no longer a leaf
851  */
852  LockBuffer(stack->buffer, GIST_UNLOCK);
853  xlocked = false;
854  continue;
855  }
856 
857  /*
858  * we don't need to check root split, because checking
859  * leaf/inner is enough to recognize split for root
860  */
861  }
 862  else if (GistFollowRight(stack->page) ||
 863  stack->parent->lsn < GistPageGetNSN(stack->page) ||
 864  GistPageIsDeleted(stack->page))
865  {
866  /*
867  * The page was split or deleted while we momentarily
868  * unlocked the page. Go back to parent.
869  */
870  UnlockReleaseBuffer(stack->buffer);
871  xlocked = false;
872  state.stack = stack = stack->parent;
873  continue;
874  }
875  }
876 
877  /* now state.stack->(page, buffer and blkno) points to leaf page */
878 
879  gistinserttuple(&state, stack, giststate, itup,
 880  InvalidOffsetNumber);
 881  LockBuffer(stack->buffer, GIST_UNLOCK);
882 
883  /* Release any pins we might still hold before exiting */
884  for (; stack; stack = stack->parent)
885  ReleaseBuffer(stack->buffer);
886  break;
887  }
888  }
889 }
890 
891 /*
892  * Traverse the tree to find path from root page to specified "child" block.
893  *
894  * returns a new insertion stack, starting from the parent of "child", up
895  * to the root. *downlinkoffnum is set to the offset of the downlink in the
896  * direct parent of child.
897  *
898  * To prevent deadlocks, this should lock only one page at a time.
899  */
900 static GISTInsertStack *
 901 gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
 902 {
903  Page page;
904  Buffer buffer;
905  OffsetNumber i,
906  maxoff;
907  ItemId iid;
908  IndexTuple idxtuple;
909  List *fifo;
910  GISTInsertStack *top,
911  *ptr;
 912  BlockNumber blkno;
 913 
914  top = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
915  top->blkno = GIST_ROOT_BLKNO;
 916  top->downlinkoffnum = InvalidOffsetNumber;
 917 
918  fifo = list_make1(top);
919  while (fifo != NIL)
920  {
921  /* Get next page to visit */
922  top = linitial(fifo);
923  fifo = list_delete_first(fifo);
924 
925  buffer = ReadBuffer(r, top->blkno);
926  LockBuffer(buffer, GIST_SHARE);
927  gistcheckpage(r, buffer);
928  page = (Page) BufferGetPage(buffer);
929 
930  if (GistPageIsLeaf(page))
931  {
932  /*
933  * Because we scan the index top-down, all the rest of the pages
934  * in the queue must be leaf pages as well.
935  */
936  UnlockReleaseBuffer(buffer);
937  break;
938  }
939 
940  /* currently, internal pages are never deleted */
941  Assert(!GistPageIsDeleted(page));
942 
943  top->lsn = BufferGetLSNAtomic(buffer);
944 
945  /*
946  * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a
947  * downlink. This should not normally happen..
948  */
949  if (GistFollowRight(page))
950  elog(ERROR, "concurrent GiST page split was incomplete");
951 
952  if (top->parent && top->parent->lsn < GistPageGetNSN(page) &&
953  GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
954  {
955  /*
956  * Page was split while we looked elsewhere. We didn't see the
957  * downlink to the right page when we scanned the parent, so add
958  * it to the queue now.
959  *
960  * Put the right page ahead of the queue, so that we visit it
961  * next. That's important, because if this is the lowest internal
962  * level, just above leaves, we might already have queued up some
963  * leaf pages, and we assume that there can't be any non-leaf
964  * pages behind leaf pages.
965  */
966  ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
967  ptr->blkno = GistPageGetOpaque(page)->rightlink;
 968  ptr->downlinkoffnum = InvalidOffsetNumber;
 969  ptr->parent = top->parent;
970 
971  fifo = lcons(ptr, fifo);
972  }
973 
974  maxoff = PageGetMaxOffsetNumber(page);
975 
976  for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
977  {
978  iid = PageGetItemId(page, i);
979  idxtuple = (IndexTuple) PageGetItem(page, iid);
980  blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
981  if (blkno == child)
982  {
983  /* Found it! */
984  UnlockReleaseBuffer(buffer);
985  *downlinkoffnum = i;
986  return top;
987  }
988  else
989  {
990  /* Append this child to the list of pages to visit later */
991  ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
992  ptr->blkno = blkno;
993  ptr->downlinkoffnum = i;
994  ptr->parent = top;
995 
996  fifo = lappend(fifo, ptr);
997  }
998  }
999 
1000  UnlockReleaseBuffer(buffer);
1001  }
1002 
1003  elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
1004  RelationGetRelationName(r), child);
1005  return NULL; /* keep compiler quiet */
1006 }
1007 
1008 /*
1009  * Updates the stack so that child->parent is the correct parent of the
1010  * child. child->parent must be exclusively locked on entry, and will
1011  * remain so at exit, but it might not be the same page anymore.
1012  */
1013 static void
 1014 gistFindCorrectParent(Relation r, GISTInsertStack *child)
 1015 {
1016  GISTInsertStack *parent = child->parent;
1017 
1018  gistcheckpage(r, parent->buffer);
1019  parent->page = (Page) BufferGetPage(parent->buffer);
1020 
1021  /* here we don't need to distinguish between split and page update */
1022  if (child->downlinkoffnum == InvalidOffsetNumber ||
1023  parent->lsn != PageGetLSN(parent->page))
1024  {
 1025  /* parent has changed; follow the rightlinks until the child is found */
1026  OffsetNumber i,
1027  maxoff;
1028  ItemId iid;
1029  IndexTuple idxtuple;
1030  GISTInsertStack *ptr;
1031 
1032  while (true)
1033  {
1034  maxoff = PageGetMaxOffsetNumber(parent->page);
1035  for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
1036  {
1037  iid = PageGetItemId(parent->page, i);
1038  idxtuple = (IndexTuple) PageGetItem(parent->page, iid);
1039  if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == child->blkno)
1040  {
1041  /* yes!!, found */
1042  child->downlinkoffnum = i;
1043  return;
1044  }
1045  }
1046 
1047  parent->blkno = GistPageGetOpaque(parent->page)->rightlink;
1048  UnlockReleaseBuffer(parent->buffer);
1049  if (parent->blkno == InvalidBlockNumber)
1050  {
1051  /*
 1052  * End of chain and still didn't find parent. It's a very rare
 1053  * situation that can happen when the root was split.
1054  */
1055  break;
1056  }
1057  parent->buffer = ReadBuffer(r, parent->blkno);
1058  LockBuffer(parent->buffer, GIST_EXCLUSIVE);
1059  gistcheckpage(r, parent->buffer);
1060  parent->page = (Page) BufferGetPage(parent->buffer);
1061  }
1062 
1063  /*
 1064  * Awful: we need to search the tree to find the parent, but before
 1065  * doing that we must release all the old parent buffers.
1066  */
1067 
1068  ptr = child->parent->parent; /* child->parent already released
1069  * above */
1070  while (ptr)
1071  {
1072  ReleaseBuffer(ptr->buffer);
1073  ptr = ptr->parent;
1074  }
1075 
1076  /* ok, find new path */
1077  ptr = parent = gistFindPath(r, child->blkno, &child->downlinkoffnum);
1078 
1079  /* read all buffers as expected by caller */
1080  /* note we don't lock them or gistcheckpage them here! */
1081  while (ptr)
1082  {
1083  ptr->buffer = ReadBuffer(r, ptr->blkno);
1084  ptr->page = (Page) BufferGetPage(ptr->buffer);
1085  ptr = ptr->parent;
1086  }
1087 
1088  /* install new chain of parents to stack */
1089  child->parent = parent;
1090 
1091  /* make recursive call to normal processing */
 1092  LockBuffer(child->parent->buffer, GIST_EXCLUSIVE);
 1093  gistFindCorrectParent(r, child);
1094  }
1095 }
1096 
1097 /*
1098  * Form a downlink pointer for the page in 'buf'.
1099  */
1100 static IndexTuple
 1101 gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
 1102  GISTInsertStack *stack)
1103 {
1104  Page page = BufferGetPage(buf);
1105  OffsetNumber maxoff;
1106  OffsetNumber offset;
1107  IndexTuple downlink = NULL;
1108 
1109  maxoff = PageGetMaxOffsetNumber(page);
1110  for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
1111  {
1112  IndexTuple ituple = (IndexTuple)
1113  PageGetItem(page, PageGetItemId(page, offset));
1114 
1115  if (downlink == NULL)
1116  downlink = CopyIndexTuple(ituple);
1117  else
1118  {
1119  IndexTuple newdownlink;
1120 
1121  newdownlink = gistgetadjusted(rel, downlink, ituple,
1122  giststate);
1123  if (newdownlink)
1124  downlink = newdownlink;
1125  }
1126  }
1127 
1128  /*
1129  * If the page is completely empty, we can't form a meaningful downlink
1130  * for it. But we have to insert a downlink for the page. Any key will do,
 1131  * as long as it's consistent with the downlink of the parent page, so that we
1132  * can legally insert it to the parent. A minimal one that matches as few
1133  * scans as possible would be best, to keep scans from doing useless work,
1134  * but we don't know how to construct that. So we just use the downlink of
1135  * the original page that was split - that's as far from optimal as it can
1136  * get but will do..
1137  */
1138  if (!downlink)
1139  {
1140  ItemId iid;
1141 
 1142  LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
 1143  gistFindCorrectParent(rel, stack);
1144  iid = PageGetItemId(stack->parent->page, stack->downlinkoffnum);
1145  downlink = (IndexTuple) PageGetItem(stack->parent->page, iid);
1146  downlink = CopyIndexTuple(downlink);
1147  LockBuffer(stack->parent->buffer, GIST_UNLOCK);
1148  }
1149 
 1150  ItemPointerSetBlockNumber(&(downlink->t_tid), BufferGetBlockNumber(buf));
 1151  GistTupleSetValid(downlink);
1152 
1153  return downlink;
1154 }
1155 
1156 
1157 /*
1158  * Complete the incomplete split of state->stack->page.
1159  */
1160 static void
 1161 gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
 1162 {
1163  GISTInsertStack *stack = state->stack;
1164  Buffer buf;
1165  Page page;
1166  List *splitinfo = NIL;
1167 
1168  elog(LOG, "fixing incomplete split in index \"%s\", block %u",
1169  RelationGetRelationName(state->r), stack->blkno);
1170 
1171  Assert(GistFollowRight(stack->page));
 1172  Assert(OffsetNumberIsValid(stack->downlinkoffnum));
 1173 
1174  buf = stack->buffer;
1175 
1176  /*
1177  * Read the chain of split pages, following the rightlinks. Construct a
1178  * downlink tuple for each page.
1179  */
1180  for (;;)
1181  {
 1182  GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
 1183  IndexTuple downlink;
1184 
1185  page = BufferGetPage(buf);
1186 
1187  /* Form the new downlink tuples to insert to parent */
1188  downlink = gistformdownlink(state->r, buf, giststate, stack);
1189 
1190  si->buf = buf;
1191  si->downlink = downlink;
1192 
1193  splitinfo = lappend(splitinfo, si);
1194 
1195  if (GistFollowRight(page))
1196  {
1197  /* lock next page */
1198  buf = ReadBuffer(state->r, GistPageGetOpaque(page)->rightlink);
1199  LockBuffer(buf, GIST_EXCLUSIVE);
1200  }
1201  else
1202  break;
1203  }
1204 
1205  /* Insert the downlinks */
1206  gistfinishsplit(state, stack, giststate, splitinfo, false);
1207 }
1208 
1209 /*
1210  * Insert or replace a tuple in stack->buffer. If 'oldoffnum' is valid, the
1211  * tuple at 'oldoffnum' is replaced, otherwise the tuple is inserted as new.
1212  * 'stack' represents the path from the root to the page being updated.
1213  *
1214  * The caller must hold an exclusive lock on stack->buffer. The lock is still
1215  * held on return, but the page might not contain the inserted tuple if the
1216  * page was split. The function returns true if the page was split, false
1217  * otherwise.
1218  */
1219 static bool
 1220 gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
 1221  GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
1222 {
1223  return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
1224  InvalidBuffer, InvalidBuffer, false, false);
1225 }
1226 
1227 /* ----------------
1228  * An extended workhorse version of gistinserttuple(). This version allows
1229  * inserting multiple tuples, or replacing a single tuple with multiple tuples.
1230  * This is used to recursively update the downlinks in the parent when a page
1231  * is split.
1232  *
1233  * If leftchild and rightchild are valid, we're inserting/replacing the
1234  * downlink for rightchild, and leftchild is its left sibling. We clear the
1235  * F_FOLLOW_RIGHT flag and update NSN on leftchild, atomically with the
1236  * insertion of the downlink.
1237  *
1238  * To avoid holding locks for longer than necessary, when recursing up the
1239  * tree to update the parents, the locking is a bit peculiar here. On entry,
1240  * the caller must hold an exclusive lock on stack->buffer, as well as
1241  * leftchild and rightchild if given. On return:
1242  *
1243  * - Lock on stack->buffer is released, if 'unlockbuf' is true. The page is
1244  * always kept pinned, however.
1245  * - Lock on 'leftchild' is released, if 'unlockleftchild' is true. The page
1246  * is kept pinned.
1247  * - Lock and pin on 'rightchild' are always released.
1248  *
1249  * Returns 'true' if the page had to be split. Note that if the page was
1250  * split, the inserted/updated tuples might've been inserted to a right
1251  * sibling of stack->buffer instead of stack->buffer itself.
1252  */
1253 static bool
 1254 gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 1255  GISTSTATE *giststate,
 1256  IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
 1257  Buffer leftchild, Buffer rightchild,
 1258  bool unlockbuf, bool unlockleftchild)
1259 {
1260  List *splitinfo;
1261  bool is_split;
1262 
1263  /*
1264  * Check for any rw conflicts (in serializable isolation level) just
1265  * before we intend to modify the page
1266  */
 1267  CheckForSerializableConflictIn(state->r, NULL, BufferGetBlockNumber(stack->buffer));
 1268 
1269  /* Insert the tuple(s) to the page, splitting the page if necessary */
1270  is_split = gistplacetopage(state->r, state->freespace, giststate,
1271  stack->buffer,
1272  tuples, ntup,
1273  oldoffnum, NULL,
1274  leftchild,
1275  &splitinfo,
1276  true,
1277  state->heapRel,
1278  state->is_build);
1279 
1280  /*
1281  * Before recursing up in case the page was split, release locks on the
1282  * child pages. We don't need to keep them locked when updating the
1283  * parent.
1284  */
1285  if (BufferIsValid(rightchild))
1286  UnlockReleaseBuffer(rightchild);
1287  if (BufferIsValid(leftchild) && unlockleftchild)
1288  LockBuffer(leftchild, GIST_UNLOCK);
1289 
1290  /*
1291  * If we had to split, insert/update the downlinks in the parent. If the
1292  * caller requested us to release the lock on stack->buffer, tell
1293  * gistfinishsplit() to do that as soon as it's safe to do so. If we
1294  * didn't have to split, release it ourselves.
1295  */
1296  if (splitinfo)
1297  gistfinishsplit(state, stack, giststate, splitinfo, unlockbuf);
1298  else if (unlockbuf)
1299  LockBuffer(stack->buffer, GIST_UNLOCK);
1300 
1301  return is_split;
1302 }
1303 
1304 /*
1305  * Finish an incomplete split by inserting/updating the downlinks in parent
1306  * page. 'splitinfo' contains all the child pages involved in the split,
1307  * from left-to-right.
1308  *
1309  * On entry, the caller must hold a lock on stack->buffer and all the child
1310  * pages in 'splitinfo'. If 'unlockbuf' is true, the lock on stack->buffer is
1311  * released on return. The child pages are always unlocked and unpinned.
1312  */
1313 static void
 1314 gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
 1315  GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
1316 {
1317  GISTPageSplitInfo *right;
1318  GISTPageSplitInfo *left;
1319  IndexTuple tuples[2];
1320 
1321  /* A split always contains at least two halves */
1322  Assert(list_length(splitinfo) >= 2);
1323 
1324  /*
1325  * We need to insert downlinks for each new page, and update the downlink
1326  * for the original (leftmost) page in the split. Begin at the rightmost
1327  * page, inserting one downlink at a time until there's only two pages
1328  * left. Finally insert the downlink for the last new page and update the
1329  * downlink for the original page as one operation.
1330  */
 1331  LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
 1332 
1333  /*
1334  * Insert downlinks for the siblings from right to left, until there are
1335  * only two siblings left.
1336  */
1337  for (int pos = list_length(splitinfo) - 1; pos > 1; pos--)
1338  {
1339  right = (GISTPageSplitInfo *) list_nth(splitinfo, pos);
1340  left = (GISTPageSplitInfo *) list_nth(splitinfo, pos - 1);
1341 
1342  gistFindCorrectParent(state->r, stack);
1343  if (gistinserttuples(state, stack->parent, giststate,
1344  &right->downlink, 1,
 1345  InvalidOffsetNumber,
 1346  left->buf, right->buf, false, false))
1347  {
1348  /*
1349  * If the parent page was split, the existing downlink might
1350  * have moved.
1351  */
 1352  stack->downlinkoffnum = InvalidOffsetNumber;
 1353  }
1354  /* gistinserttuples() released the lock on right->buf. */
1355  }
1356 
1357  right = (GISTPageSplitInfo *) lsecond(splitinfo);
1358  left = (GISTPageSplitInfo *) linitial(splitinfo);
1359 
1360  /*
1361  * Finally insert downlink for the remaining right page and update the
1362  * downlink for the original page to not contain the tuples that were
1363  * moved to the new pages.
1364  */
1365  tuples[0] = left->downlink;
1366  tuples[1] = right->downlink;
1367  gistFindCorrectParent(state->r, stack);
1368  if (gistinserttuples(state, stack->parent, giststate,
1369  tuples, 2,
1370  stack->downlinkoffnum,
1371  left->buf, right->buf,
1372  true, /* Unlock parent */
1373  unlockbuf /* Unlock stack->buffer if caller wants that */
1374  ))
1375  {
1376  /*
1377  * If the parent page was split, the downlink might have moved.
1378  */
 1379  stack->downlinkoffnum = InvalidOffsetNumber;
 1380  }
 1381 
1382  Assert(left->buf == stack->buffer);
1383 
1384  /*
1385  * If we split the page because we had to adjust the downlink on an
1386  * internal page, while descending the tree for inserting a new tuple,
1387  * then this might no longer be the correct page for the new tuple. The
1388  * downlink to this page might not cover the new tuple anymore, it might
1389  * need to go to the newly-created right sibling instead. Tell the caller
1390  * to walk back up the stack, to re-check at the parent which page to
1391  * insert to.
1392  *
1393  * Normally, the LSN-NSN interlock during the tree descend would also
1394  * detect that a concurrent split happened (by ourselves), and cause us to
1395  * retry at the parent. But that mechanism doesn't work during index
1396  * build, because we don't do WAL-logging, and don't update LSNs, during
1397  * index build.
1398  */
1399  stack->retry_from_parent = true;
1400 }
1401 
1402 /*
1403  * gistSplit -- split a page in the tree and fill struct
 1404  * used for XLOG and actual buffer writes. The function is recursive:
 1405  * it keeps splitting until the keys fit on every page.
1406  */
 1407 SplitedPageLayout *
 1408 gistSplit(Relation r,
 1409  Page page,
1410  IndexTuple *itup, /* contains compressed entry */
1411  int len,
1412  GISTSTATE *giststate)
1413 {
1414  IndexTuple *lvectup,
1415  *rvectup;
1416  GistSplitVector v;
1417  int i;
1418  SplitedPageLayout *res = NULL;
1419 
1420  /* this should never recurse very deeply, but better safe than sorry */
 1421  check_stack_depth();
 1422 
1423  /* there's no point in splitting an empty page */
1424  Assert(len > 0);
1425 
1426  /*
1427  * If a single tuple doesn't fit on a page, no amount of splitting will
1428  * help.
1429  */
1430  if (len == 1)
1431  ereport(ERROR,
1432  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
1433  errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
1434  IndexTupleSize(itup[0]), GiSTPageSize,
 1435  RelationGetRelationName(r))));
 1436 
1437  memset(v.spl_lisnull, true,
1438  sizeof(bool) * giststate->nonLeafTupdesc->natts);
1439  memset(v.spl_risnull, true,
1440  sizeof(bool) * giststate->nonLeafTupdesc->natts);
1441  gistSplitByKey(r, page, itup, len, giststate, &v, 0);
1442 
1443  /* form left and right vector */
1444  lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
1445  rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
1446 
1447  for (i = 0; i < v.splitVector.spl_nleft; i++)
1448  lvectup[i] = itup[v.splitVector.spl_left[i] - 1];
1449 
1450  for (i = 0; i < v.splitVector.spl_nright; i++)
1451  rvectup[i] = itup[v.splitVector.spl_right[i] - 1];
1452 
1453  /* finalize splitting (may need another split) */
1454  if (!gistfitpage(rvectup, v.splitVector.spl_nright))
1455  {
1456  res = gistSplit(r, page, rvectup, v.splitVector.spl_nright, giststate);
1457  }
1458  else
1459  {
1460  ROTATEDIST(res);
1461  res->block.num = v.splitVector.spl_nright;
1462  res->list = gistfillitupvec(rvectup, v.splitVector.spl_nright, &(res->lenlist));
1463  res->itup = gistFormTuple(giststate, r, v.spl_rattr, v.spl_risnull, false);
1464  }
1465 
1466  if (!gistfitpage(lvectup, v.splitVector.spl_nleft))
1467  {
1468  SplitedPageLayout *resptr,
1469  *subres;
1470 
1471  resptr = subres = gistSplit(r, page, lvectup, v.splitVector.spl_nleft, giststate);
1472 
1473  /* install on list's tail */
1474  while (resptr->next)
1475  resptr = resptr->next;
1476 
1477  resptr->next = res;
1478  res = subres;
1479  }
1480  else
1481  {
1482  ROTATEDIST(res);
1483  res->block.num = v.splitVector.spl_nleft;
1484  res->list = gistfillitupvec(lvectup, v.splitVector.spl_nleft, &(res->lenlist));
1485  res->itup = gistFormTuple(giststate, r, v.spl_lattr, v.spl_lisnull, false);
1486  }
1487 
1488  return res;
1489 }
1490 
1491 /*
1492  * Create a GISTSTATE and fill it with information about the index
1493  */
1494 GISTSTATE *
 1495 initGISTstate(Relation index)
 1496 {
1497  GISTSTATE *giststate;
1498  MemoryContext scanCxt;
1499  MemoryContext oldCxt;
1500  int i;
1501 
1502  /* safety check to protect fixed-size arrays in GISTSTATE */
1503  if (index->rd_att->natts > INDEX_MAX_KEYS)
1504  elog(ERROR, "numberOfAttributes %d > %d",
1505  index->rd_att->natts, INDEX_MAX_KEYS);
1506 
1507  /* Create the memory context that will hold the GISTSTATE */
 1508  scanCxt = AllocSetContextCreate(CurrentMemoryContext,
 1509  "GiST scan context",
 1510  ALLOCSET_DEFAULT_SIZES);
 1511  oldCxt = MemoryContextSwitchTo(scanCxt);
1512 
1513  /* Create and fill in the GISTSTATE */
1514  giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
1515 
1516  giststate->scanCxt = scanCxt;
1517  giststate->tempCxt = scanCxt; /* caller must change this if needed */
1518  giststate->leafTupdesc = index->rd_att;
1519 
1520  /*
1521  * The truncated tupdesc for non-leaf index tuples, which doesn't contain
1522  * the INCLUDE attributes.
1523  *
1524  * It is used to form tuples during tuple adjustment and page split.
1525  * B-tree creates shortened tuple descriptor for every truncated tuple,
1526  * because it is doing this less often: it does not have to form truncated
1527  * tuples during page split. Also, B-tree is not adjusting tuples on
1528  * internal pages the way GiST does.
1529  */
1530  giststate->nonLeafTupdesc = CreateTupleDescCopyConstr(index->rd_att);
1531  giststate->nonLeafTupdesc->natts =
 1532  IndexRelationGetNumberOfKeyAttributes(index);
 1533 
1534  for (i = 0; i < IndexRelationGetNumberOfKeyAttributes(index); i++)
1535  {
1536  fmgr_info_copy(&(giststate->consistentFn[i]),
1537  index_getprocinfo(index, i + 1, GIST_CONSISTENT_PROC),
1538  scanCxt);
1539  fmgr_info_copy(&(giststate->unionFn[i]),
1540  index_getprocinfo(index, i + 1, GIST_UNION_PROC),
1541  scanCxt);
1542 
1543  /* opclasses are not required to provide a Compress method */
1544  if (OidIsValid(index_getprocid(index, i + 1, GIST_COMPRESS_PROC)))
1545  fmgr_info_copy(&(giststate->compressFn[i]),
1546  index_getprocinfo(index, i + 1, GIST_COMPRESS_PROC),
1547  scanCxt);
1548  else
1549  giststate->compressFn[i].fn_oid = InvalidOid;
1550 
1551  /* opclasses are not required to provide a Decompress method */
1552  if (OidIsValid(index_getprocid(index, i + 1, GIST_DECOMPRESS_PROC)))
1553  fmgr_info_copy(&(giststate->decompressFn[i]),
1554  index_getprocinfo(index, i + 1, GIST_DECOMPRESS_PROC),
1555  scanCxt);
1556  else
1557  giststate->decompressFn[i].fn_oid = InvalidOid;
1558 
1559  fmgr_info_copy(&(giststate->penaltyFn[i]),
1560  index_getprocinfo(index, i + 1, GIST_PENALTY_PROC),
1561  scanCxt);
1562  fmgr_info_copy(&(giststate->picksplitFn[i]),
1563  index_getprocinfo(index, i + 1, GIST_PICKSPLIT_PROC),
1564  scanCxt);
1565  fmgr_info_copy(&(giststate->equalFn[i]),
1566  index_getprocinfo(index, i + 1, GIST_EQUAL_PROC),
1567  scanCxt);
1568 
1569  /* opclasses are not required to provide a Distance method */
1570  if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC)))
1571  fmgr_info_copy(&(giststate->distanceFn[i]),
1572  index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
1573  scanCxt);
1574  else
1575  giststate->distanceFn[i].fn_oid = InvalidOid;
1576 
1577  /* opclasses are not required to provide a Fetch method */
1578  if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
1579  fmgr_info_copy(&(giststate->fetchFn[i]),
1580  index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
1581  scanCxt);
1582  else
1583  giststate->fetchFn[i].fn_oid = InvalidOid;
1584 
1585  /*
1586  * If the index column has a specified collation, we should honor that
1587  * while doing comparisons. However, we may have a collatable storage
1588  * type for a noncollatable indexed data type. If there's no index
1589  * collation then specify default collation in case the support
1590  * functions need collation. This is harmless if the support
1591  * functions don't care about collation, so we just do it
1592  * unconditionally. (We could alternatively call get_typcollation,
1593  * but that seems like expensive overkill --- there aren't going to be
1594  * any cases where a GiST storage type has a nondefault collation.)
1595  */
1596  if (OidIsValid(index->rd_indcollation[i]))
1597  giststate->supportCollation[i] = index->rd_indcollation[i];
1598  else
1599  giststate->supportCollation[i] = DEFAULT_COLLATION_OID;
1600  }
1601 
1602  /* No opclass information for INCLUDE attributes */
1603  for (; i < index->rd_att->natts; i++)
1604  {
1605  giststate->consistentFn[i].fn_oid = InvalidOid;
1606  giststate->unionFn[i].fn_oid = InvalidOid;
1607  giststate->compressFn[i].fn_oid = InvalidOid;
1608  giststate->decompressFn[i].fn_oid = InvalidOid;
1609  giststate->penaltyFn[i].fn_oid = InvalidOid;
1610  giststate->picksplitFn[i].fn_oid = InvalidOid;
1611  giststate->equalFn[i].fn_oid = InvalidOid;
1612  giststate->distanceFn[i].fn_oid = InvalidOid;
1613  giststate->fetchFn[i].fn_oid = InvalidOid;
1614  giststate->supportCollation[i] = InvalidOid;
1615  }
1616 
1617  MemoryContextSwitchTo(oldCxt);
1618 
1619  return giststate;
1620 }
1621 
1622 void
 1623 freeGISTstate(GISTSTATE *giststate)
 1624 {
1625  /* It's sufficient to delete the scanCxt */
1626  MemoryContextDelete(giststate->scanCxt);
1627 }
1628 
1629 /*
1630  * gistprunepage() -- try to remove LP_DEAD items from the given page.
1631  * Function assumes that buffer is exclusively locked.
1632  */
1633 static void
1634 gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel)
1635 {
 1636  OffsetNumber deletable[MaxIndexTuplesPerPage];
 1637  int ndeletable = 0;
1638  OffsetNumber offnum,
1639  maxoff;
1640  TransactionId latestRemovedXid = InvalidTransactionId;
1641 
1642  Assert(GistPageIsLeaf(page));
1643 
1644  /*
1645  * Scan over all items to see which ones need to be deleted according to
1646  * LP_DEAD flags.
1647  */
1648  maxoff = PageGetMaxOffsetNumber(page);
1649  for (offnum = FirstOffsetNumber;
1650  offnum <= maxoff;
1651  offnum = OffsetNumberNext(offnum))
1652  {
1653  ItemId itemId = PageGetItemId(page, offnum);
1654 
1655  if (ItemIdIsDead(itemId))
1656  deletable[ndeletable++] = offnum;
1657  }
1658 
 1659  if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
 1660  latestRemovedXid =
1661  index_compute_xid_horizon_for_tuples(rel, heapRel, buffer,
1662  deletable, ndeletable);
1663 
1664  if (ndeletable > 0)
1665  {
 1666  START_CRIT_SECTION();
 1667 
1668  PageIndexMultiDelete(page, deletable, ndeletable);
1669 
1670  /*
1671  * Mark the page as not containing any LP_DEAD items. This is not
1672  * certainly true (there might be some that have recently been marked,
1673  * but weren't included in our target-item list), but it will almost
1674  * always be true and it doesn't seem worth an additional page scan to
1675  * check it. Remember that F_HAS_GARBAGE is only a hint anyway.
1676  */
 1677  GistClearPageHasGarbage(page);
 1678 
1679  MarkBufferDirty(buffer);
1680 
1681  /* XLOG stuff */
1682  if (RelationNeedsWAL(rel))
1683  {
1684  XLogRecPtr recptr;
1685 
1686  recptr = gistXLogDelete(buffer,
1687  deletable, ndeletable,
1688  latestRemovedXid);
1689 
1690  PageSetLSN(page, recptr);
1691  }
1692  else
1693  PageSetLSN(page, gistGetFakeLSN(rel));
1694 
1695  END_CRIT_SECTION();
1696  }
1697 
1698  /*
1699  * Note: if we didn't find any LP_DEAD items, then the page's
1700  * F_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
1701  * separate write to clear it, however. We will clear it when we split
1702  * the page.
1703  */
1704 }
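
For orientation, here is a rough, self-contained sketch of how the entry points above are typically driven for a single tuple insertion. It simply mirrors what gistinsert() does; the wrapper name (gist_insert_one_sketch) is hypothetical, and creating and freeing the GISTSTATE on every call is a simplification, since gistinsert() caches it in indexInfo->ii_AmCache instead.

#include "postgres.h"

#include "access/gist_private.h"
#include "utils/memutils.h"

/* hypothetical helper, mirroring gistinsert(): insert one tuple into a GiST index */
static void
gist_insert_one_sketch(Relation indexRel, Relation heapRel,
                       Datum *values, bool *isnull, ItemPointer heap_tid)
{
    GISTSTATE  *giststate = initGISTstate(indexRel);
    MemoryContext oldCxt;
    IndexTuple  itup;

    /* user-provided opclass methods always run in a short-lived context */
    giststate->tempCxt = createTempGistContext();
    oldCxt = MemoryContextSwitchTo(giststate->tempCxt);

    /* build the index tuple and point it at the heap row */
    itup = gistFormTuple(giststate, indexRel, values, isnull, true);
    itup->t_tid = *heap_tid;

    /* walk down the tree and place the tuple (0 = no extra free space reserved) */
    gistdoinsert(indexRel, itup, 0, giststate, heapRel, false);

    /* clean up the temporary context and the GISTSTATE */
    MemoryContextSwitchTo(oldCxt);
    MemoryContextDelete(giststate->tempCxt);
    freeGISTstate(giststate);
}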
Definition: gistxlog.c:518
IndexTuple * gistextractpage(Page page, int *len)
Definition: gistutil.c:94
amgetbitmap_function amgetbitmap
Definition: amapi.h:228
FmgrInfo consistentFn[INDEX_MAX_KEYS]
Definition: gist_private.h:86
#define linitial(l)
Definition: pg_list.h:195
#define GIST_PICKSPLIT_PROC
Definition: gist.h:34
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3398
Oid * rd_indcollation
Definition: rel.h:174
#define ERROR
Definition: elog.h:43
static IndexTuple gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, GISTInsertStack *stack)
Definition: gist.c:1101
ambuild_function ambuild
Definition: amapi.h:214
#define GIST_COMPRESS_PROC
Definition: gist.h:31
#define GIST_MAX_SPLIT_PAGES
Definition: gist_private.h:39
amoptions_function amoptions
Definition: amapi.h:221
static void * list_nth(const List *list, int n)
Definition: pg_list.h:277
void gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate, Relation heapRel, bool is_build)
Definition: gist.c:626
int spl_nright
Definition: gist.h:119
bool amcaninclude
Definition: amapi.h:199
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:192
void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo, MemoryContext destcxt)
Definition: fmgr.c:610
MemoryContext tempCxt
Definition: gist_private.h:78
BlockNumber blkno
Definition: ginvacuum.c:119
amcostestimate_function amcostestimate
Definition: amapi.h:220
IndexTuple downlink
Definition: gist_private.h:421
bool amcanunique
Definition: amapi.h:181
void gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, ScanKey orderbys, int norderbys)
Definition: gistscan.c:127
bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum, Item newtup, Size newsize)
Definition: bufpage.c:1058
void gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:6231
GISTInsertStack * stack
Definition: gist_private.h:258
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:509
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:2886
static char * buf
Definition: pg_test_fsync.c:67
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:218
amendscan_function amendscan
Definition: amapi.h:229
#define memmove(d, s, c)
Definition: c.h:1265
bool amcanbackward
Definition: amapi.h:179
void check_stack_depth(void)
Definition: postgres.c:3288
static void gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel)
Definition: gist.c:1634
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
int errdetail(const char *fmt,...)
Definition: elog.c:955
FmgrInfo picksplitFn[INDEX_MAX_KEYS]
Definition: gist_private.h:91
#define InvalidTransactionId
Definition: transam.h:31
bool gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size freespace)
Definition: gistutil.c:58
#define RelationGetRelationName(relation)
Definition: rel.h:462
FmgrInfo penaltyFn[INDEX_MAX_KEYS]
Definition: gist_private.h:90
MemoryContext CurrentMemoryContext
Definition: mcxt.c:38
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:447
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:150
struct SplitedPageLayout * next
Definition: gist_private.h:200
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
void freeGISTstate(GISTSTATE *giststate)
Definition: gist.c:1623
#define ereport(elevel, rest)
Definition: elog.h:141
amrescan_function amrescan
Definition: amapi.h:226
bool amcanparallel
Definition: amapi.h:197
FmgrInfo decompressFn[INDEX_MAX_KEYS]
Definition: gist_private.h:89
int64 gistgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: gistget.c:745
List * lappend(List *list, void *datum)
Definition: list.c:322
void * ii_AmCache
Definition: execnodes.h:176
TupleDesc leafTupdesc
Definition: gist_private.h:80
OffsetNumber gistchoose(Relation r, Page p, IndexTuple it, GISTSTATE *giststate)
Definition: gistutil.c:373
void gistbuildempty(Relation index)
Definition: gist.c:128
#define GistPageIsLeaf(page)
Definition: gist.h:140
#define GistTupleSetValid(itup)
Definition: gist_private.h:289
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
OffsetNumber downlinkoffnum
Definition: gist_private.h:228
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
static void gistFindCorrectParent(Relation r, GISTInsertStack *child)
Definition: gist.c:1014
bool amsearchnulls
Definition: amapi.h:189
#define GiSTPageSize
Definition: gist_private.h:468
#define GistClearFollowRight(page)
Definition: gist.h:155
IndexTuple * gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
Definition: gistutil.c:113
static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, IndexTuple *tuples, int ntup, OffsetNumber oldoffnum, Buffer leftchild, Buffer rightchild, bool unlockbuf, bool unlockleftchild)
Definition: gist.c:1254
void * palloc0(Size size)
Definition: mcxt.c:980
uintptr_t Datum
Definition: postgres.h:367
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3612
#define ROTATEDIST(d)
Definition: gist.c:45
bool amclusterable
Definition: amapi.h:193
#define XLogStandbyInfoActive()
Definition: xlog.h:195
TupleDesc rd_att
Definition: rel.h:85
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4374
GISTSTATE * initGISTstate(Relation index)
Definition: gist.c:1495
struct DataPageDeleteStack * child
Definition: ginvacuum.c:116
bool amsearcharray
Definition: amapi.h:187
SplitedPageLayout * gistSplit(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate)
Definition: gist.c:1408
#define rightchild(x)
Definition: fsmpage.c:30
#define InvalidOffsetNumber
Definition: off.h:26
static GISTInsertStack * gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
Definition: gist.c:901
#define InvalidOid
Definition: postgres_ext.h:36
TupleDesc nonLeafTupdesc
Definition: gist_private.h:81
XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, IndexTuple *itup, int ituplen, Buffer leftchildbuf)
Definition: gistxlog.c:632
Oid fn_oid
Definition: fmgr.h:59
#define GIST_CONSISTENT_PROC
Definition: gist.h:29
bool amusemaintenanceworkmem
Definition: amapi.h:201
bool gistfitpage(IndexTuple *itvec, int len)
Definition: gistutil.c:78
#define GIST_UNION_PROC
Definition: gist.h:30
bool gistcanreturn(Relation index, int attno)
Definition: gistget.c:795
#define GistPageGetOpaque(page)
Definition: gist.h:138
List * lcons(void *datum, List *list)
Definition: list.c:454
#define makeNode(_type_)
Definition: nodes.h:573
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:728
#define VACUUM_OPTION_PARALLEL_COND_CLEANUP
Definition: vacuum.h:52
Definition: regguts.h:298
struct DataPageDeleteStack * parent
Definition: ginvacuum.c:117
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:828
void gistcheckpage(Relation rel, Buffer buf)
Definition: gistutil.c:770
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define INDEX_MAX_KEYS
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
size_t Size
Definition: c.h:456
IndexTupleData * gistfillitupvec(IndexTuple *vec, int veclen, int *memlen)
Definition: gistutil.c:126
#define GIST_PENALTY_PROC
Definition: gist.h:33
#define InvalidBlockNumber
Definition: block.h:33
static int list_length(const List *l)
Definition: pg_list.h:169
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition: xloginsert.c:146
#define leftchild(x)
Definition: fsmpage.c:29
#define GIST_DISTANCE_PROC
Definition: gist.h:36
OffsetNumber * spl_right
Definition: gist.h:118
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
#define GIST_SHARE
Definition: gist_private.h:42
ammarkpos_function ammarkpos
Definition: amapi.h:230
bool amcanorder
Definition: amapi.h:175
ambuildphasename_function ambuildphasename
Definition: amapi.h:223
FmgrInfo distanceFn[INDEX_MAX_KEYS]
Definition: gist_private.h:93
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:45
#define ItemPointerSetBlockNumber(pointer, blockNumber)
Definition: itemptr.h:138
#define GistMarkFollowRight(page)
Definition: gist.h:154
#define RelationNeedsWAL(relation)
Definition: rel.h:530
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:234
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
XLogRecPtr GistNSN
Definition: gist.h:51
#define PageGetLSN(page)
Definition: bufpage.h:366
#define GISTNProcs
Definition: gist.h:38
static Datum values[MAXATTR]
Definition: bootstrap.c:167
uint16 amstrategies
Definition: amapi.h:171
static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
Definition: gist.c:1314
bool spl_risnull[INDEX_MAX_KEYS]
Definition: gist_private.h:245
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2623
bool gistvalidate(Oid opclassoid)
Definition: gistvalidate.c:34
#define MaxIndexTuplesPerPage
Definition: itup.h:145
void * palloc(Size size)
Definition: mcxt.c:949
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define F_LEAF
Definition: gist.h:43
#define elog(elevel,...)
Definition: elog.h:228
ambuildempty_function ambuildempty
Definition: amapi.h:215
int i
#define GIST_ROOT_BLKNO
Definition: gist_private.h:262
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
MemoryContext scanCxt
Definition: gist_private.h:77
void GISTInitBuffer(Buffer b, uint32 f)
Definition: gistutil.c:748
IndexTuple gistFormTuple(GISTSTATE *giststate, Relation r, Datum attdata[], bool isnull[], bool isleaf)
Definition: gistutil.c:574
#define PG_FUNCTION_ARGS
Definition: fmgr.h:188
#define GIST_DECOMPRESS_PROC
Definition: gist.h:32
IndexScanDesc gistbeginscan(Relation r, int nkeys, int norderbys)
Definition: gistscan.c:74
void gistendscan(IndexScanDesc scan)
Definition: gistscan.c:349
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
bool spl_lisnull[INDEX_MAX_KEYS]
Definition: gist_private.h:241
FmgrInfo unionFn[INDEX_MAX_KEYS]
Definition: gist_private.h:87
#define GIST_EXCLUSIVE
Definition: gist_private.h:43
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
Definition: pg_list.h:50
bool gistproperty(Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull)
Definition: gistutil.c:929
int Buffer
Definition: buf.h:23
struct GISTInsertStack * parent
Definition: gist_private.h:231
amcanreturn_function amcanreturn
Definition: amapi.h:219
Buffer gistNewBuffer(Relation r)
Definition: gistutil.c:809
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3098
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define IndexTupleSize(itup)
Definition: itup.h:71
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:235
List * list_delete_first(List *list)
Definition: list.c:861
TransactionId index_compute_xid_horizon_for_tuples(Relation irel, Relation hrel, Buffer ibuf, OffsetNumber *itemnos, int nitems)
Definition: genam.c:281
amrestrpos_function amrestrpos
Definition: amapi.h:231
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:760