brin.c
1 /*
2  * brin.c
3  * Implementation of BRIN indexes for Postgres
4  *
5  * See src/backend/access/brin/README for details.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * IDENTIFICATION
11  * src/backend/access/brin/brin.c
12  *
13  * TODO
14  * * ScalarArrayOpExpr (amsearcharray -> SK_SEARCHARRAY)
15  */
16 #include "postgres.h"
17 
18 #include "access/brin.h"
19 #include "access/brin_page.h"
20 #include "access/brin_pageops.h"
21 #include "access/brin_xlog.h"
22 #include "access/reloptions.h"
23 #include "access/relscan.h"
24 #include "access/xloginsert.h"
25 #include "catalog/index.h"
26 #include "catalog/pg_am.h"
27 #include "miscadmin.h"
28 #include "pgstat.h"
29 #include "storage/bufmgr.h"
30 #include "storage/freespace.h"
31 #include "utils/builtins.h"
32 #include "utils/index_selfuncs.h"
33 #include "utils/memutils.h"
34 #include "utils/rel.h"
35 
36 
37 /*
38  * We use a BrinBuildState during initial construction of a BRIN index.
39  * The running state is kept in a BrinMemTuple.
40  */
41 typedef struct BrinBuildState
42 {
43  Relation bs_irel;
44  int bs_numtuples;
45  Buffer bs_currentInsertBuf;
46  BlockNumber bs_pagesPerRange;
47  BlockNumber bs_currRangeStart;
48  BrinRevmap *bs_rmAccess;
49  BrinDesc *bs_bdesc;
50  BrinMemTuple *bs_dtuple;
51 } BrinBuildState;
52 
53 /*
54  * Struct used as "opaque" during index scans
55  */
56 typedef struct BrinOpaque
57 {
58  BlockNumber bo_pagesPerRange;
59  BrinRevmap *bo_rmAccess;
60  BrinDesc *bo_bdesc;
61 } BrinOpaque;
62 
63 static BrinBuildState *initialize_brin_buildstate(Relation idxRel,
64  BrinRevmap *revmap, BlockNumber pagesPerRange);
65 static void terminate_brin_buildstate(BrinBuildState *state);
66 static void brinsummarize(Relation index, Relation heapRel,
67  double *numSummarized, double *numExisting);
68 static void form_and_insert_tuple(BrinBuildState *state);
69 static void union_tuples(BrinDesc *bdesc, BrinMemTuple *a,
70  BrinTuple *b);
71 static void brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy);
72 
73 
74 /*
75  * BRIN handler function: return IndexAmRoutine with access method parameters
76  * and callbacks.
77  */
78 Datum
79 brinhandler(PG_FUNCTION_ARGS)
80 {
81  IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);
82 
83  amroutine->amstrategies = 0;
84  amroutine->amsupport = BRIN_LAST_OPTIONAL_PROCNUM;
85  amroutine->amcanorder = false;
86  amroutine->amcanorderbyop = false;
87  amroutine->amcanbackward = false;
88  amroutine->amcanunique = false;
89  amroutine->amcanmulticol = true;
90  amroutine->amoptionalkey = true;
91  amroutine->amsearcharray = false;
92  amroutine->amsearchnulls = true;
93  amroutine->amstorage = true;
94  amroutine->amclusterable = false;
95  amroutine->ampredlocks = false;
96  amroutine->amcanparallel = false;
97  amroutine->amkeytype = InvalidOid;
98 
99  amroutine->ambuild = brinbuild;
100  amroutine->ambuildempty = brinbuildempty;
101  amroutine->aminsert = brininsert;
102  amroutine->ambulkdelete = brinbulkdelete;
103  amroutine->amvacuumcleanup = brinvacuumcleanup;
104  amroutine->amcanreturn = NULL;
105  amroutine->amcostestimate = brincostestimate;
106  amroutine->amoptions = brinoptions;
107  amroutine->amproperty = NULL;
108  amroutine->amvalidate = brinvalidate;
109  amroutine->ambeginscan = brinbeginscan;
110  amroutine->amrescan = brinrescan;
111  amroutine->amgettuple = NULL;
112  amroutine->amgetbitmap = bringetbitmap;
113  amroutine->amendscan = brinendscan;
114  amroutine->ammarkpos = NULL;
115  amroutine->amrestrpos = NULL;
116  amroutine->amestimateparallelscan = NULL;
117  amroutine->aminitparallelscan = NULL;
118  amroutine->amparallelrescan = NULL;
119 
120  PG_RETURN_POINTER(amroutine);
121 }
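/*
 * For context: brinhandler is the handler function recorded for the "brin"
 * access method in pg_am, so the IndexAmRoutine filled in above is what the
 * planner and executor consult for every BRIN index.  The association can be
 * inspected from SQL with, for instance:
 *
 *     SELECT amname, amhandler FROM pg_am WHERE amname = 'brin';
 */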
122 
123 /*
124  * A tuple in the heap is being inserted. To keep a brin index up to date,
125  * we need to obtain the relevant index tuple and compare its stored values
126  * with those of the new tuple. If the tuple values are not consistent with
127  * the summary tuple, we need to update the index tuple.
128  *
129  * If the range is not currently summarized (i.e. the revmap returns NULL for
130  * it), there's nothing to do.
131  */
132 bool
133 brininsert(Relation idxRel, Datum *values, bool *nulls,
134  ItemPointer heaptid, Relation heapRel,
135  IndexUniqueCheck checkUnique,
136  IndexInfo *indexInfo)
137 {
138  BlockNumber pagesPerRange;
139  BrinDesc *bdesc = (BrinDesc *) indexInfo->ii_AmCache;
140  BrinRevmap *revmap;
141  Buffer buf = InvalidBuffer;
142  MemoryContext tupcxt = NULL;
143  MemoryContext oldcxt = CurrentMemoryContext;
144 
145  revmap = brinRevmapInitialize(idxRel, &pagesPerRange, NULL);
146 
147  for (;;)
148  {
149  bool need_insert = false;
150  OffsetNumber off;
151  BrinTuple *brtup;
152  BrinMemTuple *dtup;
153  BlockNumber heapBlk;
154  int keyno;
155 
156  CHECK_FOR_INTERRUPTS();
157 
158  heapBlk = ItemPointerGetBlockNumber(heaptid);
159  /* normalize the block number to be the first block in the range */
160  heapBlk = (heapBlk / pagesPerRange) * pagesPerRange;
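 	/*
 	 * For example, with pagesPerRange = 128 (the default), a heap tuple on
 	 * block 300 normalizes to (300 / 128) * 128 = 256, i.e. the range that
 	 * covers heap blocks 256..383.
 	 */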
161  brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL,
162  BUFFER_LOCK_SHARE, NULL);
163 
164  /* if range is unsummarized, there's nothing to do */
165  if (!brtup)
166  break;
167 
168  /* First time through in this statement? */
169  if (bdesc == NULL)
170  {
171  MemoryContextSwitchTo(indexInfo->ii_Context);
172  bdesc = brin_build_desc(idxRel);
173  indexInfo->ii_AmCache = (void *) bdesc;
174  MemoryContextSwitchTo(oldcxt);
175  }
176  /* First time through in this brininsert call? */
177  if (tupcxt == NULL)
178  {
179  tupcxt = AllocSetContextCreate(CurrentMemoryContext,
180  "brininsert cxt",
181  ALLOCSET_DEFAULT_SIZES);
182  MemoryContextSwitchTo(tupcxt);
183  }
184 
185  dtup = brin_deform_tuple(bdesc, brtup);
186 
187  /*
188  * Compare the key values of the new tuple to the stored index values;
189  * our deformed tuple will get updated if the new tuple doesn't fit
190  * the original range (note this means we can't break out of the loop
191  * early). Make a note of whether this happens, so that we know to
192  * insert the modified tuple later.
193  */
194  for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
195  {
196  Datum result;
197  BrinValues *bval;
198  FmgrInfo *addValue;
199 
200  bval = &dtup->bt_columns[keyno];
201  addValue = index_getprocinfo(idxRel, keyno + 1,
202  BRIN_PROCNUM_ADDVALUE);
203  result = FunctionCall4Coll(addValue,
204  idxRel->rd_indcollation[keyno],
205  PointerGetDatum(bdesc),
206  PointerGetDatum(bval),
207  values[keyno],
208  nulls[keyno]);
209  /* if that returned true, we need to insert the updated tuple */
210  need_insert |= DatumGetBool(result);
211  }
212 
213  if (!need_insert)
214  {
215  /*
216  * The tuple is consistent with the new values, so there's nothing
217  * to do.
218  */
219  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
220  }
221  else
222  {
223  Page page = BufferGetPage(buf);
224  ItemId lp = PageGetItemId(page, off);
225  Size origsz;
226  BrinTuple *origtup;
227  Size newsz;
228  BrinTuple *newtup;
229  bool samepage;
230 
231  /*
232  * Make a copy of the old tuple, so that we can compare it after
233  * re-acquiring the lock.
234  */
235  origsz = ItemIdGetLength(lp);
236  origtup = brin_copy_tuple(brtup, origsz);
237 
238  /*
239  * Before releasing the lock, check if we can attempt a same-page
240  * update. Another process could insert a tuple concurrently in
241  * the same page though, so downstream we must be prepared to cope
242  * if this turns out to not be possible after all.
243  */
244  newtup = brin_form_tuple(bdesc, heapBlk, dtup, &newsz);
245  samepage = brin_can_do_samepage_update(buf, origsz, newsz);
246  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
247 
248  /*
249  * Try to update the tuple. If this doesn't work for whatever
250  * reason, we need to restart from the top; the revmap might be
251  * pointing at a different tuple for this block now, so we need to
252  * recompute to ensure both our new heap tuple and the other
253  * inserter's are covered by the combined tuple. It might be that
254  * we don't need to update at all.
255  */
256  if (!brin_doupdate(idxRel, pagesPerRange, revmap, heapBlk,
257  buf, off, origtup, origsz, newtup, newsz,
258  samepage))
259  {
260  /* no luck; start over */
261  MemoryContextResetAndDeleteChildren(tupcxt);
262  continue;
263  }
264  }
265 
266  /* success! */
267  break;
268  }
269 
270  brinRevmapTerminate(revmap);
271  if (BufferIsValid(buf))
272  ReleaseBuffer(buf);
273  MemoryContextSwitchTo(oldcxt);
274  if (tupcxt != NULL)
275  MemoryContextDelete(tupcxt);
276 
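 	/*
 	 * The return value of an aminsert routine only matters for deferred
 	 * uniqueness checks (UNIQUE_CHECK_PARTIAL), which BRIN never performs
 	 * since amcanunique is false, so false is returned unconditionally.
 	 */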
277  return false;
278 }
279 
280 /*
281  * Initialize state for a BRIN index scan.
282  *
283  * We read the metapage here to determine the pages-per-range number that this
284  * index was built with. Note that since this cannot be changed while we're
285  * holding lock on index, it's not necessary to recompute it during brinrescan.
286  */
287 IndexScanDesc
288 brinbeginscan(Relation r, int nkeys, int norderbys)
289 {
290  IndexScanDesc scan;
291  BrinOpaque *opaque;
292 
293  scan = RelationGetIndexScan(r, nkeys, norderbys);
294 
295  opaque = (BrinOpaque *) palloc(sizeof(BrinOpaque));
296  opaque->bo_rmAccess = brinRevmapInitialize(r, &opaque->bo_pagesPerRange,
297  scan->xs_snapshot);
298  opaque->bo_bdesc = brin_build_desc(r);
299  scan->opaque = opaque;
300 
301  return scan;
302 }
303 
304 /*
305  * Execute the index scan.
306  *
307  * This works by reading index TIDs from the revmap, and obtaining the index
308  * tuples pointed to by them; the summary values in the index tuples are
309  * compared to the scan keys. We return into the TID bitmap all the pages in
310  * ranges corresponding to index tuples that match the scan keys.
311  *
312  * If a TID from the revmap is read as InvalidTID, we know that range is
313  * unsummarized. Pages in those ranges need to be returned regardless of scan
314  * keys.
315  */
316 int64
317 bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
318 {
319  Relation idxRel = scan->indexRelation;
320  Buffer buf = InvalidBuffer;
321  BrinDesc *bdesc;
322  Oid heapOid;
323  Relation heapRel;
324  BrinOpaque *opaque;
325  BlockNumber nblocks;
326  BlockNumber heapBlk;
327  int totalpages = 0;
328  FmgrInfo *consistentFn;
329  MemoryContext oldcxt;
330  MemoryContext perRangeCxt;
331 
332  opaque = (BrinOpaque *) scan->opaque;
333  bdesc = opaque->bo_bdesc;
334  pgstat_count_index_scan(idxRel);
335 
336  /*
337  * We need to know the size of the table so that we know how long to
338  * iterate on the revmap.
339  */
340  heapOid = IndexGetRelation(RelationGetRelid(idxRel), false);
341  heapRel = heap_open(heapOid, AccessShareLock);
342  nblocks = RelationGetNumberOfBlocks(heapRel);
343  heap_close(heapRel, AccessShareLock);
344 
345  /*
346  * Make room for the consistent support procedures of indexed columns. We
347  * don't look them up here; we do that lazily the first time we see a scan
348  * key reference each of them. We rely on zeroing fn_oid to InvalidOid.
349  */
350  consistentFn = palloc0(sizeof(FmgrInfo) * bdesc->bd_tupdesc->natts);
351 
352  /*
353  * Setup and use a per-range memory context, which is reset every time we
354  * loop below. This avoids having to free the tuples within the loop.
355  */
356  perRangeCxt = AllocSetContextCreate(CurrentMemoryContext,
357  "bringetbitmap cxt",
358  ALLOCSET_DEFAULT_SIZES);
359  oldcxt = MemoryContextSwitchTo(perRangeCxt);
360 
361  /*
362  * Now scan the revmap. We start by querying for heap page 0,
363  * incrementing by the number of pages per range; this gives us a full
364  * view of the table.
365  */
366  for (heapBlk = 0; heapBlk < nblocks; heapBlk += opaque->bo_pagesPerRange)
367  {
368  bool addrange;
369  BrinTuple *tup;
370  OffsetNumber off;
371  Size size;
372 
373  CHECK_FOR_INTERRUPTS();
374 
375  MemoryContextResetAndDeleteChildren(perRangeCxt);
376 
377  tup = brinGetTupleForHeapBlock(opaque->bo_rmAccess, heapBlk, &buf,
378  &off, &size, BUFFER_LOCK_SHARE,
379  scan->xs_snapshot);
380  if (tup)
381  {
382  tup = brin_copy_tuple(tup, size);
383  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
384  }
385 
386  /*
387  * For page ranges with no indexed tuple, we must return the whole
388  * range; otherwise, compare it to the scan keys.
389  */
390  if (tup == NULL)
391  {
392  addrange = true;
393  }
394  else
395  {
396  BrinMemTuple *dtup;
397 
398  dtup = brin_deform_tuple(bdesc, tup);
399  if (dtup->bt_placeholder)
400  {
401  /*
402  * Placeholder tuples are always returned, regardless of the
403  * values stored in them.
404  */
405  addrange = true;
406  }
407  else
408  {
409  int keyno;
410 
411  /*
412  * Compare scan keys with summary values stored for the range.
413  * If scan keys are matched, the page range must be added to
414  * the bitmap. We initially assume the range needs to be
415  * added; in particular this serves the case where there are
416  * no keys.
417  */
418  addrange = true;
419  for (keyno = 0; keyno < scan->numberOfKeys; keyno++)
420  {
421  ScanKey key = &scan->keyData[keyno];
422  AttrNumber keyattno = key->sk_attno;
423  BrinValues *bval = &dtup->bt_columns[keyattno - 1];
424  Datum add;
425 
426  /*
427  * The collation of the scan key must match the collation
428  * used in the index column (but only if the search is not
429  * IS NULL/ IS NOT NULL). Otherwise we shouldn't be using
430  * this index ...
431  */
432  Assert((key->sk_flags & SK_ISNULL) ||
433  (key->sk_collation ==
434  bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
435 
436  /* First time this column? look up consistent function */
437  if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
438  {
439  FmgrInfo *tmp;
440 
441  tmp = index_getprocinfo(idxRel, keyattno,
442  BRIN_PROCNUM_CONSISTENT);
443  fmgr_info_copy(&consistentFn[keyattno - 1], tmp,
444  CurrentMemoryContext);
445  }
446 
447  /*
448  * Check whether the scan key is consistent with the page
449  * range values; if so, have the pages in the range added
450  * to the output bitmap.
451  *
452  * When there are multiple scan keys, failure to meet the
453  * criteria for a single one of them is enough to discard
454  * the range as a whole, so break out of the loop as soon
455  * as a false return value is obtained.
456  */
457  add = FunctionCall3Coll(&consistentFn[keyattno - 1],
458  key->sk_collation,
459  PointerGetDatum(bdesc),
460  PointerGetDatum(bval),
461  PointerGetDatum(key));
462  addrange = DatumGetBool(add);
463  if (!addrange)
464  break;
465  }
466  }
467  }
468 
469  /* add the pages in the range to the output bitmap, if needed */
470  if (addrange)
471  {
472  BlockNumber pageno;
473 
474  for (pageno = heapBlk;
475  pageno <= heapBlk + opaque->bo_pagesPerRange - 1;
476  pageno++)
477  {
478  MemoryContextSwitchTo(oldcxt);
479  tbm_add_page(tbm, pageno);
480  totalpages++;
481  MemoryContextSwitchTo(perRangeCxt);
482  }
483  }
484  }
485 
486  MemoryContextSwitchTo(oldcxt);
487  MemoryContextDelete(perRangeCxt);
488 
489  if (buf != InvalidBuffer)
490  ReleaseBuffer(buf);
491 
492  /*
493  * XXX We have an approximation of the number of *pages* that our scan
494  * returns, but we don't have a precise idea of the number of heap tuples
495  * involved.
496  */
497  return totalpages * 10;
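 	/*
 	 * For instance, if three ranges of 128 pages each were added above,
 	 * totalpages is 384 and 3840 is returned; the fixed factor of 10 is only
 	 * a rough guess at the number of tuples per page.
 	 */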
498 }
499 
500 /*
501  * Re-initialize state for a BRIN index scan
502  */
503 void
504 brinrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
505  ScanKey orderbys, int norderbys)
506 {
507  /*
508  * Other index AMs preprocess the scan keys at this point, or sometime
509  * early during the scan; this lets them optimize by removing redundant
510  * keys, or doing early returns when they are impossible to satisfy; see
511  * _bt_preprocess_keys for an example. Something like that could be added
512  * here someday, too.
513  */
514 
515  if (scankey && scan->numberOfKeys > 0)
516  memmove(scan->keyData, scankey,
517  scan->numberOfKeys * sizeof(ScanKeyData));
518 }
519 
520 /*
521  * Close down a BRIN index scan
522  */
523 void
524 brinendscan(IndexScanDesc scan)
525 {
526  BrinOpaque *opaque = (BrinOpaque *) scan->opaque;
527 
528  brinRevmapTerminate(opaque->bo_rmAccess);
529  brin_free_desc(opaque->bo_bdesc);
530  pfree(opaque);
531 }
532 
533 /*
534  * Per-heap-tuple callback for IndexBuildHeapScan.
535  *
536  * Note we don't worry about the page range at the end of the table here; it is
537  * present in the build state struct after we're called the last time, but not
538  * inserted into the index. Caller must ensure to do so, if appropriate.
539  */
540 static void
541 brinbuildCallback(Relation index,
542  HeapTuple htup,
543  Datum *values,
544  bool *isnull,
545  bool tupleIsAlive,
546  void *brstate)
547 {
548  BrinBuildState *state = (BrinBuildState *) brstate;
549  BlockNumber thisblock;
550  int i;
551 
552  thisblock = ItemPointerGetBlockNumber(&htup->t_self);
553 
554  /*
555  * If we're in a block that belongs to a future range, summarize what
556  * we've got and start afresh. Note the scan might have skipped many
557  * pages, if they were devoid of live tuples; make sure to insert index
558  * tuples for those too.
559  */
560  while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
561  {
562 
563  BRIN_elog((DEBUG2,
564  "brinbuildCallback: completed a range: %u--%u",
565  state->bs_currRangeStart,
566  state->bs_currRangeStart + state->bs_pagesPerRange));
567 
568  /* create the index tuple and insert it */
569  form_and_insert_tuple(state);
570 
571  /* set state to correspond to the next range */
572  state->bs_currRangeStart += state->bs_pagesPerRange;
573 
574  /* re-initialize state for it */
575  brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
576  }
577 
578  /* Accumulate the current tuple into the running state */
579  for (i = 0; i < state->bs_bdesc->bd_tupdesc->natts; i++)
580  {
581  FmgrInfo *addValue;
582  BrinValues *col;
583 
584  col = &state->bs_dtuple->bt_columns[i];
585  addValue = index_getprocinfo(index, i + 1,
586  BRIN_PROCNUM_ADDVALUE);
587 
588  /*
589  * Update dtuple state, if and as necessary.
590  */
591  FunctionCall4Coll(addValue,
592  state->bs_bdesc->bd_tupdesc->attrs[i]->attcollation,
593  PointerGetDatum(state->bs_bdesc),
594  PointerGetDatum(col),
595  values[i], isnull[i]);
596  }
597 }
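/*
 * Worked example of the range-advance check in brinbuildCallback: with
 * bs_pagesPerRange = 4 and bs_currRangeStart = 8, the current range covers
 * heap blocks 8..11; the first callback for a tuple on block 12 or later
 * first summarizes and inserts the tuple for blocks 8..11, then starts
 * accumulating values into the next range.
 */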
598 
599 /*
600  * brinbuild() -- build a new BRIN index.
601  */
602 IndexBuildResult *
603 brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
604 {
605  IndexBuildResult *result;
606  double reltuples;
607  double idxtuples;
608  BrinRevmap *revmap;
609  BrinBuildState *state;
610  Buffer meta;
611  BlockNumber pagesPerRange;
612 
613  /*
614  * We expect to be called exactly once for any index relation.
615  */
616  if (RelationGetNumberOfBlocks(index) != 0)
617  elog(ERROR, "index \"%s\" already contains data",
618  RelationGetRelationName(index));
619 
620  /*
621  * Critical section not required, because on error the creation of the
622  * whole relation will be rolled back.
623  */
624 
625  meta = ReadBuffer(index, P_NEW);
626  Assert(BufferGetBlockNumber(meta) == BRIN_METAPAGE_BLKNO);
627  LockBuffer(meta, BUFFER_LOCK_EXCLUSIVE);
628 
629  brin_metapage_init(BufferGetPage(meta), BrinGetPagesPerRange(index),
630  BRIN_CURRENT_VERSION);
631  MarkBufferDirty(meta);
632 
633  if (RelationNeedsWAL(index))
634  {
635  xl_brin_createidx xlrec;
636  XLogRecPtr recptr;
637  Page page;
638 
639  xlrec.version = BRIN_CURRENT_VERSION;
640  xlrec.pagesPerRange = BrinGetPagesPerRange(index);
641 
642  XLogBeginInsert();
643  XLogRegisterData((char *) &xlrec, SizeOfBrinCreateIdx);
644  XLogRegisterBuffer(0, meta, REGBUF_WILL_INIT);
645 
646  recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_CREATE_INDEX);
647 
648  page = BufferGetPage(meta);
649  PageSetLSN(page, recptr);
650  }
651 
652  UnlockReleaseBuffer(meta);
653 
654  /*
655  * Initialize our state, including the deformed tuple state.
656  */
657  revmap = brinRevmapInitialize(index, &pagesPerRange, NULL);
658  state = initialize_brin_buildstate(index, revmap, pagesPerRange);
659 
660  /*
661  * Now scan the relation. No syncscan allowed here because we want the
662  * heap blocks in physical order.
663  */
664  reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
665  brinbuildCallback, (void *) state);
666 
667  /* process the final batch */
668  form_and_insert_tuple(state);
669 
670  /* release resources */
671  idxtuples = state->bs_numtuples;
672  brinRevmapTerminate(state->bs_rmAccess);
673  terminate_brin_buildstate(state);
674 
675  /*
676  * Return statistics
677  */
678  result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
679 
680  result->heap_tuples = reltuples;
681  result->index_tuples = idxtuples;
682 
683  return result;
684 }
685 
686 void
687 brinbuildempty(Relation index)
688 {
689  Buffer metabuf;
690 
691  /* An empty BRIN index has a metapage only. */
692  metabuf =
693  ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
694  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
695 
696  /* Initialize and xlog metabuffer. */
697  START_CRIT_SECTION();
698  brin_metapage_init(BufferGetPage(metabuf), BrinGetPagesPerRange(index),
699  BRIN_CURRENT_VERSION);
700  MarkBufferDirty(metabuf);
701  log_newpage_buffer(metabuf, false);
702  END_CRIT_SECTION();
703 
704  UnlockReleaseBuffer(metabuf);
705 }
706 
707 /*
708  * brinbulkdelete
709  * Since there are no per-heap-tuple index tuples in BRIN indexes,
710  * there's not a lot we can do here.
711  *
712  * XXX we could mark item tuples as "dirty" (when a minimum or maximum heap
713  * tuple is deleted), meaning the need to re-run summarization on the affected
714  * range. Would need to add an extra flag in brintuples for that.
715  */
716 IndexBulkDeleteResult *
717 brinbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
718  IndexBulkDeleteCallback callback, void *callback_state)
719 {
720  /* allocate stats if first time through, else re-use existing struct */
721  if (stats == NULL)
722  stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
723 
724  return stats;
725 }
726 
727 /*
728  * This routine is in charge of "vacuuming" a BRIN index: we just summarize
729  * ranges that are currently unsummarized.
730  */
731 IndexBulkDeleteResult *
732 brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
733 {
734  Relation heapRel;
735 
736  /* No-op in ANALYZE ONLY mode */
737  if (info->analyze_only)
738  return stats;
739 
740  if (!stats)
741  stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
742  stats->num_pages = RelationGetNumberOfBlocks(info->index);
743  /* rest of stats is initialized by zeroing */
744 
745  heapRel = heap_open(IndexGetRelation(RelationGetRelid(info->index), false),
746  AccessShareLock);
747 
748  brin_vacuum_scan(info->index, info->strategy);
749 
750  brinsummarize(info->index, heapRel,
751  &stats->num_index_tuples, &stats->num_index_tuples);
752 
753  heap_close(heapRel, AccessShareLock);
754 
755  return stats;
756 }
757 
758 /*
759  * reloptions processor for BRIN indexes
760  */
761 bytea *
762 brinoptions(Datum reloptions, bool validate)
763 {
764  relopt_value *options;
765  BrinOptions *rdopts;
766  int numoptions;
767  static const relopt_parse_elt tab[] = {
768  {"pages_per_range", RELOPT_TYPE_INT, offsetof(BrinOptions, pagesPerRange)}
769  };
770 
771  options = parseRelOptions(reloptions, validate, RELOPT_KIND_BRIN,
772  &numoptions);
773 
774  /* if none set, we're done */
775  if (numoptions == 0)
776  return NULL;
777 
778  rdopts = allocateReloptStruct(sizeof(BrinOptions), options, numoptions);
779 
780  fillRelOptions((void *) rdopts, sizeof(BrinOptions), options, numoptions,
781  validate, tab, lengthof(tab));
782 
783  pfree(options);
784 
785  return (bytea *) rdopts;
786 }
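/*
 * Usage example: the pages_per_range reloption parsed above controls how many
 * heap pages one BRIN range summarizes.  An index created with, say,
 *
 *     CREATE INDEX brin_ts_idx ON events USING brin (created_at)
 *         WITH (pages_per_range = 32);
 *
 * (table, column and index names are illustrative) summarizes 32 heap pages
 * per range instead of the default 128.
 */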
787 
788 /*
789  * SQL-callable function to scan through an index and summarize all ranges
790  * that are not currently summarized.
791  */
792 Datum
793 brin_summarize_new_values(PG_FUNCTION_ARGS)
794 {
795  Oid indexoid = PG_GETARG_OID(0);
796  Oid heapoid;
797  Relation indexRel;
798  Relation heapRel;
799  double numSummarized = 0;
800 
801  /*
802  * We must lock table before index to avoid deadlocks. However, if the
803  * passed indexoid isn't an index then IndexGetRelation() will fail.
804  * Rather than emitting a not-very-helpful error message, postpone
805  * complaining, expecting that the is-it-an-index test below will fail.
806  */
807  heapoid = IndexGetRelation(indexoid, true);
808  if (OidIsValid(heapoid))
809  heapRel = heap_open(heapoid, ShareUpdateExclusiveLock);
810  else
811  heapRel = NULL;
812 
813  indexRel = index_open(indexoid, ShareUpdateExclusiveLock);
814 
815  /* Must be a BRIN index */
816  if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
817  indexRel->rd_rel->relam != BRIN_AM_OID)
818  ereport(ERROR,
819  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
820  errmsg("\"%s\" is not a BRIN index",
821  RelationGetRelationName(indexRel))));
822 
823  /* User must own the index (comparable to privileges needed for VACUUM) */
824  if (!pg_class_ownercheck(indexoid, GetUserId()))
825  aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
826  RelationGetRelationName(indexRel));
827 
828  /*
829  * Since we did the IndexGetRelation call above without any lock, it's
830  * barely possible that a race against an index drop/recreation could have
831  * netted us the wrong table. Recheck.
832  */
833  if (heapRel == NULL || heapoid != IndexGetRelation(indexoid, false))
834  ereport(ERROR,
835  (errcode(ERRCODE_UNDEFINED_TABLE),
836  errmsg("could not open parent table of index %s",
837  RelationGetRelationName(indexRel))));
838 
839  /* OK, do it */
840  brinsummarize(indexRel, heapRel, &numSummarized, NULL);
841 
842  relation_close(indexRel, ShareUpdateExclusiveLock);
843  relation_close(heapRel, ShareUpdateExclusiveLock);
844 
845  PG_RETURN_INT32((int32) numSummarized);
846 }
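/*
 * Usage example: after appending a large batch of rows to a table, the new,
 * not-yet-summarized ranges can be summarized immediately (instead of waiting
 * for VACUUM) with, for instance,
 *
 *     SELECT brin_summarize_new_values('brin_ts_idx'::regclass);
 *
 * (index name illustrative), which returns the number of ranges summarized
 * by the call.
 */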
847 
848 /*
849  * Build a BrinDesc used to create or scan a BRIN index
850  */
851 BrinDesc *
852 brin_build_desc(Relation rel)
853 {
854  BrinOpcInfo **opcinfo;
855  BrinDesc *bdesc;
856  TupleDesc tupdesc;
857  int totalstored = 0;
858  int keyno;
859  long totalsize;
860  MemoryContext cxt;
861  MemoryContext oldcxt;
862 
863  cxt = AllocSetContextCreate(CurrentMemoryContext,
864  "brin desc cxt",
865  ALLOCSET_SMALL_SIZES);
866  oldcxt = MemoryContextSwitchTo(cxt);
867  tupdesc = RelationGetDescr(rel);
868 
869  /*
870  * Obtain BrinOpcInfo for each indexed column. While at it, accumulate
871  * the number of columns stored, since the number is opclass-defined.
872  */
873  opcinfo = (BrinOpcInfo **) palloc(sizeof(BrinOpcInfo *) * tupdesc->natts);
874  for (keyno = 0; keyno < tupdesc->natts; keyno++)
875  {
876  FmgrInfo *opcInfoFn;
877 
878  opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO);
879 
880  opcinfo[keyno] = (BrinOpcInfo *)
881  DatumGetPointer(FunctionCall1(opcInfoFn,
882  tupdesc->attrs[keyno]->atttypid));
883  totalstored += opcinfo[keyno]->oi_nstored;
884  }
885 
886  /* Allocate our result struct and fill it in */
887  totalsize = offsetof(BrinDesc, bd_info) +
888  sizeof(BrinOpcInfo *) * tupdesc->natts;
889 
890  bdesc = palloc(totalsize);
891  bdesc->bd_context = cxt;
892  bdesc->bd_index = rel;
893  bdesc->bd_tupdesc = tupdesc;
894  bdesc->bd_disktdesc = NULL; /* generated lazily */
895  bdesc->bd_totalstored = totalstored;
896 
897  for (keyno = 0; keyno < tupdesc->natts; keyno++)
898  bdesc->bd_info[keyno] = opcinfo[keyno];
899  pfree(opcinfo);
900 
901  MemoryContextSwitchTo(oldcxt);
902 
903  return bdesc;
904 }
905 
906 void
907 brin_free_desc(BrinDesc *bdesc)
908 {
909  /* make sure the tupdesc is still valid */
910  Assert(bdesc->bd_tupdesc->tdrefcount >= 1);
911  /* no need for retail pfree */
912  MemoryContextDelete(bdesc->bd_context);
913 }
914 
915 /*
916  * Initialize a BrinBuildState appropriate to create tuples on the given index.
917  */
918 static BrinBuildState *
919 initialize_brin_buildstate(Relation idxRel, BrinRevmap *revmap,
920  BlockNumber pagesPerRange)
921 {
922  BrinBuildState *state;
923 
924  state = palloc(sizeof(BrinBuildState));
925 
926  state->bs_irel = idxRel;
927  state->bs_numtuples = 0;
928  state->bs_currentInsertBuf = InvalidBuffer;
929  state->bs_pagesPerRange = pagesPerRange;
930  state->bs_currRangeStart = 0;
931  state->bs_rmAccess = revmap;
932  state->bs_bdesc = brin_build_desc(idxRel);
933  state->bs_dtuple = brin_new_memtuple(state->bs_bdesc);
934 
935  brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
936 
937  return state;
938 }
939 
940 /*
941  * Release resources associated with a BrinBuildState.
942  */
943 static void
944 terminate_brin_buildstate(BrinBuildState *state)
945 {
946  /* release the last index buffer used */
947  if (!BufferIsInvalid(state->bs_currentInsertBuf))
948  {
949  Page page;
950 
951  page = BufferGetPage(state->bs_currentInsertBuf);
952  RecordPageWithFreeSpace(state->bs_irel,
953  BufferGetBlockNumber(state->bs_currentInsertBuf),
954  PageGetFreeSpace(page));
955  ReleaseBuffer(state->bs_currentInsertBuf);
956  }
957 
958  brin_free_desc(state->bs_bdesc);
959  pfree(state->bs_dtuple);
960  pfree(state);
961 }
962 
963 /*
964  * Summarize the given page range of the given index.
965  *
966  * This routine can run in parallel with insertions into the heap. To avoid
967  * missing those values from the summary tuple, we first insert a placeholder
968  * index tuple into the index, then execute the heap scan; transactions
969  * concurrent with the scan update the placeholder tuple. After the scan, we
970  * union the placeholder tuple with the one computed by this routine. The
971  * update of the index value happens in a loop, so that if somebody updates
972  * the placeholder tuple after we read it, we detect the case and try again.
973  * This ensures that the concurrently inserted tuples are not lost.
974  */
975 static void
976 summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
977  BlockNumber heapBlk, BlockNumber heapNumBlks)
978 {
979  Buffer phbuf;
980  BrinTuple *phtup;
981  Size phsz;
982  OffsetNumber offset;
983  BlockNumber scanNumBlks;
984 
985  /*
986  * Insert the placeholder tuple
987  */
988  phbuf = InvalidBuffer;
989  phtup = brin_form_placeholder_tuple(state->bs_bdesc, heapBlk, &phsz);
990  offset = brin_doinsert(state->bs_irel, state->bs_pagesPerRange,
991  state->bs_rmAccess, &phbuf,
992  heapBlk, phtup, phsz);
993 
994  /*
995  * Execute the partial heap scan covering the heap blocks in the specified
996  * page range, summarizing the heap tuples in it. This scan stops just
997  * short of brinbuildCallback creating the new index entry.
998  *
999  * Note that it is critical we use the "any visible" mode of
1000  * IndexBuildHeapRangeScan here: otherwise, we would miss tuples inserted
1001  * by transactions that are still in progress, among other corner cases.
1002  */
1003  state->bs_currRangeStart = heapBlk;
1004  scanNumBlks = heapBlk + state->bs_pagesPerRange <= heapNumBlks ?
1005  state->bs_pagesPerRange : heapNumBlks - heapBlk;
1006  IndexBuildHeapRangeScan(heapRel, state->bs_irel, indexInfo, false, true,
1007  heapBlk, scanNumBlks,
1008  brinbuildCallback, (void *) state);
1009 
1010  /*
1011  * Now we update the values obtained by the scan with the placeholder
1012  * tuple. We do this in a loop which only terminates if we're able to
1013  * update the placeholder tuple successfully; if we are not, this means
1014  * somebody else modified the placeholder tuple after we read it.
1015  */
1016  for (;;)
1017  {
1018  BrinTuple *newtup;
1019  Size newsize;
1020  bool didupdate;
1021  bool samepage;
1022 
1023  CHECK_FOR_INTERRUPTS();
1024 
1025  /*
1026  * Update the summary tuple and try to update.
1027  */
1028  newtup = brin_form_tuple(state->bs_bdesc,
1029  heapBlk, state->bs_dtuple, &newsize);
1030  samepage = brin_can_do_samepage_update(phbuf, phsz, newsize);
1031  didupdate =
1032  brin_doupdate(state->bs_irel, state->bs_pagesPerRange,
1033  state->bs_rmAccess, heapBlk, phbuf, offset,
1034  phtup, phsz, newtup, newsize, samepage);
1035  brin_free_tuple(phtup);
1036  brin_free_tuple(newtup);
1037 
1038  /* If the update succeeded, we're done. */
1039  if (didupdate)
1040  break;
1041 
1042  /*
1043  * If the update didn't work, it might be because somebody updated the
1044  * placeholder tuple concurrently. Extract the new version, union it
1045  * with the values we have from the scan, and start over. (There are
1046  * other reasons for the update to fail, but it's simple to treat them
1047  * the same.)
1048  */
1049  phtup = brinGetTupleForHeapBlock(state->bs_rmAccess, heapBlk, &phbuf,
1050  &offset, &phsz, BUFFER_LOCK_SHARE,
1051  NULL);
1052  /* the placeholder tuple must exist */
1053  if (phtup == NULL)
1054  elog(ERROR, "missing placeholder tuple");
1055  phtup = brin_copy_tuple(phtup, phsz);
1056  LockBuffer(phbuf, BUFFER_LOCK_UNLOCK);
1057 
1058  /* merge it into the tuple from the heap scan */
1059  union_tuples(state->bs_bdesc, state->bs_dtuple, phtup);
1060  }
1061 
1062  ReleaseBuffer(phbuf);
1063 }
1064 
1065 /*
1066  * Scan a complete BRIN index, and summarize each page range that's not already
1067  * summarized. The index and heap must have been locked by caller in at
1068  * least ShareUpdateExclusiveLock mode.
1069  *
1070  * For each new index tuple inserted, *numSummarized (if not NULL) is
1071  * incremented; for each existing tuple, *numExisting (if not NULL) is
1072  * incremented.
1073  */
1074 static void
1075 brinsummarize(Relation index, Relation heapRel, double *numSummarized,
1076  double *numExisting)
1077 {
1078  BrinRevmap *revmap;
1079  BrinBuildState *state = NULL;
1080  IndexInfo *indexInfo = NULL;
1081  BlockNumber heapNumBlocks;
1082  BlockNumber heapBlk;
1083  BlockNumber pagesPerRange;
1084  Buffer buf;
1085 
1086  revmap = brinRevmapInitialize(index, &pagesPerRange, NULL);
1087 
1088  /*
1089  * Scan the revmap to find unsummarized items.
1090  */
1091  buf = InvalidBuffer;
1092  heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
1093  for (heapBlk = 0; heapBlk < heapNumBlocks; heapBlk += pagesPerRange)
1094  {
1095  BrinTuple *tup;
1096  OffsetNumber off;
1097 
1098  CHECK_FOR_INTERRUPTS();
1099 
1100  tup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL,
1101  BUFFER_LOCK_SHARE, NULL);
1102  if (tup == NULL)
1103  {
1104  /* no revmap entry for this heap range. Summarize it. */
1105  if (state == NULL)
1106  {
1107  /* first time through */
1108  Assert(!indexInfo);
1109  state = initialize_brin_buildstate(index, revmap,
1110  pagesPerRange);
1111  indexInfo = BuildIndexInfo(index);
1112  }
1113  summarize_range(indexInfo, state, heapRel, heapBlk, heapNumBlocks);
1114 
1115  /* and re-initialize state for the next range */
1116  brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
1117 
1118  if (numSummarized)
1119  *numSummarized += 1.0;
1120  }
1121  else
1122  {
1123  if (numExisting)
1124  *numExisting += 1.0;
1125  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
1126  }
1127  }
1128 
1129  if (BufferIsValid(buf))
1130  ReleaseBuffer(buf);
1131 
1132  /* free resources */
1133  brinRevmapTerminate(revmap);
1134  if (state)
1135  {
1136  terminate_brin_buildstate(state);
1137  pfree(indexInfo);
1138  }
1139 }
1140 
1141 /*
1142  * Given a deformed tuple in the build state, convert it into the on-disk
1143  * format and insert it into the index, making the revmap point to it.
1144  */
1145 static void
1146 form_and_insert_tuple(BrinBuildState *state)
1147 {
1148  BrinTuple *tup;
1149  Size size;
1150 
1151  tup = brin_form_tuple(state->bs_bdesc, state->bs_currRangeStart,
1152  state->bs_dtuple, &size);
1153  brin_doinsert(state->bs_irel, state->bs_pagesPerRange, state->bs_rmAccess,
1154  &state->bs_currentInsertBuf, state->bs_currRangeStart,
1155  tup, size);
1156  state->bs_numtuples++;
1157 
1158  pfree(tup);
1159 }
1160 
1161 /*
1162  * Given two deformed tuples, adjust the first one so that it's consistent
1163  * with the summary values in both.
1164  */
1165 static void
1166 union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b)
1167 {
1168  int keyno;
1169  BrinMemTuple *db;
1170  MemoryContext cxt;
1171  MemoryContext oldcxt;
1172 
1173  /* Use our own memory context to avoid retail pfree */
1174  cxt = AllocSetContextCreate(CurrentMemoryContext,
1175  "brin union",
1176  ALLOCSET_DEFAULT_SIZES);
1177  oldcxt = MemoryContextSwitchTo(cxt);
1178  db = brin_deform_tuple(bdesc, b);
1179  MemoryContextSwitchTo(oldcxt);
1180 
1181  for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
1182  {
1183  FmgrInfo *unionFn;
1184  BrinValues *col_a = &a->bt_columns[keyno];
1185  BrinValues *col_b = &db->bt_columns[keyno];
1186 
1187  unionFn = index_getprocinfo(bdesc->bd_index, keyno + 1,
1188  BRIN_PROCNUM_UNION);
1189  FunctionCall3Coll(unionFn,
1190  bdesc->bd_index->rd_indcollation[keyno],
1191  PointerGetDatum(bdesc),
1192  PointerGetDatum(col_a),
1193  PointerGetDatum(col_b));
1194  }
1195 
1196  MemoryContextDelete(cxt);
1197 }
1198 
1199 /*
1200  * brin_vacuum_scan
1201  * Do a complete scan of the index during VACUUM.
1202  *
1203  * This routine scans the complete index looking for uncatalogued index pages,
1204  * i.e. those that might have been lost due to a crash after index extension
1205  * and such.
1206  */
1207 static void
1208 brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy)
1209 {
1210  bool vacuum_fsm = false;
1211  BlockNumber blkno;
1212 
1213  /*
1214  * Scan the index in physical order, and clean up any possible mess in
1215  * each page.
1216  */
1217  for (blkno = 0; blkno < RelationGetNumberOfBlocks(idxrel); blkno++)
1218  {
1219  Buffer buf;
1220 
1221  CHECK_FOR_INTERRUPTS();
1222 
1223  buf = ReadBufferExtended(idxrel, MAIN_FORKNUM, blkno,
1224  RBM_NORMAL, strategy);
1225 
1226  vacuum_fsm |= brin_page_cleanup(idxrel, buf);
1227 
1228  ReleaseBuffer(buf);
1229  }
1230 
1231  /*
1232  * If we made any change to the FSM, make sure the new info is visible all
1233  * the way to the top.
1234  */
1235  if (vacuum_fsm)
1236  FreeSpaceMapVacuum(idxrel);
1237 }