PostgreSQL Source Code, git master: brin.c
/*
 * brin.c
 *		Implementation of BRIN indexes for Postgres
 *
 * See src/backend/access/brin/README for details.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *		src/backend/access/brin/brin.c
 *
 * TODO
 *		* ScalarArrayOpExpr (amsearcharray -> SK_SEARCHARRAY)
 */
#include "postgres.h"

#include "access/brin.h"
#include "access/brin_page.h"
#include "access/brin_pageops.h"
#include "access/brin_xlog.h"
#include "access/relation.h"
#include "access/reloptions.h"
#include "access/relscan.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "tcop/tcopprot.h"
#include "utils/acl.h"
#include "utils/datum.h"
#include "utils/fmgrprotos.h"
#include "utils/guc.h"
#include "utils/index_selfuncs.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tuplesort.h"

/* Magic numbers for parallel state sharing */
#define PARALLEL_KEY_BRIN_SHARED		UINT64CONST(0xB000000000000001)
#define PARALLEL_KEY_TUPLESORT			UINT64CONST(0xB000000000000002)
#define PARALLEL_KEY_QUERY_TEXT			UINT64CONST(0xB000000000000003)
#define PARALLEL_KEY_WAL_USAGE			UINT64CONST(0xB000000000000004)
#define PARALLEL_KEY_BUFFER_USAGE		UINT64CONST(0xB000000000000005)

/*
 * Status for index builds performed in parallel.  This is allocated in a
 * dynamic shared memory segment.
 */
typedef struct BrinShared
{
	/*
	 * These fields are not modified during the build.  They primarily exist
	 * for the benefit of worker processes that need to create state
	 * corresponding to that used by the leader.
	 */
	Oid			heaprelid;
	Oid			indexrelid;
	bool		isconcurrent;
	BlockNumber pagesPerRange;
	int			scantuplesortstates;

	/* Query ID, for report in worker processes */
	int64		queryid;

	/*
	 * workersdonecv is used to monitor the progress of workers.  All parallel
	 * participants must indicate that they are done before leader can use
	 * results built by the workers (and before leader can write the data into
	 * the index).
	 */
	ConditionVariable workersdonecv;

	/*
	 * mutex protects all fields before heapdesc.
	 *
	 * These fields contain status information of interest to BRIN index
	 * builds that must work just the same when an index is built in parallel.
	 */
	slock_t		mutex;

	/*
	 * Mutable state that is maintained by workers, and reported back to
	 * leader at end of the scans.
	 *
	 * nparticipantsdone is number of worker processes finished.
	 *
	 * reltuples is the total number of input heap tuples.
	 *
	 * indtuples is the total number of tuples that made it into the index.
	 */
	int			nparticipantsdone;
	double		reltuples;
	double		indtuples;

	/*
	 * ParallelTableScanDescData data follows. Can't directly embed here, as
	 * implementations of the parallel table scan desc interface might need
	 * stronger alignment.
	 */
} BrinShared;

/*
 * Return pointer to a BrinShared's parallel table scan.
 *
 * c.f. shm_toc_allocate as to why BUFFERALIGN is used, rather than just
 * MAXALIGN.
 */
#define ParallelTableScanFromBrinShared(shared) \
	(ParallelTableScanDesc) ((char *) (shared) + BUFFERALIGN(sizeof(BrinShared)))

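/*
 * Illustrative sketch (not part of the original source): the shm_toc chunk
 * keyed by PARALLEL_KEY_BRIN_SHARED holds the BrinShared struct followed, at
 * the next BUFFERALIGN boundary, by the parallel table scan descriptor.
 * Assuming a "shared" pointer obtained from shm_toc_lookup, the macro above
 * amounts to:
 *
 *		BrinShared *shared = shm_toc_lookup(toc, PARALLEL_KEY_BRIN_SHARED, false);
 *		ParallelTableScanDesc pscan;
 *
 *		pscan = (ParallelTableScanDesc)
 *			((char *) shared + BUFFERALIGN(sizeof(BrinShared)));
 *
 * which is exactly ParallelTableScanFromBrinShared(shared).
 */
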
/*
 * Status for leader in parallel index build.
 */
typedef struct BrinLeader
{
	/* parallel context itself */
	ParallelContext *pcxt;

	/*
	 * nparticipanttuplesorts is the exact number of worker processes
	 * successfully launched, plus one leader process if it participates as a
	 * worker (only DISABLE_LEADER_PARTICIPATION builds avoid leader
	 * participating as a worker).
	 */
	int			nparticipanttuplesorts;

	/*
	 * Leader process convenience pointers to shared state (leader avoids TOC
	 * lookups).
	 *
	 * brinshared is the shared state for entire build.  sharedsort is the
	 * shared, tuplesort-managed state passed to each process tuplesort.
	 * snapshot is the snapshot used by the scan iff an MVCC snapshot is
	 * required.
	 */
	BrinShared *brinshared;
	Sharedsort *sharedsort;
	Snapshot	snapshot;
	WalUsage   *walusage;
	BufferUsage *bufferusage;
} BrinLeader;

/*
 * We use a BrinBuildState during initial construction of a BRIN index.
 * The running state is kept in a BrinMemTuple.
 */
typedef struct BrinBuildState
{
	Relation	bs_irel;
	double		bs_numtuples;
	double		bs_reltuples;
	Buffer		bs_currentInsertBuf;
	BlockNumber bs_pagesPerRange;
	BlockNumber bs_currRangeStart;
	BlockNumber bs_maxRangeStart;
	BrinRevmap *bs_rmAccess;
	BrinDesc   *bs_bdesc;
	BrinMemTuple *bs_dtuple;

	BrinTuple  *bs_emptyTuple;
	Size		bs_emptyTupleLen;
	MemoryContext bs_context;

	/*
	 * bs_leader is only present when a parallel index build is performed, and
	 * only in the leader process. (Actually, only the leader process has a
	 * BrinBuildState.)
	 */
	BrinLeader *bs_leader;
	int			bs_worker_id;

	/*
	 * The sortstate is used by workers (including the leader). It has to be
	 * part of the build state, because that's the only thing passed to the
	 * build callback etc.
	 */
	Tuplesortstate *bs_sortstate;
} BrinBuildState;

/*
 * We use a BrinInsertState to capture running state spanning multiple
 * brininsert invocations, within the same command.
 */
typedef struct BrinInsertState
{
	BrinRevmap *bis_rmAccess;
	BrinDesc   *bis_desc;
	BlockNumber bis_pages_per_range;
} BrinInsertState;

/*
 * Struct used as "opaque" during index scans
 */
typedef struct BrinOpaque
{
	BlockNumber bo_pagesPerRange;
	BrinRevmap *bo_rmAccess;
	BrinDesc   *bo_bdesc;
} BrinOpaque;

#define BRIN_ALL_BLOCKRANGES	InvalidBlockNumber

static BrinBuildState *initialize_brin_buildstate(Relation idxRel,
												  BrinRevmap *revmap,
												  BlockNumber pagesPerRange,
												  BlockNumber tablePages);
static BrinInsertState *initialize_brin_insertstate(Relation idxRel, IndexInfo *indexInfo);
static void terminate_brin_buildstate(BrinBuildState *state);
static void brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange,
						  bool include_partial, double *numSummarized, double *numExisting);
static void form_and_insert_tuple(BrinBuildState *state);
static void form_and_spill_tuple(BrinBuildState *state);
static void union_tuples(BrinDesc *bdesc, BrinMemTuple *a,
						 BrinTuple *b);
static void brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy);
static bool add_values_to_range(Relation idxRel, BrinDesc *bdesc,
								BrinMemTuple *dtup, const Datum *values, const bool *nulls);
static bool check_null_keys(BrinValues *bval, ScanKey *nullkeys, int nnullkeys);
static void brin_fill_empty_ranges(BrinBuildState *state,
								   BlockNumber prevRange, BlockNumber nextRange);

/* parallel index builds */
static void _brin_begin_parallel(BrinBuildState *buildstate, Relation heap,
								 Relation index,
								 bool isconcurrent, int request);
static void _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state);
static Size _brin_parallel_estimate_shared(Relation heap, Snapshot snapshot);
static double _brin_parallel_merge(BrinBuildState *state);
static void _brin_leader_participate_as_worker(BrinBuildState *buildstate,
											   Relation heap, Relation index);
static void _brin_parallel_scan_and_build(BrinBuildState *state,
										  BrinShared *brinshared,
										  Sharedsort *sharedsort,
										  Relation heap, Relation index,
										  int sortmem, bool progress);

/*
 * BRIN handler function: return IndexAmRoutine with access method parameters
 * and callbacks.
 */
Datum
brinhandler(PG_FUNCTION_ARGS)
{
	static const IndexAmRoutine amroutine = {
		.type = T_IndexAmRoutine,
		.amstrategies = 0,
		.amsupport = BRIN_LAST_OPTIONAL_PROCNUM,
		.amoptsprocnum = BRIN_PROCNUM_OPTIONS,
		.amcanorder = false,
		.amcanorderbyop = false,
		.amcanhash = false,
		.amconsistentequality = false,
		.amconsistentordering = false,
		.amcanbackward = false,
		.amcanunique = false,
		.amcanmulticol = true,
		.amoptionalkey = true,
		.amsearcharray = false,
		.amsearchnulls = true,
		.amstorage = true,
		.amclusterable = false,
		.ampredlocks = false,
		.amcanparallel = false,
		.amcanbuildparallel = true,
		.amcaninclude = false,
		.amusemaintenanceworkmem = false,
		.amsummarizing = true,
		.amparallelvacuumoptions =
			VACUUM_OPTION_PARALLEL_CLEANUP,
		.amkeytype = InvalidOid,

		.ambuild = brinbuild,
		.ambuildempty = brinbuildempty,
		.aminsert = brininsert,
		.aminsertcleanup = brininsertcleanup,
		.ambulkdelete = brinbulkdelete,
		.amvacuumcleanup = brinvacuumcleanup,
		.amcanreturn = NULL,
		.amcostestimate = brincostestimate,
		.amgettreeheight = NULL,
		.amoptions = brinoptions,
		.amproperty = NULL,
		.ambuildphasename = NULL,
		.amvalidate = brinvalidate,
		.amadjustmembers = NULL,
		.ambeginscan = brinbeginscan,
		.amrescan = brinrescan,
		.amgettuple = NULL,
		.amgetbitmap = bringetbitmap,
		.amendscan = brinendscan,
		.ammarkpos = NULL,
		.amrestrpos = NULL,
		.amestimateparallelscan = NULL,
		.aminitparallelscan = NULL,
		.amparallelrescan = NULL,
		.amtranslatestrategy = NULL,
		.amtranslatecmptype = NULL,
	};

	PG_RETURN_POINTER(unconstify(IndexAmRoutine *, &amroutine));
}

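/*
 * Illustrative example (not part of the original source): this handler is
 * reached whenever an index is created with the brin access method, e.g.
 *
 *		CREATE INDEX tab_brin_idx ON tab USING brin (col);
 *
 * after which inserts go through brininsert and bitmap scans through
 * bringetbitmap, per the callbacks registered above.
 */
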
/*
 * Initialize a BrinInsertState to maintain state to be used across multiple
 * tuple inserts, within the same command.
 */
static BrinInsertState *
initialize_brin_insertstate(Relation idxRel, IndexInfo *indexInfo)
{
	BrinInsertState *bistate;
	MemoryContext oldcxt;

	oldcxt = MemoryContextSwitchTo(indexInfo->ii_Context);
	bistate = palloc0_object(BrinInsertState);
	bistate->bis_desc = brin_build_desc(idxRel);
	bistate->bis_rmAccess = brinRevmapInitialize(idxRel,
												 &bistate->bis_pages_per_range);
	indexInfo->ii_AmCache = bistate;
	MemoryContextSwitchTo(oldcxt);

	return bistate;
}

/*
 * A tuple in the heap is being inserted.  To keep a brin index up to date,
 * we need to obtain the relevant index tuple and compare its stored values
 * with those of the new tuple.  If the tuple values are not consistent with
 * the summary tuple, we need to update the index tuple.
 *
 * If autosummarization is enabled, check if we need to summarize the previous
 * page range.
 *
 * If the range is not currently summarized (i.e. the revmap returns NULL for
 * it), there's nothing to do for this tuple.
 */
bool
brininsert(Relation idxRel, Datum *values, bool *nulls,
		   ItemPointer heaptid, Relation heapRel,
		   IndexUniqueCheck checkUnique,
		   bool indexUnchanged,
		   IndexInfo *indexInfo)
{
	BlockNumber pagesPerRange;
	BlockNumber origHeapBlk;
	BlockNumber heapBlk;
	BrinInsertState *bistate = (BrinInsertState *) indexInfo->ii_AmCache;
	BrinRevmap *revmap;
	BrinDesc   *bdesc;
	Buffer		buf = InvalidBuffer;
	MemoryContext tupcxt = NULL;
	MemoryContext oldcxt = CurrentMemoryContext;
	bool		autosummarize = BrinGetAutoSummarize(idxRel);

	/*
	 * If first time through in this statement, initialize the insert state
	 * that we keep for all the inserts in the command.
	 */
	if (!bistate)
		bistate = initialize_brin_insertstate(idxRel, indexInfo);

	revmap = bistate->bis_rmAccess;
	bdesc = bistate->bis_desc;
	pagesPerRange = bistate->bis_pages_per_range;

	/*
	 * origHeapBlk is the block number where the insertion occurred.  heapBlk
	 * is the first block in the corresponding page range.
	 */
	origHeapBlk = ItemPointerGetBlockNumber(heaptid);
	heapBlk = (origHeapBlk / pagesPerRange) * pagesPerRange;
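	/*
	 * Illustrative worked example (not part of the original source): with
	 * the default pages_per_range = 128, an insertion into heap block 300
	 * maps to the range starting at block (300 / 128) * 128 = 256, i.e. the
	 * third range (blocks 256..383).
	 */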
379
380 for (;;)
381 {
382 bool need_insert = false;
383 OffsetNumber off;
386
388
389 /*
390 * If auto-summarization is enabled and we just inserted the first
391 * tuple into the first block of a new non-first page range, request a
392 * summarization run of the previous range.
393 */
394 if (autosummarize &&
395 heapBlk > 0 &&
396 heapBlk == origHeapBlk &&
398 {
399 BlockNumber lastPageRange = heapBlk - 1;
401
405 if (!lastPageTuple)
406 {
407 bool recorded;
408
412 if (!recorded)
413 ereport(LOG,
415 errmsg("request for BRIN range summarization for index \"%s\" page %u was not recorded",
417 lastPageRange)));
418 }
419 else
421 }
422
423 brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off,
425
426 /* if range is unsummarized, there's nothing to do */
427 if (!brtup)
428 break;
429
430 /* First time through in this brininsert call? */
431 if (tupcxt == NULL)
432 {
434 "brininsert cxt",
437 }
438
440
442
443 if (!need_insert)
444 {
445 /*
446 * The tuple is consistent with the new values, so there's nothing
447 * to do.
448 */
450 }
451 else
452 {
453 Page page = BufferGetPage(buf);
454 ItemId lp = PageGetItemId(page, off);
455 Size origsz;
457 Size newsz;
459 bool samepage;
460
461 /*
462 * Make a copy of the old tuple, so that we can compare it after
463 * re-acquiring the lock.
464 */
467
468 /*
469 * Before releasing the lock, check if we can attempt a same-page
470 * update. Another process could insert a tuple concurrently in
471 * the same page though, so downstream we must be prepared to cope
472 * if this turns out to not be possible after all.
473 */
474 newtup = brin_form_tuple(bdesc, heapBlk, dtup, &newsz);
477
478 /*
479 * Try to update the tuple. If this doesn't work for whatever
480 * reason, we need to restart from the top; the revmap might be
481 * pointing at a different tuple for this block now, so we need to
482 * recompute to ensure both our new heap tuple and the other
483 * inserter's are covered by the combined tuple. It might be that
484 * we don't need to update at all.
485 */
486 if (!brin_doupdate(idxRel, pagesPerRange, revmap, heapBlk,
487 buf, off, origtup, origsz, newtup, newsz,
488 samepage))
489 {
490 /* no luck; start over */
492 continue;
493 }
494 }
495
496 /* success! */
497 break;
498 }
499
500 if (BufferIsValid(buf))
503 if (tupcxt != NULL)
505
506 return false;
507}
508
/*
 * Callback to clean up the BrinInsertState once all tuple inserts are done.
 */
void
brininsertcleanup(Relation index, IndexInfo *indexInfo)
{
	BrinInsertState *bistate = (BrinInsertState *) indexInfo->ii_AmCache;

	/* bail out if cache not initialized */
	if (bistate == NULL)
		return;

	/* do this first to avoid dangling pointer if we fail partway through */
	indexInfo->ii_AmCache = NULL;

	/*
	 * Clean up the revmap.  Note that the brinDesc has already been cleaned
	 * up as part of its own memory context.
	 */
	brinRevmapTerminate(bistate->bis_rmAccess);
	pfree(bistate);
}

/*
 * Initialize state for a BRIN index scan.
 *
 * We read the metapage here to determine the pages-per-range number that this
 * index was built with.  Note that since this cannot be changed while we're
 * holding lock on index, it's not necessary to recompute it during brinrescan.
 */
IndexScanDesc
brinbeginscan(Relation r, int nkeys, int norderbys)
{
	IndexScanDesc scan;
	BrinOpaque *opaque;

	scan = RelationGetIndexScan(r, nkeys, norderbys);

	opaque = palloc_object(BrinOpaque);
	opaque->bo_rmAccess = brinRevmapInitialize(r, &opaque->bo_pagesPerRange);
	opaque->bo_bdesc = brin_build_desc(r);
	scan->opaque = opaque;

	return scan;
}

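/*
 * Illustrative note (not part of the original source): because amgettuple is
 * NULL and amgetbitmap is set in the handler above, a BRIN index is only
 * ever used for bitmap scans.  For example,
 *
 *		EXPLAIN SELECT * FROM tab WHERE col BETWEEN 100 AND 200;
 *
 * would show a Bitmap Heap Scan fed by a Bitmap Index Scan on the BRIN
 * index, never a plain index scan.
 */
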
/*
 * Execute the index scan.
 *
 * This works by reading index TIDs from the revmap, and obtaining the index
 * tuples pointed to by them; the summary values in the index tuples are
 * compared to the scan keys.  We return into the TID bitmap all the pages in
 * ranges corresponding to index tuples that match the scan keys.
 *
 * If a TID from the revmap is read as InvalidTID, we know that range is
 * unsummarized.  Pages in those ranges need to be returned regardless of scan
 * keys.
 */
int64
bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
{
	Relation	idxRel = scan->indexRelation;
	Buffer		buf = InvalidBuffer;
	BrinDesc   *bdesc;
	Oid			heapOid;
	Relation	heapRel;
	BrinOpaque *opaque;
	BlockNumber nblocks;
	int64		totalpages = 0;
	FmgrInfo   *consistentFn;
	MemoryContext oldcxt;
	MemoryContext perRangeCxt;
	BrinMemTuple *dtup;
	BrinTuple  *btup = NULL;
	Size		btupsz = 0;
	ScanKey   **keys,
			  **nullkeys;
	int		   *nkeys,
			   *nnullkeys;
	char	   *ptr;
	Size		len;
	char	   *tmp PG_USED_FOR_ASSERTS_ONLY;

	opaque = (BrinOpaque *) scan->opaque;
	bdesc = opaque->bo_bdesc;
	pgstat_count_index_scan(idxRel);
	if (scan->instrument)
		scan->instrument->nsearches++;

	/*
	 * We need to know the size of the table so that we know how long to
	 * iterate on the revmap.
	 */
	heapOid = IndexGetRelation(RelationGetRelid(idxRel), false);
	heapRel = table_open(heapOid, AccessShareLock);
	nblocks = RelationGetNumberOfBlocks(heapRel);
	table_close(heapRel, AccessShareLock);

	/*
	 * Make room for the consistent support procedures of indexed columns. We
	 * don't look them up here; we do that lazily the first time we see a scan
	 * key reference each of them. We rely on zeroing fn_oid to InvalidOid.
	 */
	consistentFn = palloc0_array(FmgrInfo, bdesc->bd_tupdesc->natts);

	/*
	 * Make room for per-attribute lists of scan keys that we'll pass to the
	 * consistent support procedure. We don't know which attributes have scan
	 * keys, so we allocate space for all attributes. That may use more memory
	 * but it's probably cheaper than determining which attributes are used.
	 *
	 * We keep null and regular keys separate, so that we can pass just the
	 * regular keys to the consistent function easily.
	 *
	 * To reduce the allocation overhead, we allocate one big chunk and then
	 * carve it into smaller arrays ourselves. All the pieces have exactly the
	 * same lifetime, so that's OK.
	 *
	 * XXX The widest index can have 32 attributes, so the amount of wasted
	 * memory is negligible. We could invent a more compact approach (with
	 * just space for used attributes) but that would make the matching more
	 * complex so it's not a good trade-off.
	 */
	len =
		MAXALIGN(sizeof(ScanKey *) * bdesc->bd_tupdesc->natts) +	/* regular keys */
		MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys) * bdesc->bd_tupdesc->natts +
		MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts) +
		MAXALIGN(sizeof(ScanKey *) * bdesc->bd_tupdesc->natts) +	/* NULL keys */
		MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys) * bdesc->bd_tupdesc->natts +
		MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts);

	ptr = palloc(len);
	tmp = ptr;

	keys = (ScanKey **) ptr;
	ptr += MAXALIGN(sizeof(ScanKey *) * bdesc->bd_tupdesc->natts);

	nullkeys = (ScanKey **) ptr;
	ptr += MAXALIGN(sizeof(ScanKey *) * bdesc->bd_tupdesc->natts);

	nkeys = (int *) ptr;
	ptr += MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts);

	nnullkeys = (int *) ptr;
	ptr += MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts);

	for (int i = 0; i < bdesc->bd_tupdesc->natts; i++)
	{
		keys[i] = (ScanKey *) ptr;
		ptr += MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys);

		nullkeys[i] = (ScanKey *) ptr;
		ptr += MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys);
	}

	Assert(tmp + len == ptr);
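	/*
	 * Worked example (illustrative, not part of the original source): for an
	 * index with natts = 2 and a scan with numberOfKeys = 3, the single
	 * chunk holds two ScanKey-pointer arrays (keys, nullkeys) of 2 entries
	 * each, two int arrays (nkeys, nnullkeys) of 2 entries each, and, per
	 * attribute, one 3-entry ScanKey array for regular keys plus another for
	 * IS [NOT] NULL keys, every piece rounded up by MAXALIGN.
	 */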

	/* zero the number of keys */
	memset(nkeys, 0, sizeof(int) * bdesc->bd_tupdesc->natts);
	memset(nnullkeys, 0, sizeof(int) * bdesc->bd_tupdesc->natts);

	/* Preprocess the scan keys - split them into per-attribute arrays. */
	for (int keyno = 0; keyno < scan->numberOfKeys; keyno++)
	{
		ScanKey		key = &scan->keyData[keyno];
		AttrNumber	keyattno = key->sk_attno;

		/*
		 * The collation of the scan key must match the collation used in the
		 * index column (but only if the search is not IS NULL/ IS NOT NULL).
		 * Otherwise we shouldn't be using this index ...
		 */
		Assert((key->sk_flags & SK_ISNULL) ||
			   (key->sk_collation ==
				TupleDescAttr(bdesc->bd_tupdesc,
							  keyattno - 1)->attcollation));

		/*
		 * First time we see this index attribute, so init as needed.
		 *
		 * This is a bit of an overkill - we don't know how many scan keys are
		 * there for this attribute, so we simply allocate the largest number
		 * possible (as if all keys were for this attribute). This may waste a
		 * bit of memory, but we only expect small number of scan keys in
		 * general, so this should be negligible, and repeated repalloc calls
		 * are not free either.
		 */
		if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
		{
			FmgrInfo   *tmp;

			/* First time we see this attribute, so no key/null keys. */
			Assert(nkeys[keyattno - 1] == 0);
			Assert(nnullkeys[keyattno - 1] == 0);

			tmp = index_getprocinfo(idxRel, keyattno,
									BRIN_PROCNUM_CONSISTENT);
			fmgr_info_copy(&consistentFn[keyattno - 1], tmp,
						   CurrentMemoryContext);
		}

		/* Add key to the proper per-attribute array. */
		if (key->sk_flags & SK_ISNULL)
		{
			nullkeys[keyattno - 1][nnullkeys[keyattno - 1]] = key;
			nnullkeys[keyattno - 1]++;
		}
		else
		{
			keys[keyattno - 1][nkeys[keyattno - 1]] = key;
			nkeys[keyattno - 1]++;
		}
	}

	/* allocate an initial in-memory tuple, out of the per-range memcxt */
	dtup = brin_new_memtuple(bdesc);

	/*
	 * Setup and use a per-range memory context, which is reset every time we
	 * loop below. This avoids having to free the tuples within the loop.
	 */
	perRangeCxt = AllocSetContextCreate(CurrentMemoryContext,
										"bringetbitmap cxt",
										ALLOCSET_DEFAULT_SIZES);
	oldcxt = MemoryContextSwitchTo(perRangeCxt);

	/*
	 * Now scan the revmap.  We start by querying for heap page 0,
	 * incrementing by the number of pages per range; this gives us a full
	 * view of the table.  We make use of uint64 for heapBlk as a BlockNumber
	 * could wrap for tables with close to 2^32 pages.
	 */
	for (uint64 heapBlk = 0; heapBlk < nblocks; heapBlk += opaque->bo_pagesPerRange)
	{
		bool		addrange;
		bool		gottuple = false;
		BrinTuple  *tup;
		OffsetNumber off;
		Size		size;

		CHECK_FOR_INTERRUPTS();

		MemoryContextReset(perRangeCxt);

		tup = brinGetTupleForHeapBlock(opaque->bo_rmAccess, heapBlk, &buf,
									   &off, &size, BUFFER_LOCK_SHARE);
		if (tup)
		{
			gottuple = true;
			btup = brin_copy_tuple(tup, size, btup, &btupsz);
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		}

		/*
		 * For page ranges with no indexed tuple, we must return the whole
		 * range; otherwise, compare it to the scan keys.
		 */
		if (!gottuple)
		{
			addrange = true;
		}
		else
		{
			dtup = brin_deform_tuple(bdesc, btup, dtup);
			if (dtup->bt_placeholder)
			{
				/*
				 * Placeholder tuples are always returned, regardless of the
				 * values stored in them.
				 */
				addrange = true;
			}
			else
			{
				int			attno;

				/*
				 * Compare scan keys with summary values stored for the range.
				 * If scan keys are matched, the page range must be added to
				 * the bitmap.  We initially assume the range needs to be
				 * added; in particular this serves the case where there are
				 * no keys.
				 */
				addrange = true;
				for (attno = 1; attno <= bdesc->bd_tupdesc->natts; attno++)
				{
					BrinValues *bval;
					Datum		add;
					Oid			collation;

					/*
					 * skip attributes without any scan keys (both regular and
					 * IS [NOT] NULL)
					 */
					if (nkeys[attno - 1] == 0 && nnullkeys[attno - 1] == 0)
						continue;

					bval = &dtup->bt_columns[attno - 1];

					/*
					 * If the BRIN tuple indicates that this range is empty,
					 * we can skip it: there's nothing to match.  We don't
					 * need to examine the next columns.
					 */
					if (dtup->bt_empty_range)
					{
						addrange = false;
						break;
					}

					/*
					 * First check if there are any IS [NOT] NULL scan keys,
					 * and if we're violating them.  In that case we can
					 * terminate early, without invoking the support function.
					 *
					 * As there may be more keys, we can only determine
					 * mismatch within this loop.
					 */
					if (bdesc->bd_info[attno - 1]->oi_regular_nulls &&
						!check_null_keys(bval, nullkeys[attno - 1],
										 nnullkeys[attno - 1]))
					{
						/*
						 * If any of the IS [NOT] NULL keys failed, the page
						 * range as a whole can't pass.  So terminate the loop.
						 */
						addrange = false;
						break;
					}

					/*
					 * So either there are no IS [NOT] NULL keys, or all
					 * passed.  If there are no regular scan keys, we're done -
					 * the page range matches.  If there are regular keys, but
					 * the page range is marked as 'all nulls' it can't
					 * possibly pass (we're assuming the operators are
					 * strict).
					 */

					/* No regular scan keys - page range as a whole passes. */
					if (!nkeys[attno - 1])
						continue;

					Assert((nkeys[attno - 1] > 0) &&
						   (nkeys[attno - 1] <= scan->numberOfKeys));

					/* If it is all nulls, it cannot possibly be consistent. */
					if (bval->bv_allnulls)
					{
						addrange = false;
						break;
					}

					/*
					 * Collation from the first key (has to be the same for
					 * all keys for the same attribute).
					 */
					collation = keys[attno - 1][0]->sk_collation;

					/*
					 * Check whether the scan key is consistent with the page
					 * range values; if so, have the pages in the range added
					 * to the output bitmap.
					 *
					 * The opclass may or may not support processing of
					 * multiple scan keys.  We can determine that based on the
					 * number of arguments - functions with extra parameter
					 * (number of scan keys) do support this, otherwise we
					 * have to simply pass the scan keys one by one.
					 */
					if (consistentFn[attno - 1].fn_nargs >= 4)
					{
						/* Check all keys at once */
						add = FunctionCall4Coll(&consistentFn[attno - 1],
												collation,
												PointerGetDatum(bdesc),
												PointerGetDatum(bval),
												PointerGetDatum(keys[attno - 1]),
												Int32GetDatum(nkeys[attno - 1]));
						addrange = DatumGetBool(add);
					}
					else
					{
						/*
						 * Check keys one by one
						 *
						 * When there are multiple scan keys, failure to meet
						 * the criteria for a single one of them is enough to
						 * discard the range as a whole, so break out of the
						 * loop as soon as a false return value is obtained.
						 */
						int			keyno;

						for (keyno = 0; keyno < nkeys[attno - 1]; keyno++)
						{
							add = FunctionCall3Coll(&consistentFn[attno - 1],
													keys[attno - 1][keyno]->sk_collation,
													PointerGetDatum(bdesc),
													PointerGetDatum(bval),
													PointerGetDatum(keys[attno - 1][keyno]));
							addrange = DatumGetBool(add);
							if (!addrange)
								break;
						}
					}

					/*
					 * If we found a scan key eliminating the range, no need
					 * to check additional ones.
					 */
					if (!addrange)
						break;
				}
			}
		}

		/* add the pages in the range to the output bitmap, if needed */
		if (addrange)
		{
			uint64		pageno;

			for (pageno = heapBlk;
				 pageno <= Min(nblocks, heapBlk + opaque->bo_pagesPerRange) - 1;
				 pageno++)
			{
				MemoryContextSwitchTo(oldcxt);
				tbm_add_page(tbm, pageno);
				totalpages++;
				MemoryContextSwitchTo(perRangeCxt);
			}
		}
	}

	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(perRangeCxt);

	if (buf != InvalidBuffer)
		ReleaseBuffer(buf);

	/*
	 * XXX We have an approximation of the number of *pages* that our scan
	 * returns, but we don't have a precise idea of the number of heap tuples
	 * involved.
	 */
	return totalpages * 10;
}

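/*
 * Illustrative note (not part of the original source): the two calling
 * conventions above correspond to the two styles of consistent support
 * procedures.  For example, the minmax opclasses implement a three-argument
 * brin_minmax_consistent(bdesc, column, key) and are called once per scan
 * key, while the minmax-multi and bloom opclasses take an extra nkeys
 * argument and receive the whole per-attribute key array in a single call.
 */
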
955
956/*
957 * Re-initialize state for a BRIN index scan
958 */
959void
961 ScanKey orderbys, int norderbys)
962{
963 /*
964 * Other index AMs preprocess the scan keys at this point, or sometime
965 * early during the scan; this lets them optimize by removing redundant
966 * keys, or doing early returns when they are impossible to satisfy; see
967 * _bt_preprocess_keys for an example. Something like that could be added
968 * here someday, too.
969 */
970
971 if (scankey && scan->numberOfKeys > 0)
972 memcpy(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData));
973}
974
/*
 * Close down a BRIN index scan
 */
void
brinendscan(IndexScanDesc scan)
{
	BrinOpaque *opaque = (BrinOpaque *) scan->opaque;

	brinRevmapTerminate(opaque->bo_rmAccess);
	brin_free_desc(opaque->bo_bdesc);
	pfree(opaque);
}

/*
 * Per-heap-tuple callback for table_index_build_scan.
 *
 * Note we don't worry about the page range at the end of the table here; it is
 * present in the build state struct after we're called the last time, but not
 * inserted into the index.  Caller must ensure to do so, if appropriate.
 */
static void
brinbuildCallback(Relation index,
				  ItemPointer tid,
				  Datum *values,
				  bool *isnull,
				  bool tupleIsAlive,
				  void *brstate)
{
	BrinBuildState *state = (BrinBuildState *) brstate;
	BlockNumber thisblock;

	thisblock = ItemPointerGetBlockNumber(tid);

	/*
	 * If we're in a block that belongs to a future range, summarize what
	 * we've got and start afresh.  Note the scan might have skipped many
	 * pages, if they were devoid of live tuples; make sure to insert index
	 * tuples for those too.
	 */
	while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
	{

		BRIN_elog((DEBUG2,
				   "brinbuildCallback: completed a range: %u--%u",
				   state->bs_currRangeStart,
				   state->bs_currRangeStart + state->bs_pagesPerRange));

		/* create the index tuple and insert it */
		form_and_insert_tuple(state);

		/* set state to correspond to the next range */
		state->bs_currRangeStart += state->bs_pagesPerRange;

		/* re-initialize state for it */
		brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
	}

	/* Accumulate the current tuple into the running state */
	(void) add_values_to_range(index, state->bs_bdesc, state->bs_dtuple,
							   values, isnull);
}

1036
1037/*
1038 * Per-heap-tuple callback for table_index_build_scan with parallelism.
1039 *
1040 * A version of the callback used by parallel index builds. The main difference
1041 * is that instead of writing the BRIN tuples into the index, we write them
1042 * into a shared tuplesort, and leave the insertion up to the leader (which may
1043 * reorder them a bit etc.). The callback also does not generate empty ranges,
1044 * those will be added by the leader when merging results from workers.
1045 */
1046static void
1048 ItemPointer tid,
1049 Datum *values,
1050 bool *isnull,
1051 bool tupleIsAlive,
1052 void *brstate)
1053{
1056
1058
1059 /*
1060 * If we're in a block that belongs to a different range, summarize what
1061 * we've got and start afresh. Note the scan might have skipped many
1062 * pages, if they were devoid of live tuples; we do not create empty BRIN
1063 * ranges here - the leader is responsible for filling them in.
1064 *
1065 * Unlike serial builds, parallel index builds allow synchronized seqscans
1066 * (because that's what parallel scans do). This means the block may wrap
1067 * around to the beginning of the relation, so the condition needs to
1068 * check for both future and past ranges.
1069 */
1070 if ((thisblock < state->bs_currRangeStart) ||
1071 (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1))
1072 {
1073
1075 "brinbuildCallbackParallel: completed a range: %u--%u",
1076 state->bs_currRangeStart,
1077 state->bs_currRangeStart + state->bs_pagesPerRange));
1078
1079 /* create the index tuple and write it into the tuplesort */
1081
1082 /*
1083 * Set state to correspond to the next range (for this block).
1084 *
1085 * This skips ranges that are either empty (and so we don't get any
1086 * tuples to summarize), or processed by other workers. We can't
1087 * differentiate those cases here easily, so we leave it up to the
1088 * leader to fill empty ranges where needed.
1089 */
1090 state->bs_currRangeStart
1091 = state->bs_pagesPerRange * (thisblock / state->bs_pagesPerRange);
1092
1093 /* re-initialize state for it */
1094 brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);
1095 }
1096
1097 /* Accumulate the current tuple into the running state */
1098 (void) add_values_to_range(index, state->bs_bdesc, state->bs_dtuple,
1099 values, isnull);
1100}
1101
/*
 * brinbuild() -- build a new BRIN index.
 */
IndexBuildResult *
brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
	IndexBuildResult *result;
	double		reltuples;
	double		idxtuples;
	BrinRevmap *revmap;
	BrinBuildState *state;
	Buffer		meta;
	BlockNumber pagesPerRange;

	/*
	 * We expect to be called exactly once for any index relation.
	 */
	if (RelationGetNumberOfBlocks(index) != 0)
		elog(ERROR, "index \"%s\" already contains data",
			 RelationGetRelationName(index));

	/*
	 * Critical section not required, because on error the creation of the
	 * whole relation will be rolled back.
	 */

	meta = ExtendBufferedRel(BMR_REL(index), MAIN_FORKNUM, NULL,
							 EB_LOCK_FIRST | EB_SKIP_EXTENSION_LOCK);
	Assert(BufferGetBlockNumber(meta) == BRIN_METAPAGE_BLKNO);

	brin_metapage_init(BufferGetPage(meta), BrinGetPagesPerRange(index),
					   BRIN_CURRENT_VERSION);
	MarkBufferDirty(meta);

	if (RelationNeedsWAL(index))
	{
		xl_brin_createidx xlrec;
		XLogRecPtr	recptr;
		Page		page;

		xlrec.version = BRIN_CURRENT_VERSION;
		xlrec.pagesPerRange = BrinGetPagesPerRange(index);

		XLogBeginInsert();
		XLogRegisterData(&xlrec, SizeOfBrinCreateIdx);
		XLogRegisterBuffer(0, meta, REGBUF_WILL_INIT | REGBUF_STANDARD);

		recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_CREATE_INDEX);

		page = BufferGetPage(meta);
		PageSetLSN(page, recptr);
	}

	UnlockReleaseBuffer(meta);

	/*
	 * Initialize our state, including the deformed tuple state.
	 */
	revmap = brinRevmapInitialize(index, &pagesPerRange);
	state = initialize_brin_buildstate(index, revmap, pagesPerRange,
									   RelationGetNumberOfBlocks(heap));

	/*
	 * Attempt to launch parallel worker scan when required
	 *
	 * XXX plan_create_index_workers makes the number of workers dependent on
	 * maintenance_work_mem, requiring 32MB for each worker. That makes sense
	 * for btree, but not for BRIN, which can do with much less memory. So
	 * maybe make that somehow less strict, optionally?
	 */
	if (indexInfo->ii_ParallelWorkers > 0)
		_brin_begin_parallel(state, heap, index, indexInfo->ii_Concurrent,
							 indexInfo->ii_ParallelWorkers);

	/*
	 * If parallel build requested and at least one worker process was
	 * successfully launched, set up coordination state, wait for workers to
	 * complete. Then read all tuples from the shared tuplesort and insert
	 * them into the index.
	 *
	 * In serial mode, simply scan the table and build the index one index
	 * tuple at a time.
	 */
	if (state->bs_leader)
	{
		SortCoordinate coordinate;

		coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
		coordinate->isWorker = false;
		coordinate->nParticipants =
			state->bs_leader->nparticipanttuplesorts;
		coordinate->sharedsort = state->bs_leader->sharedsort;

		/*
		 * Begin leader tuplesort.
		 *
		 * In cases where parallelism is involved, the leader receives the
		 * same share of maintenance_work_mem as a serial sort (it is
		 * generally treated in the same way as a serial sort once we return).
		 * Parallel worker Tuplesortstates will have received only a fraction
		 * of maintenance_work_mem, though.
		 *
		 * We rely on the lifetime of the Leader Tuplesortstate almost not
		 * overlapping with any worker Tuplesortstate's lifetime.  There may
		 * be some small overlap, but that's okay because we rely on leader
		 * Tuplesortstate only allocating a small, fixed amount of memory
		 * here.  When its tuplesort_performsort() is called (by our caller),
		 * and significant amounts of memory are likely to be used, all
		 * workers must have already freed almost all memory held by their
		 * Tuplesortstates (they are about to go away completely, too).  The
		 * overall effect is that maintenance_work_mem always represents an
		 * absolute high watermark on the amount of memory used by a CREATE
		 * INDEX operation, regardless of the use of parallelism or any other
		 * factor.
		 */
		state->bs_sortstate =
			tuplesort_begin_index_brin(maintenance_work_mem, coordinate,
									   TUPLESORT_NONE);

		/* scan the relation and merge per-worker results */
		reltuples = _brin_parallel_merge(state);

		_brin_end_parallel(state->bs_leader, state);
	}
	else						/* no parallel index build */
	{
		/*
		 * Now scan the relation.  No syncscan allowed here because we want
		 * the heap blocks in physical order (we want to produce the ranges
		 * starting from block 0, and the callback also relies on this to not
		 * generate summary for the same range twice).
		 */
		reltuples = table_index_build_scan(heap, index, indexInfo, false, true,
										   brinbuildCallback, state, NULL);

		/*
		 * process the final batch
		 *
		 * XXX Note this does not update state->bs_currRangeStart, i.e. it
		 * stays set to the last range added to the index. This is OK, because
		 * that's what brin_fill_empty_ranges expects.
		 */
		form_and_insert_tuple(state);

		/*
		 * Backfill the final ranges with empty data.
		 *
		 * This saves us from doing what amounts to full table scans when the
		 * index with a predicate like WHERE (nonnull_column IS NULL), or
		 * other very selective predicates.
		 */
		brin_fill_empty_ranges(state,
							   state->bs_currRangeStart,
							   state->bs_maxRangeStart);
	}

	/* release resources */
	idxtuples = state->bs_numtuples;
	brinRevmapTerminate(state->bs_rmAccess);
	terminate_brin_buildstate(state);

	/*
	 * Return statistics
	 */
	result = palloc_object(IndexBuildResult);

	result->heap_tuples = reltuples;
	result->index_tuples = idxtuples;

	return result;
}

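/*
 * Illustrative example (not part of the original source): a parallel BRIN
 * build is requested the same way as for btree, e.g.
 *
 *		SET max_parallel_maintenance_workers = 4;
 *		SET maintenance_work_mem = '256MB';
 *		CREATE INDEX tab_brin_idx ON tab USING brin (col);
 *
 * plan_create_index_workers() decides the actual worker count, as noted in
 * the XXX comment above.
 */
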
void
brinbuildempty(Relation index)
{
	Buffer		metabuf;

	/* An empty BRIN index has a metapage only. */
	metabuf = ExtendBufferedRel(BMR_REL(index), INIT_FORKNUM, NULL,
								EB_LOCK_FIRST | EB_SKIP_EXTENSION_LOCK);

	/* Initialize and xlog metabuffer. */
	START_CRIT_SECTION();
	brin_metapage_init(BufferGetPage(metabuf), BrinGetPagesPerRange(index),
					   BRIN_CURRENT_VERSION);
	MarkBufferDirty(metabuf);
	log_newpage_buffer(metabuf, true);
	END_CRIT_SECTION();

	UnlockReleaseBuffer(metabuf);
}

/*
 * brinbulkdelete
 *		Since there are no per-heap-tuple index tuples in BRIN indexes,
 *		there's not a lot we can do here.
 *
 * XXX we could mark item tuples as "dirty" (when a minimum or maximum heap
 * tuple is deleted), meaning the need to re-run summarization on the affected
 * range.  Would need to add an extra flag in brintuples for that.
 */
IndexBulkDeleteResult *
brinbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
			   IndexBulkDeleteCallback callback, void *callback_state)
{
	/* allocate stats if first time through, else re-use existing struct */
	if (stats == NULL)
		stats = palloc0_object(IndexBulkDeleteResult);

	return stats;
}

/*
 * This routine is in charge of "vacuuming" a BRIN index: we just summarize
 * ranges that are currently unsummarized.
 */
IndexBulkDeleteResult *
brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
	Relation	heapRel;

	/* No-op in ANALYZE ONLY mode */
	if (info->analyze_only)
		return stats;

	if (!stats)
		stats = palloc0_object(IndexBulkDeleteResult);
	stats->num_pages = RelationGetNumberOfBlocks(info->index);
	/* rest of stats is initialized by zeroing */

	heapRel = table_open(IndexGetRelation(RelationGetRelid(info->index), false),
						 AccessShareLock);

	brin_vacuum_scan(info->index, info->strategy);

	brinsummarize(info->index, heapRel, BRIN_ALL_BLOCKRANGES, false,
				  &stats->num_index_tuples, &stats->num_index_tuples);

	table_close(heapRel, AccessShareLock);

	return stats;
}

/*
 * reloptions processor for BRIN indexes
 */
bytea *
brinoptions(Datum reloptions, bool validate)
{
	static const relopt_parse_elt tab[] = {
		{"pages_per_range", RELOPT_TYPE_INT, offsetof(BrinOptions, pagesPerRange)},
		{"autosummarize", RELOPT_TYPE_BOOL, offsetof(BrinOptions, autosummarize)}
	};

	return (bytea *) build_reloptions(reloptions, validate,
									  RELOPT_KIND_BRIN,
									  sizeof(BrinOptions),
									  tab, lengthof(tab));
}

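/*
 * Illustrative example (not part of the original source): these reloptions
 * are given at index creation time, e.g.
 *
 *		CREATE INDEX tab_brin_idx ON tab USING brin (col)
 *			WITH (pages_per_range = 64, autosummarize = on);
 */
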
/*
 * SQL-callable function to scan through an index and summarize all ranges
 * that are not currently summarized.
 */
Datum
brin_summarize_new_values(PG_FUNCTION_ARGS)
{
	Datum		relation = PG_GETARG_DATUM(0);

	return DirectFunctionCall2(brin_summarize_range,
							   relation,
							   Int64GetDatum((int64) BRIN_ALL_BLOCKRANGES));
}

/*
 * SQL-callable function to summarize the indicated page range, if not already
 * summarized.  If the second argument is BRIN_ALL_BLOCKRANGES, all
 * unsummarized ranges are summarized.
 */
Datum
brin_summarize_range(PG_FUNCTION_ARGS)
{
	Oid			indexoid = PG_GETARG_OID(0);
	int64		heapBlk64 = PG_GETARG_INT64(1);
	BlockNumber heapBlk;
	Oid			heapoid;
	Relation	indexRel;
	Relation	heapRel;
	Oid			save_userid;
	int			save_sec_context;
	int			save_nestlevel;
	double		numSummarized = 0;

	if (RecoveryInProgress())
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("recovery is in progress"),
				 errhint("BRIN control functions cannot be executed during recovery.")));

	if (heapBlk64 > BRIN_ALL_BLOCKRANGES || heapBlk64 < 0)
		ereport(ERROR,
				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
				 errmsg("block number out of range: %" PRId64, heapBlk64)));
	heapBlk = (BlockNumber) heapBlk64;

	/*
	 * We must lock table before index to avoid deadlocks.  However, if the
	 * passed indexoid isn't an index then IndexGetRelation() will fail.
	 * Rather than emitting a not-very-helpful error message, postpone
	 * complaining, expecting that the is-it-an-index test below will fail.
	 */
	heapoid = IndexGetRelation(indexoid, true);
	if (OidIsValid(heapoid))
	{
		heapRel = table_open(heapoid, ShareUpdateExclusiveLock);

		/*
		 * Autovacuum calls us.  For its benefit, switch to the table owner's
		 * userid, so that any index functions are run as that user.  Also
		 * lock down security-restricted operations and arrange to make GUC
		 * variable changes local to this command.  This is harmless, albeit
		 * unnecessary, when called from SQL, because we fail shortly if the
		 * user does not own the index.
		 */
		GetUserIdAndSecContext(&save_userid, &save_sec_context);
		SetUserIdAndSecContext(heapRel->rd_rel->relowner,
							   save_sec_context | SECURITY_RESTRICTED_OPERATION);
		save_nestlevel = NewGUCNestLevel();
		RestrictSearchPath();
	}
	else
	{
		heapRel = NULL;
		/* Set these just to suppress "uninitialized variable" warnings */
		save_userid = InvalidOid;
		save_sec_context = -1;
		save_nestlevel = -1;
	}

	indexRel = index_open(indexoid, ShareUpdateExclusiveLock);

	/* Must be a BRIN index */
	if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
		indexRel->rd_rel->relam != BRIN_AM_OID)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not a BRIN index",
						RelationGetRelationName(indexRel))));

	/* User must own the index (comparable to privileges needed for VACUUM) */
	if (heapRel != NULL && !object_ownercheck(RelationRelationId, indexoid, save_userid))
		aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX,
					   RelationGetRelationName(indexRel));

	/*
	 * Since we did the IndexGetRelation call above without any lock, it's
	 * barely possible that a race against an index drop/recreation could have
	 * netted us the wrong table.  Recheck.
	 */
	if (heapRel == NULL || heapoid != IndexGetRelation(indexoid, false))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_TABLE),
				 errmsg("could not open parent table of index \"%s\"",
						RelationGetRelationName(indexRel))));

	/* see gin_clean_pending_list() */
	if (indexRel->rd_index->indisvalid)
		brinsummarize(indexRel, heapRel, heapBlk, true, &numSummarized, NULL);
	else
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("index \"%s\" is not valid",
						RelationGetRelationName(indexRel))));

	/* Roll back any GUC changes executed by index functions */
	AtEOXact_GUC(false, save_nestlevel);

	/* Restore userid and security context */
	SetUserIdAndSecContext(save_userid, save_sec_context);

	relation_close(indexRel, ShareUpdateExclusiveLock);
	relation_close(heapRel, ShareUpdateExclusiveLock);

	PG_RETURN_INT32((int32) numSummarized);
}

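/*
 * Illustrative example (not part of the original source):
 *
 *		SELECT brin_summarize_range('tab_brin_idx'::regclass, 0);
 *		SELECT brin_summarize_new_values('tab_brin_idx'::regclass);
 *
 * The first summarizes only the range containing heap block 0; the second
 * summarizes every range not yet summarized, via BRIN_ALL_BLOCKRANGES.
 */
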
/*
 * SQL-callable interface to mark a range as no longer summarized
 */
Datum
brin_desummarize_range(PG_FUNCTION_ARGS)
{
	Oid			indexoid = PG_GETARG_OID(0);
	int64		heapBlk64 = PG_GETARG_INT64(1);
	BlockNumber heapBlk;
	Oid			heapoid;
	Relation	heapRel;
	Relation	indexRel;
	bool		done;

	if (RecoveryInProgress())
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("recovery is in progress"),
				 errhint("BRIN control functions cannot be executed during recovery.")));

	if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0)
		ereport(ERROR,
				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
				 errmsg("block number out of range: %" PRId64,
						heapBlk64)));
	heapBlk = (BlockNumber) heapBlk64;

	/*
	 * We must lock table before index to avoid deadlocks.  However, if the
	 * passed indexoid isn't an index then IndexGetRelation() will fail.
	 * Rather than emitting a not-very-helpful error message, postpone
	 * complaining, expecting that the is-it-an-index test below will fail.
	 *
	 * Unlike brin_summarize_range(), autovacuum never calls this.  Hence, we
	 * don't switch userid.
	 */
	heapoid = IndexGetRelation(indexoid, true);
	if (OidIsValid(heapoid))
		heapRel = table_open(heapoid, ShareUpdateExclusiveLock);
	else
		heapRel = NULL;

	indexRel = index_open(indexoid, ShareUpdateExclusiveLock);

	/* Must be a BRIN index */
	if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
		indexRel->rd_rel->relam != BRIN_AM_OID)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not a BRIN index",
						RelationGetRelationName(indexRel))));

	/* User must own the index (comparable to privileges needed for VACUUM) */
	if (!object_ownercheck(RelationRelationId, indexoid, GetUserId()))
		aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX,
					   RelationGetRelationName(indexRel));

	/*
	 * Since we did the IndexGetRelation call above without any lock, it's
	 * barely possible that a race against an index drop/recreation could have
	 * netted us the wrong table.  Recheck.
	 */
	if (heapRel == NULL || heapoid != IndexGetRelation(indexoid, false))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_TABLE),
				 errmsg("could not open parent table of index \"%s\"",
						RelationGetRelationName(indexRel))));

	/* see gin_clean_pending_list() */
	if (indexRel->rd_index->indisvalid)
	{
		/* the revmap does the hard work */
		do
		{
			done = brinRevmapDesummarizeRange(indexRel, heapBlk);
		}
		while (!done);
	}
	else
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("index \"%s\" is not valid",
						RelationGetRelationName(indexRel))));

	relation_close(indexRel, ShareUpdateExclusiveLock);
	relation_close(heapRel, ShareUpdateExclusiveLock);

	PG_RETURN_VOID();
}

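/*
 * Illustrative example (not part of the original source):
 *
 *		SELECT brin_desummarize_range('tab_brin_idx'::regclass, 0);
 *
 * drops the summary for the range containing heap block 0, so that a later
 * brin_summarize_range() call or autosummarization can rebuild it.
 */
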
/*
 * Build a BrinDesc used to create or scan a BRIN index
 */
BrinDesc *
brin_build_desc(Relation rel)
{
	BrinOpcInfo **opcinfo;
	BrinDesc   *bdesc;
	TupleDesc	tupdesc;
	int			totalstored = 0;
	int			keyno;
	long		totalsize;
	MemoryContext cxt;
	MemoryContext oldcxt;

	cxt = AllocSetContextCreate(CurrentMemoryContext,
								"brin desc cxt",
								ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(cxt);
	tupdesc = RelationGetDescr(rel);

	/*
	 * Obtain BrinOpcInfo for each indexed column.  While at it, accumulate
	 * the number of columns stored, since the number is opclass-defined.
	 */
	opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
	for (keyno = 0; keyno < tupdesc->natts; keyno++)
	{
		FmgrInfo   *opcInfoFn;
		Form_pg_attribute attr = TupleDescAttr(tupdesc, keyno);

		opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO);

		opcinfo[keyno] = (BrinOpcInfo *)
			DatumGetPointer(FunctionCall1(opcInfoFn, attr->atttypid));
		totalstored += opcinfo[keyno]->oi_nstored;
	}

	/* Allocate our result struct and fill it in */
	totalsize = offsetof(BrinDesc, bd_info) +
		sizeof(BrinOpcInfo *) * tupdesc->natts;

	bdesc = palloc(totalsize);
	bdesc->bd_context = cxt;
	bdesc->bd_index = rel;
	bdesc->bd_tupdesc = tupdesc;
	bdesc->bd_disktdesc = NULL; /* generated lazily */
	bdesc->bd_totalstored = totalstored;

	for (keyno = 0; keyno < tupdesc->natts; keyno++)
		bdesc->bd_info[keyno] = opcinfo[keyno];
	pfree(opcinfo);

	MemoryContextSwitchTo(oldcxt);

	return bdesc;
}

void
brin_free_desc(BrinDesc *bdesc)
{
	/* make sure the tupdesc is still valid */
	Assert(bdesc->bd_tupdesc->tdrefcount >= 1);
	/* no need for retail pfree */
	MemoryContextDelete(bdesc->bd_context);
}

/*
 * Fetch index's statistical data into *stats
 */
void
brinGetStats(Relation index, BrinStatsData *stats)
{
	Buffer		metabuffer;
	Page		metapage;
	BrinMetaPageData *metadata;

	metabuffer = ReadBuffer(index, BRIN_METAPAGE_BLKNO);
	LockBuffer(metabuffer, BUFFER_LOCK_SHARE);
	metapage = BufferGetPage(metabuffer);
	metadata = (BrinMetaPageData *) PageGetContents(metapage);

	stats->pagesPerRange = metadata->pagesPerRange;
	stats->revmapNumPages = metadata->lastRevmapPage - 1;

	UnlockReleaseBuffer(metabuffer);
}

/*
 * Initialize a BrinBuildState appropriate to create tuples on the given index.
 */
static BrinBuildState *
initialize_brin_buildstate(Relation idxRel, BrinRevmap *revmap,
						   BlockNumber pagesPerRange, BlockNumber tablePages)
{
	BrinBuildState *state;
	BlockNumber lastRange = 0;

	state = palloc_object(BrinBuildState);

	state->bs_irel = idxRel;
	state->bs_numtuples = 0;
	state->bs_reltuples = 0;
	state->bs_currentInsertBuf = InvalidBuffer;
	state->bs_pagesPerRange = pagesPerRange;
	state->bs_currRangeStart = 0;
	state->bs_rmAccess = revmap;
	state->bs_bdesc = brin_build_desc(idxRel);
	state->bs_dtuple = brin_new_memtuple(state->bs_bdesc);
	state->bs_leader = NULL;
	state->bs_worker_id = 0;
	state->bs_sortstate = NULL;
	state->bs_context = CurrentMemoryContext;
	state->bs_emptyTuple = NULL;
	state->bs_emptyTupleLen = 0;

	/* Remember the memory context to use for an empty tuple, if needed. */
	state->bs_context = CurrentMemoryContext;
	state->bs_emptyTuple = NULL;
	state->bs_emptyTupleLen = 0;

	/*
	 * Calculate the start of the last page range. Page numbers are 0-based,
	 * so to calculate the index we need to subtract one. The integer division
	 * gives us the index of the page range.
	 */
	if (tablePages > 0)
		lastRange = ((tablePages - 1) / pagesPerRange) * pagesPerRange;

	/* Now calculate the start of the next range. */
	state->bs_maxRangeStart = lastRange + state->bs_pagesPerRange;

	return state;
}

/*
 * Release resources associated with a BrinBuildState.
 */
static void
terminate_brin_buildstate(BrinBuildState *state)
{
	/*
	 * Release the last index buffer used.  We might as well ensure that
	 * whatever free space remains in that page is available in FSM, too.
	 */
	if (!BufferIsInvalid(state->bs_currentInsertBuf))
	{
		Page		page;
		Size		freespace;
		BlockNumber blk;

		page = BufferGetPage(state->bs_currentInsertBuf);
		freespace = PageGetFreeSpace(page);
		blk = BufferGetBlockNumber(state->bs_currentInsertBuf);
		ReleaseBuffer(state->bs_currentInsertBuf);
		RecordPageWithFreeSpace(state->bs_irel, blk, freespace);
		FreeSpaceMapVacuumRange(state->bs_irel, blk, blk + 1);
	}

	brin_free_desc(state->bs_bdesc);
	pfree(state->bs_dtuple);
	pfree(state);
}

/*
 * On the given BRIN index, summarize the heap page range that corresponds
 * to the heap block number given.
 *
 * This routine can run in parallel with insertions into the heap.  To avoid
 * missing those values from the summary tuple, we first insert a placeholder
 * index tuple into the index, then execute the heap scan; transactions
 * concurrent with the scan update the placeholder tuple.  After the scan, we
 * union the placeholder tuple with the one computed by this routine.  The
 * update of the index value happens in a loop, so that if somebody updates
 * the placeholder tuple after we read it, we detect the case and try again.
 * This ensures that the concurrently inserted tuples are not lost.
 *
 * A further corner case is this routine being asked to summarize the partial
 * range at the end of the table.  heapNumBlocks is the (possibly outdated)
 * table size; if we notice that the requested range lies beyond that size,
 * we re-compute the table size after inserting the placeholder tuple, to
 * avoid missing pages that were appended recently.
 */
static void
summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
				BlockNumber heapBlk, BlockNumber heapNumBlks)
{
	Buffer		phbuf;
	BrinTuple  *phtup;
	Size		phsz;
	OffsetNumber offset;
	BlockNumber scanNumBlks;

	/*
	 * Insert the placeholder tuple
	 */
	phbuf = InvalidBuffer;
	phtup = brin_form_placeholder_tuple(state->bs_bdesc, heapBlk, &phsz);
	offset = brin_doinsert(state->bs_irel, state->bs_pagesPerRange,
						   state->bs_rmAccess, &phbuf,
						   heapBlk, phtup, phsz);

	/*
	 * Compute range end.  We hold ShareUpdateExclusive lock on table, so it
	 * cannot shrink concurrently (but it can grow).
	 */
	Assert(heapBlk % state->bs_pagesPerRange == 0);
	if (heapBlk + state->bs_pagesPerRange > heapNumBlks)
	{
		/*
		 * If we're asked to scan what we believe to be the final range on the
		 * table (i.e. a range that might be partial) we need to recompute our
		 * idea of what the latest page is after inserting the placeholder
		 * tuple.  Anyone that grows the table later will update the
		 * placeholder tuple, so it doesn't matter that we won't scan these
		 * pages ourselves.  Careful: the table might have been extended
		 * beyond the current range, so clamp our result.
		 *
		 * Fortunately, this should occur infrequently.
		 */
		scanNumBlks = Min(RelationGetNumberOfBlocks(heapRel) - heapBlk,
						  state->bs_pagesPerRange);
	}
	else
	{
		/* Easy case: range is known to be complete */
		scanNumBlks = state->bs_pagesPerRange;
	}

	/*
	 * Execute the partial heap scan covering the heap blocks in the specified
	 * page range, summarizing the heap tuples in it.  This scan stops just
	 * short of brinbuildCallback creating the new index entry.
	 *
	 * Note that it is critical we use the "any visible" mode of
	 * table_index_build_range_scan here: otherwise, we would miss tuples
	 * inserted by transactions that are still in progress, among other corner
	 * cases.
	 */
	state->bs_currRangeStart = heapBlk;
	table_index_build_range_scan(heapRel, state->bs_irel, indexInfo, false, true, false,
								 heapBlk, scanNumBlks,
								 brinbuildCallback, state, NULL);

	/*
	 * Now we update the values obtained by the scan with the placeholder
	 * tuple.  We do this in a loop which only terminates if we're able to
	 * update the placeholder tuple successfully; if we are not, this means
	 * somebody else modified the placeholder tuple after we read it.
	 */
	for (;;)
	{
		BrinTuple  *newtup;
		Size		newsize;
		bool		didupdate;
		bool		samepage;

		CHECK_FOR_INTERRUPTS();

		/*
		 * Update the summary tuple and try to update.
		 */
		newtup = brin_form_tuple(state->bs_bdesc,
								 heapBlk, state->bs_dtuple, &newsize);
		samepage = brin_can_do_samepage_update(phbuf, phsz, newsize);
		didupdate =
			brin_doupdate(state->bs_irel, state->bs_pagesPerRange,
						  state->bs_rmAccess, heapBlk, phbuf, offset,
						  phtup, phsz, newtup, newsize, samepage);
		brin_free_tuple(phtup);
		brin_free_tuple(newtup);

		/* If the update succeeded, we're done. */
		if (didupdate)
			break;

		/*
		 * If the update didn't work, it might be because somebody updated the
		 * placeholder tuple concurrently.  Extract the new version, union it
		 * with the values we have from the scan, and start over.  (There are
		 * other reasons for the update to fail, but it's simple to treat them
		 * the same.)
		 */
		phtup = brinGetTupleForHeapBlock(state->bs_rmAccess, heapBlk, &phbuf,
										 &offset, &phsz, BUFFER_LOCK_SHARE);
		/* the placeholder tuple must exist */
		if (phtup == NULL)
			elog(ERROR, "missing placeholder tuple");
		phtup = brin_copy_tuple(phtup, phsz, NULL, NULL);
		LockBuffer(phbuf, BUFFER_LOCK_UNLOCK);

		/* merge it into the tuple from the heap scan */
		union_tuples(state->bs_bdesc, state->bs_dtuple, phtup);
	}

	ReleaseBuffer(phbuf);
}

/*
 * Summarize page ranges that are not already summarized.  If pageRange is
 * BRIN_ALL_BLOCKRANGES then the whole table is scanned; otherwise, only the
 * page range containing the given heap page number is scanned.
 * If include_partial is true, then the partial range at the end of the table
 * is summarized, otherwise not.
 *
 * For each new index tuple inserted, *numSummarized (if not NULL) is
 * incremented; for each existing tuple, *numExisting (if not NULL) is
 * incremented.
 */
static void
brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange,
			  bool include_partial, double *numSummarized, double *numExisting)
{
	BrinRevmap *revmap;
	BrinBuildState *state = NULL;
	IndexInfo  *indexInfo = NULL;
	BlockNumber heapNumBlocks;
	BlockNumber pagesPerRange;
	Buffer		buf;
	BlockNumber startBlk;

	revmap = brinRevmapInitialize(index, &pagesPerRange);

	/* determine range of pages to process */
	heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
	if (pageRange == BRIN_ALL_BLOCKRANGES)
		startBlk = 0;
	else
	{
		startBlk = (pageRange / pagesPerRange) * pagesPerRange;
		heapNumBlocks = Min(heapNumBlocks, startBlk + pagesPerRange);
	}
	if (startBlk > heapNumBlocks)
	{
		/* Nothing to do if start point is beyond end of table */
		brinRevmapTerminate(revmap);
		return;
	}

	/*
	 * Scan the revmap to find unsummarized items.
	 */
	buf = InvalidBuffer;
	for (; startBlk < heapNumBlocks; startBlk += pagesPerRange)
	{
		BrinTuple  *tup;
		OffsetNumber off;

		/*
		 * Unless requested to summarize even a partial range, go away now if
		 * we think the next range is partial.  Caller would pass true when it
		 * is typically run once bulk data loading is done
		 * (brin_summarize_new_values), and false when it is typically the
		 * result of arbitrarily-scheduled maintenance command (vacuuming).
		 */
		if (!include_partial &&
			(startBlk + pagesPerRange > heapNumBlocks))
			break;

		CHECK_FOR_INTERRUPTS();

		tup = brinGetTupleForHeapBlock(revmap, startBlk, &buf, &off, NULL,
									   BUFFER_LOCK_SHARE);
		if (tup == NULL)
		{
			/* no revmap entry for this heap range. Summarize it. */
			if (state == NULL)
			{
				/* first time through */
				Assert(!indexInfo);
				state = initialize_brin_buildstate(index, revmap,
												   pagesPerRange,
												   InvalidBlockNumber);
				indexInfo = BuildIndexInfo(index);
			}
			summarize_range(indexInfo, state, heapRel, startBlk, heapNumBlocks);

			/* and re-initialize state for the next range */
			brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc);

			if (numSummarized)
				*numSummarized += 1.0;
		}
		else
		{
			if (numExisting)
				*numExisting += 1.0;
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		}
	}

	if (BufferIsValid(buf))
		ReleaseBuffer(buf);

	/* free resources */
	brinRevmapTerminate(revmap);
	if (state)
	{
		terminate_brin_buildstate(state);
		pfree(indexInfo);
	}
}

/*
 * Given a deformed tuple in the build state, convert it into the on-disk
 * format and insert it into the index, making the revmap point to it.
 */
static void
form_and_insert_tuple(BrinBuildState *state)
{
	BrinTuple  *tup;
	Size		size;

	tup = brin_form_tuple(state->bs_bdesc, state->bs_currRangeStart,
						  state->bs_dtuple, &size);
	brin_doinsert(state->bs_irel, state->bs_pagesPerRange, state->bs_rmAccess,
				  &state->bs_currentInsertBuf, state->bs_currRangeStart,
				  tup, size);
	state->bs_numtuples++;

	pfree(tup);
}

/*
 * Given a deformed tuple in the build state, convert it into the on-disk
 * format and write it to a (shared) tuplesort (the leader will insert it
 * into the index later).
 */
static void
form_and_spill_tuple(BrinBuildState *state)
{
	BrinTuple  *tup;
	Size		size;

	/* don't insert empty tuples in parallel build */
	if (state->bs_dtuple->bt_empty_range)
		return;

	tup = brin_form_tuple(state->bs_bdesc, state->bs_currRangeStart,
						  state->bs_dtuple, &size);

	/* write the BRIN tuple to the tuplesort */
	tuplesort_putbrintuple(state->bs_sortstate, tup, size);

	state->bs_numtuples++;

	pfree(tup);
}

/*
 * Given two deformed tuples, adjust the first one so that it's consistent
 * with the summary values in both.
 */
static void
union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b)
{
	int			keyno;
	BrinMemTuple *db;
	MemoryContext cxt;
	MemoryContext oldcxt;

	/* Use our own memory context to avoid retail pfree */
	cxt = AllocSetContextCreate(CurrentMemoryContext,
								"brin union",
								ALLOCSET_DEFAULT_SIZES);
	oldcxt = MemoryContextSwitchTo(cxt);
	db = brin_deform_tuple(bdesc, b, NULL);
	MemoryContextSwitchTo(oldcxt);

	/*
	 * Check if the ranges are empty.
	 *
	 * If at least one of them is empty, we don't need to call per-key union
	 * functions at all.  If "b" is empty, we just use "a" as the result (it
	 * might be empty too, but that's fine).  If "a" is empty but "b" is not,
	 * we use "b" as the result (but we have to copy the data into "a" first).
	 *
	 * Only when both ranges are non-empty, we actually do the per-key merge.
	 */

	/* If "b" is empty - ignore it and just use "a" (even if it's empty etc.). */
	if (db->bt_empty_range)
	{
		/* skip the per-key merge */
		MemoryContextDelete(cxt);
		return;
	}

	/*
	 * Now we know "b" is not empty.  If "a" is empty, then "b" is the result.
	 * But we need to copy the data from "b" to "a" first, because that's how
	 * we pass result out.
	 *
	 * We have to copy all the global/per-key flags etc. too.
	 */
	if (a->bt_empty_range)
	{
		for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
		{
			int			i;
			BrinValues *col_a = &a->bt_columns[keyno];
			BrinValues *col_b = &db->bt_columns[keyno];
			BrinOpcInfo *opcinfo = bdesc->bd_info[keyno];

			col_a->bv_allnulls = col_b->bv_allnulls;
			col_a->bv_hasnulls = col_b->bv_hasnulls;

			/* If "b" has no data, we're done. */
			if (col_b->bv_allnulls)
				continue;

			for (i = 0; i < opcinfo->oi_nstored; i++)
				col_a->bv_values[i] =
					datumCopy(col_b->bv_values[i],
							  opcinfo->oi_typcache[i]->typbyval,
							  opcinfo->oi_typcache[i]->typlen);
		}

		/* "a" started empty, but "b" was not empty, so remember that */
		a->bt_empty_range = false;

		/* skip the per-key merge */
		MemoryContextDelete(cxt);
		return;
	}

	/* Now we know neither range is empty. */
	for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
	{
		FmgrInfo   *unionFn;
		BrinValues *col_a = &a->bt_columns[keyno];
		BrinValues *col_b = &db->bt_columns[keyno];
		BrinOpcInfo *opcinfo = bdesc->bd_info[keyno];

		if (opcinfo->oi_regular_nulls)
		{
			/* Does the "b" summary represent any NULL values? */
			bool		b_has_nulls = (col_b->bv_hasnulls || col_b->bv_allnulls);

			/* Adjust "hasnulls". */
			if (!col_a->bv_allnulls && b_has_nulls)
				col_a->bv_hasnulls = true;

			/* If there are no values in B, there's nothing left to do. */
			if (col_b->bv_allnulls)
				continue;

			/*
			 * Adjust "allnulls".  If A doesn't have values, just copy the
			 * values from B into A, and we're done.  We cannot run the
			 * operators in this case, because values in A might contain
			 * garbage.  Note we already established that B contains values.
			 *
			 * Also adjust "hasnulls" in order not to forget the summary
			 * represents NULL values.  This is not redundant with the earlier
			 * update, because that only happens when allnulls=false.
			 */
			if (col_a->bv_allnulls)
			{
				int			i;

				col_a->bv_allnulls = false;
				col_a->bv_hasnulls = true;

				for (i = 0; i < opcinfo->oi_nstored; i++)
					col_a->bv_values[i] =
						datumCopy(col_b->bv_values[i],
								  opcinfo->oi_typcache[i]->typbyval,
								  opcinfo->oi_typcache[i]->typlen);

				continue;
			}
		}

		unionFn = index_getprocinfo(bdesc->bd_index, keyno + 1,
									BRIN_PROCNUM_UNION);
		FunctionCall3Coll(unionFn,
						  bdesc->bd_index->rd_indcollation[keyno],
						  PointerGetDatum(bdesc),
						  PointerGetDatum(col_a),
						  PointerGetDatum(col_b));
	}

	MemoryContextDelete(cxt);
}

2164/*
2165 * brin_vacuum_scan
2166 * Do a complete scan of the index during VACUUM.
2167 *
2168 * This routine scans the complete index looking for uncataloged index pages,
2169 * i.e. those that might have been lost due to a crash after index extension
2170 * and such.
2171 */
2172static void
2173brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy)
2174{
2175 BlockRangeReadStreamPrivate p;
2176 ReadStream *stream;
2177 Buffer buf;
2178
2179 p.current_blocknum = 0;
2180 p.last_exclusive = RelationGetNumberOfBlocks(idxrel);
2181
2182 /*
2183 * It is safe to use batchmode as block_range_read_stream_cb takes no
2184 * locks.
2185 */
2186 stream = read_stream_begin_relation(READ_STREAM_MAINTENANCE |
2187 READ_STREAM_FULL |
2188 READ_STREAM_USE_BATCHING,
2189 strategy,
2190 idxrel,
2191 MAIN_FORKNUM,
2192 block_range_read_stream_cb,
2193 &p,
2194 0);
2195
2196 /*
2197 * Scan the index in physical order, and clean up any possible mess in
2198 * each page.
2199 */
2200 while ((buf = read_stream_next_buffer(stream, NULL)) != InvalidBuffer)
2201 {
2202 CHECK_FOR_INTERRUPTS();
2203
2204 brin_page_cleanup(idxrel, buf);
2205
2206 ReleaseBuffer(buf);
2207 }
2208
2209 read_stream_end(stream);
2210
2211 /*
2212 * Update all upper pages in the index's FSM, as well. This ensures not
2213 * only that we propagate leaf-page FSM updates made by brin_page_cleanup,
2214 * but also that any pre-existing damage or out-of-dateness is repaired.
2215 */
2216 FreeSpaceMapVacuum(idxrel);
2217}
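/*
 * For illustration: a crash immediately after index extension can leave a
 * page that no revmap entry points to; brin_page_cleanup() notices such
 * pages and records their free space in the FSM, so later insertions can
 * find and reuse the space.
 */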
2218
2219static bool
2220add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
2221 const Datum *values, const bool *nulls)
2222{
2223 int keyno;
2224
2225 /* If the range starts empty, we're certainly going to modify it. */
2226 bool modified = dtup->bt_empty_range;
2227
2228 /*
2229 * Compare the key values of the new tuple to the stored index values; our
2230 * deformed tuple will get updated if the new tuple doesn't fit the
2231 * original range (note this means we can't break out of the loop early).
2232 * Make a note of whether this happens, so that we know to insert the
2233 * modified tuple later.
2234 */
2235 for (keyno = 0; keyno < bdesc->bd_tupdesc->natts; keyno++)
2236 {
2237 Datum result;
2238 BrinValues *bval;
2239 FmgrInfo *addValue;
2240 bool has_nulls;
2241
2242 bval = &dtup->bt_columns[keyno];
2243
2244 /*
2245 * Does the range have actual NULL values? Either of the flags can be
2246 * set, but we ignore the state before adding the first row.
2247 *
2248 * We have to remember this, because we'll modify the flags and we
2249 * need to know if the range started as empty.
2250 */
2251 has_nulls = ((!dtup->bt_empty_range) &&
2252 (bval->bv_hasnulls || bval->bv_allnulls));
2253
2254 /*
2255 * If the value we're adding is NULL, handle it locally. Otherwise
2256 * call the BRIN_PROCNUM_ADDVALUE procedure.
2257 */
2258 if (bdesc->bd_info[keyno]->oi_regular_nulls && nulls[keyno])
2259 {
2260 /*
2261 * If the new value is null, we record that we saw it if it's the
2262 * first one; otherwise, there's nothing to do.
2263 */
2264 if (!bval->bv_hasnulls)
2265 {
2266 bval->bv_hasnulls = true;
2267 modified = true;
2268 }
2269
2270 continue;
2271 }
2272
2273 addValue = index_getprocinfo(idxRel, keyno + 1,
2274 BRIN_PROCNUM_ADDVALUE);
2275 result = FunctionCall4Coll(addValue,
2276 idxRel->rd_indcollation[keyno],
2277 PointerGetDatum(bdesc),
2278 PointerGetDatum(bval),
2279 values[keyno],
2280 BoolGetDatum(nulls[keyno]));
2281 /* if that returned true, we need to insert the updated tuple */
2282 modified |= DatumGetBool(result);
2283
2284 /*
2285 * If the range had actual NULL values (i.e. did not start empty),
2286 * make sure we don't forget about the NULL values. Either the
2287 * allnulls flag is still set to true, or (if the opclass cleared it)
2288 * we need to set hasnulls=true.
2289 *
2290 * XXX This can only happen when the opclass modified the tuple, so
2291 * the modified flag should be set.
2292 */
2293 if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
2294 {
2295 Assert(modified);
2296 bval->bv_hasnulls = true;
2297 }
2298 }
2299
2300 /*
2301 * After updating summaries for all the keys, mark it as not empty.
2302 *
2303 * If we're actually changing the flag value (i.e. tuple started as
2304 * empty), we should have modified the tuple. So we should not see an empty
2305 * range that was not modified.
2306 */
2307 Assert(!dtup->bt_empty_range || modified);
2308 dtup->bt_empty_range = false;
2309
2310 return modified;
2311}
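/*
 * Worked example (values assumed for illustration): with a minmax summary
 * currently covering [5, 10], adding the value 12 makes the opclass expand
 * the stored range to [5, 12] and report the tuple as modified, so the
 * caller knows to write it back; adding the value 7 changes nothing and
 * the addvalue procedure returns false.
 */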
2312
2313static bool
2314check_null_keys(BrinValues *bval, ScanKey *nullkeys, int nnullkeys)
2315{
2316 int keyno;
2317
2318 /*
2319 * First check if there are any IS [NOT] NULL scan keys, and if we're
2320 * violating them.
2321 */
2322 for (keyno = 0; keyno < nnullkeys; keyno++)
2323 {
2324 ScanKey key = nullkeys[keyno];
2325
2326 Assert(key->sk_attno == bval->bv_attno);
2327
2328 /* Handle only IS NULL/IS NOT NULL tests */
2329 if (!(key->sk_flags & SK_ISNULL))
2330 continue;
2331
2332 if (key->sk_flags & SK_SEARCHNULL)
2333 {
2334 /* IS NULL scan key, but range has no NULLs */
2335 if (!bval->bv_allnulls && !bval->bv_hasnulls)
2336 return false;
2337 }
2338 else if (key->sk_flags & SK_SEARCHNOTNULL)
2339 {
2340 /*
2341 * For IS NOT NULL, we can only skip ranges that are known to have
2342 * only nulls.
2343 */
2344 if (bval->bv_allnulls)
2345 return false;
2346 }
2347 else
2348 {
2349 /*
2350 * Neither IS NULL nor IS NOT NULL was used; assume all indexable
2351 * operators are strict and thus return false with NULL value in
2352 * the scan key.
2353 */
2354 return false;
2355 }
2356 }
2357
2358 return true;
2359}
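/*
 * For illustration: given a summary with bv_allnulls = false and
 * bv_hasnulls = false, an IS NULL key makes this return false (the range
 * cannot contain NULLs, so it is skipped), while an IS NOT NULL key lets
 * the range through; only a summary with bv_allnulls = true is skipped
 * for IS NOT NULL.
 */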
2360
2361/*
2362 * Create parallel context, and launch workers for leader.
2363 *
2364 * buildstate argument should be initialized (with the exception of the
2365 * tuplesort states, which may later be created based on shared
2366 * state initially set up here).
2367 *
2368 * isconcurrent indicates if operation is CREATE INDEX CONCURRENTLY.
2369 *
2370 * request is the target number of parallel worker processes to launch.
2371 *
2372 * Sets buildstate's BrinLeader, which caller must use to shut down parallel
2373 * mode by passing it to _brin_end_parallel() at the very end of its index
2374 * build. If not even a single worker process can be launched, this is
2375 * never set, and caller should proceed with a serial index build.
2376 */
2377static void
2378_brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
2379 bool isconcurrent, int request)
2380{
2381 ParallelContext *pcxt;
2382 int scantuplesortstates;
2383 Snapshot snapshot;
2384 Size estbrinshared;
2385 Size estsort;
2386 BrinShared *brinshared;
2387 Sharedsort *sharedsort;
2388 BrinLeader *brinleader = (BrinLeader *) palloc0(sizeof(BrinLeader));
2389 WalUsage *walusage;
2390 BufferUsage *bufferusage;
2391 bool leaderparticipates = true;
2392 int querylen;
2393
2394#ifdef DISABLE_LEADER_PARTICIPATION
2395 leaderparticipates = false;
2396#endif
2397
2398 /*
2399 * Enter parallel mode, and create context for parallel build of brin
2400 * index
2401 */
2402 EnterParallelMode();
2403 Assert(request > 0);
2404 pcxt = CreateParallelContext("postgres", "_brin_parallel_build_main",
2405 request);
2406
2407 scantuplesortstates = leaderparticipates ? request + 1 : request;
2408
2409 /*
2410 * Prepare for scan of the base relation. In a normal index build, we use
2411 * SnapshotAny because we must retrieve all tuples and do our own time
2412 * qual checks (because we have to index RECENTLY_DEAD tuples). In a
2413 * concurrent build, we take a regular MVCC snapshot and index whatever's
2414 * live according to that.
2415 */
2416 if (!isconcurrent)
2417 snapshot = SnapshotAny;
2418 else
2419 snapshot = RegisterSnapshot(GetTransactionSnapshot());
2420
2421 /*
2422 * Estimate size for our own PARALLEL_KEY_BRIN_SHARED workspace.
2423 */
2424 estbrinshared = _brin_parallel_estimate_shared(heap, snapshot);
2425 shm_toc_estimate_chunk(&pcxt->estimator, estbrinshared);
2426 estsort = tuplesort_estimate_shared(scantuplesortstates);
2427 shm_toc_estimate_chunk(&pcxt->estimator, estsort);
2428
2429 shm_toc_estimate_keys(&pcxt->estimator, 2);
2430
2431 /*
2432 * Estimate space for WalUsage and BufferUsage -- PARALLEL_KEY_WAL_USAGE
2433 * and PARALLEL_KEY_BUFFER_USAGE.
2434 *
2435 * If there are no extensions loaded that care, we could skip this. We
2436 * have no way of knowing whether anyone's looking at pgWalUsage or
2437 * pgBufferUsage, so do it unconditionally.
2438 */
2439 shm_toc_estimate_chunk(&pcxt->estimator,
2440 mul_size(sizeof(WalUsage), pcxt->nworkers));
2441 shm_toc_estimate_keys(&pcxt->estimator, 1);
2442 shm_toc_estimate_chunk(&pcxt->estimator,
2443 mul_size(sizeof(BufferUsage), pcxt->nworkers));
2444 shm_toc_estimate_keys(&pcxt->estimator, 1);
2445
2446 /* Finally, estimate PARALLEL_KEY_QUERY_TEXT space */
2447 if (debug_query_string)
2448 {
2449 querylen = strlen(debug_query_string);
2450 shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
2451 shm_toc_estimate_keys(&pcxt->estimator, 1);
2452 }
2453 else
2454 querylen = 0; /* keep compiler quiet */
2455
2456 /* Everyone's had a chance to ask for space, so now create the DSM */
2457 InitializeParallelDSM(pcxt);
2458
2459 /* If no DSM segment was available, back out (do serial build) */
2460 if (pcxt->seg == NULL)
2461 {
2462 if (IsMVCCSnapshot(snapshot))
2463 UnregisterSnapshot(snapshot);
2464 DestroyParallelContext(pcxt);
2465 ExitParallelMode();
2466 return;
2467 }
2468
2469 /* Store shared build state, for which we reserved space */
2470 brinshared = (BrinShared *) shm_toc_allocate(pcxt->toc, estbrinshared);
2471 /* Initialize immutable state */
2472 brinshared->heaprelid = RelationGetRelid(heap);
2473 brinshared->indexrelid = RelationGetRelid(index);
2474 brinshared->isconcurrent = isconcurrent;
2475 brinshared->scantuplesortstates = scantuplesortstates;
2476 brinshared->pagesPerRange = buildstate->bs_pagesPerRange;
2477 brinshared->queryid = pgstat_get_my_query_id();
2478 ConditionVariableInit(&brinshared->workersdonecv);
2479 SpinLockInit(&brinshared->mutex);
2480
2481 /* Initialize mutable state */
2482 brinshared->nparticipantsdone = 0;
2483 brinshared->reltuples = 0.0;
2484 brinshared->indtuples = 0.0;
2485
2486 table_parallelscan_initialize(heap,
2487 ParallelTableScanFromBrinShared(brinshared),
2488 snapshot);
2489
2490 /*
2491 * Store shared tuplesort-private state, for which we reserved space.
2492 * Then, initialize opaque state using tuplesort routine.
2493 */
2494 sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
2495 tuplesort_initialize_shared(sharedsort, scantuplesortstates,
2496 pcxt->seg);
2497
2498 /*
2499 * Insert TOC entries for the shared build state and the shared tuplesort
2500 * state, so that workers can look them up by key.
2501 */
2502 shm_toc_insert(pcxt->toc, PARALLEL_KEY_BRIN_SHARED, brinshared);
2503 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort);
2504
2505 /* Store query string for workers */
2506 if (debug_query_string)
2507 {
2508 char *sharedquery;
2509
2510 sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
2511 memcpy(sharedquery, debug_query_string, querylen + 1);
2512 shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, sharedquery);
2513 }
2514
2515 /*
2516 * Allocate space for each worker's WalUsage and BufferUsage; no need to
2517 * initialize.
2518 */
2519 walusage = shm_toc_allocate(pcxt->toc,
2520 mul_size(sizeof(WalUsage), pcxt->nworkers));
2521 shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage);
2522 bufferusage = shm_toc_allocate(pcxt->toc,
2523 mul_size(sizeof(BufferUsage), pcxt->nworkers));
2524 shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufferusage);
2525
2526 /* Launch workers, saving status for leader/caller */
2527 LaunchParallelWorkers(pcxt);
2528 brinleader->pcxt = pcxt;
2529 brinleader->nparticipanttuplesorts = pcxt->nworkers_launched;
2530 if (leaderparticipates)
2531 brinleader->nparticipanttuplesorts++;
2532 brinleader->brinshared = brinshared;
2533 brinleader->sharedsort = sharedsort;
2534 brinleader->snapshot = snapshot;
2535 brinleader->walusage = walusage;
2536 brinleader->bufferusage = bufferusage;
2537
2538 /* If no workers were successfully launched, back out (do serial build) */
2539 if (pcxt->nworkers_launched == 0)
2540 {
2541 _brin_end_parallel(brinleader, NULL);
2542 return;
2543 }
2544
2545 /* Save leader state now that it's clear build will be parallel */
2546 buildstate->bs_leader = brinleader;
2547
2548 /* Join heap scan ourselves */
2549 if (leaderparticipates)
2550 _brin_leader_participate_as_worker(buildstate, heap, index);
2551
2552 /*
2553 * Caller needs to wait for all launched workers when we return. Make
2554 * sure that the failure-to-start case will not hang forever.
2555 */
2556 WaitForParallelWorkersToAttach(pcxt);
2557}
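/*
 * Resulting DSM layout, for reference: the shm_toc maps
 * PARALLEL_KEY_BRIN_SHARED to the BrinShared struct (followed by the
 * parallel table scan descriptor), PARALLEL_KEY_TUPLESORT to the shared
 * Sharedsort state, PARALLEL_KEY_WAL_USAGE and PARALLEL_KEY_BUFFER_USAGE
 * to per-worker instrumentation arrays, and, when a query string is
 * available, PARALLEL_KEY_QUERY_TEXT to a copy of it.
 */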
2558
2559/*
2560 * Shut down workers, destroy parallel context, and end parallel mode.
2561 */
2562static void
2563_brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state)
2564{
2565 int i;
2566
2567 /* Shutdown worker processes */
2568 WaitForParallelWorkersToFinish(brinleader->pcxt);
2569
2570 /*
2571 * Next, accumulate WAL usage. (This must wait for the workers to finish,
2572 * or we might get incomplete data.)
2573 */
2574 for (i = 0; i < brinleader->pcxt->nworkers_launched; i++)
2575 InstrAccumParallelQuery(&brinleader->bufferusage[i], &brinleader->walusage[i]);
2576
2577 /* Free last reference to MVCC snapshot, if one was used */
2578 if (IsMVCCSnapshot(brinleader->snapshot))
2579 UnregisterSnapshot(brinleader->snapshot);
2580 DestroyParallelContext(brinleader->pcxt);
2581 ExitParallelMode();
2582}
2583
2584/*
2585 * Within leader, wait for end of heap scan.
2586 *
2587 * When called, parallel heap scan started by _brin_begin_parallel() will
2588 * already be underway within worker processes (when leader participates
2589 * as a worker, we should end up here just as workers are finishing).
2590 *
2591 * Returns the total number of heap tuples scanned.
2592 */
2593static double
2594_brin_parallel_heapscan(BrinBuildState *state)
2595{
2596 BrinShared *brinshared = state->bs_leader->brinshared;
2597 int nparticipanttuplesorts;
2598
2599 nparticipanttuplesorts = state->bs_leader->nparticipanttuplesorts;
2600 for (;;)
2601 {
2602 SpinLockAcquire(&brinshared->mutex);
2603 if (brinshared->nparticipantsdone == nparticipanttuplesorts)
2604 {
2605 /* copy the data into leader state */
2606 state->bs_reltuples = brinshared->reltuples;
2607 state->bs_numtuples = brinshared->indtuples;
2608
2609 SpinLockRelease(&brinshared->mutex);
2610 break;
2611 }
2612 SpinLockRelease(&brinshared->mutex);
2613
2614 ConditionVariableSleep(&brinshared->workersdonecv,
2615 WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN);
2616 }
2617
2618 ConditionVariableCancelSleep();
2619
2620 return state->bs_reltuples;
2621}
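/*
 * The hand-off sketched above: each participant increments
 * nparticipantsdone under the mutex when its scan completes and signals
 * workersdonecv (see _brin_parallel_scan_and_build); the leader keeps
 * sleeping on that condition variable until the count reaches
 * nparticipanttuplesorts.
 */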
2622
2623/*
2624 * Within leader, wait for end of heap scan and merge per-worker results.
2625 *
2626 * After waiting for all workers to finish, merge the per-worker results into
2627 * the complete index. The results from each worker are sorted by block number
2628 * (start of the page range). While combining the per-worker results we merge
2629 * summaries for the same page range, and also fill-in empty summaries for
2630 * ranges without any tuples.
2631 *
2632 * Returns the total number of heap tuples scanned.
2633 */
2634static double
2635_brin_parallel_merge(BrinBuildState *state)
2636{
2637 BrinTuple *btup;
2638 BrinMemTuple *memtuple = NULL;
2639 Size tuplen;
2640 BlockNumber prevblkno = InvalidBlockNumber;
2641 MemoryContext rangeCxt,
2642 oldCxt;
2643 double reltuples;
2644
2645 /* wait for workers to scan table and produce partial results */
2646 reltuples = _brin_parallel_heapscan(state);
2647
2648 /* do the actual sort in the leader */
2649 tuplesort_performsort(state->bs_sortstate);
2650
2651 /*
2652 * Initialize BrinMemTuple we'll use to union summaries from workers (in
2653 * case they happened to produce parts of the same page range).
2654 */
2655 memtuple = brin_new_memtuple(state->bs_bdesc);
2656
2657 /*
2658 * Create a memory context we'll reset to combine results for a single
2659 * page range (received from the workers). We don't expect a huge number
2660 * of overlaps under regular circumstances, because for large tables the
2661 * chunk size is likely larger than the BRIN page range, but it can
2662 * happen, and the union functions may do all kinds of stuff. So we better
2663 * reset the context once in a while.
2664 */
2665 rangeCxt = AllocSetContextCreate(CurrentMemoryContext,
2666 "brin union",
2667 ALLOCSET_DEFAULT_SIZES);
2668 oldCxt = MemoryContextSwitchTo(rangeCxt);
2669
2670 /*
2671 * Read the BRIN tuples from the shared tuplesort, sorted by block number.
2672 * That probably gives us an index that is cheaper to scan, thanks to
2673 * mostly getting data from the same index page as before.
2674 */
2675 while ((btup = tuplesort_getbrintuple(state->bs_sortstate, &tuplen, true)) != NULL)
2676 {
2677 /* Ranges should be multiples of pages_per_range for the index. */
2678 Assert(btup->bt_blkno % state->bs_leader->brinshared->pagesPerRange == 0);
2679
2680 /*
2681 * Do we need to union summaries for the same page range?
2682 *
2683 * If this is the first brin tuple we read, then just deform it into
2684 * the memtuple, and continue with the next one from tuplesort. We
2685 * however may need to insert empty summaries into the index.
2686 *
2687 * If it's the same block as the last we saw, we simply union the brin
2688 * tuple into it, and we're done - we don't even need to insert empty
2689 * ranges, because that was done earlier when we saw the first brin
2690 * tuple (for this range).
2691 *
2692 * Finally, if it's not the first brin tuple, and it's not the same
2693 * page range, we need to do the insert and then deform the tuple into
2694 * the memtuple. Then we'll insert empty ranges before the new brin
2695 * tuple, if needed.
2696 */
2697 if (prevblkno == InvalidBlockNumber)
2698 {
2699 /* First brin tuple, just deform into memtuple. */
2700 memtuple = brin_deform_tuple(state->bs_bdesc, btup, memtuple);
2701
2702 /* continue to insert empty pages before thisblock */
2703 }
2704 else if (memtuple->bt_blkno == btup->bt_blkno)
2705 {
2706 /*
2707 * Not the first brin tuple, but same page range as the previous
2708 * one, so we can merge it into the memtuple.
2709 */
2710 union_tuples(state->bs_bdesc, memtuple, btup);
2711 continue;
2712 }
2713 else
2714 {
2715 BrinTuple *tmp;
2716 Size len;
2717
2718 /*
2719 * We got brin tuple for a different page range, so form a brin
2720 * tuple from the memtuple, insert it, and re-init the memtuple
2721 * from the new brin tuple.
2722 */
2723 tmp = brin_form_tuple(state->bs_bdesc, memtuple->bt_blkno,
2724 memtuple, &len);
2725
2726 brin_doinsert(state->bs_irel, state->bs_pagesPerRange, state->bs_rmAccess,
2727 &state->bs_currentInsertBuf, tmp->bt_blkno, tmp, len);
2728
2729 /*
2730 * Reset the per-output-range context. This frees all the memory
2731 * possibly allocated by the union functions, and also the BRIN
2732 * tuple we just formed and inserted.
2733 */
2734 MemoryContextReset(rangeCxt);
2735
2736 memtuple = brin_deform_tuple(state->bs_bdesc, btup, memtuple);
2737
2738 /* continue to insert empty pages before thisblock */
2739 }
2740
2741 /* Fill empty ranges for all ranges missing in the tuplesort. */
2742 brin_fill_empty_ranges(state, prevblkno, btup->bt_blkno);
2743
2744 prevblkno = btup->bt_blkno;
2745 }
2746
2747 tuplesort_end(state->bs_sortstate);
2748
2749 /* Fill the BRIN tuple for the last page range with data. */
2750 if (prevblkno != InvalidBlockNumber)
2751 {
2752 BrinTuple *tmp;
2753 Size len;
2754
2755 tmp = brin_form_tuple(state->bs_bdesc, memtuple->bt_blkno,
2756 memtuple, &len);
2757
2758 brin_doinsert(state->bs_irel, state->bs_pagesPerRange, state->bs_rmAccess,
2759 &state->bs_currentInsertBuf, tmp->bt_blkno, tmp, len);
2760
2761 pfree(tmp);
2762 }
2763
2764 /* Fill empty ranges at the end, for all ranges missing in the tuplesort. */
2765 brin_fill_empty_ranges(state, prevblkno, state->bs_maxRangeStart);
2766
2767 /*
2768 * Switch back to the original memory context, and destroy the one we
2769 * created to isolate the union_tuple calls.
2770 */
2771 MemoryContextSwitchTo(oldCxt);
2772 MemoryContextDelete(rangeCxt);
2773
2774 return reltuples;
2775}
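/*
 * Worked example (numbers assumed for illustration): with pagesPerRange =
 * 128, suppose the sorted stream yields tuples for ranges starting at
 * blocks 0, 128, 128 and 512. The two tuples for 128 are combined by
 * union_tuples(); before the tuple for 512 is deformed, the summary for
 * 128 is inserted and brin_fill_empty_ranges() adds empty summaries for
 * the ranges starting at 256 and 384, so every range of the table ends up
 * represented in the index.
 */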
2776
2777/*
2778 * Returns size of shared memory required to store state for a parallel
2779 * brin index build based on the snapshot its parallel scan will use.
2780 */
2781static Size
2782_brin_parallel_estimate_shared(Relation heap, Snapshot snapshot)
2783{
2784 /* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
2785 return add_size(BUFFERALIGN(sizeof(BrinShared)),
2786 table_parallelscan_estimate(heap, snapshot));
2787}
2788
2789/*
2790 * Within leader, participate as a parallel worker.
2791 */
2792static void
2793_brin_leader_participate_as_worker(BrinBuildState *buildstate, Relation heap, Relation index)
2794{
2795 BrinLeader *brinleader = buildstate->bs_leader;
2796 int sortmem;
2797
2798 /*
2799 * Might as well use a reliable figure when doling out maintenance_work_mem
2800 * (when the requested number of workers was not launched, this will be
2801 * somewhat higher than it is for other workers).
2802 */
2803 sortmem = maintenance_work_mem / brinleader->nparticipanttuplesorts;
2804
2805 /* Perform work common to all participants */
2806 _brin_parallel_scan_and_build(buildstate, brinleader->brinshared,
2807 brinleader->sharedsort, heap, index, sortmem, true);
2808}
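/*
 * For illustration (settings assumed): with maintenance_work_mem = 64MB,
 * three launched workers and a participating leader
 * (nparticipanttuplesorts = 4), each participant's tuplesort gets 16MB.
 */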
2809
2810/*
2811 * Perform a worker's portion of a parallel sort.
2812 *
2813 * This generates a tuplesort for the worker portion of the table.
2814 *
2815 * sortmem is the amount of working memory to use within each worker,
2816 * expressed in KBs.
2817 *
2818 * When this returns, workers are done, and need only release resources.
2819 */
2820static void
2821_brin_parallel_scan_and_build(BrinBuildState *state,
2822 BrinShared *brinshared, Sharedsort *sharedsort,
2823 Relation heap, Relation index,
2824 int sortmem, bool progress)
2825{
2826 SortCoordinate coordinate;
2827 TableScanDesc scan;
2828 double reltuples;
2829 IndexInfo *indexInfo;
2830
2831 /* Initialize local tuplesort coordination state */
2832 coordinate = palloc0(sizeof(SortCoordinateData));
2833 coordinate->isWorker = true;
2834 coordinate->nParticipants = -1;
2835 coordinate->sharedsort = sharedsort;
2836
2837 /* Begin "partial" tuplesort */
2838 state->bs_sortstate = tuplesort_begin_index_brin(sortmem, coordinate,
2839 TUPLESORT_NONE);
2840
2841 /* Join parallel scan */
2842 indexInfo = BuildIndexInfo(index);
2843 indexInfo->ii_Concurrent = brinshared->isconcurrent;
2844
2845 scan = table_beginscan_parallel(heap,
2846 ParallelTableScanFromBrinShared(brinshared));
2847
2848 reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
2849 brinbuildCallbackParallel, state, scan);
2850
2851 /* insert the last item */
2852 form_and_spill_tuple(state);
2853
2854 /* sort the BRIN ranges built by this worker */
2855 tuplesort_performsort(state->bs_sortstate);
2856
2857 state->bs_reltuples += reltuples;
2858
2859 /*
2860 * Done. Record ambuild statistics.
2861 */
2862 SpinLockAcquire(&brinshared->mutex);
2863 brinshared->nparticipantsdone++;
2864 brinshared->reltuples += state->bs_reltuples;
2865 brinshared->indtuples += state->bs_numtuples;
2866 SpinLockRelease(&brinshared->mutex);
2867
2868 /* Notify leader */
2869 ConditionVariableSignal(&brinshared->workersdonecv);
2870
2871 tuplesort_end(state->bs_sortstate);
2872}
2873
2874/*
2875 * Perform work within a launched parallel process.
2876 */
2877void
2878_brin_parallel_build_main(dsm_segment *seg, shm_toc *toc)
2879{
2880 char *sharedquery;
2881 BrinShared *brinshared;
2882 Sharedsort *sharedsort;
2883 BrinBuildState *buildstate;
2884 Relation heapRel;
2885 Relation indexRel;
2886 LOCKMODE heapLockmode;
2887 LOCKMODE indexLockmode;
2888 WalUsage *walusage;
2889 BufferUsage *bufferusage;
2890 int sortmem;
2891
2892 /*
2893 * The only possible status flag that can be set to the parallel worker is
2894 * PROC_IN_SAFE_IC.
2895 */
2896 Assert((MyProc->statusFlags == 0) ||
2897 (MyProc->statusFlags == PROC_IN_SAFE_IC));
2898
2899 /* Set debug_query_string for individual workers first */
2900 sharedquery = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, true);
2901 debug_query_string = sharedquery;
2902
2903 /* Report the query string from leader */
2904 pgstat_report_activity(STATE_RUNNING, debug_query_string);
2905
2906 /* Look up brin shared state */
2907 brinshared = shm_toc_lookup(toc, PARALLEL_KEY_BRIN_SHARED, false);
2908
2909 /* Open relations using lock modes known to be obtained by index.c */
2910 if (!brinshared->isconcurrent)
2911 {
2912 heapLockmode = ShareLock;
2913 indexLockmode = AccessExclusiveLock;
2914 }
2915 else
2916 {
2917 heapLockmode = ShareUpdateExclusiveLock;
2918 indexLockmode = RowExclusiveLock;
2919 }
2920
2921 /* Track query ID */
2922 pgstat_report_query_id(brinshared->queryid, false);
2923
2924 /* Open relations within worker */
2925 heapRel = table_open(brinshared->heaprelid, heapLockmode);
2926 indexRel = index_open(brinshared->indexrelid, indexLockmode);
2927
2928 buildstate = initialize_brin_buildstate(indexRel, NULL,
2929 brinshared->pagesPerRange,
2930 InvalidBlockNumber);
2931
2932 /* Look up shared state private to tuplesort.c */
2933 sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false);
2934 tuplesort_attach_shared(sharedsort, seg);
2935
2936 /* Prepare to track buffer usage during parallel execution */
2937 InstrStartParallelQuery();
2938
2939 /*
2940 * Might as well use a reliable figure when doling out maintenance_work_mem
2941 * (when the requested number of workers was not launched, this will be
2942 * somewhat higher than it is for other workers).
2943 */
2944 sortmem = maintenance_work_mem / brinshared->scantuplesortstates;
2945
2946 _brin_parallel_scan_and_build(buildstate, brinshared, sharedsort,
2947 heapRel, indexRel, sortmem, false);
2948
2949 /* Report WAL/buffer usage during parallel execution */
2950 bufferusage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
2951 walusage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
2952 InstrEndParallelQuery(&bufferusage[ParallelWorkerNumber],
2953 &walusage[ParallelWorkerNumber]);
2954
2955 index_close(indexRel, indexLockmode);
2956 table_close(heapRel, heapLockmode);
2957}
2958
2959/*
2960 * brin_build_empty_tuple
2961 * Maybe initialize a BRIN tuple representing an empty range.
2962 *
2963 * Returns a BRIN tuple representing an empty page range starting at the
2964 * specified block number. The empty tuple is initialized only once, when it's
2965 * needed for the first time, stored in the memory context bs_context to ensure
2966 * proper life span, and reused on following calls. All empty tuples are
2967 * exactly the same except for the bt_blkno field, which is set to the value
2968 * of the blkno parameter.
2969 */
2970static void
2971brin_build_empty_tuple(BrinBuildState *state, BlockNumber blkno)
2972{
2973 /* First time an empty tuple is requested? If yes, initialize it. */
2974 if (state->bs_emptyTuple == NULL)
2975 {
2976 MemoryContext oldcxt;
2977 BrinMemTuple *dtuple = brin_new_memtuple(state->bs_bdesc);
2978
2979 /* Allocate the tuple in context for the whole index build. */
2980 oldcxt = MemoryContextSwitchTo(state->bs_context);
2981
2982 state->bs_emptyTuple = brin_form_tuple(state->bs_bdesc, blkno, dtuple,
2983 &state->bs_emptyTupleLen);
2984
2985 MemoryContextSwitchTo(oldcxt);
2986 }
2987 else
2988 {
2989 /* If we already have an empty tuple, just update the block. */
2990 state->bs_emptyTuple->bt_blkno = blkno;
2991 }
2992}
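/*
 * For illustration: the first call, say for blkno = 128, forms and caches
 * the all-empty tuple once; a later call for blkno = 256 merely rewrites
 * bt_blkno, so long runs of empty ranges are filled without repeated
 * brin_form_tuple() work.
 */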
2993
2994/*
2995 * brin_fill_empty_ranges
2996 * Add BRIN index tuples representing empty page ranges.
2997 *
2998 * prevRange/nextRange determine for which page ranges to add empty summaries.
2999 * Both boundaries are exclusive, i.e. only ranges starting at blkno for which
3000 * (prevRange < blkno < nextRange) will be added to the index.
3001 *
3002 * If prevRange is InvalidBlockNumber, this means there was no previous page
3003 * range (i.e. the first empty range to add is for blkno=0).
3004 *
3005 * The empty tuple is built only once, and then reused for all future calls.
3006 */
3007static void
3008brin_fill_empty_ranges(BrinBuildState *state,
3009 BlockNumber prevRange, BlockNumber nextRange)
3010{
3011 BlockNumber blkno;
3012
3013 /*
3014 * If we already summarized some ranges, we need to start with the next
3015 * one. Otherwise start from the first range of the table.
3016 */
3017 blkno = (prevRange == InvalidBlockNumber) ? 0 : (prevRange + state->bs_pagesPerRange);
3018
3019 /* Generate empty ranges until we hit the next non-empty range. */
3020 while (blkno < nextRange)
3021 {
3022 /* Did we already build the empty tuple? If not, do it now. */
3023 brin_build_empty_tuple(state, blkno);
3024
3025 brin_doinsert(state->bs_irel, state->bs_pagesPerRange, state->bs_rmAccess,
3026 &state->bs_currentInsertBuf,
3027 blkno, state->bs_emptyTuple, state->bs_emptyTupleLen);
3028
3029 /* try next page range */
3030 blkno += state->bs_pagesPerRange;
3031 }
3032}
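/*
 * Worked example (numbers assumed for illustration): with
 * bs_pagesPerRange = 128, prevRange = 128 and nextRange = 640, the loop
 * starts at blkno = 256 and inserts empty summaries for the ranges
 * starting at blocks 256, 384 and 512; both boundaries remain exclusive,
 * as described above.
 */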