nbtsort.c
1/*-------------------------------------------------------------------------
2 *
3 * nbtsort.c
4 * Build a btree from sorted input by loading leaf pages sequentially.
5 *
6 * NOTES
7 *
8 * We use tuplesort.c to sort the given index tuples into order.
9 * Then we scan the index tuples in order and build the btree pages
10 * for each level. We load source tuples into leaf-level pages.
11 * Whenever we fill a page at one level, we add a link to it to its
12 * parent level (starting a new parent level if necessary). When
13 * done, we write out each final page on each level, adding it to
14 * its parent level. When we have only one page on a level, it must be
15 * the root -- it can be attached to the btree metapage and we are done.
16 *
17 * It is not wise to pack the pages entirely full, since then *any*
18 * insertion would cause a split (and not only of the leaf page; the need
19 * for a split would cascade right up the tree). The steady-state load
20 * factor for btrees is usually estimated at 70%. We choose to pack leaf
21 * pages to the user-controllable fill factor (default 90%) while upper pages
22 * are always packed to 70%. This gives us reasonable density (there aren't
23 * many upper pages if the keys are reasonable-size) without risking a lot of
24 * cascading splits during early insertions.
25 *
26 * We use the bulk smgr loading facility to bypass the buffer cache and
27 * WAL-log the pages efficiently.
28 *
29 * This code isn't concerned about the FSM at all. The caller is responsible
30 * for initializing that.
31 *
32 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
33 * Portions Copyright (c) 1994, Regents of the University of California
34 *
35 * IDENTIFICATION
36 * src/backend/access/nbtree/nbtsort.c
37 *
38 *-------------------------------------------------------------------------
39 */
40
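/*
 * Editor's illustrative note (not part of the original file): with the
 * default BLCKSZ of 8192, the "full" thresholds that _bt_pagestate() derives
 * from the rules above work out to roughly
 *
 *     leaf pages, fillfactor 90:      BLCKSZ * (100 - 90) / 100 = 819 bytes
 *     internal pages, fillfactor 70:  BLCKSZ * (100 - 70) / 100 = 2457 bytes
 *
 * of free space deliberately left behind per page; a page is treated as
 * "full" once less free space than that remains.
 */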
41#include "postgres.h"
42
43#include "access/nbtree.h"
44#include "access/parallel.h"
45#include "access/relscan.h"
46#include "access/table.h"
47#include "access/xact.h"
48#include "catalog/index.h"
49#include "commands/progress.h"
50#include "executor/instrument.h"
51#include "miscadmin.h"
52#include "pgstat.h"
53#include "storage/bulk_write.h"
54#include "tcop/tcopprot.h"
55#include "utils/rel.h"
56#include "utils/sortsupport.h"
57#include "utils/tuplesort.h"
58
59
60/* Magic numbers for parallel state sharing */
61#define PARALLEL_KEY_BTREE_SHARED UINT64CONST(0xA000000000000001)
62#define PARALLEL_KEY_TUPLESORT UINT64CONST(0xA000000000000002)
63#define PARALLEL_KEY_TUPLESORT_SPOOL2 UINT64CONST(0xA000000000000003)
64#define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xA000000000000004)
65#define PARALLEL_KEY_WAL_USAGE UINT64CONST(0xA000000000000005)
66#define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xA000000000000006)
67
68/*
69 * DISABLE_LEADER_PARTICIPATION disables the leader's participation in
70 * parallel index builds. This may be useful as a debugging aid.
71#undef DISABLE_LEADER_PARTICIPATION
72 */
73
74/*
75 * Status record for spooling/sorting phase. (Note we may have two of
76 * these due to the special requirements for uniqueness-checking with
77 * dead tuples.)
78 */
79typedef struct BTSpool
80{
81 Tuplesortstate *sortstate; /* state data for tuplesort.c */
82 Relation heap;
83 Relation index;
84 bool isunique;
85 bool nulls_not_distinct;
86} BTSpool;
87
88/*
89 * Status for index builds performed in parallel. This is allocated in a
90 * dynamic shared memory segment. Note that there is a separate tuplesort TOC
91 * entry, private to tuplesort.c but allocated by this module on its behalf.
92 */
93typedef struct BTShared
94{
95 /*
96 * These fields are not modified during the sort. They primarily exist
97 * for the benefit of worker processes that need to create BTSpool state
98 * corresponding to that used by the leader.
99 */
100 Oid heaprelid;
101 Oid indexrelid;
102 bool isunique;
103 bool nulls_not_distinct;
104 bool isconcurrent;
105 int scantuplesortstates;
106
107 /* Query ID, for report in worker processes */
108 uint64 queryid;
109
110 /*
111 * workersdonecv is used to monitor the progress of workers. All parallel
112 * participants must indicate that they are done before leader can use
113 * mutable state that workers maintain during scan (and before leader can
114 * proceed to tuplesort_performsort()).
115 */
116 ConditionVariable workersdonecv;
117
118 /*
119 * mutex protects all fields before heapdesc.
120 *
121 * These fields contain status information of interest to B-Tree index
122 * builds that must work just the same when an index is built in parallel.
123 */
124 slock_t mutex;
125
126 /*
127 * Mutable state that is maintained by workers, and reported back to
128 * leader at end of parallel scan.
129 *
130 * nparticipantsdone is number of worker processes finished.
131 *
132 * reltuples is the total number of input heap tuples.
133 *
134 * havedead indicates if RECENTLY_DEAD tuples were encountered during
135 * build.
136 *
137 * indtuples is the total number of tuples that made it into the index.
138 *
139 * brokenhotchain indicates if any worker detected a broken HOT chain
140 * during build.
141 */
142 int nparticipantsdone;
143 double reltuples;
144 bool havedead;
145 double indtuples;
146 bool brokenhotchain;
147
148 /*
149 * ParallelTableScanDescData data follows. Can't directly embed here, as
150 * implementations of the parallel table scan desc interface might need
151 * stronger alignment.
152 */
153} BTShared;
154
155/*
156 * Return pointer to a BTShared's parallel table scan.
157 *
158 * c.f. shm_toc_allocate as to why BUFFERALIGN is used, rather than just
159 * MAXALIGN.
160 */
161#define ParallelTableScanFromBTShared(shared) \
162 (ParallelTableScanDesc) ((char *) (shared) + BUFFERALIGN(sizeof(BTShared)))
163
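/*
 * Editor's sketch (not part of the original file): the shared chunk that this
 * macro indexes into is laid out as
 *
 *     byte 0                           BTShared fields
 *     BUFFERALIGN(sizeof(BTShared))    ParallelTableScanDescData (plus any
 *                                      AM-specific parallel scan state)
 *
 * which is why _bt_parallel_estimate_shared() adds
 * table_parallelscan_estimate() to BUFFERALIGN(sizeof(BTShared)) rather than
 * to MAXALIGN(sizeof(BTShared)).
 */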
164/*
165 * Status for leader in parallel index build.
166 */
167typedef struct BTLeader
168{
169 /* parallel context itself */
170 ParallelContext *pcxt;
171
172 /*
173 * nparticipanttuplesorts is the exact number of worker processes
174 * successfully launched, plus one leader process if it participates as a
175 * worker (only DISABLE_LEADER_PARTICIPATION builds avoid leader
176 * participating as a worker).
177 */
178 int nparticipanttuplesorts;
179
180 /*
181 * Leader process convenience pointers to shared state (leader avoids TOC
182 * lookups).
183 *
184 * btshared is the shared state for entire build. sharedsort is the
185 * shared, tuplesort-managed state passed to each process tuplesort.
186 * sharedsort2 is the corresponding btspool2 shared state, used only when
187 * building unique indexes. snapshot is the snapshot used by the scan iff
188 * an MVCC snapshot is required.
189 */
190 BTShared *btshared;
191 Sharedsort *sharedsort;
192 Sharedsort *sharedsort2;
193 Snapshot snapshot;
194 WalUsage *walusage;
195 BufferUsage *bufferusage;
196} BTLeader;
197
198/*
199 * Working state for btbuild and its callback.
200 *
201 * When parallel CREATE INDEX is used, there is a BTBuildState for each
202 * participant.
203 */
204typedef struct BTBuildState
205{
206 bool isunique;
207 bool nulls_not_distinct;
208 bool havedead;
209 Relation heap;
210 BTSpool *spool;
211
212 /*
213 * spool2 is needed only when the index is a unique index. Dead tuples are
214 * put into spool2 instead of spool in order to avoid uniqueness check.
215 */
216 BTSpool *spool2;
217 double indtuples;
218
219 /*
220 * btleader is only present when a parallel index build is performed, and
221 * only in the leader process. (Actually, only the leader has a
222 * BTBuildState. Workers have their own spool and spool2, though.)
223 */
224 BTLeader *btleader;
225} BTBuildState;
226
227/*
228 * Status record for a btree page being built. We have one of these
229 * for each active tree level.
230 */
231typedef struct BTPageState
232{
233 BulkWriteBuffer btps_buf; /* workspace for page building */
234 BlockNumber btps_blkno; /* block # to write this page at */
235 IndexTuple btps_lowkey; /* page's strict lower bound pivot tuple */
236 OffsetNumber btps_lastoff; /* last item offset loaded */
237 Size btps_lastextra; /* last item's extra posting list space */
238 uint32 btps_level; /* tree level (0 = leaf) */
239 Size btps_full; /* "full" if less than this much free space */
240 struct BTPageState *btps_next; /* link to parent level, if any */
241} BTPageState;
242
243/*
244 * Overall status record for index writing phase.
245 */
246typedef struct BTWriteState
247{
248 Relation heap;
249 Relation index;
250 BulkWriteState *bulkstate;
251 BTScanInsert inskey; /* generic insertion scankey */
252 BlockNumber btws_pages_alloced; /* # pages allocated */
253} BTWriteState;
254
255
256static double _bt_spools_heapscan(Relation heap, Relation index,
257 BTBuildState *buildstate, IndexInfo *indexInfo);
258static void _bt_spooldestroy(BTSpool *btspool);
259static void _bt_spool(BTSpool *btspool, ItemPointer self,
260 Datum *values, bool *isnull);
261static void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2);
262static void _bt_build_callback(Relation index, ItemPointer tid, Datum *values,
263 bool *isnull, bool tupleIsAlive, void *state);
264static BulkWriteBuffer _bt_blnewpage(BTWriteState *wstate, uint32 level);
265static BTPageState *_bt_pagestate(BTWriteState *wstate, uint32 level);
266static void _bt_slideleft(Page rightmostpage);
267static void _bt_sortaddtup(Page page, Size itemsize,
268 IndexTuple itup, OffsetNumber itup_off,
269 bool newfirstdataitem);
270static void _bt_buildadd(BTWriteState *wstate, BTPageState *state,
271 IndexTuple itup, Size truncextra);
272static void _bt_sort_dedup_finish_pending(BTWriteState *wstate,
273 BTPageState *state,
274 BTDedupState dstate);
275static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
276static void _bt_load(BTWriteState *wstate,
277 BTSpool *btspool, BTSpool *btspool2);
278static void _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent,
279 int request);
280static void _bt_end_parallel(BTLeader *btleader);
281static Size _bt_parallel_estimate_shared(Relation heap, Snapshot snapshot);
282static double _bt_parallel_heapscan(BTBuildState *buildstate,
283 bool *brokenhotchain);
284static void _bt_leader_participate_as_worker(BTBuildState *buildstate);
285static void _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
286 BTShared *btshared, Sharedsort *sharedsort,
287 Sharedsort *sharedsort2, int sortmem,
288 bool progress);
289
290
291/*
292 * btbuild() -- build a new btree index.
293 */
294IndexBuildResult *
295btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
296{
297 IndexBuildResult *result;
298 BTBuildState buildstate;
299 double reltuples;
300
301#ifdef BTREE_BUILD_STATS
302 if (log_btree_build_stats)
303 ResetUsage();
304#endif /* BTREE_BUILD_STATS */
305
306 buildstate.isunique = indexInfo->ii_Unique;
307 buildstate.nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
308 buildstate.havedead = false;
309 buildstate.heap = heap;
310 buildstate.spool = NULL;
311 buildstate.spool2 = NULL;
312 buildstate.indtuples = 0;
313 buildstate.btleader = NULL;
314
315 /*
316 * We expect to be called exactly once for any index relation. If that's
317 * not the case, big trouble's what we have.
318 */
319 if (RelationGetNumberOfBlocks(index) != 0)
320 elog(ERROR, "index \"%s\" already contains data",
321 RelationGetRelationName(index));
322
323 reltuples = _bt_spools_heapscan(heap, index, &buildstate, indexInfo);
324
325 /*
326 * Finish the build by (1) completing the sort of the spool file, (2)
327 * inserting the sorted tuples into btree pages and (3) building the upper
328 * levels. Finally, it may also be necessary to end use of parallelism.
329 */
330 _bt_leafbuild(buildstate.spool, buildstate.spool2);
331 _bt_spooldestroy(buildstate.spool);
332 if (buildstate.spool2)
333 _bt_spooldestroy(buildstate.spool2);
334 if (buildstate.btleader)
335 _bt_end_parallel(buildstate.btleader);
336
337 result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
338
339 result->heap_tuples = reltuples;
340 result->index_tuples = buildstate.indtuples;
341
342#ifdef BTREE_BUILD_STATS
343 if (log_btree_build_stats)
344 {
345 ShowUsage("BTREE BUILD STATS");
346 ResetUsage();
347 }
348#endif /* BTREE_BUILD_STATS */
349
350 return result;
351}
352
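/*
 * Editor's note (not part of the original file): btbuild() is not called
 * directly.  It is installed as the ambuild callback in nbtree's
 * IndexAmRoutine (see bthandler() in nbtree.c), and index.c reaches it
 * through index_build() when CREATE INDEX or REINDEX runs.
 */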
353/*
354 * Create and initialize one or two spool structures, and save them in caller's
355 * buildstate argument. May also fill-in fields within indexInfo used by index
356 * builds.
357 *
358 * Scans the heap, possibly in parallel, filling spools with IndexTuples. This
359 * routine encapsulates all aspects of managing parallelism. Caller need only
360 * call _bt_end_parallel() in parallel case after it is done with spool/spool2.
361 *
362 * Returns the total number of heap tuples scanned.
363 */
364static double
365_bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
366 IndexInfo *indexInfo)
367{
368 BTSpool *btspool = (BTSpool *) palloc0(sizeof(BTSpool));
369 SortCoordinate coordinate = NULL;
370 double reltuples = 0;
371
372 /*
373 * We size the sort area as maintenance_work_mem rather than work_mem to
374 * speed index creation. This should be OK since a single backend can't
375 * run multiple index creations in parallel (see also: notes on
376 * parallelism and maintenance_work_mem below).
377 */
378 btspool->heap = heap;
379 btspool->index = index;
380 btspool->isunique = indexInfo->ii_Unique;
381 btspool->nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
382
383 /* Save as primary spool */
384 buildstate->spool = btspool;
385
386 /* Report table scan phase started */
387 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
388 PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN);
389
390 /* Attempt to launch parallel worker scan when required */
391 if (indexInfo->ii_ParallelWorkers > 0)
392 _bt_begin_parallel(buildstate, indexInfo->ii_Concurrent,
393 indexInfo->ii_ParallelWorkers);
394
395 /*
396 * If parallel build requested and at least one worker process was
397 * successfully launched, set up coordination state
398 */
399 if (buildstate->btleader)
400 {
401 coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
402 coordinate->isWorker = false;
403 coordinate->nParticipants =
404 buildstate->btleader->nparticipanttuplesorts;
405 coordinate->sharedsort = buildstate->btleader->sharedsort;
406 }
407
408 /*
409 * Begin serial/leader tuplesort.
410 *
411 * In cases where parallelism is involved, the leader receives the same
412 * share of maintenance_work_mem as a serial sort (it is generally treated
413 * in the same way as a serial sort once we return). Parallel worker
414 * Tuplesortstates will have received only a fraction of
415 * maintenance_work_mem, though.
416 *
417 * We rely on the lifetime of the Leader Tuplesortstate almost not
418 * overlapping with any worker Tuplesortstate's lifetime. There may be
419 * some small overlap, but that's okay because we rely on leader
420 * Tuplesortstate only allocating a small, fixed amount of memory here.
421 * When its tuplesort_performsort() is called (by our caller), and
422 * significant amounts of memory are likely to be used, all workers must
423 * have already freed almost all memory held by their Tuplesortstates
424 * (they are about to go away completely, too). The overall effect is
425 * that maintenance_work_mem always represents an absolute high watermark
426 * on the amount of memory used by a CREATE INDEX operation, regardless of
427 * the use of parallelism or any other factor.
428 */
429 buildstate->spool->sortstate =
430 tuplesort_begin_index_btree(heap, index, buildstate->isunique,
431 buildstate->nulls_not_distinct,
432 maintenance_work_mem, coordinate,
433 TUPLESORT_NONE);
434
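/*
 * Editor's illustrative arithmetic (not part of the original file): with
 * maintenance_work_mem = 64MB and two launched workers plus a participating
 * leader, each participant's scan/sort tuplesort is given 64MB / 3 (see the
 * sortmem calculations further down), while the leader-as-leader tuplesort
 * begun here is allowed the full 64MB -- but it only allocates significant
 * memory after the workers have released theirs, preserving the
 * high-watermark property described above.
 */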
435 /*
436 * If building a unique index, put dead tuples in a second spool to keep
437 * them out of the uniqueness check. We expect that the second spool (for
438 * dead tuples) won't get very full, so we give it only work_mem.
439 */
440 if (indexInfo->ii_Unique)
441 {
442 BTSpool *btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
443 SortCoordinate coordinate2 = NULL;
444
445 /* Initialize secondary spool */
446 btspool2->heap = heap;
447 btspool2->index = index;
448 btspool2->isunique = false;
449 /* Save as secondary spool */
450 buildstate->spool2 = btspool2;
451
452 if (buildstate->btleader)
453 {
454 /*
455 * Set up non-private state that is passed to
456 * tuplesort_begin_index_btree() about the basic high level
457 * coordination of a parallel sort.
458 */
459 coordinate2 = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
460 coordinate2->isWorker = false;
461 coordinate2->nParticipants =
462 buildstate->btleader->nparticipanttuplesorts;
463 coordinate2->sharedsort = buildstate->btleader->sharedsort2;
464 }
465
466 /*
467 * We expect that the second one (for dead tuples) won't get very
468 * full, so we give it only work_mem
469 */
470 buildstate->spool2->sortstate =
471 tuplesort_begin_index_btree(heap, index, false, false, work_mem,
472 coordinate2, TUPLESORT_NONE);
473 }
474
475 /* Fill spool using either serial or parallel heap scan */
476 if (!buildstate->btleader)
477 reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
478 _bt_build_callback, buildstate,
479 NULL);
480 else
481 reltuples = _bt_parallel_heapscan(buildstate,
482 &indexInfo->ii_BrokenHotChain);
483
484 /*
485 * Set the progress target for the next phase. Reset the block number
486 * values set by table_index_build_scan
487 */
488 {
489 const int progress_index[] = {
490 PROGRESS_CREATEIDX_TUPLES_TOTAL,
491 PROGRESS_SCAN_BLOCKS_TOTAL,
492 PROGRESS_SCAN_BLOCKS_DONE
493 };
494 const int64 progress_vals[] = {
495 buildstate->indtuples,
496 0, 0
497 };
498
499 pgstat_progress_update_multi_param(3, progress_index, progress_vals);
500 }
501
502 /* okay, all heap tuples are spooled */
503 if (buildstate->spool2 && !buildstate->havedead)
504 {
505 /* spool2 turns out to be unnecessary */
506 _bt_spooldestroy(buildstate->spool2);
507 buildstate->spool2 = NULL;
508 }
509
510 return reltuples;
511}
512
513/*
514 * clean up a spool structure and its substructures.
515 */
516static void
517_bt_spooldestroy(BTSpool *btspool)
518{
519 tuplesort_end(btspool->sortstate);
520 pfree(btspool);
521}
522
523/*
524 * spool an index entry into the sort file.
525 */
526static void
527_bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull)
528{
529 tuplesort_putindextuplevalues(btspool->sortstate, btspool->index,
530 self, values, isnull);
531}
532
533/*
534 * given a spool loaded by successive calls to _bt_spool,
535 * create an entire btree.
536 */
537static void
538_bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
539{
540 BTWriteState wstate;
541
542#ifdef BTREE_BUILD_STATS
543 if (log_btree_build_stats)
544 {
545 ShowUsage("BTREE BUILD (Spool) STATISTICS");
546 ResetUsage();
547 }
548#endif /* BTREE_BUILD_STATS */
549
550 /* Execute the sort */
551 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
552 PROGRESS_BTREE_PHASE_PERFORMSORT_1);
553 tuplesort_performsort(btspool->sortstate);
554 if (btspool2)
555 {
556 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
557 PROGRESS_BTREE_PHASE_PERFORMSORT_2);
558 tuplesort_performsort(btspool2->sortstate);
559 }
560
561 wstate.heap = btspool->heap;
562 wstate.index = btspool->index;
563 wstate.inskey = _bt_mkscankey(wstate.index, NULL);
564 /* _bt_mkscankey() won't set allequalimage without metapage */
565 wstate.inskey->allequalimage = _bt_allequalimage(wstate.index, true);
566
567 /* reserve the metapage */
568 wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
569
570 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
571 PROGRESS_BTREE_PHASE_LEAF_LOAD);
572 _bt_load(&wstate, btspool, btspool2);
573}
574
575/*
576 * Per-tuple callback for table_index_build_scan
577 */
578static void
579_bt_build_callback(Relation index,
580 ItemPointer tid,
581 Datum *values,
582 bool *isnull,
583 bool tupleIsAlive,
584 void *state)
585{
586 BTBuildState *buildstate = (BTBuildState *) state;
587
588 /*
589 * insert the index tuple into the appropriate spool file for subsequent
590 * processing
591 */
592 if (tupleIsAlive || buildstate->spool2 == NULL)
593 _bt_spool(buildstate->spool, tid, values, isnull);
594 else
595 {
596 /* dead tuples are put into spool2 */
597 buildstate->havedead = true;
598 _bt_spool(buildstate->spool2, tid, values, isnull);
599 }
600
601 buildstate->indtuples += 1;
602}
603
604/*
605 * allocate workspace for a new, clean btree page, not linked to any siblings.
606 */
607static BulkWriteBuffer
608_bt_blnewpage(BTWriteState *wstate, uint32 level)
609{
610 BulkWriteBuffer buf;
611 Page page;
612 BTPageOpaque opaque;
613
614 buf = smgr_bulk_get_buf(wstate->bulkstate);
615 page = (Page) buf;
616
617 /* Zero the page and set up standard page header info */
618 _bt_pageinit(page, BLCKSZ);
619
620 /* Initialize BT opaque state */
621 opaque = BTPageGetOpaque(page);
622 opaque->btpo_prev = opaque->btpo_next = P_NONE;
623 opaque->btpo_level = level;
624 opaque->btpo_flags = (level > 0) ? 0 : BTP_LEAF;
625 opaque->btpo_cycleid = 0;
626
627 /* Make the P_HIKEY line pointer appear allocated */
628 ((PageHeader) page)->pd_lower += sizeof(ItemIdData);
629
630 return buf;
631}
632
633/*
634 * emit a completed btree page, and release the working storage.
635 */
636static void
637_bt_blwritepage(BTWriteState *wstate, BulkWriteBuffer buf, BlockNumber blkno)
638{
639 smgr_bulk_write(wstate->bulkstate, blkno, buf, true);
640 /* smgr_bulk_write took ownership of 'buf' */
641}
642
643/*
644 * allocate and initialize a new BTPageState. the returned structure
645 * is suitable for immediate use by _bt_buildadd.
646 */
647static BTPageState *
648_bt_pagestate(BTWriteState *wstate, uint32 level)
649{
650 BTPageState *state = (BTPageState *) palloc0(sizeof(BTPageState));
651
652 /* create initial page for level */
653 state->btps_buf = _bt_blnewpage(wstate, level);
654
655 /* and assign it a page position */
656 state->btps_blkno = wstate->btws_pages_alloced++;
657
658 state->btps_lowkey = NULL;
659 /* initialize lastoff so first item goes into P_FIRSTKEY */
660 state->btps_lastoff = P_HIKEY;
661 state->btps_lastextra = 0;
662 state->btps_level = level;
663 /* set "full" threshold based on level. See notes at head of file. */
664 if (level > 0)
665 state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100);
666 else
667 state->btps_full = BTGetTargetPageFreeSpace(wstate->index);
668
669 /* no parent level, yet */
670 state->btps_next = NULL;
671
672 return state;
673}
674
675/*
676 * Slide the array of ItemIds from the page back one slot (from P_FIRSTKEY to
677 * P_HIKEY, overwriting P_HIKEY).
678 *
679 * _bt_blnewpage() makes the P_HIKEY line pointer appear allocated, but the
680 * rightmost page on its level is not supposed to get a high key. Now that
681 * it's clear that this page is a rightmost page, remove the unneeded empty
682 * P_HIKEY line pointer space.
683 */
684static void
685_bt_slideleft(Page rightmostpage)
686{
687 OffsetNumber off;
688 OffsetNumber maxoff;
689 ItemId previi;
690
691 maxoff = PageGetMaxOffsetNumber(rightmostpage);
692 Assert(maxoff >= P_FIRSTKEY);
693 previi = PageGetItemId(rightmostpage, P_HIKEY);
694 for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off))
695 {
696 ItemId thisii = PageGetItemId(rightmostpage, off);
697
698 *previi = *thisii;
699 previi = thisii;
700 }
701 ((PageHeader) rightmostpage)->pd_lower -= sizeof(ItemIdData);
702}
703
704/*
705 * Add an item to a page being built.
706 *
707 * This is very similar to nbtinsert.c's _bt_pgaddtup(), but this variant
708 * raises an error directly.
709 *
710 * Note that our nbtsort.c caller does not know yet if the page will be
711 * rightmost. Offset P_FIRSTKEY is always assumed to be the first data key by
712 * caller. Page that turns out to be the rightmost on its level is fixed by
713 * calling _bt_slideleft().
714 */
715static void
716_bt_sortaddtup(Page page,
717 Size itemsize,
718 IndexTuple itup,
719 OffsetNumber itup_off,
720 bool newfirstdataitem)
721{
722 IndexTupleData trunctuple;
723
724 if (newfirstdataitem)
725 {
726 trunctuple = *itup;
727 trunctuple.t_info = sizeof(IndexTupleData);
728 BTreeTupleSetNAtts(&trunctuple, 0, false);
729 itup = &trunctuple;
730 itemsize = sizeof(IndexTupleData);
731 }
732
733 if (PageAddItem(page, (Item) itup, itemsize, itup_off,
734 false, false) == InvalidOffsetNumber)
735 elog(ERROR, "failed to add item to the index page");
736}
737
738/*----------
739 * Add an item to a disk page from the sort output (or add a posting list
740 * item formed from the sort output).
741 *
742 * We must be careful to observe the page layout conventions of nbtsearch.c:
743 * - rightmost pages start data items at P_HIKEY instead of at P_FIRSTKEY.
744 * - on non-leaf pages, the key portion of the first item need not be
745 * stored, we should store only the link.
746 *
747 * A leaf page being built looks like:
748 *
749 * +----------------+---------------------------------+
750 * | PageHeaderData | linp0 linp1 linp2 ... |
751 * +-----------+----+---------------------------------+
752 * | ... linpN | |
753 * +-----------+--------------------------------------+
754 * | ^ last |
755 * | |
756 * +-------------+------------------------------------+
757 * | | itemN ... |
758 * +-------------+------------------+-----------------+
759 * | ... item3 item2 item1 | "special space" |
760 * +--------------------------------+-----------------+
761 *
762 * Contrast this with the diagram in bufpage.h; note the mismatch
763 * between linps and items. This is because we reserve linp0 as a
764 * placeholder for the pointer to the "high key" item; when we have
765 * filled up the page, we will set linp0 to point to itemN and clear
766 * linpN. On the other hand, if we find this is the last (rightmost)
767 * page, we leave the items alone and slide the linp array over. If
768 * the high key is to be truncated, offset 1 is deleted, and we insert
769 * the truncated high key at offset 1.
770 *
771 * 'last' pointer indicates the last offset added to the page.
772 *
773 * 'truncextra' is the size of the posting list in itup, if any. This
774 * information is stashed for the next call here, when we may benefit
775 * from considering the impact of truncating away the posting list on
776 * the page before deciding to finish the page off. Posting lists are
777 * often relatively large, so it is worth going to the trouble of
778 * accounting for the saving from truncating away the posting list of
779 * the tuple that becomes the high key (that may be the only way to
780 * get close to target free space on the page). Note that this is
781 * only used for the soft fillfactor-wise limit, not the critical hard
782 * limit.
783 *----------
784 */
785static void
786_bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup,
787 Size truncextra)
788{
789 BulkWriteBuffer nbuf;
790 Page npage;
791 BlockNumber nblkno;
792 OffsetNumber last_off;
793 Size last_truncextra;
794 Size pgspc;
795 Size itupsz;
796 bool isleaf;
797
798 /*
799 * This is a handy place to check for cancel interrupts during the btree
800 * load phase of index creation.
801 */
802 CHECK_FOR_INTERRUPTS();
803
804 nbuf = state->btps_buf;
805 npage = (Page) nbuf;
806 nblkno = state->btps_blkno;
807 last_off = state->btps_lastoff;
808 last_truncextra = state->btps_lastextra;
809 state->btps_lastextra = truncextra;
810
811 pgspc = PageGetFreeSpace(npage);
812 itupsz = IndexTupleSize(itup);
813 itupsz = MAXALIGN(itupsz);
814 /* Leaf case has slightly different rules due to suffix truncation */
815 isleaf = (state->btps_level == 0);
816
817 /*
818 * Check whether the new item can fit on a btree page on current level at
819 * all.
820 *
821 * Every newly built index will treat heap TID as part of the keyspace,
822 * which imposes the requirement that new high keys must occasionally have
823 * a heap TID appended within _bt_truncate(). That may leave a new pivot
824 * tuple one or two MAXALIGN() quantums larger than the original
825 * firstright tuple it's derived from. v4 deals with the problem by
826 * decreasing the limit on the size of tuples inserted on the leaf level
827 * by the same small amount. Enforce the new v4+ limit on the leaf level,
828 * and the old limit on internal levels, since pivot tuples may need to
829 * make use of the reserved space. This should never fail on internal
830 * pages.
831 */
832 if (unlikely(itupsz > BTMaxItemSize(npage)))
833 _bt_check_third_page(wstate->index, wstate->heap, isleaf, npage,
834 itup);
835
836 /*
837 * Check to see if current page will fit new item, with space left over to
838 * append a heap TID during suffix truncation when page is a leaf page.
839 *
840 * It is guaranteed that we can fit at least 2 non-pivot tuples plus a
841 * high key with heap TID when finishing off a leaf page, since we rely on
842 * _bt_check_third_page() rejecting oversized non-pivot tuples. On
843 * internal pages we can always fit 3 pivot tuples with larger internal
844 * page tuple limit (includes page high key).
845 *
846 * Most of the time, a page is only "full" in the sense that the soft
847 * fillfactor-wise limit has been exceeded. However, we must always leave
848 * at least two items plus a high key on each page before starting a new
849 * page. Disregard fillfactor and insert on "full" current page if we
850 * don't have the minimum number of items yet. (Note that we deliberately
851 * assume that suffix truncation neither enlarges nor shrinks new high key
852 * when applying soft limit, except when last tuple has a posting list.)
853 */
854 Assert(last_truncextra == 0 || isleaf);
855 if (pgspc < itupsz + (isleaf ? MAXALIGN(sizeof(ItemPointerData)) : 0) ||
856 (pgspc + last_truncextra < state->btps_full && last_off > P_FIRSTKEY))
857 {
858 /*
859 * Finish off the page and write it out.
860 */
861 BulkWriteBuffer obuf = nbuf;
862 Page opage = npage;
863 BlockNumber oblkno = nblkno;
864 ItemId ii;
865 ItemId hii;
866 IndexTuple oitup;
867
868 /* Create new page of same level */
869 nbuf = _bt_blnewpage(wstate, state->btps_level);
870 npage = (Page) nbuf;
871
872 /* and assign it a page position */
873 nblkno = wstate->btws_pages_alloced++;
874
875 /*
876 * We copy the last item on the page into the new page, and then
877 * rearrange the old page so that the 'last item' becomes its high key
878 * rather than a true data item. There had better be at least two
879 * items on the page already, else the page would be empty of useful
880 * data.
881 */
882 Assert(last_off > P_FIRSTKEY);
883 ii = PageGetItemId(opage, last_off);
884 oitup = (IndexTuple) PageGetItem(opage, ii);
885 _bt_sortaddtup(npage, ItemIdGetLength(ii), oitup, P_FIRSTKEY,
886 !isleaf);
887
888 /*
889 * Move 'last' into the high key position on opage. _bt_blnewpage()
890 * allocated empty space for a line pointer when opage was first
891 * created, so this is a matter of rearranging already-allocated space
892 * on page, and initializing high key line pointer. (Actually, leaf
893 * pages must also swap oitup with a truncated version of oitup, which
894 * is sometimes larger than oitup, though never by more than the space
895 * needed to append a heap TID.)
896 */
897 hii = PageGetItemId(opage, P_HIKEY);
898 *hii = *ii;
899 ItemIdSetUnused(ii); /* redundant */
900 ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
901
902 if (isleaf)
903 {
904 IndexTuple lastleft;
905 IndexTuple truncated;
906
907 /*
908 * Truncate away any unneeded attributes from high key on leaf
909 * level. This is only done at the leaf level because downlinks
910 * in internal pages are either negative infinity items, or get
911 * their contents from copying from one level down. See also:
912 * _bt_split().
913 *
914 * We don't try to bias our choice of split point to make it more
915 * likely that _bt_truncate() can truncate away more attributes,
916 * whereas the split point used within _bt_split() is chosen much
917 * more delicately. Even still, the lastleft and firstright
918 * tuples passed to _bt_truncate() here are at least not fully
919 * equal to each other when deduplication is used, unless there is
920 * a large group of duplicates (also, unique index builds usually
921 * have few or no spool2 duplicates). When the split point is
922 * between two unequal tuples, _bt_truncate() will avoid including
923 * a heap TID in the new high key, which is the most important
924 * benefit of suffix truncation.
925 *
926 * Overwrite the old item with new truncated high key directly.
927 * oitup is already located at the physical beginning of tuple
928 * space, so this should directly reuse the existing tuple space.
929 */
930 ii = PageGetItemId(opage, OffsetNumberPrev(last_off));
931 lastleft = (IndexTuple) PageGetItem(opage, ii);
932
933 Assert(IndexTupleSize(oitup) > last_truncextra);
934 truncated = _bt_truncate(wstate->index, lastleft, oitup,
935 wstate->inskey);
936 if (!PageIndexTupleOverwrite(opage, P_HIKEY, (Item) truncated,
937 IndexTupleSize(truncated)))
938 elog(ERROR, "failed to add high key to the index page");
939 pfree(truncated);
940
941 /* oitup should continue to point to the page's high key */
942 hii = PageGetItemId(opage, P_HIKEY);
943 oitup = (IndexTuple) PageGetItem(opage, hii);
944 }
945
946 /*
947 * Link the old page into its parent, using its low key. If we don't
948 * have a parent, we have to create one; this adds a new btree level.
949 */
950 if (state->btps_next == NULL)
951 state->btps_next = _bt_pagestate(wstate, state->btps_level + 1);
952
953 Assert((BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) <=
954 IndexRelationGetNumberOfKeyAttributes(wstate->index) &&
955 BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) > 0) ||
956 P_LEFTMOST(BTPageGetOpaque(opage)));
957 Assert(BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) == 0 ||
958 !P_LEFTMOST(BTPageGetOpaque(opage)));
959 BTreeTupleSetDownLink(state->btps_lowkey, oblkno);
960 _bt_buildadd(wstate, state->btps_next, state->btps_lowkey, 0);
961 pfree(state->btps_lowkey);
962
963 /*
964 * Save a copy of the high key from the old page. It is also the low
965 * key for the new page.
966 */
967 state->btps_lowkey = CopyIndexTuple(oitup);
968
969 /*
970 * Set the sibling links for both pages.
971 */
972 {
973 BTPageOpaque oopaque = BTPageGetOpaque(opage);
974 BTPageOpaque nopaque = BTPageGetOpaque(npage);
975
976 oopaque->btpo_next = nblkno;
977 nopaque->btpo_prev = oblkno;
978 nopaque->btpo_next = P_NONE; /* redundant */
979 }
980
981 /*
982 * Write out the old page. _bt_blwritepage takes ownership of the
983 * 'opage' buffer.
984 */
985 _bt_blwritepage(wstate, obuf, oblkno);
986
987 /*
988 * Reset last_off to point to new page
989 */
990 last_off = P_FIRSTKEY;
991 }
992
993 /*
994 * By here, either original page is still the current page, or a new page
995 * was created that became the current page. Either way, the current page
996 * definitely has space for new item.
997 *
998 * If the new item is the first for its page, it must also be the first
999 * item on its entire level. On later same-level pages, a low key for a
1000 * page will be copied from the prior page in the code above. Generate a
1001 * minus infinity low key here instead.
1002 */
1003 if (last_off == P_HIKEY)
1004 {
1005 Assert(state->btps_lowkey == NULL);
1006 state->btps_lowkey = palloc0(sizeof(IndexTupleData));
1007 state->btps_lowkey->t_info = sizeof(IndexTupleData);
1008 BTreeTupleSetNAtts(state->btps_lowkey, 0, false);
1009 }
1010
1011 /*
1012 * Add the new item into the current page.
1013 */
1014 last_off = OffsetNumberNext(last_off);
1015 _bt_sortaddtup(npage, itupsz, itup, last_off,
1016 !isleaf && last_off == P_FIRSTKEY);
1017
1018 state->btps_buf = nbuf;
1019 state->btps_blkno = nblkno;
1020 state->btps_lastoff = last_off;
1021}
1022
1023/*
1024 * Finalize pending posting list tuple, and add it to the index. Final tuple
1025 * is based on saved base tuple, and saved list of heap TIDs.
1026 *
1027 * This is almost like _bt_dedup_finish_pending(), but it adds a new tuple
1028 * using _bt_buildadd().
1029 */
1030static void
1031_bt_sort_dedup_finish_pending(BTWriteState *wstate, BTPageState *state,
1032 BTDedupState dstate)
1033{
1034 Assert(dstate->nitems > 0);
1035
1036 if (dstate->nitems == 1)
1037 _bt_buildadd(wstate, state, dstate->base, 0);
1038 else
1039 {
1040 IndexTuple postingtuple;
1041 Size truncextra;
1042
1043 /* form a tuple with a posting list */
1044 postingtuple = _bt_form_posting(dstate->base,
1045 dstate->htids,
1046 dstate->nhtids);
1047 /* Calculate posting list overhead */
1048 truncextra = IndexTupleSize(postingtuple) -
1049 BTreeTupleGetPostingOffset(postingtuple);
1050
1051 _bt_buildadd(wstate, state, postingtuple, truncextra);
1052 pfree(postingtuple);
1053 }
1054
1055 dstate->nmaxitems = 0;
1056 dstate->nhtids = 0;
1057 dstate->nitems = 0;
1058 dstate->phystupsize = 0;
1059}
1060
1061/*
1062 * Finish writing out the completed btree.
1063 */
1064static void
1065_bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
1066{
1067 BTPageState *s;
1068 BlockNumber rootblkno = P_NONE;
1069 uint32 rootlevel = 0;
1070 BulkWriteBuffer metabuf;
1071
1072 /*
1073 * Each iteration of this loop completes one more level of the tree.
1074 */
1075 for (s = state; s != NULL; s = s->btps_next)
1076 {
1077 BlockNumber blkno;
1078 BTPageOpaque opaque;
1079
1080 blkno = s->btps_blkno;
1081 opaque = BTPageGetOpaque((Page) s->btps_buf);
1082
1083 /*
1084 * We have to link the last page on this level to somewhere.
1085 *
1086 * If we're at the top, it's the root, so attach it to the metapage.
1087 * Otherwise, add an entry for it to its parent using its low key.
1088 * This may cause the last page of the parent level to split, but
1089 * that's not a problem -- we haven't gotten to it yet.
1090 */
1091 if (s->btps_next == NULL)
1092 {
1093 opaque->btpo_flags |= BTP_ROOT;
1094 rootblkno = blkno;
1095 rootlevel = s->btps_level;
1096 }
1097 else
1098 {
1099 {
1100 Assert((BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) <= IndexRelationGetNumberOfKeyAttributes(wstate->index) &&
1101 BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) > 0) ||
1102 P_LEFTMOST(opaque));
1103 Assert(BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) == 0 ||
1104 !P_LEFTMOST(opaque));
1105 BTreeTupleSetDownLink(s->btps_lowkey, blkno);
1106 _bt_buildadd(wstate, s->btps_next, s->btps_lowkey, 0);
1107 pfree(s->btps_lowkey);
1108 s->btps_lowkey = NULL;
1109 }
1110
1111 /*
1112 * This is the rightmost page, so the ItemId array needs to be slid
1113 * back one slot. Then we can dump out the page.
1114 */
1115 _bt_slideleft((Page) s->btps_buf);
1116 _bt_blwritepage(wstate, s->btps_buf, s->btps_blkno);
1117 s->btps_buf = NULL; /* writepage took ownership of the buffer */
1118 }
1119
1120 /*
1121 * As the last step in the process, construct the metapage and make it
1122 * point to the new root (unless we had no data at all, in which case it's
1123 * set to point to "P_NONE"). This changes the index to the "valid" state
1124 * by filling in a valid magic number in the metapage.
1125 */
1126 metabuf = smgr_bulk_get_buf(wstate->bulkstate);
1127 _bt_initmetapage((Page) metabuf, rootblkno, rootlevel,
1128 wstate->inskey->allequalimage);
1129 _bt_blwritepage(wstate, metabuf, BTREE_METAPAGE);
1130}
1131
1132/*
1133 * Read tuples in correct sort order from tuplesort, and load them into
1134 * btree leaves.
1135 */
1136static void
1137_bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
1138{
1139 BTPageState *state = NULL;
1140 bool merge = (btspool2 != NULL);
1141 IndexTuple itup,
1142 itup2 = NULL;
1143 bool load1;
1144 TupleDesc tupdes = RelationGetDescr(wstate->index);
1145 int i,
1146 keysz = IndexRelationGetNumberOfKeyAttributes(wstate->index);
1147 SortSupport sortKeys;
1148 int64 tuples_done = 0;
1149 bool deduplicate;
1150
1151 wstate->bulkstate = smgr_bulk_start_rel(wstate->index, MAIN_FORKNUM);
1152
1153 deduplicate = wstate->inskey->allequalimage && !btspool->isunique &&
1154 BTGetDeduplicateItems(wstate->index);
1155
1156 if (merge)
1157 {
1158 /*
1159 * Another BTSpool for dead tuples exists. Now we have to merge
1160 * btspool and btspool2.
1161 */
1162
1163 /* the preparation of merge */
1164 itup = tuplesort_getindextuple(btspool->sortstate, true);
1165 itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
1166
1167 /* Prepare SortSupport data for each column */
1168 sortKeys = (SortSupport) palloc0(keysz * sizeof(SortSupportData));
1169
1170 for (i = 0; i < keysz; i++)
1171 {
1172 SortSupport sortKey = sortKeys + i;
1173 ScanKey scanKey = wstate->inskey->scankeys + i;
1174 int16 strategy;
1175
1176 sortKey->ssup_cxt = CurrentMemoryContext;
1177 sortKey->ssup_collation = scanKey->sk_collation;
1178 sortKey->ssup_nulls_first =
1179 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
1180 sortKey->ssup_attno = scanKey->sk_attno;
1181 /* Abbreviation is not supported here */
1182 sortKey->abbreviate = false;
1183
1184 Assert(sortKey->ssup_attno != 0);
1185
1186 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
1187 BTGreaterStrategyNumber : BTLessStrategyNumber;
1188
1189 PrepareSortSupportFromIndexRel(wstate->index, strategy, sortKey);
1190 }
1191
1192 for (;;)
1193 {
1194 load1 = true; /* load BTSpool next ? */
1195 if (itup2 == NULL)
1196 {
1197 if (itup == NULL)
1198 break;
1199 }
1200 else if (itup != NULL)
1201 {
1202 int32 compare = 0;
1203
1204 for (i = 1; i <= keysz; i++)
1205 {
1206 SortSupport entry;
1207 Datum attrDatum1,
1208 attrDatum2;
1209 bool isNull1,
1210 isNull2;
1211
1212 entry = sortKeys + i - 1;
1213 attrDatum1 = index_getattr(itup, i, tupdes, &isNull1);
1214 attrDatum2 = index_getattr(itup2, i, tupdes, &isNull2);
1215
1216 compare = ApplySortComparator(attrDatum1, isNull1,
1217 attrDatum2, isNull2,
1218 entry);
1219 if (compare > 0)
1220 {
1221 load1 = false;
1222 break;
1223 }
1224 else if (compare < 0)
1225 break;
1226 }
1227
1228 /*
1229 * If key values are equal, we sort on ItemPointer. This is
1230 * required for btree indexes, since heap TID is treated as an
1231 * implicit last key attribute in order to ensure that all
1232 * keys in the index are physically unique.
1233 */
1234 if (compare == 0)
1235 {
1236 compare = ItemPointerCompare(&itup->t_tid, &itup2->t_tid);
1237 Assert(compare != 0);
1238 if (compare > 0)
1239 load1 = false;
1240 }
1241 }
1242 else
1243 load1 = false;
1244
1245 /* When we see first tuple, create first index page */
1246 if (state == NULL)
1247 state = _bt_pagestate(wstate, 0);
1248
1249 if (load1)
1250 {
1251 _bt_buildadd(wstate, state, itup, 0);
1252 itup = tuplesort_getindextuple(btspool->sortstate, true);
1253 }
1254 else
1255 {
1256 _bt_buildadd(wstate, state, itup2, 0);
1257 itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
1258 }
1259
1260 /* Report progress */
1261 pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1262 ++tuples_done);
1263 }
1264 pfree(sortKeys);
1265 }
1266 else if (deduplicate)
1267 {
1268 /* merge is unnecessary, deduplicate into posting lists */
1269 BTDedupState dstate;
1270
1271 dstate = (BTDedupState) palloc(sizeof(BTDedupStateData));
1272 dstate->deduplicate = true; /* unused */
1273 dstate->nmaxitems = 0; /* unused */
1274 dstate->maxpostingsize = 0; /* set later */
1275 /* Metadata about base tuple of current pending posting list */
1276 dstate->base = NULL;
1277 dstate->baseoff = InvalidOffsetNumber; /* unused */
1278 dstate->basetupsize = 0;
1279 /* Metadata about current pending posting list TIDs */
1280 dstate->htids = NULL;
1281 dstate->nhtids = 0;
1282 dstate->nitems = 0;
1283 dstate->phystupsize = 0; /* unused */
1284 dstate->nintervals = 0; /* unused */
1285
1286 while ((itup = tuplesort_getindextuple(btspool->sortstate,
1287 true)) != NULL)
1288 {
1289 /* When we see first tuple, create first index page */
1290 if (state == NULL)
1291 {
1292 state = _bt_pagestate(wstate, 0);
1293
1294 /*
1295 * Limit size of posting list tuples to 1/10 space we want to
1296 * leave behind on the page, plus space for final item's line
1297 * pointer. This is equal to the space that we'd like to
1298 * leave behind on each leaf page when fillfactor is 90,
1299 * allowing us to get close to fillfactor% space utilization
1300 * when there happen to be a great many duplicates. (This
1301 * makes higher leaf fillfactor settings ineffective when
1302 * building indexes that have many duplicates, but packing
1303 * leaf pages full with few very large tuples doesn't seem
1304 * like a useful goal.)
1305 */
1306 dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) -
1307 sizeof(ItemIdData);
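/*
 * Editor's illustrative arithmetic (not part of the original file): on a
 * typical 64-bit build (MAXALIGN 8, BLCKSZ 8192) this works out to
 * MAXALIGN_DOWN(819) - sizeof(ItemIdData) = 816 - 4 = 812 bytes per posting
 * list tuple.
 */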
1308 Assert(dstate->maxpostingsize <= BTMaxItemSize((Page) state->btps_buf) &&
1309 dstate->maxpostingsize <= INDEX_SIZE_MASK);
1310 dstate->htids = palloc(dstate->maxpostingsize);
1311
1312 /* start new pending posting list with itup copy */
1313 _bt_dedup_start_pending(dstate, CopyIndexTuple(itup),
1314 InvalidOffsetNumber);
1315 }
1316 else if (_bt_keep_natts_fast(wstate->index, dstate->base,
1317 itup) > keysz &&
1318 _bt_dedup_save_htid(dstate, itup))
1319 {
1320 /*
1321 * Tuple is equal to base tuple of pending posting list. Heap
1322 * TID from itup has been saved in state.
1323 */
1324 }
1325 else
1326 {
1327 /*
1328 * Tuple is not equal to pending posting list tuple, or
1329 * _bt_dedup_save_htid() opted to not merge current item into
1330 * pending posting list.
1331 */
1332 _bt_sort_dedup_finish_pending(wstate, state, dstate);
1333 pfree(dstate->base);
1334
1335 /* start new pending posting list with itup copy */
1336 _bt_dedup_start_pending(dstate, CopyIndexTuple(itup),
1337 InvalidOffsetNumber);
1338 }
1339
1340 /* Report progress */
1341 pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1342 ++tuples_done);
1343 }
1344
1345 if (state)
1346 {
1347 /*
1348 * Handle the last item (there must be a last item when the
1349 * tuplesort returned one or more tuples)
1350 */
1351 _bt_sort_dedup_finish_pending(wstate, state, dstate);
1352 pfree(dstate->base);
1353 pfree(dstate->htids);
1354 }
1355
1356 pfree(dstate);
1357 }
1358 else
1359 {
1360 /* merging and deduplication are both unnecessary */
1361 while ((itup = tuplesort_getindextuple(btspool->sortstate,
1362 true)) != NULL)
1363 {
1364 /* When we see first tuple, create first index page */
1365 if (state == NULL)
1366 state = _bt_pagestate(wstate, 0);
1367
1368 _bt_buildadd(wstate, state, itup, 0);
1369
1370 /* Report progress */
1371 pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1372 ++tuples_done);
1373 }
1374 }
1375
1376 /* Close down final pages and write the metapage */
1377 _bt_uppershutdown(wstate, state);
1378 smgr_bulk_finish(wstate->bulkstate);
1379}
1380
1381/*
1382 * Create parallel context, and launch workers for leader.
1383 *
1384 * buildstate argument should be initialized (with the exception of the
1385 * tuplesort state in spools, which may later be created based on shared
1386 * state initially set up here).
1387 *
1388 * isconcurrent indicates if operation is CREATE INDEX CONCURRENTLY.
1389 *
1390 * request is the target number of parallel worker processes to launch.
1391 *
1392 * Sets buildstate's BTLeader, which caller must use to shut down parallel
1393 * mode by passing it to _bt_end_parallel() at the very end of its index
1394 * build. If not even a single worker process can be launched, this is
1395 * never set, and caller should proceed with a serial index build.
1396 */
1397static void
1398_bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
1399{
1400 ParallelContext *pcxt;
1401 int scantuplesortstates;
1402 Snapshot snapshot;
1403 Size estbtshared;
1404 Size estsort;
1405 BTShared *btshared;
1406 Sharedsort *sharedsort;
1407 Sharedsort *sharedsort2;
1408 BTSpool *btspool = buildstate->spool;
1409 BTLeader *btleader = (BTLeader *) palloc0(sizeof(BTLeader));
1410 WalUsage *walusage;
1411 BufferUsage *bufferusage;
1412 bool leaderparticipates = true;
1413 int querylen;
1414
1415#ifdef DISABLE_LEADER_PARTICIPATION
1416 leaderparticipates = false;
1417#endif
1418
1419 /*
1420 * Enter parallel mode, and create context for parallel build of btree
1421 * index
1422 */
1423 EnterParallelMode();
1424 Assert(request > 0);
1425 pcxt = CreateParallelContext("postgres", "_bt_parallel_build_main",
1426 request);
1427
1428 scantuplesortstates = leaderparticipates ? request + 1 : request;
1429
1430 /*
1431 * Prepare for scan of the base relation. In a normal index build, we use
1432 * SnapshotAny because we must retrieve all tuples and do our own time
1433 * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1434 * concurrent build, we take a regular MVCC snapshot and index whatever's
1435 * live according to that.
1436 */
1437 if (!isconcurrent)
1438 snapshot = SnapshotAny;
1439 else
1440 snapshot = RegisterSnapshot(GetTransactionSnapshot());
1441
1442 /*
1443 * Estimate size for our own PARALLEL_KEY_BTREE_SHARED workspace, and
1444 * PARALLEL_KEY_TUPLESORT tuplesort workspace
1445 */
1446 estbtshared = _bt_parallel_estimate_shared(btspool->heap, snapshot);
1447 shm_toc_estimate_chunk(&pcxt->estimator, estbtshared);
1448 estsort = tuplesort_estimate_shared(scantuplesortstates);
1449 shm_toc_estimate_chunk(&pcxt->estimator, estsort);
1450
1451 /*
1452 * Unique case requires a second spool, and so we may have to account for
1453 * another shared workspace for that -- PARALLEL_KEY_TUPLESORT_SPOOL2
1454 */
1455 if (!btspool->isunique)
1456 shm_toc_estimate_keys(&pcxt->estimator, 2);
1457 else
1458 {
1459 shm_toc_estimate_chunk(&pcxt->estimator, estsort);
1460 shm_toc_estimate_keys(&pcxt->estimator, 3);
1461 }
1462
1463 /*
1464 * Estimate space for WalUsage and BufferUsage -- PARALLEL_KEY_WAL_USAGE
1465 * and PARALLEL_KEY_BUFFER_USAGE.
1466 *
1467 * If there are no extensions loaded that care, we could skip this. We
1468 * have no way of knowing whether anyone's looking at pgWalUsage or
1469 * pgBufferUsage, so do it unconditionally.
1470 */
1471 shm_toc_estimate_chunk(&pcxt->estimator,
1472 mul_size(sizeof(WalUsage), pcxt->nworkers));
1473 shm_toc_estimate_keys(&pcxt->estimator, 1);
1474 shm_toc_estimate_chunk(&pcxt->estimator,
1475 mul_size(sizeof(BufferUsage), pcxt->nworkers));
1476 shm_toc_estimate_keys(&pcxt->estimator, 1);
1477
1478 /* Finally, estimate PARALLEL_KEY_QUERY_TEXT space */
1479 if (debug_query_string)
1480 {
1481 querylen = strlen(debug_query_string);
1482 shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
1483 shm_toc_estimate_keys(&pcxt->estimator, 1);
1484 }
1485 else
1486 querylen = 0; /* keep compiler quiet */
1487
1488 /* Everyone's had a chance to ask for space, so now create the DSM */
1489 InitializeParallelDSM(pcxt);
1490
1491 /* If no DSM segment was available, back out (do serial build) */
1492 if (pcxt->seg == NULL)
1493 {
1494 if (IsMVCCSnapshot(snapshot))
1495 UnregisterSnapshot(snapshot);
1496 DestroyParallelContext(pcxt);
1497 ExitParallelMode();
1498 return;
1499 }
1500
1501 /* Store shared build state, for which we reserved space */
1502 btshared = (BTShared *) shm_toc_allocate(pcxt->toc, estbtshared);
1503 /* Initialize immutable state */
1504 btshared->heaprelid = RelationGetRelid(btspool->heap);
1505 btshared->indexrelid = RelationGetRelid(btspool->index);
1506 btshared->isunique = btspool->isunique;
1507 btshared->nulls_not_distinct = btspool->nulls_not_distinct;
1508 btshared->isconcurrent = isconcurrent;
1509 btshared->scantuplesortstates = scantuplesortstates;
1510 btshared->queryid = pgstat_get_my_query_id();
1511 ConditionVariableInit(&btshared->workersdonecv);
1512 SpinLockInit(&btshared->mutex);
1513 /* Initialize mutable state */
1514 btshared->nparticipantsdone = 0;
1515 btshared->reltuples = 0.0;
1516 btshared->havedead = false;
1517 btshared->indtuples = 0.0;
1518 btshared->brokenhotchain = false;
1519 table_parallelscan_initialize(btspool->heap,
1520 ParallelTableScanFromBTShared(btshared),
1521 snapshot);
1522
1523 /*
1524 * Store shared tuplesort-private state, for which we reserved space.
1525 * Then, initialize opaque state using tuplesort routine.
1526 */
1527 sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
1528 tuplesort_initialize_shared(sharedsort, scantuplesortstates,
1529 pcxt->seg);
1530
1531 shm_toc_insert(pcxt->toc, PARALLEL_KEY_BTREE_SHARED, btshared);
1532 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort);
1533
1534 /* Unique case requires a second spool, and associated shared state */
1535 if (!btspool->isunique)
1536 sharedsort2 = NULL;
1537 else
1538 {
1539 /*
1540 * Store additional shared tuplesort-private state, for which we
1541 * reserved space. Then, initialize opaque state using tuplesort
1542 * routine.
1543 */
1544 sharedsort2 = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
1545 tuplesort_initialize_shared(sharedsort2, scantuplesortstates,
1546 pcxt->seg);
1547
1548 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT_SPOOL2, sharedsort2);
1549 }
1550
1551 /* Store query string for workers */
1552 if (debug_query_string)
1553 {
1554 char *sharedquery;
1555
1556 sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
1557 memcpy(sharedquery, debug_query_string, querylen + 1);
1558 shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, sharedquery);
1559 }
1560
1561 /*
1562 * Allocate space for each worker's WalUsage and BufferUsage; no need to
1563 * initialize.
1564 */
1565 walusage = shm_toc_allocate(pcxt->toc,
1566 mul_size(sizeof(WalUsage), pcxt->nworkers));
1567 shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage);
1568 bufferusage = shm_toc_allocate(pcxt->toc,
1569 mul_size(sizeof(BufferUsage), pcxt->nworkers));
1570 shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufferusage);
1571
1572 /* Launch workers, saving status for leader/caller */
1573 LaunchParallelWorkers(pcxt);
1574 btleader->pcxt = pcxt;
1575 btleader->nparticipanttuplesorts = pcxt->nworkers_launched;
1576 if (leaderparticipates)
1577 btleader->nparticipanttuplesorts++;
1578 btleader->btshared = btshared;
1579 btleader->sharedsort = sharedsort;
1580 btleader->sharedsort2 = sharedsort2;
1581 btleader->snapshot = snapshot;
1582 btleader->walusage = walusage;
1583 btleader->bufferusage = bufferusage;
1584
1585 /* If no workers were successfully launched, back out (do serial build) */
1586 if (pcxt->nworkers_launched == 0)
1587 {
1588 _bt_end_parallel(btleader);
1589 return;
1590 }
1591
1592 /* Save leader state now that it's clear build will be parallel */
1593 buildstate->btleader = btleader;
1594
1595 /* Join heap scan ourselves */
1596 if (leaderparticipates)
1597 _bt_leader_participate_as_worker(buildstate);
1598
1599 /*
1600 * Caller needs to wait for all launched workers when we return. Make
1601 * sure that the failure-to-start case will not hang forever.
1602 */
1603 WaitForParallelWorkersToAttach(pcxt);
1604}
1605
1606/*
1607 * Shut down workers, destroy parallel context, and end parallel mode.
1608 */
1609static void
1610_bt_end_parallel(BTLeader *btleader)
1611{
1612 int i;
1613
1614 /* Shutdown worker processes */
1615 WaitForParallelWorkersToFinish(btleader->pcxt);
1616
1617 /*
1618 * Next, accumulate WAL usage. (This must wait for the workers to finish,
1619 * or we might get incomplete data.)
1620 */
1621 for (i = 0; i < btleader->pcxt->nworkers_launched; i++)
1622 InstrAccumParallelQuery(&btleader->bufferusage[i], &btleader->walusage[i]);
1623
1624 /* Free last reference to MVCC snapshot, if one was used */
1625 if (IsMVCCSnapshot(btleader->snapshot))
1626 UnregisterSnapshot(btleader->snapshot);
1627 DestroyParallelContext(btleader->pcxt);
1628 ExitParallelMode();
1629}
1630
1631/*
1632 * Returns size of shared memory required to store state for a parallel
1633 * btree index build based on the snapshot its parallel scan will use.
1634 */
1635static Size
1636_bt_parallel_estimate_shared(Relation heap, Snapshot snapshot)
1637{
1638 /* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
1639 return add_size(BUFFERALIGN(sizeof(BTShared)),
1640 table_parallelscan_estimate(heap, snapshot));
1641}
1642
1643/*
1644 * Within leader, wait for end of heap scan.
1645 *
1646 * When called, parallel heap scan started by _bt_begin_parallel() will
1647 * already be underway within worker processes (when leader participates
1648 * as a worker, we should end up here just as workers are finishing).
1649 *
1650 * Fills in fields needed for ambuild statistics, and lets caller set
1651 * field indicating that some worker encountered a broken HOT chain.
1652 *
1653 * Returns the total number of heap tuples scanned.
1654 */
1655static double
1656_bt_parallel_heapscan(BTBuildState *buildstate, bool *brokenhotchain)
1657{
1658 BTShared *btshared = buildstate->btleader->btshared;
1659 int nparticipanttuplesorts;
1660 double reltuples;
1661
1662 nparticipanttuplesorts = buildstate->btleader->nparticipanttuplesorts;
1663 for (;;)
1664 {
1665 SpinLockAcquire(&btshared->mutex);
1666 if (btshared->nparticipantsdone == nparticipanttuplesorts)
1667 {
1668 buildstate->havedead = btshared->havedead;
1669 buildstate->indtuples = btshared->indtuples;
1670 *brokenhotchain = btshared->brokenhotchain;
1671 reltuples = btshared->reltuples;
1672 SpinLockRelease(&btshared->mutex);
1673 break;
1674 }
1675 SpinLockRelease(&btshared->mutex);
1676
1677 ConditionVariableSleep(&btshared->workersdonecv,
1678 WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN);
1679 }
1680
1681 ConditionVariableCancelSleep();
1682
1683 return reltuples;
1684}
1685
1686/*
1687 * Within leader, participate as a parallel worker.
1688 */
1689static void
1690_bt_leader_participate_as_worker(BTBuildState *buildstate)
1691{
1692 BTLeader *btleader = buildstate->btleader;
1693 BTSpool *leaderworker;
1694 BTSpool *leaderworker2;
1695 int sortmem;
1696
1697 /* Allocate memory and initialize private spool */
1698 leaderworker = (BTSpool *) palloc0(sizeof(BTSpool));
1699 leaderworker->heap = buildstate->spool->heap;
1700 leaderworker->index = buildstate->spool->index;
1701 leaderworker->isunique = buildstate->spool->isunique;
1702 leaderworker->nulls_not_distinct = buildstate->spool->nulls_not_distinct;
1703
1704 /* Initialize second spool, if required */
1705 if (!btleader->btshared->isunique)
1706 leaderworker2 = NULL;
1707 else
1708 {
1709 /* Allocate memory for worker's own private secondary spool */
1710 leaderworker2 = (BTSpool *) palloc0(sizeof(BTSpool));
1711
1712 /* Initialize worker's own secondary spool */
1713 leaderworker2->heap = leaderworker->heap;
1714 leaderworker2->index = leaderworker->index;
1715 leaderworker2->isunique = false;
1716 }
1717
1718 /*
1719 * Might as well use reliable figure when doling out maintenance_work_mem
1720 * (when requested number of workers were not launched, this will be
1721 * somewhat higher than it is for other workers).
1722 */
1723 sortmem = maintenance_work_mem / btleader->nparticipanttuplesorts;
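	/*
	 * Editor's illustrative arithmetic (not part of the original file): if
	 * maintenance_work_mem is 64MB and only one of two requested workers was
	 * launched, nparticipanttuplesorts is 2 (that worker plus the leader), so
	 * each participant sorts with 32MB here -- somewhat more than the ~21MB
	 * each would have received had both workers started.
	 */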
1724
1725 /* Perform work common to all participants */
1726 _bt_parallel_scan_and_sort(leaderworker, leaderworker2, btleader->btshared,
1727 btleader->sharedsort, btleader->sharedsort2,
1728 sortmem, true);
1729
1730#ifdef BTREE_BUILD_STATS
1731 if (log_btree_build_stats)
1732 {
1733 ShowUsage("BTREE BUILD (Leader Partial Spool) STATISTICS");
1734 ResetUsage();
1735 }
1736#endif /* BTREE_BUILD_STATS */
1737}
1738
1739/*
1740 * Perform work within a launched parallel process.
1741 */
1742void
1743_bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
1744{
1745 char *sharedquery;
1746 BTSpool *btspool;
1747 BTSpool *btspool2;
1748 BTShared *btshared;
1749 Sharedsort *sharedsort;
1750 Sharedsort *sharedsort2;
1751 Relation heapRel;
1752 Relation indexRel;
1753 LOCKMODE heapLockmode;
1754 LOCKMODE indexLockmode;
1755 WalUsage *walusage;
1756 BufferUsage *bufferusage;
1757 int sortmem;
1758
1759#ifdef BTREE_BUILD_STATS
1760 if (log_btree_build_stats)
1761 ResetUsage();
1762#endif /* BTREE_BUILD_STATS */
1763
1764 /*
1765 * The only possible status flag that can be set to the parallel worker is
1766 * PROC_IN_SAFE_IC.
1767 */
1768 Assert((MyProc->statusFlags == 0) ||
1769 (MyProc->statusFlags == PROC_IN_SAFE_IC));
1770
1771 /* Set debug_query_string for individual workers first */
1772 sharedquery = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, true);
1773 debug_query_string = sharedquery;
1774
1775 /* Report the query string from leader */
1776 pgstat_report_activity(STATE_RUNNING, debug_query_string);
1777
1778 /* Look up nbtree shared state */
1779 btshared = shm_toc_lookup(toc, PARALLEL_KEY_BTREE_SHARED, false);
1780
1781 /* Open relations using lock modes known to be obtained by index.c */
1782 if (!btshared->isconcurrent)
1783 {
1784 heapLockmode = ShareLock;
1785 indexLockmode = AccessExclusiveLock;
1786 }
1787 else
1788 {
1789 heapLockmode = ShareUpdateExclusiveLock;
1790 indexLockmode = RowExclusiveLock;
1791 }
1792
1793 /* Track query ID */
1794 pgstat_report_query_id(btshared->queryid, false);
1795
1796 /* Open relations within worker */
1797 heapRel = table_open(btshared->heaprelid, heapLockmode);
1798 indexRel = index_open(btshared->indexrelid, indexLockmode);
1799
1800 /* Initialize worker's own spool */
1801 btspool = (BTSpool *) palloc0(sizeof(BTSpool));
1802 btspool->heap = heapRel;
1803 btspool->index = indexRel;
1804 btspool->isunique = btshared->isunique;
1805 btspool->nulls_not_distinct = btshared->nulls_not_distinct;
1806
1807 /* Look up shared state private to tuplesort.c */
1808 sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false);
1809 tuplesort_attach_shared(sharedsort, seg);
1810 if (!btshared->isunique)
1811 {
1812 btspool2 = NULL;
1813 sharedsort2 = NULL;
1814 }
1815 else
1816 {
1817 /* Allocate memory for worker's own private secondary spool */
1818 btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
1819
1820 /* Initialize worker's own secondary spool */
1821 btspool2->heap = btspool->heap;
1822 btspool2->index = btspool->index;
1823 btspool2->isunique = false;
1824 /* Look up shared state private to tuplesort.c */
1825 sharedsort2 = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT_SPOOL2, false);
1826 tuplesort_attach_shared(sharedsort2, seg);
1827 }
1828
1829 /* Prepare to track buffer usage during parallel execution */
1830 InstrStartParallelQuery();
1831
1832 /* Perform sorting of spool, and possibly a spool2 */
1833 sortmem = maintenance_work_mem / btshared->scantuplesortstates;
1834 _bt_parallel_scan_and_sort(btspool, btspool2, btshared, sharedsort,
1835 sharedsort2, sortmem, false);
1836
1837 /* Report WAL/buffer usage during parallel execution */
1838 bufferusage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1839 walusage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1840 InstrEndParallelQuery(&bufferusage[ParallelWorkerNumber],
1841 &walusage[ParallelWorkerNumber]);
1842
1843#ifdef BTREE_BUILD_STATS
1844 if (log_btree_build_stats)
1845 {
1846 ShowUsage("BTREE BUILD (Worker Partial Spool) STATISTICS");
1847 ResetUsage();
1848 }
1849#endif /* BTREE_BUILD_STATS */
1850
1851 index_close(indexRel, indexLockmode);
1852 table_close(heapRel, heapLockmode);
1853}
1854
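/*
 * Editor's sketch (assumed helper, not in nbtsort.c; compiles only inside the
 * PostgreSQL source tree): the lock-mode pairing a parallel worker reproduces
 * in _bt_parallel_build_main so that its locks match the ones the leader
 * already holds via index.c.  Plain CREATE INDEX locks the heap with
 * ShareLock and the index with AccessExclusiveLock, while CREATE INDEX
 * CONCURRENTLY uses the weaker ShareUpdateExclusiveLock/RowExclusiveLock pair
 * so concurrent writes remain possible.
 */
#include "postgres.h"
#include "storage/lockdefs.h"

static void
choose_parallel_build_lockmodes(bool isconcurrent,
								LOCKMODE *heapLockmode,
								LOCKMODE *indexLockmode)
{
	if (!isconcurrent)
	{
		*heapLockmode = ShareLock;
		*indexLockmode = AccessExclusiveLock;
	}
	else
	{
		*heapLockmode = ShareUpdateExclusiveLock;
		*indexLockmode = RowExclusiveLock;
	}
}
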
1855/*
1856 * Perform a worker's portion of a parallel sort.
1857 *
1858 * This generates a tuplesort for the passed btspool, and a second tuplesort
1859 * state if a second btspool is needed (i.e. for unique index builds). All
1860 * other spool fields should already be set when this is called.
1861 *
1862 * sortmem is the amount of working memory to use within each worker,
1863 * expressed in KBs.
1864 *
1865 * When this returns, workers are done, and need only release resources.
1866 */
1867static void
1868_bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
1869 BTShared *btshared, Sharedsort *sharedsort,
1870 Sharedsort *sharedsort2, int sortmem, bool progress)
1871{
1872 SortCoordinate coordinate;
1873 BTBuildState buildstate;
1874 TableScanDesc scan;
1875 double reltuples;
1876 IndexInfo *indexInfo;
1877
1878 /* Initialize local tuplesort coordination state */
1879 coordinate = palloc0(sizeof(SortCoordinateData));
1880 coordinate->isWorker = true;
1881 coordinate->nParticipants = -1;
1882 coordinate->sharedsort = sharedsort;
1883
1884 /* Begin "partial" tuplesort */
1885 btspool->sortstate = tuplesort_begin_index_btree(btspool->heap,
1886 btspool->index,
1887 btspool->isunique,
1888 btspool->nulls_not_distinct,
1889 sortmem, coordinate,
1890 TUPLESORT_NONE);
1891
1892 /*
1893 * Just as with the serial case, there may be a second spool. If so, a
1894 * second, dedicated spool2 partial tuplesort is required.
1895 */
1896 if (btspool2)
1897 {
1898 SortCoordinate coordinate2;
1899
1900 /*
1901 * We expect that the second one (for dead tuples) won't get very
1902 * full, so we give it only work_mem (unless sortmem is less for the
1903 * worker). Worker processes are generally permitted to allocate
1904 * work_mem independently.
1905 */
1906 coordinate2 = palloc0(sizeof(SortCoordinateData));
1907 coordinate2->isWorker = true;
1908 coordinate2->nParticipants = -1;
1909 coordinate2->sharedsort = sharedsort2;
1910 btspool2->sortstate =
1911 tuplesort_begin_index_btree(btspool->heap, btspool->index, false, false,
1912 Min(sortmem, work_mem), coordinate2,
1913 false);
1914 }
1915
1916 /* Fill in buildstate for _bt_build_callback() */
1917 buildstate.isunique = btshared->isunique;
1918 buildstate.nulls_not_distinct = btshared->nulls_not_distinct;
1919 buildstate.havedead = false;
1920 buildstate.heap = btspool->heap;
1921 buildstate.spool = btspool;
1922 buildstate.spool2 = btspool2;
1923 buildstate.indtuples = 0;
1924 buildstate.btleader = NULL;
1925
1926 /* Join parallel scan */
1927 indexInfo = BuildIndexInfo(btspool->index);
1928 indexInfo->ii_Concurrent = btshared->isconcurrent;
1929 scan = table_beginscan_parallel(btspool->heap,
1930 ParallelTableScanFromBTShared(btshared));
1931 reltuples = table_index_build_scan(btspool->heap, btspool->index, indexInfo,
1932 true, progress, _bt_build_callback,
1933 &buildstate, scan);
1934
1935 /* Execute this worker's part of the sort */
1936 if (progress)
1937 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
1938 PROGRESS_BTREE_PHASE_PERFORMSORT_1);
1939 tuplesort_performsort(btspool->sortstate);
1940 if (btspool2)
1941 {
1942 if (progress)
1943 pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
1944 PROGRESS_BTREE_PHASE_PERFORMSORT_2);
1945 tuplesort_performsort(btspool2->sortstate);
1946 }
1947
1948 /*
1949 * Done. Record ambuild statistics, and whether we encountered a broken
1950 * HOT chain.
1951 */
1952 SpinLockAcquire(&btshared->mutex);
1953 btshared->nparticipantsdone++;
1954 btshared->reltuples += reltuples;
1955 if (buildstate.havedead)
1956 btshared->havedead = true;
1957 btshared->indtuples += buildstate.indtuples;
1958 if (indexInfo->ii_BrokenHotChain)
1959 btshared->brokenhotchain = true;
1960 SpinLockRelease(&btshared->mutex);
1961
1962 /* Notify leader */
1963 ConditionVariableSignal(&btshared->workersdonecv);
1964
1965 /* We can end tuplesorts immediately */
1966 tuplesort_end(btspool->sortstate);
1967 if (btspool2)
1968 tuplesort_end(btspool2->sortstate);
1969}
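
/*
 * Editor's sketch: a pthreads analogue (not PostgreSQL primitives) of the
 * completion protocol above -- each worker bumps a shared counter under a
 * lock and signals a condition variable, while the leader (compare
 * _bt_parallel_heapscan) sleeps on that condition variable until every
 * participant has reported in.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 2

typedef struct SharedDone
{
	pthread_mutex_t mutex;
	pthread_cond_t	workersdonecv;
	int				nparticipantsdone;
} SharedDone;

static SharedDone shared = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* analogue of the SpinLockAcquire ... ConditionVariableSignal sequence */
static void *
worker_main(void *arg)
{
	pthread_mutex_lock(&shared.mutex);
	shared.nparticipantsdone++;
	pthread_mutex_unlock(&shared.mutex);
	pthread_cond_signal(&shared.workersdonecv);
	return NULL;
}

int
main(void)
{
	pthread_t	workers[NWORKERS];

	for (int i = 0; i < NWORKERS; i++)
		pthread_create(&workers[i], NULL, worker_main, NULL);

	/* leader side: wait until all participants have reported completion */
	pthread_mutex_lock(&shared.mutex);
	while (shared.nparticipantsdone < NWORKERS)
		pthread_cond_wait(&shared.workersdonecv, &shared.mutex);
	pthread_mutex_unlock(&shared.mutex);

	for (int i = 0; i < NWORKERS; i++)
		pthread_join(workers[i], NULL);

	printf("all %d workers done\n", NWORKERS);
	return 0;
}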