PostgreSQL Source Code  git master
nbtsort.c File Reference
#include "postgres.h"
#include "access/nbtree.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "commands/progress.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bulk_write.h"
#include "tcop/tcopprot.h"
#include "utils/rel.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"

Data Structures

struct  BTSpool
 
struct  BTShared
 
struct  BTLeader
 
struct  BTBuildState
 
struct  BTPageState
 
struct  BTWriteState
 

Macros

#define PARALLEL_KEY_BTREE_SHARED   UINT64CONST(0xA000000000000001)
 
#define PARALLEL_KEY_TUPLESORT   UINT64CONST(0xA000000000000002)
 
#define PARALLEL_KEY_TUPLESORT_SPOOL2   UINT64CONST(0xA000000000000003)
 
#define PARALLEL_KEY_QUERY_TEXT   UINT64CONST(0xA000000000000004)
 
#define PARALLEL_KEY_WAL_USAGE   UINT64CONST(0xA000000000000005)
 
#define PARALLEL_KEY_BUFFER_USAGE   UINT64CONST(0xA000000000000006)
 
#define ParallelTableScanFromBTShared(shared)    (ParallelTableScanDesc) ((char *) (shared) + BUFFERALIGN(sizeof(BTShared)))
 

Typedefs

typedef struct BTSpool BTSpool
 
typedef struct BTShared BTShared
 
typedef struct BTLeader BTLeader
 
typedef struct BTBuildState BTBuildState
 
typedef struct BTPageState BTPageState
 
typedef struct BTWriteState BTWriteState
 

Functions

static double _bt_spools_heapscan (Relation heap, Relation index, BTBuildState *buildstate, IndexInfo *indexInfo)
 
static void _bt_spooldestroy (BTSpool *btspool)
 
static void _bt_spool (BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull)
 
static void _bt_leafbuild (BTSpool *btspool, BTSpool *btspool2)
 
static void _bt_build_callback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 
static BulkWriteBuffer _bt_blnewpage (BTWriteState *wstate, uint32 level)
 
static BTPageState * _bt_pagestate (BTWriteState *wstate, uint32 level)
 
static void _bt_slideleft (Page rightmostpage)
 
static void _bt_sortaddtup (Page page, Size itemsize, IndexTuple itup, OffsetNumber itup_off, bool newfirstdataitem)
 
static void _bt_buildadd (BTWriteState *wstate, BTPageState *state, IndexTuple itup, Size truncextra)
 
static void _bt_sort_dedup_finish_pending (BTWriteState *wstate, BTPageState *state, BTDedupState dstate)
 
static void _bt_uppershutdown (BTWriteState *wstate, BTPageState *state)
 
static void _bt_load (BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 
static void _bt_begin_parallel (BTBuildState *buildstate, bool isconcurrent, int request)
 
static void _bt_end_parallel (BTLeader *btleader)
 
static Size _bt_parallel_estimate_shared (Relation heap, Snapshot snapshot)
 
static double _bt_parallel_heapscan (BTBuildState *buildstate, bool *brokenhotchain)
 
static void _bt_leader_participate_as_worker (BTBuildState *buildstate)
 
static void _bt_parallel_scan_and_sort (BTSpool *btspool, BTSpool *btspool2, BTShared *btshared, Sharedsort *sharedsort, Sharedsort *sharedsort2, int sortmem, bool progress)
 
IndexBuildResult * btbuild (Relation heap, Relation index, IndexInfo *indexInfo)
 
static void _bt_blwritepage (BTWriteState *wstate, BulkWriteBuffer buf, BlockNumber blkno)
 
void _bt_parallel_build_main (dsm_segment *seg, shm_toc *toc)
 

Macro Definition Documentation

◆ PARALLEL_KEY_BTREE_SHARED

#define PARALLEL_KEY_BTREE_SHARED   UINT64CONST(0xA000000000000001)

Definition at line 61 of file nbtsort.c.

◆ PARALLEL_KEY_BUFFER_USAGE

#define PARALLEL_KEY_BUFFER_USAGE   UINT64CONST(0xA000000000000006)

Definition at line 66 of file nbtsort.c.

◆ PARALLEL_KEY_QUERY_TEXT

#define PARALLEL_KEY_QUERY_TEXT   UINT64CONST(0xA000000000000004)

Definition at line 64 of file nbtsort.c.

◆ PARALLEL_KEY_TUPLESORT

#define PARALLEL_KEY_TUPLESORT   UINT64CONST(0xA000000000000002)

Definition at line 62 of file nbtsort.c.

◆ PARALLEL_KEY_TUPLESORT_SPOOL2

#define PARALLEL_KEY_TUPLESORT_SPOOL2   UINT64CONST(0xA000000000000003)

Definition at line 63 of file nbtsort.c.

◆ PARALLEL_KEY_WAL_USAGE

#define PARALLEL_KEY_WAL_USAGE   UINT64CONST(0xA000000000000005)

Definition at line 65 of file nbtsort.c.

◆ ParallelTableScanFromBTShared

#define ParallelTableScanFromBTShared (   shared)     (ParallelTableScanDesc) ((char *) (shared) + BUFFERALIGN(sizeof(BTShared)))

Definition at line 161 of file nbtsort.c.
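
The BUFFERALIGN() offset reflects how _bt_begin_parallel() lays out its main DSM chunk: the fixed-size BTShared struct comes first, and the ParallelTableScanDesc filled in by table_parallelscan_initialize() follows immediately after it in the same allocation. A minimal sketch of how a participant locates and uses that descriptor; this condenses what _bt_parallel_build_main() and _bt_parallel_scan_and_sort() (not excerpted on this page) do, with lock choice and error handling omitted:

BTShared   *btshared = shm_toc_lookup(toc, PARALLEL_KEY_BTREE_SHARED, false);
Relation    heapRel = table_open(btshared->heaprelid, ShareLock);

/* The scan descriptor lives right after BTShared in the same DSM chunk */
ParallelTableScanDesc pscan = ParallelTableScanFromBTShared(btshared);
TableScanDesc scan = table_beginscan_parallel(heapRel, pscan);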

Typedef Documentation

◆ BTBuildState

typedef struct BTBuildState BTBuildState

◆ BTLeader

typedef struct BTLeader BTLeader

◆ BTPageState

typedef struct BTPageState BTPageState

◆ BTShared

typedef struct BTShared BTShared

◆ BTSpool

typedef struct BTSpool BTSpool

◆ BTWriteState

typedef struct BTWriteState BTWriteState

Function Documentation

◆ _bt_begin_parallel()

static void _bt_begin_parallel ( BTBuildState *  buildstate,
bool  isconcurrent,
int  request 
)
static

Definition at line 1398 of file nbtsort.c.

1399 {
1400  ParallelContext *pcxt;
1401  int scantuplesortstates;
1402  Snapshot snapshot;
1403  Size estbtshared;
1404  Size estsort;
1405  BTShared *btshared;
1406  Sharedsort *sharedsort;
1407  Sharedsort *sharedsort2;
1408  BTSpool *btspool = buildstate->spool;
1409  BTLeader *btleader = (BTLeader *) palloc0(sizeof(BTLeader));
1410  WalUsage *walusage;
1411  BufferUsage *bufferusage;
1412  bool leaderparticipates = true;
1413  int querylen;
1414 
1415 #ifdef DISABLE_LEADER_PARTICIPATION
1416  leaderparticipates = false;
1417 #endif
1418 
1419  /*
1420  * Enter parallel mode, and create context for parallel build of btree
1421  * index
1422  */
1423  EnterParallelMode();
1424  Assert(request > 0);
1425  pcxt = CreateParallelContext("postgres", "_bt_parallel_build_main",
1426  request);
1427 
1428  scantuplesortstates = leaderparticipates ? request + 1 : request;
1429 
1430  /*
1431  * Prepare for scan of the base relation. In a normal index build, we use
1432  * SnapshotAny because we must retrieve all tuples and do our own time
1433  * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1434  * concurrent build, we take a regular MVCC snapshot and index whatever's
1435  * live according to that.
1436  */
1437  if (!isconcurrent)
1438  snapshot = SnapshotAny;
1439  else
1440  snapshot = RegisterSnapshot(GetTransactionSnapshot());
1441 
1442  /*
1443  * Estimate size for our own PARALLEL_KEY_BTREE_SHARED workspace, and
1444  * PARALLEL_KEY_TUPLESORT tuplesort workspace
1445  */
1446  estbtshared = _bt_parallel_estimate_shared(btspool->heap, snapshot);
1447  shm_toc_estimate_chunk(&pcxt->estimator, estbtshared);
1448  estsort = tuplesort_estimate_shared(scantuplesortstates);
1449  shm_toc_estimate_chunk(&pcxt->estimator, estsort);
1450 
1451  /*
1452  * Unique case requires a second spool, and so we may have to account for
1453  * another shared workspace for that -- PARALLEL_KEY_TUPLESORT_SPOOL2
1454  */
1455  if (!btspool->isunique)
1456  shm_toc_estimate_keys(&pcxt->estimator, 2);
1457  else
1458  {
1459  shm_toc_estimate_chunk(&pcxt->estimator, estsort);
1460  shm_toc_estimate_keys(&pcxt->estimator, 3);
1461  }
1462 
1463  /*
1464  * Estimate space for WalUsage and BufferUsage -- PARALLEL_KEY_WAL_USAGE
1465  * and PARALLEL_KEY_BUFFER_USAGE.
1466  *
1467  * If there are no extensions loaded that care, we could skip this. We
1468  * have no way of knowing whether anyone's looking at pgWalUsage or
1469  * pgBufferUsage, so do it unconditionally.
1470  */
1471  shm_toc_estimate_chunk(&pcxt->estimator,
1472  mul_size(sizeof(WalUsage), pcxt->nworkers));
1473  shm_toc_estimate_keys(&pcxt->estimator, 1);
1474  shm_toc_estimate_chunk(&pcxt->estimator,
1475  mul_size(sizeof(BufferUsage), pcxt->nworkers));
1476  shm_toc_estimate_keys(&pcxt->estimator, 1);
1477 
1478  /* Finally, estimate PARALLEL_KEY_QUERY_TEXT space */
1479  if (debug_query_string)
1480  {
1481  querylen = strlen(debug_query_string);
1482  shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
1483  shm_toc_estimate_keys(&pcxt->estimator, 1);
1484  }
1485  else
1486  querylen = 0; /* keep compiler quiet */
1487 
1488  /* Everyone's had a chance to ask for space, so now create the DSM */
1489  InitializeParallelDSM(pcxt);
1490 
1491  /* If no DSM segment was available, back out (do serial build) */
1492  if (pcxt->seg == NULL)
1493  {
1494  if (IsMVCCSnapshot(snapshot))
1495  UnregisterSnapshot(snapshot);
1496  DestroyParallelContext(pcxt);
1497  ExitParallelMode();
1498  return;
1499  }
1500 
1501  /* Store shared build state, for which we reserved space */
1502  btshared = (BTShared *) shm_toc_allocate(pcxt->toc, estbtshared);
1503  /* Initialize immutable state */
1504  btshared->heaprelid = RelationGetRelid(btspool->heap);
1505  btshared->indexrelid = RelationGetRelid(btspool->index);
1506  btshared->isunique = btspool->isunique;
1507  btshared->nulls_not_distinct = btspool->nulls_not_distinct;
1508  btshared->isconcurrent = isconcurrent;
1509  btshared->scantuplesortstates = scantuplesortstates;
1510  btshared->queryid = pgstat_get_my_query_id();
1511  ConditionVariableInit(&btshared->workersdonecv);
1512  SpinLockInit(&btshared->mutex);
1513  /* Initialize mutable state */
1514  btshared->nparticipantsdone = 0;
1515  btshared->reltuples = 0.0;
1516  btshared->havedead = false;
1517  btshared->indtuples = 0.0;
1518  btshared->brokenhotchain = false;
1519  table_parallelscan_initialize(btspool->heap,
1520  ParallelTableScanFromBTShared(btshared),
1521  snapshot);
1522 
1523  /*
1524  * Store shared tuplesort-private state, for which we reserved space.
1525  * Then, initialize opaque state using tuplesort routine.
1526  */
1527  sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
1528  tuplesort_initialize_shared(sharedsort, scantuplesortstates,
1529  pcxt->seg);
1530 
1531  shm_toc_insert(pcxt->toc, PARALLEL_KEY_BTREE_SHARED, btshared);
1532  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort);
1533 
1534  /* Unique case requires a second spool, and associated shared state */
1535  if (!btspool->isunique)
1536  sharedsort2 = NULL;
1537  else
1538  {
1539  /*
1540  * Store additional shared tuplesort-private state, for which we
1541  * reserved space. Then, initialize opaque state using tuplesort
1542  * routine.
1543  */
1544  sharedsort2 = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort);
1545  tuplesort_initialize_shared(sharedsort2, scantuplesortstates,
1546  pcxt->seg);
1547 
1548  shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT_SPOOL2, sharedsort2);
1549  }
1550 
1551  /* Store query string for workers */
1552  if (debug_query_string)
1553  {
1554  char *sharedquery;
1555 
1556  sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
1557  memcpy(sharedquery, debug_query_string, querylen + 1);
1558  shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, sharedquery);
1559  }
1560 
1561  /*
1562  * Allocate space for each worker's WalUsage and BufferUsage; no need to
1563  * initialize.
1564  */
1565  walusage = shm_toc_allocate(pcxt->toc,
1566  mul_size(sizeof(WalUsage), pcxt->nworkers));
1567  shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage);
1568  bufferusage = shm_toc_allocate(pcxt->toc,
1569  mul_size(sizeof(BufferUsage), pcxt->nworkers));
1570  shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufferusage);
1571 
1572  /* Launch workers, saving status for leader/caller */
1573  LaunchParallelWorkers(pcxt);
1574  btleader->pcxt = pcxt;
1575  btleader->nparticipanttuplesorts = pcxt->nworkers_launched;
1576  if (leaderparticipates)
1577  btleader->nparticipanttuplesorts++;
1578  btleader->btshared = btshared;
1579  btleader->sharedsort = sharedsort;
1580  btleader->sharedsort2 = sharedsort2;
1581  btleader->snapshot = snapshot;
1582  btleader->walusage = walusage;
1583  btleader->bufferusage = bufferusage;
1584 
1585  /* If no workers were successfully launched, back out (do serial build) */
1586  if (pcxt->nworkers_launched == 0)
1587  {
1588  _bt_end_parallel(btleader);
1589  return;
1590  }
1591 
1592  /* Save leader state now that it's clear build will be parallel */
1593  buildstate->btleader = btleader;
1594 
1595  /* Join heap scan ourselves */
1596  if (leaderparticipates)
1597  _bt_leader_participate_as_worker(buildstate);
1598 
1599  /*
1600  * Caller needs to wait for all launched workers when we return. Make
1601  * sure that the failure-to-start case will not hang forever.
1602  */
1603  WaitForParallelWorkersToAttach(pcxt);
1604 }
void InitializeParallelDSM(ParallelContext *pcxt)
Definition: parallel.c:207
void LaunchParallelWorkers(ParallelContext *pcxt)
Definition: parallel.c:569
void DestroyParallelContext(ParallelContext *pcxt)
Definition: parallel.c:946
ParallelContext * CreateParallelContext(const char *library_name, const char *function_name, int nworkers)
Definition: parallel.c:169
void WaitForParallelWorkersToAttach(ParallelContext *pcxt)
Definition: parallel.c:689
uint64 pgstat_get_my_query_id(void)
#define Assert(condition)
Definition: c.h:863
size_t Size
Definition: c.h:610
void ConditionVariableInit(ConditionVariable *cv)
void * palloc0(Size size)
Definition: mcxt.c:1347
#define PARALLEL_KEY_BUFFER_USAGE
Definition: nbtsort.c:66
#define ParallelTableScanFromBTShared(shared)
Definition: nbtsort.c:161
static void _bt_end_parallel(BTLeader *btleader)
Definition: nbtsort.c:1610
#define PARALLEL_KEY_TUPLESORT_SPOOL2
Definition: nbtsort.c:63
static Size _bt_parallel_estimate_shared(Relation heap, Snapshot snapshot)
Definition: nbtsort.c:1636
#define PARALLEL_KEY_BTREE_SHARED
Definition: nbtsort.c:61
#define PARALLEL_KEY_TUPLESORT
Definition: nbtsort.c:62
#define PARALLEL_KEY_QUERY_TEXT
Definition: nbtsort.c:64
#define PARALLEL_KEY_WAL_USAGE
Definition: nbtsort.c:65
static void _bt_leader_participate_as_worker(BTBuildState *buildstate)
Definition: nbtsort.c:1690
const char * debug_query_string
Definition: postgres.c:87
#define RelationGetRelid(relation)
Definition: rel.h:505
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505
Snapshot GetTransactionSnapshot(void)
Definition: snapmgr.c:216
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:836
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:794
#define SnapshotAny
Definition: snapmgr.h:33
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:62
#define SpinLockInit(lock)
Definition: spin.h:57
BTSpool * spool
Definition: nbtsort.c:210
BTLeader * btleader
Definition: nbtsort.c:224
ParallelContext * pcxt
Definition: nbtsort.c:170
BTShared * btshared
Definition: nbtsort.c:190
Sharedsort * sharedsort
Definition: nbtsort.c:191
Sharedsort * sharedsort2
Definition: nbtsort.c:192
int nparticipanttuplesorts
Definition: nbtsort.c:178
BufferUsage * bufferusage
Definition: nbtsort.c:195
Snapshot snapshot
Definition: nbtsort.c:193
WalUsage * walusage
Definition: nbtsort.c:194
slock_t mutex
Definition: nbtsort.c:124
bool isconcurrent
Definition: nbtsort.c:104
double indtuples
Definition: nbtsort.c:145
double reltuples
Definition: nbtsort.c:143
Oid heaprelid
Definition: nbtsort.c:100
bool brokenhotchain
Definition: nbtsort.c:146
bool isunique
Definition: nbtsort.c:102
int nparticipantsdone
Definition: nbtsort.c:142
ConditionVariable workersdonecv
Definition: nbtsort.c:116
int scantuplesortstates
Definition: nbtsort.c:105
uint64 queryid
Definition: nbtsort.c:108
Oid indexrelid
Definition: nbtsort.c:101
bool havedead
Definition: nbtsort.c:144
bool nulls_not_distinct
Definition: nbtsort.c:103
bool isunique
Definition: nbtsort.c:84
bool nulls_not_distinct
Definition: nbtsort.c:85
Relation heap
Definition: nbtsort.c:82
Relation index
Definition: nbtsort.c:83
dsm_segment * seg
Definition: parallel.h:42
shm_toc_estimator estimator
Definition: parallel.h:41
shm_toc * toc
Definition: parallel.h:44
int nworkers_launched
Definition: parallel.h:37
void table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan, Snapshot snapshot)
Definition: tableam.c:145
void tuplesort_initialize_shared(Sharedsort *shared, int nWorkers, dsm_segment *seg)
Definition: tuplesort.c:2938
Size tuplesort_estimate_shared(int nWorkers)
Definition: tuplesort.c:2917
void ExitParallelMode(void)
Definition: xact.c:1063
void EnterParallelMode(void)
Definition: xact.c:1050

References _bt_end_parallel(), _bt_leader_participate_as_worker(), _bt_parallel_estimate_shared(), Assert, BTShared::brokenhotchain, BTBuildState::btleader, BTLeader::btshared, BTLeader::bufferusage, ConditionVariableInit(), CreateParallelContext(), debug_query_string, DestroyParallelContext(), EnterParallelMode(), ParallelContext::estimator, ExitParallelMode(), GetTransactionSnapshot(), BTShared::havedead, BTSpool::heap, BTShared::heaprelid, BTSpool::index, BTShared::indexrelid, BTShared::indtuples, InitializeParallelDSM(), BTShared::isconcurrent, IsMVCCSnapshot, BTSpool::isunique, BTShared::isunique, LaunchParallelWorkers(), mul_size(), BTShared::mutex, BTShared::nparticipantsdone, BTLeader::nparticipanttuplesorts, BTSpool::nulls_not_distinct, BTShared::nulls_not_distinct, ParallelContext::nworkers, ParallelContext::nworkers_launched, palloc0(), PARALLEL_KEY_BTREE_SHARED, PARALLEL_KEY_BUFFER_USAGE, PARALLEL_KEY_QUERY_TEXT, PARALLEL_KEY_TUPLESORT, PARALLEL_KEY_TUPLESORT_SPOOL2, PARALLEL_KEY_WAL_USAGE, ParallelTableScanFromBTShared, BTLeader::pcxt, pgstat_get_my_query_id(), BTShared::queryid, RegisterSnapshot(), RelationGetRelid, BTShared::reltuples, BTShared::scantuplesortstates, ParallelContext::seg, BTLeader::sharedsort, BTLeader::sharedsort2, shm_toc_allocate(), shm_toc_estimate_chunk, shm_toc_estimate_keys, shm_toc_insert(), BTLeader::snapshot, SnapshotAny, SpinLockInit, BTBuildState::spool, table_parallelscan_initialize(), ParallelContext::toc, tuplesort_estimate_shared(), tuplesort_initialize_shared(), UnregisterSnapshot(), WaitForParallelWorkersToAttach(), BTLeader::walusage, and BTShared::workersdonecv.

Referenced by _bt_spools_heapscan().
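
The function follows the standard shm_toc protocol: every chunk is first estimated (shm_toc_estimate_chunk() plus a matching shm_toc_estimate_keys() count) before InitializeParallelDSM() builds the segment, and is afterwards carved out with shm_toc_allocate() and published under its 64-bit key with shm_toc_insert(). A minimal sketch of that pattern for a single chunk; MY_KEY and my_size are placeholders for illustration only, not part of nbtsort.c:

Size   my_size = 1024;          /* hypothetical chunk size */
void  *space;

/* Estimation phase: must happen before InitializeParallelDSM() */
shm_toc_estimate_chunk(&pcxt->estimator, my_size);
shm_toc_estimate_keys(&pcxt->estimator, 1);

/* Create the DSM segment and its table of contents */
InitializeParallelDSM(pcxt);

/* Allocate the reserved space and publish it under the key */
space = shm_toc_allocate(pcxt->toc, my_size);
shm_toc_insert(pcxt->toc, MY_KEY, space);

/* The same mapping is later found by key (workers use the toc passed to their entry point) */
space = shm_toc_lookup(pcxt->toc, MY_KEY, false);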

◆ _bt_blnewpage()

static BulkWriteBuffer _bt_blnewpage ( BTWriteState *  wstate,
uint32  level 
)
static

Definition at line 608 of file nbtsort.c.

609 {
610  BulkWriteBuffer buf;
611  Page page;
612  BTPageOpaque opaque;
613 
614  buf = smgr_bulk_get_buf(wstate->bulkstate);
615  page = (Page) buf;
616 
617  /* Zero the page and set up standard page header info */
618  _bt_pageinit(page, BLCKSZ);
619 
620  /* Initialize BT opaque state */
621  opaque = BTPageGetOpaque(page);
622  opaque->btpo_prev = opaque->btpo_next = P_NONE;
623  opaque->btpo_level = level;
624  opaque->btpo_flags = (level > 0) ? 0 : BTP_LEAF;
625  opaque->btpo_cycleid = 0;
626 
627  /* Make the P_HIKEY line pointer appear allocated */
628  ((PageHeader) page)->pd_lower += sizeof(ItemIdData);
629 
630  return buf;
631 }
PageHeaderData * PageHeader
Definition: bufpage.h:173
Pointer Page
Definition: bufpage.h:81
BulkWriteBuffer smgr_bulk_get_buf(BulkWriteState *bulkstate)
Definition: bulk_write.c:347
struct ItemIdData ItemIdData
void _bt_pageinit(Page page, Size size)
Definition: nbtpage.c:1129
#define BTP_LEAF
Definition: nbtree.h:76
#define BTPageGetOpaque(page)
Definition: nbtree.h:73
#define P_NONE
Definition: nbtree.h:212
static char * buf
Definition: pg_test_fsync.c:72
BlockNumber btpo_next
Definition: nbtree.h:65
BlockNumber btpo_prev
Definition: nbtree.h:64
uint16 btpo_flags
Definition: nbtree.h:67
uint32 btpo_level
Definition: nbtree.h:66
BTCycleId btpo_cycleid
Definition: nbtree.h:68
BulkWriteState * bulkstate
Definition: nbtsort.c:250

References _bt_pageinit(), BTP_LEAF, BTPageGetOpaque, BTPageOpaqueData::btpo_cycleid, BTPageOpaqueData::btpo_flags, BTPageOpaqueData::btpo_level, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, buf, BTWriteState::bulkstate, P_NONE, and smgr_bulk_get_buf().

Referenced by _bt_buildadd(), and _bt_pagestate().

◆ _bt_blwritepage()

static void _bt_blwritepage ( BTWriteState *  wstate,
BulkWriteBuffer  buf,
BlockNumber  blkno 
)
static

Definition at line 637 of file nbtsort.c.

638 {
639  smgr_bulk_write(wstate->bulkstate, blkno, buf, true);
640  /* smgr_bulk_write took ownership of 'buf' */
641 }
void smgr_bulk_write(BulkWriteState *bulkstate, BlockNumber blocknum, BulkWriteBuffer buf, bool page_std)
Definition: bulk_write.c:323

References buf, BTWriteState::bulkstate, and smgr_bulk_write().

Referenced by _bt_buildadd(), and _bt_uppershutdown().
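
_bt_blwritepage() is a thin wrapper around the bulk-write facility from storage/bulk_write.h; once smgr_bulk_write() is called, the buffer obtained from smgr_bulk_get_buf() belongs to the bulk-write machinery and must not be touched again. A sketch of the full lifecycle as this file uses it, with page construction omitted; the comments note which nbtsort.c function performs each step:

BulkWriteState *bulkstate = smgr_bulk_start_rel(indexRel, MAIN_FORKNUM);  /* _bt_load() */
BulkWriteBuffer buf = smgr_bulk_get_buf(bulkstate);                       /* _bt_blnewpage() */

/* ... initialize the page behind 'buf' ... */

smgr_bulk_write(bulkstate, blkno, buf, true);   /* _bt_blwritepage(); ownership of 'buf' passes here */
smgr_bulk_finish(bulkstate);                    /* _bt_load(), after _bt_uppershutdown() */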

◆ _bt_build_callback()

static void _bt_build_callback ( Relation  index,
ItemPointer  tid,
Datum *  values,
bool *  isnull,
bool  tupleIsAlive,
void *  state 
)
static

Definition at line 579 of file nbtsort.c.

585 {
586  BTBuildState *buildstate = (BTBuildState *) state;
587 
588  /*
589  * insert the index tuple into the appropriate spool file for subsequent
590  * processing
591  */
592  if (tupleIsAlive || buildstate->spool2 == NULL)
593  _bt_spool(buildstate->spool, tid, values, isnull);
594  else
595  {
596  /* dead tuples are put into spool2 */
597  buildstate->havedead = true;
598  _bt_spool(buildstate->spool2, tid, values, isnull);
599  }
600 
601  buildstate->indtuples += 1;
602 }
static Datum values[MAXATTR]
Definition: bootstrap.c:151
static void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull)
Definition: nbtsort.c:527
bool havedead
Definition: nbtsort.c:208
BTSpool * spool2
Definition: nbtsort.c:216
double indtuples
Definition: nbtsort.c:217
Definition: regguts.h:323

References _bt_spool(), BTBuildState::havedead, BTBuildState::indtuples, BTBuildState::spool, BTBuildState::spool2, and values.

Referenced by _bt_parallel_scan_and_sort(), and _bt_spools_heapscan().

◆ _bt_buildadd()

static void _bt_buildadd ( BTWriteState *  wstate,
BTPageState *  state,
IndexTuple  itup,
Size  truncextra 
)
static

Definition at line 786 of file nbtsort.c.

788 {
789  BulkWriteBuffer nbuf;
790  Page npage;
791  BlockNumber nblkno;
792  OffsetNumber last_off;
793  Size last_truncextra;
794  Size pgspc;
795  Size itupsz;
796  bool isleaf;
797 
798  /*
799  * This is a handy place to check for cancel interrupts during the btree
800  * load phase of index creation.
801  */
802  CHECK_FOR_INTERRUPTS();
803 
804  nbuf = state->btps_buf;
805  npage = (Page) nbuf;
806  nblkno = state->btps_blkno;
807  last_off = state->btps_lastoff;
808  last_truncextra = state->btps_lastextra;
809  state->btps_lastextra = truncextra;
810 
811  pgspc = PageGetFreeSpace(npage);
812  itupsz = IndexTupleSize(itup);
813  itupsz = MAXALIGN(itupsz);
814  /* Leaf case has slightly different rules due to suffix truncation */
815  isleaf = (state->btps_level == 0);
816 
817  /*
818  * Check whether the new item can fit on a btree page on current level at
819  * all.
820  *
821  * Every newly built index will treat heap TID as part of the keyspace,
822  * which imposes the requirement that new high keys must occasionally have
823  * a heap TID appended within _bt_truncate(). That may leave a new pivot
824  * tuple one or two MAXALIGN() quantums larger than the original
825  * firstright tuple it's derived from. v4 deals with the problem by
826  * decreasing the limit on the size of tuples inserted on the leaf level
827  * by the same small amount. Enforce the new v4+ limit on the leaf level,
828  * and the old limit on internal levels, since pivot tuples may need to
829  * make use of the reserved space. This should never fail on internal
830  * pages.
831  */
832  if (unlikely(itupsz > BTMaxItemSize(npage)))
833  _bt_check_third_page(wstate->index, wstate->heap, isleaf, npage,
834  itup);
835 
836  /*
837  * Check to see if current page will fit new item, with space left over to
838  * append a heap TID during suffix truncation when page is a leaf page.
839  *
840  * It is guaranteed that we can fit at least 2 non-pivot tuples plus a
841  * high key with heap TID when finishing off a leaf page, since we rely on
842  * _bt_check_third_page() rejecting oversized non-pivot tuples. On
843  * internal pages we can always fit 3 pivot tuples with larger internal
844  * page tuple limit (includes page high key).
845  *
846  * Most of the time, a page is only "full" in the sense that the soft
847  * fillfactor-wise limit has been exceeded. However, we must always leave
848  * at least two items plus a high key on each page before starting a new
849  * page. Disregard fillfactor and insert on "full" current page if we
850  * don't have the minimum number of items yet. (Note that we deliberately
851  * assume that suffix truncation neither enlarges nor shrinks new high key
852  * when applying soft limit, except when last tuple has a posting list.)
853  */
854  Assert(last_truncextra == 0 || isleaf);
855  if (pgspc < itupsz + (isleaf ? MAXALIGN(sizeof(ItemPointerData)) : 0) ||
856  (pgspc + last_truncextra < state->btps_full && last_off > P_FIRSTKEY))
857  {
858  /*
859  * Finish off the page and write it out.
860  */
861  BulkWriteBuffer obuf = nbuf;
862  Page opage = npage;
863  BlockNumber oblkno = nblkno;
864  ItemId ii;
865  ItemId hii;
866  IndexTuple oitup;
867 
868  /* Create new page of same level */
869  nbuf = _bt_blnewpage(wstate, state->btps_level);
870  npage = (Page) nbuf;
871 
872  /* and assign it a page position */
873  nblkno = wstate->btws_pages_alloced++;
874 
875  /*
876  * We copy the last item on the page into the new page, and then
877  * rearrange the old page so that the 'last item' becomes its high key
878  * rather than a true data item. There had better be at least two
879  * items on the page already, else the page would be empty of useful
880  * data.
881  */
882  Assert(last_off > P_FIRSTKEY);
883  ii = PageGetItemId(opage, last_off);
884  oitup = (IndexTuple) PageGetItem(opage, ii);
885  _bt_sortaddtup(npage, ItemIdGetLength(ii), oitup, P_FIRSTKEY,
886  !isleaf);
887 
888  /*
889  * Move 'last' into the high key position on opage. _bt_blnewpage()
890  * allocated empty space for a line pointer when opage was first
891  * created, so this is a matter of rearranging already-allocated space
892  * on page, and initializing high key line pointer. (Actually, leaf
893  * pages must also swap oitup with a truncated version of oitup, which
894  * is sometimes larger than oitup, though never by more than the space
895  * needed to append a heap TID.)
896  */
897  hii = PageGetItemId(opage, P_HIKEY);
898  *hii = *ii;
899  ItemIdSetUnused(ii); /* redundant */
900  ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
901 
902  if (isleaf)
903  {
904  IndexTuple lastleft;
905  IndexTuple truncated;
906 
907  /*
908  * Truncate away any unneeded attributes from high key on leaf
909  * level. This is only done at the leaf level because downlinks
910  * in internal pages are either negative infinity items, or get
911  * their contents from copying from one level down. See also:
912  * _bt_split().
913  *
914  * We don't try to bias our choice of split point to make it more
915  * likely that _bt_truncate() can truncate away more attributes,
916  * whereas the split point used within _bt_split() is chosen much
917  * more delicately. Even still, the lastleft and firstright
918  * tuples passed to _bt_truncate() here are at least not fully
919  * equal to each other when deduplication is used, unless there is
920  * a large group of duplicates (also, unique index builds usually
921  * have few or no spool2 duplicates). When the split point is
922  * between two unequal tuples, _bt_truncate() will avoid including
923  * a heap TID in the new high key, which is the most important
924  * benefit of suffix truncation.
925  *
926  * Overwrite the old item with new truncated high key directly.
927  * oitup is already located at the physical beginning of tuple
928  * space, so this should directly reuse the existing tuple space.
929  */
930  ii = PageGetItemId(opage, OffsetNumberPrev(last_off));
931  lastleft = (IndexTuple) PageGetItem(opage, ii);
932 
933  Assert(IndexTupleSize(oitup) > last_truncextra);
934  truncated = _bt_truncate(wstate->index, lastleft, oitup,
935  wstate->inskey);
936  if (!PageIndexTupleOverwrite(opage, P_HIKEY, (Item) truncated,
937  IndexTupleSize(truncated)))
938  elog(ERROR, "failed to add high key to the index page");
939  pfree(truncated);
940 
941  /* oitup should continue to point to the page's high key */
942  hii = PageGetItemId(opage, P_HIKEY);
943  oitup = (IndexTuple) PageGetItem(opage, hii);
944  }
945 
946  /*
947  * Link the old page into its parent, using its low key. If we don't
948  * have a parent, we have to create one; this adds a new btree level.
949  */
950  if (state->btps_next == NULL)
951  state->btps_next = _bt_pagestate(wstate, state->btps_level + 1);
952 
953  Assert((BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) <=
954  IndexRelationGetNumberOfKeyAttributes(wstate->index) &&
955  BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) > 0) ||
956  P_LEFTMOST(BTPageGetOpaque(opage)));
957  Assert(BTreeTupleGetNAtts(state->btps_lowkey, wstate->index) == 0 ||
958  !P_LEFTMOST(BTPageGetOpaque(opage)));
959  BTreeTupleSetDownLink(state->btps_lowkey, oblkno);
960  _bt_buildadd(wstate, state->btps_next, state->btps_lowkey, 0);
961  pfree(state->btps_lowkey);
962 
963  /*
964  * Save a copy of the high key from the old page. It is also the low
965  * key for the new page.
966  */
967  state->btps_lowkey = CopyIndexTuple(oitup);
968 
969  /*
970  * Set the sibling links for both pages.
971  */
972  {
973  BTPageOpaque oopaque = BTPageGetOpaque(opage);
974  BTPageOpaque nopaque = BTPageGetOpaque(npage);
975 
976  oopaque->btpo_next = nblkno;
977  nopaque->btpo_prev = oblkno;
978  nopaque->btpo_next = P_NONE; /* redundant */
979  }
980 
981  /*
982  * Write out the old page. _bt_blwritepage takes ownership of the
983  * 'opage' buffer.
984  */
985  _bt_blwritepage(wstate, obuf, oblkno);
986 
987  /*
988  * Reset last_off to point to new page
989  */
990  last_off = P_FIRSTKEY;
991  }
992 
993  /*
994  * By here, either original page is still the current page, or a new page
995  * was created that became the current page. Either way, the current page
996  * definitely has space for new item.
997  *
998  * If the new item is the first for its page, it must also be the first
999  * item on its entire level. On later same-level pages, a low key for a
1000  * page will be copied from the prior page in the code above. Generate a
1001  * minus infinity low key here instead.
1002  */
1003  if (last_off == P_HIKEY)
1004  {
1005  Assert(state->btps_lowkey == NULL);
1006  state->btps_lowkey = palloc0(sizeof(IndexTupleData));
1007  state->btps_lowkey->t_info = sizeof(IndexTupleData);
1008  BTreeTupleSetNAtts(state->btps_lowkey, 0, false);
1009  }
1010 
1011  /*
1012  * Add the new item into the current page.
1013  */
1014  last_off = OffsetNumberNext(last_off);
1015  _bt_sortaddtup(npage, itupsz, itup, last_off,
1016  !isleaf && last_off == P_FIRSTKEY);
1017 
1018  state->btps_buf = nbuf;
1019  state->btps_blkno = nblkno;
1020  state->btps_lastoff = last_off;
1021 }
uint32 BlockNumber
Definition: block.h:31
bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum, Item newtup, Size newsize)
Definition: bufpage.c:1394
Size PageGetFreeSpace(Page page)
Definition: bufpage.c:896
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:354
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
#define MAXALIGN(LEN)
Definition: c.h:816
#define unlikely(x)
Definition: c.h:326
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:547
Pointer Item
Definition: item.h:17
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
struct IndexTupleData IndexTupleData
void pfree(void *pointer)
Definition: mcxt.c:1521
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:122
#define P_HIKEY
Definition: nbtree.h:367
#define P_LEFTMOST(opaque)
Definition: nbtree.h:218
static void BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
Definition: nbtree.h:562
#define BTMaxItemSize(page)
Definition: nbtree.h:164
#define P_FIRSTKEY
Definition: nbtree.h:368
static void BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
Definition: nbtree.h:595
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:577
static void _bt_blwritepage(BTWriteState *wstate, BulkWriteBuffer buf, BlockNumber blkno)
Definition: nbtsort.c:637
static void _bt_sortaddtup(Page page, Size itemsize, IndexTuple itup, OffsetNumber itup_off, bool newfirstdataitem)
Definition: nbtsort.c:716
static BTPageState * _bt_pagestate(BTWriteState *wstate, uint32 level)
Definition: nbtsort.c:648
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, Size truncextra)
Definition: nbtsort.c:786
static BulkWriteBuffer _bt_blnewpage(BTWriteState *wstate, uint32 level)
Definition: nbtsort.c:608
void _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, Page page, IndexTuple newtup)
Definition: nbtutils.c:5084
IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itup_key)
Definition: nbtutils.c:4658
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:524
Relation heap
Definition: nbtsort.c:248
Relation index
Definition: nbtsort.c:249
BlockNumber btws_pages_alloced
Definition: nbtsort.c:252
BTScanInsert inskey
Definition: nbtsort.c:251

References _bt_blnewpage(), _bt_blwritepage(), _bt_check_third_page(), _bt_pagestate(), _bt_sortaddtup(), _bt_truncate(), Assert, BTMaxItemSize, BTPageGetOpaque, BTPageOpaqueData::btpo_next, BTPageOpaqueData::btpo_prev, BTreeTupleGetNAtts, BTreeTupleSetDownLink(), BTreeTupleSetNAtts(), BTWriteState::btws_pages_alloced, CHECK_FOR_INTERRUPTS, CopyIndexTuple(), elog, ERROR, BTWriteState::heap, BTWriteState::index, IndexRelationGetNumberOfKeyAttributes, IndexTupleSize, BTWriteState::inskey, ItemIdGetLength, ItemIdSetUnused, MAXALIGN, OffsetNumberNext, OffsetNumberPrev, P_FIRSTKEY, P_HIKEY, P_LEFTMOST, P_NONE, PageGetFreeSpace(), PageGetItem(), PageGetItemId(), PageIndexTupleOverwrite(), palloc0(), pfree(), and unlikely.

Referenced by _bt_load(), _bt_sort_dedup_finish_pending(), and _bt_uppershutdown().

◆ _bt_end_parallel()

static void _bt_end_parallel ( BTLeader *  btleader)
static

Definition at line 1610 of file nbtsort.c.

1611 {
1612  int i;
1613 
1614  /* Shutdown worker processes */
1615  WaitForParallelWorkersToFinish(btleader->pcxt);
1616 
1617  /*
1618  * Next, accumulate WAL usage. (This must wait for the workers to finish,
1619  * or we might get incomplete data.)
1620  */
1621  for (i = 0; i < btleader->pcxt->nworkers_launched; i++)
1622  InstrAccumParallelQuery(&btleader->bufferusage[i], &btleader->walusage[i]);
1623 
1624  /* Free last reference to MVCC snapshot, if one was used */
1625  if (IsMVCCSnapshot(btleader->snapshot))
1626  UnregisterSnapshot(btleader->snapshot);
1627  DestroyParallelContext(btleader->pcxt);
1628  ExitParallelMode();
1629 }
void WaitForParallelWorkersToFinish(ParallelContext *pcxt)
Definition: parallel.c:792
void InstrAccumParallelQuery(BufferUsage *bufusage, WalUsage *walusage)
Definition: instrument.c:218
int i
Definition: isn.c:72

References BTLeader::bufferusage, DestroyParallelContext(), ExitParallelMode(), i, InstrAccumParallelQuery(), IsMVCCSnapshot, ParallelContext::nworkers_launched, BTLeader::pcxt, BTLeader::snapshot, UnregisterSnapshot(), WaitForParallelWorkersToFinish(), and BTLeader::walusage.

Referenced by _bt_begin_parallel(), and btbuild().

◆ _bt_leader_participate_as_worker()

static void _bt_leader_participate_as_worker ( BTBuildState *  buildstate)
static

Definition at line 1690 of file nbtsort.c.

1691 {
1692  BTLeader *btleader = buildstate->btleader;
1693  BTSpool *leaderworker;
1694  BTSpool *leaderworker2;
1695  int sortmem;
1696 
1697  /* Allocate memory and initialize private spool */
1698  leaderworker = (BTSpool *) palloc0(sizeof(BTSpool));
1699  leaderworker->heap = buildstate->spool->heap;
1700  leaderworker->index = buildstate->spool->index;
1701  leaderworker->isunique = buildstate->spool->isunique;
1702  leaderworker->nulls_not_distinct = buildstate->spool->nulls_not_distinct;
1703 
1704  /* Initialize second spool, if required */
1705  if (!btleader->btshared->isunique)
1706  leaderworker2 = NULL;
1707  else
1708  {
1709  /* Allocate memory for worker's own private secondary spool */
1710  leaderworker2 = (BTSpool *) palloc0(sizeof(BTSpool));
1711 
1712  /* Initialize worker's own secondary spool */
1713  leaderworker2->heap = leaderworker->heap;
1714  leaderworker2->index = leaderworker->index;
1715  leaderworker2->isunique = false;
1716  }
1717 
1718  /*
1719  * Might as well use reliable figure when doling out maintenance_work_mem
1720  * (when requested number of workers were not launched, this will be
1721  * somewhat higher than it is for other workers).
1722  */
1723  sortmem = maintenance_work_mem / btleader->nparticipanttuplesorts;
1724 
1725  /* Perform work common to all participants */
1726  _bt_parallel_scan_and_sort(leaderworker, leaderworker2, btleader->btshared,
1727  btleader->sharedsort, btleader->sharedsort2,
1728  sortmem, true);
1729 
1730 #ifdef BTREE_BUILD_STATS
1731  if (log_btree_build_stats)
1732  {
1733  ShowUsage("BTREE BUILD (Leader Partial Spool) STATISTICS");
1734  ResetUsage();
1735  }
1736 #endif /* BTREE_BUILD_STATS */
1737 }
int maintenance_work_mem
Definition: globals.c:132
bool log_btree_build_stats
Definition: guc_tables.c:507
static void _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2, BTShared *btshared, Sharedsort *sharedsort, Sharedsort *sharedsort2, int sortmem, bool progress)
Definition: nbtsort.c:1868
void ShowUsage(const char *title)
Definition: postgres.c:5118
void ResetUsage(void)
Definition: postgres.c:5111

References _bt_parallel_scan_and_sort(), BTBuildState::btleader, BTLeader::btshared, BTSpool::heap, BTSpool::index, BTSpool::isunique, BTShared::isunique, log_btree_build_stats, maintenance_work_mem, BTLeader::nparticipanttuplesorts, BTSpool::nulls_not_distinct, palloc0(), ResetUsage(), BTLeader::sharedsort, BTLeader::sharedsort2, ShowUsage(), and BTBuildState::spool.

Referenced by _bt_begin_parallel().
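
For a sense of scale: sortmem is maintenance_work_mem divided by nparticipanttuplesorts, so with the default maintenance_work_mem of 64MB (65536 kB) and, purely as an illustrative assumption, two launched workers plus the participating leader, each participant's tuplesort is started with roughly 65536 / 3 = 21845 kB.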

◆ _bt_leafbuild()

static void _bt_leafbuild ( BTSpool *  btspool,
BTSpool *  btspool2 
)
static

Definition at line 538 of file nbtsort.c.

539 {
540  BTWriteState wstate;
541 
542 #ifdef BTREE_BUILD_STATS
543  if (log_btree_build_stats)
544  {
545  ShowUsage("BTREE BUILD (Spool) STATISTICS");
546  ResetUsage();
547  }
548 #endif /* BTREE_BUILD_STATS */
549 
550  /* Execute the sort */
551  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
552  PROGRESS_BTREE_PHASE_PERFORMSORT_1);
553  tuplesort_performsort(btspool->sortstate);
554  if (btspool2)
555  {
556  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
557  PROGRESS_BTREE_PHASE_PERFORMSORT_2);
558  tuplesort_performsort(btspool2->sortstate);
559  }
560 
561  wstate.heap = btspool->heap;
562  wstate.index = btspool->index;
563  wstate.inskey = _bt_mkscankey(wstate.index, NULL);
564  /* _bt_mkscankey() won't set allequalimage without metapage */
565  wstate.inskey->allequalimage = _bt_allequalimage(wstate.index, true);
566 
567  /* reserve the metapage */
568  wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
569 
570  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
571  PROGRESS_BTREE_PHASE_LEAF_LOAD);
572  _bt_load(&wstate, btspool, btspool2);
573 }
void pgstat_progress_update_param(int index, int64 val)
#define PROGRESS_BTREE_PHASE_PERFORMSORT_2
Definition: nbtree.h:1149
#define PROGRESS_BTREE_PHASE_LEAF_LOAD
Definition: nbtree.h:1150
#define PROGRESS_BTREE_PHASE_PERFORMSORT_1
Definition: nbtree.h:1148
#define BTREE_METAPAGE
Definition: nbtree.h:148
static void _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
Definition: nbtsort.c:1137
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:129
bool _bt_allequalimage(Relation rel, bool debugmessage)
Definition: nbtutils.c:5142
#define PROGRESS_CREATEIDX_SUBPHASE
Definition: progress.h:86
bool allequalimage
Definition: nbtree.h:787
Tuplesortstate * sortstate
Definition: nbtsort.c:81
void tuplesort_performsort(Tuplesortstate *state)
Definition: tuplesort.c:1363

References _bt_allequalimage(), _bt_load(), _bt_mkscankey(), BTScanInsertData::allequalimage, BTREE_METAPAGE, BTWriteState::btws_pages_alloced, BTSpool::heap, BTWriteState::heap, BTSpool::index, BTWriteState::index, BTWriteState::inskey, log_btree_build_stats, pgstat_progress_update_param(), PROGRESS_BTREE_PHASE_LEAF_LOAD, PROGRESS_BTREE_PHASE_PERFORMSORT_1, PROGRESS_BTREE_PHASE_PERFORMSORT_2, PROGRESS_CREATEIDX_SUBPHASE, ResetUsage(), ShowUsage(), BTSpool::sortstate, and tuplesort_performsort().

Referenced by btbuild().

◆ _bt_load()

static void _bt_load ( BTWriteState *  wstate,
BTSpool *  btspool,
BTSpool *  btspool2 
)
static

Definition at line 1137 of file nbtsort.c.

1138 {
1139  BTPageState *state = NULL;
1140  bool merge = (btspool2 != NULL);
1141  IndexTuple itup,
1142  itup2 = NULL;
1143  bool load1;
1144  TupleDesc tupdes = RelationGetDescr(wstate->index);
1145  int i,
1146  keysz = IndexRelationGetNumberOfKeyAttributes(wstate->index);
1147  SortSupport sortKeys;
1148  int64 tuples_done = 0;
1149  bool deduplicate;
1150 
1151  wstate->bulkstate = smgr_bulk_start_rel(wstate->index, MAIN_FORKNUM);
1152 
1153  deduplicate = wstate->inskey->allequalimage && !btspool->isunique &&
1154  BTGetDeduplicateItems(wstate->index);
1155 
1156  if (merge)
1157  {
1158  /*
1159  * Another BTSpool for dead tuples exists. Now we have to merge
1160  * btspool and btspool2.
1161  */
1162 
1163  /* the preparation of merge */
1164  itup = tuplesort_getindextuple(btspool->sortstate, true);
1165  itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
1166 
1167  /* Prepare SortSupport data for each column */
1168  sortKeys = (SortSupport) palloc0(keysz * sizeof(SortSupportData));
1169 
1170  for (i = 0; i < keysz; i++)
1171  {
1172  SortSupport sortKey = sortKeys + i;
1173  ScanKey scanKey = wstate->inskey->scankeys + i;
1174  int16 strategy;
1175 
1176  sortKey->ssup_cxt = CurrentMemoryContext;
1177  sortKey->ssup_collation = scanKey->sk_collation;
1178  sortKey->ssup_nulls_first =
1179  (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
1180  sortKey->ssup_attno = scanKey->sk_attno;
1181  /* Abbreviation is not supported here */
1182  sortKey->abbreviate = false;
1183 
1184  Assert(sortKey->ssup_attno != 0);
1185 
1186  strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
1187  BTGreaterStrategyNumber : BTLessStrategyNumber;
1188 
1189  PrepareSortSupportFromIndexRel(wstate->index, strategy, sortKey);
1190  }
1191 
1192  for (;;)
1193  {
1194  load1 = true; /* load BTSpool next ? */
1195  if (itup2 == NULL)
1196  {
1197  if (itup == NULL)
1198  break;
1199  }
1200  else if (itup != NULL)
1201  {
1202  int32 compare = 0;
1203 
1204  for (i = 1; i <= keysz; i++)
1205  {
1206  SortSupport entry;
1207  Datum attrDatum1,
1208  attrDatum2;
1209  bool isNull1,
1210  isNull2;
1211 
1212  entry = sortKeys + i - 1;
1213  attrDatum1 = index_getattr(itup, i, tupdes, &isNull1);
1214  attrDatum2 = index_getattr(itup2, i, tupdes, &isNull2);
1215 
1216  compare = ApplySortComparator(attrDatum1, isNull1,
1217  attrDatum2, isNull2,
1218  entry);
1219  if (compare > 0)
1220  {
1221  load1 = false;
1222  break;
1223  }
1224  else if (compare < 0)
1225  break;
1226  }
1227 
1228  /*
1229  * If key values are equal, we sort on ItemPointer. This is
1230  * required for btree indexes, since heap TID is treated as an
1231  * implicit last key attribute in order to ensure that all
1232  * keys in the index are physically unique.
1233  */
1234  if (compare == 0)
1235  {
1236  compare = ItemPointerCompare(&itup->t_tid, &itup2->t_tid);
1237  Assert(compare != 0);
1238  if (compare > 0)
1239  load1 = false;
1240  }
1241  }
1242  else
1243  load1 = false;
1244 
1245  /* When we see first tuple, create first index page */
1246  if (state == NULL)
1247  state = _bt_pagestate(wstate, 0);
1248 
1249  if (load1)
1250  {
1251  _bt_buildadd(wstate, state, itup, 0);
1252  itup = tuplesort_getindextuple(btspool->sortstate, true);
1253  }
1254  else
1255  {
1256  _bt_buildadd(wstate, state, itup2, 0);
1257  itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
1258  }
1259 
1260  /* Report progress */
1261  pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1262  ++tuples_done);
1263  }
1264  pfree(sortKeys);
1265  }
1266  else if (deduplicate)
1267  {
1268  /* merge is unnecessary, deduplicate into posting lists */
1269  BTDedupState dstate;
1270 
1271  dstate = (BTDedupState) palloc(sizeof(BTDedupStateData));
1272  dstate->deduplicate = true; /* unused */
1273  dstate->nmaxitems = 0; /* unused */
1274  dstate->maxpostingsize = 0; /* set later */
1275  /* Metadata about base tuple of current pending posting list */
1276  dstate->base = NULL;
1277  dstate->baseoff = InvalidOffsetNumber; /* unused */
1278  dstate->basetupsize = 0;
1279  /* Metadata about current pending posting list TIDs */
1280  dstate->htids = NULL;
1281  dstate->nhtids = 0;
1282  dstate->nitems = 0;
1283  dstate->phystupsize = 0; /* unused */
1284  dstate->nintervals = 0; /* unused */
1285 
1286  while ((itup = tuplesort_getindextuple(btspool->sortstate,
1287  true)) != NULL)
1288  {
1289  /* When we see first tuple, create first index page */
1290  if (state == NULL)
1291  {
1292  state = _bt_pagestate(wstate, 0);
1293 
1294  /*
1295  * Limit size of posting list tuples to 1/10 space we want to
1296  * leave behind on the page, plus space for final item's line
1297  * pointer. This is equal to the space that we'd like to
1298  * leave behind on each leaf page when fillfactor is 90,
1299  * allowing us to get close to fillfactor% space utilization
1300  * when there happen to be a great many duplicates. (This
1301  * makes higher leaf fillfactor settings ineffective when
1302  * building indexes that have many duplicates, but packing
1303  * leaf pages full with few very large tuples doesn't seem
1304  * like a useful goal.)
1305  */
1306  dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) -
1307  sizeof(ItemIdData);
1308  Assert(dstate->maxpostingsize <= BTMaxItemSize((Page) state->btps_buf) &&
1309  dstate->maxpostingsize <= INDEX_SIZE_MASK);
1310  dstate->htids = palloc(dstate->maxpostingsize);
1311 
1312  /* start new pending posting list with itup copy */
1313  _bt_dedup_start_pending(dstate, CopyIndexTuple(itup),
1314  InvalidOffsetNumber);
1315  }
1316  else if (_bt_keep_natts_fast(wstate->index, dstate->base,
1317  itup) > keysz &&
1318  _bt_dedup_save_htid(dstate, itup))
1319  {
1320  /*
1321  * Tuple is equal to base tuple of pending posting list. Heap
1322  * TID from itup has been saved in state.
1323  */
1324  }
1325  else
1326  {
1327  /*
1328  * Tuple is not equal to pending posting list tuple, or
1329  * _bt_dedup_save_htid() opted to not merge current item into
1330  * pending posting list.
1331  */
1332  _bt_sort_dedup_finish_pending(wstate, state, dstate);
1333  pfree(dstate->base);
1334 
1335  /* start new pending posting list with itup copy */
1336  _bt_dedup_start_pending(dstate, CopyIndexTuple(itup),
1337  InvalidOffsetNumber);
1338  }
1339 
1340  /* Report progress */
1341  pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1342  ++tuples_done);
1343  }
1344 
1345  if (state)
1346  {
1347  /*
1348  * Handle the last item (there must be a last item when the
1349  * tuplesort returned one or more tuples)
1350  */
1351  _bt_sort_dedup_finish_pending(wstate, state, dstate);
1352  pfree(dstate->base);
1353  pfree(dstate->htids);
1354  }
1355 
1356  pfree(dstate);
1357  }
1358  else
1359  {
1360  /* merging and deduplication are both unnecessary */
1361  while ((itup = tuplesort_getindextuple(btspool->sortstate,
1362  true)) != NULL)
1363  {
1364  /* When we see first tuple, create first index page */
1365  if (state == NULL)
1366  state = _bt_pagestate(wstate, 0);
1367 
1368  _bt_buildadd(wstate, state, itup, 0);
1369 
1370  /* Report progress */
1371  pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
1372  ++tuples_done);
1373  }
1374  }
1375 
1376  /* Close down final pages and write the metapage */
1377  _bt_uppershutdown(wstate, state);
1378  smgr_bulk_finish(wstate->bulkstate);
1379 }
void smgr_bulk_finish(BulkWriteState *bulkstate)
Definition: bulk_write.c:130
BulkWriteState * smgr_bulk_start_rel(Relation rel, ForkNumber forknum)
Definition: bulk_write.c:87
#define MAXALIGN_DOWN(LEN)
Definition: c.h:828
signed short int16
Definition: c.h:507
signed int int32
Definition: c.h:508
static int compare(const void *arg1, const void *arg2)
Definition: geqo_pool.c:145
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:51
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:117
#define INDEX_SIZE_MASK
Definition: itup.h:65
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void * palloc(Size size)
Definition: mcxt.c:1317
bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup)
Definition: nbtdedup.c:484
void _bt_dedup_start_pending(BTDedupState state, IndexTuple base, OffsetNumber baseoff)
Definition: nbtdedup.c:433
#define BTGetDeduplicateItems(relation)
Definition: nbtree.h:1136
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1118
#define SK_BT_DESC
Definition: nbtree.h:1117
BTDedupStateData * BTDedupState
Definition: nbtree.h:893
static void _bt_sort_dedup_finish_pending(BTWriteState *wstate, BTPageState *state, BTDedupState dstate)
Definition: nbtsort.c:1031
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
Definition: nbtsort.c:1065
int _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
Definition: nbtutils.c:4877
#define InvalidOffsetNumber
Definition: off.h:26
static pairingheap_node * merge(pairingheap *heap, pairingheap_node *a, pairingheap_node *b)
Definition: pairingheap.c:79
uintptr_t Datum
Definition: postgres.h:64
#define PROGRESS_CREATEIDX_TUPLES_DONE
Definition: progress.h:88
#define RelationGetDescr(relation)
Definition: rel.h:531
@ MAIN_FORKNUM
Definition: relpath.h:58
void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy, SortSupport ssup)
Definition: sortsupport.c:161
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
static int ApplySortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:200
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define BTLessStrategyNumber
Definition: stratnum.h:29
Size maxpostingsize
Definition: nbtree.h:870
ItemPointer htids
Definition: nbtree.h:878
bool deduplicate
Definition: nbtree.h:868
OffsetNumber baseoff
Definition: nbtree.h:874
Size basetupsize
Definition: nbtree.h:875
IndexTuple base
Definition: nbtree.h:873
Size phystupsize
Definition: nbtree.h:881
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:793
ItemPointerData t_tid
Definition: itup.h:37
int sk_flags
Definition: skey.h:66
Oid sk_collation
Definition: skey.h:70
AttrNumber sk_attno
Definition: skey.h:67
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
MemoryContext ssup_cxt
Definition: sortsupport.h:66
IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward)

References _bt_buildadd(), _bt_dedup_save_htid(), _bt_dedup_start_pending(), _bt_keep_natts_fast(), _bt_pagestate(), _bt_sort_dedup_finish_pending(), _bt_uppershutdown(), SortSupportData::abbreviate, BTScanInsertData::allequalimage, ApplySortComparator(), Assert, BTDedupStateData::base, BTDedupStateData::baseoff, BTDedupStateData::basetupsize, BTGetDeduplicateItems, BTGreaterStrategyNumber, BTLessStrategyNumber, BTMaxItemSize, BTWriteState::bulkstate, compare(), CopyIndexTuple(), CurrentMemoryContext, BTDedupStateData::deduplicate, BTDedupStateData::htids, i, BTWriteState::index, index_getattr(), INDEX_SIZE_MASK, IndexRelationGetNumberOfKeyAttributes, BTWriteState::inskey, InvalidOffsetNumber, BTSpool::isunique, ItemPointerCompare(), MAIN_FORKNUM, MAXALIGN_DOWN, BTDedupStateData::maxpostingsize, merge(), BTDedupStateData::nhtids, BTDedupStateData::nintervals, BTDedupStateData::nitems, BTDedupStateData::nmaxitems, palloc(), palloc0(), pfree(), pgstat_progress_update_param(), BTDedupStateData::phystupsize, PrepareSortSupportFromIndexRel(), PROGRESS_CREATEIDX_TUPLES_DONE, RelationGetDescr, BTScanInsertData::scankeys, ScanKeyData::sk_attno, SK_BT_DESC, SK_BT_NULLS_FIRST, ScanKeyData::sk_collation, ScanKeyData::sk_flags, smgr_bulk_finish(), smgr_bulk_start_rel(), BTSpool::sortstate, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, IndexTupleData::t_tid, and tuplesort_getindextuple().

Referenced by _bt_leafbuild().
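
In the deduplication branch, maxpostingsize caps pending posting-list tuples at one tenth of the page minus one line pointer. Assuming the standard 8192-byte BLCKSZ and 8-byte MAXALIGN, that is MAXALIGN_DOWN(8192 * 10 / 100) - sizeof(ItemIdData) = 816 - 4 = 812 bytes per posting-list tuple; other block sizes scale proportionally.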

◆ _bt_pagestate()

static BTPageState * _bt_pagestate ( BTWriteState *  wstate,
uint32  level 
)
static

Definition at line 648 of file nbtsort.c.

649 {
650  BTPageState *state = (BTPageState *) palloc0(sizeof(BTPageState));
651 
652  /* create initial page for level */
653  state->btps_buf = _bt_blnewpage(wstate, level);
654 
655  /* and assign it a page position */
656  state->btps_blkno = wstate->btws_pages_alloced++;
657 
658  state->btps_lowkey = NULL;
659  /* initialize lastoff so first item goes into P_FIRSTKEY */
660  state->btps_lastoff = P_HIKEY;
661  state->btps_lastextra = 0;
662  state->btps_level = level;
663  /* set "full" threshold based on level. See notes at head of file. */
664  if (level > 0)
665  state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100);
666  else
667  state->btps_full = BTGetTargetPageFreeSpace(wstate->index);
668 
669  /* no parent level, yet */
670  state->btps_next = NULL;
671 
672  return state;
673 }
#define BTGetTargetPageFreeSpace(relation)
Definition: nbtree.h:1134
#define BTREE_NONLEAF_FILLFACTOR
Definition: nbtree.h:201

References _bt_blnewpage(), BTGetTargetPageFreeSpace, BTREE_NONLEAF_FILLFACTOR, BTWriteState::btws_pages_alloced, BTWriteState::index, P_HIKEY, and palloc0().

Referenced by _bt_buildadd(), and _bt_load().
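
For a sense of the thresholds set here (assuming the standard 8192-byte BLCKSZ): on internal levels btps_full is BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100 = 8192 * 30 / 100 = 2457 bytes of free space, while on the leaf level BTGetTargetPageFreeSpace() with the default leaf fillfactor of 90 yields 8192 * 10 / 100 = 819 bytes. _bt_buildadd() starts a new page once remaining free space (plus the previous tuple's truncation allowance) drops below this threshold, provided the page already holds the minimum number of items.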

◆ _bt_parallel_build_main()

void _bt_parallel_build_main ( dsm_segment *  seg,
shm_toc *  toc 
)

Definition at line 1743 of file nbtsort.c.

1744 {
1745  char *sharedquery;
1746  BTSpool *btspool;
1747  BTSpool *btspool2;
1748  BTShared *btshared;
1749  Sharedsort *sharedsort;
1750  Sharedsort *sharedsort2;
1751  Relation heapRel;
1752  Relation indexRel;
1753  LOCKMODE heapLockmode;
1754  LOCKMODE indexLockmode;
1755  WalUsage *walusage;
1756  BufferUsage *bufferusage;
1757  int sortmem;
1758 
1759 #ifdef BTREE_BUILD_STATS
1760  if (log_btree_build_stats)
1761  ResetUsage();
1762 #endif /* BTREE_BUILD_STATS */
1763 
1764  /*
1765  * The only possible status flag that can be set to the parallel worker is
1766  * PROC_IN_SAFE_IC.
1767  */
1768  Assert((MyProc->statusFlags == 0) ||
1769  (MyProc->statusFlags == PROC_IN_SAFE_IC));
1770 
1771  /* Set debug_query_string for individual workers first */
1772  sharedquery = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, true);
1773  debug_query_string = sharedquery;
1774 
1775  /* Report the query string from leader */
1776  pgstat_report_activity(STATE_RUNNING, debug_query_string);
1777 
1778  /* Look up nbtree shared state */
1779  btshared = shm_toc_lookup(toc, PARALLEL_KEY_BTREE_SHARED, false);
1780 
1781  /* Open relations using lock modes known to be obtained by index.c */
1782  if (!btshared->isconcurrent)
1783  {
1784  heapLockmode = ShareLock;
1785  indexLockmode = AccessExclusiveLock;
1786  }
1787  else
1788  {
1789  heapLockmode = ShareUpdateExclusiveLock;
1790  indexLockmode = RowExclusiveLock;
1791  }
1792 
1793  /* Track query ID */
1794  pgstat_report_query_id(btshared->queryid, false);
1795 
1796  /* Open relations within worker */
1797  heapRel = table_open(btshared->heaprelid, heapLockmode);
1798  indexRel = index_open(btshared->indexrelid, indexLockmode);
1799 
1800  /* Initialize worker's own spool */
1801  btspool = (BTSpool *) palloc0(sizeof(BTSpool));
1802  btspool->heap = heapRel;
1803  btspool->index = indexRel;
1804  btspool->isunique = btshared->isunique;
1805  btspool->nulls_not_distinct = btshared->nulls_not_distinct;
1806 
1807  /* Look up shared state private to tuplesort.c */
1808  sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false);
1809  tuplesort_attach_shared(sharedsort, seg);
1810  if (!btshared->isunique)
1811  {
1812  btspool2 = NULL;
1813  sharedsort2 = NULL;
1814  }
1815  else
1816  {
1817  /* Allocate memory for worker's own private secondary spool */
1818  btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
1819 
1820  /* Initialize worker's own secondary spool */
1821  btspool2->heap = btspool->heap;
1822  btspool2->index = btspool->index;
1823  btspool2->isunique = false;
1824  /* Look up shared state private to tuplesort.c */
1825  sharedsort2 = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT_SPOOL2, false);
1826  tuplesort_attach_shared(sharedsort2, seg);
1827  }
1828 
1829  /* Prepare to track buffer usage during parallel execution */
1830  InstrStartParallelQuery();
1831 
1832  /* Perform sorting of spool, and possibly a spool2 */
1833  sortmem = maintenance_work_mem / btshared->scantuplesortstates;
1834  _bt_parallel_scan_and_sort(btspool, btspool2, btshared, sharedsort,
1835  sharedsort2, sortmem, false);
1836 
1837  /* Report WAL/buffer usage during parallel execution */
1838  bufferusage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1839  walusage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1840  InstrEndParallelQuery(&bufferusage[ParallelWorkerNumber],
1841  &walusage[ParallelWorkerNumber]);
1842 
1843 #ifdef BTREE_BUILD_STATS
1844  if (log_btree_build_stats)
1845  {
1846  ShowUsage("BTREE BUILD (Worker Partial Spool) STATISTICS");
1847  ResetUsage();
1848  }
1849 #endif /* BTREE_BUILD_STATS */
1850 
1851  index_close(indexRel, indexLockmode);
1852  table_close(heapRel, heapLockmode);
1853 }
int ParallelWorkerNumber
Definition: parallel.c:114
void pgstat_report_query_id(uint64 query_id, bool force)
void pgstat_report_activity(BackendState state, const char *cmd_str)
@ STATE_RUNNING
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
void InstrEndParallelQuery(BufferUsage *bufusage, WalUsage *walusage)
Definition: instrument.c:208
void InstrStartParallelQuery(void)
Definition: instrument.c:200
int LOCKMODE
Definition: lockdefs.h:26
#define AccessExclusiveLock
Definition: lockdefs.h:43
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
#define ShareLock
Definition: lockdefs.h:40
#define RowExclusiveLock
Definition: lockdefs.h:38
#define PROC_IN_SAFE_IC
Definition: proc.h:59
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
PGPROC * MyProc
Definition: proc.c:66
uint8 statusFlags
Definition: proc.h:242
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
void tuplesort_attach_shared(Sharedsort *shared, dsm_segment *seg)
Definition: tuplesort.c:2961

References _bt_parallel_scan_and_sort(), AccessExclusiveLock, Assert, debug_query_string, BTSpool::heap, BTShared::heaprelid, BTSpool::index, index_close(), index_open(), BTShared::indexrelid, InstrEndParallelQuery(), InstrStartParallelQuery(), BTShared::isconcurrent, BTSpool::isunique, BTShared::isunique, log_btree_build_stats, maintenance_work_mem, MyProc, BTSpool::nulls_not_distinct, BTShared::nulls_not_distinct, palloc0(), PARALLEL_KEY_BTREE_SHARED, PARALLEL_KEY_BUFFER_USAGE, PARALLEL_KEY_QUERY_TEXT, PARALLEL_KEY_TUPLESORT, PARALLEL_KEY_TUPLESORT_SPOOL2, PARALLEL_KEY_WAL_USAGE, ParallelWorkerNumber, pgstat_report_activity(), pgstat_report_query_id(), PROC_IN_SAFE_IC, BTShared::queryid, ResetUsage(), RowExclusiveLock, BTShared::scantuplesortstates, ShareLock, ShareUpdateExclusiveLock, shm_toc_lookup(), ShowUsage(), STATE_RUNNING, PGPROC::statusFlags, table_close(), table_open(), and tuplesort_attach_shared().
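
The worker looks up the query text with noError = true (the leader may not have published it) but treats the nbtree shared state and tuplesort entries as mandatory by passing noError = false. Below is a minimal sketch of the leader-side publishing this relies on, loosely modeled on _bt_begin_parallel(); the function name, local variables, and the omitted BTShared initialization are illustrative, not part of nbtsort.c.

#include "postgres.h"
#include "access/parallel.h"
#include "storage/shm_toc.h"
#include "tcop/tcopprot.h"

static void
sketch_publish_btree_shared(ParallelContext *pcxt, Size estbtshared, Size querylen)
{
    BTShared   *btshared;
    char       *sharedquery;

    /* Chunk sized by _bt_parallel_estimate_shared(); fields initialized elsewhere */
    btshared = (BTShared *) shm_toc_allocate(pcxt->toc, estbtshared);
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_BTREE_SHARED, btshared);

    /* Query text is optional, which is why workers pass noError = true */
    if (debug_query_string)
    {
        sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
        memcpy(sharedquery, debug_query_string, querylen + 1);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, sharedquery);
    }
}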

◆ _bt_parallel_estimate_shared()

static Size _bt_parallel_estimate_shared ( Relation  heap,
Snapshot  snapshot 
)
static

Definition at line 1636 of file nbtsort.c.

1637 {
1638  /* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
1639  return add_size(BUFFERALIGN(sizeof(BTShared)),
1640  table_parallelscan_estimate(heap, snapshot));
1641 }
#define BUFFERALIGN(LEN)
Definition: c.h:818
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
Size table_parallelscan_estimate(Relation rel, Snapshot snapshot)
Definition: tableam.c:130

References add_size(), BUFFERALIGN, and table_parallelscan_estimate().

Referenced by _bt_begin_parallel().
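
The returned size is typically fed into the parallel DSM estimator before the segment is created. A hedged sketch of that pattern, assuming the usual ParallelContext estimation flow; the wrapper function and its name are illustrative:

#include "postgres.h"
#include "access/parallel.h"
#include "storage/shm_toc.h"
#include "utils/rel.h"
#include "utils/snapshot.h"

/* Sketch only: reserve room for the BTShared chunk plus its toc key */
static void
sketch_estimate_btree_dsm(ParallelContext *pcxt, Relation heap, Snapshot snapshot)
{
    Size        estbtshared = _bt_parallel_estimate_shared(heap, snapshot);

    shm_toc_estimate_chunk(&pcxt->estimator, estbtshared);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}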

◆ _bt_parallel_heapscan()

static double _bt_parallel_heapscan ( BTBuildState *  buildstate,
bool *  brokenhotchain 
)
static

Definition at line 1656 of file nbtsort.c.

1657 {
1658  BTShared *btshared = buildstate->btleader->btshared;
1659  int nparticipanttuplesorts;
1660  double reltuples;
1661 
1662  nparticipanttuplesorts = buildstate->btleader->nparticipanttuplesorts;
1663  for (;;)
1664  {
1665  SpinLockAcquire(&btshared->mutex);
1666  if (btshared->nparticipantsdone == nparticipanttuplesorts)
1667  {
1668  buildstate->havedead = btshared->havedead;
1669  buildstate->indtuples = btshared->indtuples;
1670  *brokenhotchain = btshared->brokenhotchain;
1671  reltuples = btshared->reltuples;
1672  SpinLockRelease(&btshared->mutex);
1673  break;
1674  }
1675  SpinLockRelease(&btshared->mutex);
1676 
1677  ConditionVariableSleep(&btshared->workersdonecv,
1678  WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN);
1679  }
1680 
1681  ConditionVariableCancelSleep();
1682 
1683  return reltuples;
1684 }
bool ConditionVariableCancelSleep(void)
void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59

References BTShared::brokenhotchain, BTBuildState::btleader, BTLeader::btshared, ConditionVariableCancelSleep(), ConditionVariableSleep(), BTShared::havedead, BTBuildState::havedead, BTShared::indtuples, BTBuildState::indtuples, BTShared::mutex, BTShared::nparticipantsdone, BTLeader::nparticipanttuplesorts, BTShared::reltuples, SpinLockAcquire, SpinLockRelease, and BTShared::workersdonecv.

Referenced by _bt_spools_heapscan().

◆ _bt_parallel_scan_and_sort()

static void _bt_parallel_scan_and_sort ( BTSpool *  btspool,
BTSpool *  btspool2,
BTShared *  btshared,
Sharedsort *  sharedsort,
Sharedsort *  sharedsort2,
int  sortmem,
bool  progress 
)
static

Definition at line 1868 of file nbtsort.c.

1871 {
1872  SortCoordinate coordinate;
1873  BTBuildState buildstate;
1874  TableScanDesc scan;
1875  double reltuples;
1876  IndexInfo *indexInfo;
1877 
1878  /* Initialize local tuplesort coordination state */
1879  coordinate = palloc0(sizeof(SortCoordinateData));
1880  coordinate->isWorker = true;
1881  coordinate->nParticipants = -1;
1882  coordinate->sharedsort = sharedsort;
1883 
1884  /* Begin "partial" tuplesort */
1885  btspool->sortstate = tuplesort_begin_index_btree(btspool->heap,
1886  btspool->index,
1887  btspool->isunique,
1888  btspool->nulls_not_distinct,
1889  sortmem, coordinate,
1890  TUPLESORT_NONE);
1891 
1892  /*
1893  * Just as with serial case, there may be a second spool. If so, a
1894  * second, dedicated spool2 partial tuplesort is required.
1895  */
1896  if (btspool2)
1897  {
1898  SortCoordinate coordinate2;
1899 
1900  /*
1901  * We expect that the second one (for dead tuples) won't get very
1902  * full, so we give it only work_mem (unless sortmem is less for
1903  * worker). Worker processes are generally permitted to allocate
1904  * work_mem independently.
1905  */
1906  coordinate2 = palloc0(sizeof(SortCoordinateData));
1907  coordinate2->isWorker = true;
1908  coordinate2->nParticipants = -1;
1909  coordinate2->sharedsort = sharedsort2;
1910  btspool2->sortstate =
1911  tuplesort_begin_index_btree(btspool->heap, btspool->index, false, false,
1912  Min(sortmem, work_mem), coordinate2,
1913  false);
1914  }
1915 
1916  /* Fill in buildstate for _bt_build_callback() */
1917  buildstate.isunique = btshared->isunique;
1918  buildstate.nulls_not_distinct = btshared->nulls_not_distinct;
1919  buildstate.havedead = false;
1920  buildstate.heap = btspool->heap;
1921  buildstate.spool = btspool;
1922  buildstate.spool2 = btspool2;
1923  buildstate.indtuples = 0;
1924  buildstate.btleader = NULL;
1925 
1926  /* Join parallel scan */
1927  indexInfo = BuildIndexInfo(btspool->index);
1928  indexInfo->ii_Concurrent = btshared->isconcurrent;
1929  scan = table_beginscan_parallel(btspool->heap,
1930  ParallelTableScanFromBTShared(btshared));
1931  reltuples = table_index_build_scan(btspool->heap, btspool->index, indexInfo,
1932  true, progress, _bt_build_callback,
1933  (void *) &buildstate, scan);
1934 
1935  /* Execute this worker's part of the sort */
1936  if (progress)
1937  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
1938  PROGRESS_BTREE_PHASE_PERFORMSORT_1);
1939  tuplesort_performsort(btspool->sortstate);
1940  if (btspool2)
1941  {
1942  if (progress)
1943  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
1944  PROGRESS_BTREE_PHASE_PERFORMSORT_2);
1945  tuplesort_performsort(btspool2->sortstate);
1946  }
1947 
1948  /*
1949  * Done. Record ambuild statistics, and whether we encountered a broken
1950  * HOT chain.
1951  */
1952  SpinLockAcquire(&btshared->mutex);
1953  btshared->nparticipantsdone++;
1954  btshared->reltuples += reltuples;
1955  if (buildstate.havedead)
1956  btshared->havedead = true;
1957  btshared->indtuples += buildstate.indtuples;
1958  if (indexInfo->ii_BrokenHotChain)
1959  btshared->brokenhotchain = true;
1960  SpinLockRelease(&btshared->mutex);
1961 
1962  /* Notify leader */
1963  ConditionVariableSignal(&btshared->workersdonecv);
1964 
1965  /* We can end tuplesorts immediately */
1966  tuplesort_end(btspool->sortstate);
1967  if (btspool2)
1968  tuplesort_end(btspool2->sortstate);
1969 }
#define Min(x, y)
Definition: c.h:1009
void ConditionVariableSignal(ConditionVariable *cv)
int work_mem
Definition: globals.c:130
IndexInfo * BuildIndexInfo(Relation index)
Definition: index.c:2425
static void _bt_build_callback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
Definition: nbtsort.c:579
static int progress
Definition: pgbench.c:261
bool isunique
Definition: nbtsort.c:206
bool nulls_not_distinct
Definition: nbtsort.c:207
Relation heap
Definition: nbtsort.c:209
bool ii_BrokenHotChain
Definition: execnodes.h:205
bool ii_Concurrent
Definition: execnodes.h:204
Sharedsort * sharedsort
Definition: tuplesort.h:58
TableScanDesc table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
Definition: tableam.c:165
static double table_index_build_scan(Relation table_rel, Relation index_rel, struct IndexInfo *index_info, bool allow_sync, bool progress, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
Definition: tableam.h:1784
void tuplesort_end(Tuplesortstate *state)
Definition: tuplesort.c:951
#define TUPLESORT_NONE
Definition: tuplesort.h:93
Tuplesortstate * tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)

References _bt_build_callback(), BTShared::brokenhotchain, BTBuildState::btleader, BuildIndexInfo(), ConditionVariableSignal(), BTShared::havedead, BTBuildState::havedead, BTSpool::heap, BTBuildState::heap, IndexInfo::ii_BrokenHotChain, IndexInfo::ii_Concurrent, BTSpool::index, BTShared::indtuples, BTBuildState::indtuples, BTShared::isconcurrent, BTSpool::isunique, BTShared::isunique, BTBuildState::isunique, SortCoordinateData::isWorker, Min, BTShared::mutex, SortCoordinateData::nParticipants, BTShared::nparticipantsdone, BTSpool::nulls_not_distinct, BTShared::nulls_not_distinct, BTBuildState::nulls_not_distinct, palloc0(), ParallelTableScanFromBTShared, pgstat_progress_update_param(), progress, PROGRESS_BTREE_PHASE_PERFORMSORT_1, PROGRESS_BTREE_PHASE_PERFORMSORT_2, PROGRESS_CREATEIDX_SUBPHASE, BTShared::reltuples, SortCoordinateData::sharedsort, BTSpool::sortstate, SpinLockAcquire, SpinLockRelease, BTBuildState::spool, BTBuildState::spool2, table_beginscan_parallel(), table_index_build_scan(), tuplesort_begin_index_btree(), tuplesort_end(), TUPLESORT_NONE, tuplesort_performsort(), work_mem, and BTShared::workersdonecv.

Referenced by _bt_leader_participate_as_worker(), and _bt_parallel_build_main().
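
As a worked example of the memory split (the numbers are illustrative, not defaults): with maintenance_work_mem = 65536 kB and scantuplesortstates = 3 (two workers plus the leader participating as a worker), each participant's main tuplesort gets 65536 / 3 = 21845 kB, while each spool2 tuplesort is capped at Min(sortmem, work_mem).

/* Illustrative arithmetic only; the real values come from GUCs and BTShared */
static int
sketch_worker_sortmem(void)
{
    int         maintenance_work_mem_kb = 65536;    /* e.g. 64 MB */
    int         work_mem_kb = 4096;                 /* e.g. 4 MB */
    int         scantuplesortstates = 3;            /* 2 workers + leader-as-worker */
    int         sortmem = maintenance_work_mem_kb / scantuplesortstates;    /* 21845 kB */
    int         spool2mem = Min(sortmem, work_mem_kb);  /* capped at work_mem: 4096 kB */

    return spool2mem;
}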

◆ _bt_slideleft()

static void _bt_slideleft ( Page  rightmostpage)
static

Definition at line 685 of file nbtsort.c.

686 {
687  OffsetNumber off;
688  OffsetNumber maxoff;
689  ItemId previi;
690 
691  maxoff = PageGetMaxOffsetNumber(rightmostpage);
692  Assert(maxoff >= P_FIRSTKEY);
693  previi = PageGetItemId(rightmostpage, P_HIKEY);
694  for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off))
695  {
696  ItemId thisii = PageGetItemId(rightmostpage, off);
697 
698  *previi = *thisii;
699  previi = thisii;
700  }
701  ((PageHeader) rightmostpage)->pd_lower -= sizeof(ItemIdData);
702 }
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:372

References Assert, OffsetNumberNext, P_FIRSTKEY, P_HIKEY, PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by _bt_uppershutdown().

◆ _bt_sort_dedup_finish_pending()

static void _bt_sort_dedup_finish_pending ( BTWriteState *  wstate,
BTPageState *  state,
BTDedupState  dstate 
)
static

Definition at line 1031 of file nbtsort.c.

1033 {
1034  Assert(dstate->nitems > 0);
1035 
1036  if (dstate->nitems == 1)
1037  _bt_buildadd(wstate, state, dstate->base, 0);
1038  else
1039  {
1040  IndexTuple postingtuple;
1041  Size truncextra;
1042 
1043  /* form a tuple with a posting list */
1044  postingtuple = _bt_form_posting(dstate->base,
1045  dstate->htids,
1046  dstate->nhtids);
1047  /* Calculate posting list overhead */
1048  truncextra = IndexTupleSize(postingtuple) -
1049  BTreeTupleGetPostingOffset(postingtuple);
1050 
1051  _bt_buildadd(wstate, state, postingtuple, truncextra);
1052  pfree(postingtuple);
1053  }
1054 
1055  dstate->nmaxitems = 0;
1056  dstate->nhtids = 0;
1057  dstate->nitems = 0;
1058  dstate->phystupsize = 0;
1059 }
IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids, int nhtids)
Definition: nbtdedup.c:864
static uint32 BTreeTupleGetPostingOffset(IndexTuple posting)
Definition: nbtree.h:529

References _bt_buildadd(), _bt_form_posting(), Assert, BTDedupStateData::base, BTreeTupleGetPostingOffset(), BTDedupStateData::htids, IndexTupleSize, BTDedupStateData::nhtids, BTDedupStateData::nitems, BTDedupStateData::nmaxitems, pfree(), and BTDedupStateData::phystupsize.

Referenced by _bt_load().
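
A hedged sketch of the calling pattern in _bt_load()'s deduplication path, to show when this flush happens. Space checks, posting-list interval bookkeeping, and the final flush at end of input are omitted; itup is assumed to be the next tuple returned from the spool's tuplesort, and keysz stands for IndexRelationGetNumberOfKeyAttributes(wstate->index).

if (dstate->nitems == 0)
{
    /* No pending posting list yet: this tuple becomes the base */
    _bt_dedup_start_pending(dstate, CopyIndexTuple(itup), InvalidOffsetNumber);
}
else if (_bt_keep_natts_fast(wstate->index, dstate->base, itup) > keysz &&
         _bt_dedup_save_htid(dstate, itup))
{
    /* Same key as the pending base; its heap TID was merged into the list */
}
else
{
    /* Key changed (or posting list full): flush, then start a new pending list */
    _bt_sort_dedup_finish_pending(wstate, state, dstate);
    pfree(dstate->base);
    _bt_dedup_start_pending(dstate, CopyIndexTuple(itup), InvalidOffsetNumber);
}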

◆ _bt_sortaddtup()

static void _bt_sortaddtup ( Page  page,
Size  itemsize,
IndexTuple  itup,
OffsetNumber  itup_off,
bool  newfirstdataitem 
)
static

Definition at line 716 of file nbtsort.c.

721 {
722  IndexTupleData trunctuple;
723 
724  if (newfirstdataitem)
725  {
726  trunctuple = *itup;
727  trunctuple.t_info = sizeof(IndexTupleData);
728  BTreeTupleSetNAtts(&trunctuple, 0, false);
729  itup = &trunctuple;
730  itemsize = sizeof(IndexTupleData);
731  }
732 
733  if (PageAddItem(page, (Item) itup, itemsize, itup_off,
734  false, false) == InvalidOffsetNumber)
735  elog(ERROR, "failed to add item to the index page");
736 }
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition: bufpage.h:471
unsigned short t_info
Definition: itup.h:49

References BTreeTupleSetNAtts(), elog, ERROR, InvalidOffsetNumber, PageAddItem, and IndexTupleData::t_info.

Referenced by _bt_buildadd().
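
Illustrative use only (not a quotation of _bt_buildadd()): a caller adding items in offset order would pass newfirstdataitem = true exactly when placing the first data item on a non-leaf page, so the item is truncated to zero attributes and serves as that level's "minus infinity" separator. The variables isleaf, page, itup, and itup_off are assumed caller state.

/* Truncate only the first data item on internal pages */
bool        newfirstdataitem = !isleaf && itup_off == P_FIRSTKEY;

_bt_sortaddtup(page, IndexTupleSize(itup), itup, itup_off, newfirstdataitem);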

◆ _bt_spool()

static void _bt_spool ( BTSpool *  btspool,
ItemPointer  self,
Datum *  values,
bool *  isnull 
)
static

Definition at line 527 of file nbtsort.c.

528 {
529  tuplesort_putindextuplevalues(btspool->sortstate, btspool->index,
530  self, values, isnull);
531 }
void tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, ItemPointer self, const Datum *values, const bool *isnull)

References BTSpool::index, BTSpool::sortstate, tuplesort_putindextuplevalues(), and values.

Referenced by _bt_build_callback().

◆ _bt_spooldestroy()

static void _bt_spooldestroy ( BTSpool *  btspool)
static

Definition at line 517 of file nbtsort.c.

518 {
519  tuplesort_end(btspool->sortstate);
520  pfree(btspool);
521 }

References pfree(), BTSpool::sortstate, and tuplesort_end().

Referenced by _bt_spools_heapscan(), and btbuild().

◆ _bt_spools_heapscan()

static double _bt_spools_heapscan ( Relation  heap,
Relation  index,
BTBuildState *  buildstate,
IndexInfo *  indexInfo 
)
static

Definition at line 365 of file nbtsort.c.

367 {
368  BTSpool *btspool = (BTSpool *) palloc0(sizeof(BTSpool));
369  SortCoordinate coordinate = NULL;
370  double reltuples = 0;
371 
372  /*
373  * We size the sort area as maintenance_work_mem rather than work_mem to
374  * speed index creation. This should be OK since a single backend can't
375  * run multiple index creations in parallel (see also: notes on
376  * parallelism and maintenance_work_mem below).
377  */
378  btspool->heap = heap;
379  btspool->index = index;
380  btspool->isunique = indexInfo->ii_Unique;
381  btspool->nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
382 
383  /* Save as primary spool */
384  buildstate->spool = btspool;
385 
386  /* Report table scan phase started */
387  pgstat_progress_update_param(PROGRESS_CREATEIDX_SUBPHASE,
388  PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN);
389 
390  /* Attempt to launch parallel worker scan when required */
391  if (indexInfo->ii_ParallelWorkers > 0)
392  _bt_begin_parallel(buildstate, indexInfo->ii_Concurrent,
393  indexInfo->ii_ParallelWorkers);
394 
395  /*
396  * If parallel build requested and at least one worker process was
397  * successfully launched, set up coordination state
398  */
399  if (buildstate->btleader)
400  {
401  coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
402  coordinate->isWorker = false;
403  coordinate->nParticipants =
404  buildstate->btleader->nparticipanttuplesorts;
405  coordinate->sharedsort = buildstate->btleader->sharedsort;
406  }
407 
408  /*
409  * Begin serial/leader tuplesort.
410  *
411  * In cases where parallelism is involved, the leader receives the same
412  * share of maintenance_work_mem as a serial sort (it is generally treated
413  * in the same way as a serial sort once we return). Parallel worker
414  * Tuplesortstates will have received only a fraction of
415  * maintenance_work_mem, though.
416  *
417  * We rely on the lifetime of the Leader Tuplesortstate almost not
418  * overlapping with any worker Tuplesortstate's lifetime. There may be
419  * some small overlap, but that's okay because we rely on leader
420  * Tuplesortstate only allocating a small, fixed amount of memory here.
421  * When its tuplesort_performsort() is called (by our caller), and
422  * significant amounts of memory are likely to be used, all workers must
423  * have already freed almost all memory held by their Tuplesortstates
424  * (they are about to go away completely, too). The overall effect is
425  * that maintenance_work_mem always represents an absolute high watermark
426  * on the amount of memory used by a CREATE INDEX operation, regardless of
427  * the use of parallelism or any other factor.
428  */
429  buildstate->spool->sortstate =
430  tuplesort_begin_index_btree(heap, index, buildstate->isunique,
431  buildstate->nulls_not_distinct,
432  maintenance_work_mem, coordinate,
433  TUPLESORT_NONE);
434 
435  /*
436  * If building a unique index, put dead tuples in a second spool to keep
437  * them out of the uniqueness check. We expect that the second spool (for
438  * dead tuples) won't get very full, so we give it only work_mem.
439  */
440  if (indexInfo->ii_Unique)
441  {
442  BTSpool *btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
443  SortCoordinate coordinate2 = NULL;
444 
445  /* Initialize secondary spool */
446  btspool2->heap = heap;
447  btspool2->index = index;
448  btspool2->isunique = false;
449  /* Save as secondary spool */
450  buildstate->spool2 = btspool2;
451 
452  if (buildstate->btleader)
453  {
454  /*
455  * Set up non-private state that is passed to
456  * tuplesort_begin_index_btree() about the basic high level
457  * coordination of a parallel sort.
458  */
459  coordinate2 = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
460  coordinate2->isWorker = false;
461  coordinate2->nParticipants =
462  buildstate->btleader->nparticipanttuplesorts;
463  coordinate2->sharedsort = buildstate->btleader->sharedsort2;
464  }
465 
466  /*
467  * We expect that the second one (for dead tuples) won't get very
468  * full, so we give it only work_mem
469  */
470  buildstate->spool2->sortstate =
471  tuplesort_begin_index_btree(heap, index, false, false, work_mem,
472  coordinate2, TUPLESORT_NONE);
473  }
474 
475  /* Fill spool using either serial or parallel heap scan */
476  if (!buildstate->btleader)
477  reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
478  _bt_build_callback, (void *) buildstate,
479  NULL);
480  else
481  reltuples = _bt_parallel_heapscan(buildstate,
482  &indexInfo->ii_BrokenHotChain);
483 
484  /*
485  * Set the progress target for the next phase. Reset the block number
486  * values set by table_index_build_scan
487  */
488  {
489  const int progress_index[] = {
490  PROGRESS_CREATEIDX_TUPLES_TOTAL,
491  PROGRESS_SCAN_BLOCKS_TOTAL,
492  PROGRESS_SCAN_BLOCKS_DONE
493  };
494  const int64 progress_vals[] = {
495  buildstate->indtuples,
496  0, 0
497  };
498 
499  pgstat_progress_update_multi_param(3, progress_index, progress_vals);
500  }
501 
502  /* okay, all heap tuples are spooled */
503  if (buildstate->spool2 && !buildstate->havedead)
504  {
505  /* spool2 turns out to be unnecessary */
506  _bt_spooldestroy(buildstate->spool2);
507  buildstate->spool2 = NULL;
508  }
509 
510  return reltuples;
511 }
void pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val)
#define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN
Definition: nbtree.h:1147
static double _bt_parallel_heapscan(BTBuildState *buildstate, bool *brokenhotchain)
Definition: nbtsort.c:1656
static void _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
Definition: nbtsort.c:1398
static void _bt_spooldestroy(BTSpool *btspool)
Definition: nbtsort.c:517
#define PROGRESS_CREATEIDX_TUPLES_TOTAL
Definition: progress.h:87
#define PROGRESS_SCAN_BLOCKS_DONE
Definition: progress.h:123
#define PROGRESS_SCAN_BLOCKS_TOTAL
Definition: progress.h:122
bool ii_Unique
Definition: execnodes.h:199
bool ii_NullsNotDistinct
Definition: execnodes.h:200
int ii_ParallelWorkers
Definition: execnodes.h:208
Definition: type.h:95
struct SortCoordinateData * SortCoordinate
Definition: tuplesort.h:61

References _bt_begin_parallel(), _bt_build_callback(), _bt_parallel_heapscan(), _bt_spooldestroy(), BTBuildState::btleader, BTBuildState::havedead, BTSpool::heap, IndexInfo::ii_BrokenHotChain, IndexInfo::ii_Concurrent, IndexInfo::ii_NullsNotDistinct, IndexInfo::ii_ParallelWorkers, IndexInfo::ii_Unique, BTSpool::index, BTBuildState::indtuples, BTSpool::isunique, BTBuildState::isunique, SortCoordinateData::isWorker, maintenance_work_mem, SortCoordinateData::nParticipants, BTLeader::nparticipanttuplesorts, BTSpool::nulls_not_distinct, BTBuildState::nulls_not_distinct, palloc0(), pgstat_progress_update_multi_param(), pgstat_progress_update_param(), PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN, PROGRESS_CREATEIDX_SUBPHASE, PROGRESS_CREATEIDX_TUPLES_TOTAL, PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL, BTLeader::sharedsort, SortCoordinateData::sharedsort, BTLeader::sharedsort2, BTSpool::sortstate, BTBuildState::spool, BTBuildState::spool2, table_index_build_scan(), tuplesort_begin_index_btree(), TUPLESORT_NONE, and work_mem.

Referenced by btbuild().

◆ _bt_uppershutdown()

static void _bt_uppershutdown ( BTWriteState *  wstate,
BTPageState *  state 
)
static

Definition at line 1065 of file nbtsort.c.

1066 {
1067  BTPageState *s;
1068  BlockNumber rootblkno = P_NONE;
1069  uint32 rootlevel = 0;
1070  BulkWriteBuffer metabuf;
1071 
1072  /*
1073  * Each iteration of this loop completes one more level of the tree.
1074  */
1075  for (s = state; s != NULL; s = s->btps_next)
1076  {
1077  BlockNumber blkno;
1078  BTPageOpaque opaque;
1079 
1080  blkno = s->btps_blkno;
1081  opaque = BTPageGetOpaque((Page) s->btps_buf);
1082 
1083  /*
1084  * We have to link the last page on this level to somewhere.
1085  *
1086  * If we're at the top, it's the root, so attach it to the metapage.
1087  * Otherwise, add an entry for it to its parent using its low key.
1088  * This may cause the last page of the parent level to split, but
1089  * that's not a problem -- we haven't gotten to it yet.
1090  */
1091  if (s->btps_next == NULL)
1092  {
1093  opaque->btpo_flags |= BTP_ROOT;
1094  rootblkno = blkno;
1095  rootlevel = s->btps_level;
1096  }
1097  else
1098  {
1099  Assert((BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) <=
1100  IndexRelationGetNumberOfKeyAttributes(wstate->index) &&
1101  BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) > 0) ||
1102  P_LEFTMOST(opaque));
1103  Assert(BTreeTupleGetNAtts(s->btps_lowkey, wstate->index) == 0 ||
1104  !P_LEFTMOST(opaque));
1105  BTreeTupleSetDownLink(s->btps_lowkey, blkno);
1106  _bt_buildadd(wstate, s->btps_next, s->btps_lowkey, 0);
1107  pfree(s->btps_lowkey);
1108  s->btps_lowkey = NULL;
1109  }
1110 
1111  /*
1112  * This is the rightmost page, so the ItemId array needs to be slid
1113  * back one slot. Then we can dump out the page.
1114  */
1115  _bt_slideleft((Page) s->btps_buf);
1116  _bt_blwritepage(wstate, s->btps_buf, s->btps_blkno);
1117  s->btps_buf = NULL; /* writepage took ownership of the buffer */
1118  }
1119 
1120  /*
1121  * As the last step in the process, construct the metapage and make it
1122  * point to the new root (unless we had no data at all, in which case it's
1123  * set to point to "P_NONE"). This changes the index to the "valid" state
1124  * by filling in a valid magic number in the metapage.
1125  */
1126  metabuf = smgr_bulk_get_buf(wstate->bulkstate);
1127  _bt_initmetapage((Page) metabuf, rootblkno, rootlevel,
1128  wstate->inskey->allequalimage);
1129  _bt_blwritepage(wstate, metabuf, BTREE_METAPAGE);
1130 }
unsigned int uint32
Definition: c.h:518
void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level, bool allequalimage)
Definition: nbtpage.c:67
#define BTP_ROOT
Definition: nbtree.h:77
static void _bt_slideleft(Page rightmostpage)
Definition: nbtsort.c:685
IndexTuple btps_lowkey
Definition: nbtsort.c:235
BulkWriteBuffer btps_buf
Definition: nbtsort.c:233
BlockNumber btps_blkno
Definition: nbtsort.c:234
struct BTPageState * btps_next
Definition: nbtsort.c:240
uint32 btps_level
Definition: nbtsort.c:238

References _bt_blwritepage(), _bt_buildadd(), _bt_initmetapage(), _bt_slideleft(), BTScanInsertData::allequalimage, Assert, BTP_ROOT, BTPageGetOpaque, BTPageOpaqueData::btpo_flags, BTPageState::btps_blkno, BTPageState::btps_buf, BTPageState::btps_level, BTPageState::btps_lowkey, BTPageState::btps_next, BTREE_METAPAGE, BTreeTupleGetNAtts, BTreeTupleSetDownLink(), BTWriteState::bulkstate, BTWriteState::index, IndexRelationGetNumberOfKeyAttributes, BTWriteState::inskey, P_LEFTMOST, P_NONE, pfree(), and smgr_bulk_get_buf().

Referenced by _bt_load().

◆ btbuild()

IndexBuildResult* btbuild ( Relation  heap,
Relation  index,
IndexInfo *  indexInfo 
)

Definition at line 295 of file nbtsort.c.

296 {
297  IndexBuildResult *result;
298  BTBuildState buildstate;
299  double reltuples;
300 
301 #ifdef BTREE_BUILD_STATS
302  if (log_btree_build_stats)
303  ResetUsage();
304 #endif /* BTREE_BUILD_STATS */
305 
306  buildstate.isunique = indexInfo->ii_Unique;
307  buildstate.nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
308  buildstate.havedead = false;
309  buildstate.heap = heap;
310  buildstate.spool = NULL;
311  buildstate.spool2 = NULL;
312  buildstate.indtuples = 0;
313  buildstate.btleader = NULL;
314 
315  /*
316  * We expect to be called exactly once for any index relation. If that's
317  * not the case, big trouble's what we have.
318  */
319  if (RelationGetNumberOfBlocks(index) != 0)
320  elog(ERROR, "index \"%s\" already contains data",
321  RelationGetRelationName(index));
322 
323  reltuples = _bt_spools_heapscan(heap, index, &buildstate, indexInfo);
324 
325  /*
326  * Finish the build by (1) completing the sort of the spool file, (2)
327  * inserting the sorted tuples into btree pages and (3) building the upper
328  * levels. Finally, it may also be necessary to end use of parallelism.
329  */
330  _bt_leafbuild(buildstate.spool, buildstate.spool2);
331  _bt_spooldestroy(buildstate.spool);
332  if (buildstate.spool2)
333  _bt_spooldestroy(buildstate.spool2);
334  if (buildstate.btleader)
335  _bt_end_parallel(buildstate.btleader);
336 
337  result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
338 
339  result->heap_tuples = reltuples;
340  result->index_tuples = buildstate.indtuples;
341 
342 #ifdef BTREE_BUILD_STATS
343  if (log_btree_build_stats)
344  {
345  ShowUsage("BTREE BUILD STATS");
346  ResetUsage();
347  }
348 #endif /* BTREE_BUILD_STATS */
349 
350  return result;
351 }
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:273
static void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
Definition: nbtsort.c:538
static double _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate, IndexInfo *indexInfo)
Definition: nbtsort.c:365
#define RelationGetRelationName(relation)
Definition: rel.h:539
double heap_tuples
Definition: genam.h:32
double index_tuples
Definition: genam.h:33

References _bt_end_parallel(), _bt_leafbuild(), _bt_spooldestroy(), _bt_spools_heapscan(), BTBuildState::btleader, elog, ERROR, BTBuildState::havedead, BTBuildState::heap, IndexBuildResult::heap_tuples, IndexInfo::ii_NullsNotDistinct, IndexInfo::ii_Unique, IndexBuildResult::index_tuples, BTBuildState::indtuples, BTBuildState::isunique, log_btree_build_stats, BTBuildState::nulls_not_distinct, palloc(), RelationGetNumberOfBlocks, RelationGetRelationName, ResetUsage(), ShowUsage(), BTBuildState::spool, and BTBuildState::spool2.

Referenced by bthandler().
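
For context, btbuild() is reached through the index access method API rather than being called directly. An abbreviated sketch of the registration in nbtree.c's bthandler(); most other callback assignments are omitted here.

Datum
bthandler(PG_FUNCTION_ARGS)
{
    IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

    amroutine->ambuild = btbuild;
    /* ... the remaining am* callbacks are assigned here ... */

    PG_RETURN_POINTER(amroutine);
}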