PostgreSQL Source Code  git master
hash.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "catalog/index.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "optimizer/plancat.h"
#include "pgstat.h"
#include "utils/builtins.h"
#include "utils/index_selfuncs.h"
#include "utils/rel.h"
Include dependency graph for hash.c:

Go to the source code of this file.

Data Structures

struct  HashBuildState
 

Functions

static void hashbuildCallback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 
Datum hashhandler (PG_FUNCTION_ARGS)
 
IndexBuildResult * hashbuild (Relation heap, Relation index, IndexInfo *indexInfo)
 
void hashbuildempty (Relation index)
 
bool hashinsert (Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
 
bool hashgettuple (IndexScanDesc scan, ScanDirection dir)
 
int64 hashgetbitmap (IndexScanDesc scan, TIDBitmap *tbm)
 
IndexScanDesc hashbeginscan (Relation rel, int nkeys, int norderbys)
 
void hashrescan (IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
 
void hashendscan (IndexScanDesc scan)
 
IndexBulkDeleteResult * hashbulkdelete (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
IndexBulkDeleteResult * hashvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
void hashbucketcleanup (Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
 

Function Documentation

◆ hashbeginscan()

IndexScanDesc hashbeginscan ( Relation  rel,
int  nkeys,
int  norderbys 
)

Definition at line 366 of file hash.c.

367 {
368  IndexScanDesc scan;
369  HashScanOpaque so;
370 
371  /* no order by operators allowed */
372  Assert(norderbys == 0);
373 
374  scan = RelationGetIndexScan(rel, nkeys, norderbys);
375 
376  so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
380 
381  so->hashso_buc_populated = false;
382  so->hashso_buc_split = false;
383 
384  so->killedItems = NULL;
385  so->numKilled = 0;
386 
387  scan->opaque = so;
388 
389  return scan;
390 }
#define InvalidBuffer
Definition: buf.h:25
IndexScanDesc RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
Definition: genam.c:81
#define HashScanPosInvalidate(scanpos)
Definition: hash.h:144
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:192
Assert(fmt[strlen(fmt) - 1] !='\n')
void * palloc(Size size)
Definition: mcxt.c:1226
bool hashso_buc_split
Definition: hash.h:180
HashScanPosData currPos
Definition: hash.h:189
bool hashso_buc_populated
Definition: hash.h:174
Buffer hashso_split_bucket_buf
Definition: hash.h:171
Buffer hashso_bucket_buf
Definition: hash.h:164
int * killedItems
Definition: hash.h:182

References Assert(), HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_split_bucket_buf, InvalidBuffer, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), and RelationGetIndexScan().

Referenced by hashhandler().

◆ hashbucketcleanup()

void hashbucketcleanup ( Relation  rel,
Bucket  cur_bucket,
Buffer  bucket_buf,
BlockNumber  bucket_blkno,
BufferAccessStrategy  bstrategy,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask,
double *  tuples_removed,
double *  num_index_tuples,
bool  split_cleanup,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 686 of file hash.c.

692 {
693  BlockNumber blkno;
694  Buffer buf;
696  bool bucket_dirty = false;
697 
698  blkno = bucket_blkno;
699  buf = bucket_buf;
700 
701  if (split_cleanup)
702  new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,
703  lowmask, maxbucket);
704 
705  /* Scan each page in bucket */
706  for (;;)
707  {
708  HashPageOpaque opaque;
709  OffsetNumber offno;
710  OffsetNumber maxoffno;
711  Buffer next_buf;
712  Page page;
713  OffsetNumber deletable[MaxOffsetNumber];
714  int ndeletable = 0;
715  bool retain_pin = false;
716  bool clear_dead_marking = false;
717 
719 
720  page = BufferGetPage(buf);
721  opaque = HashPageGetOpaque(page);
722 
723  /* Scan each tuple in page */
724  maxoffno = PageGetMaxOffsetNumber(page);
725  for (offno = FirstOffsetNumber;
726  offno <= maxoffno;
727  offno = OffsetNumberNext(offno))
728  {
729  ItemPointer htup;
730  IndexTuple itup;
731  Bucket bucket;
732  bool kill_tuple = false;
733 
734  itup = (IndexTuple) PageGetItem(page,
735  PageGetItemId(page, offno));
736  htup = &(itup->t_tid);
737 
738  /*
739  * To remove the dead tuples, we strictly want to rely on results
740  * of callback function. refer btvacuumpage for detailed reason.
741  */
742  if (callback && callback(htup, callback_state))
743  {
744  kill_tuple = true;
745  if (tuples_removed)
746  *tuples_removed += 1;
747  }
748  else if (split_cleanup)
749  {
750  /* delete the tuples that are moved by split. */
752  maxbucket,
753  highmask,
754  lowmask);
755  /* mark the item for deletion */
756  if (bucket != cur_bucket)
757  {
758  /*
759  * We expect tuples to either belong to current bucket or
760  * new_bucket. This is ensured because we don't allow
761  * further splits from bucket that contains garbage. See
762  * comments in _hash_expandtable.
763  */
764  Assert(bucket == new_bucket);
765  kill_tuple = true;
766  }
767  }
768 
769  if (kill_tuple)
770  {
771  /* mark the item for deletion */
772  deletable[ndeletable++] = offno;
773  }
774  else
775  {
776  /* we're keeping it, so count it */
777  if (num_index_tuples)
778  *num_index_tuples += 1;
779  }
780  }
781 
782  /* retain the pin on primary bucket page till end of bucket scan */
783  if (blkno == bucket_blkno)
784  retain_pin = true;
785  else
786  retain_pin = false;
787 
788  blkno = opaque->hasho_nextblkno;
789 
790  /*
791  * Apply deletions, advance to next page and write page if needed.
792  */
793  if (ndeletable > 0)
794  {
795  /* No ereport(ERROR) until changes are logged */
797 
798  PageIndexMultiDelete(page, deletable, ndeletable);
799  bucket_dirty = true;
800 
801  /*
802  * Let us mark the page as clean if vacuum removes the DEAD tuples
803  * from an index page. We do this by clearing
804  * LH_PAGE_HAS_DEAD_TUPLES flag.
805  */
806  if (tuples_removed && *tuples_removed > 0 &&
807  H_HAS_DEAD_TUPLES(opaque))
808  {
810  clear_dead_marking = true;
811  }
812 
814 
815  /* XLOG stuff */
816  if (RelationNeedsWAL(rel))
817  {
818  xl_hash_delete xlrec;
819  XLogRecPtr recptr;
820 
821  xlrec.clear_dead_marking = clear_dead_marking;
822  xlrec.is_primary_bucket_page = (buf == bucket_buf);
823 
824  XLogBeginInsert();
825  XLogRegisterData((char *) &xlrec, SizeOfHashDelete);
826 
827  /*
828  * bucket buffer was not changed, but still needs to be
829  * registered to ensure that we can acquire a cleanup lock on
830  * it during replay.
831  */
832  if (!xlrec.is_primary_bucket_page)
833  {
835 
836  XLogRegisterBuffer(0, bucket_buf, flags);
837  }
838 
840  XLogRegisterBufData(1, (char *) deletable,
841  ndeletable * sizeof(OffsetNumber));
842 
843  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE);
844  PageSetLSN(BufferGetPage(buf), recptr);
845  }
846 
848  }
849 
850  /* bail out if there are no more pages to scan. */
851  if (!BlockNumberIsValid(blkno))
852  break;
853 
854  next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
856  bstrategy);
857 
858  /*
859  * release the lock on previous page after acquiring the lock on next
860  * page
861  */
862  if (retain_pin)
864  else
865  _hash_relbuf(rel, buf);
866 
867  buf = next_buf;
868  }
869 
870  /*
871  * lock the bucket page to clear the garbage flag and squeeze the bucket.
872  * if the current buffer is same as bucket buffer, then we already have
873  * lock on bucket page.
874  */
875  if (buf != bucket_buf)
876  {
877  _hash_relbuf(rel, buf);
878  LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
879  }
880 
881  /*
882  * Clear the garbage flag from bucket after deleting the tuples that are
883  * moved by split. We purposefully clear the flag before squeeze bucket,
884  * so that after restart, vacuum shouldn't again try to delete the moved
885  * by split tuples.
886  */
887  if (split_cleanup)
888  {
889  HashPageOpaque bucket_opaque;
890  Page page;
891 
892  page = BufferGetPage(bucket_buf);
893  bucket_opaque = HashPageGetOpaque(page);
894 
895  /* No ereport(ERROR) until changes are logged */
897 
898  bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
899  MarkBufferDirty(bucket_buf);
900 
901  /* XLOG stuff */
902  if (RelationNeedsWAL(rel))
903  {
904  XLogRecPtr recptr;
905 
906  XLogBeginInsert();
907  XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD);
908 
909  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_CLEANUP);
910  PageSetLSN(page, recptr);
911  }
912 
914  }
915 
916  /*
917  * If we have deleted anything, try to compact free space. For squeezing
918  * the bucket, we must have a cleanup lock, else it can impact the
919  * ordering of tuples for a scan that has started before it.
920  */
921  if (bucket_dirty && IsBufferCleanupOK(bucket_buf))
922  _hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
923  bstrategy);
924  else
925  LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
926 }
uint32 BlockNumber
Definition: block.h:31
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
int Buffer
Definition: buf.h:23
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:5105
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2198
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4808
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:159
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1161
Pointer Page
Definition: bufpage.h:78
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:171
unsigned char uint8
Definition: c.h:493
#define HashPageGetOpaque(page)
Definition: hash.h:88
#define HASH_WRITE
Definition: hash.h:340
#define H_HAS_DEAD_TUPLES(opaque)
Definition: hash.h:93
uint32 Bucket
Definition: hash.h:35
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:60
#define LH_PAGE_HAS_DEAD_TUPLES
Definition: hash.h:61
#define LH_OVERFLOW_PAGE
Definition: hash.h:54
#define InvalidBucket
Definition: hash.h:37
#define XLOG_HASH_SPLIT_CLEANUP
Definition: hash_xlog.h:37
#define SizeOfHashDelete
Definition: hash_xlog.h:186
#define XLOG_HASH_DELETE
Definition: hash_xlog.h:36
void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Buffer bucket_buf, BufferAccessStrategy bstrategy)
Definition: hashovfl.c:827
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:266
Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
Definition: hashpage.c:239
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:292
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:126
Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, uint32 lowmask, uint32 maxbucket)
Definition: hashutil.c:495
IndexTupleData * IndexTuple
Definition: itup.h:53
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
#define MaxOffsetNumber
Definition: off.h:28
static char * buf
Definition: pg_test_fsync.c:73
#define RelationNeedsWAL(relation)
Definition: rel.h:629
BlockNumber hasho_nextblkno
Definition: hash.h:80
uint16 hasho_flag
Definition: hash.h:82
ItemPointerData t_tid
Definition: itup.h:37
bool clear_dead_marking
Definition: hash_xlog.h:180
bool is_primary_bucket_page
Definition: hash_xlog.h:182
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:46
void vacuum_delay_point(void)
Definition: vacuum.c:2322
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:365
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:475
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
Definition: xloginsert.c:406
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_NO_CHANGE
Definition: xloginsert.h:36
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:32

References _hash_get_indextuple_hashkey(), _hash_get_newbucket_from_oldbucket(), _hash_getbuf_with_strategy(), _hash_hashkey2bucket(), _hash_relbuf(), _hash_squeezebucket(), Assert(), BlockNumberIsValid(), buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), callback(), xl_hash_delete::clear_dead_marking, END_CRIT_SECTION, FirstOffsetNumber, H_HAS_DEAD_TUPLES, HASH_WRITE, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageGetOpaque, InvalidBucket, xl_hash_delete::is_primary_bucket_page, IsBufferCleanupOK(), LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LH_PAGE_HAS_DEAD_TUPLES, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexMultiDelete(), PageSetLSN(), PG_USED_FOR_ASSERTS_ONLY, REGBUF_NO_CHANGE, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashDelete, START_CRIT_SECTION, IndexTupleData::t_tid, vacuum_delay_point(), XLOG_HASH_DELETE, XLOG_HASH_SPLIT_CLEANUP, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), _hash_splitbucket(), and hashbulkdelete().

◆ hashbuild()

IndexBuildResult* hashbuild ( Relation  heap,
Relation  index,
IndexInfo indexInfo 
)

Definition at line 114 of file hash.c.

115 {
116  IndexBuildResult *result;
117  BlockNumber relpages;
118  double reltuples;
119  double allvisfrac;
120  uint32 num_buckets;
121  long sort_threshold;
122  HashBuildState buildstate;
123 
124  /*
125  * We expect to be called exactly once for any index relation. If that's
126  * not the case, big trouble's what we have.
127  */
129  elog(ERROR, "index \"%s\" already contains data",
131 
132  /* Estimate the number of rows currently present in the table */
133  estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
134 
135  /* Initialize the hash index metadata page and initial buckets */
136  num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
137 
138  /*
139  * If we just insert the tuples into the index in scan order, then
140  * (assuming their hash codes are pretty random) there will be no locality
141  * of access to the index, and if the index is bigger than available RAM
142  * then we'll thrash horribly. To prevent that scenario, we can sort the
143  * tuples by (expected) bucket number. However, such a sort is useless
144  * overhead when the index does fit in RAM. We choose to sort if the
145  * initial index size exceeds maintenance_work_mem, or the number of
146  * buffers usable for the index, whichever is less. (Limiting by the
147  * number of buffers should reduce thrashing between PG buffers and kernel
148  * buffers, which seems useful even if no physical I/O results. Limiting
149  * by maintenance_work_mem is useful to allow easy testing of the sort
150  * code path, and may be useful to DBAs as an additional control knob.)
151  *
152  * NOTE: this test will need adjustment if a bucket is ever different from
153  * one page. Also, "initial index size" accounting does not include the
154  * metapage, nor the first bitmap page.
155  */
156  sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
157  if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
158  sort_threshold = Min(sort_threshold, NBuffers);
159  else
160  sort_threshold = Min(sort_threshold, NLocBuffer);
161 
162  if (num_buckets >= (uint32) sort_threshold)
163  buildstate.spool = _h_spoolinit(heap, index, num_buckets);
164  else
165  buildstate.spool = NULL;
166 
167  /* prepare to build the index */
168  buildstate.indtuples = 0;
169  buildstate.heapRel = heap;
170 
171  /* do the heap scan */
172  reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
174  (void *) &buildstate, NULL);
176  buildstate.indtuples);
177 
178  if (buildstate.spool)
179  {
180  /* sort the tuples and insert them into the index */
181  _h_indexbuild(buildstate.spool, buildstate.heapRel);
182  _h_spooldestroy(buildstate.spool);
183  }
184 
185  /*
186  * Return statistics
187  */
188  result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
189 
190  result->heap_tuples = reltuples;
191  result->index_tuples = buildstate.indtuples;
192 
193  return result;
194 }
void pgstat_progress_update_param(int index, int64 val)
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:229
unsigned int uint32
Definition: c.h:495
#define Min(x, y)
Definition: c.h:993
#define ERROR
Definition: elog.h:39
int NBuffers
Definition: globals.c:138
int maintenance_work_mem
Definition: globals.c:129
static void hashbuildCallback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
Definition: hash.c:209
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:327
void _h_indexbuild(HSpool *hspool, Relation heapRel)
Definition: hashsort.c:120
HSpool * _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
Definition: hashsort.c:60
void _h_spooldestroy(HSpool *hspool)
Definition: hashsort.c:99
int NLocBuffer
Definition: localbuf.c:43
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1013
#define PROGRESS_CREATEIDX_TUPLES_TOTAL
Definition: progress.h:86
#define RelationGetRelationName(relation)
Definition: rel.h:538
@ MAIN_FORKNUM
Definition: relpath.h:50
HSpool * spool
Definition: hash.c:39
Relation heapRel
Definition: hash.c:41
double indtuples
Definition: hash.c:40
double heap_tuples
Definition: genam.h:32
double index_tuples
Definition: genam.h:33
Definition: type.h:95
static double table_index_build_scan(Relation table_rel, Relation index_rel, struct IndexInfo *index_info, bool allow_sync, bool progress, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
Definition: tableam.h:1772

References _h_indexbuild(), _h_spooldestroy(), _h_spoolinit(), _hash_init(), elog(), ERROR, estimate_rel_size(), hashbuildCallback(), IndexBuildResult::heap_tuples, HashBuildState::heapRel, IndexBuildResult::index_tuples, HashBuildState::indtuples, MAIN_FORKNUM, maintenance_work_mem, Min, NBuffers, NLocBuffer, palloc(), pgstat_progress_update_param(), PROGRESS_CREATEIDX_TUPLES_TOTAL, RelationGetNumberOfBlocks, RelationGetRelationName, HashBuildState::spool, and table_index_build_scan().

Referenced by hashhandler().

◆ hashbuildCallback()

static void hashbuildCallback ( Relation  index,
ItemPointer  tid,
Datum values,
bool isnull,
bool  tupleIsAlive,
void *  state 
)
static

Definition at line 209 of file hash.c.

215 {
216  HashBuildState *buildstate = (HashBuildState *) state;
217  Datum index_values[1];
218  bool index_isnull[1];
219  IndexTuple itup;
220 
221  /* convert data to a hash key; on failure, do not insert anything */
223  values, isnull,
224  index_values, index_isnull))
225  return;
226 
227  /* Either spool the tuple for sorting, or just put it into the index */
228  if (buildstate->spool)
229  _h_spool(buildstate->spool, tid, index_values, index_isnull);
230  else
231  {
232  /* form an index tuple and point it at the heap tuple */
234  index_values, index_isnull);
235  itup->t_tid = *tid;
236  _hash_doinsert(index, itup, buildstate->heapRel, false);
237  pfree(itup);
238  }
239 
240  buildstate->indtuples += 1;
241 }
static Datum values[MAXATTR]
Definition: bootstrap.c:156
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel, bool sorted)
Definition: hashinsert.c:40
void _h_spool(HSpool *hspool, ItemPointer self, const Datum *values, const bool *isnull)
Definition: hashsort.c:109
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:319
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
Definition: indextuple.c:44
void pfree(void *pointer)
Definition: mcxt.c:1456
uintptr_t Datum
Definition: postgres.h:64
#define RelationGetDescr(relation)
Definition: rel.h:530
Definition: regguts.h:323

References _h_spool(), _hash_convert_tuple(), _hash_doinsert(), HashBuildState::heapRel, index_form_tuple(), HashBuildState::indtuples, pfree(), RelationGetDescr, HashBuildState::spool, IndexTupleData::t_tid, and values.

Referenced by hashbuild().

◆ hashbuildempty()

void hashbuildempty ( Relation  index)

Definition at line 200 of file hash.c.

201 {
203 }
@ INIT_FORKNUM
Definition: relpath.h:53

References _hash_init(), and INIT_FORKNUM.

Referenced by hashhandler().

◆ hashbulkdelete()

IndexBulkDeleteResult* hashbulkdelete ( IndexVacuumInfo info,
IndexBulkDeleteResult stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 461 of file hash.c.

463 {
464  Relation rel = info->index;
465  double tuples_removed;
466  double num_index_tuples;
467  double orig_ntuples;
468  Bucket orig_maxbucket;
469  Bucket cur_maxbucket;
470  Bucket cur_bucket;
471  Buffer metabuf = InvalidBuffer;
472  HashMetaPage metap;
473  HashMetaPage cachedmetap;
474 
475  tuples_removed = 0;
476  num_index_tuples = 0;
477 
478  /*
479  * We need a copy of the metapage so that we can use its hashm_spares[]
480  * values to compute bucket page addresses, but a cached copy should be
481  * good enough. (If not, we'll detect that further down and refresh the
482  * cache as necessary.)
483  */
484  cachedmetap = _hash_getcachedmetap(rel, &metabuf, false);
485  Assert(cachedmetap != NULL);
486 
487  orig_maxbucket = cachedmetap->hashm_maxbucket;
488  orig_ntuples = cachedmetap->hashm_ntuples;
489 
490  /* Scan the buckets that we know exist */
491  cur_bucket = 0;
492  cur_maxbucket = orig_maxbucket;
493 
494 loop_top:
495  while (cur_bucket <= cur_maxbucket)
496  {
497  BlockNumber bucket_blkno;
498  BlockNumber blkno;
499  Buffer bucket_buf;
500  Buffer buf;
501  HashPageOpaque bucket_opaque;
502  Page page;
503  bool split_cleanup = false;
504 
505  /* Get address of bucket's start page */
506  bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket);
507 
508  blkno = bucket_blkno;
509 
510  /*
511  * We need to acquire a cleanup lock on the primary bucket page to out
512  * wait concurrent scans before deleting the dead tuples.
513  */
514  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy);
517 
518  page = BufferGetPage(buf);
519  bucket_opaque = HashPageGetOpaque(page);
520 
521  /*
522  * If the bucket contains tuples that are moved by split, then we need
523  * to delete such tuples. We can't delete such tuples if the split
524  * operation on bucket is not finished as those are needed by scans.
525  */
526  if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
527  H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
528  {
529  split_cleanup = true;
530 
531  /*
532  * This bucket might have been split since we last held a lock on
533  * the metapage. If so, hashm_maxbucket, hashm_highmask and
534  * hashm_lowmask might be old enough to cause us to fail to remove
535  * tuples left behind by the most recent split. To prevent that,
536  * now that the primary page of the target bucket has been locked
537  * (and thus can't be further split), check whether we need to
538  * update our cached metapage data.
539  */
540  Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber);
541  if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
542  {
543  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
544  Assert(cachedmetap != NULL);
545  }
546  }
547 
548  bucket_buf = buf;
549 
550  hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
551  cachedmetap->hashm_maxbucket,
552  cachedmetap->hashm_highmask,
553  cachedmetap->hashm_lowmask, &tuples_removed,
554  &num_index_tuples, split_cleanup,
555  callback, callback_state);
556 
557  _hash_dropbuf(rel, bucket_buf);
558 
559  /* Advance to next bucket */
560  cur_bucket++;
561  }
562 
563  if (BufferIsInvalid(metabuf))
565 
566  /* Write-lock metapage and check for split since we started */
568  metap = HashPageGetMeta(BufferGetPage(metabuf));
569 
570  if (cur_maxbucket != metap->hashm_maxbucket)
571  {
572  /* There's been a split, so process the additional bucket(s) */
573  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
574  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
575  Assert(cachedmetap != NULL);
576  cur_maxbucket = cachedmetap->hashm_maxbucket;
577  goto loop_top;
578  }
579 
580  /* Okay, we're really done. Update tuple count in metapage. */
582 
583  if (orig_maxbucket == metap->hashm_maxbucket &&
584  orig_ntuples == metap->hashm_ntuples)
585  {
586  /*
587  * No one has split or inserted anything since start of scan, so
588  * believe our count as gospel.
589  */
590  metap->hashm_ntuples = num_index_tuples;
591  }
592  else
593  {
594  /*
595  * Otherwise, our count is untrustworthy since we may have
596  * double-scanned tuples in split buckets. Proceed by dead-reckoning.
597  * (Note: we still return estimated_count = false, because using this
598  * count is better than not updating reltuples at all.)
599  */
600  if (metap->hashm_ntuples > tuples_removed)
601  metap->hashm_ntuples -= tuples_removed;
602  else
603  metap->hashm_ntuples = 0;
604  num_index_tuples = metap->hashm_ntuples;
605  }
606 
607  MarkBufferDirty(metabuf);
608 
609  /* XLOG stuff */
610  if (RelationNeedsWAL(rel))
611  {
613  XLogRecPtr recptr;
614 
615  xlrec.ntuples = metap->hashm_ntuples;
616 
617  XLogBeginInsert();
618  XLogRegisterData((char *) &xlrec, SizeOfHashUpdateMetaPage);
619 
620  XLogRegisterBuffer(0, metabuf, REGBUF_STANDARD);
621 
622  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE);
623  PageSetLSN(BufferGetPage(metabuf), recptr);
624  }
625 
627 
628  _hash_relbuf(rel, metabuf);
629 
630  /* return statistics */
631  if (stats == NULL)
633  stats->estimated_count = false;
634  stats->num_index_tuples = num_index_tuples;
635  stats->tuples_removed += tuples_removed;
636  /* hashvacuumcleanup will fill in num_pages */
637 
638  return stats;
639 }
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsInvalid(buffer)
Definition: buf.h:31
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4888
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:782
@ RBM_NORMAL
Definition: bufmgr.h:44
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:686
#define HASH_NOLOCK
Definition: hash.h:341
#define LH_BUCKET_PAGE
Definition: hash.h:55
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:91
#define LH_META_PAGE
Definition: hash.h:57
#define HashPageGetMeta(page)
Definition: hash.h:323
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:39
#define HASH_METAPAGE
Definition: hash.h:198
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:90
#define XLOG_HASH_UPDATE_META_PAGE
Definition: hash_xlog.h:38
#define SizeOfHashUpdateMetaPage
Definition: hash_xlog.h:200
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1501
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:277
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:70
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:211
void * palloc0(Size size)
Definition: mcxt.c:1257
uint32 hashm_lowmask
Definition: hash.h:256
uint32 hashm_maxbucket
Definition: hash.h:254
double hashm_ntuples
Definition: hash.h:248
uint32 hashm_highmask
Definition: hash.h:255
BlockNumber hasho_prevblkno
Definition: hash.h:79
bool estimated_count
Definition: genam.h:78
double tuples_removed
Definition: genam.h:80
double num_index_tuples
Definition: genam.h:79
Relation index
Definition: genam.h:46
BufferAccessStrategy strategy
Definition: genam.h:53

References _hash_checkpage(), _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_relbuf(), Assert(), BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsInvalid, callback(), END_CRIT_SECTION, IndexBulkDeleteResult::estimated_count, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, HASH_METAPAGE, HASH_NOLOCK, hashbucketcleanup(), HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HashPageGetOpaque, IndexVacuumInfo::index, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_update_meta_page::ntuples, IndexBulkDeleteResult::num_index_tuples, PageSetLSN(), palloc0(), RBM_NORMAL, ReadBufferExtended(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashUpdateMetaPage, START_CRIT_SECTION, IndexVacuumInfo::strategy, IndexBulkDeleteResult::tuples_removed, XLOG_HASH_UPDATE_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashhandler().

◆ hashendscan()

void hashendscan ( IndexScanDesc  scan)

Definition at line 430 of file hash.c.

431 {
432  HashScanOpaque so = (HashScanOpaque) scan->opaque;
433  Relation rel = scan->indexRelation;
434 
436  {
437  /* Before leaving current page, deal with any killed items */
438  if (so->numKilled > 0)
439  _hash_kill_items(scan);
440  }
441 
442  _hash_dropscanbuf(rel, so);
443 
444  if (so->killedItems != NULL)
445  pfree(so->killedItems);
446  pfree(so);
447  scan->opaque = NULL;
448 }
#define HashScanPosIsValid(scanpos)
Definition: hash.h:137
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:289
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:537
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Relation indexRelation
Definition: relscan.h:118

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosIsValid, if(), IndexScanDescData::indexRelation, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, and pfree().

Referenced by hashhandler().

◆ hashgetbitmap()

int64 hashgetbitmap ( IndexScanDesc  scan,
TIDBitmap tbm 
)

Definition at line 334 of file hash.c.

335 {
336  HashScanOpaque so = (HashScanOpaque) scan->opaque;
337  bool res;
338  int64 ntids = 0;
339  HashScanPosItem *currItem;
340 
341  res = _hash_first(scan, ForwardScanDirection);
342 
343  while (res)
344  {
345  currItem = &so->currPos.items[so->currPos.itemIndex];
346 
347  /*
348  * _hash_first and _hash_next eliminate dead index entries
349  * whenever scan->ignore_killed_tuples is true. Therefore, there's
350  * nothing to do here except add the results to the TIDBitmap.
351  */
352  tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
353  ntids++;
354 
355  res = _hash_next(scan, ForwardScanDirection);
356  }
357 
358  return ntids;
359 }
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:288
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:48
@ ForwardScanDirection
Definition: sdir.h:28
HashScanPosItem items[MaxIndexTuplesPerPage]
Definition: hash.h:127
int itemIndex
Definition: hash.h:125
void tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids, bool recheck)
Definition: tidbitmap.c:376

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, ForwardScanDirection, HashScanPosData::itemIndex, HashScanPosData::items, IndexScanDescData::opaque, res, and tbm_add_tuples().

Referenced by hashhandler().

◆ hashgettuple()

bool hashgettuple ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 282 of file hash.c.

283 {
284  HashScanOpaque so = (HashScanOpaque) scan->opaque;
285  bool res;
286 
287  /* Hash indexes are always lossy since we store only the hash code */
288  scan->xs_recheck = true;
289 
290  /*
291  * If we've already initialized this scan, we can just advance it in the
292  * appropriate direction. If we haven't done so yet, we call a routine to
293  * get the first item in the scan.
294  */
295  if (!HashScanPosIsValid(so->currPos))
296  res = _hash_first(scan, dir);
297  else
298  {
299  /*
300  * Check to see if we should kill the previously-fetched tuple.
301  */
302  if (scan->kill_prior_tuple)
303  {
304  /*
305  * Yes, so remember it for later. (We'll deal with all such tuples
306  * at once right after leaving the index page or at end of scan.)
307  * In case if caller reverses the indexscan direction it is quite
308  * possible that the same item might get entered multiple times.
309  * But, we don't detect that; instead, we just forget any excess
310  * entries.
311  */
312  if (so->killedItems == NULL)
313  so->killedItems = (int *)
314  palloc(MaxIndexTuplesPerPage * sizeof(int));
315 
316  if (so->numKilled < MaxIndexTuplesPerPage)
317  so->killedItems[so->numKilled++] = so->currPos.itemIndex;
318  }
319 
320  /*
321  * Now continue the scan.
322  */
323  res = _hash_next(scan, dir);
324  }
325 
326  return res;
327 }
#define MaxIndexTuplesPerPage
Definition: itup.h:165
bool kill_prior_tuple
Definition: relscan.h:128

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, HashScanPosIsValid, if(), HashScanPosData::itemIndex, IndexScanDescData::kill_prior_tuple, HashScanOpaqueData::killedItems, MaxIndexTuplesPerPage, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), res, and IndexScanDescData::xs_recheck.

Referenced by hashhandler().

◆ hashhandler()

Datum hashhandler ( PG_FUNCTION_ARGS  )

Definition at line 57 of file hash.c.

58 {
59  IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);
60 
61  amroutine->amstrategies = HTMaxStrategyNumber;
62  amroutine->amsupport = HASHNProcs;
63  amroutine->amoptsprocnum = HASHOPTIONS_PROC;
64  amroutine->amcanorder = false;
65  amroutine->amcanorderbyop = false;
66  amroutine->amcanbackward = true;
67  amroutine->amcanunique = false;
68  amroutine->amcanmulticol = false;
69  amroutine->amoptionalkey = false;
70  amroutine->amsearcharray = false;
71  amroutine->amsearchnulls = false;
72  amroutine->amstorage = false;
73  amroutine->amclusterable = false;
74  amroutine->ampredlocks = true;
75  amroutine->amcanparallel = false;
76  amroutine->amcaninclude = false;
77  amroutine->amusemaintenanceworkmem = false;
78  amroutine->amsummarizing = false;
79  amroutine->amparallelvacuumoptions =
80  VACUUM_OPTION_PARALLEL_BULKDEL;
81  amroutine->amkeytype = INT4OID;
82 
83  amroutine->ambuild = hashbuild;
84  amroutine->ambuildempty = hashbuildempty;
85  amroutine->aminsert = hashinsert;
86  amroutine->aminsertcleanup = NULL;
87  amroutine->ambulkdelete = hashbulkdelete;
88  amroutine->amvacuumcleanup = hashvacuumcleanup;
89  amroutine->amcanreturn = NULL;
90  amroutine->amcostestimate = hashcostestimate;
91  amroutine->amoptions = hashoptions;
92  amroutine->amproperty = NULL;
93  amroutine->ambuildphasename = NULL;
94  amroutine->amvalidate = hashvalidate;
95  amroutine->amadjustmembers = hashadjustmembers;
96  amroutine->ambeginscan = hashbeginscan;
97  amroutine->amrescan = hashrescan;
98  amroutine->amgettuple = hashgettuple;
99  amroutine->amgetbitmap = hashgetbitmap;
100  amroutine->amendscan = hashendscan;
101  amroutine->ammarkpos = NULL;
102  amroutine->amrestrpos = NULL;
103  amroutine->amestimateparallelscan = NULL;
104  amroutine->aminitparallelscan = NULL;
105  amroutine->amparallelrescan = NULL;
106 
107  PG_RETURN_POINTER(amroutine);
108 }
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:361
bool hashinsert(Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
Definition: hash.c:250
IndexBulkDeleteResult * hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: hash.c:647
IndexBulkDeleteResult * hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:461
bool hashgettuple(IndexScanDesc scan, ScanDirection dir)
Definition: hash.c:282
void hashbuildempty(Relation index)
Definition: hash.c:200
IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys)
Definition: hash.c:366
void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
Definition: hash.c:396
void hashendscan(IndexScanDesc scan)
Definition: hash.c:430
IndexBuildResult * hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
Definition: hash.c:114
int64 hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: hash.c:334
#define HASHNProcs
Definition: hash.h:358
#define HASHOPTIONS_PROC
Definition: hash.h:357
bytea * hashoptions(Datum reloptions, bool validate)
Definition: hashutil.c:276
void hashadjustmembers(Oid opfamilyoid, Oid opclassoid, List *operators, List *functions)
Definition: hashvalidate.c:352
bool hashvalidate(Oid opclassoid)
Definition: hashvalidate.c:47
#define makeNode(_type_)
Definition: nodes.h:176
void hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:7043
#define HTMaxStrategyNumber
Definition: stratnum.h:43
ambuildphasename_function ambuildphasename
Definition: amapi.h:274
ambuildempty_function ambuildempty
Definition: amapi.h:265
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:269
bool amclusterable
Definition: amapi.h:241
amoptions_function amoptions
Definition: amapi.h:272
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:286
amrestrpos_function amrestrpos
Definition: amapi.h:283
aminsert_function aminsert
Definition: amapi.h:266
amendscan_function amendscan
Definition: amapi.h:281
uint16 amoptsprocnum
Definition: amapi.h:221
amparallelrescan_function amparallelrescan
Definition: amapi.h:288
Oid amkeytype
Definition: amapi.h:255
bool ampredlocks
Definition: amapi.h:243
uint16 amsupport
Definition: amapi.h:219
amcostestimate_function amcostestimate
Definition: amapi.h:271
bool amcanorderbyop
Definition: amapi.h:225
amadjustmembers_function amadjustmembers
Definition: amapi.h:276
ambuild_function ambuild
Definition: amapi.h:264
bool amstorage
Definition: amapi.h:239
uint16 amstrategies
Definition: amapi.h:217
bool amoptionalkey
Definition: amapi.h:233
amgettuple_function amgettuple
Definition: amapi.h:279
amcanreturn_function amcanreturn
Definition: amapi.h:270
bool amcanunique
Definition: amapi.h:229
amgetbitmap_function amgetbitmap
Definition: amapi.h:280
amproperty_function amproperty
Definition: amapi.h:273
ambulkdelete_function ambulkdelete
Definition: amapi.h:268
bool amsearcharray
Definition: amapi.h:235
bool amsummarizing
Definition: amapi.h:251
amvalidate_function amvalidate
Definition: amapi.h:275
ammarkpos_function ammarkpos
Definition: amapi.h:282
bool amcanmulticol
Definition: amapi.h:231
bool amusemaintenanceworkmem
Definition: amapi.h:249
ambeginscan_function ambeginscan
Definition: amapi.h:277
bool amcanparallel
Definition: amapi.h:245
amrescan_function amrescan
Definition: amapi.h:278
bool amcanorder
Definition: amapi.h:223
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:287
uint8 amparallelvacuumoptions
Definition: amapi.h:253
aminsertcleanup_function aminsertcleanup
Definition: amapi.h:267
bool amcanbackward
Definition: amapi.h:227
bool amcaninclude
Definition: amapi.h:247
bool amsearchnulls
Definition: amapi.h:237
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:47

References IndexAmRoutine::amadjustmembers, IndexAmRoutine::ambeginscan, IndexAmRoutine::ambuild, IndexAmRoutine::ambuildempty, IndexAmRoutine::ambuildphasename, IndexAmRoutine::ambulkdelete, IndexAmRoutine::amcanbackward, IndexAmRoutine::amcaninclude, IndexAmRoutine::amcanmulticol, IndexAmRoutine::amcanorder, IndexAmRoutine::amcanorderbyop, IndexAmRoutine::amcanparallel, IndexAmRoutine::amcanreturn, IndexAmRoutine::amcanunique, IndexAmRoutine::amclusterable, IndexAmRoutine::amcostestimate, IndexAmRoutine::amendscan, IndexAmRoutine::amestimateparallelscan, IndexAmRoutine::amgetbitmap, IndexAmRoutine::amgettuple, IndexAmRoutine::aminitparallelscan, IndexAmRoutine::aminsert, IndexAmRoutine::aminsertcleanup, IndexAmRoutine::amkeytype, IndexAmRoutine::ammarkpos, IndexAmRoutine::amoptionalkey, IndexAmRoutine::amoptions, IndexAmRoutine::amoptsprocnum, IndexAmRoutine::amparallelrescan, IndexAmRoutine::amparallelvacuumoptions, IndexAmRoutine::ampredlocks, IndexAmRoutine::amproperty, IndexAmRoutine::amrescan, IndexAmRoutine::amrestrpos, IndexAmRoutine::amsearcharray, IndexAmRoutine::amsearchnulls, IndexAmRoutine::amstorage, IndexAmRoutine::amstrategies, IndexAmRoutine::amsummarizing, IndexAmRoutine::amsupport, IndexAmRoutine::amusemaintenanceworkmem, IndexAmRoutine::amvacuumcleanup, IndexAmRoutine::amvalidate, hashadjustmembers(), hashbeginscan(), hashbuild(), hashbuildempty(), hashbulkdelete(), hashcostestimate(), hashendscan(), hashgetbitmap(), hashgettuple(), hashinsert(), HASHNProcs, hashoptions(), HASHOPTIONS_PROC, hashrescan(), hashvacuumcleanup(), hashvalidate(), HTMaxStrategyNumber, makeNode, PG_RETURN_POINTER, and VACUUM_OPTION_PARALLEL_BULKDEL.

◆ hashinsert()

bool hashinsert ( Relation  rel,
Datum values,
bool isnull,
ItemPointer  ht_ctid,
Relation  heapRel,
IndexUniqueCheck  checkUnique,
bool  indexUnchanged,
IndexInfo indexInfo 
)

Definition at line 250 of file hash.c.

255 {
256  Datum index_values[1];
257  bool index_isnull[1];
258  IndexTuple itup;
259 
260  /* convert data to a hash key; on failure, do not insert anything */
261  if (!_hash_convert_tuple(rel,
262  values, isnull,
263  index_values, index_isnull))
264  return false;
265 
266  /* form an index tuple and point it at the heap tuple */
267  itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull);
268  itup->t_tid = *ht_ctid;
269 
270  _hash_doinsert(rel, itup, heapRel, false);
271 
272  pfree(itup);
273 
274  return false;
275 }

References _hash_convert_tuple(), _hash_doinsert(), index_form_tuple(), pfree(), RelationGetDescr, IndexTupleData::t_tid, and values.

Referenced by hashhandler().

◆ hashrescan()

void hashrescan ( IndexScanDesc  scan,
ScanKey  scankey,
int  nscankeys,
ScanKey  orderbys,
int  norderbys 
)

Definition at line 396 of file hash.c.

398 {
399  HashScanOpaque so = (HashScanOpaque) scan->opaque;
400  Relation rel = scan->indexRelation;
401 
402  if (HashScanPosIsValid(so->currPos))
403  {
404  /* Before leaving current page, deal with any killed items */
405  if (so->numKilled > 0)
406  _hash_kill_items(scan);
407  }
408 
409  _hash_dropscanbuf(rel, so);
410 
411  /* set position invalid (this will cause _hash_first call) */
412  HashScanPosInvalidate(so->currPos);
413 
414  /* Update scan key, if a new one is given */
415  if (scankey && scan->numberOfKeys > 0)
416  {
417  memmove(scan->keyData,
418  scankey,
419  scan->numberOfKeys * sizeof(ScanKeyData));
420  }
421 
422  so->hashso_buc_populated = false;
423  so->hashso_buc_split = false;
424 }
struct ScanKeyData * keyData
Definition: relscan.h:122

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanPosIsValid, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, if(), IndexScanDescData::indexRelation, IndexScanDescData::keyData, IndexScanDescData::numberOfKeys, HashScanOpaqueData::numKilled, and IndexScanDescData::opaque.

Referenced by hashhandler().

◆ hashvacuumcleanup()

IndexBulkDeleteResult* hashvacuumcleanup ( IndexVacuumInfo info,
IndexBulkDeleteResult stats 
)

Definition at line 647 of file hash.c.

648 {
649  Relation rel = info->index;
650  BlockNumber num_pages;
651 
652  /* If hashbulkdelete wasn't called, return NULL signifying no change */
653  /* Note: this covers the analyze_only case too */
654  if (stats == NULL)
655  return NULL;
656 
657  /* update statistics */
658  num_pages = RelationGetNumberOfBlocks(rel);
659  stats->num_pages = num_pages;
660 
661  return stats;
662 }
BlockNumber num_pages
Definition: genam.h:77

References IndexVacuumInfo::index, IndexBulkDeleteResult::num_pages, and RelationGetNumberOfBlocks.

Referenced by hashhandler().