PostgreSQL Source Code  git master
hash.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "catalog/index.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "optimizer/plancat.h"
#include "pgstat.h"
#include "utils/builtins.h"
#include "utils/index_selfuncs.h"
#include "utils/rel.h"
Include dependency graph for hash.c:

Go to the source code of this file.

Data Structures

struct  HashBuildState
 

Functions

static void hashbuildCallback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 
Datum hashhandler (PG_FUNCTION_ARGS)
 
IndexBuildResult * hashbuild (Relation heap, Relation index, IndexInfo *indexInfo)
 
void hashbuildempty (Relation index)
 
bool hashinsert (Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, IndexInfo *indexInfo)
 
bool hashgettuple (IndexScanDesc scan, ScanDirection dir)
 
int64 hashgetbitmap (IndexScanDesc scan, TIDBitmap *tbm)
 
IndexScanDesc hashbeginscan (Relation rel, int nkeys, int norderbys)
 
void hashrescan (IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
 
void hashendscan (IndexScanDesc scan)
 
IndexBulkDeleteResult * hashbulkdelete (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
IndexBulkDeleteResult * hashvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
void hashbucketcleanup (Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
 

Function Documentation

◆ hashbeginscan()

IndexScanDesc hashbeginscan ( Relation  rel,
int  nkeys,
int  norderbys 
)

Definition at line 361 of file hash.c.

References Assert, HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_split_bucket_buf, InvalidBuffer, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), and RelationGetIndexScan().

Referenced by hashhandler().

362 {
363  IndexScanDesc scan;
364  HashScanOpaque so;
365 
366  /* no order by operators allowed */
367  Assert(norderbys == 0);
368 
369  scan = RelationGetIndexScan(rel, nkeys, norderbys);
370 
371  so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
375 
376  so->hashso_buc_populated = false;
377  so->hashso_buc_split = false;
378 
379  so->killedItems = NULL;
380  so->numKilled = 0;
381 
382  scan->opaque = so;
383 
384  return scan;
385 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:190
#define InvalidBuffer
Definition: buf.h:25
int * killedItems
Definition: hash.h:180
Buffer hashso_bucket_buf
Definition: hash.h:162
#define HashScanPosInvalidate(scanpos)
Definition: hash.h:142
bool hashso_buc_populated
Definition: hash.h:172
#define Assert(condition)
Definition: c.h:738
HashScanPosData currPos
Definition: hash.h:187
bool hashso_buc_split
Definition: hash.h:178
void * palloc(Size size)
Definition: mcxt.c:949
Buffer hashso_split_bucket_buf
Definition: hash.h:169
IndexScanDesc RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
Definition: genam.c:80

◆ hashbucketcleanup()

void hashbucketcleanup ( Relation  rel,
Bucket  cur_bucket,
Buffer  bucket_buf,
BlockNumber  bucket_blkno,
BufferAccessStrategy  bstrategy,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask,
double *  tuples_removed,
double *  num_index_tuples,
bool  split_cleanup,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 681 of file hash.c.

References _hash_get_indextuple_hashkey(), _hash_get_newbucket_from_oldbucket(), _hash_getbuf_with_strategy(), _hash_hashkey2bucket(), _hash_relbuf(), _hash_squeezebucket(), Assert, DataPageDeleteStack::blkno, BlockNumberIsValid, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, callback(), xl_hash_delete::clear_dead_marking, END_CRIT_SECTION, FirstOffsetNumber, H_HAS_DEAD_TUPLES, HASH_WRITE, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, InvalidBucket, xl_hash_delete::is_primary_bucket_page, IsBufferCleanupOK(), LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LH_PAGE_HAS_DEAD_TUPLES, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, PageIndexMultiDelete(), PageSetLSN, PG_USED_FOR_ASSERTS_ONLY, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashDelete, START_CRIT_SECTION, IndexTupleData::t_tid, vacuum_delay_point(), XLOG_HASH_DELETE, XLOG_HASH_SPLIT_CLEANUP, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), _hash_splitbucket(), and hashbulkdelete().

687 {
688  BlockNumber blkno;
689  Buffer buf;
691  bool bucket_dirty = false;
692 
693  blkno = bucket_blkno;
694  buf = bucket_buf;
695 
696  if (split_cleanup)
697  new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,
698  lowmask, maxbucket);
699 
700  /* Scan each page in bucket */
701  for (;;)
702  {
703  HashPageOpaque opaque;
704  OffsetNumber offno;
705  OffsetNumber maxoffno;
706  Buffer next_buf;
707  Page page;
708  OffsetNumber deletable[MaxOffsetNumber];
709  int ndeletable = 0;
710  bool retain_pin = false;
711  bool clear_dead_marking = false;
712 
714 
715  page = BufferGetPage(buf);
716  opaque = (HashPageOpaque) PageGetSpecialPointer(page);
717 
718  /* Scan each tuple in page */
719  maxoffno = PageGetMaxOffsetNumber(page);
720  for (offno = FirstOffsetNumber;
721  offno <= maxoffno;
722  offno = OffsetNumberNext(offno))
723  {
724  ItemPointer htup;
725  IndexTuple itup;
726  Bucket bucket;
727  bool kill_tuple = false;
728 
729  itup = (IndexTuple) PageGetItem(page,
730  PageGetItemId(page, offno));
731  htup = &(itup->t_tid);
732 
733  /*
734  * To remove the dead tuples, we strictly want to rely on results
735  * of callback function. refer btvacuumpage for detailed reason.
736  */
737  if (callback && callback(htup, callback_state))
738  {
739  kill_tuple = true;
740  if (tuples_removed)
741  *tuples_removed += 1;
742  }
743  else if (split_cleanup)
744  {
745  /* delete the tuples that are moved by split. */
747  maxbucket,
748  highmask,
749  lowmask);
750  /* mark the item for deletion */
751  if (bucket != cur_bucket)
752  {
753  /*
754  * We expect tuples to either belong to current bucket or
755  * new_bucket. This is ensured because we don't allow
756  * further splits from bucket that contains garbage. See
757  * comments in _hash_expandtable.
758  */
759  Assert(bucket == new_bucket);
760  kill_tuple = true;
761  }
762  }
763 
764  if (kill_tuple)
765  {
766  /* mark the item for deletion */
767  deletable[ndeletable++] = offno;
768  }
769  else
770  {
771  /* we're keeping it, so count it */
772  if (num_index_tuples)
773  *num_index_tuples += 1;
774  }
775  }
776 
777  /* retain the pin on primary bucket page till end of bucket scan */
778  if (blkno == bucket_blkno)
779  retain_pin = true;
780  else
781  retain_pin = false;
782 
783  blkno = opaque->hasho_nextblkno;
784 
785  /*
786  * Apply deletions, advance to next page and write page if needed.
787  */
788  if (ndeletable > 0)
789  {
790  /* No ereport(ERROR) until changes are logged */
792 
793  PageIndexMultiDelete(page, deletable, ndeletable);
794  bucket_dirty = true;
795 
796  /*
797  * Let us mark the page as clean if vacuum removes the DEAD tuples
798  * from an index page. We do this by clearing
799  * LH_PAGE_HAS_DEAD_TUPLES flag.
800  */
801  if (tuples_removed && *tuples_removed > 0 &&
802  H_HAS_DEAD_TUPLES(opaque))
803  {
805  clear_dead_marking = true;
806  }
807 
808  MarkBufferDirty(buf);
809 
810  /* XLOG stuff */
811  if (RelationNeedsWAL(rel))
812  {
813  xl_hash_delete xlrec;
814  XLogRecPtr recptr;
815 
816  xlrec.clear_dead_marking = clear_dead_marking;
817  xlrec.is_primary_bucket_page = (buf == bucket_buf) ? true : false;
818 
819  XLogBeginInsert();
820  XLogRegisterData((char *) &xlrec, SizeOfHashDelete);
821 
822  /*
823  * bucket buffer needs to be registered to ensure that we can
824  * acquire a cleanup lock on it during replay.
825  */
826  if (!xlrec.is_primary_bucket_page)
828 
830  XLogRegisterBufData(1, (char *) deletable,
831  ndeletable * sizeof(OffsetNumber));
832 
833  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE);
834  PageSetLSN(BufferGetPage(buf), recptr);
835  }
836 
838  }
839 
840  /* bail out if there are no more pages to scan. */
841  if (!BlockNumberIsValid(blkno))
842  break;
843 
844  next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
846  bstrategy);
847 
848  /*
849  * release the lock on previous page after acquiring the lock on next
850  * page
851  */
852  if (retain_pin)
854  else
855  _hash_relbuf(rel, buf);
856 
857  buf = next_buf;
858  }
859 
860  /*
861  * lock the bucket page to clear the garbage flag and squeeze the bucket.
862  * if the current buffer is same as bucket buffer, then we already have
863  * lock on bucket page.
864  */
865  if (buf != bucket_buf)
866  {
867  _hash_relbuf(rel, buf);
868  LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
869  }
870 
871  /*
872  * Clear the garbage flag from bucket after deleting the tuples that are
873  * moved by split. We purposefully clear the flag before squeeze bucket,
874  * so that after restart, vacuum shouldn't again try to delete the moved
875  * by split tuples.
876  */
877  if (split_cleanup)
878  {
879  HashPageOpaque bucket_opaque;
880  Page page;
881 
882  page = BufferGetPage(bucket_buf);
883  bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
884 
885  /* No ereport(ERROR) until changes are logged */
887 
888  bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
889  MarkBufferDirty(bucket_buf);
890 
891  /* XLOG stuff */
892  if (RelationNeedsWAL(rel))
893  {
894  XLogRecPtr recptr;
895 
896  XLogBeginInsert();
897  XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD);
898 
899  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_CLEANUP);
900  PageSetLSN(page, recptr);
901  }
902 
904  }
905 
906  /*
907  * If we have deleted anything, try to compact free space. For squeezing
908  * the bucket, we must have a cleanup lock, else it can impact the
909  * ordering of tuples for a scan that has started before it.
910  */
911  if (bucket_dirty && IsBufferCleanupOK(bucket_buf))
912  _hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
913  bstrategy);
914  else
915  LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
916 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:362
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:126
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
#define SizeOfHashDelete
Definition: hash_xlog.h:192
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
#define MaxOffsetNumber
Definition: off.h:28
Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
Definition: hashpage.c:238
ItemPointerData t_tid
Definition: itup.h:37
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_HASH_SPLIT_CLEANUP
Definition: hash_xlog.h:40
bool clear_dead_marking
Definition: hash_xlog.h:186
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:60
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
uint32 Bucket
Definition: hash.h:35
#define InvalidBucket
Definition: hash.h:37
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:292
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
bool is_primary_bucket_page
Definition: hash_xlog.h:188
static char * buf
Definition: pg_test_fsync.c:67
#define HASH_WRITE
Definition: hash.h:338
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
#define REGBUF_STANDARD
Definition: xloginsert.h:35
Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, uint32 lowmask, uint32 maxbucket)
Definition: hashutil.c:495
#define XLOG_HASH_DELETE
Definition: hash_xlog.h:39
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:3972
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
#define LH_OVERFLOW_PAGE
Definition: hash.h:54
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:265
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:828
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
#define PageGetSpecialPointer(page)
Definition: bufpage.h:326
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:32
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:86
void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Buffer bucket_buf, BufferAccessStrategy bstrategy)
Definition: hashovfl.c:805
#define RelationNeedsWAL(relation)
Definition: rel.h:562
uint16 hasho_flag
Definition: hash.h:82
BlockNumber hasho_nextblkno
Definition: hash.h:80
void vacuum_delay_point(void)
Definition: vacuum.c:1995
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define H_HAS_DEAD_TUPLES(opaque)
Definition: hash.h:91
#define LH_PAGE_HAS_DEAD_TUPLES
Definition: hash.h:61
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:121

◆ hashbuild()

IndexBuildResult* hashbuild ( Relation  heap,
Relation  index,
IndexInfo *  indexInfo 
)

Definition at line 110 of file hash.c.

References _h_indexbuild(), _h_spooldestroy(), _h_spoolinit(), _hash_init(), elog, ERROR, estimate_rel_size(), hashbuildCallback(), IndexBuildResult::heap_tuples, HashBuildState::heapRel, IndexBuildResult::index_tuples, HashBuildState::indtuples, MAIN_FORKNUM, maintenance_work_mem, Min, NBuffers, NLocBuffer, palloc(), pgstat_progress_update_param(), PROGRESS_CREATEIDX_TUPLES_TOTAL, RelationData::rd_rel, RelationGetNumberOfBlocks, RelationGetRelationName, HashBuildState::spool, and table_index_build_scan().

Referenced by hashhandler().

111 {
112  IndexBuildResult *result;
113  BlockNumber relpages;
114  double reltuples;
115  double allvisfrac;
116  uint32 num_buckets;
117  long sort_threshold;
118  HashBuildState buildstate;
119 
120  /*
121  * We expect to be called exactly once for any index relation. If that's
122  * not the case, big trouble's what we have.
123  */
124  if (RelationGetNumberOfBlocks(index) != 0)
125  elog(ERROR, "index \"%s\" already contains data",
126  RelationGetRelationName(index));
127 
128  /* Estimate the number of rows currently present in the table */
129  estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
130 
131  /* Initialize the hash index metadata page and initial buckets */
132  num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
133 
134  /*
135  * If we just insert the tuples into the index in scan order, then
136  * (assuming their hash codes are pretty random) there will be no locality
137  * of access to the index, and if the index is bigger than available RAM
138  * then we'll thrash horribly. To prevent that scenario, we can sort the
139  * tuples by (expected) bucket number. However, such a sort is useless
140  * overhead when the index does fit in RAM. We choose to sort if the
141  * initial index size exceeds maintenance_work_mem, or the number of
142  * buffers usable for the index, whichever is less. (Limiting by the
143  * number of buffers should reduce thrashing between PG buffers and kernel
144  * buffers, which seems useful even if no physical I/O results. Limiting
145  * by maintenance_work_mem is useful to allow easy testing of the sort
146  * code path, and may be useful to DBAs as an additional control knob.)
147  *
148  * NOTE: this test will need adjustment if a bucket is ever different from
149  * one page. Also, "initial index size" accounting does not include the
150  * metapage, nor the first bitmap page.
151  */
152  sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
153  if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
154  sort_threshold = Min(sort_threshold, NBuffers);
155  else
156  sort_threshold = Min(sort_threshold, NLocBuffer);
157 
158  if (num_buckets >= (uint32) sort_threshold)
159  buildstate.spool = _h_spoolinit(heap, index, num_buckets);
160  else
161  buildstate.spool = NULL;
162 
163  /* prepare to build the index */
164  buildstate.indtuples = 0;
165  buildstate.heapRel = heap;
166 
167  /* do the heap scan */
168  reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
170  (void *) &buildstate, NULL);
172  buildstate.indtuples);
173 
174  if (buildstate.spool)
175  {
176  /* sort the tuples and insert them into the index */
177  _h_indexbuild(buildstate.spool, buildstate.heapRel);
178  _h_spooldestroy(buildstate.spool);
179  }
180 
181  /*
182  * Return statistics
183  */
184  result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
185 
186  result->heap_tuples = reltuples;
187  result->index_tuples = buildstate.indtuples;
188 
189  return result;
190 }
void pgstat_progress_update_param(int index, int64 val)
Definition: pgstat.c:3235
#define Min(x, y)
Definition: c.h:920
double indtuples
Definition: hash.c:39
#define PROGRESS_CREATEIDX_TUPLES_TOTAL
Definition: progress.h:84
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:109
#define ERROR
Definition: elog.h:43
static double table_index_build_scan(Relation table_rel, Relation index_rel, struct IndexInfo *index_info, bool allow_sync, bool progress, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
Definition: tableam.h:1524
int NLocBuffer
Definition: localbuf.c:41
void _h_indexbuild(HSpool *hspool, Relation heapRel)
Definition: hashsort.c:119
#define RelationGetRelationName(relation)
Definition: rel.h:490
unsigned int uint32
Definition: c.h:367
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:949
HSpool * spool
Definition: hash.c:38
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:211
int maintenance_work_mem
Definition: globals.c:122
Relation heapRel
Definition: hash.c:40
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:326
void _h_spooldestroy(HSpool *hspool)
Definition: hashsort.c:98
void * palloc(Size size)
Definition: mcxt.c:949
#define elog(elevel,...)
Definition: elog.h:214
int NBuffers
Definition: globals.c:131
static void hashbuildCallback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
Definition: hash.c:205
HSpool * _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
Definition: hashsort.c:59
double index_tuples
Definition: genam.h:33
double heap_tuples
Definition: genam.h:32

◆ hashbuildCallback()

static void hashbuildCallback ( Relation  index,
ItemPointer  tid,
Datum values,
bool isnull,
bool  tupleIsAlive,
void *  state 
)
static

Definition at line 205 of file hash.c.

References _h_spool(), _hash_convert_tuple(), _hash_doinsert(), HashBuildState::heapRel, index_form_tuple(), HashBuildState::indtuples, pfree(), RelationGetDescr, HashBuildState::spool, and IndexTupleData::t_tid.

Referenced by hashbuild().

211 {
212  HashBuildState *buildstate = (HashBuildState *) state;
213  Datum index_values[1];
214  bool index_isnull[1];
215  IndexTuple itup;
216 
217  /* convert data to a hash key; on failure, do not insert anything */
218  if (!_hash_convert_tuple(index,
219  values, isnull,
220  index_values, index_isnull))
221  return;
222 
223  /* Either spool the tuple for sorting, or just put it into the index */
224  if (buildstate->spool)
225  _h_spool(buildstate->spool, tid, index_values, index_isnull);
226  else
227  {
228  /* form an index tuple and point it at the heap tuple */
229  itup = index_form_tuple(RelationGetDescr(index),
230  index_values, index_isnull);
231  itup->t_tid = *tid;
232  _hash_doinsert(index, itup, buildstate->heapRel);
233  pfree(itup);
234  }
235 
236  buildstate->indtuples += 1;
237 }
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
Definition: hashinsert.c:36
#define RelationGetDescr(relation)
Definition: rel.h:482
ItemPointerData t_tid
Definition: itup.h:37
double indtuples
Definition: hash.c:39
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:47
void pfree(void *pointer)
Definition: mcxt.c:1056
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:319
void _h_spool(HSpool *hspool, ItemPointer self, Datum *values, bool *isnull)
Definition: hashsort.c:108
HSpool * spool
Definition: hash.c:38
uintptr_t Datum
Definition: postgres.h:367
Relation heapRel
Definition: hash.c:40
Definition: regguts.h:298
static Datum values[MAXATTR]
Definition: bootstrap.c:167

◆ hashbuildempty()

void hashbuildempty ( Relation  index)

Definition at line 196 of file hash.c.

References _hash_init(), and INIT_FORKNUM.

Referenced by hashhandler().

197 {
198  _hash_init(index, 0, INIT_FORKNUM);
199 }
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:326

◆ hashbulkdelete()

IndexBulkDeleteResult* hashbulkdelete ( IndexVacuumInfo info,
IndexBulkDeleteResult stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 456 of file hash.c.

References _hash_checkpage(), _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_relbuf(), Assert, DataPageDeleteStack::blkno, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsInvalid, END_CRIT_SECTION, IndexBulkDeleteResult::estimated_count, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, HASH_METAPAGE, HASH_NOLOCK, hashbucketcleanup(), HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, IndexVacuumInfo::index, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_update_meta_page::ntuples, IndexBulkDeleteResult::num_index_tuples, PageGetSpecialPointer, PageSetLSN, palloc0(), RBM_NORMAL, ReadBufferExtended(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashUpdateMetaPage, START_CRIT_SECTION, IndexVacuumInfo::strategy, IndexBulkDeleteResult::tuples_removed, XLOG_HASH_UPDATE_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashhandler().

458 {
459  Relation rel = info->index;
460  double tuples_removed;
461  double num_index_tuples;
462  double orig_ntuples;
463  Bucket orig_maxbucket;
464  Bucket cur_maxbucket;
465  Bucket cur_bucket;
466  Buffer metabuf = InvalidBuffer;
467  HashMetaPage metap;
468  HashMetaPage cachedmetap;
469 
470  tuples_removed = 0;
471  num_index_tuples = 0;
472 
473  /*
474  * We need a copy of the metapage so that we can use its hashm_spares[]
475  * values to compute bucket page addresses, but a cached copy should be
476  * good enough. (If not, we'll detect that further down and refresh the
477  * cache as necessary.)
478  */
479  cachedmetap = _hash_getcachedmetap(rel, &metabuf, false);
480  Assert(cachedmetap != NULL);
481 
482  orig_maxbucket = cachedmetap->hashm_maxbucket;
483  orig_ntuples = cachedmetap->hashm_ntuples;
484 
485  /* Scan the buckets that we know exist */
486  cur_bucket = 0;
487  cur_maxbucket = orig_maxbucket;
488 
489 loop_top:
490  while (cur_bucket <= cur_maxbucket)
491  {
492  BlockNumber bucket_blkno;
493  BlockNumber blkno;
494  Buffer bucket_buf;
495  Buffer buf;
496  HashPageOpaque bucket_opaque;
497  Page page;
498  bool split_cleanup = false;
499 
500  /* Get address of bucket's start page */
501  bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket);
502 
503  blkno = bucket_blkno;
504 
505  /*
506  * We need to acquire a cleanup lock on the primary bucket page to out
507  * wait concurrent scans before deleting the dead tuples.
508  */
509  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy);
511  _hash_checkpage(rel, buf, LH_BUCKET_PAGE);
512 
513  page = BufferGetPage(buf);
514  bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
515 
516  /*
517  * If the bucket contains tuples that are moved by split, then we need
518  * to delete such tuples. We can't delete such tuples if the split
519  * operation on bucket is not finished as those are needed by scans.
520  */
521  if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
522  H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
523  {
524  split_cleanup = true;
525 
526  /*
527  * This bucket might have been split since we last held a lock on
528  * the metapage. If so, hashm_maxbucket, hashm_highmask and
529  * hashm_lowmask might be old enough to cause us to fail to remove
530  * tuples left behind by the most recent split. To prevent that,
531  * now that the primary page of the target bucket has been locked
532  * (and thus can't be further split), check whether we need to
533  * update our cached metapage data.
534  */
535  Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber);
536  if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
537  {
538  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
539  Assert(cachedmetap != NULL);
540  }
541  }
542 
543  bucket_buf = buf;
544 
545  hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
546  cachedmetap->hashm_maxbucket,
547  cachedmetap->hashm_highmask,
548  cachedmetap->hashm_lowmask, &tuples_removed,
549  &num_index_tuples, split_cleanup,
550  callback, callback_state);
551 
552  _hash_dropbuf(rel, bucket_buf);
553 
554  /* Advance to next bucket */
555  cur_bucket++;
556  }
557 
558  if (BufferIsInvalid(metabuf))
560 
561  /* Write-lock metapage and check for split since we started */
563  metap = HashPageGetMeta(BufferGetPage(metabuf));
564 
565  if (cur_maxbucket != metap->hashm_maxbucket)
566  {
567  /* There's been a split, so process the additional bucket(s) */
568  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
569  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
570  Assert(cachedmetap != NULL);
571  cur_maxbucket = cachedmetap->hashm_maxbucket;
572  goto loop_top;
573  }
574 
575  /* Okay, we're really done. Update tuple count in metapage. */
577 
578  if (orig_maxbucket == metap->hashm_maxbucket &&
579  orig_ntuples == metap->hashm_ntuples)
580  {
581  /*
582  * No one has split or inserted anything since start of scan, so
583  * believe our count as gospel.
584  */
585  metap->hashm_ntuples = num_index_tuples;
586  }
587  else
588  {
589  /*
590  * Otherwise, our count is untrustworthy since we may have
591  * double-scanned tuples in split buckets. Proceed by dead-reckoning.
592  * (Note: we still return estimated_count = false, because using this
593  * count is better than not updating reltuples at all.)
594  */
595  if (metap->hashm_ntuples > tuples_removed)
596  metap->hashm_ntuples -= tuples_removed;
597  else
598  metap->hashm_ntuples = 0;
599  num_index_tuples = metap->hashm_ntuples;
600  }
601 
602  MarkBufferDirty(metabuf);
603 
604  /* XLOG stuff */
605  if (RelationNeedsWAL(rel))
606  {
608  XLogRecPtr recptr;
609 
610  xlrec.ntuples = metap->hashm_ntuples;
611 
612  XLogBeginInsert();
613  XLogRegisterData((char *) &xlrec, SizeOfHashUpdateMetaPage);
614 
615  XLogRegisterBuffer(0, metabuf, REGBUF_STANDARD);
616 
617  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE);
618  PageSetLSN(BufferGetPage(metabuf), recptr);
619  }
620 
622 
623  _hash_relbuf(rel, metabuf);
624 
625  /* return statistics */
626  if (stats == NULL)
628  stats->estimated_count = false;
629  stats->num_index_tuples = num_index_tuples;
630  stats->tuples_removed += tuples_removed;
631  /* hashvacuumcleanup will fill in num_pages */
632 
633  return stats;
634 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3779
double tuples_removed
Definition: genam.h:78
#define LH_META_PAGE
Definition: hash.h:57
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:652
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
BufferAccessStrategy strategy
Definition: genam.h:52
uint32 hashm_highmask
Definition: hash.h:253
#define InvalidBuffer
Definition: buf.h:25
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
Relation index
Definition: genam.h:46
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:681
uint32 BlockNumber
Definition: block.h:31
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:276
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:69
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
uint32 hashm_lowmask
Definition: hash.h:254
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:39
uint32 Bucket
Definition: hash.h:35
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:88
BlockNumber hasho_prevblkno
Definition: hash.h:79
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
#define BufferIsInvalid(buffer)
Definition: buf.h:31
static char * buf
Definition: pg_test_fsync.c:67
#define HASH_NOLOCK
Definition: hash.h:339
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define SizeOfHashUpdateMetaPage
Definition: hash_xlog.h:206
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:211
void * palloc0(Size size)
Definition: mcxt.c:980
#define HASH_METAPAGE
Definition: hash.h:196
double hashm_ntuples
Definition: hash.h:246
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1497
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:265
#define LH_BUCKET_PAGE
Definition: hash.h:55
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:89
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
#define PageGetSpecialPointer(page)
Definition: bufpage.h:326
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:86
#define RelationNeedsWAL(relation)
Definition: rel.h:562
uint32 hashm_maxbucket
Definition: hash.h:252
#define HashPageGetMeta(page)
Definition: hash.h:321
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define XLOG_HASH_UPDATE_META_PAGE
Definition: hash_xlog.h:43
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
double num_index_tuples
Definition: genam.h:77
int Buffer
Definition: buf.h:23
bool estimated_count
Definition: genam.h:76
Pointer Page
Definition: bufpage.h:78

◆ hashendscan()

void hashendscan ( IndexScanDesc  scan)

Definition at line 425 of file hash.c.

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosIsValid, IndexScanDescData::indexRelation, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, and pfree().

Referenced by hashhandler().

426 {
427  HashScanOpaque so = (HashScanOpaque) scan->opaque;
428  Relation rel = scan->indexRelation;
429 
430  if (HashScanPosIsValid(so->currPos))
431  {
432  /* Before leaving current page, deal with any killed items */
433  if (so->numKilled > 0)
434  _hash_kill_items(scan);
435  }
436 
437  _hash_dropscanbuf(rel, so);
438 
439  if (so->killedItems != NULL)
440  pfree(so->killedItems);
441  pfree(so);
442  scan->opaque = NULL;
443 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:190
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:288
#define HashScanPosIsValid(scanpos)
Definition: hash.h:135
Relation indexRelation
Definition: relscan.h:103
int * killedItems
Definition: hash.h:180
void pfree(void *pointer)
Definition: mcxt.c:1056
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:537
HashScanPosData currPos
Definition: hash.h:187

◆ hashgetbitmap()

int64 hashgetbitmap ( IndexScanDesc  scan,
TIDBitmap tbm 
)

Definition at line 329 of file hash.c.

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, ForwardScanDirection, HashScanPosData::itemIndex, HashScanPosData::items, IndexScanDescData::opaque, and tbm_add_tuples().

Referenced by hashhandler().

330 {
331  HashScanOpaque so = (HashScanOpaque) scan->opaque;
332  bool res;
333  int64 ntids = 0;
334  HashScanPosItem *currItem;
335 
336  res = _hash_first(scan, ForwardScanDirection);
337 
338  while (res)
339  {
340  currItem = &so->currPos.items[so->currPos.itemIndex];
341 
342  /*
343  * _hash_first and _hash_next handle eliminate dead index entries
344  * whenever scan->ignore_killed_tuples is true. Therefore, there's
345  * nothing to do here except add the results to the TIDBitmap.
346  */
347  tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
348  ntids++;
349 
350  res = _hash_next(scan, ForwardScanDirection);
351  }
352 
353  return ntids;
354 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:190
void tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids, bool recheck)
Definition: tidbitmap.c:376
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:292
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:48
HashScanPosData currPos
Definition: hash.h:187
int itemIndex
Definition: hash.h:123
HashScanPosItem items[MaxIndexTuplesPerPage]
Definition: hash.h:125

◆ hashgettuple()

bool hashgettuple ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 277 of file hash.c.

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, HashScanPosIsValid, HashScanPosData::itemIndex, IndexScanDescData::kill_prior_tuple, HashScanOpaqueData::killedItems, MaxIndexTuplesPerPage, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), and IndexScanDescData::xs_recheck.

Referenced by hashhandler().

278 {
279  HashScanOpaque so = (HashScanOpaque) scan->opaque;
280  bool res;
281 
282  /* Hash indexes are always lossy since we store only the hash code */
283  scan->xs_recheck = true;
284 
285  /*
286  * If we've already initialized this scan, we can just advance it in the
287  * appropriate direction. If we haven't done so yet, we call a routine to
288  * get the first item in the scan.
289  */
290  if (!HashScanPosIsValid(so->currPos))
291  res = _hash_first(scan, dir);
292  else
293  {
294  /*
295  * Check to see if we should kill the previously-fetched tuple.
296  */
297  if (scan->kill_prior_tuple)
298  {
299  /*
300  * Yes, so remember it for later. (We'll deal with all such tuples
301  * at once right after leaving the index page or at end of scan.)
302  * In case if caller reverses the indexscan direction it is quite
303  * possible that the same item might get entered multiple times.
304  * But, we don't detect that; instead, we just forget any excess
305  * entries.
306  */
307  if (so->killedItems == NULL)
308  so->killedItems = (int *)
309  palloc(MaxIndexTuplesPerPage * sizeof(int));
310 
312  so->killedItems[so->numKilled++] = so->currPos.itemIndex;
313  }
314 
315  /*
316  * Now continue the scan.
317  */
318  res = _hash_next(scan, dir);
319  }
320 
321  return res;
322 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:190
#define HashScanPosIsValid(scanpos)
Definition: hash.h:135
int * killedItems
Definition: hash.h:180
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:292
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:48
HashScanPosData currPos
Definition: hash.h:187
#define MaxIndexTuplesPerPage
Definition: itup.h:145
void * palloc(Size size)
Definition: mcxt.c:949
bool kill_prior_tuple
Definition: relscan.h:113
int itemIndex
Definition: hash.h:123

◆ hashhandler()

Datum hashhandler ( PG_FUNCTION_ARGS  )

Definition at line 56 of file hash.c.

References IndexAmRoutine::ambeginscan, IndexAmRoutine::ambuild, IndexAmRoutine::ambuildempty, IndexAmRoutine::ambuildphasename, IndexAmRoutine::ambulkdelete, IndexAmRoutine::amcanbackward, IndexAmRoutine::amcaninclude, IndexAmRoutine::amcanmulticol, IndexAmRoutine::amcanorder, IndexAmRoutine::amcanorderbyop, IndexAmRoutine::amcanparallel, IndexAmRoutine::amcanreturn, IndexAmRoutine::amcanunique, IndexAmRoutine::amclusterable, IndexAmRoutine::amcostestimate, IndexAmRoutine::amendscan, IndexAmRoutine::amestimateparallelscan, IndexAmRoutine::amgetbitmap, IndexAmRoutine::amgettuple, IndexAmRoutine::aminitparallelscan, IndexAmRoutine::aminsert, IndexAmRoutine::amkeytype, IndexAmRoutine::ammarkpos, IndexAmRoutine::amoptionalkey, IndexAmRoutine::amoptions, IndexAmRoutine::amoptsprocnum, IndexAmRoutine::amparallelrescan, IndexAmRoutine::amparallelvacuumoptions, IndexAmRoutine::ampredlocks, IndexAmRoutine::amproperty, IndexAmRoutine::amrescan, IndexAmRoutine::amrestrpos, IndexAmRoutine::amsearcharray, IndexAmRoutine::amsearchnulls, IndexAmRoutine::amstorage, IndexAmRoutine::amstrategies, IndexAmRoutine::amsupport, IndexAmRoutine::amusemaintenanceworkmem, IndexAmRoutine::amvacuumcleanup, IndexAmRoutine::amvalidate, hashbeginscan(), hashbuild(), hashbuildempty(), hashbulkdelete(), hashcostestimate(), hashendscan(), hashgetbitmap(), hashgettuple(), hashinsert(), HASHNProcs, hashoptions(), HASHOPTIONS_PROC, hashrescan(), hashvacuumcleanup(), hashvalidate(), HTMaxStrategyNumber, makeNode, PG_RETURN_POINTER, and VACUUM_OPTION_PARALLEL_BULKDEL.

57 {
59 
60  amroutine->amstrategies = HTMaxStrategyNumber;
61  amroutine->amsupport = HASHNProcs;
62  amroutine->amoptsprocnum = HASHOPTIONS_PROC;
63  amroutine->amcanorder = false;
64  amroutine->amcanorderbyop = false;
65  amroutine->amcanbackward = true;
66  amroutine->amcanunique = false;
67  amroutine->amcanmulticol = false;
68  amroutine->amoptionalkey = false;
69  amroutine->amsearcharray = false;
70  amroutine->amsearchnulls = false;
71  amroutine->amstorage = false;
72  amroutine->amclusterable = false;
73  amroutine->ampredlocks = true;
74  amroutine->amcanparallel = false;
75  amroutine->amcaninclude = false;
76  amroutine->amusemaintenanceworkmem = false;
77  amroutine->amparallelvacuumoptions =
79  amroutine->amkeytype = INT4OID;
80 
81  amroutine->ambuild = hashbuild;
82  amroutine->ambuildempty = hashbuildempty;
83  amroutine->aminsert = hashinsert;
84  amroutine->ambulkdelete = hashbulkdelete;
86  amroutine->amcanreturn = NULL;
87  amroutine->amcostestimate = hashcostestimate;
88  amroutine->amoptions = hashoptions;
89  amroutine->amproperty = NULL;
90  amroutine->ambuildphasename = NULL;
91  amroutine->amvalidate = hashvalidate;
92  amroutine->ambeginscan = hashbeginscan;
93  amroutine->amrescan = hashrescan;
94  amroutine->amgettuple = hashgettuple;
95  amroutine->amgetbitmap = hashgetbitmap;
96  amroutine->amendscan = hashendscan;
97  amroutine->ammarkpos = NULL;
98  amroutine->amrestrpos = NULL;
99  amroutine->amestimateparallelscan = NULL;
100  amroutine->aminitparallelscan = NULL;
101  amroutine->amparallelrescan = NULL;
102 
103  PG_RETURN_POINTER(amroutine);
104 }
ambeginscan_function ambeginscan
Definition: amapi.h:227
uint8 amparallelvacuumoptions
Definition: amapi.h:205
bytea * hashoptions(Datum reloptions, bool validate)
Definition: hashutil.c:276
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:360
ambulkdelete_function ambulkdelete
Definition: amapi.h:219
bool hashgettuple(IndexScanDesc scan, ScanDirection dir)
Definition: hash.c:277
bool amcanmulticol
Definition: amapi.h:185
uint16 amsupport
Definition: amapi.h:173
#define HTMaxStrategyNumber
Definition: stratnum.h:43
#define HASHNProcs
Definition: hash.h:356
amgettuple_function amgettuple
Definition: amapi.h:229
bool amcanorderbyop
Definition: amapi.h:179
IndexBulkDeleteResult * hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:456
amproperty_function amproperty
Definition: amapi.h:224
amparallelrescan_function amparallelrescan
Definition: amapi.h:238
void hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:6438
bool amstorage
Definition: amapi.h:193
bool ampredlocks
Definition: amapi.h:197
IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys)
Definition: hash.c:361
aminsert_function aminsert
Definition: amapi.h:218
bool hashinsert(Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, IndexInfo *indexInfo)
Definition: hash.c:246
Oid amkeytype
Definition: amapi.h:207
bool amoptionalkey
Definition: amapi.h:187
amvalidate_function amvalidate
Definition: amapi.h:226
void hashbuildempty(Relation index)
Definition: hash.c:196
amgetbitmap_function amgetbitmap
Definition: amapi.h:230
ambuild_function ambuild
Definition: amapi.h:216
amoptions_function amoptions
Definition: amapi.h:223
IndexBuildResult * hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
Definition: hash.c:110
bool amcaninclude
Definition: amapi.h:201
amcostestimate_function amcostestimate
Definition: amapi.h:222
bool amcanunique
Definition: amapi.h:183
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:220
amendscan_function amendscan
Definition: amapi.h:231
bool amcanbackward
Definition: amapi.h:181
amrescan_function amrescan
Definition: amapi.h:228
bool amcanparallel
Definition: amapi.h:199
int64 hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: hash.c:329
bool amsearchnulls
Definition: amapi.h:191
void hashendscan(IndexScanDesc scan)
Definition: hash.c:425
bool amclusterable
Definition: amapi.h:195
#define HASHOPTIONS_PROC
Definition: hash.h:355
bool amsearcharray
Definition: amapi.h:189
bool amusemaintenanceworkmem
Definition: amapi.h:203
IndexBulkDeleteResult * hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: hash.c:642
#define makeNode(_type_)
Definition: nodes.h:577
ammarkpos_function ammarkpos
Definition: amapi.h:232
bool amcanorder
Definition: amapi.h:177
ambuildphasename_function ambuildphasename
Definition: amapi.h:225
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:45
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:236
bool hashvalidate(Oid opclassoid)
Definition: hashvalidate.c:44
uint16 amstrategies
Definition: amapi.h:171
uint16 amoptsprocnum
Definition: amapi.h:175
ambuildempty_function ambuildempty
Definition: amapi.h:217
amcanreturn_function amcanreturn
Definition: amapi.h:221
void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
Definition: hash.c:391
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:237
amrestrpos_function amrestrpos
Definition: amapi.h:233

◆ hashinsert()

bool hashinsert ( Relation  rel,
Datum values,
bool isnull,
ItemPointer  ht_ctid,
Relation  heapRel,
IndexUniqueCheck  checkUnique,
IndexInfo indexInfo 
)

Definition at line 246 of file hash.c.

References _hash_convert_tuple(), _hash_doinsert(), index_form_tuple(), pfree(), RelationGetDescr, and IndexTupleData::t_tid.

Referenced by hashhandler().

250 {
251  Datum index_values[1];
252  bool index_isnull[1];
253  IndexTuple itup;
254 
255  /* convert data to a hash key; on failure, do not insert anything */
256  if (!_hash_convert_tuple(rel,
257  values, isnull,
258  index_values, index_isnull))
259  return false;
260 
261  /* form an index tuple and point it at the heap tuple */
262  itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull);
263  itup->t_tid = *ht_ctid;
264 
265  _hash_doinsert(rel, itup, heapRel);
266 
267  pfree(itup);
268 
269  return false;
270 }
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
Definition: hashinsert.c:36
#define RelationGetDescr(relation)
Definition: rel.h:482
ItemPointerData t_tid
Definition: itup.h:37
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:47
void pfree(void *pointer)
Definition: mcxt.c:1056
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:319
uintptr_t Datum
Definition: postgres.h:367
static Datum values[MAXATTR]
Definition: bootstrap.c:167

◆ hashrescan()

void hashrescan ( IndexScanDesc  scan,
ScanKey  scankey,
int  nscankeys,
ScanKey  orderbys,
int  norderbys 
)

Definition at line 391 of file hash.c.

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanPosIsValid, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, IndexScanDescData::indexRelation, IndexScanDescData::keyData, IndexScanDescData::numberOfKeys, HashScanOpaqueData::numKilled, and IndexScanDescData::opaque.

Referenced by hashhandler().

393 {
394  HashScanOpaque so = (HashScanOpaque) scan->opaque;
395  Relation rel = scan->indexRelation;
396 
397  if (HashScanPosIsValid(so->currPos))
398  {
399  /* Before leaving current page, deal with any killed items */
400  if (so->numKilled > 0)
401  _hash_kill_items(scan);
402  }
403 
404  _hash_dropscanbuf(rel, so);
405 
406  /* set position invalid (this will cause _hash_first call) */
408 
409  /* Update scan key, if a new one is given */
410  if (scankey && scan->numberOfKeys > 0)
411  {
412  memmove(scan->keyData,
413  scankey,
414  scan->numberOfKeys * sizeof(ScanKeyData));
415  }
416 
417  so->hashso_buc_populated = false;
418  so->hashso_buc_split = false;
419 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:190
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:288
#define HashScanPosIsValid(scanpos)
Definition: hash.h:135
Relation indexRelation
Definition: relscan.h:103
#define HashScanPosInvalidate(scanpos)
Definition: hash.h:142
bool hashso_buc_populated
Definition: hash.h:172
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:537
struct ScanKeyData * keyData
Definition: relscan.h:107
HashScanPosData currPos
Definition: hash.h:187
bool hashso_buc_split
Definition: hash.h:178

◆ hashvacuumcleanup()

IndexBulkDeleteResult* hashvacuumcleanup ( IndexVacuumInfo info,
IndexBulkDeleteResult stats 
)

Definition at line 642 of file hash.c.

References IndexVacuumInfo::index, IndexBulkDeleteResult::num_pages, and RelationGetNumberOfBlocks.

Referenced by hashhandler().

643 {
644  Relation rel = info->index;
645  BlockNumber num_pages;
646 
647  /* If hashbulkdelete wasn't called, return NULL signifying no change */
648  /* Note: this covers the analyze_only case too */
649  if (stats == NULL)
650  return NULL;
651 
652  /* update statistics */
653  num_pages = RelationGetNumberOfBlocks(rel);
654  stats->num_pages = num_pages;
655 
656  return stats;
657 }
Relation index
Definition: genam.h:46
uint32 BlockNumber
Definition: block.h:31
BlockNumber num_pages
Definition: genam.h:74
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:211