PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
hash.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/relscan.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "optimizer/plancat.h"
#include "utils/builtins.h"
#include "utils/index_selfuncs.h"
#include "utils/rel.h"
Include dependency graph for hash.c:

Go to the source code of this file.

Data Structures

struct  HashBuildState
 

Functions

static void hashbuildCallback (Relation index, HeapTuple htup, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 
Datum hashhandler (PG_FUNCTION_ARGS)
 
IndexBuildResult * hashbuild (Relation heap, Relation index, IndexInfo *indexInfo)
 
void hashbuildempty (Relation index)
 
bool hashinsert (Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, IndexInfo *indexInfo)
 
bool hashgettuple (IndexScanDesc scan, ScanDirection dir)
 
int64 hashgetbitmap (IndexScanDesc scan, TIDBitmap *tbm)
 
IndexScanDesc hashbeginscan (Relation rel, int nkeys, int norderbys)
 
void hashrescan (IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
 
void hashendscan (IndexScanDesc scan)
 
IndexBulkDeleteResult * hashbulkdelete (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
IndexBulkDeleteResult * hashvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
void hashbucketcleanup (Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
 

Function Documentation

IndexScanDesc hashbeginscan ( Relation  rel,
int  nkeys,
int  norderbys 
)

Definition at line 440 of file hash.c.

References Assert, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_curpos, HashScanOpaqueData::hashso_heappos, HashScanOpaqueData::hashso_split_bucket_buf, InvalidBuffer, ItemPointerSetInvalid, HashScanOpaqueData::killedItems, NULL, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), and RelationGetIndexScan().

Referenced by hashhandler().

441 {
442  IndexScanDesc scan;
443  HashScanOpaque so;
444 
445  /* no order by operators allowed */
446  Assert(norderbys == 0);
447 
448  scan = RelationGetIndexScan(rel, nkeys, norderbys);
449 
450  so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
454  /* set position invalid (this will cause _hash_first call) */
457 
458  so->hashso_buc_populated = false;
459  so->hashso_buc_split = false;
460 
461  so->killedItems = NULL;
462  so->numKilled = 0;
463 
464  scan->opaque = so;
465 
466  return scan;
467 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:152
#define InvalidBuffer
Definition: buf.h:25
HashScanPosItem * killedItems
Definition: hash.h:148
Buffer hashso_bucket_buf
Definition: hash.h:124
bool hashso_buc_populated
Definition: hash.h:140
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:676
ItemPointerData hashso_curpos
Definition: hash.h:134
bool hashso_buc_split
Definition: hash.h:146
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:150
void * palloc(Size size)
Definition: mcxt.c:849
Buffer hashso_split_bucket_buf
Definition: hash.h:131
IndexScanDesc RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
Definition: genam.c:78
ItemPointerData hashso_heappos
Definition: hash.h:137
Buffer hashso_curbuf
Definition: hash.h:121
void hashbucketcleanup ( Relation  rel,
Bucket  cur_bucket,
Buffer  bucket_buf,
BlockNumber  bucket_blkno,
BufferAccessStrategy  bstrategy,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask,
double *  tuples_removed,
double *  num_index_tuples,
bool  split_cleanup,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 773 of file hash.c.

References _hash_get_indextuple_hashkey(), _hash_get_newbucket_from_oldbucket(), _hash_getbuf_with_strategy(), _hash_hashkey2bucket(), _hash_relbuf(), _hash_squeezebucket(), Assert, GistBDItem::blkno, BlockNumberIsValid, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, callback(), xl_hash_delete::clear_dead_marking, END_CRIT_SECTION, FirstOffsetNumber, H_HAS_DEAD_TUPLES, HASH_WRITE, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, InvalidBucket, xl_hash_delete::is_primary_bucket_page, IsBufferCleanupOK(), LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LH_PAGE_HAS_DEAD_TUPLES, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, PageIndexMultiDelete(), PageSetLSN, PG_USED_FOR_ASSERTS_ONLY, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashDelete, START_CRIT_SECTION, IndexTupleData::t_tid, vacuum_delay_point(), XLOG_HASH_DELETE, XLOG_HASH_SPLIT_CLEANUP, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), _hash_splitbucket(), and hashbulkdelete().

779 {
780  BlockNumber blkno;
781  Buffer buf;
783  bool bucket_dirty = false;
784 
785  blkno = bucket_blkno;
786  buf = bucket_buf;
787 
788  if (split_cleanup)
789  new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,
790  lowmask, maxbucket);
791 
792  /* Scan each page in bucket */
793  for (;;)
794  {
795  HashPageOpaque opaque;
796  OffsetNumber offno;
797  OffsetNumber maxoffno;
798  Buffer next_buf;
799  Page page;
800  OffsetNumber deletable[MaxOffsetNumber];
801  int ndeletable = 0;
802  bool retain_pin = false;
803  bool clear_dead_marking = false;
804 
806 
807  page = BufferGetPage(buf);
808  opaque = (HashPageOpaque) PageGetSpecialPointer(page);
809 
810  /* Scan each tuple in page */
811  maxoffno = PageGetMaxOffsetNumber(page);
812  for (offno = FirstOffsetNumber;
813  offno <= maxoffno;
814  offno = OffsetNumberNext(offno))
815  {
816  ItemPointer htup;
817  IndexTuple itup;
818  Bucket bucket;
819  bool kill_tuple = false;
820 
821  itup = (IndexTuple) PageGetItem(page,
822  PageGetItemId(page, offno));
823  htup = &(itup->t_tid);
824 
825  /*
826  * To remove the dead tuples, we strictly want to rely on results
827  * of callback function. refer btvacuumpage for detailed reason.
828  */
829  if (callback && callback(htup, callback_state))
830  {
831  kill_tuple = true;
832  if (tuples_removed)
833  *tuples_removed += 1;
834  }
835  else if (split_cleanup)
836  {
837  /* delete the tuples that are moved by split. */
839  maxbucket,
840  highmask,
841  lowmask);
842  /* mark the item for deletion */
843  if (bucket != cur_bucket)
844  {
845  /*
846  * We expect tuples to either belong to current bucket or
847  * new_bucket. This is ensured because we don't allow
848  * further splits from bucket that contains garbage. See
849  * comments in _hash_expandtable.
850  */
851  Assert(bucket == new_bucket);
852  kill_tuple = true;
853  }
854  }
855 
856  if (kill_tuple)
857  {
858  /* mark the item for deletion */
859  deletable[ndeletable++] = offno;
860  }
861  else
862  {
863  /* we're keeping it, so count it */
864  if (num_index_tuples)
865  *num_index_tuples += 1;
866  }
867  }
868 
869  /* retain the pin on primary bucket page till end of bucket scan */
870  if (blkno == bucket_blkno)
871  retain_pin = true;
872  else
873  retain_pin = false;
874 
875  blkno = opaque->hasho_nextblkno;
876 
877  /*
878  * Apply deletions, advance to next page and write page if needed.
879  */
880  if (ndeletable > 0)
881  {
882  /* No ereport(ERROR) until changes are logged */
884 
885  PageIndexMultiDelete(page, deletable, ndeletable);
886  bucket_dirty = true;
887 
888  /*
889  * Let us mark the page as clean if vacuum removes the DEAD tuples
890  * from an index page. We do this by clearing
891  * LH_PAGE_HAS_DEAD_TUPLES flag.
892  */
893  if (tuples_removed && *tuples_removed > 0 &&
894  H_HAS_DEAD_TUPLES(opaque))
895  {
897  clear_dead_marking = true;
898  }
899 
900  MarkBufferDirty(buf);
901 
902  /* XLOG stuff */
903  if (RelationNeedsWAL(rel))
904  {
905  xl_hash_delete xlrec;
906  XLogRecPtr recptr;
907 
908  xlrec.clear_dead_marking = clear_dead_marking;
909  xlrec.is_primary_bucket_page = (buf == bucket_buf) ? true : false;
910 
911  XLogBeginInsert();
912  XLogRegisterData((char *) &xlrec, SizeOfHashDelete);
913 
914  /*
915  * bucket buffer needs to be registered to ensure that we can
916  * acquire a cleanup lock on it during replay.
917  */
918  if (!xlrec.is_primary_bucket_page)
920 
922  XLogRegisterBufData(1, (char *) deletable,
923  ndeletable * sizeof(OffsetNumber));
924 
925  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE);
926  PageSetLSN(BufferGetPage(buf), recptr);
927  }
928 
930  }
931 
932  /* bail out if there are no more pages to scan. */
933  if (!BlockNumberIsValid(blkno))
934  break;
935 
936  next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
938  bstrategy);
939 
940  /*
941  * release the lock on previous page after acquiring the lock on next
942  * page
943  */
944  if (retain_pin)
946  else
947  _hash_relbuf(rel, buf);
948 
949  buf = next_buf;
950  }
951 
952  /*
953  * lock the bucket page to clear the garbage flag and squeeze the bucket.
954  * if the current buffer is same as bucket buffer, then we already have
955  * lock on bucket page.
956  */
957  if (buf != bucket_buf)
958  {
959  _hash_relbuf(rel, buf);
960  LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
961  }
962 
963  /*
964  * Clear the garbage flag from bucket after deleting the tuples that are
965  * moved by split. We purposefully clear the flag before squeeze bucket,
966  * so that after restart, vacuum shouldn't again try to delete the moved
967  * by split tuples.
968  */
969  if (split_cleanup)
970  {
971  HashPageOpaque bucket_opaque;
972  Page page;
973 
974  page = BufferGetPage(bucket_buf);
975  bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
976 
977  /* No ereport(ERROR) until changes are logged */
979 
980  bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
981  MarkBufferDirty(bucket_buf);
982 
983  /* XLOG stuff */
984  if (RelationNeedsWAL(rel))
985  {
986  XLogRecPtr recptr;
987 
988  XLogBeginInsert();
989  XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD);
990 
991  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_CLEANUP);
992  PageSetLSN(page, recptr);
993  }
994 
996  }
997 
998  /*
999  * If we have deleted anything, try to compact free space. For squeezing
1000  * the bucket, we must have a cleanup lock, else it can impact the
1001  * ordering of tuples for a scan that has started before it.
1002  */
1003  if (bucket_dirty && IsBufferCleanupOK(bucket_buf))
1004  _hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
1005  bstrategy);
1006  else
1007  LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
1008 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:125
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
#define SizeOfHashDelete
Definition: hash_xlog.h:205
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define MaxOffsetNumber
Definition: off.h:28
Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
Definition: hashpage.c:247
ItemPointerData t_tid
Definition: itup.h:37
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_HASH_SPLIT_CLEANUP
Definition: hash_xlog.h:40
bool clear_dead_marking
Definition: hash_xlog.h:199
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:59
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
uint32 Bucket
Definition: hash.h:34
#define InvalidBucket
Definition: hash.h:36
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:299
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
bool is_primary_bucket_page
Definition: hash_xlog.h:201
static char * buf
Definition: pg_test_fsync.c:66
#define HASH_WRITE
Definition: hash.h:282
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
#define REGBUF_STANDARD
Definition: xloginsert.h:34
Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, uint32 lowmask, uint32 maxbucket)
Definition: hashutil.c:502
#define XLOG_HASH_DELETE
Definition: hash_xlog.h:39
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:3774
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define LH_OVERFLOW_PAGE
Definition: hash.h:53
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:274
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:676
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:836
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
#define PageGetSpecialPointer(page)
Definition: bufpage.h:322
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:31
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:85
void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Buffer bucket_buf, BufferAccessStrategy bstrategy)
Definition: hashovfl.c:804
#define RelationNeedsWAL(relation)
Definition: rel.h:505
uint16 hasho_flag
Definition: hash.h:81
BlockNumber hasho_nextblkno
Definition: hash.h:79
void vacuum_delay_point(void)
Definition: vacuum.c:1560
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:991
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define H_HAS_DEAD_TUPLES(opaque)
Definition: hash.h:90
#define LH_PAGE_HAS_DEAD_TUPLES
Definition: hash.h:60
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
IndexBuildResult* hashbuild ( Relation  heap,
Relation  index,
IndexInfo indexInfo 
)

Definition at line 103 of file hash.c.

References _h_indexbuild(), _h_spooldestroy(), _h_spoolinit(), _hash_init(), elog, ERROR, estimate_rel_size(), hashbuildCallback(), IndexBuildResult::heap_tuples, HashBuildState::heapRel, IndexBuildResult::index_tuples, IndexBuildHeapScan(), HashBuildState::indtuples, MAIN_FORKNUM, maintenance_work_mem, Min, NBuffers, NLocBuffer, NULL, palloc(), RelationData::rd_rel, RelationGetNumberOfBlocks, RelationGetRelationName, RELPERSISTENCE_TEMP, result, and HashBuildState::spool.

Referenced by hashhandler().

104 {
106  BlockNumber relpages;
107  double reltuples;
108  double allvisfrac;
109  uint32 num_buckets;
110  long sort_threshold;
111  HashBuildState buildstate;
112 
113  /*
114  * We expect to be called exactly once for any index relation. If that's
115  * not the case, big trouble's what we have.
116  */
117  if (RelationGetNumberOfBlocks(index) != 0)
118  elog(ERROR, "index \"%s\" already contains data",
119  RelationGetRelationName(index));
120 
121  /* Estimate the number of rows currently present in the table */
122  estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
123 
124  /* Initialize the hash index metadata page and initial buckets */
125  num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
126 
127  /*
128  * If we just insert the tuples into the index in scan order, then
129  * (assuming their hash codes are pretty random) there will be no locality
130  * of access to the index, and if the index is bigger than available RAM
131  * then we'll thrash horribly. To prevent that scenario, we can sort the
132  * tuples by (expected) bucket number. However, such a sort is useless
133  * overhead when the index does fit in RAM. We choose to sort if the
134  * initial index size exceeds maintenance_work_mem, or the number of
135  * buffers usable for the index, whichever is less. (Limiting by the
136  * number of buffers should reduce thrashing between PG buffers and kernel
137  * buffers, which seems useful even if no physical I/O results. Limiting
138  * by maintenance_work_mem is useful to allow easy testing of the sort
139  * code path, and may be useful to DBAs as an additional control knob.)
140  *
141  * NOTE: this test will need adjustment if a bucket is ever different from
142  * one page. Also, "initial index size" accounting does not include the
143  * metapage, nor the first bitmap page.
144  */
145  sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
146  if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
147  sort_threshold = Min(sort_threshold, NBuffers);
148  else
149  sort_threshold = Min(sort_threshold, NLocBuffer);
150 
151  if (num_buckets >= (uint32) sort_threshold)
152  buildstate.spool = _h_spoolinit(heap, index, num_buckets);
153  else
154  buildstate.spool = NULL;
155 
156  /* prepare to build the index */
157  buildstate.indtuples = 0;
158  buildstate.heapRel = heap;
159 
160  /* do the heap scan */
161  reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
162  hashbuildCallback, (void *) &buildstate);
163 
164  if (buildstate.spool)
165  {
166  /* sort the tuples and insert them into the index */
167  _h_indexbuild(buildstate.spool, buildstate.heapRel);
168  _h_spooldestroy(buildstate.spool);
169  }
170 
171  /*
172  * Return statistics
173  */
174  result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
175 
176  result->heap_tuples = reltuples;
177  result->index_tuples = buildstate.indtuples;
178 
179  return result;
180 }
static void hashbuildCallback(Relation index, HeapTuple htup, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
Definition: hash.c:195
#define Min(x, y)
Definition: c.h:807
double indtuples
Definition: hash.c:38
return result
Definition: formatting.c:1633
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:114
#define ERROR
Definition: elog.h:43
int NLocBuffer
Definition: localbuf.c:41
void _h_indexbuild(HSpool *hspool, Relation heapRel)
Definition: hashsort.c:115
#define RelationGetRelationName(relation)
Definition: rel.h:436
unsigned int uint32
Definition: c.h:268
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:908
HSpool * spool
Definition: hash.c:37
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
int maintenance_work_mem
Definition: globals.c:114
Relation heapRel
Definition: hash.c:39
#define NULL
Definition: c.h:229
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:335
void _h_spooldestroy(HSpool *hspool)
Definition: hashsort.c:94
double IndexBuildHeapScan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, IndexBuildCallback callback, void *callback_state)
Definition: index.c:2175
void * palloc(Size size)
Definition: mcxt.c:849
int NBuffers
Definition: globals.c:123
#define elog
Definition: elog.h:219
HSpool * _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
Definition: hashsort.c:56
#define RELPERSISTENCE_TEMP
Definition: pg_class.h:172
double index_tuples
Definition: genam.h:33
double heap_tuples
Definition: genam.h:32
static void hashbuildCallback ( Relation  index,
HeapTuple  htup,
Datum values,
bool isnull,
bool  tupleIsAlive,
void *  state 
)
static

Definition at line 195 of file hash.c.

References _h_spool(), _hash_convert_tuple(), _hash_doinsert(), HashBuildState::heapRel, index_form_tuple(), HashBuildState::indtuples, pfree(), RelationGetDescr, HashBuildState::spool, HeapTupleData::t_self, and IndexTupleData::t_tid.

Referenced by hashbuild().

201 {
202  HashBuildState *buildstate = (HashBuildState *) state;
203  Datum index_values[1];
204  bool index_isnull[1];
205  IndexTuple itup;
206 
207  /* convert data to a hash key; on failure, do not insert anything */
208  if (!_hash_convert_tuple(index,
209  values, isnull,
210  index_values, index_isnull))
211  return;
212 
213  /* Either spool the tuple for sorting, or just put it into the index */
214  if (buildstate->spool)
215  _h_spool(buildstate->spool, &htup->t_self,
216  index_values, index_isnull);
217  else
218  {
219  /* form an index tuple and point it at the heap tuple */
220  itup = index_form_tuple(RelationGetDescr(index),
221  index_values, index_isnull);
222  itup->t_tid = htup->t_self;
223  _hash_doinsert(index, itup, buildstate->heapRel);
224  pfree(itup);
225  }
226 
227  buildstate->indtuples += 1;
228 }
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
Definition: hashinsert.c:36
#define RelationGetDescr(relation)
Definition: rel.h:428
ItemPointerData t_tid
Definition: itup.h:37
double indtuples
Definition: hash.c:38
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:37
void pfree(void *pointer)
Definition: mcxt.c:950
ItemPointerData t_self
Definition: htup.h:65
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:326
void _h_spool(HSpool *hspool, ItemPointer self, Datum *values, bool *isnull)
Definition: hashsort.c:104
HSpool * spool
Definition: hash.c:37
uintptr_t Datum
Definition: postgres.h:372
Relation heapRel
Definition: hash.c:39
Definition: regguts.h:298
static Datum values[MAXATTR]
Definition: bootstrap.c:163
void hashbuildempty ( Relation  index)

Definition at line 186 of file hash.c.

References _hash_init(), and INIT_FORKNUM.

Referenced by hashhandler().

187 {
188  _hash_init(index, 0, INIT_FORKNUM);
189 }
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:335
IndexBulkDeleteResult* hashbulkdelete ( IndexVacuumInfo info,
IndexBulkDeleteResult stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 547 of file hash.c.

References _hash_checkpage(), _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_relbuf(), Assert, GistBDItem::blkno, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsInvalid, END_CRIT_SECTION, IndexBulkDeleteResult::estimated_count, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, HASH_METAPAGE, HASH_NOLOCK, hashbucketcleanup(), HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, IndexVacuumInfo::index, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_update_meta_page::ntuples, NULL, IndexBulkDeleteResult::num_index_tuples, PageGetSpecialPointer, PageSetLSN, palloc0(), RBM_NORMAL, ReadBufferExtended(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashUpdateMetaPage, START_CRIT_SECTION, IndexVacuumInfo::strategy, IndexBulkDeleteResult::tuples_removed, XLOG_HASH_UPDATE_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashhandler().

549 {
550  Relation rel = info->index;
551  double tuples_removed;
552  double num_index_tuples;
553  double orig_ntuples;
554  Bucket orig_maxbucket;
555  Bucket cur_maxbucket;
556  Bucket cur_bucket;
557  Buffer metabuf = InvalidBuffer;
558  HashMetaPage metap;
559  HashMetaPage cachedmetap;
560 
561  tuples_removed = 0;
562  num_index_tuples = 0;
563 
564  /*
565  * We need a copy of the metapage so that we can use its hashm_spares[]
566  * values to compute bucket page addresses, but a cached copy should be
567  * good enough. (If not, we'll detect that further down and refresh the
568  * cache as necessary.)
569  */
570  cachedmetap = _hash_getcachedmetap(rel, &metabuf, false);
571  Assert(cachedmetap != NULL);
572 
573  orig_maxbucket = cachedmetap->hashm_maxbucket;
574  orig_ntuples = cachedmetap->hashm_ntuples;
575 
576  /* Scan the buckets that we know exist */
577  cur_bucket = 0;
578  cur_maxbucket = orig_maxbucket;
579 
580 loop_top:
581  while (cur_bucket <= cur_maxbucket)
582  {
583  BlockNumber bucket_blkno;
584  BlockNumber blkno;
585  Buffer bucket_buf;
586  Buffer buf;
587  HashPageOpaque bucket_opaque;
588  Page page;
589  bool split_cleanup = false;
590 
591  /* Get address of bucket's start page */
592  bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket);
593 
594  blkno = bucket_blkno;
595 
596  /*
597  * We need to acquire a cleanup lock on the primary bucket page to out
598  * wait concurrent scans before deleting the dead tuples.
599  */
600  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy);
602  _hash_checkpage(rel, buf, LH_BUCKET_PAGE);
603 
604  page = BufferGetPage(buf);
605  bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
606 
607  /*
608  * If the bucket contains tuples that are moved by split, then we need
609  * to delete such tuples. We can't delete such tuples if the split
610  * operation on bucket is not finished as those are needed by scans.
611  */
612  if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
613  H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
614  {
615  split_cleanup = true;
616 
617  /*
618  * This bucket might have been split since we last held a lock on
619  * the metapage. If so, hashm_maxbucket, hashm_highmask and
620  * hashm_lowmask might be old enough to cause us to fail to remove
621  * tuples left behind by the most recent split. To prevent that,
622  * now that the primary page of the target bucket has been locked
623  * (and thus can't be further split), check whether we need to
624  * update our cached metapage data.
625  */
626  Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber);
627  if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
628  {
629  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
630  Assert(cachedmetap != NULL);
631  }
632  }
633 
634  bucket_buf = buf;
635 
636  hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
637  cachedmetap->hashm_maxbucket,
638  cachedmetap->hashm_highmask,
639  cachedmetap->hashm_lowmask, &tuples_removed,
640  &num_index_tuples, split_cleanup,
641  callback, callback_state);
642 
643  _hash_dropbuf(rel, bucket_buf);
644 
645  /* Advance to next bucket */
646  cur_bucket++;
647  }
648 
649  if (BufferIsInvalid(metabuf))
651 
652  /* Write-lock metapage and check for split since we started */
654  metap = HashPageGetMeta(BufferGetPage(metabuf));
655 
656  if (cur_maxbucket != metap->hashm_maxbucket)
657  {
658  /* There's been a split, so process the additional bucket(s) */
659  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
660  cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
661  Assert(cachedmetap != NULL);
662  cur_maxbucket = cachedmetap->hashm_maxbucket;
663  goto loop_top;
664  }
665 
666  /* Okay, we're really done. Update tuple count in metapage. */
668 
669  if (orig_maxbucket == metap->hashm_maxbucket &&
670  orig_ntuples == metap->hashm_ntuples)
671  {
672  /*
673  * No one has split or inserted anything since start of scan, so
674  * believe our count as gospel.
675  */
676  metap->hashm_ntuples = num_index_tuples;
677  }
678  else
679  {
680  /*
681  * Otherwise, our count is untrustworthy since we may have
682  * double-scanned tuples in split buckets. Proceed by dead-reckoning.
683  * (Note: we still return estimated_count = false, because using this
684  * count is better than not updating reltuples at all.)
685  */
686  if (metap->hashm_ntuples > tuples_removed)
687  metap->hashm_ntuples -= tuples_removed;
688  else
689  metap->hashm_ntuples = 0;
690  num_index_tuples = metap->hashm_ntuples;
691  }
692 
693  MarkBufferDirty(metabuf);
694 
695  /* XLOG stuff */
696  if (RelationNeedsWAL(rel))
697  {
699  XLogRecPtr recptr;
700 
701  xlrec.ntuples = metap->hashm_ntuples;
702 
703  XLogBeginInsert();
704  XLogRegisterData((char *) &xlrec, SizeOfHashUpdateMetaPage);
705 
706  XLogRegisterBuffer(0, metabuf, REGBUF_STANDARD);
707 
708  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE);
709  PageSetLSN(BufferGetPage(metabuf), recptr);
710  }
711 
713 
714  _hash_relbuf(rel, metabuf);
715 
716  /* return statistics */
717  if (stats == NULL)
719  stats->estimated_count = false;
720  stats->num_index_tuples = num_index_tuples;
721  stats->tuples_removed += tuples_removed;
722  /* hashvacuumcleanup will fill in num_pages */
723 
724  return stats;
725 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3603
double tuples_removed
Definition: genam.h:77
#define LH_META_PAGE
Definition: hash.h:56
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:640
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
BufferAccessStrategy strategy
Definition: genam.h:51
uint32 hashm_highmask
Definition: hash.h:212
#define InvalidBuffer
Definition: buf.h:25
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
Relation index
Definition: genam.h:46
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:773
uint32 BlockNumber
Definition: block.h:31
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:285
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:78
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
uint32 hashm_lowmask
Definition: hash.h:213
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:38
uint32 Bucket
Definition: hash.h:34
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:87
BlockNumber hasho_prevblkno
Definition: hash.h:78
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
#define BufferIsInvalid(buffer)
Definition: buf.h:31
static char * buf
Definition: pg_test_fsync.c:66
#define HASH_NOLOCK
Definition: hash.h:283
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define SizeOfHashUpdateMetaPage
Definition: hash_xlog.h:219
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:225
void * palloc0(Size size)
Definition: mcxt.c:878
#define HASH_METAPAGE
Definition: hash.h:158
double hashm_ntuples
Definition: hash.h:205
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1502
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:274
#define LH_BUCKET_PAGE
Definition: hash.h:54
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:88
#define NULL
Definition: c.h:229
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:676
#define PageGetSpecialPointer(page)
Definition: bufpage.h:322
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:85
#define RelationNeedsWAL(relation)
Definition: rel.h:505
uint32 hashm_maxbucket
Definition: hash.h:211
#define HashPageGetMeta(page)
Definition: hash.h:265
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define XLOG_HASH_UPDATE_META_PAGE
Definition: hash_xlog.h:43
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
double num_index_tuples
Definition: genam.h:76
int Buffer
Definition: buf.h:23
bool estimated_count
Definition: genam.h:75
Pointer Page
Definition: bufpage.h:74
void hashendscan ( IndexScanDesc  scan)

Definition at line 512 of file hash.c.

References _hash_dropscanbuf(), _hash_kill_items(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, HashScanOpaqueData::hashso_curbuf, IndexScanDescData::indexRelation, HashScanOpaqueData::killedItems, LockBuffer(), NULL, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, and pfree().

Referenced by hashhandler().

513 {
514  HashScanOpaque so = (HashScanOpaque) scan->opaque;
515  Relation rel = scan->indexRelation;
516 
517  /*
518  * Before leaving current page, deal with any killed items. Also, ensure
519  * that we acquire lock on current page before calling _hash_kill_items.
520  */
521  if (so->numKilled > 0)
522  {
524  _hash_kill_items(scan);
526  }
527 
528  _hash_dropscanbuf(rel, so);
529 
530  if (so->killedItems != NULL)
531  pfree(so->killedItems);
532  pfree(so);
533  scan->opaque = NULL;
534 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:152
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:297
Relation indexRelation
Definition: relscan.h:90
void pfree(void *pointer)
Definition: mcxt.c:950
HashScanPosItem * killedItems
Definition: hash.h:148
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define NULL
Definition: c.h:229
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:529
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
Buffer hashso_curbuf
Definition: hash.h:121
int64 hashgetbitmap ( IndexScanDesc  scan,
TIDBitmap *  tbm 
)

Definition at line 394 of file hash.c.

References _hash_first(), _hash_next(), BufferGetPage, ForwardScanDirection, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_curpos, HashScanOpaqueData::hashso_heappos, IndexScanDescData::ignore_killed_tuples, ItemIdIsDead, ItemPointerGetOffsetNumber, IndexScanDescData::opaque, PageGetItemId, and tbm_add_tuples().

Referenced by hashhandler().

395 {
396  HashScanOpaque so = (HashScanOpaque) scan->opaque;
397  bool res;
398  int64 ntids = 0;
399 
400  res = _hash_first(scan, ForwardScanDirection);
401 
402  while (res)
403  {
404  bool add_tuple;
405 
406  /*
407  * Skip killed tuples if asked to.
408  */
409  if (scan->ignore_killed_tuples)
410  {
411  Page page;
412  OffsetNumber offnum;
413 
414  offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
415  page = BufferGetPage(so->hashso_curbuf);
416  add_tuple = !ItemIdIsDead(PageGetItemId(page, offnum));
417  }
418  else
419  add_tuple = true;
420 
421  /* Save tuple ID, and continue scanning */
422  if (add_tuple)
423  {
424  /* Note we mark the tuple ID as requiring recheck */
425  tbm_add_tuples(tbm, &(so->hashso_heappos), 1, true);
426  ntids++;
427  }
428 
429  res = _hash_next(scan, ForwardScanDirection);
430  }
431 
432  return ntids;
433 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:152
void tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids, bool recheck)
Definition: tidbitmap.c:403
#define ItemIdIsDead(itemId)
Definition: itemid.h:112
bool ignore_killed_tuples
Definition: relscan.h:101
uint16 OffsetNumber
Definition: off.h:24
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:222
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:34
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
ItemPointerData hashso_curpos
Definition: hash.h:134
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
ItemPointerData hashso_heappos
Definition: hash.h:137
Buffer hashso_curbuf
Definition: hash.h:121
Pointer Page
Definition: bufpage.h:74
bool hashgettuple ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 268 of file hash.c.

References _hash_first(), _hash_next(), Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, elog, ERROR, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_curpos, HashScanOpaqueData::hashso_heappos, HashScanPosItem::heapTid, IndexScanDescData::ignore_killed_tuples, HashScanPosItem::indexOffset, IndexScanDescData::indexRelation, ItemIdIsDead, ItemPointerEquals(), ItemPointerGetOffsetNumber, ItemPointerIsValid, ItemPointerSetOffsetNumber, IndexScanDescData::kill_prior_tuple, HashScanOpaqueData::killedItems, LockBuffer(), MaxIndexTuplesPerPage, NULL, HashScanOpaqueData::numKilled, OffsetNumberNext, IndexScanDescData::opaque, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, palloc(), RelationGetRelationName, HeapTupleData::t_self, IndexTupleData::t_tid, IndexScanDescData::xs_ctup, and IndexScanDescData::xs_recheck.

Referenced by hashhandler().

269 {
270  HashScanOpaque so = (HashScanOpaque) scan->opaque;
271  Relation rel = scan->indexRelation;
272  Buffer buf;
273  Page page;
274  OffsetNumber offnum;
275  ItemPointer current;
276  bool res;
277 
278  /* Hash indexes are always lossy since we store only the hash code */
279  scan->xs_recheck = true;
280 
281  /*
282  * We hold pin but not lock on current buffer while outside the hash AM.
283  * Reacquire the read lock here.
284  */
285  if (BufferIsValid(so->hashso_curbuf))
287 
288  /*
289  * If we've already initialized this scan, we can just advance it in the
290  * appropriate direction. If we haven't done so yet, we call a routine to
291  * get the first item in the scan.
292  */
293  current = &(so->hashso_curpos);
294  if (ItemPointerIsValid(current))
295  {
296  /*
297  * An insertion into the current index page could have happened while
298  * we didn't have read lock on it. Re-find our position by looking
299  * for the TID we previously returned. (Because we hold a pin on the
300  * primary bucket page, no deletions or splits could have occurred;
301  * therefore we can expect that the TID still exists in the current
302  * index page, at an offset >= where we were.)
303  */
304  OffsetNumber maxoffnum;
305 
306  buf = so->hashso_curbuf;
308  page = BufferGetPage(buf);
309 
310  /*
311  * We don't need test for old snapshot here as the current buffer is
312  * pinned, so vacuum can't clean the page.
313  */
314  maxoffnum = PageGetMaxOffsetNumber(page);
315  for (offnum = ItemPointerGetOffsetNumber(current);
316  offnum <= maxoffnum;
317  offnum = OffsetNumberNext(offnum))
318  {
319  IndexTuple itup;
320 
321  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
322  if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
323  break;
324  }
325  if (offnum > maxoffnum)
326  elog(ERROR, "failed to re-find scan position within index \"%s\"",
328  ItemPointerSetOffsetNumber(current, offnum);
329 
330  /*
331  * Check to see if we should kill the previously-fetched tuple.
332  */
333  if (scan->kill_prior_tuple)
334  {
335  /*
336  * Yes, so remember it for later. (We'll deal with all such tuples
337  * at once right after leaving the index page or at end of scan.)
338  * In case if caller reverses the indexscan direction it is quite
339  * possible that the same item might get entered multiple times.
340  * But, we don't detect that; instead, we just forget any excess
341  * entries.
342  */
343  if (so->killedItems == NULL)
345  sizeof(HashScanPosItem));
346 
348  {
352  so->numKilled++;
353  }
354  }
355 
356  /*
357  * Now continue the scan.
358  */
359  res = _hash_next(scan, dir);
360  }
361  else
362  res = _hash_first(scan, dir);
363 
364  /*
365  * Skip killed tuples if asked to.
366  */
367  if (scan->ignore_killed_tuples)
368  {
369  while (res)
370  {
371  offnum = ItemPointerGetOffsetNumber(current);
372  page = BufferGetPage(so->hashso_curbuf);
373  if (!ItemIdIsDead(PageGetItemId(page, offnum)))
374  break;
375  res = _hash_next(scan, dir);
376  }
377  }
378 
379  /* Release read lock on current buffer, but keep it pinned */
380  if (BufferIsValid(so->hashso_curbuf))
382 
383  /* Return current heap TID on success */
384  scan->xs_ctup.t_self = so->hashso_heappos;
385 
386  return res;
387 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:152
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
ItemPointerData t_tid
Definition: itup.h:37
#define ItemIdIsDead(itemId)
Definition: itemid.h:112
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
bool ignore_killed_tuples
Definition: relscan.h:101
Relation indexRelation
Definition: relscan.h:90
uint16 OffsetNumber
Definition: off.h:24
#define ERROR
Definition: elog.h:43
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:222
ItemPointerData t_self
Definition: htup.h:65
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:34
static char * buf
Definition: pg_test_fsync.c:66
IndexTupleData * IndexTuple
Definition: itup.h:53
#define RelationGetRelationName(relation)
Definition: rel.h:436
HashScanPosItem * killedItems
Definition: hash.h:148
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:676
HeapTupleData xs_ctup
Definition: relscan.h:120
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ItemPointerData hashso_curpos
Definition: hash.h:134
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:126
#define MaxIndexTuplesPerPage
Definition: itup.h:137
void * palloc(Size size)
Definition: mcxt.c:849
OffsetNumber indexOffset
Definition: hash.h:103
bool kill_prior_tuple
Definition: relscan.h:100
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
ItemPointerData heapTid
Definition: hash.h:102
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
ItemPointerData hashso_heappos
Definition: hash.h:137
Buffer hashso_curbuf
Definition: hash.h:121
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
Datum hashhandler ( PG_FUNCTION_ARGS  )

Definition at line 55 of file hash.c.

References IndexAmRoutine::ambeginscan, IndexAmRoutine::ambuild, IndexAmRoutine::ambuildempty, IndexAmRoutine::ambulkdelete, IndexAmRoutine::amcanbackward, IndexAmRoutine::amcanmulticol, IndexAmRoutine::amcanorder, IndexAmRoutine::amcanorderbyop, IndexAmRoutine::amcanparallel, IndexAmRoutine::amcanreturn, IndexAmRoutine::amcanunique, IndexAmRoutine::amclusterable, IndexAmRoutine::amcostestimate, IndexAmRoutine::amendscan, IndexAmRoutine::amestimateparallelscan, IndexAmRoutine::amgetbitmap, IndexAmRoutine::amgettuple, IndexAmRoutine::aminitparallelscan, IndexAmRoutine::aminsert, IndexAmRoutine::amkeytype, IndexAmRoutine::ammarkpos, IndexAmRoutine::amoptionalkey, IndexAmRoutine::amoptions, IndexAmRoutine::amparallelrescan, IndexAmRoutine::ampredlocks, IndexAmRoutine::amproperty, IndexAmRoutine::amrescan, IndexAmRoutine::amrestrpos, IndexAmRoutine::amsearcharray, IndexAmRoutine::amsearchnulls, IndexAmRoutine::amstorage, IndexAmRoutine::amstrategies, IndexAmRoutine::amsupport, IndexAmRoutine::amvacuumcleanup, IndexAmRoutine::amvalidate, hashbeginscan(), hashbuild(), hashbuildempty(), hashbulkdelete(), hashcostestimate(), hashendscan(), hashgetbitmap(), hashgettuple(), hashinsert(), HASHNProcs, hashoptions(), hashrescan(), hashvacuumcleanup(), hashvalidate(), HTMaxStrategyNumber, INT4OID, makeNode, NULL, and PG_RETURN_POINTER.

56 {
58 
59  amroutine->amstrategies = HTMaxStrategyNumber;
60  amroutine->amsupport = HASHNProcs;
61  amroutine->amcanorder = false;
62  amroutine->amcanorderbyop = false;
63  amroutine->amcanbackward = true;
64  amroutine->amcanunique = false;
65  amroutine->amcanmulticol = false;
66  amroutine->amoptionalkey = false;
67  amroutine->amsearcharray = false;
68  amroutine->amsearchnulls = false;
69  amroutine->amstorage = false;
70  amroutine->amclusterable = false;
71  amroutine->ampredlocks = false;
72  amroutine->amcanparallel = false;
73  amroutine->amkeytype = INT4OID;
74 
75  amroutine->ambuild = hashbuild;
76  amroutine->ambuildempty = hashbuildempty;
77  amroutine->aminsert = hashinsert;
78  amroutine->ambulkdelete = hashbulkdelete;
80  amroutine->amcanreturn = NULL;
81  amroutine->amcostestimate = hashcostestimate;
82  amroutine->amoptions = hashoptions;
83  amroutine->amproperty = NULL;
84  amroutine->amvalidate = hashvalidate;
85  amroutine->ambeginscan = hashbeginscan;
86  amroutine->amrescan = hashrescan;
87  amroutine->amgettuple = hashgettuple;
88  amroutine->amgetbitmap = hashgetbitmap;
89  amroutine->amendscan = hashendscan;
90  amroutine->ammarkpos = NULL;
91  amroutine->amrestrpos = NULL;
92  amroutine->amestimateparallelscan = NULL;
93  amroutine->aminitparallelscan = NULL;
94  amroutine->amparallelrescan = NULL;
95 
96  PG_RETURN_POINTER(amroutine);
97 }
ambeginscan_function ambeginscan
Definition: amapi.h:208
bytea * hashoptions(Datum reloptions, bool validate)
Definition: hashutil.c:290
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:321
ambulkdelete_function ambulkdelete
Definition: amapi.h:201
bool hashgettuple(IndexScanDesc scan, ScanDirection dir)
Definition: hash.c:268
bool amcanmulticol
Definition: amapi.h:179
uint16 amsupport
Definition: amapi.h:169
#define HASHNProcs
Definition: hash.h:297
amgettuple_function amgettuple
Definition: amapi.h:210
bool amcanorderbyop
Definition: amapi.h:173
IndexBulkDeleteResult * hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:547
amproperty_function amproperty
Definition: amapi.h:206
amparallelrescan_function amparallelrescan
Definition: amapi.h:219
void hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:7002
bool amstorage
Definition: amapi.h:187
#define INT4OID
Definition: pg_type.h:316
bool ampredlocks
Definition: amapi.h:191
IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys)
Definition: hash.c:440
#define HTMaxStrategyNumber
Definition: hash.h:289
aminsert_function aminsert
Definition: amapi.h:200
bool hashinsert(Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, IndexInfo *indexInfo)
Definition: hash.c:237
Oid amkeytype
Definition: amapi.h:195
bool amoptionalkey
Definition: amapi.h:181
amvalidate_function amvalidate
Definition: amapi.h:207
void hashbuildempty(Relation index)
Definition: hash.c:186
amgetbitmap_function amgetbitmap
Definition: amapi.h:211
ambuild_function ambuild
Definition: amapi.h:198
amoptions_function amoptions
Definition: amapi.h:205
IndexBuildResult * hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
Definition: hash.c:103
amcostestimate_function amcostestimate
Definition: amapi.h:204
bool amcanunique
Definition: amapi.h:177
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:202
amendscan_function amendscan
Definition: amapi.h:212
bool amcanbackward
Definition: amapi.h:175
amrescan_function amrescan
Definition: amapi.h:209
bool amcanparallel
Definition: amapi.h:193
int64 hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: hash.c:394
bool amsearchnulls
Definition: amapi.h:185
void hashendscan(IndexScanDesc scan)
Definition: hash.c:512
bool amclusterable
Definition: amapi.h:189
bool amsearcharray
Definition: amapi.h:183
IndexBulkDeleteResult * hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: hash.c:733
#define makeNode(_type_)
Definition: nodes.h:557
#define NULL
Definition: c.h:229
ammarkpos_function ammarkpos
Definition: amapi.h:213
bool amcanorder
Definition: amapi.h:171
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:217
bool hashvalidate(Oid opclassoid)
Definition: hashvalidate.c:44
uint16 amstrategies
Definition: amapi.h:167
ambuildempty_function ambuildempty
Definition: amapi.h:199
amcanreturn_function amcanreturn
Definition: amapi.h:203
void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
Definition: hash.c:473
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:218
amrestrpos_function amrestrpos
Definition: amapi.h:214
bool hashinsert ( Relation  rel,
Datum *  values,
bool *  isnull,
ItemPointer  ht_ctid,
Relation  heapRel,
IndexUniqueCheck  checkUnique,
IndexInfo *  indexInfo 
)

Definition at line 237 of file hash.c.

References _hash_convert_tuple(), _hash_doinsert(), index_form_tuple(), pfree(), RelationGetDescr, and IndexTupleData::t_tid.

Referenced by hashhandler().

241 {
242  Datum index_values[1];
243  bool index_isnull[1];
244  IndexTuple itup;
245 
246  /* convert data to a hash key; on failure, do not insert anything */
247  if (!_hash_convert_tuple(rel,
248  values, isnull,
249  index_values, index_isnull))
250  return false;
251 
252  /* form an index tuple and point it at the heap tuple */
253  itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull);
254  itup->t_tid = *ht_ctid;
255 
256  _hash_doinsert(rel, itup, heapRel);
257 
258  pfree(itup);
259 
260  return false;
261 }
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
Definition: hashinsert.c:36
#define RelationGetDescr(relation)
Definition: rel.h:428
ItemPointerData t_tid
Definition: itup.h:37
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: indextuple.c:37
void pfree(void *pointer)
Definition: mcxt.c:950
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:326
uintptr_t Datum
Definition: postgres.h:372
static Datum values[MAXATTR]
Definition: bootstrap.c:163
void hashrescan ( IndexScanDesc  scan,
ScanKey  scankey,
int  nscankeys,
ScanKey  orderbys,
int  norderbys 
)

Definition at line 473 of file hash.c.

References _hash_dropscanbuf(), _hash_kill_items(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_curpos, HashScanOpaqueData::hashso_heappos, IndexScanDescData::indexRelation, ItemPointerSetInvalid, IndexScanDescData::keyData, LockBuffer(), memmove, IndexScanDescData::numberOfKeys, HashScanOpaqueData::numKilled, and IndexScanDescData::opaque.

Referenced by hashhandler().

475 {
476  HashScanOpaque so = (HashScanOpaque) scan->opaque;
477  Relation rel = scan->indexRelation;
478 
479  /*
480  * Before leaving current page, deal with any killed items. Also, ensure
481  * that we acquire lock on current page before calling _hash_kill_items.
482  */
483  if (so->numKilled > 0)
484  {
486  _hash_kill_items(scan);
488  }
489 
490  _hash_dropscanbuf(rel, so);
491 
492  /* set position invalid (this will cause _hash_first call) */
495 
496  /* Update scan key, if a new one is given */
497  if (scankey && scan->numberOfKeys > 0)
498  {
499  memmove(scan->keyData,
500  scankey,
501  scan->numberOfKeys * sizeof(ScanKeyData));
502  }
503 
504  so->hashso_buc_populated = false;
505  so->hashso_buc_split = false;
506 }
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:152
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:297
Relation indexRelation
Definition: relscan.h:90
#define memmove(d, s, c)
Definition: c.h:1059
bool hashso_buc_populated
Definition: hash.h:140
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:529
ItemPointerData hashso_curpos
Definition: hash.h:134
ScanKey keyData
Definition: relscan.h:94
bool hashso_buc_split
Definition: hash.h:146
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:150
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
ItemPointerData hashso_heappos
Definition: hash.h:137
Buffer hashso_curbuf
Definition: hash.h:121
IndexBulkDeleteResult * hashvacuumcleanup ( IndexVacuumInfo *  info,
IndexBulkDeleteResult *  stats 
)

Definition at line 733 of file hash.c.

References IndexVacuumInfo::index, NULL, IndexBulkDeleteResult::num_pages, and RelationGetNumberOfBlocks.

Referenced by hashhandler().

734 {
735  Relation rel = info->index;
736  BlockNumber num_pages;
737 
738  /* If hashbulkdelete wasn't called, return NULL signifying no change */
739  /* Note: this covers the analyze_only case too */
740  if (stats == NULL)
741  return NULL;
742 
743  /* update statistics */
744  num_pages = RelationGetNumberOfBlocks(rel);
745  stats->num_pages = num_pages;
746 
747  return stats;
748 }
Relation index
Definition: genam.h:46
uint32 BlockNumber
Definition: block.h:31
BlockNumber num_pages
Definition: genam.h:73
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
#define NULL
Definition: c.h:229