PostgreSQL Source Code git master
hash.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/relscan.h"
#include "access/stratnum.h"
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "nodes/execnodes.h"
#include "optimizer/plancat.h"
#include "pgstat.h"
#include "utils/fmgrprotos.h"
#include "utils/index_selfuncs.h"
#include "utils/rel.h"
Include dependency graph for hash.c:

Go to the source code of this file.

Data Structures

struct  HashBuildState
 

Functions

static void hashbuildCallback (Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
 
Datum hashhandler (PG_FUNCTION_ARGS)
 
IndexBuildResult * hashbuild (Relation heap, Relation index, IndexInfo *indexInfo)
 
void hashbuildempty (Relation index)
 
bool hashinsert (Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
 
bool hashgettuple (IndexScanDesc scan, ScanDirection dir)
 
int64 hashgetbitmap (IndexScanDesc scan, TIDBitmap *tbm)
 
IndexScanDesc hashbeginscan (Relation rel, int nkeys, int norderbys)
 
void hashrescan (IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
 
void hashendscan (IndexScanDesc scan)
 
IndexBulkDeleteResult * hashbulkdelete (IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
 
IndexBulkDeleteResult * hashvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
void hashbucketcleanup (Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
 
CompareType hashtranslatestrategy (StrategyNumber strategy, Oid opfamily, Oid opcintype)
 
StrategyNumber hashtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype)
 

Function Documentation

◆ hashbeginscan()

IndexScanDesc hashbeginscan ( Relation  rel,
int  nkeys,
int  norderbys 
)

Definition at line 371 of file hash.c.

372{
373 IndexScanDesc scan;
375
376 /* no order by operators allowed */
377 Assert(norderbys == 0);
378
379 scan = RelationGetIndexScan(rel, nkeys, norderbys);
380
385
386 so->hashso_buc_populated = false;
387 so->hashso_buc_split = false;
388
389 so->killedItems = NULL;
390 so->numKilled = 0;
391
392 scan->opaque = so;
393
394 return scan;
395}
#define InvalidBuffer
Definition: buf.h:25
#define Assert(condition)
Definition: c.h:815
IndexScanDesc RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
Definition: genam.c:80
#define HashScanPosInvalidate(scanpos)
Definition: hash.h:144
HashScanOpaqueData * HashScanOpaque
Definition: hash.h:192
void * palloc(Size size)
Definition: mcxt.c:1317
bool hashso_buc_split
Definition: hash.h:180
HashScanPosData currPos
Definition: hash.h:189
bool hashso_buc_populated
Definition: hash.h:174
Buffer hashso_split_bucket_buf
Definition: hash.h:171
Buffer hashso_bucket_buf
Definition: hash.h:164
int * killedItems
Definition: hash.h:182

References Assert, HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_split_bucket_buf, InvalidBuffer, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), and RelationGetIndexScan().

Referenced by hashhandler().

◆ hashbucketcleanup()

void hashbucketcleanup ( Relation  rel,
Bucket  cur_bucket,
Buffer  bucket_buf,
BlockNumber  bucket_blkno,
BufferAccessStrategy  bstrategy,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask,
double *  tuples_removed,
double *  num_index_tuples,
bool  split_cleanup,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 687 of file hash.c.

693{
694 BlockNumber blkno;
695 Buffer buf;
697 bool bucket_dirty = false;
698
699 blkno = bucket_blkno;
700 buf = bucket_buf;
701
702 if (split_cleanup)
703 new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,
704 lowmask, maxbucket);
705
706 /* Scan each page in bucket */
707 for (;;)
708 {
709 HashPageOpaque opaque;
710 OffsetNumber offno;
711 OffsetNumber maxoffno;
712 Buffer next_buf;
713 Page page;
714 OffsetNumber deletable[MaxOffsetNumber];
715 int ndeletable = 0;
716 bool retain_pin = false;
717 bool clear_dead_marking = false;
718
719 vacuum_delay_point(false);
720
721 page = BufferGetPage(buf);
722 opaque = HashPageGetOpaque(page);
723
724 /* Scan each tuple in page */
725 maxoffno = PageGetMaxOffsetNumber(page);
726 for (offno = FirstOffsetNumber;
727 offno <= maxoffno;
728 offno = OffsetNumberNext(offno))
729 {
730 ItemPointer htup;
731 IndexTuple itup;
732 Bucket bucket;
733 bool kill_tuple = false;
734
735 itup = (IndexTuple) PageGetItem(page,
736 PageGetItemId(page, offno));
737 htup = &(itup->t_tid);
738
739 /*
740 * To remove the dead tuples, we strictly want to rely on results
741 * of callback function. refer btvacuumpage for detailed reason.
742 */
743 if (callback && callback(htup, callback_state))
744 {
745 kill_tuple = true;
746 if (tuples_removed)
747 *tuples_removed += 1;
748 }
749 else if (split_cleanup)
750 {
751 /* delete the tuples that are moved by split. */
753 maxbucket,
754 highmask,
755 lowmask);
756 /* mark the item for deletion */
757 if (bucket != cur_bucket)
758 {
759 /*
760 * We expect tuples to either belong to current bucket or
761 * new_bucket. This is ensured because we don't allow
762 * further splits from bucket that contains garbage. See
763 * comments in _hash_expandtable.
764 */
765 Assert(bucket == new_bucket);
766 kill_tuple = true;
767 }
768 }
769
770 if (kill_tuple)
771 {
772 /* mark the item for deletion */
773 deletable[ndeletable++] = offno;
774 }
775 else
776 {
777 /* we're keeping it, so count it */
778 if (num_index_tuples)
779 *num_index_tuples += 1;
780 }
781 }
782
783 /* retain the pin on primary bucket page till end of bucket scan */
784 if (blkno == bucket_blkno)
785 retain_pin = true;
786 else
787 retain_pin = false;
788
789 blkno = opaque->hasho_nextblkno;
790
791 /*
792 * Apply deletions, advance to next page and write page if needed.
793 */
794 if (ndeletable > 0)
795 {
796 /* No ereport(ERROR) until changes are logged */
798
799 PageIndexMultiDelete(page, deletable, ndeletable);
800 bucket_dirty = true;
801
802 /*
803 * Let us mark the page as clean if vacuum removes the DEAD tuples
804 * from an index page. We do this by clearing
805 * LH_PAGE_HAS_DEAD_TUPLES flag.
806 */
807 if (tuples_removed && *tuples_removed > 0 &&
808 H_HAS_DEAD_TUPLES(opaque))
809 {
810 opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
811 clear_dead_marking = true;
812 }
813
815
816 /* XLOG stuff */
817 if (RelationNeedsWAL(rel))
818 {
819 xl_hash_delete xlrec;
820 XLogRecPtr recptr;
821
822 xlrec.clear_dead_marking = clear_dead_marking;
823 xlrec.is_primary_bucket_page = (buf == bucket_buf);
824
827
828 /*
829 * bucket buffer was not changed, but still needs to be
830 * registered to ensure that we can acquire a cleanup lock on
831 * it during replay.
832 */
833 if (!xlrec.is_primary_bucket_page)
834 {
836
837 XLogRegisterBuffer(0, bucket_buf, flags);
838 }
839
841 XLogRegisterBufData(1, deletable,
842 ndeletable * sizeof(OffsetNumber));
843
844 recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE);
845 PageSetLSN(BufferGetPage(buf), recptr);
846 }
847
849 }
850
851 /* bail out if there are no more pages to scan. */
852 if (!BlockNumberIsValid(blkno))
853 break;
854
855 next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
857 bstrategy);
858
859 /*
860 * release the lock on previous page after acquiring the lock on next
861 * page
862 */
863 if (retain_pin)
865 else
866 _hash_relbuf(rel, buf);
867
868 buf = next_buf;
869 }
870
871 /*
872 * lock the bucket page to clear the garbage flag and squeeze the bucket.
873 * if the current buffer is same as bucket buffer, then we already have
874 * lock on bucket page.
875 */
876 if (buf != bucket_buf)
877 {
878 _hash_relbuf(rel, buf);
880 }
881
882 /*
883 * Clear the garbage flag from bucket after deleting the tuples that are
884 * moved by split. We purposefully clear the flag before squeeze bucket,
885 * so that after restart, vacuum shouldn't again try to delete the moved
886 * by split tuples.
887 */
888 if (split_cleanup)
889 {
890 HashPageOpaque bucket_opaque;
891 Page page;
892
893 page = BufferGetPage(bucket_buf);
894 bucket_opaque = HashPageGetOpaque(page);
895
896 /* No ereport(ERROR) until changes are logged */
898
899 bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
900 MarkBufferDirty(bucket_buf);
901
902 /* XLOG stuff */
903 if (RelationNeedsWAL(rel))
904 {
905 XLogRecPtr recptr;
906
908 XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD);
909
910 recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_CLEANUP);
911 PageSetLSN(page, recptr);
912 }
913
915 }
916
917 /*
918 * If we have deleted anything, try to compact free space. For squeezing
919 * the bucket, we must have a cleanup lock, else it can impact the
920 * ordering of tuples for a scan that has started before it.
921 */
922 if (bucket_dirty && IsBufferCleanupOK(bucket_buf))
923 _hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
924 bstrategy);
925 else
926 LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
927}
uint32 BlockNumber
Definition: block.h:31
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
int Buffer
Definition: buf.h:23
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:5394
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2529
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5097
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:189
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:396
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:191
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1150
static Item PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:354
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:244
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:391
PageData * Page
Definition: bufpage.h:82
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:372
uint8_t uint8
Definition: c.h:486
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:204
#define HashPageGetOpaque(page)
Definition: hash.h:88
#define HASH_WRITE
Definition: hash.h:340
#define H_HAS_DEAD_TUPLES(opaque)
Definition: hash.h:93
uint32 Bucket
Definition: hash.h:35
#define LH_OVERFLOW_PAGE
Definition: hash.h:54
#define InvalidBucket
Definition: hash.h:37
#define XLOG_HASH_SPLIT_CLEANUP
Definition: hash_xlog.h:37
#define SizeOfHashDelete
Definition: hash_xlog.h:186
#define XLOG_HASH_DELETE
Definition: hash_xlog.h:36
void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Buffer bucket_buf, BufferAccessStrategy bstrategy)
Definition: hashovfl.c:842
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:266
Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
Definition: hashpage.c:239
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:291
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:125
Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, uint32 lowmask, uint32 maxbucket)
Definition: hashutil.c:494
IndexTupleData * IndexTuple
Definition: itup.h:53
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
#define MaxOffsetNumber
Definition: off.h:28
static char * buf
Definition: pg_test_fsync.c:72
#define RelationNeedsWAL(relation)
Definition: rel.h:635
BlockNumber hasho_nextblkno
Definition: hash.h:80
uint16 hasho_flag
Definition: hash.h:82
ItemPointerData t_tid
Definition: itup.h:37
bool clear_dead_marking
Definition: hash_xlog.h:180
bool is_primary_bucket_page
Definition: hash_xlog.h:182
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:46
void vacuum_delay_point(bool is_analyze)
Definition: vacuum.c:2387
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:474
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition: xloginsert.c:405
void XLogRegisterData(const void *data, uint32 len)
Definition: xloginsert.c:364
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:242
void XLogBeginInsert(void)
Definition: xloginsert.c:149
#define REGBUF_NO_CHANGE
Definition: xloginsert.h:37
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:33

References _hash_get_indextuple_hashkey(), _hash_get_newbucket_from_oldbucket(), _hash_getbuf_with_strategy(), _hash_hashkey2bucket(), _hash_relbuf(), _hash_squeezebucket(), Assert, BlockNumberIsValid(), buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), callback(), xl_hash_delete::clear_dead_marking, END_CRIT_SECTION, FirstOffsetNumber, H_HAS_DEAD_TUPLES, HASH_WRITE, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageGetOpaque, InvalidBucket, xl_hash_delete::is_primary_bucket_page, IsBufferCleanupOK(), LH_OVERFLOW_PAGE, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexMultiDelete(), PageSetLSN(), PG_USED_FOR_ASSERTS_ONLY, REGBUF_NO_CHANGE, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashDelete, START_CRIT_SECTION, IndexTupleData::t_tid, vacuum_delay_point(), XLOG_HASH_DELETE, XLOG_HASH_SPLIT_CLEANUP, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), _hash_splitbucket(), and hashbulkdelete().

◆ hashbuild()

IndexBuildResult * hashbuild ( Relation  heap,
Relation  index,
IndexInfo indexInfo 
)

Definition at line 119 of file hash.c.

120{
121 IndexBuildResult *result;
122 BlockNumber relpages;
123 double reltuples;
124 double allvisfrac;
125 uint32 num_buckets;
126 Size sort_threshold;
127 HashBuildState buildstate;
128
129 /*
130 * We expect to be called exactly once for any index relation. If that's
131 * not the case, big trouble's what we have.
132 */
134 elog(ERROR, "index \"%s\" already contains data",
136
137 /* Estimate the number of rows currently present in the table */
138 estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
139
140 /* Initialize the hash index metadata page and initial buckets */
141 num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
142
143 /*
144 * If we just insert the tuples into the index in scan order, then
145 * (assuming their hash codes are pretty random) there will be no locality
146 * of access to the index, and if the index is bigger than available RAM
147 * then we'll thrash horribly. To prevent that scenario, we can sort the
148 * tuples by (expected) bucket number. However, such a sort is useless
149 * overhead when the index does fit in RAM. We choose to sort if the
150 * initial index size exceeds maintenance_work_mem, or the number of
151 * buffers usable for the index, whichever is less. (Limiting by the
152 * number of buffers should reduce thrashing between PG buffers and kernel
153 * buffers, which seems useful even if no physical I/O results. Limiting
154 * by maintenance_work_mem is useful to allow easy testing of the sort
155 * code path, and may be useful to DBAs as an additional control knob.)
156 *
157 * NOTE: this test will need adjustment if a bucket is ever different from
158 * one page. Also, "initial index size" accounting does not include the
159 * metapage, nor the first bitmap page.
160 */
161 sort_threshold = (maintenance_work_mem * (Size) 1024) / BLCKSZ;
162 if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
163 sort_threshold = Min(sort_threshold, NBuffers);
164 else
165 sort_threshold = Min(sort_threshold, NLocBuffer);
166
167 if (num_buckets >= sort_threshold)
168 buildstate.spool = _h_spoolinit(heap, index, num_buckets);
169 else
170 buildstate.spool = NULL;
171
172 /* prepare to build the index */
173 buildstate.indtuples = 0;
174 buildstate.heapRel = heap;
175
176 /* do the heap scan */
177 reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
179 &buildstate, NULL);
181 buildstate.indtuples);
182
183 if (buildstate.spool)
184 {
185 /* sort the tuples and insert them into the index */
186 _h_indexbuild(buildstate.spool, buildstate.heapRel);
187 _h_spooldestroy(buildstate.spool);
188 }
189
190 /*
191 * Return statistics
192 */
193 result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
194
195 result->heap_tuples = reltuples;
196 result->index_tuples = buildstate.indtuples;
197
198 return result;
199}
void pgstat_progress_update_param(int index, int64 val)
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:273
#define Min(x, y)
Definition: c.h:961
uint32_t uint32
Definition: c.h:488
size_t Size
Definition: c.h:562
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
int NBuffers
Definition: globals.c:141
int maintenance_work_mem
Definition: globals.c:132
static void hashbuildCallback(Relation index, ItemPointer tid, Datum *values, bool *isnull, bool tupleIsAlive, void *state)
Definition: hash.c:214
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:327
void _h_indexbuild(HSpool *hspool, Relation heapRel)
Definition: hashsort.c:120
HSpool * _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
Definition: hashsort.c:60
void _h_spooldestroy(HSpool *hspool)
Definition: hashsort.c:99
int NLocBuffer
Definition: localbuf.c:42
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1067
#define PROGRESS_CREATEIDX_TUPLES_TOTAL
Definition: progress.h:89
#define RelationGetRelationName(relation)
Definition: rel.h:546
@ MAIN_FORKNUM
Definition: relpath.h:58
HSpool * spool
Definition: hash.c:40
Relation heapRel
Definition: hash.c:42
double indtuples
Definition: hash.c:41
double heap_tuples
Definition: genam.h:34
double index_tuples
Definition: genam.h:35
Definition: type.h:96
static double table_index_build_scan(Relation table_rel, Relation index_rel, struct IndexInfo *index_info, bool allow_sync, bool progress, IndexBuildCallback callback, void *callback_state, TableScanDesc scan)
Definition: tableam.h:1781

References _h_indexbuild(), _h_spooldestroy(), _h_spoolinit(), _hash_init(), elog, ERROR, estimate_rel_size(), hashbuildCallback(), IndexBuildResult::heap_tuples, HashBuildState::heapRel, IndexBuildResult::index_tuples, HashBuildState::indtuples, MAIN_FORKNUM, maintenance_work_mem, Min, NBuffers, NLocBuffer, palloc(), pgstat_progress_update_param(), PROGRESS_CREATEIDX_TUPLES_TOTAL, RelationGetNumberOfBlocks, RelationGetRelationName, HashBuildState::spool, and table_index_build_scan().

Referenced by hashhandler().

◆ hashbuildCallback()

static void hashbuildCallback ( Relation  index,
ItemPointer  tid,
Datum values,
bool *  isnull,
bool  tupleIsAlive,
void *  state 
)
static

Definition at line 214 of file hash.c.

220{
221 HashBuildState *buildstate = (HashBuildState *) state;
222 Datum index_values[1];
223 bool index_isnull[1];
224 IndexTuple itup;
225
226 /* convert data to a hash key; on failure, do not insert anything */
228 values, isnull,
229 index_values, index_isnull))
230 return;
231
232 /* Either spool the tuple for sorting, or just put it into the index */
233 if (buildstate->spool)
234 _h_spool(buildstate->spool, tid, index_values, index_isnull);
235 else
236 {
237 /* form an index tuple and point it at the heap tuple */
239 index_values, index_isnull);
240 itup->t_tid = *tid;
241 _hash_doinsert(index, itup, buildstate->heapRel, false);
242 pfree(itup);
243 }
244
245 buildstate->indtuples += 1;
246}
static Datum values[MAXATTR]
Definition: bootstrap.c:151
void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel, bool sorted)
Definition: hashinsert.c:38
void _h_spool(HSpool *hspool, ItemPointer self, const Datum *values, const bool *isnull)
Definition: hashsort.c:109
bool _hash_convert_tuple(Relation index, Datum *user_values, bool *user_isnull, Datum *index_values, bool *index_isnull)
Definition: hashutil.c:318
IndexTuple index_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
Definition: indextuple.c:44
void pfree(void *pointer)
Definition: mcxt.c:1521
uintptr_t Datum
Definition: postgres.h:69
#define RelationGetDescr(relation)
Definition: rel.h:538
Definition: regguts.h:323

References _h_spool(), _hash_convert_tuple(), _hash_doinsert(), HashBuildState::heapRel, index_form_tuple(), HashBuildState::indtuples, pfree(), RelationGetDescr, HashBuildState::spool, IndexTupleData::t_tid, and values.

Referenced by hashbuild().

◆ hashbuildempty()

void hashbuildempty ( Relation  index)

Definition at line 205 of file hash.c.

206{
208}
@ INIT_FORKNUM
Definition: relpath.h:61

References _hash_init(), and INIT_FORKNUM.

Referenced by hashhandler().

◆ hashbulkdelete()

IndexBulkDeleteResult * hashbulkdelete ( IndexVacuumInfo info,
IndexBulkDeleteResult stats,
IndexBulkDeleteCallback  callback,
void *  callback_state 
)

Definition at line 462 of file hash.c.

464{
465 Relation rel = info->index;
466 double tuples_removed;
467 double num_index_tuples;
468 double orig_ntuples;
469 Bucket orig_maxbucket;
470 Bucket cur_maxbucket;
471 Bucket cur_bucket;
472 Buffer metabuf = InvalidBuffer;
473 HashMetaPage metap;
474 HashMetaPage cachedmetap;
475
476 tuples_removed = 0;
477 num_index_tuples = 0;
478
479 /*
480 * We need a copy of the metapage so that we can use its hashm_spares[]
481 * values to compute bucket page addresses, but a cached copy should be
482 * good enough. (If not, we'll detect that further down and refresh the
483 * cache as necessary.)
484 */
485 cachedmetap = _hash_getcachedmetap(rel, &metabuf, false);
486 Assert(cachedmetap != NULL);
487
488 orig_maxbucket = cachedmetap->hashm_maxbucket;
489 orig_ntuples = cachedmetap->hashm_ntuples;
490
491 /* Scan the buckets that we know exist */
492 cur_bucket = 0;
493 cur_maxbucket = orig_maxbucket;
494
495loop_top:
496 while (cur_bucket <= cur_maxbucket)
497 {
498 BlockNumber bucket_blkno;
499 BlockNumber blkno;
500 Buffer bucket_buf;
501 Buffer buf;
502 HashPageOpaque bucket_opaque;
503 Page page;
504 bool split_cleanup = false;
505
506 /* Get address of bucket's start page */
507 bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket);
508
509 blkno = bucket_blkno;
510
511 /*
512 * We need to acquire a cleanup lock on the primary bucket page to out
513 * wait concurrent scans before deleting the dead tuples.
514 */
518
519 page = BufferGetPage(buf);
520 bucket_opaque = HashPageGetOpaque(page);
521
522 /*
523 * If the bucket contains tuples that are moved by split, then we need
524 * to delete such tuples. We can't delete such tuples if the split
525 * operation on bucket is not finished as those are needed by scans.
526 */
527 if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
528 H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
529 {
530 split_cleanup = true;
531
532 /*
533 * This bucket might have been split since we last held a lock on
534 * the metapage. If so, hashm_maxbucket, hashm_highmask and
535 * hashm_lowmask might be old enough to cause us to fail to remove
536 * tuples left behind by the most recent split. To prevent that,
537 * now that the primary page of the target bucket has been locked
538 * (and thus can't be further split), check whether we need to
539 * update our cached metapage data.
540 */
541 Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber);
542 if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
543 {
544 cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
545 Assert(cachedmetap != NULL);
546 }
547 }
548
549 bucket_buf = buf;
550
551 hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
552 cachedmetap->hashm_maxbucket,
553 cachedmetap->hashm_highmask,
554 cachedmetap->hashm_lowmask, &tuples_removed,
555 &num_index_tuples, split_cleanup,
556 callback, callback_state);
557
558 _hash_dropbuf(rel, bucket_buf);
559
560 /* Advance to next bucket */
561 cur_bucket++;
562 }
563
564 if (BufferIsInvalid(metabuf))
566
567 /* Write-lock metapage and check for split since we started */
569 metap = HashPageGetMeta(BufferGetPage(metabuf));
570
571 if (cur_maxbucket != metap->hashm_maxbucket)
572 {
573 /* There's been a split, so process the additional bucket(s) */
575 cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
576 Assert(cachedmetap != NULL);
577 cur_maxbucket = cachedmetap->hashm_maxbucket;
578 goto loop_top;
579 }
580
581 /* Okay, we're really done. Update tuple count in metapage. */
583
584 if (orig_maxbucket == metap->hashm_maxbucket &&
585 orig_ntuples == metap->hashm_ntuples)
586 {
587 /*
588 * No one has split or inserted anything since start of scan, so
589 * believe our count as gospel.
590 */
591 metap->hashm_ntuples = num_index_tuples;
592 }
593 else
594 {
595 /*
596 * Otherwise, our count is untrustworthy since we may have
597 * double-scanned tuples in split buckets. Proceed by dead-reckoning.
598 * (Note: we still return estimated_count = false, because using this
599 * count is better than not updating reltuples at all.)
600 */
601 if (metap->hashm_ntuples > tuples_removed)
602 metap->hashm_ntuples -= tuples_removed;
603 else
604 metap->hashm_ntuples = 0;
605 num_index_tuples = metap->hashm_ntuples;
606 }
607
608 MarkBufferDirty(metabuf);
609
610 /* XLOG stuff */
611 if (RelationNeedsWAL(rel))
612 {
614 XLogRecPtr recptr;
615
616 xlrec.ntuples = metap->hashm_ntuples;
617
620
622
623 recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE);
624 PageSetLSN(BufferGetPage(metabuf), recptr);
625 }
626
628
629 _hash_relbuf(rel, metabuf);
630
631 /* return statistics */
632 if (stats == NULL)
634 stats->estimated_count = false;
635 stats->num_index_tuples = num_index_tuples;
636 stats->tuples_removed += tuples_removed;
637 /* hashvacuumcleanup will fill in num_pages */
638
639 return stats;
640}
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsInvalid(buffer)
Definition: buf.h:31
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5177
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:793
@ RBM_NORMAL
Definition: bufmgr.h:45
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:687
#define HASH_NOLOCK
Definition: hash.h:341
#define LH_BUCKET_PAGE
Definition: hash.h:55
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:91
#define LH_META_PAGE
Definition: hash.h:57
#define HashPageGetMeta(page)
Definition: hash.h:323
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:39
#define HASH_METAPAGE
Definition: hash.h:198
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:90
#define XLOG_HASH_UPDATE_META_PAGE
Definition: hash_xlog.h:38
#define SizeOfHashUpdateMetaPage
Definition: hash_xlog.h:200
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1501
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:277
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:70
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:210
void * palloc0(Size size)
Definition: mcxt.c:1347
uint32 hashm_lowmask
Definition: hash.h:256
uint32 hashm_maxbucket
Definition: hash.h:254
double hashm_ntuples
Definition: hash.h:248
uint32 hashm_highmask
Definition: hash.h:255
BlockNumber hasho_prevblkno
Definition: hash.h:79
bool estimated_count
Definition: genam.h:80
double tuples_removed
Definition: genam.h:82
double num_index_tuples
Definition: genam.h:81
Relation index
Definition: genam.h:48
BufferAccessStrategy strategy
Definition: genam.h:55

References _hash_checkpage(), _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_relbuf(), Assert, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsInvalid, callback(), END_CRIT_SECTION, IndexBulkDeleteResult::estimated_count, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, HASH_METAPAGE, HASH_NOLOCK, hashbucketcleanup(), HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HashPageGetOpaque, IndexVacuumInfo::index, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), LockBufferForCleanup(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_update_meta_page::ntuples, IndexBulkDeleteResult::num_index_tuples, PageSetLSN(), palloc0(), RBM_NORMAL, ReadBufferExtended(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashUpdateMetaPage, START_CRIT_SECTION, IndexVacuumInfo::strategy, IndexBulkDeleteResult::tuples_removed, XLOG_HASH_UPDATE_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashhandler().

◆ hashendscan()

void hashendscan ( IndexScanDesc  scan)

Definition at line 431 of file hash.c.

432{
434 Relation rel = scan->indexRelation;
435
437 {
438 /* Before leaving current page, deal with any killed items */
439 if (so->numKilled > 0)
440 _hash_kill_items(scan);
441 }
442
443 _hash_dropscanbuf(rel, so);
444
445 if (so->killedItems != NULL)
446 pfree(so->killedItems);
447 pfree(so);
448 scan->opaque = NULL;
449}
#define HashScanPosIsValid(scanpos)
Definition: hash.h:137
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:289
void _hash_kill_items(IndexScanDesc scan)
Definition: hashutil.c:536
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:76
Relation indexRelation
Definition: relscan.h:135

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosIsValid, if(), IndexScanDescData::indexRelation, HashScanOpaqueData::killedItems, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, and pfree().

Referenced by hashhandler().

◆ hashgetbitmap()

int64 hashgetbitmap ( IndexScanDesc  scan,
TIDBitmap tbm 
)

Definition at line 339 of file hash.c.

340{
342 bool res;
343 int64 ntids = 0;
344 HashScanPosItem *currItem;
345
347
348 while (res)
349 {
350 currItem = &so->currPos.items[so->currPos.itemIndex];
351
352 /*
353 * _hash_first and _hash_next handle eliminate dead index entries
354 * whenever scan->ignore_killed_tuples is true. Therefore, there's
355 * nothing to do here except add the results to the TIDBitmap.
356 */
357 tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
358 ntids++;
359
361 }
362
363 return ntids;
364}
int64_t int64
Definition: c.h:485
bool _hash_first(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:288
bool _hash_next(IndexScanDesc scan, ScanDirection dir)
Definition: hashsearch.c:48
@ ForwardScanDirection
Definition: sdir.h:28
HashScanPosItem items[MaxIndexTuplesPerPage]
Definition: hash.h:127
int itemIndex
Definition: hash.h:125
void tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids, bool recheck)
Definition: tidbitmap.c:377

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, ForwardScanDirection, HashScanPosData::itemIndex, HashScanPosData::items, IndexScanDescData::opaque, res, and tbm_add_tuples().

Referenced by hashhandler().

◆ hashgettuple()

bool hashgettuple ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 287 of file hash.c.

288{
290 bool res;
291
292 /* Hash indexes are always lossy since we store only the hash code */
293 scan->xs_recheck = true;
294
295 /*
296 * If we've already initialized this scan, we can just advance it in the
297 * appropriate direction. If we haven't done so yet, we call a routine to
298 * get the first item in the scan.
299 */
301 res = _hash_first(scan, dir);
302 else
303 {
304 /*
305 * Check to see if we should kill the previously-fetched tuple.
306 */
307 if (scan->kill_prior_tuple)
308 {
 309 /*
 310 * Yes, so remember it for later. (We'll deal with all such tuples
 311 * at once right after leaving the index page or at end of scan.)
 312 * If the caller reverses the indexscan direction, it is quite
 313 * possible that the same item might get entered multiple times.
 314 * But, we don't detect that; instead, we just forget any excess
 315 * entries.
 316 */
317 if (so->killedItems == NULL)
318 so->killedItems = (int *)
319 palloc(MaxIndexTuplesPerPage * sizeof(int));
320
322 so->killedItems[so->numKilled++] = so->currPos.itemIndex;
323 }
324
325 /*
326 * Now continue the scan.
327 */
328 res = _hash_next(scan, dir);
329 }
330
331 return res;
332}
#define MaxIndexTuplesPerPage
Definition: itup.h:181
bool kill_prior_tuple
Definition: relscan.h:145

References _hash_first(), _hash_next(), HashScanOpaqueData::currPos, HashScanPosIsValid, if(), HashScanPosData::itemIndex, IndexScanDescData::kill_prior_tuple, HashScanOpaqueData::killedItems, MaxIndexTuplesPerPage, HashScanOpaqueData::numKilled, IndexScanDescData::opaque, palloc(), res, and IndexScanDescData::xs_recheck.

Referenced by hashhandler().

◆ hashhandler()

Datum hashhandler ( PG_FUNCTION_ARGS  )

Definition at line 58 of file hash.c.

59{
61
63 amroutine->amsupport = HASHNProcs;
65 amroutine->amcanorder = false;
66 amroutine->amcanorderbyop = false;
67 amroutine->amcanbackward = true;
68 amroutine->amcanunique = false;
69 amroutine->amcanmulticol = false;
70 amroutine->amoptionalkey = false;
71 amroutine->amsearcharray = false;
72 amroutine->amsearchnulls = false;
73 amroutine->amstorage = false;
74 amroutine->amclusterable = false;
75 amroutine->ampredlocks = true;
76 amroutine->amcanparallel = false;
77 amroutine->amcanbuildparallel = false;
78 amroutine->amcaninclude = false;
79 amroutine->amusemaintenanceworkmem = false;
80 amroutine->amsummarizing = false;
81 amroutine->amparallelvacuumoptions =
83 amroutine->amkeytype = INT4OID;
84
85 amroutine->ambuild = hashbuild;
86 amroutine->ambuildempty = hashbuildempty;
87 amroutine->aminsert = hashinsert;
88 amroutine->aminsertcleanup = NULL;
89 amroutine->ambulkdelete = hashbulkdelete;
91 amroutine->amcanreturn = NULL;
93 amroutine->amgettreeheight = NULL;
94 amroutine->amoptions = hashoptions;
95 amroutine->amproperty = NULL;
96 amroutine->ambuildphasename = NULL;
97 amroutine->amvalidate = hashvalidate;
99 amroutine->ambeginscan = hashbeginscan;
100 amroutine->amrescan = hashrescan;
101 amroutine->amgettuple = hashgettuple;
102 amroutine->amgetbitmap = hashgetbitmap;
103 amroutine->amendscan = hashendscan;
104 amroutine->ammarkpos = NULL;
105 amroutine->amrestrpos = NULL;
106 amroutine->amestimateparallelscan = NULL;
107 amroutine->aminitparallelscan = NULL;
108 amroutine->amparallelrescan = NULL;
111
112 PG_RETURN_POINTER(amroutine);
113}
#define PG_RETURN_POINTER(x)
Definition: fmgr.h:361
bool hashinsert(Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, IndexUniqueCheck checkUnique, bool indexUnchanged, IndexInfo *indexInfo)
Definition: hash.c:255
IndexBulkDeleteResult * hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
Definition: hash.c:648
IndexBuildResult * hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
Definition: hash.c:119
StrategyNumber hashtranslatecmptype(CompareType cmptype, Oid opfamily, Oid opcintype)
Definition: hash.c:938
bool hashgettuple(IndexScanDesc scan, ScanDirection dir)
Definition: hash.c:287
void hashbuildempty(Relation index)
Definition: hash.c:205
IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys)
Definition: hash.c:371
IndexBulkDeleteResult * hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:462
CompareType hashtranslatestrategy(StrategyNumber strategy, Oid opfamily, Oid opcintype)
Definition: hash.c:930
void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys)
Definition: hash.c:401
void hashendscan(IndexScanDesc scan)
Definition: hash.c:431
int64 hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
Definition: hash.c:339
#define HASHNProcs
Definition: hash.h:358
#define HASHOPTIONS_PROC
Definition: hash.h:357
bytea * hashoptions(Datum reloptions, bool validate)
Definition: hashutil.c:275
void hashadjustmembers(Oid opfamilyoid, Oid opclassoid, List *operators, List *functions)
Definition: hashvalidate.c:263
bool hashvalidate(Oid opclassoid)
Definition: hashvalidate.c:40
#define makeNode(_type_)
Definition: nodes.h:155
void hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, double *indexCorrelation, double *indexPages)
Definition: selfuncs.c:7165
#define HTMaxStrategyNumber
Definition: stratnum.h:43
ambuildphasename_function ambuildphasename
Definition: amapi.h:297
ambuildempty_function ambuildempty
Definition: amapi.h:287
amvacuumcleanup_function amvacuumcleanup
Definition: amapi.h:291
bool amclusterable
Definition: amapi.h:261
amoptions_function amoptions
Definition: amapi.h:295
amestimateparallelscan_function amestimateparallelscan
Definition: amapi.h:309
amrestrpos_function amrestrpos
Definition: amapi.h:306
aminsert_function aminsert
Definition: amapi.h:288
amendscan_function amendscan
Definition: amapi.h:304
amtranslate_strategy_function amtranslatestrategy
Definition: amapi.h:314
uint16 amoptsprocnum
Definition: amapi.h:241
amparallelrescan_function amparallelrescan
Definition: amapi.h:311
Oid amkeytype
Definition: amapi.h:277
bool ampredlocks
Definition: amapi.h:263
uint16 amsupport
Definition: amapi.h:239
amtranslate_cmptype_function amtranslatecmptype
Definition: amapi.h:315
amcostestimate_function amcostestimate
Definition: amapi.h:293
bool amcanorderbyop
Definition: amapi.h:245
amadjustmembers_function amadjustmembers
Definition: amapi.h:299
ambuild_function ambuild
Definition: amapi.h:286
bool amstorage
Definition: amapi.h:259
uint16 amstrategies
Definition: amapi.h:237
bool amoptionalkey
Definition: amapi.h:253
amgettuple_function amgettuple
Definition: amapi.h:302
amcanreturn_function amcanreturn
Definition: amapi.h:292
bool amcanunique
Definition: amapi.h:249
amgetbitmap_function amgetbitmap
Definition: amapi.h:303
amproperty_function amproperty
Definition: amapi.h:296
ambulkdelete_function ambulkdelete
Definition: amapi.h:290
bool amsearcharray
Definition: amapi.h:255
bool amsummarizing
Definition: amapi.h:273
amvalidate_function amvalidate
Definition: amapi.h:298
ammarkpos_function ammarkpos
Definition: amapi.h:305
bool amcanmulticol
Definition: amapi.h:251
bool amusemaintenanceworkmem
Definition: amapi.h:271
ambeginscan_function ambeginscan
Definition: amapi.h:300
bool amcanparallel
Definition: amapi.h:265
amrescan_function amrescan
Definition: amapi.h:301
bool amcanorder
Definition: amapi.h:243
bool amcanbuildparallel
Definition: amapi.h:267
aminitparallelscan_function aminitparallelscan
Definition: amapi.h:310
uint8 amparallelvacuumoptions
Definition: amapi.h:275
aminsertcleanup_function aminsertcleanup
Definition: amapi.h:289
bool amcanbackward
Definition: amapi.h:247
amgettreeheight_function amgettreeheight
Definition: amapi.h:294
bool amcaninclude
Definition: amapi.h:269
bool amsearchnulls
Definition: amapi.h:257
#define VACUUM_OPTION_PARALLEL_BULKDEL
Definition: vacuum.h:48

References IndexAmRoutine::amadjustmembers, IndexAmRoutine::ambeginscan, IndexAmRoutine::ambuild, IndexAmRoutine::ambuildempty, IndexAmRoutine::ambuildphasename, IndexAmRoutine::ambulkdelete, IndexAmRoutine::amcanbackward, IndexAmRoutine::amcanbuildparallel, IndexAmRoutine::amcaninclude, IndexAmRoutine::amcanmulticol, IndexAmRoutine::amcanorder, IndexAmRoutine::amcanorderbyop, IndexAmRoutine::amcanparallel, IndexAmRoutine::amcanreturn, IndexAmRoutine::amcanunique, IndexAmRoutine::amclusterable, IndexAmRoutine::amcostestimate, IndexAmRoutine::amendscan, IndexAmRoutine::amestimateparallelscan, IndexAmRoutine::amgetbitmap, IndexAmRoutine::amgettreeheight, IndexAmRoutine::amgettuple, IndexAmRoutine::aminitparallelscan, IndexAmRoutine::aminsert, IndexAmRoutine::aminsertcleanup, IndexAmRoutine::amkeytype, IndexAmRoutine::ammarkpos, IndexAmRoutine::amoptionalkey, IndexAmRoutine::amoptions, IndexAmRoutine::amoptsprocnum, IndexAmRoutine::amparallelrescan, IndexAmRoutine::amparallelvacuumoptions, IndexAmRoutine::ampredlocks, IndexAmRoutine::amproperty, IndexAmRoutine::amrescan, IndexAmRoutine::amrestrpos, IndexAmRoutine::amsearcharray, IndexAmRoutine::amsearchnulls, IndexAmRoutine::amstorage, IndexAmRoutine::amstrategies, IndexAmRoutine::amsummarizing, IndexAmRoutine::amsupport, IndexAmRoutine::amtranslatecmptype, IndexAmRoutine::amtranslatestrategy, IndexAmRoutine::amusemaintenanceworkmem, IndexAmRoutine::amvacuumcleanup, IndexAmRoutine::amvalidate, hashadjustmembers(), hashbeginscan(), hashbuild(), hashbuildempty(), hashbulkdelete(), hashcostestimate(), hashendscan(), hashgetbitmap(), hashgettuple(), hashinsert(), HASHNProcs, hashoptions(), HASHOPTIONS_PROC, hashrescan(), hashtranslatecmptype(), hashtranslatestrategy(), hashvacuumcleanup(), hashvalidate(), HTMaxStrategyNumber, makeNode, PG_RETURN_POINTER, and VACUUM_OPTION_PARALLEL_BULKDEL.

◆ hashinsert()

bool hashinsert ( Relation  rel,
Datum values,
bool *  isnull,
ItemPointer  ht_ctid,
Relation  heapRel,
IndexUniqueCheck  checkUnique,
bool  indexUnchanged,
IndexInfo indexInfo 
)

Definition at line 255 of file hash.c.

260{
261 Datum index_values[1];
262 bool index_isnull[1];
263 IndexTuple itup;
264
265 /* convert data to a hash key; on failure, do not insert anything */
266 if (!_hash_convert_tuple(rel,
267 values, isnull,
268 index_values, index_isnull))
269 return false;
270
271 /* form an index tuple and point it at the heap tuple */
272 itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull);
273 itup->t_tid = *ht_ctid;
274
275 _hash_doinsert(rel, itup, heapRel, false);
276
277 pfree(itup);
278
279 return false;
280}

References _hash_convert_tuple(), _hash_doinsert(), index_form_tuple(), pfree(), RelationGetDescr, IndexTupleData::t_tid, and values.

Referenced by hashhandler().

◆ hashrescan()

void hashrescan ( IndexScanDesc  scan,
ScanKey  scankey,
int  nscankeys,
ScanKey  orderbys,
int  norderbys 
)

Definition at line 401 of file hash.c.

403{
405 Relation rel = scan->indexRelation;
406
408 {
409 /* Before leaving current page, deal with any killed items */
410 if (so->numKilled > 0)
411 _hash_kill_items(scan);
412 }
413
414 _hash_dropscanbuf(rel, so);
415
416 /* set position invalid (this will cause _hash_first call) */
418
419 /* Update scan key, if a new one is given */
420 if (scankey && scan->numberOfKeys > 0)
421 memcpy(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData));
422
423 so->hashso_buc_populated = false;
424 so->hashso_buc_split = false;
425}
struct ScanKeyData * keyData
Definition: relscan.h:139

References _hash_dropscanbuf(), _hash_kill_items(), HashScanOpaqueData::currPos, HashScanPosInvalidate, HashScanPosIsValid, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, if(), IndexScanDescData::indexRelation, IndexScanDescData::keyData, IndexScanDescData::numberOfKeys, HashScanOpaqueData::numKilled, and IndexScanDescData::opaque.

Referenced by hashhandler().

◆ hashtranslatecmptype()

StrategyNumber hashtranslatecmptype ( CompareType  cmptype,
Oid  opfamily,
Oid  opcintype 
)

Definition at line 938 of file hash.c.

939{
940 if (cmptype == COMPARE_EQ)
942 return InvalidStrategy;
943}
@ COMPARE_EQ
Definition: cmptype.h:36
#define InvalidStrategy
Definition: stratnum.h:24
#define HTEqualStrategyNumber
Definition: stratnum.h:41

References COMPARE_EQ, HTEqualStrategyNumber, and InvalidStrategy.

Referenced by hashhandler().

◆ hashtranslatestrategy()

CompareType hashtranslatestrategy ( StrategyNumber  strategy,
Oid  opfamily,
Oid  opcintype 
)

Definition at line 930 of file hash.c.

931{
932 if (strategy == HTEqualStrategyNumber)
933 return COMPARE_EQ;
934 return COMPARE_INVALID;
935}
@ COMPARE_INVALID
Definition: cmptype.h:33

References COMPARE_EQ, COMPARE_INVALID, and HTEqualStrategyNumber.

Referenced by hashhandler().

◆ hashvacuumcleanup()

IndexBulkDeleteResult * hashvacuumcleanup ( IndexVacuumInfo info,
IndexBulkDeleteResult stats 
)

Definition at line 648 of file hash.c.

649{
650 Relation rel = info->index;
651 BlockNumber num_pages;
652
653 /* If hashbulkdelete wasn't called, return NULL signifying no change */
654 /* Note: this covers the analyze_only case too */
655 if (stats == NULL)
656 return NULL;
657
658 /* update statistics */
659 num_pages = RelationGetNumberOfBlocks(rel);
660 stats->num_pages = num_pages;
661
662 return stats;
663}
BlockNumber num_pages
Definition: genam.h:79

References IndexVacuumInfo::index, IndexBulkDeleteResult::num_pages, and RelationGetNumberOfBlocks.

Referenced by hashhandler().