PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
hashpage.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
Include dependency graph for hashpage.c:

Go to the source code of this file.

Macros

#define USELOCKING(rel)   (!RELATION_IS_LOCAL(rel))
 

Functions

static bool _hash_alloc_buckets (Relation rel, BlockNumber firstblock, uint32 nblocks)
 
static void _hash_splitbucket (Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
static void _hash_splitbucket_guts (Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
Buffer _hash_getbuf (Relation rel, BlockNumber blkno, int access, int flags)
 
Buffer _hash_getbuf_with_condlock_cleanup (Relation rel, BlockNumber blkno, int flags)
 
Buffer _hash_getinitbuf (Relation rel, BlockNumber blkno)
 
Buffer _hash_getnewbuf (Relation rel, BlockNumber blkno, ForkNumber forkNum)
 
Buffer _hash_getbuf_with_strategy (Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
 
void _hash_relbuf (Relation rel, Buffer buf)
 
void _hash_dropbuf (Relation rel, Buffer buf)
 
void _hash_dropscanbuf (Relation rel, HashScanOpaque so)
 
uint32 _hash_metapinit (Relation rel, double num_tuples, ForkNumber forkNum)
 
void _hash_pageinit (Page page, Size size)
 
void _hash_expandtable (Relation rel, Buffer metabuf)
 
void _hash_finish_split (Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
HashMetaPage _hash_getcachedmetap (Relation rel, Buffer *metabuf, bool force_refresh)
 
Buffer _hash_getbucketbuf_from_hashkey (Relation rel, uint32 hashkey, int access, HashMetaPage *cachedmetap)
 

Macro Definition Documentation

#define USELOCKING (   rel)    (!RELATION_IS_LOCAL(rel))

Definition at line 58 of file hashpage.c.

Function Documentation

static bool _hash_alloc_buckets ( Relation  rel,
BlockNumber  firstblock,
uint32  nblocks 
)
static

Definition at line 781 of file hashpage.c.

References InvalidBlockNumber, MAIN_FORKNUM, MemSet, RelationData::rd_smgr, RelationOpenSmgr, and smgrextend().

Referenced by _hash_expandtable().

782 {
783  BlockNumber lastblock;
784  char zerobuf[BLCKSZ];
785 
786  lastblock = firstblock + nblocks - 1;
787 
788  /*
789  * Check for overflow in block number calculation; if so, we cannot extend
790  * the index anymore.
791  */
792  if (lastblock < firstblock || lastblock == InvalidBlockNumber)
793  return false;
794 
795  MemSet(zerobuf, 0, sizeof(zerobuf));
796 
797  RelationOpenSmgr(rel);
798  smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
799 
800  return true;
801 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
#define MemSet(start, val, len)
Definition: c.h:853
uint32 BlockNumber
Definition: block.h:31
#define RelationOpenSmgr(relation)
Definition: rel.h:457
#define InvalidBlockNumber
Definition: block.h:33
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:600
void _hash_dropbuf ( Relation  rel,
Buffer  buf 
)

Definition at line 256 of file hashpage.c.

References ReleaseBuffer().

Referenced by _hash_doinsert(), _hash_dropscanbuf(), _hash_expandtable(), _hash_first(), _hash_getbucketbuf_from_hashkey(), _hash_readprev(), and hashbulkdelete().

257 {
259 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
static char * buf
Definition: pg_test_fsync.c:65
void _hash_dropscanbuf ( Relation  rel,
HashScanOpaque  so 
)

Definition at line 268 of file hashpage.c.

References _hash_dropbuf(), BufferIsValid, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_split_bucket_buf, and InvalidBuffer.

Referenced by _hash_step(), hashendscan(), and hashrescan().

269 {
270  /* release pin we hold on primary bucket page */
271  if (BufferIsValid(so->hashso_bucket_buf) &&
272  so->hashso_bucket_buf != so->hashso_curbuf)
275 
276  /* release pin we hold on primary bucket page of bucket being split */
281 
282  /* release any pin we still hold */
283  if (BufferIsValid(so->hashso_curbuf))
284  _hash_dropbuf(rel, so->hashso_curbuf);
286 
287  /* reset split scan */
288  so->hashso_buc_populated = false;
289  so->hashso_buc_split = false;
290 }
#define InvalidBuffer
Definition: buf.h:25
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:256
Buffer hashso_bucket_buf
Definition: hash.h:115
bool hashso_buc_populated
Definition: hash.h:131
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
bool hashso_buc_split
Definition: hash.h:137
Buffer hashso_split_bucket_buf
Definition: hash.h:122
Buffer hashso_curbuf
Definition: hash.h:112
void _hash_expandtable ( Relation  rel,
Buffer  metabuf 
)

Definition at line 490 of file hashpage.c.

References _hash_alloc_buckets(), _hash_checkpage(), _hash_dropbuf(), _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), _hash_getnewbuf(), _hash_log2(), _hash_relbuf(), _hash_splitbucket(), Assert, BUCKET_TO_BLKNO, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, END_CRIT_SECTION, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, hashbucketcleanup(), HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_spares, HashPageGetMeta, IsBufferCleanupOK(), LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), MAIN_FORKNUM, MarkBufferDirty(), NULL, PageGetSpecialPointer, and START_CRIT_SECTION.

Referenced by _hash_doinsert().

491 {
492  HashMetaPage metap;
493  Bucket old_bucket;
494  Bucket new_bucket;
495  uint32 spare_ndx;
496  BlockNumber start_oblkno;
497  BlockNumber start_nblkno;
498  Buffer buf_nblkno;
499  Buffer buf_oblkno;
500  Page opage;
501  HashPageOpaque oopaque;
502  uint32 maxbucket;
503  uint32 highmask;
504  uint32 lowmask;
505 
506 restart_expand:
507 
508  /*
509  * Write-lock the meta page. It used to be necessary to acquire a
510  * heavyweight lock to begin a split, but that is no longer required.
511  */
513 
514  _hash_checkpage(rel, metabuf, LH_META_PAGE);
515  metap = HashPageGetMeta(BufferGetPage(metabuf));
516 
517  /*
518  * Check to see if split is still needed; someone else might have already
519  * done one while we waited for the lock.
520  *
521  * Make sure this stays in sync with _hash_doinsert()
522  */
523  if (metap->hashm_ntuples <=
524  (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
525  goto fail;
526 
527  /*
528  * Can't split anymore if maxbucket has reached its maximum possible
529  * value.
530  *
531  * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
532  * the calculation maxbucket+1 mustn't overflow). Currently we restrict
533  * to half that because of overflow looping in _hash_log2() and
534  * insufficient space in hashm_spares[]. It's moot anyway because an
535  * index with 2^32 buckets would certainly overflow BlockNumber and hence
536  * _hash_alloc_buckets() would fail, but if we supported buckets smaller
537  * than a disk block then this would be an independent constraint.
538  *
539  * If you change this, see also the maximum initial number of buckets in
540  * _hash_metapinit().
541  */
542  if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
543  goto fail;
544 
545  /*
546  * Determine which bucket is to be split, and attempt to take cleanup lock
547  * on the old bucket. If we can't get the lock, give up.
548  *
549  * The cleanup lock protects us not only against other backends, but
550  * against our own backend as well.
551  *
552  * The cleanup lock is mainly to protect the split from concurrent
553  * inserts. See src/backend/access/hash/README, Lock Definitions for
554  * further details. Due to this locking restriction, if there is any
555  * pending scan, the split will give up which is not good, but harmless.
556  */
557  new_bucket = metap->hashm_maxbucket + 1;
558 
559  old_bucket = (new_bucket & metap->hashm_lowmask);
560 
561  start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
562 
563  buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
564  if (!buf_oblkno)
565  goto fail;
566 
567  opage = BufferGetPage(buf_oblkno);
568  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
569 
570  /*
571  * We want to finish the split from a bucket as there is no apparent
572  * benefit by not doing so and it will make the code complicated to finish
573  * the split that involves multiple buckets considering the case where new
574  * split also fails. We don't need to consider the new bucket for
575  * completing the split here as it is not possible that a re-split of new
576  * bucket starts when there is still a pending split from old bucket.
577  */
578  if (H_BUCKET_BEING_SPLIT(oopaque))
579  {
580  /*
581  * Copy bucket mapping info now; refer the comment in code below where
582  * we copy this information before calling _hash_splitbucket to see
583  * why this is okay.
584  */
585  maxbucket = metap->hashm_maxbucket;
586  highmask = metap->hashm_highmask;
587  lowmask = metap->hashm_lowmask;
588 
589  /*
590  * Release the lock on metapage and old_bucket, before completing the
591  * split.
592  */
593  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
594  LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
595 
596  _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
597  highmask, lowmask);
598 
599  /* release the pin on old buffer and retry for expand. */
600  _hash_dropbuf(rel, buf_oblkno);
601 
602  goto restart_expand;
603  }
604 
605  /*
606  * Clean the tuples remaining from the previous split. This operation
607  * requires cleanup lock and we already have one on the old bucket, so
608  * let's do it. We also don't want to allow further splits from the bucket
609  * till the garbage of previous split is cleaned. This has two
610  * advantages; first, it helps in avoiding the bloat due to garbage and
611  * second is, during cleanup of bucket, we are always sure that the
612  * garbage tuples belong to most recently split bucket. On the contrary,
613  * if we allow cleanup of bucket after meta page is updated to indicate
614  * the new split and before the actual split, the cleanup operation won't
615  * be able to decide whether a tuple has been moved to the newly created
616  * bucket and may end up deleting such tuples.
617  */
618  if (H_NEEDS_SPLIT_CLEANUP(oopaque))
619  {
620  /*
621  * Copy bucket mapping info now; refer to the comment in code below
622  * where we copy this information before calling _hash_splitbucket
623  * to see why this is okay.
624  */
625  maxbucket = metap->hashm_maxbucket;
626  highmask = metap->hashm_highmask;
627  lowmask = metap->hashm_lowmask;
628 
629  /* Release the metapage lock. */
630  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
631 
632  hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
633  maxbucket, highmask, lowmask, NULL, NULL, true,
634  NULL, NULL);
635 
636  _hash_dropbuf(rel, buf_oblkno);
637 
638  goto restart_expand;
639  }
640 
641  /*
642  * There shouldn't be any active scan on new bucket.
643  *
644  * Note: it is safe to compute the new bucket's blkno here, even though we
645  * may still need to update the BUCKET_TO_BLKNO mapping. This is because
646  * the current value of hashm_spares[hashm_ovflpoint] correctly shows
647  * where we are going to put a new splitpoint's worth of buckets.
648  */
649  start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
650 
651  /*
652  * If the split point is increasing (hashm_maxbucket's log base 2
653  * increases), we need to allocate a new batch of bucket pages.
654  */
655  spare_ndx = _hash_log2(new_bucket + 1);
656  if (spare_ndx > metap->hashm_ovflpoint)
657  {
658  Assert(spare_ndx == metap->hashm_ovflpoint + 1);
659 
660  /*
661  * The number of buckets in the new splitpoint is equal to the total
662  * number already in existence, i.e. new_bucket. Currently this maps
663  * one-to-one to blocks required, but someday we may need a more
664  * complicated calculation here.
665  */
666  if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
667  {
668  /* can't split due to BlockNumber overflow */
669  _hash_relbuf(rel, buf_oblkno);
670  goto fail;
671  }
672  }
673 
674  /*
675  * Physically allocate the new bucket's primary page. We want to do this
676  * before changing the metapage's mapping info, in case we can't get the
677  * disk space. Ideally, we don't need to check for cleanup lock on new
678  * bucket as no other backend could find this bucket unless meta page is
679  * updated. However, it is good to be consistent with old bucket locking.
680  */
681  buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
682  if (!IsBufferCleanupOK(buf_nblkno))
683  {
684  _hash_relbuf(rel, buf_oblkno);
685  _hash_relbuf(rel, buf_nblkno);
686  goto fail;
687  }
688 
689 
690  /*
691  * Okay to proceed with split. Update the metapage bucket mapping info.
692  *
693  * Since we are scribbling on the metapage data right in the shared
694  * buffer, any failure in this next little bit leaves us with a big
695  * problem: the metapage is effectively corrupt but could get written back
696  * to disk. We don't really expect any failure, but just to be sure,
697  * establish a critical section.
698  */
700 
701  metap->hashm_maxbucket = new_bucket;
702 
703  if (new_bucket > metap->hashm_highmask)
704  {
705  /* Starting a new doubling */
706  metap->hashm_lowmask = metap->hashm_highmask;
707  metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
708  }
709 
710  /*
711  * If the split point is increasing (hashm_maxbucket's log base 2
712  * increases), we need to adjust the hashm_spares[] array and
713  * hashm_ovflpoint so that future overflow pages will be created beyond
714  * this new batch of bucket pages.
715  */
716  if (spare_ndx > metap->hashm_ovflpoint)
717  {
718  metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
719  metap->hashm_ovflpoint = spare_ndx;
720  }
721 
722  /* Done mucking with metapage */
724 
725  /*
726  * Copy bucket mapping info now; this saves re-accessing the meta page
727  * inside _hash_splitbucket's inner loop. Note that once we drop the
728  * split lock, other splits could begin, so these values might be out of
729  * date before _hash_splitbucket finishes. That's okay, since all it
730  * needs is to tell which of these two buckets to map hashkeys into.
731  */
732  maxbucket = metap->hashm_maxbucket;
733  highmask = metap->hashm_highmask;
734  lowmask = metap->hashm_lowmask;
735 
736  /* Write out the metapage and drop lock, but keep pin */
737  MarkBufferDirty(metabuf);
738  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
739 
740  /* Relocate records to the new bucket */
741  _hash_splitbucket(rel, metabuf,
742  old_bucket, new_bucket,
743  buf_oblkno, buf_nblkno,
744  maxbucket, highmask, lowmask);
745 
746  return;
747 
748  /* Here if decide not to split or fail to acquire old bucket lock */
749 fail:
750 
751  /* We didn't write the metapage, so just drop lock */
752  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
753 }
static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:830
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define LH_META_PAGE
Definition: hash.h:56
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1445
#define END_CRIT_SECTION()
Definition: miscadmin.h:132
uint16 hashm_ffactor
Definition: hash.h:179
uint32 hashm_highmask
Definition: hash.h:185
#define START_CRIT_SECTION()
Definition: miscadmin.h:130
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:711
uint32 BlockNumber
Definition: block.h:31
Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
Definition: hashpage.c:177
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:256
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
uint32 hashm_lowmask
Definition: hash.h:186
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:38
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
Definition: hashpage.c:781
uint32 Bucket
Definition: hash.h:34
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:86
void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:1095
unsigned int uint32
Definition: c.h:265
Buffer _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
Definition: hashpage.c:105
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:3757
uint32 hashm_ovflpoint
Definition: hash.h:187
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:158
double hashm_ntuples
Definition: hash.h:178
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:192
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
#define LH_BUCKET_PAGE
Definition: hash.h:54
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:87
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:671
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
uint32 hashm_maxbucket
Definition: hash.h:184
#define HashPageGetMeta(page)
Definition: hash.h:238
uint32 _hash_log2(uint32 num)
Definition: hashutil.c:140
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74
void _hash_finish_split ( Relation  rel,
Buffer  metabuf,
Buffer  obuf,
Bucket  obucket,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)

Definition at line 1095 of file hashpage.c.

References _hash_get_newblock_from_oldbucket(), _hash_getbuf(), _hash_relbuf(), _hash_splitbucket_guts(), Assert, BlockNumberIsValid, BUFFER_LOCK_UNLOCK, BufferGetPage, ConditionalLockBufferForCleanup(), CurrentMemoryContext, HASHCTL::entrysize, FirstOffsetNumber, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, HASH_READ, hash_search(), HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_nextblkno, HASHCTL::hcxt, InvalidBuffer, HASHCTL::keysize, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LockBuffer(), OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, and IndexTupleData::t_tid.

Referenced by _hash_doinsert(), and _hash_expandtable().

1097 {
1098  HASHCTL hash_ctl;
1099  HTAB *tidhtab;
1100  Buffer bucket_nbuf = InvalidBuffer;
1101  Buffer nbuf;
1102  Page npage;
1103  BlockNumber nblkno;
1104  BlockNumber bucket_nblkno;
1105  HashPageOpaque npageopaque;
1106  Bucket nbucket;
1107  bool found;
1108 
1109  /* Initialize hash tables used to track TIDs */
1110  memset(&hash_ctl, 0, sizeof(hash_ctl));
1111  hash_ctl.keysize = sizeof(ItemPointerData);
1112  hash_ctl.entrysize = sizeof(ItemPointerData);
1113  hash_ctl.hcxt = CurrentMemoryContext;
1114 
1115  tidhtab =
1116  hash_create("bucket ctids",
1117  256, /* arbitrary initial size */
1118  &hash_ctl,
1120 
1121  bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1122 
1123  /*
1124  * Scan the new bucket and build hash table of TIDs
1125  */
1126  for (;;)
1127  {
1128  OffsetNumber noffnum;
1129  OffsetNumber nmaxoffnum;
1130 
1131  nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1133 
1134  /* remember the primary bucket buffer to acquire cleanup lock on it. */
1135  if (nblkno == bucket_nblkno)
1136  bucket_nbuf = nbuf;
1137 
1138  npage = BufferGetPage(nbuf);
1139  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1140 
1141  /* Scan each tuple in new page */
1142  nmaxoffnum = PageGetMaxOffsetNumber(npage);
1143  for (noffnum = FirstOffsetNumber;
1144  noffnum <= nmaxoffnum;
1145  noffnum = OffsetNumberNext(noffnum))
1146  {
1147  IndexTuple itup;
1148 
1149  /* Fetch the item's TID and insert it in hash table. */
1150  itup = (IndexTuple) PageGetItem(npage,
1151  PageGetItemId(npage, noffnum));
1152 
1153  (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1154 
1155  Assert(!found);
1156  }
1157 
1158  nblkno = npageopaque->hasho_nextblkno;
1159 
1160  /*
1161  * release our write lock without modifying buffer and ensure to
1162  * retain the pin on primary bucket.
1163  */
1164  if (nbuf == bucket_nbuf)
1166  else
1167  _hash_relbuf(rel, nbuf);
1168 
1169  /* Exit loop if no more overflow pages in new bucket */
1170  if (!BlockNumberIsValid(nblkno))
1171  break;
1172  }
1173 
1174  /*
1175  * Conditionally get the cleanup lock on old and new buckets to perform
1176  * the split operation. If we don't get the cleanup locks, silently give
1177  * up and next insertion on old bucket will try again to complete the
1178  * split.
1179  */
1181  {
1182  hash_destroy(tidhtab);
1183  return;
1184  }
1185  if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1186  {
1188  hash_destroy(tidhtab);
1189  return;
1190  }
1191 
1192  npage = BufferGetPage(bucket_nbuf);
1193  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1194  nbucket = npageopaque->hasho_bucket;
1195 
1196  _hash_splitbucket_guts(rel, metabuf, obucket,
1197  nbucket, obuf, bucket_nbuf, tidhtab,
1198  maxbucket, highmask, lowmask);
1199 
1200  _hash_relbuf(rel, bucket_nbuf);
1202  hash_destroy(tidhtab);
1203 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:793
#define HASH_CONTEXT
Definition: hsearch.h:93
#define HASH_ELEM
Definition: hsearch.h:87
MemoryContext hcxt
Definition: hsearch.h:78
ItemPointerData t_tid
Definition: itup.h:37
#define InvalidBuffer
Definition: buf.h:25
Size entrysize
Definition: hsearch.h:73
uint32 BlockNumber
Definition: block.h:31
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:79
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:354
uint16 OffsetNumber
Definition: off.h:24
#define HASH_READ
Definition: hash.h:254
uint32 Bucket
Definition: hash.h:34
Definition: dynahash.c:193
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3701
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
MemoryContext CurrentMemoryContext
Definition: mcxt.c:37
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
#define HASH_BLOBS
Definition: hsearch.h:88
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:301
#define LH_OVERFLOW_PAGE
Definition: hash.h:53
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
Size keysize
Definition: hsearch.h:72
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket)
Definition: hashutil.c:402
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
static void _hash_splitbucket_guts(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:892
#define LH_BUCKET_PAGE
Definition: hash.h:54
#define Assert(condition)
Definition: c.h:671
Bucket hasho_bucket
Definition: hash.h:79
struct ItemPointerData ItemPointerData
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
BlockNumber hasho_nextblkno
Definition: hash.h:78
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
Pointer Page
Definition: bufpage.h:74
Buffer _hash_getbucketbuf_from_hashkey ( Relation  rel,
uint32  hashkey,
int  access,
HashMetaPage cachedmetap 
)

Definition at line 1274 of file hashpage.c.

References _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_hashkey2bucket(), _hash_relbuf(), Assert, BUCKET_TO_BLKNO, buf, BufferGetPage, BufferIsValid, HASH_READ, HASH_WRITE, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_prevblkno, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, NULL, and PageGetSpecialPointer.

Referenced by _hash_doinsert(), and _hash_first().

1276 {
1277  HashMetaPage metap;
1278  Buffer buf;
1279  Buffer metabuf = InvalidBuffer;
1280  Page page;
1281  Bucket bucket;
1282  BlockNumber blkno;
1283  HashPageOpaque opaque;
1284 
1285  /* We read from target bucket buffer, hence locking is must. */
1286  Assert(access == HASH_READ || access == HASH_WRITE);
1287 
1288  metap = _hash_getcachedmetap(rel, &metabuf, false);
1289  Assert(metap != NULL);
1290 
1291  /*
1292  * Loop until we get a lock on the correct target bucket.
1293  */
1294  for (;;)
1295  {
1296  /*
1297  * Compute the target bucket number, and convert to block number.
1298  */
1299  bucket = _hash_hashkey2bucket(hashkey,
1300  metap->hashm_maxbucket,
1301  metap->hashm_highmask,
1302  metap->hashm_lowmask);
1303 
1304  blkno = BUCKET_TO_BLKNO(metap, bucket);
1305 
1306  /* Fetch the primary bucket page for the bucket */
1307  buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1308  page = BufferGetPage(buf);
1309  opaque = (HashPageOpaque) PageGetSpecialPointer(page);
1310  Assert(opaque->hasho_bucket == bucket);
1311 
1312  /*
1313  * If this bucket hasn't been split, we're done.
1314  *
1315  * NB: The check for InvalidBlockNumber is only needed for on-disk
1316  * compatibility with indexes created before we started storing
1317  * hashm_maxbucket in the primary page's hasho_prevblkno.
1318  */
1319  if (opaque->hasho_prevblkno == InvalidBlockNumber ||
1320  opaque->hasho_prevblkno <= metap->hashm_maxbucket)
1321  break;
1322 
1323  /* Drop lock on this buffer, update cached metapage, and retry. */
1324  _hash_relbuf(rel, buf);
1325  metap = _hash_getcachedmetap(rel, &metabuf, true);
1326  Assert(metap != NULL);
1327  }
1328 
1329  if (BufferIsValid(metabuf))
1330  _hash_dropbuf(rel, metabuf);
1331 
1332  if (cachedmetap)
1333  *cachedmetap = metap;
1334 
1335  return buf;
1336 }
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:124
uint32 hashm_highmask
Definition: hash.h:185
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:256
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:79
uint32 hashm_lowmask
Definition: hash.h:186
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:38
#define HASH_READ
Definition: hash.h:254
uint32 Bucket
Definition: hash.h:34
BlockNumber hasho_prevblkno
Definition: hash.h:77
static char * buf
Definition: pg_test_fsync.c:65
#define HASH_WRITE
Definition: hash.h:255
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1216
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
#define LH_BUCKET_PAGE
Definition: hash.h:54
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:671
Bucket hasho_bucket
Definition: hash.h:79
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
uint32 hashm_maxbucket
Definition: hash.h:184
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74
Buffer _hash_getbuf ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags 
)

Definition at line 79 of file hashpage.c.

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), P_NEW, and ReadBuffer().

Referenced by _hash_addovflpage(), _hash_doinsert(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getovflpage(), _hash_readnext(), _hash_readprev(), _hash_splitbucket_guts(), hash_bitmap_info(), hashbulkdelete(), and pgstathashindex().

80 {
81  Buffer buf;
82 
83  if (blkno == P_NEW)
84  elog(ERROR, "hash AM does not use P_NEW");
85 
86  buf = ReadBuffer(rel, blkno);
87 
88  if (access != HASH_NOLOCK)
89  LockBuffer(buf, access);
90 
91  /* ref count and lock type are correct */
92 
93  _hash_checkpage(rel, buf, flags);
94 
95  return buf;
96 }
#define P_NEW
Definition: bufmgr.h:82
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:65
#define HASH_NOLOCK
Definition: hash.h:256
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:158
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
Buffer _hash_getbuf_with_condlock_cleanup ( Relation  rel,
BlockNumber  blkno,
int  flags 
)

Definition at line 105 of file hashpage.c.

References _hash_checkpage(), buf, ConditionalLockBufferForCleanup(), elog, ERROR, InvalidBuffer, P_NEW, ReadBuffer(), and ReleaseBuffer().

Referenced by _hash_expandtable().

106 {
107  Buffer buf;
108 
109  if (blkno == P_NEW)
110  elog(ERROR, "hash AM does not use P_NEW");
111 
112  buf = ReadBuffer(rel, blkno);
113 
115  {
116  ReleaseBuffer(buf);
117  return InvalidBuffer;
118  }
119 
120  /* ref count and lock type are correct */
121 
122  _hash_checkpage(rel, buf, flags);
123 
124  return buf;
125 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
#define P_NEW
Definition: bufmgr.h:82
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3701
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:65
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:158
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
Buffer _hash_getbuf_with_strategy ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags,
BufferAccessStrategy  bstrategy 
)

Definition at line 218 of file hashpage.c.

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), MAIN_FORKNUM, P_NEW, RBM_NORMAL, and ReadBufferExtended().

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), hashbucketcleanup(), and pgstat_hash_page().

221 {
222  Buffer buf;
223 
224  if (blkno == P_NEW)
225  elog(ERROR, "hash AM does not use P_NEW");
226 
227  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
228 
229  if (access != HASH_NOLOCK)
230  LockBuffer(buf, access);
231 
232  /* ref count and lock type are correct */
233 
234  _hash_checkpage(rel, buf, flags);
235 
236  return buf;
237 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:640
#define P_NEW
Definition: bufmgr.h:82
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:65
#define HASH_NOLOCK
Definition: hash.h:256
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:158
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
HashMetaPage _hash_getcachedmetap ( Relation  rel,
Buffer metabuf,
bool  force_refresh 
)

Definition at line 1216 of file hashpage.c.

References _hash_getbuf(), Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HASH_METAPAGE, HASH_READ, HashPageGetMeta, LH_META_PAGE, LockBuffer(), MemoryContextAlloc(), NULL, RelationData::rd_amcache, and RelationData::rd_indexcxt.

Referenced by _hash_getbucketbuf_from_hashkey(), and hashbulkdelete().

1217 {
1218  Page page;
1219 
1220  Assert(metabuf);
1221  if (force_refresh || rel->rd_amcache == NULL)
1222  {
1223  char *cache = NULL;
1224 
1225  /*
1226  * It's important that we don't set rd_amcache to an invalid
1227  * value. Either MemoryContextAlloc or _hash_getbuf could fail,
1228  * so don't install a pointer to the newly-allocated storage in the
1229  * actual relcache entry until both have succeeded.
1230  */
1231  if (rel->rd_amcache == NULL)
1232  cache = MemoryContextAlloc(rel->rd_indexcxt,
1233  sizeof(HashMetaPageData));
1234 
1235  /* Read the metapage.  Reuse the caller's pin if it already holds one. */
1236  if (BufferIsValid(*metabuf))
1237  LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1238  else
1239  *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1240  LH_META_PAGE);
1241  page = BufferGetPage(*metabuf);
1242 
1243  /* Populate the cache. */
1244  if (rel->rd_amcache == NULL)
1245  rel->rd_amcache = cache;
1246  memcpy(rel->rd_amcache, HashPageGetMeta(page),
1247  sizeof(HashMetaPageData));
1248 
1249  /* Release metapage lock, but keep the pin. */
1250  LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1251  }
1252 
 /* Cache is now valid: either it pre-existed or was just populated above. */
1253  return (HashMetaPage) rel->rd_amcache;
1254 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define LH_META_PAGE
Definition: hash.h:56
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:79
#define HASH_READ
Definition: hash.h:254
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define HASH_METAPAGE
Definition: hash.h:146
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:671
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
#define HashPageGetMeta(page)
Definition: hash.h:238
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:749
MemoryContext rd_indexcxt
Definition: rel.h:175
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
void * rd_amcache
Definition: rel.h:188
Pointer Page
Definition: bufpage.h:74
Buffer _hash_getinitbuf ( Relation  rel,
BlockNumber  blkno 
)

Definition at line 144 of file hashpage.c.

References _hash_pageinit(), buf, BufferGetPage, BufferGetPageSize, elog, ERROR, MAIN_FORKNUM, NULL, P_NEW, RBM_ZERO_AND_LOCK, and ReadBufferExtended().

Referenced by _hash_getovflpage().

145 {
146  Buffer buf;
147 
148  if (blkno == P_NEW)
149  elog(ERROR, "hash AM does not use P_NEW");
150 
152  NULL);
153 
154  /* ref count and lock type are correct */
155 
156  /* initialize the page */
158 
159  return buf;
160 }
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:471
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:640
#define P_NEW
Definition: bufmgr.h:82
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:65
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:147
#define NULL
Definition: c.h:226
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
Buffer _hash_getnewbuf ( Relation  rel,
BlockNumber  blkno,
ForkNumber  forkNum 
)

Definition at line 177 of file hashpage.c.

References _hash_pageinit(), buf, BufferGetBlockNumber(), BufferGetPage, BufferGetPageSize, elog, ERROR, HASH_WRITE, LockBuffer(), NULL, P_NEW, RBM_NORMAL, RBM_ZERO_AND_LOCK, ReadBufferExtended(), RelationGetNumberOfBlocksInFork(), and RelationGetRelationName.

Referenced by _hash_expandtable(), _hash_getovflpage(), _hash_initbitmap(), and _hash_metapinit().

178 {
179  BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
180  Buffer buf;
181 
182  if (blkno == P_NEW)
183  elog(ERROR, "hash AM does not use P_NEW");
184  if (blkno > nblocks)
185  elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
187 
188  /* smgr insists we use P_NEW to extend the relation */
189  if (blkno == nblocks)
190  {
191  buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
192  if (BufferGetBlockNumber(buf) != blkno)
193  elog(ERROR, "unexpected hash relation size: %u, should be %u",
194  BufferGetBlockNumber(buf), blkno);
195  LockBuffer(buf, HASH_WRITE);
196  }
197  else
198  {
199  buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
200  NULL);
201  }
202 
203  /* ref count and lock type are correct */
204 
205  /* initialize the page */
207 
208  return buf;
209 }
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:471
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:640
uint32 BlockNumber
Definition: block.h:31
#define P_NEW
Definition: bufmgr.h:82
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:65
#define HASH_WRITE
Definition: hash.h:255
#define RelationGetRelationName(relation)
Definition: rel.h:433
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:147
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2771
#define NULL
Definition: c.h:226
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2588
#define elog
Definition: elog.h:219
int Buffer
Definition: buf.h:23
uint32 _hash_metapinit ( Relation  rel,
double  num_tuples,
ForkNumber  forkNum 
)

Definition at line 306 of file hashpage.c.

References _hash_getnewbuf(), _hash_initbitmap(), _hash_log2(), _hash_relbuf(), Assert, BMPG_MASK, BMPG_SHIFT, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BYTE_TO_BIT, CHECK_FOR_INTERRUPTS, elog, ERROR, HASH_DEFAULT_FILLFACTOR, HASH_MAGIC, HASH_MAX_SPLITPOINTS, HASH_METAPAGE, HASH_VERSION, HashGetMaxBitmapSize, HashMetaPageData::hashm_bmshift, HashMetaPageData::hashm_bmsize, HashMetaPageData::hashm_bsize, HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_firstfree, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_magic, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_procid, HashMetaPageData::hashm_spares, HashMetaPageData::hashm_version, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HASHPROC, i, index_getprocid(), InvalidBlockNumber, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), MarkBufferDirty(), MAXALIGN, MemSet, PageGetSpecialPointer, RelationGetNumberOfBlocksInFork(), RelationGetRelationName, and RelationGetTargetPageUsage.

Referenced by hashbuild(), and hashbuildempty().

307 {
308  HashMetaPage metap;
309  HashPageOpaque pageopaque;
310  Buffer metabuf;
311  Buffer buf;
312  Page pg;
313  int32 data_width;
314  int32 item_width;
315  int32 ffactor;
316  double dnumbuckets;
317  uint32 num_buckets;
318  uint32 log2_num_buckets;
319  uint32 i;
320 
321  /* safety check */
322  if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
323  elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
325 
326  /*
327  * Determine the target fill factor (in tuples per bucket) for this index.
328  * The idea is to make the fill factor correspond to pages about as full
329  * as the user-settable fillfactor parameter says. We can compute it
330  * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
331  */
332  data_width = sizeof(uint32);
333  item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
334  sizeof(ItemIdData); /* include the line pointer */
335  ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
336  /* keep to a sane range */
337  if (ffactor < 10)
338  ffactor = 10;
339 
340  /*
341  * Choose the number of initial bucket pages to match the fill factor
342  * given the estimated number of tuples. We round up the result to the
343  * next power of 2, however, and always force at least 2 bucket pages. The
344  * upper limit is determined by considerations explained in
345  * _hash_expandtable().
346  */
347  dnumbuckets = num_tuples / ffactor;
348  if (dnumbuckets <= 2.0)
349  num_buckets = 2;
350  else if (dnumbuckets >= (double) 0x40000000)
351  num_buckets = 0x40000000;
352  else
353  num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);
354 
355  log2_num_buckets = _hash_log2(num_buckets);
356  Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
357  Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
358 
359  /*
360  * We initialize the metapage, the first N bucket pages, and the first
361  * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
362  * calls to occur. This ensures that the smgr level has the right idea of
363  * the physical index length.
364  */
365  metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
366  pg = BufferGetPage(metabuf);
367 
368  pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
369  pageopaque->hasho_prevblkno = InvalidBlockNumber;
370  pageopaque->hasho_nextblkno = InvalidBlockNumber;
371  pageopaque->hasho_bucket = -1;
372  pageopaque->hasho_flag = LH_META_PAGE;
373  pageopaque->hasho_page_id = HASHO_PAGE_ID;
374 
375  metap = HashPageGetMeta(pg);
376 
377  metap->hashm_magic = HASH_MAGIC;
378  metap->hashm_version = HASH_VERSION;
379  metap->hashm_ntuples = 0;
380  metap->hashm_nmaps = 0;
381  metap->hashm_ffactor = ffactor;
382  metap->hashm_bsize = HashGetMaxBitmapSize(pg);
383  /* find largest bitmap array size that will fit in page size */
384  for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
385  {
386  if ((1 << i) <= metap->hashm_bsize)
387  break;
388  }
389  Assert(i > 0);
390  metap->hashm_bmsize = 1 << i;
391  metap->hashm_bmshift = i + BYTE_TO_BIT;
392  Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
393 
394  /*
395  * Label the index with its primary hash support function's OID. This is
396  * pretty useless for normal operation (in fact, hashm_procid is not used
397  * anywhere), but it might be handy for forensic purposes so we keep it.
398  */
399  metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
400 
401  /*
402  * We initialize the index with N buckets, 0 .. N-1, occupying physical
403  * blocks 1 to N. The first freespace bitmap page is in block N+1. Since
404  * N is a power of 2, we can set the masks this way:
405  */
406  metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
407  metap->hashm_highmask = (num_buckets << 1) - 1;
408 
409  MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
410  MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
411 
412  /* Set up mapping for one spare page after the initial splitpoints */
413  metap->hashm_spares[log2_num_buckets] = 1;
414  metap->hashm_ovflpoint = log2_num_buckets;
415  metap->hashm_firstfree = 0;
416 
417  /*
418  * Release buffer lock on the metapage while we initialize buckets.
419  * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
420  * won't accomplish anything. It's a bad idea to hold buffer locks for
421  * long intervals in any case, since that can block the bgwriter.
422  */
423  MarkBufferDirty(metabuf);
424  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
425 
426  /*
427  * Initialize the first N buckets
428  */
429  for (i = 0; i < num_buckets; i++)
430  {
431  /* Allow interrupts, in case N is huge */
433 
434  buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
435  pg = BufferGetPage(buf);
436  pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
437 
438  /*
439  * Set hasho_prevblkno with current hashm_maxbucket. This value will
440  * be used to validate cached HashMetaPageData. See
441  * _hash_getbucketbuf_from_hashkey().
442  */
443  pageopaque->hasho_prevblkno = metap->hashm_maxbucket;
444  pageopaque->hasho_nextblkno = InvalidBlockNumber;
445  pageopaque->hasho_bucket = i;
446  pageopaque->hasho_flag = LH_BUCKET_PAGE;
447  pageopaque->hasho_page_id = HASHO_PAGE_ID;
448  MarkBufferDirty(buf);
449  _hash_relbuf(rel, buf);
450  }
451 
452  /* Now reacquire buffer lock on metapage */
454 
455  /*
456  * Initialize first bitmap page
457  */
458  _hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
459 
460  /* all done */
461  MarkBufferDirty(metabuf);
462  _hash_relbuf(rel, metabuf);
463 
464  return num_buckets;
465 }
#define HASH_DEFAULT_FILLFACTOR
Definition: hash.h:211
uint16 hashm_bmshift
Definition: hash.h:183
uint16 hasho_page_id
Definition: hash.h:81
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
RegProcedure hashm_procid
Definition: hash.h:191
#define LH_META_PAGE
Definition: hash.h:56
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1445
#define BYTE_TO_BIT
Definition: hash.h:216
uint32 hashm_magic
Definition: hash.h:176
uint16 hashm_ffactor
Definition: hash.h:179
uint32 hashm_highmask
Definition: hash.h:185
#define MemSet(start, val, len)
Definition: c.h:853
Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
Definition: hashpage.c:177
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define HASH_VERSION
Definition: hash.h:149
uint32 hashm_lowmask
Definition: hash.h:186
signed int int32
Definition: c.h:253
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:38
#define HASH_MAX_SPLITPOINTS
Definition: hash.h:171
#define HASH_MAGIC
Definition: hash.h:148
#define HashGetMaxBitmapSize(page)
Definition: hash.h:234
BlockNumber hasho_prevblkno
Definition: hash.h:77
#define ERROR
Definition: elog.h:43
uint32 hashm_version
Definition: hash.h:177
uint32 hashm_nmaps
Definition: hash.h:190
static char * buf
Definition: pg_test_fsync.c:65
#define BMPG_MASK(metap)
Definition: hash.h:229
#define RelationGetRelationName(relation)
Definition: rel.h:433
unsigned int uint32
Definition: c.h:265
struct ItemIdData ItemIdData
#define BMPG_SHIFT(metap)
Definition: hash.h:228
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
uint32 hashm_ovflpoint
Definition: hash.h:187
uint16 hashm_bsize
Definition: hash.h:180
#define HASH_METAPAGE
Definition: hash.h:146
double hashm_ntuples
Definition: hash.h:178
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
uint32 hashm_firstfree
Definition: hash.h:189
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:192
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
#define LH_BUCKET_PAGE
Definition: hash.h:54
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2771
#define RelationGetTargetPageUsage(relation, defaultff)
Definition: rel.h:297
#define Assert(condition)
Definition: c.h:671
Bucket hasho_bucket
Definition: hash.h:79
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
#define HASHO_PAGE_ID
Definition: hash.h:96
#define MAXALIGN(LEN)
Definition: c.h:584
uint32 hashm_maxbucket
Definition: hash.h:184
uint16 hasho_flag
Definition: hash.h:80
#define HashPageGetMeta(page)
Definition: hash.h:238
uint32 _hash_log2(uint32 num)
Definition: hashutil.c:140
#define HASHPROC
Definition: hash.h:269
int i
BlockNumber hasho_nextblkno
Definition: hash.h:78
uint16 hashm_bmsize
Definition: hash.h:181
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:97
#define elog
Definition: elog.h:219
void _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno, ForkNumber forkNum)
Definition: hashovfl.c:546
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]
Definition: hash.h:194
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:821
void _hash_pageinit ( Page  page,
Size  size 
)

Definition at line 471 of file hashpage.c.

References Assert, PageInit(), and PageIsNew.

Referenced by _hash_getinitbuf(), and _hash_getnewbuf().

472 {
 /* Only a brand-new (all-zeroes) page may be initialized. */
473  Assert(PageIsNew(page));
 /* Standard page layout, reserving special space for the hash opaque data. */
474  PageInit(page, size, sizeof(HashPageOpaqueData));
475 }
#define Assert(condition)
Definition: c.h:671
#define PageIsNew(page)
Definition: bufpage.h:226
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:41
static void _hash_splitbucket ( Relation  rel,
Buffer  metabuf,
Bucket  obucket,
Bucket  nbucket,
Buffer  obuf,
Buffer  nbuf,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)
static

Definition at line 830 of file hashpage.c.

References _hash_relbuf(), _hash_splitbucket_guts(), BufferGetPage, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, InvalidBlockNumber, LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_PAGE, NULL, and PageGetSpecialPointer.

Referenced by _hash_expandtable().

839 {
840  Page opage;
841  Page npage;
842  HashPageOpaque oopaque;
843  HashPageOpaque nopaque;
844 
845  opage = BufferGetPage(obuf);
846  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
847 
848  /*
849  * Mark the old bucket to indicate that split is in progress. (At
850  * operation end, we will clear the split-in-progress flag.) Also,
851  * for a primary bucket page, hasho_prevblkno stores the number of
852  * buckets that existed as of the last split, so we must update that
853  * value here.
854  */
855  oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
856  oopaque->hasho_prevblkno = maxbucket;
857 
858  npage = BufferGetPage(nbuf);
859 
860  /*
861  * initialize the new bucket's primary page and mark it to indicate that
862  * split is in progress.
863  */
864  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
865  nopaque->hasho_prevblkno = maxbucket;
867  nopaque->hasho_bucket = nbucket;
869  nopaque->hasho_page_id = HASHO_PAGE_ID;
870 
871  _hash_splitbucket_guts(rel, metabuf, obucket,
872  nbucket, obuf, nbuf, NULL,
873  maxbucket, highmask, lowmask);
874 
875  /* all done, now release the locks and pins on primary buckets. */
876  _hash_relbuf(rel, obuf);
877  _hash_relbuf(rel, nbuf);
878 }
uint16 hasho_page_id
Definition: hash.h:81
BlockNumber hasho_prevblkno
Definition: hash.h:77
#define LH_BUCKET_BEING_SPLIT
Definition: hash.h:58
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define LH_BUCKET_BEING_POPULATED
Definition: hash.h:57
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
static void _hash_splitbucket_guts(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:892
#define LH_BUCKET_PAGE
Definition: hash.h:54
#define NULL
Definition: c.h:226
Bucket hasho_bucket
Definition: hash.h:79
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
#define HASHO_PAGE_ID
Definition: hash.h:96
uint16 hasho_flag
Definition: hash.h:80
BlockNumber hasho_nextblkno
Definition: hash.h:78
Pointer Page
Definition: bufpage.h:74
static void _hash_splitbucket_guts ( Relation  rel,
Buffer  metabuf,
Bucket  obucket,
Bucket  nbucket,
Buffer  obuf,
Buffer  nbuf,
HTAB htab,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)
static

Definition at line 892 of file hashpage.c.

References _hash_addovflpage(), _hash_get_indextuple_hashkey(), _hash_getbuf(), _hash_hashkey2bucket(), _hash_pgaddtup(), _hash_relbuf(), Assert, BlockNumberIsValid, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, CopyIndexTuple(), FirstOffsetNumber, HASH_FIND, HASH_READ, hash_search(), HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, INDEX_MOVED_BY_SPLIT_MASK, IndexTupleDSize, ItemIdIsDead, LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LockBuffer(), MarkBufferDirty(), MAXALIGN, OffsetNumberNext, PageGetFreeSpace(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, pfree(), IndexTupleData::t_info, and IndexTupleData::t_tid.

Referenced by _hash_finish_split(), and _hash_splitbucket().

902 {
903  Buffer bucket_obuf;
904  Buffer bucket_nbuf;
905  Page opage;
906  Page npage;
907  HashPageOpaque oopaque;
908  HashPageOpaque nopaque;
909 
910  bucket_obuf = obuf;
911  opage = BufferGetPage(obuf);
912  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
913 
914  bucket_nbuf = nbuf;
915  npage = BufferGetPage(nbuf);
916  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
917 
918  /*
919  * Partition the tuples in the old bucket between the old bucket and the
920  * new bucket, advancing along the old bucket's overflow bucket chain and
921  * adding overflow pages to the new bucket as needed. Outer loop iterates
922  * once per page in old bucket.
923  */
924  for (;;)
925  {
926  BlockNumber oblkno;
927  OffsetNumber ooffnum;
928  OffsetNumber omaxoffnum;
929 
930  /* Scan each tuple in old page */
931  omaxoffnum = PageGetMaxOffsetNumber(opage);
932  for (ooffnum = FirstOffsetNumber;
933  ooffnum <= omaxoffnum;
934  ooffnum = OffsetNumberNext(ooffnum))
935  {
936  IndexTuple itup;
937  Size itemsz;
938  Bucket bucket;
939  bool found = false;
940 
941  /* skip dead tuples */
942  if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
943  continue;
944 
945  /*
946  * Before inserting a tuple, probe the hash table containing TIDs
947  * of tuples belonging to new bucket, if we find a match, then
948  * skip that tuple, else fetch the item's hash key (conveniently
949  * stored in the item) and determine which bucket it now belongs
950  * in.
951  */
952  itup = (IndexTuple) PageGetItem(opage,
953  PageGetItemId(opage, ooffnum));
954 
955  if (htab)
956  (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
957 
958  if (found)
959  continue;
960 
962  maxbucket, highmask, lowmask);
963 
964  if (bucket == nbucket)
965  {
966  IndexTuple new_itup;
967 
968  /*
969  * make a copy of index tuple as we have to scribble on it.
970  */
971  new_itup = CopyIndexTuple(itup);
972 
973  /*
974  * mark the index tuple as moved by split, such tuples are
975  * skipped by scan if there is split in progress for a bucket.
976  */
977  new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
978 
979  /*
980  * insert the tuple into the new bucket. if it doesn't fit on
981  * the current page in the new bucket, we must allocate a new
982  * overflow page and place the tuple on that page instead.
983  */
984  itemsz = IndexTupleDSize(*new_itup);
985  itemsz = MAXALIGN(itemsz);
986 
987  if (PageGetFreeSpace(npage) < itemsz)
988  {
989  /* write out nbuf and drop lock, but keep pin */
990  MarkBufferDirty(nbuf);
992  /* chain to a new overflow page */
993  nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
994  npage = BufferGetPage(nbuf);
995  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
996  }
997 
998  /*
999  * Insert tuple on new page, using _hash_pgaddtup to ensure
1000  * correct ordering by hashkey. This is a tad inefficient
1001  * since we may have to shuffle itempointers repeatedly.
1002  * Possible future improvement: accumulate all the items for
1003  * the new page and qsort them before insertion.
1004  */
1005  (void) _hash_pgaddtup(rel, nbuf, itemsz, new_itup);
1006 
1007  /* be tidy */
1008  pfree(new_itup);
1009  }
1010  else
1011  {
1012  /*
1013  * the tuple stays on this page, so nothing to do.
1014  */
1015  Assert(bucket == obucket);
1016  }
1017  }
1018 
1019  oblkno = oopaque->hasho_nextblkno;
1020 
1021  /* retain the pin on the old primary bucket */
1022  if (obuf == bucket_obuf)
1024  else
1025  _hash_relbuf(rel, obuf);
1026 
1027  /* Exit loop if no more overflow pages in old bucket */
1028  if (!BlockNumberIsValid(oblkno))
1029  break;
1030 
1031  /* Else, advance to next old page */
1032  obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1033  opage = BufferGetPage(obuf);
1034  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1035  }
1036 
1037  /*
1038  * We're at the end of the old bucket chain, so we're done partitioning
1039  * the tuples. Mark the old and new buckets to indicate split is
1040  * finished.
1041  *
1042  * To avoid deadlocks due to locking order of buckets, first lock the old
1043  * bucket and then the new bucket.
1044  */
1045  if (nbuf == bucket_nbuf)
1046  {
1047  MarkBufferDirty(bucket_nbuf);
1048  LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1049  }
1050  else
1051  {
1052  MarkBufferDirty(nbuf);
1053  _hash_relbuf(rel, nbuf);
1054  }
1055 
1056  LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
1057  opage = BufferGetPage(bucket_obuf);
1058  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1059 
1060  LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
1061  npage = BufferGetPage(bucket_nbuf);
1062  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1063 
1064  oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1066 
1067  /*
1068  * After the split is finished, mark the old bucket to indicate that it
1069  * contains deletable tuples. Vacuum will clear split-cleanup flag after
1070  * deleting such tuples.
1071  */
1073 
1074  /*
1075  * now write the buffers, here we don't release the locks as caller is
1076  * responsible to release locks.
1077  */
1078  MarkBufferDirty(bucket_obuf);
1079  MarkBufferDirty(bucket_nbuf);
1080 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:124
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1445
ItemPointerData t_tid
Definition: itup.h:37
uint32 BlockNumber
Definition: block.h:31
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:79
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:59
#define ItemIdIsDead(itemId)
Definition: itemid.h:112
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:354
Size PageGetFreeSpace(Page page)
Definition: bufpage.c:582
uint16 OffsetNumber
Definition: off.h:24
#define HASH_READ
Definition: hash.h:254
uint32 Bucket
Definition: hash.h:34
void pfree(void *pointer)
Definition: mcxt.c:992
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:232
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:434
#define IndexTupleDSize(itup)
Definition: itup.h:71
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
#define LH_BUCKET_BEING_SPLIT
Definition: hash.h:58
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
#define LH_BUCKET_BEING_POPULATED
Definition: hash.h:57
#define LH_OVERFLOW_PAGE
Definition: hash.h:53
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:245
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:671
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
size_t Size
Definition: c.h:353
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:84
Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
Definition: hashovfl.c:109
#define MAXALIGN(LEN)
Definition: c.h:584
OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, Size itemsize, IndexTuple itup)
Definition: hashinsert.c:211
uint16 hasho_flag
Definition: hash.h:80
BlockNumber hasho_nextblkno
Definition: hash.h:78
unsigned short t_info
Definition: itup.h:49
#define INDEX_MOVED_BY_SPLIT_MASK
Definition: hash.h:208
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
Pointer Page
Definition: bufpage.h:74