PostgreSQL Source Code  git master
hashpage.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/smgr.h"
Include dependency graph for hashpage.c:


Functions

static bool _hash_alloc_buckets (Relation rel, BlockNumber firstblock, uint32 nblocks)
 
static void _hash_splitbucket (Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
static void log_split_page (Relation rel, Buffer buf)
 
Buffer _hash_getbuf (Relation rel, BlockNumber blkno, int access, int flags)
 
Buffer _hash_getbuf_with_condlock_cleanup (Relation rel, BlockNumber blkno, int flags)
 
Buffer _hash_getinitbuf (Relation rel, BlockNumber blkno)
 
void _hash_initbuf (Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage)
 
Buffer _hash_getnewbuf (Relation rel, BlockNumber blkno, ForkNumber forkNum)
 
Buffer _hash_getbuf_with_strategy (Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
 
void _hash_relbuf (Relation rel, Buffer buf)
 
void _hash_dropbuf (Relation rel, Buffer buf)
 
void _hash_dropscanbuf (Relation rel, HashScanOpaque so)
 
uint32 _hash_init (Relation rel, double num_tuples, ForkNumber forkNum)
 
void _hash_init_metabuffer (Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage)
 
void _hash_pageinit (Page page, Size size)
 
void _hash_expandtable (Relation rel, Buffer metabuf)
 
void _hash_finish_split (Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
HashMetaPage _hash_getcachedmetap (Relation rel, Buffer *metabuf, bool force_refresh)
 
Buffer _hash_getbucketbuf_from_hashkey (Relation rel, uint32 hashkey, int access, HashMetaPage *cachedmetap)
 

Function Documentation

◆ _hash_alloc_buckets()

static bool _hash_alloc_buckets ( Relation  rel,
BlockNumber  firstblock,
uint32  nblocks 
)
static

Definition at line 988 of file hashpage.c.

989 {
990  BlockNumber lastblock;
991  PGAlignedBlock zerobuf;
992  Page page;
993  HashPageOpaque ovflopaque;
994 
995  lastblock = firstblock + nblocks - 1;
996 
997  /*
998  * Check for overflow in block number calculation; if so, we cannot extend
999  * the index anymore.
1000  */
1001  if (lastblock < firstblock || lastblock == InvalidBlockNumber)
1002  return false;
1003 
1004  page = (Page) zerobuf.data;
1005 
1006  /*
1007  * Initialize the page. Just zeroing the page won't work; see
1008  * _hash_freeovflpage for similar usage. We take care to make the special
1009  * space valid for the benefit of tools such as pageinspect.
1010  */
1011  _hash_pageinit(page, BLCKSZ);
1012 
1013  ovflopaque = HashPageGetOpaque(page);
1014 
1015  ovflopaque->hasho_prevblkno = InvalidBlockNumber;
1016  ovflopaque->hasho_nextblkno = InvalidBlockNumber;
1017  ovflopaque->hasho_bucket = InvalidBucket;
1018  ovflopaque->hasho_flag = LH_UNUSED_PAGE;
1019  ovflopaque->hasho_page_id = HASHO_PAGE_ID;
1020 
1021  if (RelationNeedsWAL(rel))
1022  log_newpage(&rel->rd_node,
1023  MAIN_FORKNUM,
1024  lastblock,
1025  zerobuf.data,
1026  true);
1027 
1028  PageSetChecksumInplace(page, lastblock);
1029  smgrextend(RelationGetSmgr(rel), MAIN_FORKNUM, lastblock, zerobuf.data,
1030  false);
1031 
1032  return true;
1033 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1539
Pointer Page
Definition: bufpage.h:78
#define HashPageGetOpaque(page)
Definition: hash.h:88
#define LH_UNUSED_PAGE
Definition: hash.h:53
#define HASHO_PAGE_ID
Definition: hash.h:101
#define InvalidBucket
Definition: hash.h:37
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:596
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:556
#define RelationNeedsWAL(relation)
Definition: rel.h:613
@ MAIN_FORKNUM
Definition: relpath.h:43
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:493
BlockNumber hasho_nextblkno
Definition: hash.h:80
uint16 hasho_flag
Definition: hash.h:82
BlockNumber hasho_prevblkno
Definition: hash.h:79
uint16 hasho_page_id
Definition: hash.h:83
Bucket hasho_bucket
Definition: hash.h:81
RelFileNode rd_node
Definition: rel.h:56
char data[BLCKSZ]
Definition: c.h:1138
XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std)
Definition: xloginsert.c:1083

References _hash_pageinit(), PGAlignedBlock::data, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetOpaque, InvalidBlockNumber, InvalidBucket, LH_UNUSED_PAGE, log_newpage(), MAIN_FORKNUM, PageSetChecksumInplace(), RelationData::rd_node, RelationGetSmgr(), RelationNeedsWAL, and smgrextend().

Referenced by _hash_expandtable().
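
The wraparound guard at the top of the listing (lastblock < firstblock || lastblock == InvalidBlockNumber) is plain 32-bit arithmetic and can be checked in isolation. A minimal standalone sketch, assuming nothing beyond stdint.h (the BlockNumber typedef and batch_fits() here are illustrative, not PostgreSQL source):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/* Mirrors the overflow test in _hash_alloc_buckets(): reject a batch
 * whose last block would wrap past the end of the block number space. */
static int
batch_fits(BlockNumber firstblock, uint32_t nblocks)
{
    BlockNumber lastblock = firstblock + nblocks - 1;

    return !(lastblock < firstblock || lastblock == InvalidBlockNumber);
}

int
main(void)
{
    printf("%d\n", batch_fits(100, 1024));         /* 1: fits */
    printf("%d\n", batch_fits(0xFFFFFFF0, 0x100)); /* 0: wraps around */
    return 0;
}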

◆ _hash_dropbuf()

void _hash_dropbuf ( Relation  rel,
Buffer  buf 
)

Definition at line 277 of file hashpage.c.

278 {
279  ReleaseBuffer(buf);
280 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3915
static char * buf
Definition: pg_test_fsync.c:67

References buf, and ReleaseBuffer().

Referenced by _hash_doinsert(), _hash_dropscanbuf(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_getbucketbuf_from_hashkey(), _hash_next(), _hash_readprev(), and hashbulkdelete().

◆ _hash_dropscanbuf()

void _hash_dropscanbuf ( Relation  rel,
HashScanOpaque  so 
)

Definition at line 289 of file hashpage.c.

290 {
291  /* release pin we hold on primary bucket page */
292  if (BufferIsValid(so->hashso_bucket_buf) &&
293  so->hashso_bucket_buf != so->currPos.buf)
294  _hash_dropbuf(rel, so->hashso_bucket_buf);
295  so->hashso_bucket_buf = InvalidBuffer;
296 
297  /* release pin we hold on primary bucket page of bucket being split */
298  if (BufferIsValid(so->hashso_split_bucket_buf) &&
299  so->hashso_split_bucket_buf != so->currPos.buf)
300  _hash_dropbuf(rel, so->hashso_split_bucket_buf);
301  so->hashso_split_bucket_buf = InvalidBuffer;
302 
303  /* release any pin we still hold */
304  if (BufferIsValid(so->currPos.buf))
305  _hash_dropbuf(rel, so->currPos.buf);
306  so->currPos.buf = InvalidBuffer;
307 
308  /* reset split scan */
309  so->hashso_buc_populated = false;
310  so->hashso_buc_split = false;
311 }
#define InvalidBuffer
Definition: buf.h:25
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:277
bool hashso_buc_split
Definition: hash.h:180
HashScanPosData currPos
Definition: hash.h:189
bool hashso_buc_populated
Definition: hash.h:174
Buffer hashso_split_bucket_buf
Definition: hash.h:171
Buffer hashso_bucket_buf
Definition: hash.h:164
Buffer buf
Definition: hash.h:111

References _hash_dropbuf(), HashScanPosData::buf, BufferIsValid, HashScanOpaqueData::currPos, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_split_bucket_buf, and InvalidBuffer.

Referenced by _hash_next(), hashendscan(), and hashrescan().

◆ _hash_expandtable()

void _hash_expandtable ( Relation  rel,
Buffer  metabuf 
)

Definition at line 614 of file hashpage.c.

615 {
616  HashMetaPage metap;
617  Bucket old_bucket;
618  Bucket new_bucket;
619  uint32 spare_ndx;
620  BlockNumber start_oblkno;
621  BlockNumber start_nblkno;
622  Buffer buf_nblkno;
623  Buffer buf_oblkno;
624  Page opage;
625  Page npage;
626  HashPageOpaque oopaque;
627  HashPageOpaque nopaque;
628  uint32 maxbucket;
629  uint32 highmask;
630  uint32 lowmask;
631  bool metap_update_masks = false;
632  bool metap_update_splitpoint = false;
633 
634 restart_expand:
635 
636  /*
637  * Write-lock the meta page. It used to be necessary to acquire a
638  * heavyweight lock to begin a split, but that is no longer required.
639  */
640  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
641 
642  _hash_checkpage(rel, metabuf, LH_META_PAGE);
643  metap = HashPageGetMeta(BufferGetPage(metabuf));
644 
645  /*
646  * Check to see if split is still needed; someone else might have already
647  * done one while we waited for the lock.
648  *
649  * Make sure this stays in sync with _hash_doinsert()
650  */
651  if (metap->hashm_ntuples <=
652  (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
653  goto fail;
654 
655  /*
656  * Can't split anymore if maxbucket has reached its maximum possible
657  * value.
658  *
659  * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
660  * the calculation maxbucket+1 mustn't overflow). Currently we restrict
661  * to half that to prevent failure of pg_ceil_log2_32() and insufficient
662  * space in hashm_spares[]. It's moot anyway because an index with 2^32
663  * buckets would certainly overflow BlockNumber and hence
664  * _hash_alloc_buckets() would fail, but if we supported buckets smaller
665  * than a disk block then this would be an independent constraint.
666  *
667  * If you change this, see also the maximum initial number of buckets in
668  * _hash_init().
669  */
670  if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
671  goto fail;
672 
673  /*
674  * Determine which bucket is to be split, and attempt to take cleanup lock
675  * on the old bucket. If we can't get the lock, give up.
676  *
677  * The cleanup lock protects us not only against other backends, but
678  * against our own backend as well.
679  *
680  * The cleanup lock is mainly to protect the split from concurrent
681  * inserts. See src/backend/access/hash/README, Lock Definitions for
682  * further details. Due to this locking restriction, if there is any
683  * pending scan, the split will give up, which is not good but harmless.
684  */
685  new_bucket = metap->hashm_maxbucket + 1;
686 
687  old_bucket = (new_bucket & metap->hashm_lowmask);
688 
689  start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
690 
691  buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
692  if (!buf_oblkno)
693  goto fail;
694 
695  opage = BufferGetPage(buf_oblkno);
696  oopaque = HashPageGetOpaque(opage);
697 
698  /*
699  * We want to finish any pending split from this bucket before starting a
700  * new one: there is no apparent benefit in deferring it, and the code to
701  * finish a split involving multiple buckets would become complicated if
702  * the new split also failed. We need not consider the new bucket for
703  * completing the split here, as a re-split of the new bucket cannot start
704  * while there is still a pending split from the old bucket.
705  */
706  if (H_BUCKET_BEING_SPLIT(oopaque))
707  {
708  /*
709  * Copy bucket mapping info now; refer to the comment in code below where
710  * we copy this information before calling _hash_splitbucket to see
711  * why this is okay.
712  */
713  maxbucket = metap->hashm_maxbucket;
714  highmask = metap->hashm_highmask;
715  lowmask = metap->hashm_lowmask;
716 
717  /*
718  * Release the lock on metapage and old_bucket, before completing the
719  * split.
720  */
721  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
722  LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
723 
724  _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
725  highmask, lowmask);
726 
727  /* release the pin on old buffer and retry for expand. */
728  _hash_dropbuf(rel, buf_oblkno);
729 
730  goto restart_expand;
731  }
732 
733  /*
734  * Clean up the tuples remaining from the previous split. This operation
735  * requires cleanup lock and we already have one on the old bucket, so
736  * let's do it. We also don't want to allow further splits from the bucket
737  * until the garbage of the previous split is cleaned. This has two
738  * advantages: first, it helps avoid bloat due to garbage; second, during
739  * cleanup of the bucket we are always sure that the garbage tuples
740  * belong to the most recently split bucket. By contrast, if we allowed
741  * cleanup of a bucket after the meta page had been updated to indicate
742  * the new split but before the actual split, the cleanup operation could
743  * not decide whether a tuple had been moved to the newly created bucket,
744  * and would end up deleting such tuples.
745  */
746  if (H_NEEDS_SPLIT_CLEANUP(oopaque))
747  {
748  /*
749  * Copy bucket mapping info now; refer to the comment in code below
750  * where we copy this information before calling _hash_splitbucket to
751  * see why this is okay.
752  */
753  maxbucket = metap->hashm_maxbucket;
754  highmask = metap->hashm_highmask;
755  lowmask = metap->hashm_lowmask;
756 
757  /* Release the metapage lock. */
758  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
759 
760  hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
761  maxbucket, highmask, lowmask, NULL, NULL, true,
762  NULL, NULL);
763 
764  _hash_dropbuf(rel, buf_oblkno);
765 
766  goto restart_expand;
767  }
768 
769  /*
770  * There shouldn't be any active scan on new bucket.
771  *
772  * Note: it is safe to compute the new bucket's blkno here, even though we
773  * may still need to update the BUCKET_TO_BLKNO mapping. This is because
774  * the current value of hashm_spares[hashm_ovflpoint] correctly shows
775  * where we are going to put a new splitpoint's worth of buckets.
776  */
777  start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
778 
779  /*
780  * If the split point is increasing we need to allocate a new batch of
781  * bucket pages.
782  */
783  spare_ndx = _hash_spareindex(new_bucket + 1);
784  if (spare_ndx > metap->hashm_ovflpoint)
785  {
786  uint32 buckets_to_add;
787 
788  Assert(spare_ndx == metap->hashm_ovflpoint + 1);
789 
790  /*
791  * We treat allocation of buckets as a separate WAL-logged action.
792  * Even if we fail after this operation, we won't leak bucket pages;
793  * rather, the next split will consume this space. In any case, even
794  * without failure we don't use all the space in one split operation.
795  */
796  buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
797  if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
798  {
799  /* can't split due to BlockNumber overflow */
800  _hash_relbuf(rel, buf_oblkno);
801  goto fail;
802  }
803  }
804 
805  /*
806  * Physically allocate the new bucket's primary page. We want to do this
807  * before changing the metapage's mapping info, in case we can't get the
808  * disk space. Ideally, we don't need to check for cleanup lock on new
809  * bucket as no other backend could find this bucket unless meta page is
810  * updated. However, it is good to be consistent with old bucket locking.
811  */
812  buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
813  if (!IsBufferCleanupOK(buf_nblkno))
814  {
815  _hash_relbuf(rel, buf_oblkno);
816  _hash_relbuf(rel, buf_nblkno);
817  goto fail;
818  }
819 
820  /*
821  * Since we are scribbling on the pages in the shared buffers, establish a
822  * critical section. Any failure in this next code leaves us with a big
823  * problem: the metapage is effectively corrupt but could get written back
824  * to disk.
825  */
826  START_CRIT_SECTION();
827 
828  /*
829  * Okay to proceed with split. Update the metapage bucket mapping info.
830  */
831  metap->hashm_maxbucket = new_bucket;
832 
833  if (new_bucket > metap->hashm_highmask)
834  {
835  /* Starting a new doubling */
836  metap->hashm_lowmask = metap->hashm_highmask;
837  metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
838  metap_update_masks = true;
839  }
840 
841  /*
842  * If the split point is increasing we need to adjust the hashm_spares[]
843  * array and hashm_ovflpoint so that future overflow pages will be created
844  * beyond this new batch of bucket pages.
845  */
846  if (spare_ndx > metap->hashm_ovflpoint)
847  {
848  metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
849  metap->hashm_ovflpoint = spare_ndx;
850  metap_update_splitpoint = true;
851  }
852 
853  MarkBufferDirty(metabuf);
854 
855  /*
856  * Copy bucket mapping info now; this saves re-accessing the meta page
857  * inside _hash_splitbucket's inner loop. Note that once we drop the
858  * split lock, other splits could begin, so these values might be out of
859  * date before _hash_splitbucket finishes. That's okay, since all it
860  * needs is to tell which of these two buckets to map hashkeys into.
861  */
862  maxbucket = metap->hashm_maxbucket;
863  highmask = metap->hashm_highmask;
864  lowmask = metap->hashm_lowmask;
865 
866  opage = BufferGetPage(buf_oblkno);
867  oopaque = HashPageGetOpaque(opage);
868 
869  /*
870  * Mark the old bucket to indicate that split is in progress. (At
871  * operation end, we will clear the split-in-progress flag.) Also, for a
872  * primary bucket page, hasho_prevblkno stores the number of buckets that
873  * existed as of the last split, so we must update that value here.
874  */
875  oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
876  oopaque->hasho_prevblkno = maxbucket;
877 
878  MarkBufferDirty(buf_oblkno);
879 
880  npage = BufferGetPage(buf_nblkno);
881 
882  /*
883  * initialize the new bucket's primary page and mark it to indicate that
884  * split is in progress.
885  */
886  nopaque = HashPageGetOpaque(npage);
887  nopaque->hasho_prevblkno = maxbucket;
888  nopaque->hasho_nextblkno = InvalidBlockNumber;
889  nopaque->hasho_bucket = new_bucket;
890  nopaque->hasho_flag = LH_BUCKET_BEING_POPULATED | LH_BUCKET_PAGE;
891  nopaque->hasho_page_id = HASHO_PAGE_ID;
892 
893  MarkBufferDirty(buf_nblkno);
894 
895  /* XLOG stuff */
896  if (RelationNeedsWAL(rel))
897  {
898  xl_hash_split_allocate_page xlrec;
899  XLogRecPtr recptr;
900 
901  xlrec.new_bucket = maxbucket;
902  xlrec.old_bucket_flag = oopaque->hasho_flag;
903  xlrec.new_bucket_flag = nopaque->hasho_flag;
904  xlrec.flags = 0;
905 
906  XLogBeginInsert();
907 
908  XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
909  XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
910  XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
911 
912  if (metap_update_masks)
913  {
914  xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
915  XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
916  XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
917  }
918 
919  if (metap_update_splitpoint)
920  {
921  xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
922  XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
923  sizeof(uint32));
924  XLogRegisterBufData(2,
925  (char *) &metap->hashm_spares[metap->hashm_ovflpoint],
926  sizeof(uint32));
927  }
928 
929  XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
930 
931  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
932 
933  PageSetLSN(BufferGetPage(buf_oblkno), recptr);
934  PageSetLSN(BufferGetPage(buf_nblkno), recptr);
935  PageSetLSN(BufferGetPage(metabuf), recptr);
936  }
937 
938  END_CRIT_SECTION();
939 
940  /* drop lock, but keep pin */
941  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
942 
943  /* Relocate records to the new bucket */
944  _hash_splitbucket(rel, metabuf,
945  old_bucket, new_bucket,
946  buf_oblkno, buf_nblkno, NULL,
947  maxbucket, highmask, lowmask);
948 
949  /* all done, now release the pins on primary buckets. */
950  _hash_dropbuf(rel, buf_oblkno);
951  _hash_dropbuf(rel, buf_nblkno);
952 
953  return;
954 
955  /* Here if decide not to split or fail to acquire old bucket lock */
956 fail:
957 
958  /* We didn't write the metapage, so just drop lock */
959  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
960 }
int Buffer
Definition: buf.h:23
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:4446
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1573
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4156
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageSetLSN(page, lsn)
Definition: bufpage.h:367
unsigned int uint32
Definition: c.h:441
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:685
#define LH_BUCKET_PAGE
Definition: hash.h:55
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:91
#define LH_META_PAGE
Definition: hash.h:57
#define HashPageGetMeta(page)
Definition: hash.h:323
#define LH_BUCKET_BEING_POPULATED
Definition: hash.h:58
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:39
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:90
uint32 Bucket
Definition: hash.h:35
#define LH_BUCKET_BEING_SPLIT
Definition: hash.h:59
#define XLOG_HASH_SPLIT_ALLOCATE_PAGE
Definition: hash_xlog.h:31
#define SizeOfHashSplitAllocPage
Definition: hash_xlog.h:100
#define XLH_SPLIT_META_UPDATE_SPLITPOINT
Definition: hash_xlog.h:46
#define XLH_SPLIT_META_UPDATE_MASKS
Definition: hash_xlog.h:45
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:266
Buffer _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
Definition: hashpage.c:96
static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:1069
void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:1352
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
Definition: hashpage.c:988
Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
Definition: hashpage.c:198
uint32 _hash_spareindex(uint32 num_bucket)
Definition: hashutil.c:143
uint32 _hash_get_totalbuckets(uint32 splitpoint_phase)
Definition: hashutil.c:175
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:211
Assert(fmt[strlen(fmt) - 1] !='\n')
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
uint32 hashm_lowmask
Definition: hash.h:256
uint32 hashm_maxbucket
Definition: hash.h:254
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:262
double hashm_ntuples
Definition: hash.h:248
uint32 hashm_ovflpoint
Definition: hash.h:257
uint32 hashm_highmask
Definition: hash.h:255
uint16 hashm_ffactor
Definition: hash.h:249
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:443
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:389
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:351
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33

References _hash_alloc_buckets(), _hash_checkpage(), _hash_dropbuf(), _hash_finish_split(), _hash_get_totalbuckets(), _hash_getbuf_with_condlock_cleanup(), _hash_getnewbuf(), _hash_relbuf(), _hash_spareindex(), _hash_splitbucket(), Assert(), BUCKET_TO_BLKNO, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, END_CRIT_SECTION, xl_hash_split_allocate_page::flags, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, hashbucketcleanup(), HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_spares, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HashPageGetOpaque, InvalidBlockNumber, IsBufferCleanupOK(), LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_split_allocate_page::new_bucket, xl_hash_split_allocate_page::new_bucket_flag, xl_hash_split_allocate_page::old_bucket_flag, PageSetLSN, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationNeedsWAL, SizeOfHashSplitAllocPage, START_CRIT_SECTION, XLH_SPLIT_META_UPDATE_MASKS, XLH_SPLIT_META_UPDATE_SPLITPOINT, XLOG_HASH_SPLIT_ALLOCATE_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_doinsert().
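
The core of the split bookkeeping above is mask arithmetic: the new bucket is hashm_maxbucket + 1, the bucket it drains is that number masked with hashm_lowmask, and both masks double when new_bucket outgrows hashm_highmask. A standalone sketch of that progression (illustrative only, not PostgreSQL source):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t maxbucket = 3;     /* buckets 0..3 exist */
    uint32_t highmask = 7;
    uint32_t lowmask = 3;

    for (int i = 0; i < 5; i++)
    {
        uint32_t new_bucket = maxbucket + 1;
        uint32_t old_bucket = new_bucket & lowmask;

        if (new_bucket > highmask)
        {
            /* starting a new doubling, as in _hash_expandtable() */
            lowmask = highmask;
            highmask = new_bucket | lowmask;
        }
        printf("split bucket %u into new bucket %u (masks %u/%u)\n",
               old_bucket, new_bucket, highmask, lowmask);
        maxbucket = new_bucket;
    }
    return 0;
}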

◆ _hash_finish_split()

void _hash_finish_split ( Relation  rel,
Buffer  metabuf,
Buffer  obuf,
Bucket  obucket,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)

Definition at line 1352 of file hashpage.c.

1354 {
1355  HASHCTL hash_ctl;
1356  HTAB *tidhtab;
1357  Buffer bucket_nbuf = InvalidBuffer;
1358  Buffer nbuf;
1359  Page npage;
1360  BlockNumber nblkno;
1361  BlockNumber bucket_nblkno;
1362  HashPageOpaque npageopaque;
1363  Bucket nbucket;
1364  bool found;
1365 
1366  /* Initialize hash tables used to track TIDs */
1367  hash_ctl.keysize = sizeof(ItemPointerData);
1368  hash_ctl.entrysize = sizeof(ItemPointerData);
1369  hash_ctl.hcxt = CurrentMemoryContext;
1370 
1371  tidhtab =
1372  hash_create("bucket ctids",
1373  256, /* arbitrary initial size */
1374  &hash_ctl,
1375  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1376 
1377  bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1378 
1379  /*
1380  * Scan the new bucket and build hash table of TIDs
1381  */
1382  for (;;)
1383  {
1384  OffsetNumber noffnum;
1385  OffsetNumber nmaxoffnum;
1386 
1387  nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1388  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1389 
1390  /* remember the primary bucket buffer to acquire cleanup lock on it. */
1391  if (nblkno == bucket_nblkno)
1392  bucket_nbuf = nbuf;
1393 
1394  npage = BufferGetPage(nbuf);
1395  npageopaque = HashPageGetOpaque(npage);
1396 
1397  /* Scan each tuple in new page */
1398  nmaxoffnum = PageGetMaxOffsetNumber(npage);
1399  for (noffnum = FirstOffsetNumber;
1400  noffnum <= nmaxoffnum;
1401  noffnum = OffsetNumberNext(noffnum))
1402  {
1403  IndexTuple itup;
1404 
1405  /* Fetch the item's TID and insert it in hash table. */
1406  itup = (IndexTuple) PageGetItem(npage,
1407  PageGetItemId(npage, noffnum));
1408 
1409  (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1410 
1411  Assert(!found);
1412  }
1413 
1414  nblkno = npageopaque->hasho_nextblkno;
1415 
1416  /*
1417  * release our write lock without modifying the buffer, making sure
1418  * to retain the pin on the primary bucket.
1419  */
1420  if (nbuf == bucket_nbuf)
1421  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1422  else
1423  _hash_relbuf(rel, nbuf);
1424 
1425  /* Exit loop if no more overflow pages in new bucket */
1426  if (!BlockNumberIsValid(nblkno))
1427  break;
1428  }
1429 
1430  /*
1431  * Conditionally get the cleanup lock on old and new buckets to perform
1432  * the split operation. If we don't get the cleanup locks, silently give
1433  * up and next insertion on old bucket will try again to complete the
1434  * split.
1435  */
1436  if (!ConditionalLockBufferForCleanup(obuf))
1437  {
1438  hash_destroy(tidhtab);
1439  return;
1440  }
1441  if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1442  {
1443  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1444  hash_destroy(tidhtab);
1445  return;
1446  }
1447 
1448  npage = BufferGetPage(bucket_nbuf);
1449  npageopaque = HashPageGetOpaque(npage);
1450  nbucket = npageopaque->hasho_bucket;
1451 
1452  _hash_splitbucket(rel, metabuf, obucket,
1453  nbucket, obuf, bucket_nbuf, tidhtab,
1454  maxbucket, highmask, lowmask);
1455 
1456  _hash_dropbuf(rel, bucket_nbuf);
1457  hash_destroy(tidhtab);
1458 }
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4390
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:356
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:234
#define PageGetItem(page, itemId)
Definition: bufpage.h:339
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:862
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
#define HASH_READ
Definition: hash.h:339
#define LH_OVERFLOW_PAGE
Definition: hash.h:54
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:70
BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket)
Definition: hashutil.c:462
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct ItemPointerData ItemPointerData
IndexTupleData * IndexTuple
Definition: itup.h:53
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220
ItemPointerData t_tid
Definition: itup.h:37

References _hash_dropbuf(), _hash_get_newblock_from_oldbucket(), _hash_getbuf(), _hash_relbuf(), _hash_splitbucket(), Assert(), BlockNumberIsValid, BUFFER_LOCK_UNLOCK, BufferGetPage, ConditionalLockBufferForCleanup(), CurrentMemoryContext, HASHCTL::entrysize, FirstOffsetNumber, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, HASH_READ, hash_search(), HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_nextblkno, HashPageGetOpaque, HASHCTL::hcxt, InvalidBuffer, HASHCTL::keysize, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LockBuffer(), OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, and IndexTupleData::t_tid.

Referenced by _hash_doinsert(), and _hash_expandtable().
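
The TID table built in the first loop is ordinary dynahash usage: keys and entries are both just ItemPointerData, so HASH_BLOBS applies. A hedged sketch of the same pattern, assuming backend context (make_tid_table and remember_tid are invented names, not PostgreSQL source):

#include "postgres.h"
#include "storage/itemptr.h"
#include "utils/hsearch.h"

/* Build the kind of TID hash table _hash_finish_split() uses to record
 * which tuples already reside in the new bucket. */
static HTAB *
make_tid_table(void)
{
    HASHCTL hash_ctl;

    hash_ctl.keysize = sizeof(ItemPointerData);
    hash_ctl.entrysize = sizeof(ItemPointerData);
    hash_ctl.hcxt = CurrentMemoryContext;

    return hash_create("bucket ctids sketch", 256, &hash_ctl,
                       HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

/* Returns true if the TID had been seen before. */
static bool
remember_tid(HTAB *tidhtab, ItemPointer tid)
{
    bool found;

    (void) hash_search(tidhtab, tid, HASH_ENTER, &found);
    return found;
}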

◆ _hash_getbucketbuf_from_hashkey()

Buffer _hash_getbucketbuf_from_hashkey ( Relation  rel,
uint32  hashkey,
int  access,
HashMetaPage cachedmetap 
)

Definition at line 1555 of file hashpage.c.

1557 {
1558  HashMetaPage metap;
1559  Buffer buf;
1560  Buffer metabuf = InvalidBuffer;
1561  Page page;
1562  Bucket bucket;
1563  BlockNumber blkno;
1564  HashPageOpaque opaque;
1565 
1566  /* We read from the target bucket buffer, hence locking is a must. */
1567  Assert(access == HASH_READ || access == HASH_WRITE);
1568 
1569  metap = _hash_getcachedmetap(rel, &metabuf, false);
1570  Assert(metap != NULL);
1571 
1572  /*
1573  * Loop until we get a lock on the correct target bucket.
1574  */
1575  for (;;)
1576  {
1577  /*
1578  * Compute the target bucket number, and convert to block number.
1579  */
1580  bucket = _hash_hashkey2bucket(hashkey,
1581  metap->hashm_maxbucket,
1582  metap->hashm_highmask,
1583  metap->hashm_lowmask);
1584 
1585  blkno = BUCKET_TO_BLKNO(metap, bucket);
1586 
1587  /* Fetch the primary bucket page for the bucket */
1588  buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1589  page = BufferGetPage(buf);
1590  opaque = HashPageGetOpaque(page);
1591  Assert(opaque->hasho_bucket == bucket);
1592  Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
1593 
1594  /*
1595  * If this bucket hasn't been split, we're done.
1596  */
1597  if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
1598  break;
1599 
1600  /* Drop lock on this buffer, update cached metapage, and retry. */
1601  _hash_relbuf(rel, buf);
1602  metap = _hash_getcachedmetap(rel, &metabuf, true);
1603  Assert(metap != NULL);
1604  }
1605 
1606  if (BufferIsValid(metabuf))
1607  _hash_dropbuf(rel, metabuf);
1608 
1609  if (cachedmetap)
1610  *cachedmetap = metap;
1611 
1612  return buf;
1613 }
#define HASH_WRITE
Definition: hash.h:340
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1497
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:126

References _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_hashkey2bucket(), _hash_relbuf(), Assert(), BUCKET_TO_BLKNO, buf, BufferGetPage, BufferIsValid, HASH_READ, HASH_WRITE, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_prevblkno, HashPageGetOpaque, InvalidBlockNumber, InvalidBuffer, and LH_BUCKET_PAGE.

Referenced by _hash_doinsert(), and _hash_first().
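
The bucket computation this function delegates to _hash_hashkey2bucket() (see hashutil.c:126) is a two-step masking: apply highmask, then fall back to lowmask if the result names a bucket that does not exist yet. A standalone sketch (illustrative, not the PostgreSQL source itself):

#include <stdint.h>
#include <stdio.h>

static uint32_t
hashkey2bucket(uint32_t hashkey, uint32_t maxbucket,
               uint32_t highmask, uint32_t lowmask)
{
    uint32_t bucket = hashkey & highmask;

    if (bucket > maxbucket)
        bucket = bucket & lowmask;
    return bucket;
}

int
main(void)
{
    /* 6 buckets (0..5): highmask covers 0..7, lowmask covers 0..3 */
    printf("%u\n", hashkey2bucket(0x2AC5u, 5, 7, 3)); /* 5: exists */
    printf("%u\n", hashkey2bucket(0x2AC7u, 5, 7, 3)); /* 7 > 5, so 3 */
    return 0;
}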

◆ _hash_getbuf()

Buffer _hash_getbuf ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags 
)

Definition at line 70 of file hashpage.c.

71 {
72  Buffer buf;
73 
74  if (blkno == P_NEW)
75  elog(ERROR, "hash AM does not use P_NEW");
76 
77  buf = ReadBuffer(rel, blkno);
78 
79  if (access != HASH_NOLOCK)
80  LockBuffer(buf, access);
81 
82  /* ref count and lock type are correct */
83 
84  _hash_checkpage(rel, buf, flags);
85 
86  return buf;
87 }
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:702
#define P_NEW
Definition: bufmgr.h:91
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define HASH_NOLOCK
Definition: hash.h:341

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), P_NEW, and ReadBuffer().

Referenced by _hash_addovflpage(), _hash_doinsert(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_kill_items(), _hash_next(), _hash_readnext(), _hash_readprev(), _hash_splitbucket(), hash_bitmap_info(), hashbulkdelete(), and pgstathashindex().
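
Callers pair _hash_getbuf() with _hash_relbuf() (drops lock and pin) or _hash_dropbuf() (drops only the pin). A hedged usage sketch, assuming backend context (peek_next_blkno is an invented name, not PostgreSQL source):

#include "postgres.h"
#include "access/hash.h"
#include "storage/bufmgr.h"

/* Read-lock a bucket or overflow page just long enough to fetch the
 * next block number from its special space. */
static BlockNumber
peek_next_blkno(Relation rel, BlockNumber blkno)
{
    Buffer      buf;
    Page        page;
    HashPageOpaque opaque;
    BlockNumber next;

    buf = _hash_getbuf(rel, blkno, HASH_READ,
                       LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
    page = BufferGetPage(buf);
    opaque = HashPageGetOpaque(page);
    next = opaque->hasho_nextblkno;
    _hash_relbuf(rel, buf);     /* lock and pin both released */

    return next;
}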

◆ _hash_getbuf_with_condlock_cleanup()

Buffer _hash_getbuf_with_condlock_cleanup ( Relation  rel,
BlockNumber  blkno,
int  flags 
)

Definition at line 96 of file hashpage.c.

97 {
98  Buffer buf;
99 
100  if (blkno == P_NEW)
101  elog(ERROR, "hash AM does not use P_NEW");
102 
103  buf = ReadBuffer(rel, blkno);
104 
105  if (!ConditionalLockBufferForCleanup(buf))
106  {
107  ReleaseBuffer(buf);
108  return InvalidBuffer;
109  }
110 
111  /* ref count and lock type are correct */
112 
113  _hash_checkpage(rel, buf, flags);
114 
115  return buf;
116 }

References _hash_checkpage(), buf, ConditionalLockBufferForCleanup(), elog, ERROR, InvalidBuffer, P_NEW, ReadBuffer(), and ReleaseBuffer().

Referenced by _hash_expandtable().

◆ _hash_getbuf_with_strategy()

Buffer _hash_getbuf_with_strategy ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags,
BufferAccessStrategy  bstrategy 
)

Definition at line 239 of file hashpage.c.

242 {
243  Buffer buf;
244 
245  if (blkno == P_NEW)
246  elog(ERROR, "hash AM does not use P_NEW");
247 
248  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
249 
250  if (access != HASH_NOLOCK)
251  LockBuffer(buf, access);
252 
253  /* ref count and lock type are correct */
254 
255  _hash_checkpage(rel, buf, flags);
256 
257  return buf;
258 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:749
@ RBM_NORMAL
Definition: bufmgr.h:39

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), MAIN_FORKNUM, P_NEW, RBM_NORMAL, and ReadBufferExtended().

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), hashbucketcleanup(), and pgstat_hash_page().
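
The extra bstrategy argument lets maintenance code such as hashbucketcleanup() read pages through a small ring buffer instead of flooding shared buffers. A hedged sketch, assuming backend context (inspect_overflow_page is an invented name, not PostgreSQL source):

#include "postgres.h"
#include "access/hash.h"
#include "storage/bufmgr.h"

static void
inspect_overflow_page(Relation rel, BlockNumber blkno)
{
    BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
    Buffer      buf;

    buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ,
                                     LH_OVERFLOW_PAGE, bstrategy);
    /* ... examine the page here ... */
    _hash_relbuf(rel, buf);
    FreeAccessStrategy(bstrategy);
}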

◆ _hash_getcachedmetap()

HashMetaPage _hash_getcachedmetap ( Relation  rel,
Buffer metabuf,
bool  force_refresh 
)

Definition at line 1497 of file hashpage.c.

1498 {
1499  Page page;
1500 
1501  Assert(metabuf);
1502  if (force_refresh || rel->rd_amcache == NULL)
1503  {
1504  char *cache = NULL;
1505 
1506  /*
1507  * It's important that we don't set rd_amcache to an invalid value.
1508  * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
1509  * install a pointer to the newly-allocated storage in the actual
1510  * relcache entry until both have succeeded.
1511  */
1512  if (rel->rd_amcache == NULL)
1513  cache = MemoryContextAlloc(rel->rd_indexcxt,
1514  sizeof(HashMetaPageData));
1515 
1516  /* Read the metapage. */
1517  if (BufferIsValid(*metabuf))
1518  LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1519  else
1520  *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1521  LH_META_PAGE);
1522  page = BufferGetPage(*metabuf);
1523 
1524  /* Populate the cache. */
1525  if (rel->rd_amcache == NULL)
1526  rel->rd_amcache = cache;
1527  memcpy(rel->rd_amcache, HashPageGetMeta(page),
1528  sizeof(HashMetaPageData));
1529 
1530  /* Release metapage lock, but keep the pin. */
1531  LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1532  }
1533 
1534  return (HashMetaPage) rel->rd_amcache;
1535 }
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define HASH_METAPAGE
Definition: hash.h:198
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
void * rd_amcache
Definition: rel.h:225
MemoryContext rd_indexcxt
Definition: rel.h:200

References _hash_getbuf(), Assert(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HASH_METAPAGE, HASH_READ, HashPageGetMeta, LH_META_PAGE, LockBuffer(), MemoryContextAlloc(), RelationData::rd_amcache, and RelationData::rd_indexcxt.

Referenced by _hash_getbucketbuf_from_hashkey(), and hashbulkdelete().
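
Callers hand in *metabuf by reference so a pin taken here can be reused across refreshes, and they must drop any pin the function leaves behind. A hedged usage sketch, assuming backend context (count_buckets is an invented name; by design the cached copy may be slightly stale):

#include "postgres.h"
#include "access/hash.h"

static uint32
count_buckets(Relation rel)
{
    Buffer       metabuf = InvalidBuffer;
    HashMetaPage metap;
    uint32       nbuckets;

    metap = _hash_getcachedmetap(rel, &metabuf, false);
    nbuckets = metap->hashm_maxbucket + 1;

    if (BufferIsValid(metabuf))
        _hash_dropbuf(rel, metabuf);

    return nbuckets;
}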

◆ _hash_getinitbuf()

Buffer _hash_getinitbuf ( Relation  rel,
BlockNumber  blkno 
)

Definition at line 135 of file hashpage.c.

136 {
137  Buffer buf;
138 
139  if (blkno == P_NEW)
140  elog(ERROR, "hash AM does not use P_NEW");
141 
142  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
143  NULL);
144 
145  /* ref count and lock type are correct */
146 
147  /* initialize the page */
148  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
149 
150  return buf;
151 }
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:156
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:40

References _hash_pageinit(), buf, BufferGetPage, BufferGetPageSize, elog, ERROR, MAIN_FORKNUM, P_NEW, RBM_ZERO_AND_LOCK, and ReadBufferExtended().

Referenced by _hash_addovflpage().

◆ _hash_getnewbuf()

Buffer _hash_getnewbuf ( Relation  rel,
BlockNumber  blkno,
ForkNumber  forkNum 
)

Definition at line 198 of file hashpage.c.

199 {
200  BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
201  Buffer buf;
202 
203  if (blkno == P_NEW)
204  elog(ERROR, "hash AM does not use P_NEW");
205  if (blkno > nblocks)
206  elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
208 
209  /* smgr insists we use P_NEW to extend the relation */
210  if (blkno == nblocks)
211  {
212  buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
213  if (BufferGetBlockNumber(buf) != blkno)
214  elog(ERROR, "unexpected hash relation size: %u, should be %u",
215  BufferGetBlockNumber(buf), blkno);
216  LockBuffer(buf, HASH_WRITE);
217  }
218  else
219  {
220  buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
221  NULL);
222  }
223 
224  /* ref count and lock type are correct */
225 
226  /* initialize the page */
227  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
228 
229  return buf;
230 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2755
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2942
#define RelationGetRelationName(relation)
Definition: rel.h:523

References _hash_pageinit(), buf, BufferGetBlockNumber(), BufferGetPage, BufferGetPageSize, elog, ERROR, HASH_WRITE, LockBuffer(), P_NEW, RBM_NORMAL, RBM_ZERO_AND_LOCK, ReadBufferExtended(), RelationGetNumberOfBlocksInFork(), and RelationGetRelationName.

Referenced by _hash_addovflpage(), _hash_expandtable(), and _hash_init().

◆ _hash_init()

uint32 _hash_init ( Relation  rel,
double  num_tuples,
ForkNumber  forkNum 
)

Definition at line 327 of file hashpage.c.

328 {
329  Buffer metabuf;
330  Buffer buf;
331  Buffer bitmapbuf;
332  Page pg;
333  HashMetaPage metap;
334  RegProcedure procid;
335  int32 data_width;
336  int32 item_width;
337  int32 ffactor;
338  uint32 num_buckets;
339  uint32 i;
340  bool use_wal;
341 
342  /* safety check */
343  if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
344  elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
346 
347  /*
348  * WAL log creation of pages if the relation is persistent, or this is the
349  * init fork. Init forks for unlogged relations always need to be WAL
350  * logged.
351  */
352  use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
353 
354  /*
355  * Determine the target fill factor (in tuples per bucket) for this index.
356  * The idea is to make the fill factor correspond to pages about as full
357  * as the user-settable fillfactor parameter says. We can compute it
358  * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
359  */
360  data_width = sizeof(uint32);
361  item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
362  sizeof(ItemIdData); /* include the line pointer */
363  ffactor = HashGetTargetPageUsage(rel) / item_width;
364  /* keep to a sane range */
365  if (ffactor < 10)
366  ffactor = 10;
367 
368  procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);
369 
370  /*
371  * We initialize the metapage, the first N bucket pages, and the first
372  * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
373  * calls to occur. This ensures that the smgr level has the right idea of
374  * the physical index length.
375  *
376  * Critical section not required, because on error the creation of the
377  * whole relation will be rolled back.
378  */
379  metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
380  _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
381  MarkBufferDirty(metabuf);
382 
383  pg = BufferGetPage(metabuf);
384  metap = HashPageGetMeta(pg);
385 
386  /* XLOG stuff */
387  if (use_wal)
388  {
389  xl_hash_init_meta_page xlrec;
390  XLogRecPtr recptr;
391 
392  xlrec.num_tuples = num_tuples;
393  xlrec.procid = metap->hashm_procid;
394  xlrec.ffactor = metap->hashm_ffactor;
395 
396  XLogBeginInsert();
397  XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
398  XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
399 
400  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
401 
402  PageSetLSN(BufferGetPage(metabuf), recptr);
403  }
404 
405  num_buckets = metap->hashm_maxbucket + 1;
406 
407  /*
408  * Release buffer lock on the metapage while we initialize buckets.
409  * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
410  * won't accomplish anything. It's a bad idea to hold buffer locks for
411  * long intervals in any case, since that can block the bgwriter.
412  */
413  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
414 
415  /*
416  * Initialize and WAL Log the first N buckets
417  */
418  for (i = 0; i < num_buckets; i++)
419  {
420  BlockNumber blkno;
421 
422  /* Allow interrupts, in case N is huge */
423  CHECK_FOR_INTERRUPTS();
424 
425  blkno = BUCKET_TO_BLKNO(metap, i);
426  buf = _hash_getnewbuf(rel, blkno, forkNum);
427  _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
428  MarkBufferDirty(buf);
429 
430  if (use_wal)
431  log_newpage(&rel->rd_node,
432  forkNum,
433  blkno,
434  BufferGetPage(buf),
435  true);
436  _hash_relbuf(rel, buf);
437  }
438 
439  /* Now reacquire buffer lock on metapage */
440  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
441 
442  /*
443  * Initialize bitmap page
444  */
445  bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
446  _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
447  MarkBufferDirty(bitmapbuf);
448 
449  /* add the new bitmap page to the metapage's list of bitmaps */
450  /* metapage already has a write lock */
451  if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
452  ereport(ERROR,
453  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
454  errmsg("out of overflow pages in hash index \"%s\"",
455  RelationGetRelationName(rel))));
456 
457  metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
458 
459  metap->hashm_nmaps++;
460  MarkBufferDirty(metabuf);
461 
462  /* XLOG stuff */
463  if (use_wal)
464  {
465  xl_hash_init_bitmap_page xlrec;
466  XLogRecPtr recptr;
467 
468  xlrec.bmsize = metap->hashm_bmsize;
469 
470  XLogBeginInsert();
471  XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
472  XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
473 
474  /*
475  * This is safe only because nobody else can be modifying the index at
476  * this stage; it's only visible to the transaction that is creating
477  * it.
478  */
479  XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
480 
481  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
482 
483  PageSetLSN(BufferGetPage(bitmapbuf), recptr);
484  PageSetLSN(BufferGetPage(metabuf), recptr);
485  }
486 
487  /* all done */
488  _hash_relbuf(rel, bitmapbuf);
489  _hash_relbuf(rel, metabuf);
490 
491  return num_buckets;
492 }
#define MAXALIGN(LEN)
Definition: c.h:757
signed int int32
Definition: c.h:429
regproc RegProcedure
Definition: c.h:585
int errcode(int sqlerrcode)
Definition: elog.c:693
int errmsg(const char *fmt,...)
Definition: elog.c:904
#define ereport(elevel,...)
Definition: elog.h:143
#define HASH_MAX_BITMAPS
Definition: hash.h:230
#define HASHSTANDARD_PROC
Definition: hash.h:355
#define HashGetTargetPageUsage(relation)
Definition: hash.h:281
#define SizeOfHashInitBitmapPage
Definition: hash_xlog.h:233
#define XLOG_HASH_INIT_BITMAP_PAGE
Definition: hash_xlog.h:28
#define XLOG_HASH_INIT_META_PAGE
Definition: hash_xlog.h:27
#define SizeOfHashInitMetaPage
Definition: hash_xlog.h:217
void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
Definition: hashovfl.c:741
void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage)
Definition: hashpage.c:157
void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage)
Definition: hashpage.c:498
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:769
int i
Definition: isn.c:73
struct ItemIdData ItemIdData
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
@ INIT_FORKNUM
Definition: relpath.h:46
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]
Definition: hash.h:264
RegProcedure hashm_procid
Definition: hash.h:261
uint16 hashm_bmsize
Definition: hash.h:251
uint32 hashm_nmaps
Definition: hash.h:260
RegProcedure procid
Definition: hash_xlog.h:213

References _hash_getnewbuf(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_relbuf(), xl_hash_init_bitmap_page::bmsize, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, elog, ereport, errcode(), errmsg(), ERROR, xl_hash_init_meta_page::ffactor, HASH_MAX_BITMAPS, HASH_METAPAGE, HashGetTargetPageUsage, HashMetaPageData::hashm_bmsize, HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_procid, HashPageGetMeta, HASHSTANDARD_PROC, i, index_getprocid(), INIT_FORKNUM, LH_BUCKET_PAGE, LockBuffer(), log_newpage(), MarkBufferDirty(), MAXALIGN, xl_hash_init_meta_page::num_tuples, PageSetLSN, xl_hash_init_meta_page::procid, RelationData::rd_node, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetNumberOfBlocksInFork(), RelationGetRelationName, RelationNeedsWAL, SizeOfHashInitBitmapPage, SizeOfHashInitMetaPage, XLOG_HASH_INIT_BITMAP_PAGE, XLOG_HASH_INIT_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashbuild(), and hashbuildempty().
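
The fill-factor arithmetic near the top of the listing is easy to verify by hand. A standalone sketch, assuming an 8192-byte page, 8-byte MAXALIGN, sizeof(IndexTupleData) = 8, sizeof(ItemIdData) = 4, and the default hash fillfactor of 75 (so HashGetTargetPageUsage yields 6144):

#include <stdio.h>

#define MY_MAXALIGN(len) (((len) + 7) & ~7)

int
main(void)
{
    int data_width = 4;     /* the uint32 hash key */
    int item_width = MY_MAXALIGN(8) + MY_MAXALIGN(data_width) + 4;
    int target = 8192 * 75 / 100;
    int ffactor = target / item_width;

    if (ffactor < 10)       /* same sanity floor as _hash_init() */
        ffactor = 10;

    printf("item_width=%d ffactor=%d\n", item_width, ffactor); /* 20, 307 */
    return 0;
}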

◆ _hash_init_metabuffer()

void _hash_init_metabuffer ( Buffer  buf,
double  num_tuples,
RegProcedure  procid,
uint16  ffactor,
bool  initpage 
)

Definition at line 498 of file hashpage.c.

500 {
501  HashMetaPage metap;
502  HashPageOpaque pageopaque;
503  Page page;
504  double dnumbuckets;
505  uint32 num_buckets;
506  uint32 spare_index;
507  uint32 lshift;
508 
509  /*
510  * Choose the number of initial bucket pages to match the fill factor
511  * given the estimated number of tuples. We round up the result to the
512  * total number of buckets which has to be allocated before using its
513  * hashm_spares element. However always force at least 2 bucket pages. The
514  * upper limit is determined by considerations explained in
515  * _hash_expandtable().
516  */
517  dnumbuckets = num_tuples / ffactor;
518  if (dnumbuckets <= 2.0)
519  num_buckets = 2;
520  else if (dnumbuckets >= (double) 0x40000000)
521  num_buckets = 0x40000000;
522  else
523  num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
524 
525  spare_index = _hash_spareindex(num_buckets);
526  Assert(spare_index < HASH_MAX_SPLITPOINTS);
527 
528  page = BufferGetPage(buf);
529  if (initpage)
530  _hash_pageinit(page, BufferGetPageSize(buf));
531 
532  pageopaque = HashPageGetOpaque(page);
533  pageopaque->hasho_prevblkno = InvalidBlockNumber;
534  pageopaque->hasho_nextblkno = InvalidBlockNumber;
535  pageopaque->hasho_bucket = InvalidBucket;
536  pageopaque->hasho_flag = LH_META_PAGE;
537  pageopaque->hasho_page_id = HASHO_PAGE_ID;
538 
539  metap = HashPageGetMeta(page);
540 
541  metap->hashm_magic = HASH_MAGIC;
542  metap->hashm_version = HASH_VERSION;
543  metap->hashm_ntuples = 0;
544  metap->hashm_nmaps = 0;
545  metap->hashm_ffactor = ffactor;
546  metap->hashm_bsize = HashGetMaxBitmapSize(page);
547 
548  /* find largest bitmap array size that will fit in page size */
549  lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
550  Assert(lshift > 0);
551  metap->hashm_bmsize = 1 << lshift;
552  metap->hashm_bmshift = lshift + BYTE_TO_BIT;
553  Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
554 
555  /*
556  * Label the index with its primary hash support function's OID. This is
557  * pretty useless for normal operation (in fact, hashm_procid is not used
558  * anywhere), but it might be handy for forensic purposes so we keep it.
559  */
560  metap->hashm_procid = procid;
561 
562  /*
563  * We initialize the index with N buckets, 0 .. N-1, occupying physical
564  * blocks 1 to N. The first freespace bitmap page is in block N+1.
565  */
566  metap->hashm_maxbucket = num_buckets - 1;
567 
568  /*
569  * Set highmask as next immediate ((2 ^ x) - 1), which should be
570  * sufficient to cover num_buckets.
571  */
572  metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
573  metap->hashm_lowmask = (metap->hashm_highmask >> 1);
574 
575  MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
576  MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
577 
578  /* Set up mapping for one spare page after the initial splitpoints */
579  metap->hashm_spares[spare_index] = 1;
580  metap->hashm_ovflpoint = spare_index;
581  metap->hashm_firstfree = 0;
582 
583  /*
584  * Set pd_lower just past the end of the metadata. This is essential,
585  * because without doing so, metadata will be lost if xlog.c compresses
586  * the page.
587  */
588  ((PageHeader) page)->pd_lower =
589  ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
590 }
PageHeaderData * PageHeader
Definition: bufpage.h:166
#define MemSet(start, val, len)
Definition: c.h:1008
#define BMPG_MASK(metap)
Definition: hash.h:314
#define HASH_VERSION
Definition: hash.h:201
#define HASH_MAX_SPLITPOINTS
Definition: hash.h:239
#define BYTE_TO_BIT
Definition: hash.h:301
#define HashGetMaxBitmapSize(page)
Definition: hash.h:319
#define BMPG_SHIFT(metap)
Definition: hash.h:313
#define HASH_MAGIC
Definition: hash.h:200
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:140
static int pg_leftmost_one_pos32(uint32 word)
Definition: pg_bitutils.h:26
uint32 hashm_version
Definition: hash.h:247
uint32 hashm_firstfree
Definition: hash.h:259
uint16 hashm_bsize
Definition: hash.h:250
uint32 hashm_magic
Definition: hash.h:246
uint16 hashm_bmshift
Definition: hash.h:253

References _hash_get_totalbuckets(), _hash_pageinit(), _hash_spareindex(), Assert(), BMPG_MASK, BMPG_SHIFT, buf, BufferGetPage, BufferGetPageSize, BYTE_TO_BIT, HASH_MAGIC, HASH_MAX_SPLITPOINTS, HASH_VERSION, HashGetMaxBitmapSize, HashMetaPageData::hashm_bmshift, HashMetaPageData::hashm_bmsize, HashMetaPageData::hashm_bsize, HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_firstfree, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_magic, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_procid, HashMetaPageData::hashm_spares, HashMetaPageData::hashm_version, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HashPageGetOpaque, InvalidBlockNumber, InvalidBucket, LH_META_PAGE, MemSet, pg_leftmost_one_pos32(), and pg_nextpower2_32().

Referenced by _hash_init(), and hash_xlog_init_meta_page().
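
The mask setup is reproducible outside the backend: highmask becomes the next ((2 ^ x) - 1) that covers num_buckets, and lowmask is half of it. A standalone sketch in which next_power2 stands in for pg_nextpower2_32 (illustrative, not PostgreSQL source):

#include <stdint.h>
#include <stdio.h>

static uint32_t
next_power2(uint32_t n)
{
    uint32_t p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int
main(void)
{
    for (uint32_t num_buckets = 2; num_buckets <= 10; num_buckets++)
    {
        uint32_t highmask = next_power2(num_buckets + 1) - 1;
        uint32_t lowmask = highmask >> 1;

        printf("buckets=%u highmask=%u lowmask=%u\n",
               num_buckets, highmask, lowmask);
    }
    return 0;
}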

◆ _hash_initbuf()

void _hash_initbuf ( Buffer  buf,
uint32  max_bucket,
uint32  num_bucket,
uint32  flag,
bool  initpage 
)

Definition at line 157 of file hashpage.c.

159 {
160  HashPageOpaque pageopaque;
161  Page page;
162 
163  page = BufferGetPage(buf);
164 
165  /* initialize the page */
166  if (initpage)
167  _hash_pageinit(page, BufferGetPageSize(buf));
168 
169  pageopaque = HashPageGetOpaque(page);
170 
171  /*
172  * Set hasho_prevblkno with current hashm_maxbucket. This value will be
173  * used to validate cached HashMetaPageData. See
174  * _hash_getbucketbuf_from_hashkey().
175  */
176  pageopaque->hasho_prevblkno = max_bucket;
177  pageopaque->hasho_nextblkno = InvalidBlockNumber;
178  pageopaque->hasho_bucket = num_bucket;
179  pageopaque->hasho_flag = flag;
180  pageopaque->hasho_page_id = HASHO_PAGE_ID;
181 }
char * flag(int b)
Definition: test-ctype.c:33

References _hash_pageinit(), buf, BufferGetPage, BufferGetPageSize, flag(), HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetOpaque, and InvalidBlockNumber.

Referenced by _hash_init(), hash_xlog_add_ovfl_page(), and hash_xlog_split_allocate_page().

◆ _hash_pageinit()

void _hash_pageinit ( Page  page,
Size  size 
)

Definition at line 596 of file hashpage.c.

597 {
598  PageInit(page, size, sizeof(HashPageOpaqueData));
599 }
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:42

References PageInit().

Referenced by _hash_alloc_buckets(), _hash_freeovflpage(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), and hash_xlog_squeeze_page().

◆ _hash_relbuf()

void _hash_relbuf ( Relation  rel,
Buffer  buf 
)

Definition at line 266 of file hashpage.c.

267 {
268  UnlockReleaseBuffer(buf);
269 }

References buf, and UnlockReleaseBuffer().

◆ _hash_splitbucket()

static void _hash_splitbucket ( Relation  rel,
Buffer  metabuf,
Bucket  obucket,
Bucket  nbucket,
Buffer  obuf,
Buffer  nbuf,
HTAB htab,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)
static

Definition at line 1069 of file hashpage.c.

1079 {
1080  Buffer bucket_obuf;
1081  Buffer bucket_nbuf;
1082  Page opage;
1083  Page npage;
1084  HashPageOpaque oopaque;
1085  HashPageOpaque nopaque;
1086  OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
1087  IndexTuple itups[MaxIndexTuplesPerPage];
1088  Size all_tups_size = 0;
1089  int i;
1090  uint16 nitups = 0;
1091 
1092  bucket_obuf = obuf;
1093  opage = BufferGetPage(obuf);
1094  oopaque = HashPageGetOpaque(opage);
1095 
1096  bucket_nbuf = nbuf;
1097  npage = BufferGetPage(nbuf);
1098  nopaque = HashPageGetOpaque(npage);
1099 
1100  /* Copy the predicate locks from old bucket to new bucket. */
1101  PredicateLockPageSplit(rel,
1102  BufferGetBlockNumber(bucket_obuf),
1103  BufferGetBlockNumber(bucket_nbuf));
1104 
1105  /*
1106  * Partition the tuples in the old bucket between the old bucket and the
1107  * new bucket, advancing along the old bucket's overflow bucket chain and
1108  * adding overflow pages to the new bucket as needed. Outer loop iterates
1109  * once per page in old bucket.
1110  */
1111  for (;;)
1112  {
1113  BlockNumber oblkno;
1114  OffsetNumber ooffnum;
1115  OffsetNumber omaxoffnum;
1116 
1117  /* Scan each tuple in old page */
1118  omaxoffnum = PageGetMaxOffsetNumber(opage);
1119  for (ooffnum = FirstOffsetNumber;
1120  ooffnum <= omaxoffnum;
1121  ooffnum = OffsetNumberNext(ooffnum))
1122  {
1123  IndexTuple itup;
1124  Size itemsz;
1125  Bucket bucket;
1126  bool found = false;
1127 
1128  /* skip dead tuples */
1129  if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
1130  continue;
1131 
1132  /*
1133  * Before inserting a tuple, probe the hash table containing TIDs
1134  * of tuples belonging to new bucket, if we find a match, then
1135  * skip that tuple, else fetch the item's hash key (conveniently
1136  * stored in the item) and determine which bucket it now belongs
1137  * in.
1138  */
1139  itup = (IndexTuple) PageGetItem(opage,
1140  PageGetItemId(opage, ooffnum));
1141 
1142  if (htab)
1143  (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
1144 
1145  if (found)
1146  continue;
1147 
1148  bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
1149  maxbucket, highmask, lowmask);
1150 
1151  if (bucket == nbucket)
1152  {
1153  IndexTuple new_itup;
1154 
1155  /*
1156  * make a copy of index tuple as we have to scribble on it.
1157  */
1158  new_itup = CopyIndexTuple(itup);
1159 
1160  /*
1161  * mark the index tuple as moved by split, such tuples are
1162  * skipped by scan if there is split in progress for a bucket.
1163  */
1164  new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1165 
1166  /*
1167  * insert the tuple into the new bucket. if it doesn't fit on
1168  * the current page in the new bucket, we must allocate a new
1169  * overflow page and place the tuple on that page instead.
1170  */
1171  itemsz = IndexTupleSize(new_itup);
1172  itemsz = MAXALIGN(itemsz);
1173 
1174  if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
1175  {
1176  /*
1177  * Change the shared buffer state in critical section,
1178  * otherwise any error could make it unrecoverable.
1179  */
1180  START_CRIT_SECTION();
1181 
1182  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1183  MarkBufferDirty(nbuf);
1184  /* log the split operation before releasing the lock */
1185  log_split_page(rel, nbuf);
1186 
1187  END_CRIT_SECTION();
1188 
1189  /* drop lock, but keep pin */
1190  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1191 
1192  /* be tidy */
1193  for (i = 0; i < nitups; i++)
1194  pfree(itups[i]);
1195  nitups = 0;
1196  all_tups_size = 0;
1197 
1198  /* chain to a new overflow page */
1199  nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf));
1200  npage = BufferGetPage(nbuf);
1201  nopaque = HashPageGetOpaque(npage);
1202  }
1203 
1204  itups[nitups++] = new_itup;
1205  all_tups_size += itemsz;
1206  }
1207  else
1208  {
1209  /*
1210  * the tuple stays on this page, so nothing to do.
1211  */
1212  Assert(bucket == obucket);
1213  }
1214  }
1215 
1216  oblkno = oopaque->hasho_nextblkno;
1217 
1218  /* retain the pin on the old primary bucket */
1219  if (obuf == bucket_obuf)
1220  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1221  else
1222  _hash_relbuf(rel, obuf);
1223 
1224  /* Exit loop if no more overflow pages in old bucket */
1225  if (!BlockNumberIsValid(oblkno))
1226  {
1227  /*
1228  * Change the shared buffer state in critical section, otherwise
1229  * any error could make it unrecoverable.
1230  */
1231  START_CRIT_SECTION();
1232 
1233  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1234  MarkBufferDirty(nbuf);
1235  /* log the split operation before releasing the lock */
1236  log_split_page(rel, nbuf);
1237 
1238  END_CRIT_SECTION();
1239 
1240  if (nbuf == bucket_nbuf)
1241  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1242  else
1243  _hash_relbuf(rel, nbuf);
1244 
1245  /* be tidy */
1246  for (i = 0; i < nitups; i++)
1247  pfree(itups[i]);
1248  break;
1249  }
1250 
1251  /* Else, advance to next old page */
1252  obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1253  opage = BufferGetPage(obuf);
1254  oopaque = HashPageGetOpaque(opage);
1255  }
1256 
1257  /*
1258  * We're at the end of the old bucket chain, so we're done partitioning
1259  * the tuples. Mark the old and new buckets to indicate split is
1260  * finished.
1261  *
1262  * To avoid deadlocks due to locking order of buckets, first lock the old
1263  * bucket and then the new bucket.
1264  */
1265  LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
1266  opage = BufferGetPage(bucket_obuf);
1267  oopaque = HashPageGetOpaque(opage);
1268 
1269  LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
1270  npage = BufferGetPage(bucket_nbuf);
1271  nopaque = HashPageGetOpaque(npage);
1272 
1273  START_CRIT_SECTION();
1274 
1275  oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1276  nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1277 
1278  /*
1279  * After the split is finished, mark the old bucket to indicate that it
1280  * contains deletable tuples. We will clear split-cleanup flag after
1281  * deleting such tuples either at the end of split or at the next split
1282  * from old bucket or at the time of vacuum.
1283  */
1284  oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1285 
1286  /*
1287  * now write the buffers, here we don't release the locks as caller is
1288  * responsible to release locks.
1289  */
1290  MarkBufferDirty(bucket_obuf);
1291  MarkBufferDirty(bucket_nbuf);
1292 
1293  if (RelationNeedsWAL(rel))
1294  {
1295  XLogRecPtr recptr;
1296  xl_hash_split_complete xlrec;
1297 
1298  xlrec.old_bucket_flag = oopaque->hasho_flag;
1299  xlrec.new_bucket_flag = nopaque->hasho_flag;
1300 
1301  XLogBeginInsert();
1302 
1303  XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
1304 
1305  XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
1306  XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
1307 
1308  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
1309 
1310  PageSetLSN(BufferGetPage(bucket_obuf), recptr);
1311  PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
1312  }
1313 
1314  END_CRIT_SECTION();
1315 
1316  /*
1317  * If possible, clean up the old bucket. We might not be able to do this
1318  * if someone else has a pin on it, but if not then we can go ahead. This
1319  * isn't absolutely necessary, but it reduces bloat; if we don't do it
1320  * now, VACUUM will do it eventually, but maybe not until new overflow
1321  * pages have been allocated. Note that there's no need to clean up the
1322  * new bucket.
1323  */
1324  if (IsBufferCleanupOK(bucket_obuf))
1325  {
1326  LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1327  hashbucketcleanup(rel, obucket, bucket_obuf,
1328  BufferGetBlockNumber(bucket_obuf), NULL,
1329  maxbucket, highmask, lowmask, NULL, NULL, true,
1330  NULL, NULL);
1331  }
1332  else
1333  {
1334  LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1335  LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
1336  }
1337 }
Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups)
Definition: bufpage.c:934
unsigned short uint16
Definition: c.h:440
size_t Size
Definition: c.h:540
#define INDEX_MOVED_BY_SPLIT_MASK
Definition: hash.h:293
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:60
#define SizeOfHashSplitComplete
Definition: hash_xlog.h:117
#define XLOG_HASH_SPLIT_COMPLETE
Definition: hash_xlog.h:33
void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, OffsetNumber *itup_offsets, uint16 nitups)
Definition: hashinsert.c:300
Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
Definition: hashovfl.c:112
static void log_split_page(Relation rel, Buffer buf)
Definition: hashpage.c:1470
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:292
@ HASH_FIND
Definition: hsearch.h:113
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:528
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define IndexTupleSize(itup)
Definition: itup.h:70
#define MaxIndexTuplesPerPage
Definition: itup.h:144
void pfree(void *pointer)
Definition: mcxt.c:1175
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3169
unsigned short t_info
Definition: itup.h:49

References _hash_addovflpage(), _hash_get_indextuple_hashkey(), _hash_getbuf(), _hash_hashkey2bucket(), _hash_pgaddmultitup(), _hash_relbuf(), Assert(), BlockNumberIsValid, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CopyIndexTuple(), END_CRIT_SECTION, FirstOffsetNumber, HASH_FIND, HASH_READ, hash_search(), hashbucketcleanup(), HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageGetOpaque, i, INDEX_MOVED_BY_SPLIT_MASK, IndexTupleSize, IsBufferCleanupOK(), ItemIdIsDead, LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LockBuffer(), log_split_page(), MarkBufferDirty(), MAXALIGN, MaxIndexTuplesPerPage, xl_hash_split_complete::new_bucket_flag, OffsetNumberNext, xl_hash_split_complete::old_bucket_flag, PageGetFreeSpaceForMultipleTuples(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, pfree(), PredicateLockPageSplit(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashSplitComplete, START_CRIT_SECTION, IndexTupleData::t_info, IndexTupleData::t_tid, XLOG_HASH_SPLIT_COMPLETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), and _hash_finish_split().
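
The bucket assignment in the partition loop above comes from _hash_hashkey2bucket() (hashutil.c): the tuple's hash key is masked with highmask, and if the result names a bucket that does not exist yet (i.e. is greater than maxbucket), the key is re-masked with lowmask, leaving it in the lower, not-yet-split half of the table. A condensed sketch of that rule, assuming both masks have the form 2^k - 1 with highmask == 2 * lowmask + 1:

static Bucket
hashkey_to_bucket(uint32 hashkey, uint32 maxbucket,
                  uint32 highmask, uint32 lowmask)
{
    Bucket bucket = hashkey & highmask;

    /*
     * A masked value past the highest existing bucket means the upper
     * twin has not been created yet; the key still maps to its lower
     * twin under the smaller mask.
     */
    if (bucket > maxbucket)
        bucket = bucket & lowmask;

    return bucket;
}

During _hash_splitbucket(), tuples for which this yields nbucket are moved to the new bucket; everything else must still map to obucket, which is what the Assert in the else branch checks.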

◆ log_split_page()

static void log_split_page ( Relation  rel,
Buffer  buf 
)
static

Definition at line 1470 of file hashpage.c.

1471 {
1472  if (RelationNeedsWAL(rel))
1473  {
1474  XLogRecPtr recptr;
1475 
1476  XLogBeginInsert();
1477 
1478  XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
1479 
1480  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
1481 
1482  PageSetLSN(BufferGetPage(buf), recptr);
1483  }
1484 }
#define XLOG_HASH_SPLIT_PAGE
Definition: hash_xlog.h:32
#define REGBUF_FORCE_IMAGE
Definition: xloginsert.h:31

References buf, BufferGetPage, PageSetLSN, REGBUF_FORCE_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, XLOG_HASH_SPLIT_PAGE, XLogBeginInsert(), XLogInsert(), and XLogRegisterBuffer().

Referenced by _hash_splitbucket().
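
Note: because the buffer is registered with REGBUF_FORCE_IMAGE, every XLOG_HASH_SPLIT_PAGE record carries a full image of the page, so _hash_splitbucket() never WAL-logs the individual tuples it moves. Redo then consists only of restoring that image; the replay routine (hash_xlog_split_page() in hash_xlog.c) is, roughly:

static void
hash_xlog_split_page(XLogReaderState *record)
{
    Buffer buf;

    /* the record must carry a full-page image; restoring it is the redo */
    if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
        elog(ERROR, "Hash split record did not contain a full-page image");

    UnlockReleaseBuffer(buf);
}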