hashpage.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
Include dependency graph for hashpage.c:


Macros

#define USELOCKING(rel)   (!RELATION_IS_LOCAL(rel))
 

Functions

static bool _hash_alloc_buckets (Relation rel, BlockNumber firstblock, uint32 nblocks)
 
static void _hash_splitbucket (Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
static void log_split_page (Relation rel, Buffer buf)
 
Buffer _hash_getbuf (Relation rel, BlockNumber blkno, int access, int flags)
 
Buffer _hash_getbuf_with_condlock_cleanup (Relation rel, BlockNumber blkno, int flags)
 
Buffer _hash_getinitbuf (Relation rel, BlockNumber blkno)
 
void _hash_initbuf (Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage)
 
Buffer _hash_getnewbuf (Relation rel, BlockNumber blkno, ForkNumber forkNum)
 
Buffer _hash_getbuf_with_strategy (Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
 
void _hash_relbuf (Relation rel, Buffer buf)
 
void _hash_dropbuf (Relation rel, Buffer buf)
 
void _hash_dropscanbuf (Relation rel, HashScanOpaque so)
 
uint32 _hash_init (Relation rel, double num_tuples, ForkNumber forkNum)
 
void _hash_init_metabuffer (Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage)
 
void _hash_pageinit (Page page, Size size)
 
void _hash_expandtable (Relation rel, Buffer metabuf)
 
void _hash_finish_split (Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
 
HashMetaPage _hash_getcachedmetap (Relation rel, Buffer *metabuf, bool force_refresh)
 
Buffer _hash_getbucketbuf_from_hashkey (Relation rel, uint32 hashkey, int access, HashMetaPage *cachedmetap)
 

Macro Definition Documentation

#define USELOCKING(rel)   (!RELATION_IS_LOCAL(rel))

Definition at line 57 of file hashpage.c.

Function Documentation

static bool _hash_alloc_buckets ( Relation  rel,
BlockNumber  firstblock,
uint32  nblocks 
)
static

Definition at line 987 of file hashpage.c.

References _hash_pageinit(), InvalidBlockNumber, log_newpage(), MAIN_FORKNUM, RelationData::rd_node, RelationData::rd_smgr, RelationNeedsWAL, RelationOpenSmgr, and smgrextend().

Referenced by _hash_expandtable().

988 {
989  BlockNumber lastblock;
990  char zerobuf[BLCKSZ];
991  Page page;
992 
993  lastblock = firstblock + nblocks - 1;
994 
995  /*
996  * Check for overflow in block number calculation; if so, we cannot extend
997  * the index anymore.
998  */
999  if (lastblock < firstblock || lastblock == InvalidBlockNumber)
1000  return false;
1001 
1002  page = (Page) zerobuf;
1003 
1004  /*
1005  * Initialize the freed overflow page. Just zeroing the page won't work,
1006  * See _hash_freeovflpage for similar usage.
1007  */
1008  _hash_pageinit(page, BLCKSZ);
1009 
1010  if (RelationNeedsWAL(rel))
1011  log_newpage(&rel->rd_node,
1012  MAIN_FORKNUM,
1013  lastblock,
1014  zerobuf,
1015  true);
1016 
1017  RelationOpenSmgr(rel);
1018  smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
1019 
1020  return true;
1021 }
void _hash_dropbuf ( Relation  rel,
Buffer  buf 
)

Definition at line 285 of file hashpage.c.

References ReleaseBuffer().

Referenced by _hash_doinsert(), _hash_dropscanbuf(), _hash_expandtable(), _hash_first(), _hash_getbucketbuf_from_hashkey(), _hash_readprev(), and hashbulkdelete().

286 {
287  ReleaseBuffer(buf);
288 }
void _hash_dropscanbuf ( Relation  rel,
HashScanOpaque  so 
)

Definition at line 297 of file hashpage.c.

References _hash_dropbuf(), BufferIsValid, HashScanOpaqueData::hashso_buc_populated, HashScanOpaqueData::hashso_buc_split, HashScanOpaqueData::hashso_bucket_buf, HashScanOpaqueData::hashso_curbuf, HashScanOpaqueData::hashso_split_bucket_buf, and InvalidBuffer.

Referenced by _hash_step(), hashendscan(), and hashrescan().

298 {
299  /* release pin we hold on primary bucket page */
300  if (BufferIsValid(so->hashso_bucket_buf) &&
301  so->hashso_bucket_buf != so->hashso_curbuf)
302  _hash_dropbuf(rel, so->hashso_bucket_buf);
303  so->hashso_bucket_buf = InvalidBuffer;
304 
305  /* release pin we hold on primary bucket page of bucket being split */
306  if (BufferIsValid(so->hashso_split_bucket_buf) &&
307  so->hashso_split_bucket_buf != so->hashso_curbuf)
308  _hash_dropbuf(rel, so->hashso_split_bucket_buf);
309  so->hashso_split_bucket_buf = InvalidBuffer;
310 
311  /* release any pin we still hold */
312  if (BufferIsValid(so->hashso_curbuf))
313  _hash_dropbuf(rel, so->hashso_curbuf);
314  so->hashso_curbuf = InvalidBuffer;
315 
316  /* reset split scan */
317  so->hashso_buc_populated = false;
318  so->hashso_buc_split = false;
319 }
void _hash_expandtable ( Relation  rel,
Buffer  metabuf 
)

Definition at line 610 of file hashpage.c.

References _hash_alloc_buckets(), _hash_checkpage(), _hash_dropbuf(), _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), _hash_getnewbuf(), _hash_log2(), _hash_relbuf(), _hash_splitbucket(), Assert, BUCKET_TO_BLKNO, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, END_CRIT_SECTION, xl_hash_split_allocate_page::flags, H_BUCKET_BEING_SPLIT, H_NEEDS_SPLIT_CLEANUP, hashbucketcleanup(), HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_spares, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, InvalidBlockNumber, IsBufferCleanupOK(), LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_PAGE, LH_META_PAGE, LockBuffer(), MAIN_FORKNUM, MarkBufferDirty(), xl_hash_split_allocate_page::new_bucket, xl_hash_split_allocate_page::new_bucket_flag, NULL, xl_hash_split_allocate_page::old_bucket_flag, PageGetSpecialPointer, PageSetLSN, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationNeedsWAL, SizeOfHashSplitAllocPage, START_CRIT_SECTION, XLH_SPLIT_META_UPDATE_MASKS, XLH_SPLIT_META_UPDATE_SPLITPOINT, XLOG_HASH_SPLIT_ALLOCATE_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_doinsert().

611 {
612  HashMetaPage metap;
613  Bucket old_bucket;
614  Bucket new_bucket;
615  uint32 spare_ndx;
616  BlockNumber start_oblkno;
617  BlockNumber start_nblkno;
618  Buffer buf_nblkno;
619  Buffer buf_oblkno;
620  Page opage;
621  Page npage;
622  HashPageOpaque oopaque;
623  HashPageOpaque nopaque;
624  uint32 maxbucket;
625  uint32 highmask;
626  uint32 lowmask;
627  bool metap_update_masks = false;
628  bool metap_update_splitpoint = false;
629 
630 restart_expand:
631 
632  /*
633  * Write-lock the meta page. It used to be necessary to acquire a
634  * heavyweight lock to begin a split, but that is no longer required.
635  */
636  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
637 
638  _hash_checkpage(rel, metabuf, LH_META_PAGE);
639  metap = HashPageGetMeta(BufferGetPage(metabuf));
640 
641  /*
642  * Check to see if split is still needed; someone else might have already
643  * done one while we waited for the lock.
644  *
645  * Make sure this stays in sync with _hash_doinsert()
646  */
647  if (metap->hashm_ntuples <=
648  (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
649  goto fail;
650 
651  /*
652  * Can't split anymore if maxbucket has reached its maximum possible
653  * value.
654  *
655  * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
656  * the calculation maxbucket+1 mustn't overflow). Currently we restrict
657  * to half that because of overflow looping in _hash_log2() and
658  * insufficient space in hashm_spares[]. It's moot anyway because an
659  * index with 2^32 buckets would certainly overflow BlockNumber and hence
660  * _hash_alloc_buckets() would fail, but if we supported buckets smaller
661  * than a disk block then this would be an independent constraint.
662  *
663  * If you change this, see also the maximum initial number of buckets in
664  * _hash_init().
665  */
666  if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
667  goto fail;
668 
669  /*
670  * Determine which bucket is to be split, and attempt to take cleanup lock
671  * on the old bucket. If we can't get the lock, give up.
672  *
673  * The cleanup lock protects us not only against other backends, but
674  * against our own backend as well.
675  *
676  * The cleanup lock is mainly to protect the split from concurrent
677  * inserts. See src/backend/access/hash/README, Lock Definitions for
678  * further details. Due to this locking restriction, if there is any
679  * pending scan, the split will give up which is not good, but harmless.
680  */
681  new_bucket = metap->hashm_maxbucket + 1;
682 
683  old_bucket = (new_bucket & metap->hashm_lowmask);
684 
685  start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
686 
687  buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
688  if (!buf_oblkno)
689  goto fail;
690 
691  opage = BufferGetPage(buf_oblkno);
692  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
693 
694  /*
695  * We want to finish the split from a bucket as there is no apparent
696  * benefit by not doing so and it will make the code complicated to finish
697  * the split that involves multiple buckets considering the case where new
698  * split also fails. We don't need to consider the new bucket for
699  * completing the split here as it is not possible that a re-split of new
700  * bucket starts when there is still a pending split from old bucket.
701  */
702  if (H_BUCKET_BEING_SPLIT(oopaque))
703  {
704  /*
705  * Copy bucket mapping info now; refer the comment in code below where
706  * we copy this information before calling _hash_splitbucket to see
707  * why this is okay.
708  */
709  maxbucket = metap->hashm_maxbucket;
710  highmask = metap->hashm_highmask;
711  lowmask = metap->hashm_lowmask;
712 
713  /*
714  * Release the lock on metapage and old_bucket, before completing the
715  * split.
716  */
717  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
718  LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
719 
720  _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
721  highmask, lowmask);
722 
723  /* release the pin on old buffer and retry for expand. */
724  _hash_dropbuf(rel, buf_oblkno);
725 
726  goto restart_expand;
727  }
728 
729  /*
730  * Clean the tuples remained from the previous split. This operation
731  * requires cleanup lock and we already have one on the old bucket, so
732  * let's do it. We also don't want to allow further splits from the bucket
733  * till the garbage of previous split is cleaned. This has two
734  * advantages; first, it helps in avoiding the bloat due to garbage and
735  * second is, during cleanup of bucket, we are always sure that the
736  * garbage tuples belong to most recently split bucket. On the contrary,
737  * if we allow cleanup of bucket after meta page is updated to indicate
738  * the new split and before the actual split, the cleanup operation won't
739  * be able to decide whether the tuple has been moved to the newly created
740  * bucket and ended up deleting such tuples.
741  */
742  if (H_NEEDS_SPLIT_CLEANUP(oopaque))
743  {
744  /*
745  * Copy bucket mapping info now; refer to the comment in code below
746  * where we copy this information before calling _hash_splitbucket
747  * to see why this is okay.
748  */
749  maxbucket = metap->hashm_maxbucket;
750  highmask = metap->hashm_highmask;
751  lowmask = metap->hashm_lowmask;
752 
753  /* Release the metapage lock. */
754  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
755 
756  hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
757  maxbucket, highmask, lowmask, NULL, NULL, true,
758  NULL, NULL);
759 
760  _hash_dropbuf(rel, buf_oblkno);
761 
762  goto restart_expand;
763  }
764 
765  /*
766  * There shouldn't be any active scan on new bucket.
767  *
768  * Note: it is safe to compute the new bucket's blkno here, even though we
769  * may still need to update the BUCKET_TO_BLKNO mapping. This is because
770  * the current value of hashm_spares[hashm_ovflpoint] correctly shows
771  * where we are going to put a new splitpoint's worth of buckets.
772  */
773  start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
774 
775  /*
776  * If the split point is increasing (hashm_maxbucket's log base 2
777  * increases), we need to allocate a new batch of bucket pages.
778  */
779  spare_ndx = _hash_log2(new_bucket + 1);
780  if (spare_ndx > metap->hashm_ovflpoint)
781  {
782  Assert(spare_ndx == metap->hashm_ovflpoint + 1);
783 
784  /*
785  * The number of buckets in the new splitpoint is equal to the total
786  * number already in existence, i.e. new_bucket. Currently this maps
787  * one-to-one to blocks required, but someday we may need a more
788  * complicated calculation here. We treat allocation of buckets as a
789  * separate WAL-logged action. Even if we fail after this operation,
790  * won't leak bucket pages; rather, the next split will consume this
791  * space. In any case, even without failure we don't use all the space
792  * in one split operation.
793  */
794  if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
795  {
796  /* can't split due to BlockNumber overflow */
797  _hash_relbuf(rel, buf_oblkno);
798  goto fail;
799  }
800  }
801 
802  /*
803  * Physically allocate the new bucket's primary page. We want to do this
804  * before changing the metapage's mapping info, in case we can't get the
805  * disk space. Ideally, we don't need to check for cleanup lock on new
806  * bucket as no other backend could find this bucket unless meta page is
807  * updated. However, it is good to be consistent with old bucket locking.
808  */
809  buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
810  if (!IsBufferCleanupOK(buf_nblkno))
811  {
812  _hash_relbuf(rel, buf_oblkno);
813  _hash_relbuf(rel, buf_nblkno);
814  goto fail;
815  }
816 
817  /*
818  * Since we are scribbling on the pages in the shared buffers, establish a
819  * critical section. Any failure in this next code leaves us with a big
820  * problem: the metapage is effectively corrupt but could get written back
821  * to disk.
822  */
823  START_CRIT_SECTION();
824 
825  /*
826  * Okay to proceed with split. Update the metapage bucket mapping info.
827  */
828  metap->hashm_maxbucket = new_bucket;
829 
830  if (new_bucket > metap->hashm_highmask)
831  {
832  /* Starting a new doubling */
833  metap->hashm_lowmask = metap->hashm_highmask;
834  metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
835  metap_update_masks = true;
836  }
837 
838  /*
839  * If the split point is increasing (hashm_maxbucket's log base 2
840  * increases), we need to adjust the hashm_spares[] array and
841  * hashm_ovflpoint so that future overflow pages will be created beyond
842  * this new batch of bucket pages.
843  */
844  if (spare_ndx > metap->hashm_ovflpoint)
845  {
846  metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
847  metap->hashm_ovflpoint = spare_ndx;
848  metap_update_splitpoint = true;
849  }
850 
851  MarkBufferDirty(metabuf);
852 
853  /*
854  * Copy bucket mapping info now; this saves re-accessing the meta page
855  * inside _hash_splitbucket's inner loop. Note that once we drop the
856  * split lock, other splits could begin, so these values might be out of
857  * date before _hash_splitbucket finishes. That's okay, since all it
858  * needs is to tell which of these two buckets to map hashkeys into.
859  */
860  maxbucket = metap->hashm_maxbucket;
861  highmask = metap->hashm_highmask;
862  lowmask = metap->hashm_lowmask;
863 
864  opage = BufferGetPage(buf_oblkno);
865  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
866 
867  /*
868  * Mark the old bucket to indicate that split is in progress. (At
869  * operation end, we will clear the split-in-progress flag.) Also,
870  * for a primary bucket page, hasho_prevblkno stores the number of
871  * buckets that existed as of the last split, so we must update that
872  * value here.
873  */
874  oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
875  oopaque->hasho_prevblkno = maxbucket;
876 
877  MarkBufferDirty(buf_oblkno);
878 
879  npage = BufferGetPage(buf_nblkno);
880 
881  /*
882  * initialize the new bucket's primary page and mark it to indicate that
883  * split is in progress.
884  */
885  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
886  nopaque->hasho_prevblkno = maxbucket;
887  nopaque->hasho_nextblkno = InvalidBlockNumber;
888  nopaque->hasho_bucket = new_bucket;
889  nopaque->hasho_flag = LH_BUCKET_BEING_POPULATED | LH_BUCKET_PAGE;
890  nopaque->hasho_page_id = HASHO_PAGE_ID;
891 
892  MarkBufferDirty(buf_nblkno);
893 
894  /* XLOG stuff */
895  if (RelationNeedsWAL(rel))
896  {
897  xl_hash_split_allocate_page xlrec;
898  XLogRecPtr recptr;
899 
900  xlrec.new_bucket = maxbucket;
901  xlrec.old_bucket_flag = oopaque->hasho_flag;
902  xlrec.new_bucket_flag = nopaque->hasho_flag;
903  xlrec.flags = 0;
904 
905  XLogBeginInsert();
906 
907  XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
908  XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
909  XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
910 
911  if (metap_update_masks)
912  {
913  xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
914  XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
915  XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
916  }
917 
918  if (metap_update_splitpoint)
919  {
920  xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
921  XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
922  sizeof(uint32));
923  XLogRegisterBufData(2,
924  (char *) &metap->hashm_spares[metap->hashm_ovflpoint],
925  sizeof(uint32));
926  }
927 
928  XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
929 
930  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
931 
932  PageSetLSN(BufferGetPage(buf_oblkno), recptr);
933  PageSetLSN(BufferGetPage(buf_nblkno), recptr);
934  PageSetLSN(BufferGetPage(metabuf), recptr);
935  }
936 
937  END_CRIT_SECTION();
938 
939  /* drop lock, but keep pin */
940  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
941 
942  /* Relocate records to the new bucket */
943  _hash_splitbucket(rel, metabuf,
944  old_bucket, new_bucket,
945  buf_oblkno, buf_nblkno, NULL,
946  maxbucket, highmask, lowmask);
947 
948  /* all done, now release the locks and pins on primary buckets. */
949  _hash_relbuf(rel, buf_oblkno);
950  _hash_relbuf(rel, buf_nblkno);
951 
952  return;
953 
954  /* Here if decide not to split or fail to acquire old bucket lock */
955 fail:
956 
957  /* We didn't write the metapage, so just drop lock */
958  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
959 }
void _hash_finish_split ( Relation  rel,
Buffer  metabuf,
Buffer  obuf,
Bucket  obucket,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)

Definition at line 1311 of file hashpage.c.

References _hash_get_newblock_from_oldbucket(), _hash_getbuf(), _hash_relbuf(), _hash_splitbucket(), Assert, BlockNumberIsValid, BUFFER_LOCK_UNLOCK, BufferGetPage, ConditionalLockBufferForCleanup(), CurrentMemoryContext, HASHCTL::entrysize, FirstOffsetNumber, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, HASH_READ, hash_search(), HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_nextblkno, HASHCTL::hcxt, InvalidBuffer, HASHCTL::keysize, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LockBuffer(), OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, and IndexTupleData::t_tid.

Referenced by _hash_doinsert(), and _hash_expandtable().

1313 {
1314  HASHCTL hash_ctl;
1315  HTAB *tidhtab;
1316  Buffer bucket_nbuf = InvalidBuffer;
1317  Buffer nbuf;
1318  Page npage;
1319  BlockNumber nblkno;
1320  BlockNumber bucket_nblkno;
1321  HashPageOpaque npageopaque;
1322  Bucket nbucket;
1323  bool found;
1324 
1325  /* Initialize hash tables used to track TIDs */
1326  memset(&hash_ctl, 0, sizeof(hash_ctl));
1327  hash_ctl.keysize = sizeof(ItemPointerData);
1328  hash_ctl.entrysize = sizeof(ItemPointerData);
1329  hash_ctl.hcxt = CurrentMemoryContext;
1330 
1331  tidhtab =
1332  hash_create("bucket ctids",
1333  256, /* arbitrary initial size */
1334  &hash_ctl,
1336 
1335  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1336 
1337  bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1338 
1339  /*
1340  * Scan the new bucket and build hash table of TIDs
1341  */
1342  for (;;)
1343  {
1344  OffsetNumber noffnum;
1345  OffsetNumber nmaxoffnum;
1346 
1347  nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1348  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1349 
1350  /* remember the primary bucket buffer to acquire cleanup lock on it. */
1351  if (nblkno == bucket_nblkno)
1352  bucket_nbuf = nbuf;
1353 
1354  npage = BufferGetPage(nbuf);
1355  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1356 
1357  /* Scan each tuple in new page */
1358  nmaxoffnum = PageGetMaxOffsetNumber(npage);
1359  for (noffnum = FirstOffsetNumber;
1360  noffnum <= nmaxoffnum;
1361  noffnum = OffsetNumberNext(noffnum))
1362  {
1363  IndexTuple itup;
1364 
1365  /* Fetch the item's TID and insert it in hash table. */
1366  itup = (IndexTuple) PageGetItem(npage,
1367  PageGetItemId(npage, noffnum));
1368 
1369  (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1370 
1371  Assert(!found);
1372  }
1373 
1374  nblkno = npageopaque->hasho_nextblkno;
1375 
1376  /*
1377  * release our write lock without modifying buffer and ensure to
1378  * retain the pin on primary bucket.
1379  */
1380  if (nbuf == bucket_nbuf)
1381  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1382  else
1383  _hash_relbuf(rel, nbuf);
1384 
1385  /* Exit loop if no more overflow pages in new bucket */
1386  if (!BlockNumberIsValid(nblkno))
1387  break;
1388  }
1389 
1390  /*
1391  * Conditionally get the cleanup lock on old and new buckets to perform
1392  * the split operation. If we don't get the cleanup locks, silently give
1393  * up and next insertion on old bucket will try again to complete the
1394  * split.
1395  */
1396  if (!ConditionalLockBufferForCleanup(obuf))
1397  {
1398  hash_destroy(tidhtab);
1399  return;
1400  }
1401  if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1402  {
1403  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1404  hash_destroy(tidhtab);
1405  return;
1406  }
1407 
1408  npage = BufferGetPage(bucket_nbuf);
1409  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1410  nbucket = npageopaque->hasho_bucket;
1411 
1412  _hash_splitbucket(rel, metabuf, obucket,
1413  nbucket, obuf, bucket_nbuf, tidhtab,
1414  maxbucket, highmask, lowmask);
1415 
1416  _hash_relbuf(rel, bucket_nbuf);
1417  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1418  hash_destroy(tidhtab);
1419 }
Buffer _hash_getbucketbuf_from_hashkey ( Relation  rel,
uint32  hashkey,
int  access,
HashMetaPage cachedmetap 
)

Definition at line 1516 of file hashpage.c.

References _hash_dropbuf(), _hash_getbuf(), _hash_getcachedmetap(), _hash_hashkey2bucket(), _hash_relbuf(), Assert, BUCKET_TO_BLKNO, buf, BufferGetPage, BufferIsValid, HASH_READ, HASH_WRITE, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_prevblkno, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, NULL, and PageGetSpecialPointer.

Referenced by _hash_doinsert(), and _hash_first().

1518 {
1519  HashMetaPage metap;
1520  Buffer buf;
1521  Buffer metabuf = InvalidBuffer;
1522  Page page;
1523  Bucket bucket;
1524  BlockNumber blkno;
1525  HashPageOpaque opaque;
1526 
1527  /* We read from target bucket buffer, hence locking is must. */
1528  Assert(access == HASH_READ || access == HASH_WRITE);
1529 
1530  metap = _hash_getcachedmetap(rel, &metabuf, false);
1531  Assert(metap != NULL);
1532 
1533  /*
1534  * Loop until we get a lock on the correct target bucket.
1535  */
1536  for (;;)
1537  {
1538  /*
1539  * Compute the target bucket number, and convert to block number.
1540  */
1541  bucket = _hash_hashkey2bucket(hashkey,
1542  metap->hashm_maxbucket,
1543  metap->hashm_highmask,
1544  metap->hashm_lowmask);
1545 
1546  blkno = BUCKET_TO_BLKNO(metap, bucket);
1547 
1548  /* Fetch the primary bucket page for the bucket */
1549  buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1550  page = BufferGetPage(buf);
1551  opaque = (HashPageOpaque) PageGetSpecialPointer(page);
1552  Assert(opaque->hasho_bucket == bucket);
1553 
1554  /*
1555  * If this bucket hasn't been split, we're done.
1556  *
1557  * NB: The check for InvalidBlockNumber is only needed for on-disk
1558  * compatibility with indexes created before we started storing
1559  * hashm_maxbucket in the primary page's hasho_prevblkno.
1560  */
1561  if (opaque->hasho_prevblkno == InvalidBlockNumber ||
1562  opaque->hasho_prevblkno <= metap->hashm_maxbucket)
1563  break;
1564 
1565  /* Drop lock on this buffer, update cached metapage, and retry. */
1566  _hash_relbuf(rel, buf);
1567  metap = _hash_getcachedmetap(rel, &metabuf, true);
1568  Assert(metap != NULL);
1569  }
1570 
1571  if (BufferIsValid(metabuf))
1572  _hash_dropbuf(rel, metabuf);
1573 
1574  if (cachedmetap)
1575  *cachedmetap = metap;
1576 
1577  return buf;
1578 }
Buffer _hash_getbuf ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags 
)

Definition at line 78 of file hashpage.c.

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), P_NEW, and ReadBuffer().

Referenced by _hash_addovflpage(), _hash_doinsert(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readprev(), _hash_splitbucket(), hash_bitmap_info(), hashbulkdelete(), and pgstathashindex().

79 {
80  Buffer buf;
81 
82  if (blkno == P_NEW)
83  elog(ERROR, "hash AM does not use P_NEW");
84 
85  buf = ReadBuffer(rel, blkno);
86 
87  if (access != HASH_NOLOCK)
88  LockBuffer(buf, access);
89 
90  /* ref count and lock type are correct */
91 
92  _hash_checkpage(rel, buf, flags);
93 
94  return buf;
95 }
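A minimal usage sketch, assuming the conventions visible elsewhere on this page: callers pair _hash_getbuf() (pin + lock + page-type check via _hash_checkpage()) with _hash_relbuf() to drop both the lock and the pin. The hypothetical helper below is not part of hashpage.c; it only illustrates the calling pattern and compiles only against the backend headers.

#include "postgres.h"
#include "access/hash.h"

/* Hypothetical illustration: share-lock one bucket or overflow page and
 * return its successor in the overflow chain. */
static BlockNumber
sketch_read_next_blkno(Relation rel, BlockNumber blkno)
{
    Buffer      buf;
    Page        page;
    HashPageOpaque opaque;
    BlockNumber next;

    /* pin + share-lock the page; _hash_checkpage() verifies the page type */
    buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
    page = BufferGetPage(buf);
    opaque = (HashPageOpaque) PageGetSpecialPointer(page);
    next = opaque->hasho_nextblkno;

    /* release both the lock and the pin */
    _hash_relbuf(rel, buf);
    return next;
}
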
Buffer _hash_getbuf_with_condlock_cleanup ( Relation  rel,
BlockNumber  blkno,
int  flags 
)

Definition at line 104 of file hashpage.c.

References _hash_checkpage(), buf, ConditionalLockBufferForCleanup(), elog, ERROR, InvalidBuffer, P_NEW, ReadBuffer(), and ReleaseBuffer().

Referenced by _hash_expandtable().

105 {
106  Buffer buf;
107 
108  if (blkno == P_NEW)
109  elog(ERROR, "hash AM does not use P_NEW");
110 
111  buf = ReadBuffer(rel, blkno);
112 
112 
113  if (!ConditionalLockBufferForCleanup(buf))
114  {
115  ReleaseBuffer(buf);
116  return InvalidBuffer;
117  }
118 
119  /* ref count and lock type are correct */
120 
121  _hash_checkpage(rel, buf, flags);
122 
123  return buf;
124 }
Buffer _hash_getbuf_with_strategy ( Relation  rel,
BlockNumber  blkno,
int  access,
int  flags,
BufferAccessStrategy  bstrategy 
)

Definition at line 247 of file hashpage.c.

References _hash_checkpage(), buf, elog, ERROR, HASH_NOLOCK, LockBuffer(), MAIN_FORKNUM, P_NEW, RBM_NORMAL, and ReadBufferExtended().

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), hashbucketcleanup(), and pgstat_hash_page().

250 {
251  Buffer buf;
252 
253  if (blkno == P_NEW)
254  elog(ERROR, "hash AM does not use P_NEW");
255 
256  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
257 
258  if (access != HASH_NOLOCK)
259  LockBuffer(buf, access);
260 
261  /* ref count and lock type are correct */
262 
263  _hash_checkpage(rel, buf, flags);
264 
265  return buf;
266 }
HashMetaPage _hash_getcachedmetap ( Relation  rel,
Buffer metabuf,
bool  force_refresh 
)

Definition at line 1458 of file hashpage.c.

References _hash_getbuf(), Assert, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HASH_METAPAGE, HASH_READ, HashPageGetMeta, LH_META_PAGE, LockBuffer(), MemoryContextAlloc(), NULL, RelationData::rd_amcache, and RelationData::rd_indexcxt.

Referenced by _hash_getbucketbuf_from_hashkey(), and hashbulkdelete().

1459 {
1460  Page page;
1461 
1462  Assert(metabuf);
1463  if (force_refresh || rel->rd_amcache == NULL)
1464  {
1465  char *cache = NULL;
1466 
1467  /*
1468  * It's important that we don't set rd_amcache to an invalid
1469  * value. Either MemoryContextAlloc or _hash_getbuf could fail,
1470  * so don't install a pointer to the newly-allocated storage in the
1471  * actual relcache entry until both have succeeded.
1472  */
1473  if (rel->rd_amcache == NULL)
1474  cache = MemoryContextAlloc(rel->rd_indexcxt,
1475  sizeof(HashMetaPageData));
1476 
1477  /* Read the metapage. */
1478  if (BufferIsValid(*metabuf))
1479  LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1480  else
1481  *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1482  LH_META_PAGE);
1483  page = BufferGetPage(*metabuf);
1484 
1485  /* Populate the cache. */
1486  if (rel->rd_amcache == NULL)
1487  rel->rd_amcache = cache;
1488  memcpy(rel->rd_amcache, HashPageGetMeta(page),
1489  sizeof(HashMetaPageData));
1490 
1491  /* Release metapage lock, but keep the pin. */
1492  LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1493  }
1494 
1495  return (HashMetaPage) rel->rd_amcache;
1496 }
Buffer _hash_getinitbuf ( Relation  rel,
BlockNumber  blkno 
)

Definition at line 143 of file hashpage.c.

References _hash_pageinit(), buf, BufferGetPage, BufferGetPageSize, elog, ERROR, MAIN_FORKNUM, NULL, P_NEW, RBM_ZERO_AND_LOCK, and ReadBufferExtended().

Referenced by _hash_addovflpage().

144 {
145  Buffer buf;
146 
147  if (blkno == P_NEW)
148  elog(ERROR, "hash AM does not use P_NEW");
149 
150  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
151  NULL);
152 
153  /* ref count and lock type are correct */
154 
155  /* initialize the page */
156  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
157 
158  return buf;
159 }
Buffer _hash_getnewbuf ( Relation  rel,
BlockNumber  blkno,
ForkNumber  forkNum 
)

Definition at line 206 of file hashpage.c.

References _hash_pageinit(), buf, BufferGetBlockNumber(), BufferGetPage, BufferGetPageSize, elog, ERROR, HASH_WRITE, LockBuffer(), NULL, P_NEW, RBM_NORMAL, RBM_ZERO_AND_LOCK, ReadBufferExtended(), RelationGetNumberOfBlocksInFork(), and RelationGetRelationName.

Referenced by _hash_addovflpage(), _hash_expandtable(), and _hash_init().

207 {
208  BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
209  Buffer buf;
210 
211  if (blkno == P_NEW)
212  elog(ERROR, "hash AM does not use P_NEW");
213  if (blkno > nblocks)
214  elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
215  RelationGetRelationName(rel));
216 
217  /* smgr insists we use P_NEW to extend the relation */
218  if (blkno == nblocks)
219  {
220  buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
221  if (BufferGetBlockNumber(buf) != blkno)
222  elog(ERROR, "unexpected hash relation size: %u, should be %u",
223  BufferGetBlockNumber(buf), blkno);
224  LockBuffer(buf, HASH_WRITE);
225  }
226  else
227  {
228  buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
229  NULL);
230  }
231 
232  /* ref count and lock type are correct */
233 
234  /* initialize the page */
235  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
236 
237  return buf;
238 }
uint32 _hash_init ( Relation  rel,
double  num_tuples,
ForkNumber  forkNum 
)

Definition at line 335 of file hashpage.c.

References _hash_getnewbuf(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_relbuf(), xl_hash_init_bitmap_page::bmsize, BUCKET_TO_BLKNO, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, CHECK_FOR_INTERRUPTS, elog, ereport, errcode(), errmsg(), ERROR, xl_hash_init_meta_page::ffactor, HASH_DEFAULT_FILLFACTOR, HASH_MAX_BITMAPS, HASH_METAPAGE, HashMetaPageData::hashm_bmsize, HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_procid, HashPageGetMeta, HASHPROC, i, index_getprocid(), LH_BUCKET_PAGE, LockBuffer(), log_newpage(), MarkBufferDirty(), MAXALIGN, xl_hash_init_meta_page::num_tuples, PageSetLSN, xl_hash_init_meta_page::procid, RelationData::rd_node, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetNumberOfBlocksInFork(), RelationGetRelationName, RelationGetTargetPageUsage, RelationNeedsWAL, SizeOfHashInitBitmapPage, SizeOfHashInitMetaPage, XLOG_HASH_INIT_BITMAP_PAGE, XLOG_HASH_INIT_META_PAGE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by hashbuild(), and hashbuildempty().

336 {
337  Buffer metabuf;
338  Buffer buf;
339  Buffer bitmapbuf;
340  Page pg;
341  HashMetaPage metap;
342  RegProcedure procid;
343  int32 data_width;
344  int32 item_width;
345  int32 ffactor;
346  uint32 num_buckets;
347  uint32 i;
348 
349  /* safety check */
350  if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
351  elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
352  RelationGetRelationName(rel));
353 
354  /*
355  * Determine the target fill factor (in tuples per bucket) for this index.
356  * The idea is to make the fill factor correspond to pages about as full
357  * as the user-settable fillfactor parameter says. We can compute it
358  * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
359  */
360  data_width = sizeof(uint32);
361  item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
362  sizeof(ItemIdData); /* include the line pointer */
363  ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
364  /* keep to a sane range */
365  if (ffactor < 10)
366  ffactor = 10;
367 
368  procid = index_getprocid(rel, 1, HASHPROC);
369 
370  /*
371  * We initialize the metapage, the first N bucket pages, and the first
372  * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
373  * calls to occur. This ensures that the smgr level has the right idea of
374  * the physical index length.
375  *
376  * Critical section not required, because on error the creation of the
377  * whole relation will be rolled back.
378  */
379  metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
380  _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
381  MarkBufferDirty(metabuf);
382 
383  pg = BufferGetPage(metabuf);
384  metap = HashPageGetMeta(pg);
385 
386  /* XLOG stuff */
387  if (RelationNeedsWAL(rel))
388  {
389  xl_hash_init_meta_page xlrec;
390  XLogRecPtr recptr;
391 
392  xlrec.num_tuples = num_tuples;
393  xlrec.procid = metap->hashm_procid;
394  xlrec.ffactor = metap->hashm_ffactor;
395 
396  XLogBeginInsert();
397  XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
399 
400  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
401 
402  PageSetLSN(BufferGetPage(metabuf), recptr);
403  }
404 
405  num_buckets = metap->hashm_maxbucket + 1;
406 
407  /*
408  * Release buffer lock on the metapage while we initialize buckets.
409  * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
410  * won't accomplish anything. It's a bad idea to hold buffer locks for
411  * long intervals in any case, since that can block the bgwriter.
412  */
413  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
414 
415  /*
416  * Initialize and WAL Log the first N buckets
417  */
418  for (i = 0; i < num_buckets; i++)
419  {
420  BlockNumber blkno;
421 
422  /* Allow interrupts, in case N is huge */
423  CHECK_FOR_INTERRUPTS();
424 
425  blkno = BUCKET_TO_BLKNO(metap, i);
426  buf = _hash_getnewbuf(rel, blkno, forkNum);
427  _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
428  MarkBufferDirty(buf);
429 
430  log_newpage(&rel->rd_node,
431  forkNum,
432  blkno,
433  BufferGetPage(buf),
434  true);
435  _hash_relbuf(rel, buf);
436  }
437 
438  /* Now reacquire buffer lock on metapage */
439  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
440 
441  /*
442  * Initialize bitmap page
443  */
444  bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
445  _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
446  MarkBufferDirty(bitmapbuf);
447 
448  /* add the new bitmap page to the metapage's list of bitmaps */
449  /* metapage already has a write lock */
450  if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
451  ereport(ERROR,
452  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
453  errmsg("out of overflow pages in hash index \"%s\"",
454  RelationGetRelationName(rel))));
455 
456  metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
457 
458  metap->hashm_nmaps++;
459  MarkBufferDirty(metabuf);
460 
461  /* XLOG stuff */
462  if (RelationNeedsWAL(rel))
463  {
464  xl_hash_init_bitmap_page xlrec;
465  XLogRecPtr recptr;
466 
467  xlrec.bmsize = metap->hashm_bmsize;
468 
469  XLogBeginInsert();
470  XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
471  XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
472 
473  /*
474  * This is safe only because nobody else can be modifying the index at
475  * this stage; it's only visible to the transaction that is creating
476  * it.
477  */
478  XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
479 
480  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
481 
482  PageSetLSN(BufferGetPage(bitmapbuf), recptr);
483  PageSetLSN(BufferGetPage(metabuf), recptr);
484  }
485 
486  /* all done */
487  _hash_relbuf(rel, bitmapbuf);
488  _hash_relbuf(rel, metabuf);
489 
490  return num_buckets;
491 }
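The fill-factor computation at lines 360-366 is plain integer arithmetic. Assuming the default 8 kB page size, 8-byte MAXALIGN, the default fillfactor of 75 (HASH_DEFAULT_FILLFACTOR) and the usual 8-byte IndexTupleData and 4-byte ItemIdData sizes, it works out to roughly 307 tuples per bucket. The standalone sketch below (plain C, not part of hashpage.c) reproduces the calculation under those assumptions:

#include <stdio.h>

#define BLCKSZ                  8192    /* assume the default page size */
#define MAXALIGN(LEN)           (((LEN) + 7) & ~7)  /* assume 8-byte alignment */
#define HASH_DEFAULT_FILLFACTOR 75

int
main(void)
{
    int data_width = 4;                 /* sizeof(uint32) hash key */
    int item_width = MAXALIGN(8)        /* sizeof(IndexTupleData) */
                   + MAXALIGN(data_width)
                   + 4;                 /* sizeof(ItemIdData): line pointer */
    int page_usage = BLCKSZ * HASH_DEFAULT_FILLFACTOR / 100;
    int ffactor = page_usage / item_width;

    if (ffactor < 10)
        ffactor = 10;
    printf("item_width=%d page_usage=%d ffactor=%d\n",
           item_width, page_usage, ffactor);    /* 20, 6144, 307 */
    return 0;
}
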
void _hash_init_metabuffer ( Buffer  buf,
double  num_tuples,
RegProcedure  procid,
uint16  ffactor,
bool  initpage 
)

Definition at line 497 of file hashpage.c.

References _hash_log2(), _hash_pageinit(), Assert, BMPG_MASK, BMPG_SHIFT, BufferGetPage, BufferGetPageSize, BYTE_TO_BIT, HASH_MAGIC, HASH_MAX_SPLITPOINTS, HASH_VERSION, HashGetMaxBitmapSize, HashMetaPageData::hashm_bmshift, HashMetaPageData::hashm_bmsize, HashMetaPageData::hashm_bsize, HashMetaPageData::hashm_ffactor, HashMetaPageData::hashm_firstfree, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_magic, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_ntuples, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_procid, HashMetaPageData::hashm_spares, HashMetaPageData::hashm_version, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, i, InvalidBlockNumber, LH_META_PAGE, MemSet, and PageGetSpecialPointer.

Referenced by _hash_init(), and hash_xlog_init_meta_page().

499 {
500  HashMetaPage metap;
501  HashPageOpaque pageopaque;
502  Page page;
503  double dnumbuckets;
504  uint32 num_buckets;
505  uint32 log2_num_buckets;
506  uint32 i;
507 
508  /*
509  * Choose the number of initial bucket pages to match the fill factor
510  * given the estimated number of tuples. We round up the result to the
511  * next power of 2, however, and always force at least 2 bucket pages. The
512  * upper limit is determined by considerations explained in
513  * _hash_expandtable().
514  */
515  dnumbuckets = num_tuples / ffactor;
516  if (dnumbuckets <= 2.0)
517  num_buckets = 2;
518  else if (dnumbuckets >= (double) 0x40000000)
519  num_buckets = 0x40000000;
520  else
521  num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);
522 
523  log2_num_buckets = _hash_log2(num_buckets);
524  Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
525  Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
526 
527  page = BufferGetPage(buf);
528  if (initpage)
529  _hash_pageinit(page, BufferGetPageSize(buf));
530 
531  pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
532  pageopaque->hasho_prevblkno = InvalidBlockNumber;
533  pageopaque->hasho_nextblkno = InvalidBlockNumber;
534  pageopaque->hasho_bucket = -1;
535  pageopaque->hasho_flag = LH_META_PAGE;
536  pageopaque->hasho_page_id = HASHO_PAGE_ID;
537 
538  metap = HashPageGetMeta(page);
539 
540  metap->hashm_magic = HASH_MAGIC;
541  metap->hashm_version = HASH_VERSION;
542  metap->hashm_ntuples = 0;
543  metap->hashm_nmaps = 0;
544  metap->hashm_ffactor = ffactor;
545  metap->hashm_bsize = HashGetMaxBitmapSize(page);
546  /* find largest bitmap array size that will fit in page size */
547  for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
548  {
549  if ((1 << i) <= metap->hashm_bsize)
550  break;
551  }
552  Assert(i > 0);
553  metap->hashm_bmsize = 1 << i;
554  metap->hashm_bmshift = i + BYTE_TO_BIT;
555  Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
556 
557  /*
558  * Label the index with its primary hash support function's OID. This is
559  * pretty useless for normal operation (in fact, hashm_procid is not used
560  * anywhere), but it might be handy for forensic purposes so we keep it.
561  */
562  metap->hashm_procid = procid;
563 
564  /*
565  * We initialize the index with N buckets, 0 .. N-1, occupying physical
566  * blocks 1 to N. The first freespace bitmap page is in block N+1. Since
567  * N is a power of 2, we can set the masks this way:
568  */
569  metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
570  metap->hashm_highmask = (num_buckets << 1) - 1;
571 
572  MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
573  MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
574 
575  /* Set up mapping for one spare page after the initial splitpoints */
576  metap->hashm_spares[log2_num_buckets] = 1;
577  metap->hashm_ovflpoint = log2_num_buckets;
578  metap->hashm_firstfree = 0;
579 
580  /*
581  * Set pd_lower just past the end of the metadata. This is to log full
582  * page image of metapage in xloginsert.c.
583  */
584  ((PageHeader) page)->pd_lower =
585  ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
586 }
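
As a worked illustration of the bucket-count and mask arithmetic above, the following standalone sketch (ordinary C, not backend code; my_log2 is a hypothetical stand-in for _hash_log2, and the tuple estimate and fill factor are made-up inputs) reproduces the rounding up to a power of two and the resulting lowmask/highmask values:

#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for _hash_log2(): smallest i with (1 << i) >= num */
static uint32_t my_log2(uint32_t num)
{
    uint32_t i;
    uint32_t limit;

    for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
        ;
    return i;
}

int main(void)
{
    double      num_tuples = 100000.0;  /* made-up tuple estimate */
    uint16_t    ffactor = 75;           /* made-up fill factor */
    double      dnumbuckets = num_tuples / ffactor;
    uint32_t    num_buckets;

    if (dnumbuckets <= 2.0)
        num_buckets = 2;
    else if (dnumbuckets >= (double) 0x40000000)
        num_buckets = 0x40000000;
    else
        num_buckets = ((uint32_t) 1) << my_log2((uint32_t) dnumbuckets);

    /* masks as set by _hash_init_metabuffer():
     * maxbucket = lowmask = N - 1, highmask = 2N - 1 */
    printf("buckets=%u lowmask=0x%x highmask=0x%x\n",
           (unsigned) num_buckets,
           (unsigned) (num_buckets - 1),
           (unsigned) ((num_buckets << 1) - 1));
    return 0;
}

With these inputs the sketch prints buckets=2048 lowmask=0x7ff highmask=0xfff, i.e. 2048 initial bucket pages occupying blocks 1..2048, with the first bitmap page at block 2049.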
uint16 hashm_bmshift
Definition: hash.h:195
uint16 hasho_page_id
Definition: hash.h:82
RegProcedure hashm_procid
Definition: hash.h:203
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:592
#define LH_META_PAGE
Definition: hash.h:56
#define BYTE_TO_BIT
Definition: hash.h:228
uint32 hashm_magic
Definition: hash.h:188
uint16 hashm_ffactor
Definition: hash.h:191
uint32 hashm_highmask
Definition: hash.h:197
#define MemSet(start, val, len)
Definition: c.h:857
#define HASH_VERSION
Definition: hash.h:161
uint32 hashm_lowmask
Definition: hash.h:198
#define HASH_MAX_SPLITPOINTS
Definition: hash.h:183
#define HASH_MAGIC
Definition: hash.h:160
#define HashGetMaxBitmapSize(page)
Definition: hash.h:246
BlockNumber hasho_prevblkno
Definition: hash.h:78
uint32 hashm_version
Definition: hash.h:189
uint32 hashm_nmaps
Definition: hash.h:202
static char * buf
Definition: pg_test_fsync.c:65
#define BMPG_MASK(metap)
Definition: hash.h:241
unsigned int uint32
Definition: c.h:268
#define BMPG_SHIFT(metap)
Definition: hash.h:240
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
uint32 hashm_ovflpoint
Definition: hash.h:199
uint16 hashm_bsize
Definition: hash.h:192
double hashm_ntuples
Definition: hash.h:190
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:147
uint32 hashm_firstfree
Definition: hash.h:201
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:204
PageHeaderData * PageHeader
Definition: bufpage.h:162
#define Assert(condition)
Definition: c.h:675
Bucket hasho_bucket
Definition: hash.h:80
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:85
#define HASHO_PAGE_ID
Definition: hash.h:98
uint32 hashm_maxbucket
Definition: hash.h:196
uint16 hasho_flag
Definition: hash.h:81
#define HashPageGetMeta(page)
Definition: hash.h:250
uint32 _hash_log2(uint32 num)
Definition: hashutil.c:141
int i
BlockNumber hasho_nextblkno
Definition: hash.h:79
uint16 hashm_bmsize
Definition: hash.h:193
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]
Definition: hash.h:206
Pointer Page
Definition: bufpage.h:74
void _hash_initbuf ( Buffer  buf,
uint32  max_bucket,
uint32  num_bucket,
uint32  flag,
bool  initpage 
)

Definition at line 165 of file hashpage.c.

References _hash_pageinit(), BufferGetPage, BufferGetPageSize, flag(), HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, InvalidBlockNumber, and PageGetSpecialPointer.

Referenced by _hash_init(), hash_xlog_add_ovfl_page(), and hash_xlog_split_allocate_page().

167 {
168  HashPageOpaque pageopaque;
169  Page page;
170 
171  page = BufferGetPage(buf);
172 
173  /* initialize the page */
174  if (initpage)
175  _hash_pageinit(page, BufferGetPageSize(buf));
176 
177  pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
178 
179  /*
180  * Set hasho_prevblkno with current hashm_maxbucket. This value will
181  * be used to validate cached HashMetaPageData. See
182  * _hash_getbucketbuf_from_hashkey().
183  */
184  pageopaque->hasho_prevblkno = max_bucket;
185  pageopaque->hasho_nextblkno = InvalidBlockNumber;
186  pageopaque->hasho_bucket = num_bucket;
187  pageopaque->hasho_flag = flag;
188  pageopaque->hasho_page_id = HASHO_PAGE_ID;
189 }
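
To make the special-space layout written by _hash_initbuf() concrete, here is a standalone sketch (plain C, not backend code; the struct mirrors HashPageOpaqueData, the flag and page-id constants follow hash.h, and the bucket number and cached maxbucket are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BLCKSZ          8192
#define INVALID_BLOCK   0xFFFFFFFFu     /* mirrors InvalidBlockNumber */
#define LH_BUCKET_PAGE  (1 << 1)        /* page-type flag, as in hash.h */
#define HASHO_PAGE_ID   0xFF80          /* hash-AM page id, as in hash.h */

/* mirrors HashPageOpaqueData, the hash AM's per-page special area */
typedef struct
{
    uint32_t    hasho_prevblkno;    /* caches hashm_maxbucket on bucket pages */
    uint32_t    hasho_nextblkno;    /* next overflow page, or invalid */
    uint32_t    hasho_bucket;       /* bucket this page belongs to */
    uint16_t    hasho_flag;         /* page-type flags */
    uint16_t    hasho_page_id;      /* identifies the page as a hash page */
} HashOpaque;

int main(void)
{
    uint64_t    pagebuf[BLCKSZ / sizeof(uint64_t)]; /* keeps the page aligned */
    char       *page = (char *) pagebuf;
    HashOpaque *op;

    memset(page, 0, BLCKSZ);

    /* PageInit() reserves the special area at the very end of the page */
    op = (HashOpaque *) (page + BLCKSZ - sizeof(HashOpaque));
    op->hasho_prevblkno = 7;                /* pretend hashm_maxbucket == 7 */
    op->hasho_nextblkno = INVALID_BLOCK;
    op->hasho_bucket = 3;                   /* made-up bucket number */
    op->hasho_flag = LH_BUCKET_PAGE;
    op->hasho_page_id = HASHO_PAGE_ID;

    printf("bucket %u, flags 0x%x, cached maxbucket %u\n",
           (unsigned) op->hasho_bucket,
           (unsigned) op->hasho_flag,
           (unsigned) op->hasho_prevblkno);
    return 0;
}

Stashing hashm_maxbucket in hasho_prevblkno is what later lets _hash_getbucketbuf_from_hashkey() detect that its cached copy of the metapage has gone stale.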
uint16 hasho_page_id
Definition: hash.h:82
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:592
BlockNumber hasho_prevblkno
Definition: hash.h:78
static char * buf
Definition: pg_test_fsync.c:65
char * flag(int b)
Definition: test-ctype.c:33
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:147
Bucket hasho_bucket
Definition: hash.h:80
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:85
#define HASHO_PAGE_ID
Definition: hash.h:98
uint16 hasho_flag
Definition: hash.h:81
BlockNumber hasho_nextblkno
Definition: hash.h:79
Pointer Page
Definition: bufpage.h:74
void _hash_pageinit ( Page  page,
Size  size 
)

Definition at line 592 of file hashpage.c.

References PageInit().

Referenced by _hash_alloc_buckets(), _hash_freeovflpage(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), and hash_xlog_squeeze_page().

593 {
594  PageInit(page, size, sizeof(HashPageOpaqueData));
595 }
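
For reference, the special-space size passed here determines where PageInit() places pd_special; a small sketch of that arithmetic (assuming the default 8 kB block, an 8-byte MAXALIGN, and 16 bytes as a typical sizeof(HashPageOpaqueData)):

#include <stdio.h>
#include <stddef.h>

/* mirrors MAXALIGN for a typical 8-byte maximum alignment */
#define MAXALIGN(LEN) (((size_t) (LEN) + 7) & ~((size_t) 7))

int main(void)
{
    size_t  blcksz = 8192;      /* default BLCKSZ */
    size_t  special = 16;       /* typical sizeof(HashPageOpaqueData) */

    /* PageInit() puts pd_special at the page end minus the aligned special size */
    printf("pd_special = %zu\n", blcksz - MAXALIGN(special));
    return 0;
}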
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:41
static void _hash_splitbucket ( Relation  rel,
Buffer  metabuf,
Bucket  obucket,
Bucket  nbucket,
Buffer  obuf,
Buffer  nbuf,
HTAB htab,
uint32  maxbucket,
uint32  highmask,
uint32  lowmask 
)
static

Definition at line 1056 of file hashpage.c.

References _hash_addovflpage(), _hash_get_indextuple_hashkey(), _hash_getbuf(), _hash_hashkey2bucket(), _hash_pgaddmultitup(), _hash_relbuf(), Assert, BlockNumberIsValid, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, CopyIndexTuple(), END_CRIT_SECTION, FirstOffsetNumber, HASH_FIND, HASH_READ, hash_search(), HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, i, INDEX_MOVED_BY_SPLIT_MASK, IndexTupleDSize, ItemIdIsDead, LH_BUCKET_BEING_POPULATED, LH_BUCKET_BEING_SPLIT, LH_BUCKET_NEEDS_SPLIT_CLEANUP, LH_OVERFLOW_PAGE, LockBuffer(), log_split_page(), MarkBufferDirty(), MAXALIGN, MaxIndexTuplesPerPage, xl_hash_split_complete::new_bucket_flag, OffsetNumberNext, xl_hash_split_complete::old_bucket_flag, PageGetFreeSpaceForMultipleTuples(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageGetSpecialPointer, PageSetLSN, pfree(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHashSplitComplete, START_CRIT_SECTION, IndexTupleData::t_info, IndexTupleData::t_tid, XLOG_HASH_SPLIT_COMPLETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _hash_expandtable(), and _hash_finish_split().

1066 {
1067  Buffer bucket_obuf;
1068  Buffer bucket_nbuf;
1069  Page opage;
1070  Page npage;
1071  HashPageOpaque oopaque;
1072  HashPageOpaque nopaque;
1073  OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
1074  IndexTuple itups[MaxIndexTuplesPerPage];
1075  Size all_tups_size = 0;
1076  int i;
1077  uint16 nitups = 0;
1078 
1079  bucket_obuf = obuf;
1080  opage = BufferGetPage(obuf);
1081  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1082 
1083  bucket_nbuf = nbuf;
1084  npage = BufferGetPage(nbuf);
1085  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1086 
1087  /*
1088  * Partition the tuples in the old bucket between the old bucket and the
1089  * new bucket, advancing along the old bucket's overflow bucket chain and
1090  * adding overflow pages to the new bucket as needed. Outer loop iterates
1091  * once per page in old bucket.
1092  */
1093  for (;;)
1094  {
1095  BlockNumber oblkno;
1096  OffsetNumber ooffnum;
1097  OffsetNumber omaxoffnum;
1098 
1099  /* Scan each tuple in old page */
1100  omaxoffnum = PageGetMaxOffsetNumber(opage);
1101  for (ooffnum = FirstOffsetNumber;
1102  ooffnum <= omaxoffnum;
1103  ooffnum = OffsetNumberNext(ooffnum))
1104  {
1105  IndexTuple itup;
1106  Size itemsz;
1107  Bucket bucket;
1108  bool found = false;
1109 
1110  /* skip dead tuples */
1111  if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
1112  continue;
1113 
1114  /*
1115  * Before inserting a tuple, probe the hash table containing TIDs
1116  * of tuples belonging to new bucket, if we find a match, then
1117  * skip that tuple, else fetch the item's hash key (conveniently
1118  * stored in the item) and determine which bucket it now belongs
1119  * in.
1120  */
1121  itup = (IndexTuple) PageGetItem(opage,
1122  PageGetItemId(opage, ooffnum));
1123 
1124  if (htab)
1125  (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
1126 
1127  if (found)
1128  continue;
1129 
1130  bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
1131  maxbucket, highmask, lowmask);
1132 
1133  if (bucket == nbucket)
1134  {
1135  IndexTuple new_itup;
1136 
1137  /*
1138  * make a copy of index tuple as we have to scribble on it.
1139  */
1140  new_itup = CopyIndexTuple(itup);
1141 
1142  /*
1143  * mark the index tuple as moved by split, such tuples are
1144  * skipped by scan if there is split in progress for a bucket.
1145  */
1146  new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1147 
1148  /*
1149  * insert the tuple into the new bucket. if it doesn't fit on
1150  * the current page in the new bucket, we must allocate a new
1151  * overflow page and place the tuple on that page instead.
1152  */
1153  itemsz = IndexTupleDSize(*new_itup);
1154  itemsz = MAXALIGN(itemsz);
1155 
1156  if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
1157  {
1158  /*
1159  * Change the shared buffer state in critical section,
1160  * otherwise any error could make it unrecoverable.
1161  */
1162  START_CRIT_SECTION();
1163 
1164  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1165  MarkBufferDirty(nbuf);
1166  /* log the split operation before releasing the lock */
1167  log_split_page(rel, nbuf);
1168 
1169  END_CRIT_SECTION();
1170 
1171  /* drop lock, but keep pin */
1172  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1173 
1174  /* be tidy */
1175  for (i = 0; i < nitups; i++)
1176  pfree(itups[i]);
1177  nitups = 0;
1178  all_tups_size = 0;
1179 
1180  /* chain to a new overflow page */
1181  nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
1182  npage = BufferGetPage(nbuf);
1183  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1184  }
1185 
1186  itups[nitups++] = new_itup;
1187  all_tups_size += itemsz;
1188  }
1189  else
1190  {
1191  /*
1192  * the tuple stays on this page, so nothing to do.
1193  */
1194  Assert(bucket == obucket);
1195  }
1196  }
1197 
1198  oblkno = oopaque->hasho_nextblkno;
1199 
1200  /* retain the pin on the old primary bucket */
1201  if (obuf == bucket_obuf)
1202  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1203  else
1204  _hash_relbuf(rel, obuf);
1205 
1206  /* Exit loop if no more overflow pages in old bucket */
1207  if (!BlockNumberIsValid(oblkno))
1208  {
1209  /*
1210  * Change the shared buffer state in critical section, otherwise
1211  * any error could make it unrecoverable.
1212  */
1213  START_CRIT_SECTION();
1214 
1215  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1216  MarkBufferDirty(nbuf);
1217  /* log the split operation before releasing the lock */
1218  log_split_page(rel, nbuf);
1219 
1220  END_CRIT_SECTION();
1221 
1222  if (nbuf == bucket_nbuf)
1223  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1224  else
1225  _hash_relbuf(rel, nbuf);
1226 
1227  /* be tidy */
1228  for (i = 0; i < nitups; i++)
1229  pfree(itups[i]);
1230  break;
1231  }
1232 
1233  /* Else, advance to next old page */
1234  obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1235  opage = BufferGetPage(obuf);
1236  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1237  }
1238 
1239  /*
1240  * We're at the end of the old bucket chain, so we're done partitioning
1241  * the tuples. Mark the old and new buckets to indicate split is
1242  * finished.
1243  *
1244  * To avoid deadlocks due to locking order of buckets, first lock the old
1245  * bucket and then the new bucket.
1246  */
1247  LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
1248  opage = BufferGetPage(bucket_obuf);
1249  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1250 
1251  LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
1252  npage = BufferGetPage(bucket_nbuf);
1253  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1254 
1255  START_CRIT_SECTION();
1256 
1257  oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1258  nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1259 
1260  /*
1261  * After the split is finished, mark the old bucket to indicate that it
1262  * contains deletable tuples. Vacuum will clear split-cleanup flag after
1263  * deleting such tuples.
1264  */
1265  oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1266 
1267  /*
1268  * now write the buffers, here we don't release the locks as caller is
1269  * responsible to release locks.
1270  */
1271  MarkBufferDirty(bucket_obuf);
1272  MarkBufferDirty(bucket_nbuf);
1273 
1274  if (RelationNeedsWAL(rel))
1275  {
1276  XLogRecPtr recptr;
1277  xl_hash_split_complete xlrec;
1278 
1279  xlrec.old_bucket_flag = oopaque->hasho_flag;
1280  xlrec.new_bucket_flag = nopaque->hasho_flag;
1281 
1282  XLogBeginInsert();
1283 
1284  XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
1285 
1286  XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
1287  XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
1288 
1289  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
1290 
1291  PageSetLSN(BufferGetPage(bucket_obuf), recptr);
1292  PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
1293  }
1294 
1295  END_CRIT_SECTION();
1296 }
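
The per-tuple routing decision in the loop above boils down to the mask arithmetic of _hash_hashkey2bucket(): apply highmask to the hash key, and fall back to lowmask if the result exceeds the current maximum bucket. A standalone sketch (plain C; the masks and hash keys are made-up values, chosen as if bucket 5 were being split into new bucket 13):

#include <stdio.h>
#include <stdint.h>

/* mirrors the logic of _hash_hashkey2bucket() in hashutil.c */
static uint32_t hashkey2bucket(uint32_t hashkey, uint32_t maxbucket,
                               uint32_t highmask, uint32_t lowmask)
{
    uint32_t    bucket = hashkey & highmask;

    if (bucket > maxbucket)
        bucket = bucket & lowmask;
    return bucket;
}

int main(void)
{
    /* made-up split: bucket 5 is being split into new bucket 13, so
     * maxbucket = 13, lowmask = 7, highmask = 15 */
    uint32_t    maxbucket = 13, highmask = 15, lowmask = 7;
    uint32_t    keys[] = {0x25, 0x2d, 0x15, 0x9d};   /* arbitrary hash keys */
    int         i;

    for (i = 0; i < 4; i++)
        printf("hashkey 0x%02x -> bucket %u\n",
               (unsigned) keys[i],
               (unsigned) hashkey2bucket(keys[i], maxbucket, highmask, lowmask));
    return 0;
}

Keys that map to 13 under the high mask are the ones _hash_splitbucket() copies to the new bucket and marks with INDEX_MOVED_BY_SPLIT_MASK; keys that still map to 5 stay on the old bucket's pages.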
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, OffsetNumber *itup_offsets, uint16 nitups)
Definition: hashinsert.c:298
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:125
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
ItemPointerData t_tid
Definition: itup.h:37
#define END_CRIT_SECTION()
Definition: miscadmin.h:132
#define START_CRIT_SECTION()
Definition: miscadmin.h:130
uint32 BlockNumber
Definition: block.h:31
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:78
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:59
#define ItemIdIsDead(itemId)
Definition: itemid.h:112
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:354
uint16 OffsetNumber
Definition: off.h:24
#define HASH_READ
Definition: hash.h:266
uint32 Bucket
Definition: hash.h:34
unsigned short uint16
Definition: c.h:267
void pfree(void *pointer)
Definition: mcxt.c:950
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:233
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:434
#define IndexTupleDSize(itup)
Definition: itup.h:71
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define LH_BUCKET_BEING_SPLIT
Definition: hash.h:58
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
#define LH_BUCKET_BEING_POPULATED
Definition: hash.h:57
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define LH_OVERFLOW_PAGE
Definition: hash.h:53
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups)
Definition: bufpage.c:609
static void log_split_page(Relation rel, Buffer buf)
Definition: hashpage.c:1431
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:274
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:675
#define SizeOfHashSplitComplete
Definition: hash_xlog.h:137
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
size_t Size
Definition: c.h:356
#define PageGetSpecialPointer(page)
Definition: bufpage.h:323
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:85
Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
Definition: hashovfl.c:110
#define MAXALIGN(LEN)
Definition: c.h:588
#define RelationNeedsWAL(relation)
Definition: rel.h:502
#define XLOG_HASH_SPLIT_COMPLETE
Definition: hash_xlog.h:33
uint16 hasho_flag
Definition: hash.h:81
#define MaxIndexTuplesPerPage
Definition: itup.h:137
int i
BlockNumber hasho_nextblkno
Definition: hash.h:79
unsigned short t_info
Definition: itup.h:49
#define INDEX_MOVED_BY_SPLIT_MASK
Definition: hash.h:220
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:365
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
Pointer Page
Definition: bufpage.h:74
static void log_split_page ( Relation  rel,
Buffer  buf 
)
static

Definition at line 1431 of file hashpage.c.

References BufferGetPage, PageSetLSN, REGBUF_FORCE_IMAGE, REGBUF_STANDARD, RelationNeedsWAL, XLOG_HASH_SPLIT_PAGE, XLogBeginInsert(), XLogInsert(), and XLogRegisterBuffer().

Referenced by _hash_splitbucket().

1432 {
1433  if (RelationNeedsWAL(rel))
1434  {
1435  XLogRecPtr recptr;
1436 
1437  XLogBeginInsert();
1438 
1439  XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
1440 
1441  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
1442 
1443  PageSetLSN(BufferGetPage(buf), recptr);
1444  }
1445 }
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define XLOG_HASH_SPLIT_PAGE
Definition: hash_xlog.h:32
static char * buf
Definition: pg_test_fsync.c:65
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define REGBUF_FORCE_IMAGE
Definition: xloginsert.h:30
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define RelationNeedsWAL(relation)
Definition: rel.h:502
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:365