PostgreSQL Source Code  git master
hashpage.c
1 /*-------------------------------------------------------------------------
2  *
3  * hashpage.c
4  * Hash table page management code for the Postgres hash access method
5  *
6  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/access/hash/hashpage.c
12  *
13  * NOTES
14  * Postgres hash pages look like ordinary relation pages. The opaque
15  * data at high addresses includes information about the page including
16  * whether a page is an overflow page or a true bucket, the bucket
17  * number, and the block numbers of the preceding and following pages
18  * in the same bucket.
19  *
20  * The first page in a hash relation, page zero, is special -- it stores
21  * information describing the hash table; it is referred to as the
22  * "meta page." Pages one and higher store the actual data.
23  *
24  * There are also bitmap pages, which are not manipulated here;
25  * see hashovfl.c.
26  *
27  *-------------------------------------------------------------------------
28  */
29 #include "postgres.h"
30 
31 #include "access/hash.h"
32 #include "access/hash_xlog.h"
33 #include "miscadmin.h"
34 #include "storage/lmgr.h"
35 #include "storage/smgr.h"
36 #include "storage/predicate.h"
37 
38 
39 static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
40  uint32 nblocks);
41 static void _hash_splitbucket(Relation rel, Buffer metabuf,
42  Bucket obucket, Bucket nbucket,
43  Buffer obuf,
44  Buffer nbuf,
45  HTAB *htab,
46  uint32 maxbucket,
47  uint32 highmask, uint32 lowmask);
48 static void log_split_page(Relation rel, Buffer buf);
49 
50 
51 /*
52  * We use high-concurrency locking on hash indexes (see README for an overview
53  * of the locking rules). However, we can skip taking lmgr locks when the
54  * index is local to the current backend (ie, either temp or new in the
55  * current transaction). No one else can see it, so there's no reason to
56  * take locks. We still take buffer-level locks, but not lmgr locks.
57  */
58 #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
59 
60 
61 /*
62  * _hash_getbuf() -- Get a buffer by block number for read or write.
63  *
64  * 'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
65  * 'flags' is a bitwise OR of the allowed page types.
66  *
67  * This must be used only to fetch pages that are expected to be valid
68  * already. _hash_checkpage() is applied using the given flags.
69  *
70  * When this routine returns, the appropriate lock is set on the
71  * requested buffer and its reference count has been incremented
72  * (ie, the buffer is "locked and pinned").
73  *
74  * P_NEW is disallowed because this routine can only be used
75  * to access pages that are known to be before the filesystem EOF.
76  * Extending the index should be done with _hash_getnewbuf.
77  */
78 Buffer
79 _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
80 {
81  Buffer buf;
82 
83  if (blkno == P_NEW)
84  elog(ERROR, "hash AM does not use P_NEW");
85 
86  buf = ReadBuffer(rel, blkno);
87 
88  if (access != HASH_NOLOCK)
89  LockBuffer(buf, access);
90 
91  /* ref count and lock type are correct */
92 
93  _hash_checkpage(rel, buf, flags);
94 
95  return buf;
96 }
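/*
 * A minimal usage sketch for _hash_getbuf() (illustrative only; it assumes
 * the caller already knows a valid bucket-page block number).  The page is
 * fetched with the requested lock, examined, and then released with
 * _hash_relbuf(), which drops both the lock and the pin:
 *
 *     Buffer  buf;
 *     Page    page;
 *
 *     buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
 *     page = BufferGetPage(buf);
 *     ... examine tuples on "page" ...
 *     _hash_relbuf(rel, buf);
 */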
97 
98 /*
99  * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
100  *
101  * We read the page and try to acquire a cleanup lock. If we get it,
102  * we return the buffer; otherwise, we return InvalidBuffer.
103  */
104 Buffer
105 _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
106 {
107  Buffer buf;
108 
109  if (blkno == P_NEW)
110  elog(ERROR, "hash AM does not use P_NEW");
111 
112  buf = ReadBuffer(rel, blkno);
113 
114  if (!ConditionalLockBufferForCleanup(buf))
115  {
116  ReleaseBuffer(buf);
117  return InvalidBuffer;
118  }
119 
120  /* ref count and lock type are correct */
121 
122  _hash_checkpage(rel, buf, flags);
123 
124  return buf;
125 }
126 
127 /*
128  * _hash_getinitbuf() -- Get and initialize a buffer by block number.
129  *
130  * This must be used only to fetch pages that are known to be before
131  * the index's filesystem EOF, but are to be filled from scratch.
132  * _hash_pageinit() is applied automatically. Otherwise it has
133  * effects similar to _hash_getbuf() with access = HASH_WRITE.
134  *
135  * When this routine returns, a write lock is set on the
136  * requested buffer and its reference count has been incremented
137  * (ie, the buffer is "locked and pinned").
138  *
139  * P_NEW is disallowed because this routine can only be used
140  * to access pages that are known to be before the filesystem EOF.
141  * Extending the index should be done with _hash_getnewbuf.
142  */
143 Buffer
144 _hash_getinitbuf(Relation rel, BlockNumber blkno)
145 {
146  Buffer buf;
147 
148  if (blkno == P_NEW)
149  elog(ERROR, "hash AM does not use P_NEW");
150 
151  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
152  NULL);
153 
154  /* ref count and lock type are correct */
155 
156  /* initialize the page */
157  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
158 
159  return buf;
160 }
161 
162 /*
163  * _hash_initbuf() -- Get and initialize a buffer by bucket number.
164  */
165 void
166 _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
167  bool initpage)
168 {
169  HashPageOpaque pageopaque;
170  Page page;
171 
172  page = BufferGetPage(buf);
173 
174  /* initialize the page */
175  if (initpage)
176  _hash_pageinit(page, BufferGetPageSize(buf));
177 
178  pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
179 
180  /*
181  * Set hasho_prevblkno with current hashm_maxbucket. This value will be
182  * used to validate cached HashMetaPageData. See
183  * _hash_getbucketbuf_from_hashkey().
184  */
185  pageopaque->hasho_prevblkno = max_bucket;
186  pageopaque->hasho_nextblkno = InvalidBlockNumber;
187  pageopaque->hasho_bucket = num_bucket;
188  pageopaque->hasho_flag = flag;
189  pageopaque->hasho_page_id = HASHO_PAGE_ID;
190 }
191 
192 /*
193  * _hash_getnewbuf() -- Get a new page at the end of the index.
194  *
195  * This has the same API as _hash_getinitbuf, except that we are adding
196  * a page to the index, and hence expect the page to be past the
197  * logical EOF. (However, we have to support the case where it isn't,
198  * since a prior try might have crashed after extending the filesystem
199  * EOF but before updating the metapage to reflect the added page.)
200  *
201  * It is caller's responsibility to ensure that only one process can
202  * extend the index at a time. In practice, this function is called
203  * only while holding write lock on the metapage, because adding a page
204  * is always associated with an update of metapage data.
205  */
206 Buffer
207 _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
208 {
209  BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
210  Buffer buf;
211 
212  if (blkno == P_NEW)
213  elog(ERROR, "hash AM does not use P_NEW");
214  if (blkno > nblocks)
215  elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
216  RelationGetRelationName(rel));
217 
218  /* smgr insists we use P_NEW to extend the relation */
219  if (blkno == nblocks)
220  {
221  buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
222  if (BufferGetBlockNumber(buf) != blkno)
223  elog(ERROR, "unexpected hash relation size: %u, should be %u",
224  BufferGetBlockNumber(buf), blkno);
225  LockBuffer(buf, HASH_WRITE);
226  }
227  else
228  {
229  buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
230  NULL);
231  }
232 
233  /* ref count and lock type are correct */
234 
235  /* initialize the page */
236  _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
237 
238  return buf;
239 }
240 
241 /*
242  * _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
243  *
244  * This is identical to _hash_getbuf() but also allows a buffer access
245  * strategy to be specified. We use this for VACUUM operations.
246  */
247 Buffer
248 _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
249  int access, int flags,
250  BufferAccessStrategy bstrategy)
251 {
252  Buffer buf;
253 
254  if (blkno == P_NEW)
255  elog(ERROR, "hash AM does not use P_NEW");
256 
257  buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
258 
259  if (access != HASH_NOLOCK)
260  LockBuffer(buf, access);
261 
262  /* ref count and lock type are correct */
263 
264  _hash_checkpage(rel, buf, flags);
265 
266  return buf;
267 }
268 
269 /*
270  * _hash_relbuf() -- release a locked buffer.
271  *
272  * Lock and pin (refcount) are both dropped.
273  */
274 void
275 _hash_relbuf(Relation rel, Buffer buf)
276 {
277  UnlockReleaseBuffer(buf);
278 }
279 
280 /*
281  * _hash_dropbuf() -- release an unlocked buffer.
282  *
283  * This is used to unpin a buffer on which we hold no lock.
284  */
285 void
286 _hash_dropbuf(Relation rel, Buffer buf)
287 {
288  ReleaseBuffer(buf);
289 }
290 
291 /*
292  * _hash_dropscanbuf() -- release buffers used in scan.
293  *
294  * This routine unpins the buffers used during scan on which we
295  * hold no lock.
296  */
297 void
298 _hash_dropscanbuf(Relation rel, HashScanOpaque so)
299 {
300  /* release pin we hold on primary bucket page */
301  if (BufferIsValid(so->hashso_bucket_buf) &&
302  so->hashso_bucket_buf != so->currPos.buf)
303  _hash_dropbuf(rel, so->hashso_bucket_buf);
304  so->hashso_bucket_buf = InvalidBuffer;
305 
306  /* release pin we hold on primary bucket page of bucket being split */
307  if (BufferIsValid(so->hashso_split_bucket_buf) &&
308  so->hashso_split_bucket_buf != so->currPos.buf)
309  _hash_dropbuf(rel, so->hashso_split_bucket_buf);
310  so->hashso_split_bucket_buf = InvalidBuffer;
311 
312  /* release any pin we still hold */
313  if (BufferIsValid(so->currPos.buf))
314  _hash_dropbuf(rel, so->currPos.buf);
315  so->currPos.buf = InvalidBuffer;
316 
317  /* reset split scan */
318  so->hashso_buc_populated = false;
319  so->hashso_buc_split = false;
320 }
321 
322 
323 /*
324  * _hash_init() -- Initialize the metadata page of a hash index,
325  * the initial buckets, and the initial bitmap page.
326  *
327  * The initial number of buckets is dependent on num_tuples, an estimate
328  * of the number of tuples to be loaded into the index initially. The
329  * chosen number of buckets is returned.
330  *
331  * We are fairly cavalier about locking here, since we know that no one else
332  * could be accessing this index. In particular the rule about not holding
333  * multiple buffer locks is ignored.
334  */
335 uint32
336 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
337 {
338  Buffer metabuf;
339  Buffer buf;
340  Buffer bitmapbuf;
341  Page pg;
342  HashMetaPage metap;
343  RegProcedure procid;
344  int32 data_width;
345  int32 item_width;
346  int32 ffactor;
347  uint32 num_buckets;
348  uint32 i;
349  bool use_wal;
350 
351  /* safety check */
352  if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
353  elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
354  RelationGetRelationName(rel));
355 
356  /*
357  * WAL log creation of pages if the relation is persistent, or this is the
358  * init fork. Init forks for unlogged relations always need to be WAL
359  * logged.
360  */
361  use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
362 
363  /*
364  * Determine the target fill factor (in tuples per bucket) for this index.
365  * The idea is to make the fill factor correspond to pages about as full
366  * as the user-settable fillfactor parameter says. We can compute it
367  * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
368  */
369  data_width = sizeof(uint32);
370  item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
371  sizeof(ItemIdData); /* include the line pointer */
372  ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
373  /* keep to a sane range */
374  if (ffactor < 10)
375  ffactor = 10;
376 
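 /*
  * A worked example of the computation above (assuming a typical 64-bit
  * build with MAXALIGN = 8, 8 kB pages, and the default fillfactor of 75):
  * item_width = MAXALIGN(8) + MAXALIGN(4) + 4 = 20 bytes, target page
  * usage = 8192 * 75 / 100 = 6144 bytes, so ffactor = 6144 / 20 = 307
  * tuples per bucket.
  */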
377  procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);
378 
379  /*
380  * We initialize the metapage, the first N bucket pages, and the first
381  * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
382  * calls to occur. This ensures that the smgr level has the right idea of
383  * the physical index length.
384  *
385  * Critical section not required, because on error the creation of the
386  * whole relation will be rolled back.
387  */
388  metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
389  _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
390  MarkBufferDirty(metabuf);
391 
392  pg = BufferGetPage(metabuf);
393  metap = HashPageGetMeta(pg);
394 
395  /* XLOG stuff */
396  if (use_wal)
397  {
398  xl_hash_init_meta_page xlrec;
399  XLogRecPtr recptr;
400 
401  xlrec.num_tuples = num_tuples;
402  xlrec.procid = metap->hashm_procid;
403  xlrec.ffactor = metap->hashm_ffactor;
404 
405  XLogBeginInsert();
406  XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
407  XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
408 
409  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
410 
411  PageSetLSN(BufferGetPage(metabuf), recptr);
412  }
413 
414  num_buckets = metap->hashm_maxbucket + 1;
415 
416  /*
417  * Release buffer lock on the metapage while we initialize buckets.
418  * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
419  * won't accomplish anything. It's a bad idea to hold buffer locks for
420  * long intervals in any case, since that can block the bgwriter.
421  */
422  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
423 
424  /*
425  * Initialize and WAL Log the first N buckets
426  */
427  for (i = 0; i < num_buckets; i++)
428  {
429  BlockNumber blkno;
430 
431  /* Allow interrupts, in case N is huge */
432  CHECK_FOR_INTERRUPTS();
433 
434  blkno = BUCKET_TO_BLKNO(metap, i);
435  buf = _hash_getnewbuf(rel, blkno, forkNum);
436  _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
437  MarkBufferDirty(buf);
438 
439  if (use_wal)
440  log_newpage(&rel->rd_node,
441  forkNum,
442  blkno,
443  BufferGetPage(buf),
444  true);
445  _hash_relbuf(rel, buf);
446  }
447 
448  /* Now reacquire buffer lock on metapage */
449  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
450 
451  /*
452  * Initialize bitmap page
453  */
454  bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
455  _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
456  MarkBufferDirty(bitmapbuf);
457 
458  /* add the new bitmap page to the metapage's list of bitmaps */
459  /* metapage already has a write lock */
460  if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
461  ereport(ERROR,
462  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
463  errmsg("out of overflow pages in hash index \"%s\"",
464  RelationGetRelationName(rel))));
465 
466  metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
467 
468  metap->hashm_nmaps++;
469  MarkBufferDirty(metabuf);
470 
471  /* XLOG stuff */
472  if (use_wal)
473  {
474  xl_hash_init_bitmap_page xlrec;
475  XLogRecPtr recptr;
476 
477  xlrec.bmsize = metap->hashm_bmsize;
478 
479  XLogBeginInsert();
480  XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
481  XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
482 
483  /*
484  * This is safe only because nobody else can be modifying the index at
485  * this stage; it's only visible to the transaction that is creating
486  * it.
487  */
488  XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
489 
490  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
491 
492  PageSetLSN(BufferGetPage(bitmapbuf), recptr);
493  PageSetLSN(BufferGetPage(metabuf), recptr);
494  }
495 
496  /* all done */
497  _hash_relbuf(rel, bitmapbuf);
498  _hash_relbuf(rel, metabuf);
499 
500  return num_buckets;
501 }
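/*
 * For illustration, with num_buckets = 4 the index created above is laid
 * out as follows (this simply restates the layout described in
 * _hash_init_metabuffer() below):
 *
 *     block 0            meta page (HASH_METAPAGE)
 *     blocks 1 .. 4      primary pages of buckets 0 .. 3
 *     block 5            first freespace bitmap page (num_buckets + 1)
 */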
502 
503 /*
504  * _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
505  */
506 void
507 _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
508  uint16 ffactor, bool initpage)
509 {
510  HashMetaPage metap;
511  HashPageOpaque pageopaque;
512  Page page;
513  double dnumbuckets;
514  uint32 num_buckets;
515  uint32 spare_index;
516  uint32 i;
517 
518  /*
519  * Choose the number of initial bucket pages to match the fill factor
520  * given the estimated number of tuples. We round up the result to the
521  * total number of buckets which has to be allocated before using its
522  * _hashm_spare element. However always force at least 2 bucket pages. The
523  * upper limit is determined by considerations explained in
524  * _hash_expandtable().
525  */
526  dnumbuckets = num_tuples / ffactor;
527  if (dnumbuckets <= 2.0)
528  num_buckets = 2;
529  else if (dnumbuckets >= (double) 0x40000000)
530  num_buckets = 0x40000000;
531  else
532  num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
533 
534  spare_index = _hash_spareindex(num_buckets);
535  Assert(spare_index < HASH_MAX_SPLITPOINTS);
536 
537  page = BufferGetPage(buf);
538  if (initpage)
539  _hash_pageinit(page, BufferGetPageSize(buf));
540 
541  pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
542  pageopaque->hasho_prevblkno = InvalidBlockNumber;
543  pageopaque->hasho_nextblkno = InvalidBlockNumber;
544  pageopaque->hasho_bucket = -1;
545  pageopaque->hasho_flag = LH_META_PAGE;
546  pageopaque->hasho_page_id = HASHO_PAGE_ID;
547 
548  metap = HashPageGetMeta(page);
549 
550  metap->hashm_magic = HASH_MAGIC;
551  metap->hashm_version = HASH_VERSION;
552  metap->hashm_ntuples = 0;
553  metap->hashm_nmaps = 0;
554  metap->hashm_ffactor = ffactor;
555  metap->hashm_bsize = HashGetMaxBitmapSize(page);
556  /* find largest bitmap array size that will fit in page size */
557  for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
558  {
559  if ((1 << i) <= metap->hashm_bsize)
560  break;
561  }
562  Assert(i > 0);
563  metap->hashm_bmsize = 1 << i;
564  metap->hashm_bmshift = i + BYTE_TO_BIT;
565  Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
566 
567  /*
568  * Label the index with its primary hash support function's OID. This is
569  * pretty useless for normal operation (in fact, hashm_procid is not used
570  * anywhere), but it might be handy for forensic purposes so we keep it.
571  */
572  metap->hashm_procid = procid;
573 
574  /*
575  * We initialize the index with N buckets, 0 .. N-1, occupying physical
576  * blocks 1 to N. The first freespace bitmap page is in block N+1.
577  */
578  metap->hashm_maxbucket = num_buckets - 1;
579 
580  /*
581  * Set highmask as next immediate ((2 ^ x) - 1), which should be
582  * sufficient to cover num_buckets.
583  */
584  metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
585  metap->hashm_lowmask = (metap->hashm_highmask >> 1);
586 
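 /*
  * Worked example (assuming _hash_log2() returns the smallest i such that
  * 2^i >= its argument): for num_buckets = 4, maxbucket = 3,
  * highmask = (1 << _hash_log2(5)) - 1 = 7 and lowmask = 3.  A hash key is
  * first masked with highmask; if the result exceeds maxbucket, it is
  * re-masked with lowmask (see _hash_hashkey2bucket()).
  */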
587  MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
588  MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
589 
590  /* Set up mapping for one spare page after the initial splitpoints */
591  metap->hashm_spares[spare_index] = 1;
592  metap->hashm_ovflpoint = spare_index;
593  metap->hashm_firstfree = 0;
594 
595  /*
596  * Set pd_lower just past the end of the metadata. This is essential,
597  * because without doing so, metadata will be lost if xlog.c compresses
598  * the page.
599  */
600  ((PageHeader) page)->pd_lower =
601  ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
602 }
603 
604 /*
605  * _hash_pageinit() -- Initialize a new hash index page.
606  */
607 void
608 _hash_pageinit(Page page, Size size)
609 {
610  PageInit(page, size, sizeof(HashPageOpaqueData));
611 }
612 
613 /*
614  * Attempt to expand the hash table by creating one new bucket.
615  *
616  * This will silently do nothing if we don't get cleanup lock on old or
617  * new bucket.
618  *
619  * Complete the pending splits and remove the tuples from old bucket,
620  * if there are any left over from the previous split.
621  *
622  * The caller must hold a pin, but no lock, on the metapage buffer.
623  * The buffer is returned in the same state.
624  */
625 void
626 _hash_expandtable(Relation rel, Buffer metabuf)
627 {
628  HashMetaPage metap;
629  Bucket old_bucket;
630  Bucket new_bucket;
631  uint32 spare_ndx;
632  BlockNumber start_oblkno;
633  BlockNumber start_nblkno;
634  Buffer buf_nblkno;
635  Buffer buf_oblkno;
636  Page opage;
637  Page npage;
638  HashPageOpaque oopaque;
639  HashPageOpaque nopaque;
640  uint32 maxbucket;
641  uint32 highmask;
642  uint32 lowmask;
643  bool metap_update_masks = false;
644  bool metap_update_splitpoint = false;
645 
646 restart_expand:
647 
648  /*
649  * Write-lock the meta page. It used to be necessary to acquire a
650  * heavyweight lock to begin a split, but that is no longer required.
651  */
652  LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
653 
654  _hash_checkpage(rel, metabuf, LH_META_PAGE);
655  metap = HashPageGetMeta(BufferGetPage(metabuf));
656 
657  /*
658  * Check to see if split is still needed; someone else might have already
659  * done one while we waited for the lock.
660  *
661  * Make sure this stays in sync with _hash_doinsert()
662  */
663  if (metap->hashm_ntuples <=
664  (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
665  goto fail;
666 
667  /*
668  * Can't split anymore if maxbucket has reached its maximum possible
669  * value.
670  *
671  * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
672  * the calculation maxbucket+1 mustn't overflow). Currently we restrict
673  * to half that because of overflow looping in _hash_log2() and
674  * insufficient space in hashm_spares[]. It's moot anyway because an
675  * index with 2^32 buckets would certainly overflow BlockNumber and hence
676  * _hash_alloc_buckets() would fail, but if we supported buckets smaller
677  * than a disk block then this would be an independent constraint.
678  *
679  * If you change this, see also the maximum initial number of buckets in
680  * _hash_init().
681  */
682  if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
683  goto fail;
684 
685  /*
686  * Determine which bucket is to be split, and attempt to take cleanup lock
687  * on the old bucket. If we can't get the lock, give up.
688  *
689  * The cleanup lock protects us not only against other backends, but
690  * against our own backend as well.
691  *
692  * The cleanup lock is mainly to protect the split from concurrent
693  * inserts. See src/backend/access/hash/README, Lock Definitions for
694  * further details. Due to this locking restriction, if there is any
695  * pending scan, the split will give up, which is not good, but harmless.
696  */
697  new_bucket = metap->hashm_maxbucket + 1;
698 
699  old_bucket = (new_bucket & metap->hashm_lowmask);
700 
701  start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
702 
703  buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
704  if (!buf_oblkno)
705  goto fail;
706 
707  opage = BufferGetPage(buf_oblkno);
708  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
709 
710  /*
711  * We want to finish any pending split of the old bucket before starting a
712  * new one: there is no apparent benefit in deferring it, and handling
713  * splits involving multiple buckets (in case the new split also fails)
714  * would complicate the code. We don't need to consider the new bucket for
715  * completing the split here, since a re-split of the new bucket cannot
716  * start while there is still a pending split from the old bucket.
717  */
718  if (H_BUCKET_BEING_SPLIT(oopaque))
719  {
720  /*
721  * Copy bucket mapping info now; refer the comment in code below where
722  * we copy this information before calling _hash_splitbucket to see
723  * why this is okay.
724  */
725  maxbucket = metap->hashm_maxbucket;
726  highmask = metap->hashm_highmask;
727  lowmask = metap->hashm_lowmask;
728 
729  /*
730  * Release the lock on metapage and old_bucket, before completing the
731  * split.
732  */
733  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
734  LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
735 
736  _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
737  highmask, lowmask);
738 
739  /* release the pin on old buffer and retry for expand. */
740  _hash_dropbuf(rel, buf_oblkno);
741 
742  goto restart_expand;
743  }
744 
745  /*
746  * Clean up the tuples remaining from the previous split. This operation
747  * requires a cleanup lock, and we already have one on the old bucket, so
748  * let's do it. We also don't want to allow further splits from the bucket
749  * until the garbage of the previous split has been cleaned. This has two
750  * advantages: first, it helps to avoid bloat due to garbage; and second,
751  * during cleanup of the bucket we can always be sure that the garbage
752  * tuples belong to the most recently split bucket. By contrast, if we
753  * allowed cleanup of a bucket after the meta page had been updated to
754  * indicate the new split but before the actual split, the cleanup
755  * operation could not decide whether a tuple had already been moved to
756  * the newly created bucket, and might end up deleting such tuples.
757  */
758  if (H_NEEDS_SPLIT_CLEANUP(oopaque))
759  {
760  /*
761  * Copy bucket mapping info now; refer to the comment in code below
762  * where we copy this information before calling _hash_splitbucket to
763  * see why this is okay.
764  */
765  maxbucket = metap->hashm_maxbucket;
766  highmask = metap->hashm_highmask;
767  lowmask = metap->hashm_lowmask;
768 
769  /* Release the metapage lock. */
770  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
771 
772  hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
773  maxbucket, highmask, lowmask, NULL, NULL, true,
774  NULL, NULL);
775 
776  _hash_dropbuf(rel, buf_oblkno);
777 
778  goto restart_expand;
779  }
780 
781  /*
782  * There shouldn't be any active scan on new bucket.
783  *
784  * Note: it is safe to compute the new bucket's blkno here, even though we
785  * may still need to update the BUCKET_TO_BLKNO mapping. This is because
786  * the current value of hashm_spares[hashm_ovflpoint] correctly shows
787  * where we are going to put a new splitpoint's worth of buckets.
788  */
789  start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
790 
791  /*
792  * If the split point is increasing we need to allocate a new batch of
793  * bucket pages.
794  */
795  spare_ndx = _hash_spareindex(new_bucket + 1);
796  if (spare_ndx > metap->hashm_ovflpoint)
797  {
798  uint32 buckets_to_add;
799 
800  Assert(spare_ndx == metap->hashm_ovflpoint + 1);
801 
802  /*
803  * We treat allocation of buckets as a separate WAL-logged action.
804  * Even if we fail after this operation, we won't leak bucket pages;
805  * rather, the next split will consume this space. In any case, even
806  * without failure we don't use all the space in one split operation.
807  */
808  buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
809  if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
810  {
811  /* can't split due to BlockNumber overflow */
812  _hash_relbuf(rel, buf_oblkno);
813  goto fail;
814  }
815  }
816 
817  /*
818  * Physically allocate the new bucket's primary page. We want to do this
819  * before changing the metapage's mapping info, in case we can't get the
820  * disk space. Ideally, we don't need to check for cleanup lock on new
821  * bucket as no other backend could find this bucket unless meta page is
822  * updated. However, it is good to be consistent with old bucket locking.
823  */
824  buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
825  if (!IsBufferCleanupOK(buf_nblkno))
826  {
827  _hash_relbuf(rel, buf_oblkno);
828  _hash_relbuf(rel, buf_nblkno);
829  goto fail;
830  }
831 
832  /*
833  * Since we are scribbling on the pages in the shared buffers, establish a
834  * critical section. Any failure in this next code leaves us with a big
835  * problem: the metapage is effectively corrupt but could get written back
836  * to disk.
837  */
838  START_CRIT_SECTION();
839 
840  /*
841  * Okay to proceed with split. Update the metapage bucket mapping info.
842  */
843  metap->hashm_maxbucket = new_bucket;
844 
845  if (new_bucket > metap->hashm_highmask)
846  {
847  /* Starting a new doubling */
848  metap->hashm_lowmask = metap->hashm_highmask;
849  metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
850  metap_update_masks = true;
851  }
852 
853  /*
854  * If the split point is increasing we need to adjust the hashm_spares[]
855  * array and hashm_ovflpoint so that future overflow pages will be created
856  * beyond this new batch of bucket pages.
857  */
858  if (spare_ndx > metap->hashm_ovflpoint)
859  {
860  metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
861  metap->hashm_ovflpoint = spare_ndx;
862  metap_update_splitpoint = true;
863  }
864 
865  MarkBufferDirty(metabuf);
866 
867  /*
868  * Copy bucket mapping info now; this saves re-accessing the meta page
869  * inside _hash_splitbucket's inner loop. Note that once we drop the
870  * split lock, other splits could begin, so these values might be out of
871  * date before _hash_splitbucket finishes. That's okay, since all it
872  * needs is to tell which of these two buckets to map hashkeys into.
873  */
874  maxbucket = metap->hashm_maxbucket;
875  highmask = metap->hashm_highmask;
876  lowmask = metap->hashm_lowmask;
877 
878  opage = BufferGetPage(buf_oblkno);
879  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
880 
881  /*
882  * Mark the old bucket to indicate that split is in progress. (At
883  * operation end, we will clear the split-in-progress flag.) Also, for a
884  * primary bucket page, hasho_prevblkno stores the number of buckets that
885  * existed as of the last split, so we must update that value here.
886  */
887  oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
888  oopaque->hasho_prevblkno = maxbucket;
889 
890  MarkBufferDirty(buf_oblkno);
891 
892  npage = BufferGetPage(buf_nblkno);
893 
894  /*
895  * initialize the new bucket's primary page and mark it to indicate that
896  * split is in progress.
897  */
898  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
899  nopaque->hasho_prevblkno = maxbucket;
900  nopaque->hasho_nextblkno = InvalidBlockNumber;
901  nopaque->hasho_bucket = new_bucket;
902  nopaque->hasho_flag = LH_BUCKET_BEING_POPULATED | LH_BUCKET_PAGE;
903  nopaque->hasho_page_id = HASHO_PAGE_ID;
904 
905  MarkBufferDirty(buf_nblkno);
906 
907  /* XLOG stuff */
908  if (RelationNeedsWAL(rel))
909  {
910  xl_hash_split_allocate_page xlrec;
911  XLogRecPtr recptr;
912 
913  xlrec.new_bucket = maxbucket;
914  xlrec.old_bucket_flag = oopaque->hasho_flag;
915  xlrec.new_bucket_flag = nopaque->hasho_flag;
916  xlrec.flags = 0;
917 
918  XLogBeginInsert();
919 
920  XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
921  XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
922  XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
923 
924  if (metap_update_masks)
925  {
926  xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
927  XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
928  XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
929  }
930 
931  if (metap_update_splitpoint)
932  {
933  xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
934  XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
935  sizeof(uint32));
936  XLogRegisterBufData(2,
937  (char *) &metap->hashm_spares[metap->hashm_ovflpoint],
938  sizeof(uint32));
939  }
940 
941  XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
942 
943  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
944 
945  PageSetLSN(BufferGetPage(buf_oblkno), recptr);
946  PageSetLSN(BufferGetPage(buf_nblkno), recptr);
947  PageSetLSN(BufferGetPage(metabuf), recptr);
948  }
949 
950  END_CRIT_SECTION();
951 
952  /* drop lock, but keep pin */
953  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
954 
955  /* Relocate records to the new bucket */
956  _hash_splitbucket(rel, metabuf,
957  old_bucket, new_bucket,
958  buf_oblkno, buf_nblkno, NULL,
959  maxbucket, highmask, lowmask);
960 
961  /* all done, now release the pins on primary buckets. */
962  _hash_dropbuf(rel, buf_oblkno);
963  _hash_dropbuf(rel, buf_nblkno);
964 
965  return;
966 
967  /* Here if decide not to split or fail to acquire old bucket lock */
968 fail:
969 
970  /* We didn't write the metapage, so just drop lock */
971  LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
972 }
973 
974 
975 /*
976  * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
977  *
978  * This does not need to initialize the new bucket pages; we'll do that as
979  * each one is used by _hash_expandtable(). But we have to extend the logical
980  * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
981  * sync with ours, so that we don't get complaints from smgr.
982  *
983  * We do this by writing a page of zeroes at the end of the splitpoint range.
984  * We expect that the filesystem will ensure that the intervening pages read
985  * as zeroes too. On many filesystems this "hole" will not be allocated
986  * immediately, which means that the index file may end up more fragmented
987  * than if we forced it all to be allocated now; but since we don't scan
988  * hash indexes sequentially anyway, that probably doesn't matter.
989  *
990  * XXX It's annoying that this code is executed with the metapage lock held.
991  * We need to interlock against _hash_addovflpage() adding a new overflow page
992  * concurrently, but it'd likely be better to use LockRelationForExtension
993  * for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
994  * so it may not be worth worrying about.
995  *
996  * Returns true if successful, or false if allocation failed due to
997  * BlockNumber overflow.
998  */
999 static bool
1000 _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
1001 {
1002  BlockNumber lastblock;
1003  char zerobuf[BLCKSZ];
1004  Page page;
1005  HashPageOpaque ovflopaque;
1006 
1007  lastblock = firstblock + nblocks - 1;
1008 
1009  /*
1010  * Check for overflow in block number calculation; if so, we cannot extend
1011  * the index anymore.
1012  */
1013  if (lastblock < firstblock || lastblock == InvalidBlockNumber)
1014  return false;
1015 
1016  page = (Page) zerobuf;
1017 
1018  /*
1019  * Initialize the page. Just zeroing the page won't work; see
1020  * _hash_freeovflpage for similar usage. We take care to make the special
1021  * space valid for the benefit of tools such as pageinspect.
1022  */
1023  _hash_pageinit(page, BLCKSZ);
1024 
1025  ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);
1026 
1027  ovflopaque->hasho_prevblkno = InvalidBlockNumber;
1028  ovflopaque->hasho_nextblkno = InvalidBlockNumber;
1029  ovflopaque->hasho_bucket = -1;
1030  ovflopaque->hasho_flag = LH_UNUSED_PAGE;
1031  ovflopaque->hasho_page_id = HASHO_PAGE_ID;
1032 
1033  if (RelationNeedsWAL(rel))
1034  log_newpage(&rel->rd_node,
1035  MAIN_FORKNUM,
1036  lastblock,
1037  zerobuf,
1038  true);
1039 
1040  RelationOpenSmgr(rel);
1041  smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
1042 
1043  return true;
1044 }
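/*
 * For example (illustrative numbers only): if a new splitpoint needs 1024
 * bucket pages starting at firstblock = 1000, then lastblock = 1000 + 1024
 * - 1 = 2023; only block 2023 is physically written here, and blocks
 * 1000 .. 2022 are expected to read back as zeroes until they are
 * initialized as buckets are actually used by _hash_expandtable().
 */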
1045 
1046 
1047 /*
1048  * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
1049  *
1050  * This routine is used to partition the tuples between the old and new
1051  * buckets, and also to finish a previously interrupted split operation. To
1052  * finish an interrupted split, the caller must supply htab. If htab is set,
1053  * we skip moving any tuple whose TID already exists in htab; otherwise a
1054  * NULL value of htab means that all tuples belonging to the new bucket
1055  * are moved.
1056  *
1057  * We are splitting a bucket that consists of a base bucket page and zero
1058  * or more overflow (bucket chain) pages. We must relocate tuples that
1059  * belong in the new bucket.
1060  *
1061  * The caller must hold cleanup locks on both buckets to ensure that
1062  * no one else is trying to access them (see README).
1063  *
1064  * The caller must hold a pin, but no lock, on the metapage buffer.
1065  * The buffer is returned in the same state. (The metapage is only
1066  * touched if it becomes necessary to add or remove overflow pages.)
1067  *
1068  * The split needs to retain pins on the primary bucket pages of both the
1069  * old and new buckets till the end of the operation, to prevent vacuum
1070  * from starting while a split is in progress.
1071  *
1072  * In addition, the caller must have created the new bucket's base page,
1073  * which is passed in buffer nbuf, pinned and write-locked. The lock will be
1074  * released here and pin must be released by the caller. (The API is set up
1075  * this way because we must do _hash_getnewbuf() before releasing the metapage
1076  * write lock. So instead of passing the new bucket's start block number, we
1077  * pass an actual buffer.)
1078  */
1079 static void
1080 _hash_splitbucket(Relation rel,
1081  Buffer metabuf,
1082  Bucket obucket,
1083  Bucket nbucket,
1084  Buffer obuf,
1085  Buffer nbuf,
1086  HTAB *htab,
1087  uint32 maxbucket,
1088  uint32 highmask,
1089  uint32 lowmask)
1090 {
1091  Buffer bucket_obuf;
1092  Buffer bucket_nbuf;
1093  Page opage;
1094  Page npage;
1095  HashPageOpaque oopaque;
1096  HashPageOpaque nopaque;
1097  OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
1098  IndexTuple itups[MaxIndexTuplesPerPage];
1099  Size all_tups_size = 0;
1100  int i;
1101  uint16 nitups = 0;
1102 
1103  bucket_obuf = obuf;
1104  opage = BufferGetPage(obuf);
1105  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1106 
1107  bucket_nbuf = nbuf;
1108  npage = BufferGetPage(nbuf);
1109  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1110 
1111  /* Copy the predicate locks from old bucket to new bucket. */
1112  PredicateLockPageSplit(rel,
1113  BufferGetBlockNumber(bucket_obuf),
1114  BufferGetBlockNumber(bucket_nbuf));
1115 
1116  /*
1117  * Partition the tuples in the old bucket between the old bucket and the
1118  * new bucket, advancing along the old bucket's overflow bucket chain and
1119  * adding overflow pages to the new bucket as needed. Outer loop iterates
1120  * once per page in old bucket.
1121  */
1122  for (;;)
1123  {
1124  BlockNumber oblkno;
1125  OffsetNumber ooffnum;
1126  OffsetNumber omaxoffnum;
1127 
1128  /* Scan each tuple in old page */
1129  omaxoffnum = PageGetMaxOffsetNumber(opage);
1130  for (ooffnum = FirstOffsetNumber;
1131  ooffnum <= omaxoffnum;
1132  ooffnum = OffsetNumberNext(ooffnum))
1133  {
1134  IndexTuple itup;
1135  Size itemsz;
1136  Bucket bucket;
1137  bool found = false;
1138 
1139  /* skip dead tuples */
1140  if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
1141  continue;
1142 
1143  /*
1144  * Before inserting a tuple, probe the hash table containing TIDs
1145  * of tuples belonging to new bucket, if we find a match, then
1146  * skip that tuple, else fetch the item's hash key (conveniently
1147  * stored in the item) and determine which bucket it now belongs
1148  * in.
1149  */
1150  itup = (IndexTuple) PageGetItem(opage,
1151  PageGetItemId(opage, ooffnum));
1152 
1153  if (htab)
1154  (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
1155 
1156  if (found)
1157  continue;
1158 
1159  bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
1160  maxbucket, highmask, lowmask);
1161 
1162  if (bucket == nbucket)
1163  {
1164  IndexTuple new_itup;
1165 
1166  /*
1167  * make a copy of index tuple as we have to scribble on it.
1168  */
1169  new_itup = CopyIndexTuple(itup);
1170 
1171  /*
1172  * mark the index tuple as moved by split, such tuples are
1173  * skipped by scan if there is split in progress for a bucket.
1174  */
1175  new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1176 
1177  /*
1178  * insert the tuple into the new bucket. if it doesn't fit on
1179  * the current page in the new bucket, we must allocate a new
1180  * overflow page and place the tuple on that page instead.
1181  */
1182  itemsz = IndexTupleSize(new_itup);
1183  itemsz = MAXALIGN(itemsz);
1184 
1185  if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
1186  {
1187  /*
1188  * Change the shared buffer state in critical section,
1189  * otherwise any error could make it unrecoverable.
1190  */
1191  START_CRIT_SECTION();
1192 
1193  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1194  MarkBufferDirty(nbuf);
1195  /* log the split operation before releasing the lock */
1196  log_split_page(rel, nbuf);
1197 
1198  END_CRIT_SECTION();
1199 
1200  /* drop lock, but keep pin */
1201  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1202 
1203  /* be tidy */
1204  for (i = 0; i < nitups; i++)
1205  pfree(itups[i]);
1206  nitups = 0;
1207  all_tups_size = 0;
1208 
1209  /* chain to a new overflow page */
1210  nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
1211  npage = BufferGetPage(nbuf);
1212  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1213  }
1214 
1215  itups[nitups++] = new_itup;
1216  all_tups_size += itemsz;
1217  }
1218  else
1219  {
1220  /*
1221  * the tuple stays on this page, so nothing to do.
1222  */
1223  Assert(bucket == obucket);
1224  }
1225  }
1226 
1227  oblkno = oopaque->hasho_nextblkno;
1228 
1229  /* retain the pin on the old primary bucket */
1230  if (obuf == bucket_obuf)
1231  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1232  else
1233  _hash_relbuf(rel, obuf);
1234 
1235  /* Exit loop if no more overflow pages in old bucket */
1236  if (!BlockNumberIsValid(oblkno))
1237  {
1238  /*
1239  * Change the shared buffer state in critical section, otherwise
1240  * any error could make it unrecoverable.
1241  */
1242  START_CRIT_SECTION();
1243 
1244  _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1245  MarkBufferDirty(nbuf);
1246  /* log the split operation before releasing the lock */
1247  log_split_page(rel, nbuf);
1248 
1249  END_CRIT_SECTION();
1250 
1251  if (nbuf == bucket_nbuf)
1252  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1253  else
1254  _hash_relbuf(rel, nbuf);
1255 
1256  /* be tidy */
1257  for (i = 0; i < nitups; i++)
1258  pfree(itups[i]);
1259  break;
1260  }
1261 
1262  /* Else, advance to next old page */
1263  obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1264  opage = BufferGetPage(obuf);
1265  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1266  }
1267 
1268  /*
1269  * We're at the end of the old bucket chain, so we're done partitioning
1270  * the tuples. Mark the old and new buckets to indicate split is
1271  * finished.
1272  *
1273  * To avoid deadlocks due to locking order of buckets, first lock the old
1274  * bucket and then the new bucket.
1275  */
1276  LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
1277  opage = BufferGetPage(bucket_obuf);
1278  oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1279 
1280  LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
1281  npage = BufferGetPage(bucket_nbuf);
1282  nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1283 
1283 
1284  START_CRIT_SECTION();
1285 
1286  oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1287  nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1288 
1289  /*
1290  * After the split is finished, mark the old bucket to indicate that it
1291  * contains deletable tuples. We will clear split-cleanup flag after
1292  * deleting such tuples either at the end of split or at the next split
1293  * from old bucket or at the time of vacuum.
1294  */
1295  oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1296 
1297  /*
1298  * now write the buffers, here we don't release the locks as caller is
1299  * responsible to release locks.
1300  */
1301  MarkBufferDirty(bucket_obuf);
1302  MarkBufferDirty(bucket_nbuf);
1303 
1304  if (RelationNeedsWAL(rel))
1305  {
1306  XLogRecPtr recptr;
1307  xl_hash_split_complete xlrec;
1308 
1309  xlrec.old_bucket_flag = oopaque->hasho_flag;
1310  xlrec.new_bucket_flag = nopaque->hasho_flag;
1311 
1312  XLogBeginInsert();
1313 
1314  XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
1315 
1316  XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
1317  XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
1318 
1319  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
1320 
1321  PageSetLSN(BufferGetPage(bucket_obuf), recptr);
1322  PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
1323  }
1324 
1325  END_CRIT_SECTION();
1326 
1327  /*
1328  * If possible, clean up the old bucket. We might not be able to do this
1329  * if someone else has a pin on it, but if not then we can go ahead. This
1330  * isn't absolutely necessary, but it reduces bloat; if we don't do it
1331  * now, VACUUM will do it eventually, but maybe not until new overflow
1332  * pages have been allocated. Note that there's no need to clean up the
1333  * new bucket.
1334  */
1335  if (IsBufferCleanupOK(bucket_obuf))
1336  {
1337  LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1338  hashbucketcleanup(rel, obucket, bucket_obuf,
1339  BufferGetBlockNumber(bucket_obuf), NULL,
1340  maxbucket, highmask, lowmask, NULL, NULL, true,
1341  NULL, NULL);
1342  }
1343  else
1344  {
1345  LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1346  LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
1347  }
1348 }
1349 
1350 /*
1351  * _hash_finish_split() -- Finish the previously interrupted split operation
1352  *
1353  * To complete the split operation, we build a hash table of the TIDs already
1354  * present in the new bucket; the split operation then uses it to skip tuples
1355  * that were already moved before the split was interrupted.
1356  *
1357  * The caller must hold a pin, but no lock, on the metapage and old bucket's
1358  * primary page buffer. The buffers are returned in the same state. (The
1359  * metapage is only touched if it becomes necessary to add or remove overflow
1360  * pages.)
1361  */
1362 void
1363 _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
1364  uint32 maxbucket, uint32 highmask, uint32 lowmask)
1365 {
1366  HASHCTL hash_ctl;
1367  HTAB *tidhtab;
1368  Buffer bucket_nbuf = InvalidBuffer;
1369  Buffer nbuf;
1370  Page npage;
1371  BlockNumber nblkno;
1372  BlockNumber bucket_nblkno;
1373  HashPageOpaque npageopaque;
1374  Bucket nbucket;
1375  bool found;
1376 
1377  /* Initialize hash tables used to track TIDs */
1378  memset(&hash_ctl, 0, sizeof(hash_ctl));
1379  hash_ctl.keysize = sizeof(ItemPointerData);
1380  hash_ctl.entrysize = sizeof(ItemPointerData);
1381  hash_ctl.hcxt = CurrentMemoryContext;
1382 
1383  tidhtab =
1384  hash_create("bucket ctids",
1385  256, /* arbitrary initial size */
1386  &hash_ctl,
1387  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1388 
1389  bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1390 
1391  /*
1392  * Scan the new bucket and build hash table of TIDs
1393  */
1394  for (;;)
1395  {
1396  OffsetNumber noffnum;
1397  OffsetNumber nmaxoffnum;
1398 
1399  nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1400  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1401 
1402  /* remember the primary bucket buffer to acquire cleanup lock on it. */
1403  if (nblkno == bucket_nblkno)
1404  bucket_nbuf = nbuf;
1405 
1406  npage = BufferGetPage(nbuf);
1407  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1408 
1409  /* Scan each tuple in new page */
1410  nmaxoffnum = PageGetMaxOffsetNumber(npage);
1411  for (noffnum = FirstOffsetNumber;
1412  noffnum <= nmaxoffnum;
1413  noffnum = OffsetNumberNext(noffnum))
1414  {
1415  IndexTuple itup;
1416 
1417  /* Fetch the item's TID and insert it in hash table. */
1418  itup = (IndexTuple) PageGetItem(npage,
1419  PageGetItemId(npage, noffnum));
1420 
1421  (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1422 
1423  Assert(!found);
1424  }
1425 
1426  nblkno = npageopaque->hasho_nextblkno;
1427 
1428  /*
1429  * release our write lock without modifying buffer and ensure to
1430  * retain the pin on primary bucket.
1431  */
1432  if (nbuf == bucket_nbuf)
1433  LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1434  else
1435  _hash_relbuf(rel, nbuf);
1436 
1437  /* Exit loop if no more overflow pages in new bucket */
1438  if (!BlockNumberIsValid(nblkno))
1439  break;
1440  }
1441 
1442  /*
1443  * Conditionally get the cleanup lock on old and new buckets to perform
1444  * the split operation. If we don't get the cleanup locks, silently give
1445  * up and next insertion on old bucket will try again to complete the
1446  * split.
1447  */
1448  if (!ConditionalLockBufferForCleanup(obuf))
1449  {
1450  hash_destroy(tidhtab);
1451  return;
1452  }
1453  if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1454  {
1455  LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1456  hash_destroy(tidhtab);
1457  return;
1458  }
1459 
1460  npage = BufferGetPage(bucket_nbuf);
1461  npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1462  nbucket = npageopaque->hasho_bucket;
1463 
1464  _hash_splitbucket(rel, metabuf, obucket,
1465  nbucket, obuf, bucket_nbuf, tidhtab,
1466  maxbucket, highmask, lowmask);
1467 
1468  _hash_dropbuf(rel, bucket_nbuf);
1469  hash_destroy(tidhtab);
1470 }
1471 
1472 /*
1473  * log_split_page() -- Log the split operation
1474  *
1475  * We log the split operation when the new page in new bucket gets full,
1476  * so we log the entire page.
1477  *
1478  * 'buf' must be locked by the caller, which is also responsible for
1479  * unlocking it.
1480  */
1481 static void
1482 log_split_page(Relation rel, Buffer buf)
1483 {
1484  if (RelationNeedsWAL(rel))
1485  {
1486  XLogRecPtr recptr;
1487 
1488  XLogBeginInsert();
1489 
1490  XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
1491 
1492  recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
1493 
1494  PageSetLSN(BufferGetPage(buf), recptr);
1495  }
1496 }
1497 
1498 /*
1499  * _hash_getcachedmetap() -- Returns cached metapage data.
1500  *
1501  * If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
1502  * the metapage. If not set, we'll set it before returning if we have to
1503  * refresh the cache, and return with a pin but no lock on it; caller is
1504  * responsible for releasing the pin.
1505  *
1506  * We refresh the cache if it's not initialized yet or force_refresh is true.
1507  */
1508 HashMetaPage
1509 _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
1510 {
1511  Page page;
1512 
1513  Assert(metabuf);
1514  if (force_refresh || rel->rd_amcache == NULL)
1515  {
1516  char *cache = NULL;
1517 
1518  /*
1519  * It's important that we don't set rd_amcache to an invalid value.
1520  * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
1521  * install a pointer to the newly-allocated storage in the actual
1522  * relcache entry until both have succeeded.
1523  */
1524  if (rel->rd_amcache == NULL)
1525  cache = MemoryContextAlloc(rel->rd_indexcxt,
1526  sizeof(HashMetaPageData));
1527 
1528  /* Read the metapage. */
1529  if (BufferIsValid(*metabuf))
1530  LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1531  else
1532  *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1533  LH_META_PAGE);
1534  page = BufferGetPage(*metabuf);
1535 
1536  /* Populate the cache. */
1537  if (rel->rd_amcache == NULL)
1538  rel->rd_amcache = cache;
1539  memcpy(rel->rd_amcache, HashPageGetMeta(page),
1540  sizeof(HashMetaPageData));
1541 
1542  /* Release metapage lock, but keep the pin. */
1543  LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1544  }
1545 
1546  return (HashMetaPage) rel->rd_amcache;
1547 }
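/*
 * A minimal caller sketch (mirroring what _hash_getbucketbuf_from_hashkey()
 * below does): start with an invalid buffer, let this routine pin the
 * metapage only if it has to refresh the cache, and drop that pin when done.
 *
 *     Buffer       metabuf = InvalidBuffer;
 *     HashMetaPage metap;
 *
 *     metap = _hash_getcachedmetap(rel, &metabuf, false);
 *     ... use metap->hashm_maxbucket etc. ...
 *     if (BufferIsValid(metabuf))
 *         _hash_dropbuf(rel, metabuf);
 */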
1548 
1549 /*
1550  * _hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
1551  * hashkey.
1552  *
1553  * Bucket pages do not move or get removed once they are allocated. This gives
1554  * us an opportunity to use the previously saved metapage contents to reach
1555  * the target bucket buffer, instead of reading from the metapage every time.
1556  * This saves one buffer access every time we want to reach the target bucket
1557  * buffer, which is a very helpful saving in bufmgr traffic and contention.
1558  *
1559  * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
1560  * bucket buffer has to be locked for reading or writing.
1561  *
1562  * The out parameter cachedmetap is set with metapage contents used for
1563  * hashkey to bucket buffer mapping. Some callers need this info to reach the
1564  * old bucket in case of bucket split, see _hash_doinsert().
1565  */
1566 Buffer
1567 _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
1568  HashMetaPage *cachedmetap)
1569 {
1570  HashMetaPage metap;
1571  Buffer buf;
1572  Buffer metabuf = InvalidBuffer;
1573  Page page;
1574  Bucket bucket;
1575  BlockNumber blkno;
1576  HashPageOpaque opaque;
1577 
1578  /* We read from the target bucket buffer, hence locking is a must. */
1579  Assert(access == HASH_READ || access == HASH_WRITE);
1580 
1581  metap = _hash_getcachedmetap(rel, &metabuf, false);
1582  Assert(metap != NULL);
1583 
1584  /*
1585  * Loop until we get a lock on the correct target bucket.
1586  */
1587  for (;;)
1588  {
1589  /*
1590  * Compute the target bucket number, and convert to block number.
1591  */
1592  bucket = _hash_hashkey2bucket(hashkey,
1593  metap->hashm_maxbucket,
1594  metap->hashm_highmask,
1595  metap->hashm_lowmask);
1596 
1597  blkno = BUCKET_TO_BLKNO(metap, bucket);
1598 
1599  /* Fetch the primary bucket page for the bucket */
1600  buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1601  page = BufferGetPage(buf);
1602  opaque = (HashPageOpaque) PageGetSpecialPointer(page);
1603  Assert(opaque->hasho_bucket == bucket);
1604  Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
1605 
1606  /*
1607  * If this bucket hasn't been split, we're done.
1608  */
1609  if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
1610  break;
1611 
1612  /* Drop lock on this buffer, update cached metapage, and retry. */
1613  _hash_relbuf(rel, buf);
1614  metap = _hash_getcachedmetap(rel, &metabuf, true);
1615  Assert(metap != NULL);
1616  }
1617 
1618  if (BufferIsValid(metabuf))
1619  _hash_dropbuf(rel, metabuf);
1620 
1621  if (cachedmetap)
1622  *cachedmetap = metap;
1623 
1624  return buf;
1625 }
#define HASH_DEFAULT_FILLFACTOR
Definition: hash.h:286
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
uint16 hashm_bmshift
Definition: hash.h:258
uint16 hasho_page_id
Definition: hash.h:93
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, OffsetNumber *itup_offsets, uint16 nitups)
Definition: hashinsert.c:300
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:814
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition: hashpage.c:298
RegProcedure hashm_procid
Definition: hash.h:266
#define HASH_CONTEXT
Definition: hsearch.h:93
#define HASH_ELEM
Definition: hsearch.h:87
MemoryContext hcxt
Definition: hsearch.h:78
#define XLH_SPLIT_META_UPDATE_SPLITPOINT
Definition: hash_xlog.h:52
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:608
Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashutil.c:125
#define LH_META_PAGE
Definition: hash.h:67
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy)
Definition: hashpage.c:248
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:640
#define BYTE_TO_BIT
Definition: hash.h:291
ItemPointerData t_tid
Definition: itup.h:37
regproc RegProcedure
Definition: c.h:472
uint32 hashm_magic
Definition: hash.h:251
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define XLOG_HASH_INIT_BITMAP_PAGE
Definition: hash_xlog.h:28
uint16 hashm_ffactor
Definition: hash.h:254
uint32 hashm_highmask
Definition: hash.h:260
#define InvalidBuffer
Definition: buf.h:25
Size entrysize
Definition: hsearch.h:73
#define REGBUF_WILL_INIT
Definition: xloginsert.h:32
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access, HashMetaPage *cachedmetap)
Definition: hashpage.c:1567
#define MemSet(start, val, len)
Definition: c.h:908
void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, uint32 maxbucket, uint32 highmask, uint32 lowmask, double *tuples_removed, double *num_index_tuples, bool split_cleanup, IndexBulkDeleteCallback callback, void *callback_state)
Definition: hash.c:673
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
Definition: hashpage.c:207
#define P_NEW
Definition: bufmgr.h:82
void _hash_dropbuf(Relation rel, Buffer buf)
Definition: hashpage.c:286
Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno)
Definition: hashpage.c:144
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:906
#define XLOG_HASH_SPLIT_PAGE
Definition: hash_xlog.h:32
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition: hashpage.c:79
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:70
#define HASH_VERSION
Definition: hash.h:209
#define ItemIdIsDead(itemId)
Definition: itemid.h:112
uint32 hashm_lowmask
Definition: hash.h:261
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
signed int int32
Definition: c.h:313
#define BUCKET_TO_BLKNO(metap, B)
Definition: hash.h:38
#define HASH_MAX_SPLITPOINTS
Definition: hash.h:244
#define LH_UNUSED_PAGE
Definition: hash.h:63
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
Definition: hashpage.c:1000
uint16 OffsetNumber
Definition: off.h:24
#define HASH_MAGIC
Definition: hash.h:208
#define HASH_READ
Definition: hash.h:329
#define HashGetMaxBitmapSize(page)
Definition: hash.h:309
uint32 Bucket
Definition: hash.h:34
#define RelationOpenSmgr(relation)
Definition: rel.h:465
Definition: dynahash.c:208
unsigned short uint16
Definition: c.h:324
void pfree(void *pointer)
Definition: mcxt.c:1031
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define H_NEEDS_SPLIT_CLEANUP(opaque)
Definition: hash.h:98
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3718
BlockNumber hasho_prevblkno
Definition: hash.h:89
#define ERROR
Definition: elog.h:43
#define SizeOfHashSplitAllocPage
Definition: hash_xlog.h:119
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition: hashutil.c:299
#define SizeOfHashInitMetaPage
Definition: hash_xlog.h:236
uint32 _hash_spareindex(uint32 num_bucket)
Definition: hashutil.c:157
void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage)
Definition: hashpage.c:507
uint32 _hash_get_totalbuckets(uint32 splitpoint_phase)
Definition: hashutil.c:189
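The two hashutil.c entries above pair naturally: _hash_spareindex() reports which splitpoint phase a bucket belongs to, and _hash_get_totalbuckets() how many buckets that phase covers in total. A small, purely illustrative sketch:

#include "postgres.h"

#include "access/hash.h"

/*
 * Hypothetical: log which splitpoint phase the next bucket to be added
 * falls into, and the total bucket count that phase accounts for.
 */
static void
report_splitpoint(uint32 new_bucket)
{
    uint32      phase = _hash_spareindex(new_bucket + 1);
    uint32      total = _hash_get_totalbuckets(phase);

    elog(DEBUG1, "bucket %u belongs to splitpoint phase %u (%u buckets in total)",
         new_bucket, phase, total);
}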
uint32 hashm_version
Definition: hash.h:252
#define SizeOfHashInitBitmapPage
Definition: hash_xlog.h:252
uint32 hashm_nmaps
Definition: hash.h:265
void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:1363
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:441
#define HASH_WRITE
Definition: hash.h:330
#define HASH_NOLOCK
Definition: hash.h:331
#define BMPG_MASK(metap)
Definition: hash.h:304
#define FirstOffsetNumber
Definition: off.h:27
IndexTupleData * IndexTuple
Definition: itup.h:53
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define RelationGetRelationName(relation)
Definition: rel.h:441
#define LH_BUCKET_BEING_SPLIT
Definition: hash.h:69
unsigned int uint32
Definition: c.h:325
struct ItemIdData ItemIdData
MemoryContext CurrentMemoryContext
Definition: mcxt.c:38
#define XLOG_HASH_INIT_META_PAGE
Definition: hash_xlog.h:27
Buffer _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
Definition: hashpage.c:105
void _hash_expandtable(Relation rel, Buffer metabuf)
Definition: hashpage.c:626
#define HASHSTANDARD_PROC
Definition: hash.h:351
#define BMPG_SHIFT(metap)
Definition: hash.h:303
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
Buffer hashso_bucket_buf
Definition: hash.h:172
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:3774
ForkNumber
Definition: relpath.h:40
#define HASH_MAX_BITMAPS
Definition: hash.h:235
#define REGBUF_FORCE_IMAGE
Definition: xloginsert.h:30
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
uint32 hashm_ovflpoint
Definition: hash.h:262
uint16 hashm_bsize
Definition: hash.h:255
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
#define LH_BUCKET_BEING_POPULATED
Definition: hash.h:68
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define HASH_BLOBS
Definition: hsearch.h:88
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition: hashutil.c:225
#define HASH_METAPAGE
Definition: hash.h:206
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
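The dynahash entries indexed here (HTAB, HASHCTL keysize/entrysize, hash_create(), hash_search(), HASH_BLOBS) follow the backend's usual pattern for a private, byte-keyed hash table. The sketch below is an illustration with made-up table name, sizes, and entry layout, not the table hashpage.c itself builds:

#include "postgres.h"

#include "storage/itemptr.h"
#include "utils/hsearch.h"

/*
 * Hypothetical: create a table keyed by raw ItemPointerData bytes
 * (HASH_BLOBS), allocated in the current memory context.
 */
static HTAB *
create_tid_table(void)
{
    HASHCTL     hash_ctl;

    MemSet(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(ItemPointerData);
    hash_ctl.entrysize = sizeof(ItemPointerData);
    hash_ctl.hcxt = CurrentMemoryContext;

    return hash_create("tid table",      /* illustrative name */
                       256,              /* initial size hint */
                       &hash_ctl,
                       HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

A caller would then probe the table with hash_search(htab, (void *) &itemptr, HASH_ENTER, &found) and treat found == true as "TID already seen".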
double hashm_ntuples
Definition: hash.h:253
bool hashso_buc_populated
Definition: hash.h:182
Buffer buf
Definition: hash.h:119
#define BufferGetPageSize(buffer)
Definition: bufmgr.h:147
#define LH_OVERFLOW_PAGE
Definition: hash.h:64
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint32 hashm_firstfree
Definition: hash.h:264
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:267
Size keysize
Definition: hsearch.h:72
Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups)
Definition: bufpage.c:605
HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Definition: hashpage.c:1509
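A sketch of the caching contract implied by the _hash_getcachedmetap() entry above: the function returns a pointer to the copy kept in the relcache entry, and only when it had to (re)read the real meta page does it hand back a pinned but unlocked buffer in *metabuf, which the caller releases with _hash_dropbuf(). The helper below is hypothetical:

#include "postgres.h"

#include "access/hash.h"

/*
 * Hypothetical: report the current number of buckets using the cached
 * meta page, touching the real meta page only if no cached copy exists.
 */
static uint32
hash_index_nbuckets(Relation rel)
{
    HashMetaPage metap;
    Buffer      metabuf = InvalidBuffer;
    uint32      nbuckets;

    metap = _hash_getcachedmetap(rel, &metabuf, false);
    nbuckets = metap->hashm_maxbucket + 1;

    if (BufferIsValid(metabuf))
        _hash_dropbuf(rel, metabuf);    /* pinned but not locked; just unpin */

    return nbuckets;
}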
static void log_split_page(Relation rel, Buffer buf)
Definition: hashpage.c:1482
void _hash_relbuf(Relation rel, Buffer buf)
Definition: hashpage.c:275
BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket)
Definition: hashutil.c:469
void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
Definition: hashovfl.c:740
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define LH_BUCKET_PAGE
Definition: hash.h:65
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2788
#define H_BUCKET_BEING_SPLIT(opaque)
Definition: hash.h:99
RelFileNode rd_node
Definition: rel.h:55
PageHeaderData * PageHeader
Definition: bufpage.h:162
static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask)
Definition: hashpage.c:1080
#define RelationGetTargetPageUsage(relation, defaultff)
Definition: rel.h:291
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:699
Bucket hasho_bucket
Definition: hash.h:91
void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage)
Definition: hashpage.c:166
#define SizeOfHashSplitComplete
Definition: hash_xlog.h:136
struct ItemPointerData ItemPointerData
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define OffsetNumberNext(offsetNumber)
Definition: off.h:53
size_t Size
Definition: c.h:433
#define PageGetSpecialPointer(page)
Definition: bufpage.h:322
#define InvalidBlockNumber
Definition: block.h:33
HashPageOpaqueData * HashPageOpaque
Definition: hash.h:96
#define HASHO_PAGE_ID
Definition: hash.h:109
Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
Definition: hashovfl.c:111
#define MAXALIGN(LEN)
Definition: c.h:652
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
#define RelationNeedsWAL(relation)
Definition: rel.h:510
uint32 hashm_maxbucket
Definition: hash.h:259
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:600
#define XLOG_HASH_SPLIT_COMPLETE
Definition: hash_xlog.h:33
HashScanPosData currPos
Definition: hash.h:197
uint16 hasho_flag
Definition: hash.h:92
bool hashso_buc_split
Definition: hash.h:188
uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
Definition: hashpage.c:336
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define XLOG_HASH_SPLIT_ALLOCATE_PAGE
Definition: hash_xlog.h:31
RegProcedure procid
Definition: hash_xlog.h:232
#define MaxIndexTuplesPerPage
Definition: itup.h:145
#define HashPageGetMeta(page)
Definition: hash.h:313
uint32 _hash_log2(uint32 num)
Definition: hashutil.c:141
int errmsg(const char *fmt,...)
Definition: elog.c:797
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:771
XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std)
Definition: xloginsert.c:972
MemoryContext rd_indexcxt
Definition: rel.h:151
Buffer hashso_split_bucket_buf
Definition: hash.h:179
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
BlockNumber hasho_nextblkno
Definition: hash.h:90
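The page-opaque entries (HashPageOpaque, hasho_nextblkno, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE) describe how a bucket's primary page chains to its overflow pages. A rough, purely illustrative walker, which ignores concurrent splits and counts raw line pointers rather than live tuples:

#include "postgres.h"

#include "access/hash.h"

/*
 * Hypothetical: walk one bucket chain via hasho_nextblkno and count the
 * line pointers on each page.
 */
static double
count_bucket_items(Relation rel, BlockNumber bucket_blkno)
{
    double      nitems = 0;
    BlockNumber blkno = bucket_blkno;
    bool        first = true;

    while (BlockNumberIsValid(blkno))
    {
        Buffer      buf;
        Page        page;
        HashPageOpaque opaque;

        buf = _hash_getbuf(rel, blkno, HASH_READ,
                           first ? LH_BUCKET_PAGE : LH_OVERFLOW_PAGE);
        page = BufferGetPage(buf);
        opaque = (HashPageOpaque) PageGetSpecialPointer(page);

        nitems += PageGetMaxOffsetNumber(page);
        blkno = opaque->hasho_nextblkno;

        _hash_relbuf(rel, buf);
        first = false;
    }

    return nitems;
}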
uint16 hashm_bmsize
Definition: hash.h:256
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:98
#define elog
Definition: elog.h:219
unsigned short t_info
Definition: itup.h:49
void * rd_amcache
Definition: rel.h:164
#define XLH_SPLIT_META_UPDATE_MASKS
Definition: hash_xlog.h:51
#define INDEX_MOVED_BY_SPLIT_MASK
Definition: hash.h:283
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
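The WAL entries indexed here (XLogBeginInsert(), XLogRegisterData(), XLogInsert(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, PageSetLSN(), RelationNeedsWAL()) combine into the backend's standard record-emission pattern. The sketch below assumes the caller has already modified the page and marked the buffer dirty inside a critical section; XLogRegisterBuffer() is the buffer-registering counterpart of the XLogRegisterData() entry above, and the helper name is hypothetical:

#include "postgres.h"

#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/xloginsert.h"
#include "utils/rel.h"

/*
 * Hypothetical: emit a hash-rmgr WAL record carrying a full image of the
 * given buffer, and stamp the page with the record's LSN.
 */
static void
log_full_page_image(Relation rel, Buffer buf, uint8 info)
{
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
        recptr = XLogInsert(RM_HASH_ID, info);
        PageSetLSN(BufferGetPage(buf), recptr);
    }
}

Here info would be one of the XLOG_HASH_* opcodes listed above, for example XLOG_HASH_SPLIT_PAGE.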
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]
Definition: hash.h:269
int Buffer
Definition: buf.h:23
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3065
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define IndexTupleSize(itup)
Definition: itup.h:71
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:41
RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:821