/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *	  Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/hash/hashpage.c
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
 *	  data at high addresses includes information about the page including
 *	  whether a page is an overflow page or a true bucket, the bucket
 *	  number, and the block numbers of the preceding and following pages
 *	  in the same bucket.
 *
 *	  The first page in a hash relation, page zero, is special -- it stores
 *	  information describing the hash table; it is referred to as the
 *	  "meta page."  Pages one and higher store the actual data.
 *
 *	  There are also bitmap pages, which are not manipulated here;
 *	  see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/predicate.h"
#include "storage/smgr.h"
#include "utils/rel.h"

static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
								uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
							  Bucket obucket, Bucket nbucket,
							  Buffer obuf,
							  Buffer nbuf,
							  HTAB *htab,
							  uint32 maxbucket,
							  uint32 highmask, uint32 lowmask);
static void log_split_page(Relation rel, Buffer buf);

/*
 *	_hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *		'flags' is a bitwise OR of the allowed page types.
 *
 *		This must be used only to fetch pages that are expected to be valid
 *		already.  _hash_checkpage() is applied using the given flags.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBuffer(rel, blkno);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}
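
/*
 * Illustrative sketch (hypothetical caller, not from this file): a typical
 * read-path call, where "blkno" is assumed to have been computed via
 * BUCKET_TO_BLKNO from metapage contents.
 *
 *		buf = _hash_getbuf(rel, blkno, HASH_READ,
 *						   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 *		page = BufferGetPage(buf);
 *		... examine tuples on the page ...
 *		_hash_relbuf(rel, buf);		(drops lock and pin together)
 */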

/*
 *	_hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
 *
 *		We read the page and try to acquire a cleanup lock.  If we get it,
 *		we return the buffer; otherwise, we return InvalidBuffer.
 */
Buffer
_hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBuffer(rel, blkno);

	if (!ConditionalLockBufferForCleanup(buf))
	{
		ReleaseBuffer(buf);
		return InvalidBuffer;
	}

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}

/*
 *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 *		This must be used only to fetch pages that are known to be before
 *		the index's filesystem EOF, but are to be filled from scratch.
 *		_hash_pageinit() is applied automatically.  Otherwise it has
 *		effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 *		When this routine returns, a write lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
 *		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
							 NULL);

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}

/*
 *	_hash_initbuf() -- Get and initialize a buffer by bucket number.
 */
void
_hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
			  bool initpage)
{
	HashPageOpaque pageopaque;
	Page		page;

	page = BufferGetPage(buf);

	/* initialize the page */
	if (initpage)
		_hash_pageinit(page, BufferGetPageSize(buf));

	pageopaque = HashPageGetOpaque(page);

	/*
	 * Set hasho_prevblkno with current hashm_maxbucket.  This value will be
	 * used to validate cached HashMetaPageData.  See
	 * _hash_getbucketbuf_from_hashkey().
	 */
	pageopaque->hasho_prevblkno = max_bucket;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = num_bucket;
	pageopaque->hasho_flag = flag;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;
}

/*
 *	_hash_getnewbuf() -- Get a new page at the end of the index.
 *
 *		This has the same API as _hash_getinitbuf, except that we are adding
 *		a page to the index, and hence expect the page to be past the
 *		logical EOF.  (However, we have to support the case where it isn't,
 *		since a prior try might have crashed after extending the filesystem
 *		EOF but before updating the metapage to reflect the added page.)
 *
 *		It is caller's responsibility to ensure that only one process can
 *		extend the index at a time.  In practice, this function is called
 *		only while holding write lock on the metapage, because adding a page
 *		is always associated with an update of metapage data.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
	BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");
	if (blkno > nblocks)
		elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
			 RelationGetRelationName(rel));

	/* smgr insists we explicitly extend the relation */
	if (blkno == nblocks)
	{
		buf = ExtendBufferedRel(BMR_REL(rel), forkNum, NULL,
								EB_LOCK_FIRST | EB_SKIP_EXTENSION_LOCK);
		if (BufferGetBlockNumber(buf) != blkno)
			elog(ERROR, "unexpected hash relation size: %u, should be %u",
				 BufferGetBlockNumber(buf), blkno);
	}
	else
	{
		buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
								 NULL);
	}

	/* ref count and lock type are correct */

	/* initialize the page */
	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

	return buf;
}
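
/*
 * Illustrative call pattern (hedged sketch; not from this file): the split
 * path extends the index while holding the metapage write lock, so that no
 * one else can extend it concurrently.
 *
 *		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 *		... compute blkno for the new page from metapage info ...
 *		buf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM);
 *		... initialize and WAL-log the page, update the metapage ...
 */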

/*
 *	_hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 *		This is identical to _hash_getbuf() but also allows a buffer access
 *		strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
						   int access, int flags,
						   BufferAccessStrategy bstrategy)
{
	Buffer		buf;

	if (blkno == P_NEW)
		elog(ERROR, "hash AM does not use P_NEW");

	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

	if (access != HASH_NOLOCK)
		LockBuffer(buf, access);

	/* ref count and lock type are correct */

	_hash_checkpage(rel, buf, flags);

	return buf;
}
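
/*
 * Illustrative sketch (hypothetical caller): VACUUM-style access through a
 * ring-buffer strategy, in the style of hashbucketcleanup(); "bstrategy"
 * is assumed to come from the bulk-delete info.
 *
 *		buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
 *										 LH_OVERFLOW_PAGE, bstrategy);
 *		... prune dead tuples on the page ...
 *		_hash_relbuf(rel, buf);
 */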

/*
 *	_hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
	UnlockReleaseBuffer(buf);
}

/*
 *	_hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
	ReleaseBuffer(buf);
}

/*
 *	_hash_dropscanbuf() -- release buffers used in scan.
 *
 * This routine unpins the buffers used during scan on which we
 * hold no lock.
 */
void
_hash_dropscanbuf(Relation rel, HashScanOpaque so)
{
	/* release pin we hold on primary bucket page */
	if (BufferIsValid(so->hashso_bucket_buf) &&
		so->hashso_bucket_buf != so->currPos.buf)
		_hash_dropbuf(rel, so->hashso_bucket_buf);
	so->hashso_bucket_buf = InvalidBuffer;

	/* release pin we hold on primary bucket page of bucket being split */
	if (BufferIsValid(so->hashso_split_bucket_buf) &&
		so->hashso_split_bucket_buf != so->currPos.buf)
		_hash_dropbuf(rel, so->hashso_split_bucket_buf);
	so->hashso_split_bucket_buf = InvalidBuffer;

	/* release any pin we still hold */
	if (BufferIsValid(so->currPos.buf))
		_hash_dropbuf(rel, so->currPos.buf);
	so->currPos.buf = InvalidBuffer;

	/* reset split scan */
	so->hashso_buc_populated = false;
	so->hashso_buc_split = false;
}

/*
 *	_hash_init() -- Initialize the metadata page of a hash index,
 *				the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
{
	Buffer		metabuf;
	Buffer		buf;
	Buffer		bitmapbuf;
	Page		pg;
	HashMetaPage metap;
	RegProcedure procid;
	int32		data_width;
	int32		item_width;
	int32		ffactor;
	uint32		num_buckets;
	uint32		i;
	bool		use_wal;

	/* safety check */
	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
			 RelationGetRelationName(rel));

	/*
	 * WAL log creation of pages if the relation is persistent, or this is
	 * the init fork.  Init forks for unlogged relations always need to be
	 * WAL logged.
	 */
	use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;

	/*
	 * Determine the target fill factor (in tuples per bucket) for this
	 * index.  The idea is to make the fill factor correspond to pages about
	 * as full as the user-settable fillfactor parameter says.  We can
	 * compute it exactly since the index datatype (i.e. uint32 hash key) is
	 * fixed-width.
	 */
	data_width = sizeof(uint32);
	item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
		sizeof(ItemIdData);		/* include the line pointer */
	ffactor = HashGetTargetPageUsage(rel) / item_width;
	/* keep to a sane range */
	if (ffactor < 10)
		ffactor = 10;

	procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);

	/*
	 * We initialize the metapage, the first N bucket pages, and the first
	 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
	 * calls to occur.  This ensures that the smgr level has the right idea
	 * of the physical index length.
	 *
	 * Critical section not required, because on error the creation of the
	 * whole relation will be rolled back.
	 */
	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
	_hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
	MarkBufferDirty(metabuf);

	pg = BufferGetPage(metabuf);
	metap = HashPageGetMeta(pg);

	/* XLOG stuff */
	if (use_wal)
	{
		xl_hash_init_meta_page xlrec;
		XLogRecPtr	recptr;

		xlrec.num_tuples = num_tuples;
		xlrec.procid = metap->hashm_procid;
		xlrec.ffactor = metap->hashm_ffactor;

		XLogBeginInsert();
		XLogRegisterData(&xlrec, SizeOfHashInitMetaPage);
		XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);

		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	num_buckets = metap->hashm_maxbucket + 1;

	/*
	 * Release buffer lock on the metapage while we initialize buckets.
	 * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
	 * won't accomplish anything.  It's a bad idea to hold buffer locks for
	 * long intervals in any case, since that can block the bgwriter.
	 */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/*
	 * Initialize and WAL Log the first N buckets
	 */
	for (i = 0; i < num_buckets; i++)
	{
		BlockNumber blkno;

		/* Allow interrupts, in case N is huge */
		CHECK_FOR_INTERRUPTS();

		blkno = BUCKET_TO_BLKNO(metap, i);
		buf = _hash_getnewbuf(rel, blkno, forkNum);
		_hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
		MarkBufferDirty(buf);

		if (use_wal)
			log_newpage(&rel->rd_locator,
						forkNum,
						blkno,
						BufferGetPage(buf),
						true);
		_hash_relbuf(rel, buf);
	}

	/* Now reacquire buffer lock on metapage */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Initialize bitmap page
	 */
	bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
	_hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
	MarkBufferDirty(bitmapbuf);

	/* add the new bitmap page to the metapage's list of bitmaps */
	/* metapage already has a write lock */
	if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("out of overflow pages in hash index \"%s\"",
						RelationGetRelationName(rel))));

	metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;

	metap->hashm_nmaps++;
	MarkBufferDirty(metabuf);

	/* XLOG stuff */
	if (use_wal)
	{
		xl_hash_init_bitmap_page xlrec;
		XLogRecPtr	recptr;

		xlrec.bmsize = metap->hashm_bmsize;

		XLogBeginInsert();
		XLogRegisterData(&xlrec, SizeOfHashInitBitmapPage);
		XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);

		/*
		 * This is safe only because nobody else can be modifying the index
		 * at this stage; it's only visible to the transaction that is
		 * creating it.
		 */
		XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);

		PageSetLSN(BufferGetPage(bitmapbuf), recptr);
		PageSetLSN(BufferGetPage(metabuf), recptr);
	}

	/* all done */
	_hash_relbuf(rel, bitmapbuf);
	_hash_relbuf(rel, metabuf);

	return num_buckets;
}
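
/*
 * Illustrative layout (hypothetical numbers, not from the original file):
 * if the tuple estimate calls for 4 initial buckets, the index created
 * above consists of
 *
 *		block 0				meta page
 *		blocks 1 .. 4		primary bucket pages for buckets 0 .. 3
 *		block 5				first bitmap page
 *
 * and this function returns 4.
 */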

/*
 *	_hash_init_metabuffer() -- Initialize the metadata page of a hash index.
 */
void
_hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
					  uint16 ffactor, bool initpage)
{
	HashMetaPage metap;
	HashPageOpaque pageopaque;
	Page		page;
	double		dnumbuckets;
	uint32		num_buckets;
	uint32		spare_index;
	uint32		lshift;

	/*
	 * Choose the number of initial bucket pages to match the fill factor
	 * given the estimated number of tuples.  We round up the result to the
	 * total number of buckets which has to be allocated before using its
	 * hashm_spares element.  However always force at least 2 bucket pages.
	 * The upper limit is determined by considerations explained in
	 * _hash_expandtable().
	 */
	dnumbuckets = num_tuples / ffactor;
	if (dnumbuckets <= 2.0)
		num_buckets = 2;
	else if (dnumbuckets >= (double) 0x40000000)
		num_buckets = 0x40000000;
	else
		num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));

	spare_index = _hash_spareindex(num_buckets);
	Assert(spare_index <= HASH_MAX_SPLITPOINTS);

	page = BufferGetPage(buf);
	if (initpage)
		_hash_pageinit(page, BufferGetPageSize(buf));

	pageopaque = HashPageGetOpaque(page);
	pageopaque->hasho_prevblkno = InvalidBlockNumber;
	pageopaque->hasho_nextblkno = InvalidBlockNumber;
	pageopaque->hasho_bucket = InvalidBucket;
	pageopaque->hasho_flag = LH_META_PAGE;
	pageopaque->hasho_page_id = HASHO_PAGE_ID;

	metap = HashPageGetMeta(page);

	metap->hashm_magic = HASH_MAGIC;
	metap->hashm_version = HASH_VERSION;
	metap->hashm_ntuples = 0;
	metap->hashm_nmaps = 0;
	metap->hashm_ffactor = ffactor;
	metap->hashm_bsize = HashGetMaxBitmapSize(page);

	/* find largest bitmap array size that will fit in page size */
	lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
	Assert(lshift > 0);
	metap->hashm_bmsize = 1 << lshift;
	metap->hashm_bmshift = lshift + BYTE_TO_BIT;
	Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));

	/*
	 * Label the index with its primary hash support function's OID.  This is
	 * pretty useless for normal operation (in fact, hashm_procid is not used
	 * anywhere), but it might be handy for forensic purposes so we keep it.
	 */
	metap->hashm_procid = procid;

	/*
	 * We initialize the index with N buckets, 0 .. N-1, occupying physical
	 * blocks 1 to N.  The first freespace bitmap page is in block N+1.
	 */
	metap->hashm_maxbucket = num_buckets - 1;

	/*
	 * Set highmask as next immediate ((2 ^ x) - 1), which should be
	 * sufficient to cover num_buckets.
	 */
	metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
	metap->hashm_lowmask = (metap->hashm_highmask >> 1);

	MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
	MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

	/* Set up mapping for one spare page after the initial splitpoints */
	metap->hashm_spares[spare_index] = 1;
	metap->hashm_ovflpoint = spare_index;
	metap->hashm_firstfree = 0;

	/*
	 * Set pd_lower just past the end of the metadata.  This is essential,
	 * because without doing so, metadata will be lost if xlog.c compresses
	 * the page.
	 */
	((PageHeader) page)->pd_lower =
		((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
}
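
/*
 * Illustrative arithmetic (hypothetical inputs, not from the original
 * file): with num_tuples = 100000 and ffactor = 250, dnumbuckets = 400.
 * _hash_spareindex() picks the splitpoint phase that covers 400 buckets,
 * and _hash_get_totalbuckets() expands that to the phase's full
 * allocation, 512.  The metapage then gets hashm_maxbucket = 511,
 * hashm_highmask = 1023 and hashm_lowmask = 511, so hashkeys map directly
 * onto buckets 0..511 until the first split.
 */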

/*
 *	_hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
	PageInit(page, size, sizeof(HashPageOpaqueData));
}

/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if we don't get cleanup lock on old or
 * new bucket.
 *
 * Complete the pending splits and remove the tuples from old bucket,
 * if there are any left over from the previous split.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
	HashMetaPage metap;
	Bucket		old_bucket;
	Bucket		new_bucket;
	uint32		spare_ndx;
	BlockNumber start_oblkno;
	BlockNumber start_nblkno;
	Buffer		buf_nblkno;
	Buffer		buf_oblkno;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	uint32		maxbucket;
	uint32		highmask;
	uint32		lowmask;
	bool		metap_update_masks = false;
	bool		metap_update_splitpoint = false;
	XLogRecPtr	recptr;

restart_expand:

	/*
	 * Write-lock the meta page.  It used to be necessary to acquire a
	 * heavyweight lock to begin a split, but that is no longer required.
	 */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

	_hash_checkpage(rel, metabuf, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/*
	 * Check to see if split is still needed; someone else might have already
	 * done one while we waited for the lock.
	 *
	 * Make sure this stays in sync with _hash_doinsert()
	 */
	if (metap->hashm_ntuples <=
		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
		goto fail;

	/*
	 * Can't split anymore if maxbucket has reached its maximum possible
	 * value.
	 *
	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
	 * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
	 * to half that to prevent failure of pg_ceil_log2_32() and insufficient
	 * space in hashm_spares[].  It's moot anyway because an index with 2^32
	 * buckets would certainly overflow BlockNumber and hence
	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
	 * than a disk block then this would be an independent constraint.
	 *
	 * If you change this, see also the maximum initial number of buckets in
	 * _hash_init().
	 */
	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
		goto fail;

	/*
	 * Determine which bucket is to be split, and attempt to take cleanup
	 * lock on the old bucket.  If we can't get the lock, give up.
	 *
	 * The cleanup lock protects us not only against other backends, but
	 * against our own backend as well.
	 *
	 * The cleanup lock is mainly to protect the split from concurrent
	 * inserts.  See src/backend/access/hash/README, Lock Definitions for
	 * further details.  Due to this locking restriction, if there is any
	 * pending scan, the split will give up, which is not good, but harmless.
	 */
	new_bucket = metap->hashm_maxbucket + 1;

	old_bucket = (new_bucket & metap->hashm_lowmask);

	start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);

	buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
	if (!buf_oblkno)
		goto fail;

	opage = BufferGetPage(buf_oblkno);
	oopaque = HashPageGetOpaque(opage);

	/*
	 * We want to finish the split from a bucket as there is no apparent
	 * benefit by not doing so and it will make the code complicated to
	 * finish the split that involves multiple buckets considering the case
	 * where new split also fails.  We don't need to consider the new bucket
	 * for completing the split here as it is not possible that a re-split of
	 * new bucket starts when there is still a pending split from old bucket.
	 */
	if (H_BUCKET_BEING_SPLIT(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer the comment in code below
		 * where we copy this information before calling _hash_splitbucket
		 * to see why this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/*
		 * Release the lock on metapage and old_bucket, before completing the
		 * split.
		 */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);

		_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
						   highmask, lowmask);

		/* release the pin on old buffer and retry for expand. */
		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}

	/*
	 * Clean up the tuples remaining from the previous split.  This operation
	 * requires cleanup lock and we already have one on the old bucket, so
	 * let's do it.  We also don't want to allow further splits from the
	 * bucket till the garbage of previous split is cleaned.  This has two
	 * advantages; first, it helps in avoiding the bloat due to garbage and
	 * second is, during cleanup of bucket, we are always sure that the
	 * garbage tuples belong to most recently split bucket.  On the contrary,
	 * if we allow cleanup of bucket after meta page is updated to indicate
	 * the new split and before the actual split, the cleanup operation won't
	 * be able to decide whether the tuple has been moved to the newly created
	 * bucket and may end up deleting such tuples.
	 */
	if (H_NEEDS_SPLIT_CLEANUP(oopaque))
	{
		/*
		 * Copy bucket mapping info now; refer to the comment in code below
		 * where we copy this information before calling _hash_splitbucket
		 * to see why this is okay.
		 */
		maxbucket = metap->hashm_maxbucket;
		highmask = metap->hashm_highmask;
		lowmask = metap->hashm_lowmask;

		/* Release the metapage lock. */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
						  maxbucket, highmask, lowmask, NULL, NULL, true,
						  NULL, NULL);

		_hash_dropbuf(rel, buf_oblkno);

		goto restart_expand;
	}

	/*
	 * There shouldn't be any active scan on new bucket.
	 *
	 * Note: it is safe to compute the new bucket's blkno here, even though
	 * we may still need to update the BUCKET_TO_BLKNO mapping.  This is
	 * because the current value of hashm_spares[hashm_ovflpoint] correctly
	 * shows where we are going to put a new splitpoint's worth of buckets.
	 */
	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

	/*
	 * If the split point is increasing we need to allocate a new batch of
	 * bucket pages.
	 */
	spare_ndx = _hash_spareindex(new_bucket + 1);
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		uint32		buckets_to_add;

		Assert(spare_ndx == metap->hashm_ovflpoint + 1);

		/*
		 * We treat allocation of buckets as a separate WAL-logged action.
		 * Even if we fail after this operation, we won't leak bucket pages;
		 * rather, the next split will consume this space.  In any case, even
		 * without failure we don't use all the space in one split operation.
		 */
		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
		{
			/* can't split due to BlockNumber overflow */
			_hash_relbuf(rel, buf_oblkno);
			goto fail;
		}
	}

	/*
	 * Physically allocate the new bucket's primary page.  We want to do this
	 * before changing the metapage's mapping info, in case we can't get the
	 * disk space.
	 *
	 * XXX It doesn't make sense to call _hash_getnewbuf first, zeroing the
	 * buffer, and then only afterwards check whether we have a cleanup lock.
	 * However, since no scan can be accessing the buffer yet, any concurrent
	 * accesses will just be from processes like the bgwriter or checkpointer
	 * which don't care about its contents, so it doesn't really matter.
	 */
	buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
	if (!IsBufferCleanupOK(buf_nblkno))
	{
		_hash_relbuf(rel, buf_oblkno);
		_hash_relbuf(rel, buf_nblkno);
		goto fail;
	}

	/*
	 * Since we are scribbling on the pages in the shared buffers, establish
	 * a critical section.  Any failure in this next code leaves us with a
	 * big problem: the metapage is effectively corrupt but could get written
	 * back to disk.
	 */
	START_CRIT_SECTION();

	/*
	 * Okay to proceed with split.  Update the metapage bucket mapping info.
	 */
	metap->hashm_maxbucket = new_bucket;

	if (new_bucket > metap->hashm_highmask)
	{
		/* Starting a new doubling */
		metap->hashm_lowmask = metap->hashm_highmask;
		metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
		metap_update_masks = true;
	}

	/*
	 * If the split point is increasing we need to adjust the hashm_spares[]
	 * array and hashm_ovflpoint so that future overflow pages will be
	 * created beyond this new batch of bucket pages.
	 */
	if (spare_ndx > metap->hashm_ovflpoint)
	{
		metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
		metap->hashm_ovflpoint = spare_ndx;
		metap_update_splitpoint = true;
	}

	MarkBufferDirty(metabuf);

	/*
	 * Copy bucket mapping info now; this saves re-accessing the meta page
	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
	 * split lock, other splits could begin, so these values might be out of
	 * date before _hash_splitbucket finishes.  That's okay, since all it
	 * needs is to tell which of these two buckets to map hashkeys into.
	 */
	maxbucket = metap->hashm_maxbucket;
	highmask = metap->hashm_highmask;
	lowmask = metap->hashm_lowmask;

	opage = BufferGetPage(buf_oblkno);
	oopaque = HashPageGetOpaque(opage);

	/*
	 * Mark the old bucket to indicate that split is in progress.  (At
	 * operation end, we will clear the split-in-progress flag.)  Also, for a
	 * primary bucket page, hasho_prevblkno stores the number of buckets that
	 * existed as of the last split, so we must update that value here.
	 */
	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
	oopaque->hasho_prevblkno = maxbucket;

	MarkBufferDirty(buf_oblkno);

	npage = BufferGetPage(buf_nblkno);

	/*
	 * initialize the new bucket's primary page and mark it to indicate that
	 * split is in progress.
	 */
	nopaque = HashPageGetOpaque(npage);
	nopaque->hasho_prevblkno = maxbucket;
	nopaque->hasho_nextblkno = InvalidBlockNumber;
	nopaque->hasho_bucket = new_bucket;
	nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
	nopaque->hasho_page_id = HASHO_PAGE_ID;

	MarkBufferDirty(buf_nblkno);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_hash_split_allocate_page xlrec;

		xlrec.new_bucket = maxbucket;
		xlrec.old_bucket_flag = oopaque->hasho_flag;
		xlrec.new_bucket_flag = nopaque->hasho_flag;
		xlrec.flags = 0;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
		XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
		XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);

		if (metap_update_masks)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
			XLogRegisterBufData(2, &metap->hashm_lowmask, sizeof(uint32));
			XLogRegisterBufData(2, &metap->hashm_highmask, sizeof(uint32));
		}

		if (metap_update_splitpoint)
		{
			xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
			XLogRegisterBufData(2, &metap->hashm_ovflpoint,
								sizeof(uint32));
			XLogRegisterBufData(2,
								&metap->hashm_spares[metap->hashm_ovflpoint],
								sizeof(uint32));
		}

		XLogRegisterData(&xlrec, SizeOfHashSplitAllocPage);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
	}
	else
		recptr = XLogGetFakeLSN(rel);

	PageSetLSN(BufferGetPage(buf_oblkno), recptr);
	PageSetLSN(BufferGetPage(buf_nblkno), recptr);
	PageSetLSN(BufferGetPage(metabuf), recptr);

	END_CRIT_SECTION();

	/* drop lock, but keep pin */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/* Relocate records to the new bucket */
	_hash_splitbucket(rel, metabuf,
					  old_bucket, new_bucket,
					  buf_oblkno, buf_nblkno, NULL,
					  maxbucket, highmask, lowmask);

	/* all done, now release the pins on primary buckets. */
	_hash_dropbuf(rel, buf_oblkno);
	_hash_dropbuf(rel, buf_nblkno);

	return;

	/* Here if decide not to split or fail to acquire old bucket lock */
fail:

	/* We didn't write the metapage, so just drop lock */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
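
/*
 * Illustrative call pattern (hedged sketch; the actual logic lives in
 * _hash_doinsert in hashinsert.c): after bumping hashm_ntuples the
 * inserter checks the fill factor and, holding only a pin on the metapage,
 * attempts a split.
 *
 *		if (metap->hashm_ntuples >
 *			(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
 *		{
 *			LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);	(keep the pin)
 *			_hash_expandtable(rel, metabuf);
 *		}
 */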

/*
 * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
 *
 * This does not need to initialize the new bucket pages; we'll do that as
 * each one is used by _hash_expandtable().  But we have to extend the logical
 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
 * sync with ours, so that we don't get complaints from smgr.
 *
 * We do this by writing a page of zeroes at the end of the splitpoint range.
 * We expect that the filesystem will ensure that the intervening pages read
 * as zeroes too.  On many filesystems this "hole" will not be allocated
 * immediately, which means that the index file may end up more fragmented
 * than if we forced it all to be allocated now; but since we don't scan
 * hash indexes sequentially anyway, that probably doesn't matter.
 *
 * XXX It's annoying that this code is executed with the metapage lock held.
 * We need to interlock against _hash_addovflpage() adding a new overflow page
 * concurrently, but it'd likely be better to use LockRelationForExtension
 * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
 * so it may not be worth worrying about.
 *
 * Returns true if successful, or false if allocation failed due to
 * BlockNumber overflow.
 */
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
	BlockNumber lastblock;
	PGIOAlignedBlock zerobuf;
	Page		page;
	HashPageOpaque ovflopaque;

	lastblock = firstblock + nblocks - 1;

	/*
	 * Check for overflow in block number calculation; if so, we cannot
	 * extend the index anymore.
	 */
	if (lastblock < firstblock || lastblock == InvalidBlockNumber)
		return false;

	page = (Page) zerobuf.data;

	/*
	 * Initialize the page.  Just zeroing the page won't work; see
	 * _hash_freeovflpage for similar usage.  We take care to make the
	 * special space valid for the benefit of tools such as pageinspect.
	 */
	_hash_pageinit(page, BLCKSZ);

	ovflopaque = HashPageGetOpaque(page);

	ovflopaque->hasho_prevblkno = InvalidBlockNumber;
	ovflopaque->hasho_nextblkno = InvalidBlockNumber;
	ovflopaque->hasho_bucket = InvalidBucket;
	ovflopaque->hasho_flag = LH_UNUSED_PAGE;
	ovflopaque->hasho_page_id = HASHO_PAGE_ID;

	if (RelationNeedsWAL(rel))
		log_newpage(&rel->rd_locator,
					MAIN_FORKNUM,
					lastblock,
					zerobuf.data,
					true);

	PageSetChecksum(page, lastblock);
	smgrextend(RelationGetSmgr(rel), MAIN_FORKNUM, lastblock, zerobuf.data,
			   false);

	return true;
}
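
/*
 * Illustrative arithmetic (not from the original file): once the split
 * point advances past the single-phase groups, each doubling of the bucket
 * count is allocated in four phases.  Going from 1024 to 2048 buckets, for
 * example, happens via four allocations of 256 bucket pages each, with
 * _hash_alloc_buckets() called once per phase and the phase's page count
 * passed in 'nblocks'.
 */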

/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * This routine is used to partition the tuples between old and new bucket
 * and is used to finish incomplete split operations.  To finish a previously
 * interrupted split operation, the caller needs to fill htab.  If htab is
 * set, then we skip the movement of tuples that exist in htab; otherwise a
 * NULL value of htab indicates movement of all the tuples that belong to
 * the new bucket.
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket.
 *
 * The caller must hold cleanup locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 *
 * Split needs to retain pin on primary bucket pages of both old and new
 * buckets till end of operation.  This is to prevent vacuum from starting
 * while a split is in progress.
 *
 * In addition, the caller must have created the new bucket's base page,
 * which is passed in buffer nbuf, pinned and write-locked.  The lock will be
 * released here and pin must be released by the caller.  (The API is set up
 * this way because we must do _hash_getnewbuf() before releasing the
 * metapage write lock.  So instead of passing the new bucket's start block
 * number, we pass an actual buffer.)
 */
static void
_hash_splitbucket(Relation rel,
				  Buffer metabuf,
				  Bucket obucket,
				  Bucket nbucket,
				  Buffer obuf,
				  Buffer nbuf,
				  HTAB *htab,
				  uint32 maxbucket,
				  uint32 highmask,
				  uint32 lowmask)
{
	Buffer		bucket_obuf;
	Buffer		bucket_nbuf;
	Page		opage;
	Page		npage;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
	IndexTuple	itups[MaxIndexTuplesPerPage];
	Size		all_tups_size = 0;
	int			i;
	uint16		nitups = 0;
	XLogRecPtr	recptr;

	bucket_obuf = obuf;
	opage = BufferGetPage(obuf);
	oopaque = HashPageGetOpaque(opage);

	bucket_nbuf = nbuf;
	npage = BufferGetPage(nbuf);
	nopaque = HashPageGetOpaque(npage);

	/* Copy the predicate locks from old bucket to new bucket. */
	PredicateLockPageSplit(rel,
						   BufferGetBlockNumber(bucket_obuf),
						   BufferGetBlockNumber(bucket_nbuf));

	/*
	 * Partition the tuples in the old bucket between the old bucket and the
	 * new bucket, advancing along the old bucket's overflow bucket chain and
	 * adding overflow pages to the new bucket as needed.  Outer loop
	 * iterates once per page in old bucket.
	 */
	for (;;)
	{
		BlockNumber oblkno;
		OffsetNumber ooffnum;
		OffsetNumber omaxoffnum;

		/* Scan each tuple in old page */
		omaxoffnum = PageGetMaxOffsetNumber(opage);
		for (ooffnum = FirstOffsetNumber;
			 ooffnum <= omaxoffnum;
			 ooffnum = OffsetNumberNext(ooffnum))
		{
			IndexTuple	itup;
			Size		itemsz;
			Bucket		bucket;
			bool		found = false;

			/* skip dead tuples */
			if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
				continue;

			/*
			 * Before inserting a tuple, probe the hash table containing TIDs
			 * of tuples belonging to new bucket; if we find a match, then
			 * skip that tuple, else fetch the item's hash key (conveniently
			 * stored in the item) and determine which bucket it now belongs
			 * in.
			 */
			itup = (IndexTuple) PageGetItem(opage,
											PageGetItemId(opage, ooffnum));

			if (htab)
				(void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);

			if (found)
				continue;

			bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
										  maxbucket, highmask, lowmask);

			if (bucket == nbucket)
			{
				IndexTuple	new_itup;

				/*
				 * make a copy of index tuple as we have to scribble on it.
				 */
				new_itup = CopyIndexTuple(itup);

				/*
				 * mark the index tuple as moved by split; such tuples are
				 * skipped by scan if there is split in progress for a
				 * bucket.
				 */
				new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;

				/*
				 * insert the tuple into the new bucket.  if it doesn't fit
				 * on the current page in the new bucket, we must allocate a
				 * new overflow page and place the tuple on that page
				 * instead.
				 */
				itemsz = IndexTupleSize(new_itup);
				itemsz = MAXALIGN(itemsz);

				if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
				{
					/*
					 * Change the shared buffer state in critical section,
					 * otherwise any error could make it unrecoverable.
					 */
					START_CRIT_SECTION();

					_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
					MarkBufferDirty(nbuf);
					/* log the split operation before releasing the lock */
					log_split_page(rel, nbuf);

					END_CRIT_SECTION();

					/* drop lock, but keep pin */
					LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);

					/* be tidy */
					for (i = 0; i < nitups; i++)
						pfree(itups[i]);
					nitups = 0;
					all_tups_size = 0;

					/* chain to a new overflow page */
					nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf));
					npage = BufferGetPage(nbuf);
					nopaque = HashPageGetOpaque(npage);
				}

				itups[nitups++] = new_itup;
				all_tups_size += itemsz;
			}
			else
			{
				/*
				 * the tuple stays on this page, so nothing to do.
				 */
				Assert(bucket == obucket);
			}
		}

		oblkno = oopaque->hasho_nextblkno;

		/* retain the pin on the old primary bucket */
		if (obuf == bucket_obuf)
			LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
		else
			_hash_relbuf(rel, obuf);

		/* Exit loop if no more overflow pages in old bucket */
		if (!BlockNumberIsValid(oblkno))
		{
			/*
			 * Change the shared buffer state in critical section, otherwise
			 * any error could make it unrecoverable.
			 */
			START_CRIT_SECTION();

			_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
			MarkBufferDirty(nbuf);
			/* log the split operation before releasing the lock */
			log_split_page(rel, nbuf);

			END_CRIT_SECTION();

			if (nbuf == bucket_nbuf)
				LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
			else
				_hash_relbuf(rel, nbuf);

			/* be tidy */
			for (i = 0; i < nitups; i++)
				pfree(itups[i]);
			break;
		}

		/* Else, advance to next old page */
		obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
		opage = BufferGetPage(obuf);
		oopaque = HashPageGetOpaque(opage);
	}

	/*
	 * We're at the end of the old bucket chain, so we're done partitioning
	 * the tuples.  Mark the old and new buckets to indicate split is
	 * finished.
	 *
	 * To avoid deadlocks due to locking order of buckets, first lock the old
	 * bucket and then the new bucket.
	 */
	LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
	opage = BufferGetPage(bucket_obuf);
	oopaque = HashPageGetOpaque(opage);

	LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
	npage = BufferGetPage(bucket_nbuf);
	nopaque = HashPageGetOpaque(npage);

	START_CRIT_SECTION();

	oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
	nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;

	/*
	 * After the split is finished, mark the old bucket to indicate that it
	 * contains deletable tuples.  We will clear split-cleanup flag after
	 * deleting such tuples either at the end of split or at the next split
	 * from old bucket or at the time of vacuum.
	 */
	oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;

	/*
	 * now write the buffers; here we don't release the locks as caller is
	 * responsible to release locks.
	 */
	MarkBufferDirty(bucket_obuf);
	MarkBufferDirty(bucket_nbuf);

	if (RelationNeedsWAL(rel))
	{
		xl_hash_split_complete xlrec;

		xlrec.old_bucket_flag = oopaque->hasho_flag;
		xlrec.new_bucket_flag = nopaque->hasho_flag;

		XLogBeginInsert();

		XLogRegisterData(&xlrec, SizeOfHashSplitComplete);

		XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
		XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
	}
	else
		recptr = XLogGetFakeLSN(rel);

	PageSetLSN(BufferGetPage(bucket_obuf), recptr);
	PageSetLSN(BufferGetPage(bucket_nbuf), recptr);

	END_CRIT_SECTION();

	/*
	 * If possible, clean up the old bucket.  We might not be able to do this
	 * if someone else has a pin on it, but if not then we can go ahead.
	 * This isn't absolutely necessary, but it reduces bloat; if we don't do
	 * it now, VACUUM will do it eventually, but maybe not until new overflow
	 * pages have been allocated.  Note that there's no need to clean up the
	 * new bucket.
	 */
	if (IsBufferCleanupOK(bucket_obuf))
	{
		LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
		hashbucketcleanup(rel, obucket, bucket_obuf,
						  BufferGetBlockNumber(bucket_obuf), NULL,
						  maxbucket, highmask, lowmask, NULL, NULL, true,
						  NULL, NULL);
	}
	else
	{
		LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
	}
}
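
/*
 * Illustrative arithmetic (not from the original file): with lowmask = 3
 * and highmask = 7, splitting bucket 1 into bucket 5 moves exactly those
 * tuples whose hashkey & 7 == 5; tuples with hashkey & 7 == 1 stay put.
 * _hash_hashkey2bucket() encapsulates this masking.
 */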

/*
 *	_hash_finish_split() -- Finish the previously interrupted split operation
 *
 * To complete the split operation, we form the hash table of TIDs in new
 * bucket which is then used by split operation to skip tuples that are
 * already moved before the split operation was previously interrupted.
 *
 * The caller must hold a pin, but no lock, on the metapage and old bucket's
 * primary page buffer.  The buffers are returned in the same state.  (The
 * metapage is only touched if it becomes necessary to add or remove overflow
 * pages.)
 */
void
_hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
				   uint32 maxbucket, uint32 highmask, uint32 lowmask)
{
	HASHCTL		hash_ctl;
	HTAB	   *tidhtab;
	Buffer		bucket_nbuf = InvalidBuffer;
	Buffer		nbuf;
	Page		npage;
	BlockNumber nblkno;
	BlockNumber bucket_nblkno;
	HashPageOpaque npageopaque;
	Bucket		nbucket;
	bool		found;

	/* Initialize hash tables used to track TIDs */
	hash_ctl.keysize = sizeof(ItemPointerData);
	hash_ctl.entrysize = sizeof(ItemPointerData);
	hash_ctl.hcxt = CurrentMemoryContext;

	tidhtab =
		hash_create("bucket ctids",
					256,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);

	/*
	 * Scan the new bucket and build hash table of TIDs
	 */
	for (;;)
	{
		OffsetNumber noffnum;
		OffsetNumber nmaxoffnum;

		nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
							LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);

		/* remember the primary bucket buffer to acquire cleanup lock on it. */
		if (nblkno == bucket_nblkno)
			bucket_nbuf = nbuf;

		npage = BufferGetPage(nbuf);
		npageopaque = HashPageGetOpaque(npage);

		/* Scan each tuple in new page */
		nmaxoffnum = PageGetMaxOffsetNumber(npage);
		for (noffnum = FirstOffsetNumber;
			 noffnum <= nmaxoffnum;
			 noffnum = OffsetNumberNext(noffnum))
		{
			IndexTuple	itup;

			/* Fetch the item's TID and insert it in hash table. */
			itup = (IndexTuple) PageGetItem(npage,
											PageGetItemId(npage, noffnum));

			(void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);

			Assert(!found);
		}

		nblkno = npageopaque->hasho_nextblkno;

		/*
		 * release our write lock without modifying buffer and ensure to
		 * retain the pin on primary bucket.
		 */
		if (nbuf == bucket_nbuf)
			LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
		else
			_hash_relbuf(rel, nbuf);

		/* Exit loop if no more overflow pages in new bucket */
		if (!BlockNumberIsValid(nblkno))
			break;
	}

	/*
	 * Conditionally get the cleanup lock on old and new buckets to perform
	 * the split operation.  If we don't get the cleanup locks, silently give
	 * up and next insertion on old bucket will try again to complete the
	 * split.
	 */
	if (!ConditionalLockBufferForCleanup(obuf))
	{
		hash_destroy(tidhtab);
		return;
	}
	if (!ConditionalLockBufferForCleanup(bucket_nbuf))
	{
		LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
		hash_destroy(tidhtab);
		return;
	}

	npage = BufferGetPage(bucket_nbuf);
	npageopaque = HashPageGetOpaque(npage);
	nbucket = npageopaque->hasho_bucket;

	_hash_splitbucket(rel, metabuf, obucket,
					  nbucket, obuf, bucket_nbuf, tidhtab,
					  maxbucket, highmask, lowmask);

	_hash_dropbuf(rel, bucket_nbuf);
	hash_destroy(tidhtab);
}

/*
 *	log_split_page() -- Log the split operation
 *
 * We log the split operation when the new page in new bucket gets full,
 * so we log the entire page.
 *
 * 'buf' must be locked by the caller which is also responsible for unlocking
 * it.
 */
static void
log_split_page(Relation rel, Buffer buf)
{
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);

		PageSetLSN(BufferGetPage(buf), recptr);
	}
}

/*
 *	_hash_getcachedmetap() -- Returns cached metapage data.
 *
 *	If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
 *	the metapage.  If not set, we'll set it before returning if we have to
 *	refresh the cache, and return with a pin but no lock on it; caller is
 *	responsible for releasing the pin.
 *
 *	We refresh the cache if it's not initialized yet or force_refresh is true.
 */
HashMetaPage
_hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
{
	Page		page;

	Assert(metabuf);
	if (force_refresh || rel->rd_amcache == NULL)
	{
		char	   *cache = NULL;

		/*
		 * It's important that we don't set rd_amcache to an invalid value.
		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
		 * install a pointer to the newly-allocated storage in the actual
		 * relcache entry until both have succeeded.
		 */
		if (rel->rd_amcache == NULL)
			cache = MemoryContextAlloc(rel->rd_indexcxt,
									   sizeof(HashMetaPageData));

		/* Read the metapage. */
		if (BufferIsValid(*metabuf))
			LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
		else
			*metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
									LH_META_PAGE);
		page = BufferGetPage(*metabuf);

		/* Populate the cache. */
		if (rel->rd_amcache == NULL)
			rel->rd_amcache = cache;
		memcpy(rel->rd_amcache, HashPageGetMeta(page),
			   sizeof(HashMetaPageData));

		/* Release metapage lock, but keep the pin. */
		LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
	}

	return (HashMetaPage) rel->rd_amcache;
}
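
/*
 * Illustrative sketch (hypothetical caller): fetching cached metapage
 * contents without paying a buffer access on every lookup.
 *
 *		Buffer		metabuf = InvalidBuffer;
 *		HashMetaPage metap = _hash_getcachedmetap(rel, &metabuf, false);
 *
 *		... use metap->hashm_maxbucket etc. ...
 *
 *		if (BufferIsValid(metabuf))
 *			_hash_dropbuf(rel, metabuf);
 */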

/*
 *	_hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the
 *										 given hashkey.
 *
 *	Bucket pages do not move or get removed once they are allocated.  This
 *	gives us an opportunity to use the previously saved metapage contents to
 *	reach the target bucket buffer, instead of reading from the metapage
 *	every time.  This saves one buffer access every time we want to reach
 *	the target bucket buffer, which is a very helpful saving in bufmgr
 *	traffic and contention.
 *
 *	The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
 *	bucket buffer has to be locked for reading or writing.
 *
 *	The out parameter cachedmetap is set with metapage contents used for
 *	hashkey to bucket buffer mapping.  Some callers need this info to reach
 *	the old bucket in case of bucket split, see _hash_doinsert().
 */
Buffer
_hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
								HashMetaPage *cachedmetap)
{
	HashMetaPage metap;
	Buffer		buf;
	Buffer		metabuf = InvalidBuffer;
	Page		page;
	Bucket		bucket;
	BlockNumber blkno;
	HashPageOpaque opaque;

	/* We read from target bucket buffer, hence locking is must. */
	Assert(access == HASH_READ || access == HASH_WRITE);

	metap = _hash_getcachedmetap(rel, &metabuf, false);
	Assert(metap != NULL);

	/*
	 * Loop until we get a lock on the correct target bucket.
	 */
	for (;;)
	{
		/*
		 * Compute the target bucket number, and convert to block number.
		 */
		bucket = _hash_hashkey2bucket(hashkey,
									  metap->hashm_maxbucket,
									  metap->hashm_highmask,
									  metap->hashm_lowmask);

		blkno = BUCKET_TO_BLKNO(metap, bucket);

		/* Fetch the primary bucket page for the bucket */
		buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
		page = BufferGetPage(buf);
		opaque = HashPageGetOpaque(page);
		Assert(opaque->hasho_bucket == bucket);
		Assert(opaque->hasho_prevblkno != InvalidBlockNumber);

		/*
		 * If this bucket hasn't been split, we're done.
		 */
		if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
			break;

		/* Drop lock on this buffer, update cached metapage, and retry. */
		_hash_relbuf(rel, buf);
		metap = _hash_getcachedmetap(rel, &metabuf, true);
		Assert(metap != NULL);
	}

	if (BufferIsValid(metabuf))
		_hash_dropbuf(rel, metabuf);

	if (cachedmetap)
		*cachedmetap = metap;

	return buf;
}
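
/*
 * Illustrative sketch (hypothetical caller): the typical lookup path.
 *
 *		HashMetaPage metap;
 *		Buffer		buf;
 *
 *		buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ,
 *											  &metap);
 *		... scan the bucket chain ...
 *		_hash_relbuf(rel, buf);
 *
 * The retry loop above relies on a primary bucket page's hasho_prevblkno
 * storing the hashm_maxbucket value as of the last split of that bucket,
 * which is what _hash_initbuf() records.
 */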