PostgreSQL Source Code (git master)
hashsearch.c File Reference
#include "postgres.h"
#include "access/hash.h"
#include "access/relscan.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/predicate.h"
#include "utils/rel.h"
Include dependency graph for hashsearch.c:

Go to the source code of this file.

Functions

static bool _hash_readpage (IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
 
static int _hash_load_qualified_items (IndexScanDesc scan, Page page, OffsetNumber offnum, ScanDirection dir)
 
static void _hash_saveitem (HashScanOpaque so, int itemIndex, OffsetNumber offnum, IndexTuple itup)
 
static void _hash_readnext (IndexScanDesc scan, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
 
bool _hash_next (IndexScanDesc scan, ScanDirection dir)
 
static void _hash_readprev (IndexScanDesc scan, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
 
bool _hash_first (IndexScanDesc scan, ScanDirection dir)
 

Function Documentation

◆ _hash_first()

bool _hash_first ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 289 of file hashsearch.c.

290{
291 Relation rel = scan->indexRelation;
293 ScanKey cur;
296 Buffer buf;
297 Page page;
298 HashPageOpaque opaque;
300
302 if (scan->instrument)
303 scan->instrument->nsearches++;
304
305 /*
306 * We do not support hash scans with no index qualification, because we
307 * would have to read the whole index rather than just one bucket. That
308 * creates a whole raft of problems, since we haven't got a practical way
309 * to lock all the buckets against splits or compactions.
310 */
311 if (scan->numberOfKeys < 1)
314 errmsg("hash indexes do not support whole-index scans")));
315
316 /* There may be more than one index qual, but we hash only the first */
317 cur = &scan->keyData[0];
318
319 /* We support only single-column hash indexes */
320 Assert(cur->sk_attno == 1);
321 /* And there's only one operator strategy, too */
322 Assert(cur->sk_strategy == HTEqualStrategyNumber);
323
324 /*
325 * If the constant in the index qual is NULL, assume it cannot match any
326 * items in the index.
327 */
328 if (cur->sk_flags & SK_ISNULL)
329 return false;
330
331 /*
332 * Okay to compute the hash key. We want to do this before acquiring any
333 * locks, in case a user-defined hash function happens to be slow.
334 *
335 * If scankey operator is not a cross-type comparison, we can use the
336 * cached hash function; otherwise gotta look it up in the catalogs.
337 *
338 * We support the convention that sk_subtype == InvalidOid means the
339 * opclass input type; this is a hack to simplify life for ScanKeyInit().
340 */
341 if (cur->sk_subtype == rel->rd_opcintype[0] ||
342 cur->sk_subtype == InvalidOid)
343 hashkey = _hash_datum2hashkey(rel, cur->sk_argument);
344 else
345 hashkey = _hash_datum2hashkey_type(rel, cur->sk_argument,
346 cur->sk_subtype);
347
348 so->hashso_sk_hash = hashkey;
349
352 page = BufferGetPage(buf);
353 opaque = HashPageGetOpaque(page);
354 bucket = opaque->hasho_bucket;
355
356 so->hashso_bucket_buf = buf;
357
358 /*
359 * If a bucket split is in progress, then while scanning the bucket being
360 * populated, we need to skip tuples that were copied from bucket being
361 * split. We also need to maintain a pin on the bucket being split to
362 * ensure that split-cleanup work done by vacuum doesn't remove tuples
363 * from it till this scan is done. We need to maintain a pin on the
364 * bucket being populated to ensure that vacuum doesn't squeeze that
365 * bucket till this scan is complete; otherwise, the ordering of tuples
366 * can't be maintained during forward and backward scans. Here, we have
367 * to be cautious about locking order: first, acquire the lock on bucket
368 * being split; then, release the lock on it but not the pin; then,
369 * acquire a lock on bucket being populated and again re-verify whether
370 * the bucket split is still in progress. Acquiring the lock on bucket
371 * being split first ensures that the vacuum waits for this scan to
372 * finish.
373 */
374 if (H_BUCKET_BEING_POPULATED(opaque))
375 {
378
380
381 /*
382 * release the lock on new bucket and re-acquire it after acquiring
383 * the lock on old bucket.
384 */
386
388
389 /*
390 * remember the split bucket buffer so as to use it later for
391 * scanning.
392 */
393 so->hashso_split_bucket_buf = old_buf;
395
397 page = BufferGetPage(buf);
398 opaque = HashPageGetOpaque(page);
399 Assert(opaque->hasho_bucket == bucket);
400
401 if (H_BUCKET_BEING_POPULATED(opaque))
402 so->hashso_buc_populated = true;
403 else
404 {
405 _hash_dropbuf(rel, so->hashso_split_bucket_buf);
406 so->hashso_split_bucket_buf = InvalidBuffer;
407 }
408 }
409
410 /* If a backwards scan is requested, move to the end of the chain */
412 {
413 /*
414 * Backward scans that start during split needs to start from end of
415 * bucket being split.
416 */
417 while (BlockNumberIsValid(opaque->hasho_nextblkno) ||
418 (so->hashso_buc_populated && !so->hashso_buc_split))
419 _hash_readnext(scan, &buf, &page, &opaque);
420 }
421
422 /* remember which buffer we have pinned, if any */
423 Assert(BufferIsInvalid(so->currPos.buf));
424 so->currPos.buf = buf;
425
426 /* Now find all the tuples satisfying the qualification from a page */
427 if (!_hash_readpage(scan, &buf, dir))
428 return false;
429
430 /* OK, itemIndex says what to return */
431 currItem = &so->currPos.items[so->currPos.itemIndex];
432 scan->xs_heaptid = currItem->heapTid;
433
434 /* if we're here, _hash_readpage found a valid tuples */
435 return true;
436}
uint32 BlockNumber
Definition block.h:31
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition block.h:71
int Buffer
Definition buf.h:23
#define BufferIsInvalid(buffer)
Definition buf.h:31
#define InvalidBuffer
Definition buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4356
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:466
@ BUFFER_LOCK_SHARE
Definition bufmgr.h:210
@ BUFFER_LOCK_UNLOCK
Definition bufmgr.h:205
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:328
PageData * Page
Definition bufpage.h:81
#define Assert(condition)
Definition c.h:873
uint32_t uint32
Definition c.h:546
struct cursor * cur
Definition ecpg.c:29
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define ERROR
Definition elog.h:39
#define ereport(elevel,...)
Definition elog.h:150
#define HashPageGetOpaque(page)
Definition hash.h:88
#define LH_BUCKET_PAGE
Definition hash.h:55
#define HASH_READ
Definition hash.h:339
#define H_BUCKET_BEING_POPULATED(opaque)
Definition hash.h:92
uint32 Bucket
Definition hash.h:35
HashScanOpaqueData * HashScanOpaque
Definition hash.h:192
void _hash_dropbuf(Relation rel, Buffer buf)
Definition hashpage.c:277
Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
Definition hashpage.c:70
Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access, HashMetaPage *cachedmetap)
Definition hashpage.c:1559
static void _hash_readnext(IndexScanDesc scan, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
Definition hashsearch.c:132
static bool _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
Definition hashsearch.c:449
uint32 _hash_datum2hashkey(Relation rel, Datum key)
Definition hashutil.c:82
BlockNumber _hash_get_oldblock_from_newbucket(Relation rel, Bucket new_bucket)
Definition hashutil.c:422
uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype)
Definition hashutil.c:102
static char buf[DEFAULT_XLOG_SEG_SIZE]
#define pgstat_count_index_scan(rel)
Definition pgstat.h:705
#define InvalidOid
void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
Definition predicate.c:2597
static int fb(int x)
#define ScanDirectionIsBackward(direction)
Definition sdir.h:50
#define SK_ISNULL
Definition skey.h:115
#define HTEqualStrategyNumber
Definition stratnum.h:41
BlockNumber hasho_nextblkno
Definition hash.h:80
Bucket hasho_bucket
Definition hash.h:81
struct ScanKeyData * keyData
Definition relscan.h:142
struct IndexScanInstrumentation * instrument
Definition relscan.h:160
Relation indexRelation
Definition relscan.h:138
ItemPointerData xs_heaptid
Definition relscan.h:173
struct SnapshotData * xs_snapshot
Definition relscan.h:139

References _hash_datum2hashkey(), _hash_datum2hashkey_type(), _hash_dropbuf(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getbuf(), _hash_readnext(), _hash_readpage(), Assert, BlockNumberIsValid(), buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsInvalid, cur, ereport, errcode(), errmsg(), ERROR, fb(), H_BUCKET_BEING_POPULATED, HASH_READ, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_nextblkno, HashPageGetOpaque, HTEqualStrategyNumber, IndexScanDescData::indexRelation, IndexScanDescData::instrument, InvalidBuffer, InvalidOid, IndexScanDescData::keyData, LH_BUCKET_PAGE, LockBuffer(), IndexScanInstrumentation::nsearches, IndexScanDescData::numberOfKeys, IndexScanDescData::opaque, pgstat_count_index_scan, PredicateLockPage(), RelationData::rd_opcintype, ScanDirectionIsBackward, SK_ISNULL, IndexScanDescData::xs_heaptid, and IndexScanDescData::xs_snapshot.

Referenced by hashgetbitmap(), and hashgettuple().

◆ _hash_load_qualified_items()

static int _hash_load_qualified_items ( IndexScanDesc  scan,
Page  page,
OffsetNumber  offnum,
ScanDirection  dir 
)
static

Definition at line 605 of file hashsearch.c.

607{
609 IndexTuple itup;
610 int itemIndex;
611 OffsetNumber maxoff;
612
613 maxoff = PageGetMaxOffsetNumber(page);
614
615 if (ScanDirectionIsForward(dir))
616 {
617 /* load items[] in ascending order */
618 itemIndex = 0;
619
620 while (offnum <= maxoff)
621 {
622 Assert(offnum >= FirstOffsetNumber);
623 itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
624
625 /*
626 * skip the tuples that are moved by split operation for the scan
627 * that has started when split was in progress. Also, skip the
628 * tuples that are marked as dead.
629 */
630 if ((so->hashso_buc_populated && !so->hashso_buc_split &&
632 (scan->ignore_killed_tuples &&
633 (ItemIdIsDead(PageGetItemId(page, offnum)))))
634 {
635 offnum = OffsetNumberNext(offnum); /* move forward */
636 continue;
637 }
638
639 if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) &&
640 _hash_checkqual(scan, itup))
641 {
642 /* tuple is qualified, so remember it */
643 _hash_saveitem(so, itemIndex, offnum, itup);
644 itemIndex++;
645 }
646 else
647 {
648 /*
649 * No more matching tuples exist in this page. so, exit while
650 * loop.
651 */
652 break;
653 }
654
655 offnum = OffsetNumberNext(offnum);
656 }
657
658 Assert(itemIndex <= MaxIndexTuplesPerPage);
659 return itemIndex;
660 }
661 else
662 {
663 /* load items[] in descending order */
664 itemIndex = MaxIndexTuplesPerPage;
665
666 while (offnum >= FirstOffsetNumber)
667 {
668 Assert(offnum <= maxoff);
669 itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
670
671 /*
672 * skip the tuples that are moved by split operation for the scan
673 * that has started when split was in progress. Also, skip the
674 * tuples that are marked as dead.
675 */
676 if ((so->hashso_buc_populated && !so->hashso_buc_split &&
678 (scan->ignore_killed_tuples &&
679 (ItemIdIsDead(PageGetItemId(page, offnum)))))
680 {
681 offnum = OffsetNumberPrev(offnum); /* move back */
682 continue;
683 }
684
685 if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) &&
686 _hash_checkqual(scan, itup))
687 {
688 itemIndex--;
689 /* tuple is qualified, so remember it */
690 _hash_saveitem(so, itemIndex, offnum, itup);
691 }
692 else
693 {
694 /*
695 * No more matching tuples exist in this page. so, exit while
696 * loop.
697 */
698 break;
699 }
700
701 offnum = OffsetNumberPrev(offnum);
702 }
703
704 Assert(itemIndex >= 0);
705 return itemIndex;
706 }
707}
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:243
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:353
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:371
#define INDEX_MOVED_BY_SPLIT_MASK
Definition hash.h:293
static void _hash_saveitem(HashScanOpaque so, int itemIndex, OffsetNumber offnum, IndexTuple itup)
Definition hashsearch.c:711
uint32 _hash_get_indextuple_hashkey(IndexTuple itup)
Definition hashutil.c:291
bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup)
Definition hashutil.c:31
#define ItemIdIsDead(itemId)
Definition itemid.h:113
IndexTupleData * IndexTuple
Definition itup.h:53
#define MaxIndexTuplesPerPage
Definition itup.h:181
#define OffsetNumberNext(offsetNumber)
Definition off.h:52
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
#define OffsetNumberPrev(offsetNumber)
Definition off.h:54
#define ScanDirectionIsForward(direction)
Definition sdir.h:64
bool ignore_killed_tuples
Definition relscan.h:149
unsigned short t_info
Definition itup.h:49

References _hash_checkqual(), _hash_get_indextuple_hashkey(), _hash_saveitem(), Assert, fb(), FirstOffsetNumber, IndexScanDescData::ignore_killed_tuples, INDEX_MOVED_BY_SPLIT_MASK, ItemIdIsDead, MaxIndexTuplesPerPage, OffsetNumberNext, OffsetNumberPrev, IndexScanDescData::opaque, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), ScanDirectionIsForward, and IndexTupleData::t_info.

Referenced by _hash_readpage().

◆ _hash_next()

bool _hash_next ( IndexScanDesc  scan,
ScanDirection  dir 
)

Definition at line 49 of file hashsearch.c.

50{
51 Relation rel = scan->indexRelation;
54 BlockNumber blkno;
55 Buffer buf;
56 bool end_of_scan = false;
57
58 /*
59 * Advance to the next tuple on the current page; or if done, try to read
60 * data from the next or previous page based on the scan direction. Before
61 * moving to the next or previous page make sure that we deal with all the
62 * killed items.
63 */
65 {
66 if (++so->currPos.itemIndex > so->currPos.lastItem)
67 {
68 if (so->numKilled > 0)
69 _hash_kill_items(scan);
70
71 blkno = so->currPos.nextPage;
72 if (BlockNumberIsValid(blkno))
73 {
75 if (!_hash_readpage(scan, &buf, dir))
76 end_of_scan = true;
77 }
78 else
79 end_of_scan = true;
80 }
81 }
82 else
83 {
84 if (--so->currPos.itemIndex < so->currPos.firstItem)
85 {
86 if (so->numKilled > 0)
87 _hash_kill_items(scan);
88
89 blkno = so->currPos.prevPage;
90 if (BlockNumberIsValid(blkno))
91 {
92 buf = _hash_getbuf(rel, blkno, HASH_READ,
94
95 /*
96 * We always maintain the pin on bucket page for whole scan
97 * operation, so releasing the additional pin we have acquired
98 * here.
99 */
100 if (buf == so->hashso_bucket_buf ||
101 buf == so->hashso_split_bucket_buf)
102 _hash_dropbuf(rel, buf);
103
104 if (!_hash_readpage(scan, &buf, dir))
105 end_of_scan = true;
106 }
107 else
108 end_of_scan = true;
109 }
110 }
111
112 if (end_of_scan)
113 {
114 _hash_dropscanbuf(rel, so);
115 HashScanPosInvalidate(so->currPos);
116 return false;
117 }
118
119 /* OK, itemIndex says what to return */
120 currItem = &so->currPos.items[so->currPos.itemIndex];
121 scan->xs_heaptid = currItem->heapTid;
122
123 return true;
124}
#define HashScanPosInvalidate(scanpos)
Definition hash.h:144
#define LH_OVERFLOW_PAGE
Definition hash.h:54
void _hash_dropscanbuf(Relation rel, HashScanOpaque so)
Definition hashpage.c:289
void _hash_kill_items(IndexScanDesc scan)
Definition hashutil.c:536

References _hash_dropbuf(), _hash_dropscanbuf(), _hash_getbuf(), _hash_kill_items(), _hash_readpage(), BlockNumberIsValid(), buf, fb(), HASH_READ, HashScanPosInvalidate, IndexScanDescData::indexRelation, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, IndexScanDescData::opaque, ScanDirectionIsForward, and IndexScanDescData::xs_heaptid.

Referenced by hashgetbitmap(), and hashgettuple().

◆ _hash_readnext()

static void _hash_readnext ( IndexScanDesc  scan,
Buffer bufp,
Page pagep,
HashPageOpaque opaquep 
)
static

Definition at line 132 of file hashsearch.c.

134{
135 BlockNumber blkno;
136 Relation rel = scan->indexRelation;
138 bool block_found = false;
139
140 blkno = (*opaquep)->hasho_nextblkno;
141
142 /*
143 * Retain the pin on primary bucket page till the end of scan. Refer the
144 * comments in _hash_first to know the reason of retaining pin.
145 */
146 if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
148 else
149 _hash_relbuf(rel, *bufp);
150
152 /* check for interrupts while we're not holding any buffer lock */
154 if (BlockNumberIsValid(blkno))
155 {
157 block_found = true;
158 }
159 else if (so->hashso_buc_populated && !so->hashso_buc_split)
160 {
161 /*
162 * end of bucket, scan bucket being split if there was a split in
163 * progress at the start of scan.
164 */
165 *bufp = so->hashso_split_bucket_buf;
166
167 /*
168 * buffer for bucket being split must be valid as we acquire the pin
169 * on it before the start of scan and retain it till end of scan.
170 */
172
175
176 /*
177 * setting hashso_buc_split to true indicates that we are scanning
178 * bucket being split.
179 */
180 so->hashso_buc_split = true;
181
182 block_found = true;
183 }
184
185 if (block_found)
186 {
189 }
190}
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:417
void _hash_relbuf(Relation rel, Buffer buf)
Definition hashpage.c:266
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123

References _hash_getbuf(), _hash_relbuf(), Assert, BlockNumberIsValid(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CHECK_FOR_INTERRUPTS, fb(), HASH_READ, HashPageGetOpaque, IndexScanDescData::indexRelation, InvalidBuffer, LH_OVERFLOW_PAGE, LockBuffer(), IndexScanDescData::opaque, PredicateLockPage(), and IndexScanDescData::xs_snapshot.

Referenced by _hash_first(), _hash_readpage(), and _hash_readprev().

◆ _hash_readpage()

static bool _hash_readpage ( IndexScanDesc  scan,
Buffer bufP,
ScanDirection  dir 
)
static

Definition at line 449 of file hashsearch.c.

450{
451 Relation rel = scan->indexRelation;
453 Buffer buf;
454 Page page;
455 HashPageOpaque opaque;
456 OffsetNumber offnum;
457 uint16 itemIndex;
458
459 buf = *bufP;
462 page = BufferGetPage(buf);
463 opaque = HashPageGetOpaque(page);
464
465 so->currPos.buf = buf;
466 so->currPos.currPage = BufferGetBlockNumber(buf);
467
468 if (ScanDirectionIsForward(dir))
469 {
471
472 for (;;)
473 {
474 /* new page, locate starting position by binary search */
475 offnum = _hash_binsearch(page, so->hashso_sk_hash);
476
477 itemIndex = _hash_load_qualified_items(scan, page, offnum, dir);
478
479 if (itemIndex != 0)
480 break;
481
482 /*
483 * Could not find any matching tuples in the current page, move to
484 * the next page. Before leaving the current page, deal with any
485 * killed items.
486 */
487 if (so->numKilled > 0)
488 _hash_kill_items(scan);
489
490 /*
491 * If this is a primary bucket page, hasho_prevblkno is not a real
492 * block number.
493 */
494 if (so->currPos.buf == so->hashso_bucket_buf ||
495 so->currPos.buf == so->hashso_split_bucket_buf)
497 else
498 prev_blkno = opaque->hasho_prevblkno;
499
500 _hash_readnext(scan, &buf, &page, &opaque);
501 if (BufferIsValid(buf))
502 {
503 so->currPos.buf = buf;
504 so->currPos.currPage = BufferGetBlockNumber(buf);
505 }
506 else
507 {
508 /*
509 * Remember next and previous block numbers for scrollable
510 * cursors to know the start position and return false
511 * indicating that no more matching tuples were found. Also,
512 * don't reset currPage or lsn, because we expect
513 * _hash_kill_items to be called for the old page after this
514 * function returns.
515 */
516 so->currPos.prevPage = prev_blkno;
517 so->currPos.nextPage = InvalidBlockNumber;
518 so->currPos.buf = buf;
519 return false;
520 }
521 }
522
523 so->currPos.firstItem = 0;
524 so->currPos.lastItem = itemIndex - 1;
525 so->currPos.itemIndex = 0;
526 }
527 else
528 {
530
531 for (;;)
532 {
533 /* new page, locate starting position by binary search */
534 offnum = _hash_binsearch_last(page, so->hashso_sk_hash);
535
536 itemIndex = _hash_load_qualified_items(scan, page, offnum, dir);
537
538 if (itemIndex != MaxIndexTuplesPerPage)
539 break;
540
541 /*
542 * Could not find any matching tuples in the current page, move to
543 * the previous page. Before leaving the current page, deal with
544 * any killed items.
545 */
546 if (so->numKilled > 0)
547 _hash_kill_items(scan);
548
549 if (so->currPos.buf == so->hashso_bucket_buf ||
550 so->currPos.buf == so->hashso_split_bucket_buf)
551 next_blkno = opaque->hasho_nextblkno;
552
553 _hash_readprev(scan, &buf, &page, &opaque);
554 if (BufferIsValid(buf))
555 {
556 so->currPos.buf = buf;
557 so->currPos.currPage = BufferGetBlockNumber(buf);
558 }
559 else
560 {
561 /*
562 * Remember next and previous block numbers for scrollable
563 * cursors to know the start position and return false
564 * indicating that no more matching tuples were found. Also,
565 * don't reset currPage or lsn, because we expect
566 * _hash_kill_items to be called for the old page after this
567 * function returns.
568 */
569 so->currPos.prevPage = InvalidBlockNumber;
570 so->currPos.nextPage = next_blkno;
571 so->currPos.buf = buf;
572 return false;
573 }
574 }
575
576 so->currPos.firstItem = itemIndex;
577 so->currPos.lastItem = MaxIndexTuplesPerPage - 1;
578 so->currPos.itemIndex = MaxIndexTuplesPerPage - 1;
579 }
580
581 if (so->currPos.buf == so->hashso_bucket_buf ||
582 so->currPos.buf == so->hashso_split_bucket_buf)
583 {
584 so->currPos.prevPage = InvalidBlockNumber;
585 so->currPos.nextPage = opaque->hasho_nextblkno;
586 LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
587 }
588 else
589 {
590 so->currPos.prevPage = opaque->hasho_prevblkno;
591 so->currPos.nextPage = opaque->hasho_nextblkno;
592 _hash_relbuf(rel, so->currPos.buf);
593 so->currPos.buf = InvalidBuffer;
594 }
595
596 Assert(so->currPos.firstItem <= so->currPos.lastItem);
597 return true;
598}
#define InvalidBlockNumber
Definition block.h:33
uint16_t uint16
Definition c.h:545
static void _hash_readprev(IndexScanDesc scan, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
Definition hashsearch.c:198
static int _hash_load_qualified_items(IndexScanDesc scan, Page page, OffsetNumber offnum, ScanDirection dir)
Definition hashsearch.c:605
OffsetNumber _hash_binsearch(Page page, uint32 hash_value)
Definition hashutil.c:350
OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value)
Definition hashutil.c:388
void _hash_checkpage(Relation rel, Buffer buf, int flags)
Definition hashutil.c:210
BlockNumber hasho_prevblkno
Definition hash.h:79

References _hash_binsearch(), _hash_binsearch_last(), _hash_checkpage(), _hash_kill_items(), _hash_load_qualified_items(), _hash_readnext(), _hash_readprev(), _hash_relbuf(), Assert, buf, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), fb(), HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_prevblkno, HashPageGetOpaque, IndexScanDescData::indexRelation, InvalidBlockNumber, InvalidBuffer, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LockBuffer(), MaxIndexTuplesPerPage, IndexScanDescData::opaque, and ScanDirectionIsForward.

Referenced by _hash_first(), and _hash_next().

◆ _hash_readprev()

static void _hash_readprev ( IndexScanDesc  scan,
Buffer bufp,
Page pagep,
HashPageOpaque opaquep 
)
static

Definition at line 198 of file hashsearch.c.

200{
201 BlockNumber blkno;
202 Relation rel = scan->indexRelation;
204 bool haveprevblk;
205
206 blkno = (*opaquep)->hasho_prevblkno;
207
208 /*
209 * Retain the pin on primary bucket page till the end of scan. Refer the
210 * comments in _hash_first to know the reason of retaining pin.
211 */
212 if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
213 {
215 haveprevblk = false;
216 }
217 else
218 {
219 _hash_relbuf(rel, *bufp);
220 haveprevblk = true;
221 }
222
224 /* check for interrupts while we're not holding any buffer lock */
226
227 if (haveprevblk)
228 {
230 *bufp = _hash_getbuf(rel, blkno, HASH_READ,
234
235 /*
236 * We always maintain the pin on bucket page for whole scan operation,
237 * so releasing the additional pin we have acquired here.
238 */
239 if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
240 _hash_dropbuf(rel, *bufp);
241 }
242 else if (so->hashso_buc_populated && so->hashso_buc_split)
243 {
244 /*
245 * end of bucket, scan bucket being populated if there was a split in
246 * progress at the start of scan.
247 */
248 *bufp = so->hashso_bucket_buf;
249
250 /*
251 * buffer for bucket being populated must be valid as we acquire the
252 * pin on it before the start of scan and retain it till end of scan.
253 */
255
259
260 /* move to the end of bucket chain */
261 while (BlockNumberIsValid((*opaquep)->hasho_nextblkno))
263
264 /*
265 * setting hashso_buc_split to false indicates that we are scanning
266 * bucket being populated.
267 */
268 so->hashso_buc_split = false;
269 }
270}

References _hash_dropbuf(), _hash_getbuf(), _hash_readnext(), _hash_relbuf(), Assert, BlockNumberIsValid(), BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), CHECK_FOR_INTERRUPTS, fb(), HASH_READ, HashPageGetOpaque, IndexScanDescData::indexRelation, InvalidBuffer, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LockBuffer(), and IndexScanDescData::opaque.

Referenced by _hash_readpage().

◆ _hash_saveitem()

static void _hash_saveitem ( HashScanOpaque  so,
int  itemIndex,
OffsetNumber  offnum,
IndexTuple  itup 
)
inlinestatic

Definition at line 711 of file hashsearch.c.

713{
714 HashScanPosItem *currItem = &so->currPos.items[itemIndex];
715
716 currItem->heapTid = itup->t_tid;
717 currItem->indexOffset = offnum;
718}
ItemPointerData heapTid
Definition hash.h:105
ItemPointerData t_tid
Definition itup.h:37

References fb(), HashScanPosItem::heapTid, and IndexTupleData::t_tid.

Referenced by _hash_load_qualified_items().