/*-------------------------------------------------------------------------
 *
 * hashsearch.c
 *    search code for postgres hash tables
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/hash/hashsearch.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/hash.h"
#include "access/relscan.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/predicate.h"
#include "utils/rel.h"

static bool _hash_readpage(IndexScanDesc scan, Buffer *bufP,
                           ScanDirection dir);
static int  _hash_load_qualified_items(IndexScanDesc scan, Page page,
                                       OffsetNumber offnum, ScanDirection dir);
static inline void _hash_saveitem(HashScanOpaque so, int itemIndex,
                                  OffsetNumber offnum, IndexTuple itup);
static void _hash_readnext(IndexScanDesc scan, Buffer *bufp,
                           Page *pagep, HashPageOpaque *opaquep);

/*
 * _hash_next() -- Get the next item in a scan.
 *
 *  On entry, so->currPos describes the current page, which may be pinned
 *  but not locked, and so->currPos.itemIndex identifies which item was
 *  previously returned.
 *
 *  On successful exit, scan->xs_heaptid is set to the TID of the next
 *  heap tuple, and so->currPos is updated as needed.
 *
 *  On failure exit (no more tuples), we return false with the pin held
 *  on the bucket page but no pins or locks held on any overflow page.
 */
bool
_hash_next(IndexScanDesc scan, ScanDirection dir)
{
    Relation    rel = scan->indexRelation;
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    HashScanPosItem *currItem;
    BlockNumber blkno;
    Buffer      buf;
    bool        end_of_scan = false;

    /*
     * Advance to the next tuple on the current page; or if done, try to read
     * data from the next or previous page based on the scan direction.
     * Before moving to the next or previous page, make sure that we deal
     * with all the killed items.
     */
    if (ScanDirectionIsForward(dir))
    {
        if (++so->currPos.itemIndex > so->currPos.lastItem)
        {
            if (so->numKilled > 0)
                _hash_kill_items(scan);

            blkno = so->currPos.nextPage;
            if (BlockNumberIsValid(blkno))
            {
                buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
                if (!_hash_readpage(scan, &buf, dir))
                    end_of_scan = true;
            }
            else
                end_of_scan = true;
        }
    }
    else
    {
        if (--so->currPos.itemIndex < so->currPos.firstItem)
        {
            if (so->numKilled > 0)
                _hash_kill_items(scan);

            blkno = so->currPos.prevPage;
            if (BlockNumberIsValid(blkno))
            {
                buf = _hash_getbuf(rel, blkno, HASH_READ,
                                   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);

                /*
                 * We always maintain the pin on the bucket page for the
                 * whole scan operation, so release the additional pin we
                 * have acquired here.
                 */
                if (buf == so->hashso_bucket_buf ||
                    buf == so->hashso_split_bucket_buf)
                    _hash_dropbuf(rel, buf);

                if (!_hash_readpage(scan, &buf, dir))
                    end_of_scan = true;
            }
            else
                end_of_scan = true;
        }
    }

    if (end_of_scan)
    {
        _hash_dropscanbuf(rel, so);
        HashScanPosInvalidate(so->currPos);
        return false;
    }

    /* OK, itemIndex says what to return */
    currItem = &so->currPos.items[so->currPos.itemIndex];
    scan->xs_heaptid = currItem->heapTid;

    return true;
}

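/*
 * For illustration, a sketch of how the functions in this file are driven:
 * the index AM's gettuple entry point, hashgettuple() in hash.c, chooses
 * between _hash_first and _hash_next roughly like this:
 *
 *		if (!HashScanPosIsValid(so->currPos))
 *			res = _hash_first(scan, dir);	(start a fresh scan)
 *		else
 *			res = _hash_next(scan, dir);	(continue from currPos)
 */
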
/*
 * Advance to the next page in a bucket, if any. If we are scanning the
 * bucket being populated during a split operation, this function advances
 * to the bucket being split once it passes the last page of the bucket
 * being populated.
 */
static void
_hash_readnext(IndexScanDesc scan,
               Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
    BlockNumber blkno;
    Relation    rel = scan->indexRelation;
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    bool        block_found = false;

    blkno = (*opaquep)->hasho_nextblkno;

    /*
     * Retain the pin on the primary bucket page till the end of scan.  See
     * the comments in _hash_first for the reason for retaining the pin.
     */
    if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
        LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
    else
        _hash_relbuf(rel, *bufp);

    *bufp = InvalidBuffer;
    /* check for interrupts while we're not holding any buffer lock */
    CHECK_FOR_INTERRUPTS();
    if (BlockNumberIsValid(blkno))
    {
        *bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
        block_found = true;
    }
    else if (so->hashso_buc_populated && !so->hashso_buc_split)
    {
        /*
         * End of bucket: scan the bucket being split if there was a split in
         * progress at the start of the scan.
         */
        *bufp = so->hashso_split_bucket_buf;

        /*
         * The buffer for the bucket being split must be valid, as we acquire
         * the pin on it before the start of the scan and retain it till the
         * end of the scan.
         */
        Assert(BufferIsValid(*bufp));

        LockBuffer(*bufp, BUFFER_LOCK_SHARE);
        PredicateLockPage(rel, BufferGetBlockNumber(*bufp),
                          scan->xs_snapshot);

        /*
         * Setting hashso_buc_split to true indicates that we are scanning
         * the bucket being split.
         */
        so->hashso_buc_split = true;

        block_found = true;
    }

    if (block_found)
    {
        *pagep = BufferGetPage(*bufp);
        *opaquep = HashPageGetOpaque(*pagep);
    }
}

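/*
 * For illustration: when a split was in progress at the start of the scan,
 * a forward walk through _hash_readnext visits pages roughly in this order:
 *
 *		populated (new) bucket: primary page -> overflow pages ->
 *		split (old) bucket:     primary page -> overflow pages
 *
 * i.e. the bucket being split is logically chained on after the last page
 * of the bucket being populated, with hashso_buc_split flagging which half
 * of the walk we are in.
 */
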
/*
 * Advance to the previous page in a bucket, if any. If the current scan
 * started during a split operation, this function advances to the bucket
 * being populated once it passes the first page of the bucket being split.
 */
static void
_hash_readprev(IndexScanDesc scan,
               Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
    BlockNumber blkno;
    Relation    rel = scan->indexRelation;
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    bool        haveprevblk;

    blkno = (*opaquep)->hasho_prevblkno;

    /*
     * Retain the pin on the primary bucket page till the end of scan.  See
     * the comments in _hash_first for the reason for retaining the pin.
     */
    if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
    {
        LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
        haveprevblk = false;
    }
    else
    {
        _hash_relbuf(rel, *bufp);
        haveprevblk = true;
    }

    *bufp = InvalidBuffer;
    /* check for interrupts while we're not holding any buffer lock */
    CHECK_FOR_INTERRUPTS();

    if (haveprevblk)
    {
        Assert(BlockNumberIsValid(blkno));
        *bufp = _hash_getbuf(rel, blkno, HASH_READ,
                             LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
        *pagep = BufferGetPage(*bufp);
        *opaquep = HashPageGetOpaque(*pagep);

        /*
         * We always maintain the pin on the bucket page for the whole scan
         * operation, so release the additional pin we have acquired here.
         */
        if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
            _hash_dropbuf(rel, *bufp);
    }
    else if (so->hashso_buc_populated && so->hashso_buc_split)
    {
        /*
         * End of bucket: scan the bucket being populated if there was a
         * split in progress at the start of the scan.
         */
        *bufp = so->hashso_bucket_buf;

        /*
         * The buffer for the bucket being populated must be valid, as we
         * acquire the pin on it before the start of the scan and retain it
         * till the end of the scan.
         */
        Assert(BufferIsValid(*bufp));

        LockBuffer(*bufp, BUFFER_LOCK_SHARE);
        *pagep = BufferGetPage(*bufp);
        *opaquep = HashPageGetOpaque(*pagep);

        /* move to the end of bucket chain */
        while (BlockNumberIsValid((*opaquep)->hasho_nextblkno))
            _hash_readnext(scan, bufp, pagep, opaquep);

        /*
         * Setting hashso_buc_split to false indicates that we are scanning
         * the bucket being populated.
         */
        so->hashso_buc_split = false;
    }
}

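/*
 * For illustration: the backward walk is the mirror image of the order
 * sketched after _hash_readnext.  A backward scan positioned in the bucket
 * being split steps back through that bucket's pages; on reaching its
 * primary page, _hash_readprev jumps to the bucket being populated, walks
 * to the end of its chain, and continues backward from there, clearing
 * hashso_buc_split on the way.
 */
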
/*
 * _hash_first() -- Find the first item in a scan.
 *
 *  We find the first item (or, if backward scan, the last item) in the
 *  index that satisfies the qualification associated with the scan
 *  descriptor.
 *
 *  On successful exit, if the page containing the current index tuple is
 *  an overflow page, both pin and lock are released, whereas if it is a
 *  bucket page it is pinned but not locked.  Data about the matching
 *  tuple(s) on the page has been loaded into so->currPos, and
 *  scan->xs_heaptid is set to the heap TID of the current tuple.
 *
 *  On failure exit (no more tuples), we return false, with the pin held
 *  on the bucket page but no pins or locks held on any overflow page.
 */
bool
_hash_first(IndexScanDesc scan, ScanDirection dir)
{
    Relation    rel = scan->indexRelation;
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    ScanKey     cur;
    uint32      hashkey;
    Bucket      bucket;
    Buffer      buf;
    Page        page;
    HashPageOpaque opaque;
    HashScanPosItem *currItem;

    pgstat_count_index_scan(rel);

    /*
     * We do not support hash scans with no index qualification, because we
     * would have to read the whole index rather than just one bucket. That
     * creates a whole raft of problems, since we haven't got a practical way
     * to lock all the buckets against splits or compactions.
     */
    if (scan->numberOfKeys < 1)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("hash indexes do not support whole-index scans")));

    /* There may be more than one index qual, but we hash only the first */
    cur = &scan->keyData[0];

    /* We support only single-column hash indexes */
    Assert(cur->sk_attno == 1);
    /* And there's only one operator strategy, too */
    Assert(cur->sk_strategy == HTEqualStrategyNumber);

    /*
     * If the constant in the index qual is NULL, assume it cannot match any
     * items in the index.
     */
    if (cur->sk_flags & SK_ISNULL)
        return false;

    /*
     * Okay to compute the hash key.  We want to do this before acquiring
     * any locks, in case a user-defined hash function happens to be slow.
     *
     * If the scankey operator is not a cross-type comparison, we can use
     * the cached hash function; otherwise we have to look it up in the
     * catalogs.
     *
     * We support the convention that sk_subtype == InvalidOid means the
     * opclass input type; this is a hack to simplify life for ScanKeyInit().
     */
    if (cur->sk_subtype == rel->rd_opcintype[0] ||
        cur->sk_subtype == InvalidOid)
        hashkey = _hash_datum2hashkey(rel, cur->sk_argument);
    else
        hashkey = _hash_datum2hashkey_type(rel, cur->sk_argument,
                                           cur->sk_subtype);

    so->hashso_sk_hash = hashkey;

    buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ, NULL);
    PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
    page = BufferGetPage(buf);
    opaque = HashPageGetOpaque(page);
    bucket = opaque->hasho_bucket;

    so->hashso_bucket_buf = buf;

    /*
     * If a bucket split is in progress, then while scanning the bucket
     * being populated we need to skip tuples that were copied from the
     * bucket being split.  We also need to maintain a pin on the bucket
     * being split, to ensure that split-cleanup work done by vacuum doesn't
     * remove tuples from it till this scan is done.  We need to maintain a
     * pin on the bucket being populated to ensure that vacuum doesn't
     * squeeze that bucket till this scan is complete; otherwise, the
     * ordering of tuples can't be maintained during forward and backward
     * scans.  Here, we have to be cautious about locking order: first,
     * acquire the lock on the bucket being split; then, release the lock on
     * it but not the pin; then, acquire a lock on the bucket being
     * populated and re-verify whether the bucket split is still in
     * progress.  Acquiring the lock on the bucket being split first ensures
     * that vacuum waits for this scan to finish.
     */
    if (H_BUCKET_BEING_POPULATED(opaque))
    {
        BlockNumber old_blkno;
        Buffer      old_buf;

        old_blkno = _hash_get_oldblock_from_newbucket(rel, bucket);

        /*
         * Release the lock on the new bucket and re-acquire it after
         * acquiring the lock on the old bucket.
         */
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);

        old_buf = _hash_getbuf(rel, old_blkno, HASH_READ, LH_BUCKET_PAGE);

        /*
         * Remember the split bucket buffer so as to use it later for
         * scanning.
         */
        so->hashso_split_bucket_buf = old_buf;
        LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);

        LockBuffer(buf, BUFFER_LOCK_SHARE);
        page = BufferGetPage(buf);
        opaque = HashPageGetOpaque(page);
        Assert(opaque->hasho_bucket == bucket);

        if (H_BUCKET_BEING_POPULATED(opaque))
            so->hashso_buc_populated = true;
        else
        {
            _hash_dropbuf(rel, old_buf);
            so->hashso_split_bucket_buf = InvalidBuffer;
        }
    }

    /* If a backward scan is requested, move to the end of the chain */
    if (ScanDirectionIsBackward(dir))
    {
        /*
         * Backward scans that start during a split need to start from the
         * end of the bucket being split.
         */
        while (BlockNumberIsValid(opaque->hasho_nextblkno) ||
               (so->hashso_buc_populated && !so->hashso_buc_split))
            _hash_readnext(scan, &buf, &page, &opaque);
    }

    /* remember which buffer we have pinned, if any */
    Assert(BufferIsInvalid(so->currPos.buf));
    so->currPos.buf = buf;

    /* Now find all the tuples satisfying the qualification from a page */
    if (!_hash_readpage(scan, &buf, dir))
        return false;

    /* OK, itemIndex says what to return */
    currItem = &so->currPos.items[so->currPos.itemIndex];
    scan->xs_heaptid = currItem->heapTid;

    /* if we're here, _hash_readpage found valid tuples */
    return true;
}

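/*
 * For illustration (table and index names here are hypothetical): the path
 * above is reached by an equality probe through a hash index, e.g.
 *
 *		CREATE TABLE t (id integer);
 *		CREATE INDEX t_id_hash ON t USING hash (id);
 *		SELECT * FROM t WHERE id = 42;
 *
 * Only the = operator is supported, which is what the
 * HTEqualStrategyNumber assertion above enforces.
 */
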
/*
 * _hash_readpage() -- Load data from current index page into so->currPos
 *
 *  We scan all the items in the current index page and save the ones that
 *  satisfy the qualification into so->currPos.  If no matching items are
 *  found in the current page, we move to the next or previous page in the
 *  bucket chain, as indicated by the scan direction.
 *
 *  Return true if any matching items are found, else return false.
 */
static bool
_hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
    Relation    rel = scan->indexRelation;
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Buffer      buf;
    Page        page;
    HashPageOpaque opaque;
    OffsetNumber offnum;
    uint16      itemIndex;

    buf = *bufP;
    Assert(BufferIsValid(buf));
    _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
    page = BufferGetPage(buf);
    opaque = HashPageGetOpaque(page);

    so->currPos.buf = buf;
    so->currPos.currPage = BufferGetBlockNumber(buf);

    if (ScanDirectionIsForward(dir))
    {
        BlockNumber prev_blkno = InvalidBlockNumber;

        for (;;)
        {
            /* new page, locate starting position by binary search */
            offnum = _hash_binsearch(page, so->hashso_sk_hash);

            itemIndex = _hash_load_qualified_items(scan, page, offnum, dir);

            if (itemIndex != 0)
                break;

            /*
             * Could not find any matching tuples in the current page; move
             * to the next page.  Before leaving the current page, deal with
             * any killed items.
             */
            if (so->numKilled > 0)
                _hash_kill_items(scan);

            /*
             * If this is a primary bucket page, hasho_prevblkno is not a
             * real block number.
             */
            if (so->currPos.buf == so->hashso_bucket_buf ||
                so->currPos.buf == so->hashso_split_bucket_buf)
                prev_blkno = InvalidBlockNumber;
            else
                prev_blkno = opaque->hasho_prevblkno;

            _hash_readnext(scan, &buf, &page, &opaque);
            if (BufferIsValid(buf))
            {
                so->currPos.buf = buf;
                so->currPos.currPage = BufferGetBlockNumber(buf);
            }
            else
            {
                /*
                 * Remember the next and previous block numbers so that
                 * scrollable cursors know the start position, and return
                 * false indicating that no more matching tuples were found.
                 * Also, don't reset currPage or lsn, because we expect
                 * _hash_kill_items to be called for the old page after this
                 * function returns.
                 */
                so->currPos.prevPage = prev_blkno;
                so->currPos.nextPage = InvalidBlockNumber;
                so->currPos.buf = buf;
                return false;
            }
        }

        so->currPos.firstItem = 0;
        so->currPos.lastItem = itemIndex - 1;
        so->currPos.itemIndex = 0;
    }
    else
    {
        BlockNumber next_blkno = InvalidBlockNumber;

        for (;;)
        {
            /* new page, locate starting position by binary search */
            offnum = _hash_binsearch_last(page, so->hashso_sk_hash);

            itemIndex = _hash_load_qualified_items(scan, page, offnum, dir);

            if (itemIndex != MaxIndexTuplesPerPage)
                break;

            /*
             * Could not find any matching tuples in the current page; move
             * to the previous page.  Before leaving the current page, deal
             * with any killed items.
             */
            if (so->numKilled > 0)
                _hash_kill_items(scan);

            if (so->currPos.buf == so->hashso_bucket_buf ||
                so->currPos.buf == so->hashso_split_bucket_buf)
                next_blkno = opaque->hasho_nextblkno;

            _hash_readprev(scan, &buf, &page, &opaque);
            if (BufferIsValid(buf))
            {
                so->currPos.buf = buf;
                so->currPos.currPage = BufferGetBlockNumber(buf);
            }
            else
            {
                /*
                 * Remember the next and previous block numbers so that
                 * scrollable cursors know the start position, and return
                 * false indicating that no more matching tuples were found.
                 * Also, don't reset currPage or lsn, because we expect
                 * _hash_kill_items to be called for the old page after this
                 * function returns.
                 */
                so->currPos.prevPage = InvalidBlockNumber;
                so->currPos.nextPage = next_blkno;
                so->currPos.buf = buf;
                return false;
            }
        }

        so->currPos.firstItem = itemIndex;
        so->currPos.lastItem = MaxIndexTuplesPerPage - 1;
        so->currPos.itemIndex = MaxIndexTuplesPerPage - 1;
    }

    if (so->currPos.buf == so->hashso_bucket_buf ||
        so->currPos.buf == so->hashso_split_bucket_buf)
    {
        so->currPos.prevPage = InvalidBlockNumber;
        so->currPos.nextPage = opaque->hasho_nextblkno;
        LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
    }
    else
    {
        so->currPos.prevPage = opaque->hasho_prevblkno;
        so->currPos.nextPage = opaque->hasho_nextblkno;
        _hash_relbuf(rel, so->currPos.buf);
        so->currPos.buf = InvalidBuffer;
    }

    Assert(so->currPos.firstItem <= so->currPos.lastItem);
    return true;
}

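/*
 * For illustration: after a successful _hash_readpage, so->currPos holds a
 * window into items[] whose shape depends on the scan direction (with
 * Max = MaxIndexTuplesPerPage and n matches loaded):
 *
 *		forward:   items[0 .. n-1];       firstItem = 0, lastItem = n-1,
 *		           itemIndex = 0
 *		backward:  items[Max-n .. Max-1]; firstItem = Max-n,
 *		           lastItem = itemIndex = Max-1
 *
 * Either way, _hash_next then steps itemIndex toward the far end of the
 * window before another page has to be read.
 */
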
/*
 * Load all the qualified items from the current index page
 * into so->currPos.  Helper function for _hash_readpage.
 */
static int
_hash_load_qualified_items(IndexScanDesc scan, Page page,
                           OffsetNumber offnum, ScanDirection dir)
{
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    IndexTuple  itup;
    int         itemIndex;
    OffsetNumber maxoff;

    maxoff = PageGetMaxOffsetNumber(page);

    if (ScanDirectionIsForward(dir))
    {
        /* load items[] in ascending order */
        itemIndex = 0;

        while (offnum <= maxoff)
        {
            Assert(offnum >= FirstOffsetNumber);
            itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));

            /*
             * Skip tuples that were moved by a split operation if the scan
             * started while the split was in progress.  Also skip tuples
             * that are marked as dead.
             */
            if ((so->hashso_buc_populated && !so->hashso_buc_split &&
                 (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) ||
                (scan->ignore_killed_tuples &&
                 (ItemIdIsDead(PageGetItemId(page, offnum)))))
            {
                offnum = OffsetNumberNext(offnum);  /* move forward */
                continue;
            }

            if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) &&
                _hash_checkqual(scan, itup))
            {
                /* tuple is qualified, so remember it */
                _hash_saveitem(so, itemIndex, offnum, itup);
                itemIndex++;
            }
            else
            {
                /* No more matching tuples exist on this page, so exit the loop. */
                break;
            }

            offnum = OffsetNumberNext(offnum);
        }

        Assert(itemIndex <= MaxIndexTuplesPerPage);
        return itemIndex;
    }
    else
    {
        /* load items[] in descending order */
        itemIndex = MaxIndexTuplesPerPage;

        while (offnum >= FirstOffsetNumber)
        {
            Assert(offnum <= maxoff);
            itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));

            /*
             * Skip tuples that were moved by a split operation if the scan
             * started while the split was in progress.  Also skip tuples
             * that are marked as dead.
             */
            if ((so->hashso_buc_populated && !so->hashso_buc_split &&
                 (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) ||
                (scan->ignore_killed_tuples &&
                 (ItemIdIsDead(PageGetItemId(page, offnum)))))
            {
                offnum = OffsetNumberPrev(offnum);  /* move back */
                continue;
            }

            if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) &&
                _hash_checkqual(scan, itup))
            {
                itemIndex--;
                /* tuple is qualified, so remember it */
                _hash_saveitem(so, itemIndex, offnum, itup);
            }
            else
            {
                /* No more matching tuples exist on this page, so exit the loop. */
                break;
            }

            offnum = OffsetNumberPrev(offnum);
        }

        Assert(itemIndex >= 0);
        return itemIndex;
    }
}

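/*
 * For illustration: the asymmetric fill convention above keeps items[] in
 * page order for both directions.  With Max = MaxIndexTuplesPerPage and
 * three matches at offsets o1 < o2 < o3, the result is roughly:
 *
 *		forward:   items[0] = o1, items[1] = o2, items[2] = o3; returns 3
 *		backward:  items[Max-3] = o1 ... items[Max-1] = o3;     returns Max-3
 *
 * which is why _hash_readpage treats a return value of 0 (forward) or Max
 * (backward) as "no matches on this page".
 */
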
/* Save an index item into so->currPos.items[itemIndex] */
static inline void
_hash_saveitem(HashScanOpaque so, int itemIndex,
               OffsetNumber offnum, IndexTuple itup)
{
    HashScanPosItem *currItem = &so->currPos.items[itemIndex];

    currItem->heapTid = itup->t_tid;
    currItem->indexOffset = offnum;
}