PostgreSQL Source Code  git master
hash_xlog.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/xlogutils.h"
#include "storage/standby.h"

Go to the source code of this file.

Functions

static void hash_xlog_init_meta_page (XLogReaderState *record)
 
static void hash_xlog_init_bitmap_page (XLogReaderState *record)
 
static void hash_xlog_insert (XLogReaderState *record)
 
static void hash_xlog_add_ovfl_page (XLogReaderState *record)
 
static void hash_xlog_split_allocate_page (XLogReaderState *record)
 
static void hash_xlog_split_page (XLogReaderState *record)
 
static void hash_xlog_split_complete (XLogReaderState *record)
 
static void hash_xlog_move_page_contents (XLogReaderState *record)
 
static void hash_xlog_squeeze_page (XLogReaderState *record)
 
static void hash_xlog_delete (XLogReaderState *record)
 
static void hash_xlog_split_cleanup (XLogReaderState *record)
 
static void hash_xlog_update_meta_page (XLogReaderState *record)
 
static void hash_xlog_vacuum_one_page (XLogReaderState *record)
 
void hash_redo (XLogReaderState *record)
 
void hash_mask (char *pagedata, BlockNumber blkno)
 

Function Documentation

◆ hash_mask()

void hash_mask ( char *  pagedata,
BlockNumber  blkno 
)

Definition at line 1104 of file hash_xlog.c.

1105 {
1106  Page page = (Page) pagedata;
1107  HashPageOpaque opaque;
1108  int pagetype;
1109 
1110  mask_page_lsn_and_checksum(page);
1111 
1112  mask_page_hint_bits(page);
1113  mask_unused_space(page);
1114 
1115  opaque = HashPageGetOpaque(page);
1116 
1117  pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
1118  if (pagetype == LH_UNUSED_PAGE)
1119  {
1120  /*
1121  * Mask everything on a UNUSED page.
1122  */
1123  mask_page_content(page);
1124  }
1125  else if (pagetype == LH_BUCKET_PAGE ||
1126  pagetype == LH_OVERFLOW_PAGE)
1127  {
1128  /*
1129  * In hash bucket and overflow pages, it is possible to modify the
1130  * LP_FLAGS without emitting any WAL record. Hence, mask the line
1131  * pointer flags. See hashgettuple(), _hash_kill_items() for details.
1132  */
1133  mask_lp_flags(page);
1134  }
1135 
1136  /*
1137  * It is possible that the hint bit LH_PAGE_HAS_DEAD_TUPLES may remain
1138  * unlogged. So, mask it. See _hash_kill_items() for details.
1139  */
1140  opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
1141 }
void mask_lp_flags(Page page)
Definition: bufmask.c:95
void mask_page_content(Page page)
Definition: bufmask.c:119
void mask_page_lsn_and_checksum(Page page)
Definition: bufmask.c:31
void mask_unused_space(Page page)
Definition: bufmask.c:71
void mask_page_hint_bits(Page page)
Definition: bufmask.c:46
Pointer Page
Definition: bufpage.h:78
#define HashPageGetOpaque(page)
Definition: hash.h:88
#define LH_BUCKET_PAGE
Definition: hash.h:55
#define LH_UNUSED_PAGE
Definition: hash.h:53
#define LH_PAGE_TYPE
Definition: hash.h:63
#define LH_PAGE_HAS_DEAD_TUPLES
Definition: hash.h:61
#define LH_OVERFLOW_PAGE
Definition: hash.h:54
uint16 hasho_flag
Definition: hash.h:82

References HashPageOpaqueData::hasho_flag, HashPageGetOpaque, LH_BUCKET_PAGE, LH_OVERFLOW_PAGE, LH_PAGE_HAS_DEAD_TUPLES, LH_PAGE_TYPE, LH_UNUSED_PAGE, mask_lp_flags(), mask_page_content(), mask_page_hint_bits(), mask_page_lsn_and_checksum(), and mask_unused_space().
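
hash_mask() exists for WAL consistency checking: fields that may legitimately differ between the page image logged on the primary and the page produced by replay (LSN, checksum, hint bits, unused space, unlogged line-pointer flags) are zeroed on both images before they are compared. The following standalone sketch illustrates only the idea, using a toy page layout and hypothetical names, not PostgreSQL's PageHeaderData:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy page: not PostgreSQL's real layout, just the shape of the idea. */
typedef struct ToyPage
{
    uint64_t lsn;         /* stamped at different times on primary/standby */
    uint16_t hint_flags;  /* hint bits may be set without writing WAL */
    char     payload[64]; /* the logged content that must match */
} ToyPage;

/* Zero the fields that may legitimately differ; then images can be memcmp'd. */
static void
toy_mask(ToyPage *p)
{
    p->lsn = 0;
    p->hint_flags = 0;
}

int
main(void)
{
    ToyPage a, b;

    memset(&a, 0, sizeof(a));
    memset(&b, 0, sizeof(b));
    a.lsn = 100; a.hint_flags = 1;      /* "primary" version of the page */
    b.lsn = 200; b.hint_flags = 0;      /* "standby" version after replay */
    strcpy(a.payload, "same logical content");
    strcpy(b.payload, "same logical content");

    toy_mask(&a);
    toy_mask(&b);
    printf("consistent: %s\n",
           memcmp(&a, &b, sizeof(ToyPage)) == 0 ? "yes" : "no");
    return 0;
}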

◆ hash_redo()

void hash_redo ( XLogReaderState record)

Definition at line 1050 of file hash_xlog.c.

1051 {
1052  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1053 
1054  switch (info)
1055  {
1056  case XLOG_HASH_INIT_META_PAGE:
1057  hash_xlog_init_meta_page(record);
1058  break;
1059  case XLOG_HASH_INIT_BITMAP_PAGE:
1060  hash_xlog_init_bitmap_page(record);
1061  break;
1062  case XLOG_HASH_INSERT:
1063  hash_xlog_insert(record);
1064  break;
1065  case XLOG_HASH_ADD_OVFL_PAGE:
1066  hash_xlog_add_ovfl_page(record);
1067  break;
1068  case XLOG_HASH_SPLIT_ALLOCATE_PAGE:
1069  hash_xlog_split_allocate_page(record);
1070  break;
1071  case XLOG_HASH_SPLIT_PAGE:
1072  hash_xlog_split_page(record);
1073  break;
1074  case XLOG_HASH_SPLIT_COMPLETE:
1075  hash_xlog_split_complete(record);
1076  break;
1077  case XLOG_HASH_MOVE_PAGE_CONTENTS:
1078  hash_xlog_move_page_contents(record);
1079  break;
1080  case XLOG_HASH_SQUEEZE_PAGE:
1081  hash_xlog_squeeze_page(record);
1082  break;
1083  case XLOG_HASH_DELETE:
1084  hash_xlog_delete(record);
1085  break;
1086  case XLOG_HASH_SPLIT_CLEANUP:
1087  hash_xlog_split_cleanup(record);
1088  break;
1089  case XLOG_HASH_UPDATE_META_PAGE:
1090  hash_xlog_update_meta_page(record);
1091  break;
1092  case XLOG_HASH_VACUUM_ONE_PAGE:
1093  hash_xlog_vacuum_one_page(record);
1094  break;
1095  default:
1096  elog(PANIC, "hash_redo: unknown op code %u", info);
1097  }
1098 }
unsigned char uint8
Definition: c.h:491
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:224
static void hash_xlog_split_cleanup(XLogReaderState *record)
Definition: hash_xlog.c:922
static void hash_xlog_add_ovfl_page(XLogReaderState *record)
Definition: hash_xlog.c:173
static void hash_xlog_split_page(XLogReaderState *record)
Definition: hash_xlog.c:428
static void hash_xlog_init_meta_page(XLogReaderState *record)
Definition: hash_xlog.c:27
static void hash_xlog_split_complete(XLogReaderState *record)
Definition: hash_xlog.c:442
static void hash_xlog_update_meta_page(XLogReaderState *record)
Definition: hash_xlog.c:947
static void hash_xlog_vacuum_one_page(XLogReaderState *record)
Definition: hash_xlog.c:974
static void hash_xlog_squeeze_page(XLogReaderState *record)
Definition: hash_xlog.c:627
static void hash_xlog_insert(XLogReaderState *record)
Definition: hash_xlog.c:125
static void hash_xlog_move_page_contents(XLogReaderState *record)
Definition: hash_xlog.c:501
static void hash_xlog_split_allocate_page(XLogReaderState *record)
Definition: hash_xlog.c:311
static void hash_xlog_delete(XLogReaderState *record)
Definition: hash_xlog.c:844
static void hash_xlog_init_bitmap_page(XLogReaderState *record)
Definition: hash_xlog.c:63
#define XLOG_HASH_INIT_BITMAP_PAGE
Definition: hash_xlog.h:28
#define XLOG_HASH_SQUEEZE_PAGE
Definition: hash_xlog.h:35
#define XLOG_HASH_SPLIT_CLEANUP
Definition: hash_xlog.h:37
#define XLOG_HASH_ADD_OVFL_PAGE
Definition: hash_xlog.h:30
#define XLOG_HASH_UPDATE_META_PAGE
Definition: hash_xlog.h:38
#define XLOG_HASH_INSERT
Definition: hash_xlog.h:29
#define XLOG_HASH_SPLIT_ALLOCATE_PAGE
Definition: hash_xlog.h:31
#define XLOG_HASH_SPLIT_PAGE
Definition: hash_xlog.h:32
#define XLOG_HASH_INIT_META_PAGE
Definition: hash_xlog.h:27
#define XLOG_HASH_DELETE
Definition: hash_xlog.h:36
#define XLOG_HASH_SPLIT_COMPLETE
Definition: hash_xlog.h:33
#define XLOG_HASH_MOVE_PAGE_CONTENTS
Definition: hash_xlog.h:34
#define XLOG_HASH_VACUUM_ONE_PAGE
Definition: hash_xlog.h:40
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:410
#define XLR_INFO_MASK
Definition: xlogrecord.h:62

References elog, hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), PANIC, XLOG_HASH_ADD_OVFL_PAGE, XLOG_HASH_DELETE, XLOG_HASH_INIT_BITMAP_PAGE, XLOG_HASH_INIT_META_PAGE, XLOG_HASH_INSERT, XLOG_HASH_MOVE_PAGE_CONTENTS, XLOG_HASH_SPLIT_ALLOCATE_PAGE, XLOG_HASH_SPLIT_CLEANUP, XLOG_HASH_SPLIT_COMPLETE, XLOG_HASH_SPLIT_PAGE, XLOG_HASH_SQUEEZE_PAGE, XLOG_HASH_UPDATE_META_PAGE, XLOG_HASH_VACUUM_ONE_PAGE, XLogRecGetInfo, and XLR_INFO_MASK.
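
Every hash WAL record carries its operation code in the high bits of xl_info; hash_redo() strips the bits reserved by XLR_INFO_MASK and dispatches on what remains, panicking on anything unrecognized. A freestanding sketch of that demultiplexing pattern follows; the opcode values and handler names are illustrative, not the real hash_xlog.h constants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_INFO_MASK   0x0F    /* low bits reserved, like XLR_INFO_MASK */
#define TOY_OP_INSERT   0x10
#define TOY_OP_DELETE   0x20

static void toy_redo_insert(void) { puts("replaying insert"); }
static void toy_redo_delete(void) { puts("replaying delete"); }

/* Dispatch on the opcode bits; fail hard on unknown codes, as hash_redo does. */
static void
toy_redo(uint8_t xl_info)
{
    uint8_t info = xl_info & ~TOY_INFO_MASK;

    switch (info)
    {
        case TOY_OP_INSERT:
            toy_redo_insert();
            break;
        case TOY_OP_DELETE:
            toy_redo_delete();
            break;
        default:
            fprintf(stderr, "toy_redo: unknown op code %u\n", info);
            exit(1);
    }
}

int
main(void)
{
    toy_redo(TOY_OP_INSERT | 0x03);     /* reserved low bits are ignored */
    toy_redo(TOY_OP_DELETE);
    return 0;
}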

◆ hash_xlog_add_ovfl_page()

static void hash_xlog_add_ovfl_page ( XLogReaderState record)
static

Definition at line 173 of file hash_xlog.c.

174 {
175  XLogRecPtr lsn = record->EndRecPtr;
176  xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *) XLogRecGetData(record);
177  Buffer leftbuf;
178  Buffer ovflbuf;
179  Buffer metabuf;
180  BlockNumber leftblk;
181  BlockNumber rightblk;
182  BlockNumber newmapblk = InvalidBlockNumber;
183  Page ovflpage;
184  HashPageOpaque ovflopaque;
185  uint32 *num_bucket;
186  char *data;
187  Size datalen PG_USED_FOR_ASSERTS_ONLY;
188  bool new_bmpage = false;
189 
190  XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk);
191  XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk);
192 
193  ovflbuf = XLogInitBufferForRedo(record, 0);
194  Assert(BufferIsValid(ovflbuf));
195 
196  data = XLogRecGetBlockData(record, 0, &datalen);
197  num_bucket = (uint32 *) data;
198  Assert(datalen == sizeof(uint32));
199  _hash_initbuf(ovflbuf, InvalidBlockNumber, *num_bucket, LH_OVERFLOW_PAGE,
200  true);
201  /* update backlink */
202  ovflpage = BufferGetPage(ovflbuf);
203  ovflopaque = HashPageGetOpaque(ovflpage);
204  ovflopaque->hasho_prevblkno = leftblk;
205 
206  PageSetLSN(ovflpage, lsn);
207  MarkBufferDirty(ovflbuf);
208 
209  if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO)
210  {
211  Page leftpage;
212  HashPageOpaque leftopaque;
213 
214  leftpage = BufferGetPage(leftbuf);
215  leftopaque = HashPageGetOpaque(leftpage);
216  leftopaque->hasho_nextblkno = rightblk;
217 
218  PageSetLSN(leftpage, lsn);
219  MarkBufferDirty(leftbuf);
220  }
221 
222  if (BufferIsValid(leftbuf))
223  UnlockReleaseBuffer(leftbuf);
224  UnlockReleaseBuffer(ovflbuf);
225 
226  /*
227  * Note: in normal operation, we'd update the bitmap and meta page while
228  * still holding lock on the overflow pages. But during replay it's not
229  * necessary to hold those locks, since no other index updates can be
230  * happening concurrently.
231  */
232  if (XLogRecHasBlockRef(record, 2))
233  {
234  Buffer mapbuffer;
235 
236  if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO)
237  {
238  Page mappage = (Page) BufferGetPage(mapbuffer);
239  uint32 *freep = NULL;
240  uint32 *bitmap_page_bit;
241 
242  freep = HashPageGetBitmap(mappage);
243 
244  data = XLogRecGetBlockData(record, 2, &datalen);
245  bitmap_page_bit = (uint32 *) data;
246 
247  SETBIT(freep, *bitmap_page_bit);
248 
249  PageSetLSN(mappage, lsn);
250  MarkBufferDirty(mapbuffer);
251  }
252  if (BufferIsValid(mapbuffer))
253  UnlockReleaseBuffer(mapbuffer);
254  }
255 
256  if (XLogRecHasBlockRef(record, 3))
257  {
258  Buffer newmapbuf;
259 
260  newmapbuf = XLogInitBufferForRedo(record, 3);
261 
262  _hash_initbitmapbuffer(newmapbuf, xlrec->bmsize, true);
263 
264  new_bmpage = true;
265  newmapblk = BufferGetBlockNumber(newmapbuf);
266 
267  MarkBufferDirty(newmapbuf);
268  PageSetLSN(BufferGetPage(newmapbuf), lsn);
269 
270  UnlockReleaseBuffer(newmapbuf);
271  }
272 
273  if (XLogReadBufferForRedo(record, 4, &metabuf) == BLK_NEEDS_REDO)
274  {
275  HashMetaPage metap;
276  Page page;
277  uint32 *firstfree_ovflpage;
278 
279  data = XLogRecGetBlockData(record, 4, &datalen);
280  firstfree_ovflpage = (uint32 *) data;
281 
282  page = BufferGetPage(metabuf);
283  metap = HashPageGetMeta(page);
284  metap->hashm_firstfree = *firstfree_ovflpage;
285 
286  if (!xlrec->bmpage_found)
287  {
288  metap->hashm_spares[metap->hashm_ovflpoint]++;
289 
290  if (new_bmpage)
291  {
292  Assert(BlockNumberIsValid(newmapblk));
293 
294  metap->hashm_mapp[metap->hashm_nmaps] = newmapblk;
295  metap->hashm_nmaps++;
296  metap->hashm_spares[metap->hashm_ovflpoint]++;
297  }
298  }
299 
300  PageSetLSN(page, lsn);
301  MarkBufferDirty(metabuf);
302  }
303  if (BufferIsValid(metabuf))
304  UnlockReleaseBuffer(metabuf);
305 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define SETBIT(x, i)
Definition: blutils.c:32
int Buffer
Definition: buf.h:23
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3377
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4577
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2189
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
unsigned int uint32
Definition: c.h:493
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:169
size_t Size
Definition: c.h:592
#define HashPageGetBitmap(page)
Definition: hash.h:316
#define HashPageGetMeta(page)
Definition: hash.h:323
void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
Definition: hashovfl.c:777
void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage)
Definition: hashpage.c:157
Assert(fmt[strlen(fmt) - 1] !='\n')
const void * data
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]
Definition: hash.h:264
uint32 hashm_spares[HASH_MAX_SPLITPOINTS]
Definition: hash.h:262
uint32 hashm_firstfree
Definition: hash.h:259
uint32 hashm_ovflpoint
Definition: hash.h:257
uint32 hashm_nmaps
Definition: hash.h:260
BlockNumber hasho_nextblkno
Definition: hash.h:80
BlockNumber hasho_prevblkno
Definition: hash.h:79
XLogRecPtr EndRecPtr
Definition: xlogreader.h:207
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1971
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)
Definition: xlogreader.c:2025
#define XLogRecGetData(decoder)
Definition: xlogreader.h:415
#define XLogRecHasBlockRef(decoder, block_id)
Definition: xlogreader.h:420
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition: xlogutils.c:314
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Definition: xlogutils.c:326
@ BLK_NEEDS_REDO
Definition: xlogutils.h:71

References _hash_initbitmapbuffer(), _hash_initbuf(), Assert(), BLK_NEEDS_REDO, BlockNumberIsValid(), xl_hash_add_ovfl_page::bmpage_found, xl_hash_add_ovfl_page::bmsize, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), data, XLogReaderState::EndRecPtr, HashMetaPageData::hashm_firstfree, HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_nmaps, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_spares, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_prevblkno, HashPageGetBitmap, HashPageGetMeta, HashPageGetOpaque, InvalidBlockNumber, LH_OVERFLOW_PAGE, MarkBufferDirty(), PageSetLSN(), PG_USED_FOR_ASSERTS_ONLY, SETBIT, UnlockReleaseBuffer(), XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, and XLogRecHasBlockRef.

Referenced by hash_redo().
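
When the new overflow page is recycled from the free pool (block 2 above), replay sets the page's bit in the bitmap page via SETBIT. A standalone sketch of that word-addressed bitmap arithmetic; BITS_PER_WORD and the helper names are local to this example:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 32

/* Mark overflow page number 'bit' as in-use in a uint32 bitmap. */
static void
bitmap_set(uint32_t *map, uint32_t bit)
{
    map[bit / BITS_PER_WORD] |= (uint32_t) 1 << (bit % BITS_PER_WORD);
}

static int
bitmap_get(const uint32_t *map, uint32_t bit)
{
    return (map[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & 1;
}

int
main(void)
{
    uint32_t map[4] = {0};      /* room to track 128 overflow pages */

    bitmap_set(map, 37);
    printf("bit 37 = %d, bit 38 = %d\n",
           bitmap_get(map, 37), bitmap_get(map, 38));
    return 0;
}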

◆ hash_xlog_delete()

static void hash_xlog_delete ( XLogReaderState record)
static

Definition at line 844 of file hash_xlog.c.

845 {
846  XLogRecPtr lsn = record->EndRecPtr;
847  xl_hash_delete *xldata = (xl_hash_delete *) XLogRecGetData(record);
848  Buffer bucketbuf = InvalidBuffer;
849  Buffer deletebuf;
850  Page page;
851  XLogRedoAction action;
852 
853  /*
854  * Ensure we have a cleanup lock on primary bucket page before we start
855  * with the actual replay operation. This is to ensure that neither a
856  * scan can start nor a scan can be already-in-progress during the replay
857  * of this operation. If we allow scans during this operation, then they
858  * can miss some records or show the same record multiple times.
859  */
860  if (xldata->is_primary_bucket_page)
861  action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &deletebuf);
862  else
863  {
864  /*
865  * we don't care for return value as the purpose of reading bucketbuf
866  * is to ensure a cleanup lock on primary bucket page.
867  */
868  (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
869 
870  action = XLogReadBufferForRedo(record, 1, &deletebuf);
871  }
872 
873  /* replay the record for deleting entries in bucket page */
874  if (action == BLK_NEEDS_REDO)
875  {
876  char *ptr;
877  Size len;
878 
879  ptr = XLogRecGetBlockData(record, 1, &len);
880 
881  page = (Page) BufferGetPage(deletebuf);
882 
883  if (len > 0)
884  {
885  OffsetNumber *unused;
886  OffsetNumber *unend;
887 
888  unused = (OffsetNumber *) ptr;
889  unend = (OffsetNumber *) ((char *) ptr + len);
890 
891  if ((unend - unused) > 0)
892  PageIndexMultiDelete(page, unused, unend - unused);
893  }
894 
895  /*
896  * Mark the page as not containing any LP_DEAD items only if
897  * clear_dead_marking flag is set to true. See comments in
898  * hashbucketcleanup() for details.
899  */
900  if (xldata->clear_dead_marking)
901  {
902  HashPageOpaque pageopaque;
903 
904  pageopaque = HashPageGetOpaque(page);
905  pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
906  }
907 
908  PageSetLSN(page, lsn);
909  MarkBufferDirty(deletebuf);
910  }
911  if (BufferIsValid(deletebuf))
912  UnlockReleaseBuffer(deletebuf);
913 
914  if (BufferIsValid(bucketbuf))
915  UnlockReleaseBuffer(bucketbuf);
916 }
#define InvalidBuffer
Definition: buf.h:25
@ RBM_NORMAL
Definition: bufmgr.h:44
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1161
uint16 OffsetNumber
Definition: off.h:24
const void size_t len
bool clear_dead_marking
Definition: hash_xlog.h:180
bool is_primary_bucket_page
Definition: hash_xlog.h:182
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:351
XLogRedoAction
Definition: xlogutils.h:70

References generate_unaccent_rules::action, BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), xl_hash_delete::clear_dead_marking, XLogReaderState::EndRecPtr, HashPageOpaqueData::hasho_flag, HashPageGetOpaque, InvalidBuffer, xl_hash_delete::is_primary_bucket_page, len, LH_PAGE_HAS_DEAD_TUPLES, MarkBufferDirty(), PageIndexMultiDelete(), PageSetLSN(), RBM_NORMAL, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by hash_redo().
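
The deletable offsets arrive as a contiguous OffsetNumber array in the block data, so dividing the byte length by the element size recovers the count, and PageIndexMultiDelete() removes them all in one pass. A simplified, self-contained model of that single-pass compaction over an ordinary array, with no real page structure involved:

#include <stdio.h>
#include <stddef.h>

/*
 * Remove the 1-based positions listed in 'unused' (ascending) from
 * items[0..nitems-1], compacting in a single pass.  Returns the new count.
 */
static size_t
multi_delete(int *items, size_t nitems, const size_t *unused, size_t nunused)
{
    size_t dst = 0, u = 0;

    for (size_t src = 0; src < nitems; src++)
    {
        if (u < nunused && unused[u] == src + 1)
            u++;                        /* skip: marked for deletion */
        else
            items[dst++] = items[src];  /* keep: shift left over the gaps */
    }
    return dst;
}

int
main(void)
{
    int    items[] = {10, 20, 30, 40, 50};
    size_t dead[] = {2, 5};             /* delete the 2nd and 5th entries */
    size_t n = multi_delete(items, 5, dead, 2);

    for (size_t i = 0; i < n; i++)
        printf("%d ", items[i]);        /* prints: 10 30 40 */
    printf("\n");
    return 0;
}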

◆ hash_xlog_init_bitmap_page()

static void hash_xlog_init_bitmap_page ( XLogReaderState record)
static

Definition at line 63 of file hash_xlog.c.

64 {
65  XLogRecPtr lsn = record->EndRecPtr;
66  Buffer bitmapbuf;
67  Buffer metabuf;
68  Page page;
69  HashMetaPage metap;
70  uint32 num_buckets;
71  ForkNumber forknum;
72 
73  xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);
74 
75  /*
76  * Initialize bitmap page
77  */
78  bitmapbuf = XLogInitBufferForRedo(record, 0);
79  _hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
80  PageSetLSN(BufferGetPage(bitmapbuf), lsn);
81  MarkBufferDirty(bitmapbuf);
82 
83  /*
84  * Force the on-disk state of init forks to always be in sync with the
85  * state in shared buffers. See XLogReadBufferForRedoExtended. We need
86  * special handling for init forks as create index operations don't log a
87  * full page image of the metapage.
88  */
89  XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
90  if (forknum == INIT_FORKNUM)
91  FlushOneBuffer(bitmapbuf);
92  UnlockReleaseBuffer(bitmapbuf);
93 
94  /* add the new bitmap page to the metapage's list of bitmaps */
95  if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
96  {
97  /*
98  * Note: in normal operation, we'd update the metapage while still
99  * holding lock on the bitmap page. But during replay it's not
100  * necessary to hold that lock, since nobody can see it yet; the
101  * creating transaction hasn't yet committed.
102  */
103  page = BufferGetPage(metabuf);
104  metap = HashPageGetMeta(page);
105 
106  num_buckets = metap->hashm_maxbucket + 1;
107  metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
108  metap->hashm_nmaps++;
109 
110  PageSetLSN(page, lsn);
111  MarkBufferDirty(metabuf);
112 
113  XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL);
114  if (forknum == INIT_FORKNUM)
115  FlushOneBuffer(metabuf);
116  }
117  if (BufferIsValid(metabuf))
118  UnlockReleaseBuffer(metabuf);
119 }
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:4540
ForkNumber
Definition: relpath.h:48
@ INIT_FORKNUM
Definition: relpath.h:53
uint32 hashm_maxbucket
Definition: hash.h:254

References _hash_initbitmapbuffer(), BLK_NEEDS_REDO, xl_hash_init_bitmap_page::bmsize, BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, FlushOneBuffer(), HashMetaPageData::hashm_mapp, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_nmaps, HashPageGetMeta, INIT_FORKNUM, MarkBufferDirty(), PageSetLSN(), UnlockReleaseBuffer(), XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by hash_redo().
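
The new bitmap page's block number recorded in hashm_mapp is num_buckets + 1 because a freshly built hash index lays out the metapage at block 0, the initial buckets at blocks 1..num_buckets, and the first bitmap page immediately after; the sketch below just spells out that arithmetic under that assumed layout:

#include <stdio.h>

int
main(void)
{
    unsigned num_buckets = 4;           /* initial buckets of a new index */
    unsigned metablk = 0;               /* metapage is always block 0 */
    unsigned first_bucket_blk = 1;
    unsigned first_bitmap_blk = num_buckets + 1;

    printf("meta=%u buckets=%u..%u bitmap=%u\n",
           metablk, first_bucket_blk, num_buckets, first_bitmap_blk);
    return 0;
}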

◆ hash_xlog_init_meta_page()

static void hash_xlog_init_meta_page ( XLogReaderState record)
static

Definition at line 27 of file hash_xlog.c.

28 {
29  XLogRecPtr lsn = record->EndRecPtr;
30  Page page;
31  Buffer metabuf;
32  ForkNumber forknum;
33 
34  xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
35 
36  /* create the index' metapage */
37  metabuf = XLogInitBufferForRedo(record, 0);
38  Assert(BufferIsValid(metabuf));
39  _hash_init_metabuffer(metabuf, xlrec->num_tuples, xlrec->procid,
40  xlrec->ffactor, true);
41  page = (Page) BufferGetPage(metabuf);
42  PageSetLSN(page, lsn);
43  MarkBufferDirty(metabuf);
44 
45  /*
46  * Force the on-disk state of init forks to always be in sync with the
47  * state in shared buffers. See XLogReadBufferForRedoExtended. We need
48  * special handling for init forks as create index operations don't log a
49  * full page image of the metapage.
50  */
51  XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
52  if (forknum == INIT_FORKNUM)
53  FlushOneBuffer(metabuf);
54 
55  /* all done */
56  UnlockReleaseBuffer(metabuf);
57 }
void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage)
Definition: hashpage.c:498
RegProcedure procid
Definition: hash_xlog.h:213

References _hash_init_metabuffer(), Assert(), BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, xl_hash_init_meta_page::ffactor, FlushOneBuffer(), INIT_FORKNUM, MarkBufferDirty(), xl_hash_init_meta_page::num_tuples, PageSetLSN(), xl_hash_init_meta_page::procid, UnlockReleaseBuffer(), XLogInitBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by hash_redo().
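
The num_tuples and ffactor fields replayed here drive the initial sizing: the index aims for roughly num_tuples / ffactor tuples per bucket. The sketch below shows only that first-order rule, rounding up to a power of two; the real _hash_init_metabuffer() sizes by splitpoint groups, so treat this as an approximation, not the exact computation:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough initial bucket count: about num_tuples / ffactor tuples per
 * bucket, rounded up to a power of two.  Simplified on purpose.
 */
static uint32_t
estimate_buckets(double num_tuples, uint16_t ffactor)
{
    double   target = num_tuples / ffactor;
    uint32_t nbuckets = 1;

    while (nbuckets < target && nbuckets < (UINT32_C(1) << 30))
        nbuckets <<= 1;
    return nbuckets;
}

int
main(void)
{
    printf("%u\n", estimate_buckets(100000.0, 75));   /* prints 2048 */
    return 0;
}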

◆ hash_xlog_insert()

static void hash_xlog_insert ( XLogReaderState record)
static

Definition at line 125 of file hash_xlog.c.

126 {
127  HashMetaPage metap;
128  XLogRecPtr lsn = record->EndRecPtr;
129  xl_hash_insert *xlrec = (xl_hash_insert *) XLogRecGetData(record);
130  Buffer buffer;
131  Page page;
132 
133  if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
134  {
135  Size datalen;
136  char *datapos = XLogRecGetBlockData(record, 0, &datalen);
137 
138  page = BufferGetPage(buffer);
139 
140  if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
141  false, false) == InvalidOffsetNumber)
142  elog(PANIC, "hash_xlog_insert: failed to add item");
143 
144  PageSetLSN(page, lsn);
145  MarkBufferDirty(buffer);
146  }
147  if (BufferIsValid(buffer))
148  UnlockReleaseBuffer(buffer);
149 
150  if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO)
151  {
152  /*
153  * Note: in normal operation, we'd update the metapage while still
154  * holding lock on the page we inserted into. But during replay it's
155  * not necessary to hold that lock, since no other index updates can
156  * be happening concurrently.
157  */
158  page = BufferGetPage(buffer);
159  metap = HashPageGetMeta(page);
160  metap->hashm_ntuples += 1;
161 
162  PageSetLSN(page, lsn);
163  MarkBufferDirty(buffer);
164  }
165  if (BufferIsValid(buffer))
166  UnlockReleaseBuffer(buffer);
167 }
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition: bufpage.h:468
Pointer Item
Definition: item.h:17
#define InvalidOffsetNumber
Definition: off.h:26
double hashm_ntuples
Definition: hash.h:248
OffsetNumber offnum
Definition: hash_xlog.h:58

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, HashMetaPageData::hashm_ntuples, HashPageGetMeta, InvalidOffsetNumber, MarkBufferDirty(), xl_hash_insert::offnum, PageAddItem, PageSetLSN(), PANIC, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by hash_redo().
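
XLogReadBufferForRedo() returns BLK_NEEDS_REDO only when the page's LSN shows the record has not been applied yet, and each redo branch finishes with PageSetLSN(); that pairing is what makes replay idempotent if recovery is restarted. A bare-bones model of that LSN gate, using a toy page rather than a real buffer:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lsn; int ntuples; } ToyPage;

/* Apply a record only if the page hasn't already seen it. */
static void
redo_insert(ToyPage *page, uint64_t record_end_lsn)
{
    if (page->lsn >= record_end_lsn)
        return;                     /* like BLK_DONE: already applied */

    page->ntuples++;                /* like BLK_NEEDS_REDO: apply change */
    page->lsn = record_end_lsn;     /* stamp the page with the record LSN */
}

int
main(void)
{
    ToyPage page = {0, 0};

    redo_insert(&page, 1000);
    redo_insert(&page, 1000);       /* replayed twice: still one tuple */
    printf("ntuples = %d\n", page.ntuples);
    return 0;
}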

◆ hash_xlog_move_page_contents()

static void hash_xlog_move_page_contents ( XLogReaderState record)
static

Definition at line 501 of file hash_xlog.c.

502 {
503  XLogRecPtr lsn = record->EndRecPtr;
504  xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record);
505  Buffer bucketbuf = InvalidBuffer;
506  Buffer writebuf = InvalidBuffer;
507  Buffer deletebuf = InvalidBuffer;
508  XLogRedoAction action;
509 
510  /*
511  * Ensure we have a cleanup lock on primary bucket page before we start
512  * with the actual replay operation. This is to ensure that neither a
513  * scan can start nor a scan can be already-in-progress during the replay
514  * of this operation. If we allow scans during this operation, then they
515  * can miss some records or show the same record multiple times.
516  */
517  if (xldata->is_prim_bucket_same_wrt)
518  action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
519  else
520  {
521  /*
522  * we don't care for return value as the purpose of reading bucketbuf
523  * is to ensure a cleanup lock on primary bucket page.
524  */
525  (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
526 
527  action = XLogReadBufferForRedo(record, 1, &writebuf);
528  }
529 
530  /* replay the record for adding entries in overflow buffer */
531  if (action == BLK_NEEDS_REDO)
532  {
533  Page writepage;
534  char *begin;
535  char *data;
536  Size datalen;
537  uint16 ninserted = 0;
538 
539  data = begin = XLogRecGetBlockData(record, 1, &datalen);
540 
541  writepage = (Page) BufferGetPage(writebuf);
542 
543  if (xldata->ntups > 0)
544  {
545  OffsetNumber *towrite = (OffsetNumber *) data;
546 
547  data += sizeof(OffsetNumber) * xldata->ntups;
548 
549  while (data - begin < datalen)
550  {
551  IndexTuple itup = (IndexTuple) data;
552  Size itemsz;
553  OffsetNumber l;
554 
555  itemsz = IndexTupleSize(itup);
556  itemsz = MAXALIGN(itemsz);
557 
558  data += itemsz;
559 
560  l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
561  if (l == InvalidOffsetNumber)
562  elog(ERROR, "hash_xlog_move_page_contents: failed to add item to hash index page, size %d bytes",
563  (int) itemsz);
564 
565  ninserted++;
566  }
567  }
568 
569  /*
570  * number of tuples inserted must be same as requested in REDO record.
571  */
572  Assert(ninserted == xldata->ntups);
573 
574  PageSetLSN(writepage, lsn);
575  MarkBufferDirty(writebuf);
576  }
577 
578  /* replay the record for deleting entries from overflow buffer */
579  if (XLogReadBufferForRedo(record, 2, &deletebuf) == BLK_NEEDS_REDO)
580  {
581  Page page;
582  char *ptr;
583  Size len;
584 
585  ptr = XLogRecGetBlockData(record, 2, &len);
586 
587  page = (Page) BufferGetPage(deletebuf);
588 
589  if (len > 0)
590  {
591  OffsetNumber *unused;
592  OffsetNumber *unend;
593 
594  unused = (OffsetNumber *) ptr;
595  unend = (OffsetNumber *) ((char *) ptr + len);
596 
597  if ((unend - unused) > 0)
598  PageIndexMultiDelete(page, unused, unend - unused);
599  }
600 
601  PageSetLSN(page, lsn);
602  MarkBufferDirty(deletebuf);
603  }
604 
605  /*
606  * Replay is complete, now we can release the buffers. We release locks at
607  * end of replay operation to ensure that we hold lock on primary bucket
608  * page till end of operation. We can optimize by releasing the lock on
609  * write buffer as soon as the operation for same is complete, if it is
610  * not same as primary bucket page, but that doesn't seem to be worth
611  * complicating the code.
612  */
613  if (BufferIsValid(deletebuf))
614  UnlockReleaseBuffer(deletebuf);
615 
616  if (BufferIsValid(writebuf))
617  UnlockReleaseBuffer(writebuf);
618 
619  if (BufferIsValid(bucketbuf))
620  UnlockReleaseBuffer(bucketbuf);
621 }
unsigned short uint16
Definition: c.h:492
#define MAXALIGN(LEN)
Definition: c.h:798
#define ERROR
Definition: elog.h:39
IndexTupleData * IndexTuple
Definition: itup.h:53
#define IndexTupleSize(itup)
Definition: itup.h:70
while(p+4<=pend)

References generate_unaccent_rules::action, Assert(), BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), data, elog, XLogReaderState::EndRecPtr, ERROR, IndexTupleSize, InvalidBuffer, InvalidOffsetNumber, xl_hash_move_page_contents::is_prim_bucket_same_wrt, len, MarkBufferDirty(), MAXALIGN, xl_hash_move_page_contents::ntups, PageAddItem, PageIndexMultiDelete(), PageSetLSN(), RBM_NORMAL, UnlockReleaseBuffer(), while(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by hash_redo().
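
The block-1 payload parsed above is an array of ntups target OffsetNumbers followed by the tuples themselves, each advanced over at MAXALIGN granularity; that is why the loop can walk 'data' until it reaches 'begin + datalen'. A self-contained sketch of that serialization walk, with a fixed toy tuple standing in for IndexTupleSize() and an 8-byte TOY_MAXALIGN standing in for the real macro:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAXALIGN(x) (((x) + 7) & ~(size_t) 7)

typedef uint16_t OffNum;
typedef struct { uint16_t size; char body[11]; } ToyTuple;  /* sizeof == 14 */

int
main(void)
{
    uint64_t storage[32];               /* 8-byte-aligned scratch buffer */
    char    *buf = (char *) storage;
    char    *p = buf;
    OffNum   offsets[2] = {4, 7};
    ToyTuple t = {sizeof(ToyTuple), "ten chars!"};

    /* serialize: the offset array, then each tuple padded to MAXALIGN */
    memcpy(p, offsets, sizeof(offsets));
    p += sizeof(offsets);
    for (int i = 0; i < 2; i++)
    {
        memcpy(p, &t, sizeof(t));
        p += TOY_MAXALIGN(sizeof(t));   /* 14 bytes of tuple, 16 consumed */
    }

    /* deserialize, mirroring the replay loop's pointer walk */
    char    *begin = buf;
    char    *data = buf;
    size_t   datalen = (size_t) (p - buf);
    OffNum  *towrite = (OffNum *) data;
    int      ninserted = 0;

    data += sizeof(OffNum) * 2;
    while ((size_t) (data - begin) < datalen)
    {
        ToyTuple *itup = (ToyTuple *) data;

        printf("place tuple '%s' at offset %u\n",
               itup->body, towrite[ninserted]);
        data += TOY_MAXALIGN(itup->size);
        ninserted++;
    }
    return 0;
}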

◆ hash_xlog_split_allocate_page()

static void hash_xlog_split_allocate_page ( XLogReaderState record)
static

Definition at line 311 of file hash_xlog.c.

312 {
313  XLogRecPtr lsn = record->EndRecPtr;
314  xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *) XLogRecGetData(record);
315  Buffer oldbuf;
316  Buffer newbuf;
317  Buffer metabuf;
318  Size datalen PG_USED_FOR_ASSERTS_ONLY;
319  char *data;
320  XLogRedoAction action;
321 
322  /*
323  * To be consistent with normal operation, here we take cleanup locks on
324  * both the old and new buckets even though there can't be any concurrent
325  * inserts.
326  */
327 
328  /* replay the record for old bucket */
329  action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &oldbuf);
330 
331  /*
332  * Note that we still update the page even if it was restored from a full
333  * page image, because the special space is not included in the image.
334  */
335  if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
336  {
337  Page oldpage;
338  HashPageOpaque oldopaque;
339 
340  oldpage = BufferGetPage(oldbuf);
341  oldopaque = HashPageGetOpaque(oldpage);
342 
343  oldopaque->hasho_flag = xlrec->old_bucket_flag;
344  oldopaque->hasho_prevblkno = xlrec->new_bucket;
345 
346  PageSetLSN(oldpage, lsn);
347  MarkBufferDirty(oldbuf);
348  }
349 
350  /* replay the record for new bucket */
351  XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_AND_CLEANUP_LOCK, true,
352  &newbuf);
353  _hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket,
354  xlrec->new_bucket_flag, true);
355  MarkBufferDirty(newbuf);
356  PageSetLSN(BufferGetPage(newbuf), lsn);
357 
358  /*
359  * We can release the lock on old bucket early as well but doing here to
360  * be consistent with normal operation.
361  */
362  if (BufferIsValid(oldbuf))
363  UnlockReleaseBuffer(oldbuf);
364  if (BufferIsValid(newbuf))
365  UnlockReleaseBuffer(newbuf);
366 
367  /*
368  * Note: in normal operation, we'd update the meta page while still
369  * holding lock on the old and new bucket pages. But during replay it's
370  * not necessary to hold those locks, since no other bucket splits can be
371  * happening concurrently.
372  */
373 
374  /* replay the record for metapage changes */
375  if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO)
376  {
377  Page page;
378  HashMetaPage metap;
379 
380  page = BufferGetPage(metabuf);
381  metap = HashPageGetMeta(page);
382  metap->hashm_maxbucket = xlrec->new_bucket;
383 
384  data = XLogRecGetBlockData(record, 2, &datalen);
385 
386  if (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS)
387  {
388  uint32 lowmask;
389  uint32 *highmask;
390 
391  /* extract low and high masks. */
392  memcpy(&lowmask, data, sizeof(uint32));
393  highmask = (uint32 *) ((char *) data + sizeof(uint32));
394 
395  /* update metapage */
396  metap->hashm_lowmask = lowmask;
397  metap->hashm_highmask = *highmask;
398 
399  data += sizeof(uint32) * 2;
400  }
401 
402  if (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT)
403  {
404  uint32 ovflpoint;
405  uint32 *ovflpages;
406 
407  /* extract information of overflow pages. */
408  memcpy(&ovflpoint, data, sizeof(uint32));
409  ovflpages = (uint32 *) ((char *) data + sizeof(uint32));
410 
411  /* update metapage */
412  metap->hashm_spares[ovflpoint] = *ovflpages;
413  metap->hashm_ovflpoint = ovflpoint;
414  }
415 
416  MarkBufferDirty(metabuf);
417  PageSetLSN(BufferGetPage(metabuf), lsn);
418  }
419 
420  if (BufferIsValid(metabuf))
421  UnlockReleaseBuffer(metabuf);
422 }
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:47
#define XLH_SPLIT_META_UPDATE_SPLITPOINT
Definition: hash_xlog.h:46
#define XLH_SPLIT_META_UPDATE_MASKS
Definition: hash_xlog.h:45
uint32 hashm_lowmask
Definition: hash.h:256
uint32 hashm_highmask
Definition: hash.h:255
@ BLK_RESTORED
Definition: xlogutils.h:73

References _hash_initbuf(), generate_unaccent_rules::action, BLK_NEEDS_REDO, BLK_RESTORED, BufferGetPage(), BufferIsValid(), data, XLogReaderState::EndRecPtr, xl_hash_split_allocate_page::flags, HashMetaPageData::hashm_highmask, HashMetaPageData::hashm_lowmask, HashMetaPageData::hashm_maxbucket, HashMetaPageData::hashm_ovflpoint, HashMetaPageData::hashm_spares, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_prevblkno, HashPageGetMeta, HashPageGetOpaque, MarkBufferDirty(), xl_hash_split_allocate_page::new_bucket, xl_hash_split_allocate_page::new_bucket_flag, xl_hash_split_allocate_page::old_bucket_flag, PageSetLSN(), PG_USED_FOR_ASSERTS_ONLY, RBM_NORMAL, RBM_ZERO_AND_CLEANUP_LOCK, UnlockReleaseBuffer(), XLH_SPLIT_META_UPDATE_MASKS, XLH_SPLIT_META_UPDATE_SPLITPOINT, XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by hash_redo().
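
The lowmask/highmask pair replayed into the metapage exists so that a hash value can always be mapped to a bucket that exists mid-expansion: take the value modulo the larger table size, and fall back to the smaller one if that bucket has not been split off yet. This mirrors the rule in _hash_hashkey2bucket(); a standalone demonstration:

#include <stdint.h>
#include <stdio.h>

/*
 * Map a hash value to an existing bucket during incremental expansion,
 * mirroring the logic of _hash_hashkey2bucket().
 */
static uint32_t
key_to_bucket(uint32_t hashval, uint32_t maxbucket,
              uint32_t highmask, uint32_t lowmask)
{
    uint32_t bucket = hashval & highmask;

    if (bucket > maxbucket)
        bucket &= lowmask;      /* that bucket isn't split off yet */
    return bucket;
}

int
main(void)
{
    /* table grown from 4 buckets; currently 6 exist (maxbucket = 5) */
    uint32_t maxbucket = 5, highmask = 7, lowmask = 3;

    for (uint32_t h = 0; h < 8; h++)
        printf("hash %u -> bucket %u\n", h,
               key_to_bucket(h, maxbucket, highmask, lowmask));
    return 0;
}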

◆ hash_xlog_split_cleanup()

static void hash_xlog_split_cleanup ( XLogReaderState record)
static

Definition at line 922 of file hash_xlog.c.

923 {
924  XLogRecPtr lsn = record->EndRecPtr;
925  Buffer buffer;
926  Page page;
927 
928  if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
929  {
930  HashPageOpaque bucket_opaque;
931 
932  page = (Page) BufferGetPage(buffer);
933 
934  bucket_opaque = HashPageGetOpaque(page);
935  bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
936  PageSetLSN(page, lsn);
937  MarkBufferDirty(buffer);
938  }
939  if (BufferIsValid(buffer))
940  UnlockReleaseBuffer(buffer);
941 }
#define LH_BUCKET_NEEDS_SPLIT_CLEANUP
Definition: hash.h:60

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, HashPageOpaqueData::hasho_flag, HashPageGetOpaque, LH_BUCKET_NEEDS_SPLIT_CLEANUP, MarkBufferDirty(), PageSetLSN(), UnlockReleaseBuffer(), and XLogReadBufferForRedo().

Referenced by hash_redo().

◆ hash_xlog_split_complete()

static void hash_xlog_split_complete ( XLogReaderState record)
static

Definition at line 442 of file hash_xlog.c.

443 {
444  XLogRecPtr lsn = record->EndRecPtr;
445  xl_hash_split_complete *xlrec = (xl_hash_split_complete *) XLogRecGetData(record);
446  Buffer oldbuf;
447  Buffer newbuf;
448  XLogRedoAction action;
449 
450  /* replay the record for old bucket */
451  action = XLogReadBufferForRedo(record, 0, &oldbuf);
452 
453  /*
454  * Note that we still update the page even if it was restored from a full
455  * page image, because the bucket flag is not included in the image.
456  */
457  if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
458  {
459  Page oldpage;
460  HashPageOpaque oldopaque;
461 
462  oldpage = BufferGetPage(oldbuf);
463  oldopaque = HashPageGetOpaque(oldpage);
464 
465  oldopaque->hasho_flag = xlrec->old_bucket_flag;
466 
467  PageSetLSN(oldpage, lsn);
468  MarkBufferDirty(oldbuf);
469  }
470  if (BufferIsValid(oldbuf))
471  UnlockReleaseBuffer(oldbuf);
472 
473  /* replay the record for new bucket */
474  action = XLogReadBufferForRedo(record, 1, &newbuf);
475 
476  /*
477  * Note that we still update the page even if it was restored from a full
478  * page image, because the bucket flag is not included in the image.
479  */
480  if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
481  {
482  Page newpage;
483  HashPageOpaque nopaque;
484 
485  newpage = BufferGetPage(newbuf);
486  nopaque = HashPageGetOpaque(newpage);
487 
488  nopaque->hasho_flag = xlrec->new_bucket_flag;
489 
490  PageSetLSN(newpage, lsn);
491  MarkBufferDirty(newbuf);
492  }
493  if (BufferIsValid(newbuf))
494  UnlockReleaseBuffer(newbuf);
495 }

References generate_unaccent_rules::action, BLK_NEEDS_REDO, BLK_RESTORED, BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, HashPageOpaqueData::hasho_flag, HashPageGetOpaque, MarkBufferDirty(), xl_hash_split_complete::new_bucket_flag, xl_hash_split_complete::old_bucket_flag, PageSetLSN(), UnlockReleaseBuffer(), XLogReadBufferForRedo(), and XLogRecGetData.

Referenced by hash_redo().

◆ hash_xlog_split_page()

static void hash_xlog_split_page ( XLogReaderState record)
static

Definition at line 428 of file hash_xlog.c.

429 {
430  Buffer buf;
431 
432  if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
433  elog(ERROR, "Hash split record did not contain a full-page image");
434 
435  UnlockReleaseBuffer(buf);
436 }
static char * buf
Definition: pg_test_fsync.c:73

References BLK_RESTORED, buf, elog, ERROR, UnlockReleaseBuffer(), and XLogReadBufferForRedo().

Referenced by hash_redo().

◆ hash_xlog_squeeze_page()

static void hash_xlog_squeeze_page ( XLogReaderState record)
static

Definition at line 627 of file hash_xlog.c.

628 {
629  XLogRecPtr lsn = record->EndRecPtr;
630  xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
631  Buffer bucketbuf = InvalidBuffer;
632  Buffer writebuf = InvalidBuffer;
633  Buffer ovflbuf;
634  Buffer prevbuf = InvalidBuffer;
635  Buffer mapbuf;
636  XLogRedoAction action;
637 
638  /*
639  * Ensure we have a cleanup lock on primary bucket page before we start
640  * with the actual replay operation. This is to ensure that neither a
641  * scan can start nor a scan can be already-in-progress during the replay
642  * of this operation. If we allow scans during this operation, then they
643  * can miss some records or show the same record multiple times.
644  */
645  if (xldata->is_prim_bucket_same_wrt)
646  action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
647  else
648  {
649  /*
650  * we don't care for return value as the purpose of reading bucketbuf
651  * is to ensure a cleanup lock on primary bucket page.
652  */
653  (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
654 
655  if (xldata->ntups > 0 || xldata->is_prev_bucket_same_wrt)
656  action = XLogReadBufferForRedo(record, 1, &writebuf);
657  else
658  action = BLK_NOTFOUND; /* probably shouldn't happen */
659  }
660 
661  /* replay the record for adding entries in overflow buffer */
662  if (action == BLK_NEEDS_REDO)
663  {
664  Page writepage;
665  char *begin;
666  char *data;
667  Size datalen;
668  uint16 ninserted = 0;
669 
670  data = begin = XLogRecGetBlockData(record, 1, &datalen);
671 
672  writepage = (Page) BufferGetPage(writebuf);
673 
674  if (xldata->ntups > 0)
675  {
676  OffsetNumber *towrite = (OffsetNumber *) data;
677 
678  data += sizeof(OffsetNumber) * xldata->ntups;
679 
680  while (data - begin < datalen)
681  {
682  IndexTuple itup = (IndexTuple) data;
683  Size itemsz;
684  OffsetNumber l;
685 
686  itemsz = IndexTupleSize(itup);
687  itemsz = MAXALIGN(itemsz);
688 
689  data += itemsz;
690 
691  l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
692  if (l == InvalidOffsetNumber)
693  elog(ERROR, "hash_xlog_squeeze_page: failed to add item to hash index page, size %d bytes",
694  (int) itemsz);
695 
696  ninserted++;
697  }
698  }
699 
700  /*
701  * number of tuples inserted must be same as requested in REDO record.
702  */
703  Assert(ninserted == xldata->ntups);
704 
705  /*
706  * if the page on which are adding tuples is a page previous to freed
707  * overflow page, then update its nextblkno.
708  */
709  if (xldata->is_prev_bucket_same_wrt)
710  {
711  HashPageOpaque writeopaque = HashPageGetOpaque(writepage);
712 
713  writeopaque->hasho_nextblkno = xldata->nextblkno;
714  }
715 
716  PageSetLSN(writepage, lsn);
717  MarkBufferDirty(writebuf);
718  }
719 
720  /* replay the record for initializing overflow buffer */
721  if (XLogReadBufferForRedo(record, 2, &ovflbuf) == BLK_NEEDS_REDO)
722  {
723  Page ovflpage;
724  HashPageOpaque ovflopaque;
725 
726  ovflpage = BufferGetPage(ovflbuf);
727 
728  _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
729 
730  ovflopaque = HashPageGetOpaque(ovflpage);
731 
732  ovflopaque->hasho_prevblkno = InvalidBlockNumber;
733  ovflopaque->hasho_nextblkno = InvalidBlockNumber;
734  ovflopaque->hasho_bucket = InvalidBucket;
735  ovflopaque->hasho_flag = LH_UNUSED_PAGE;
736  ovflopaque->hasho_page_id = HASHO_PAGE_ID;
737 
738  PageSetLSN(ovflpage, lsn);
739  MarkBufferDirty(ovflbuf);
740  }
741  if (BufferIsValid(ovflbuf))
742  UnlockReleaseBuffer(ovflbuf);
743 
744  /* replay the record for page previous to the freed overflow page */
745  if (!xldata->is_prev_bucket_same_wrt &&
746  XLogReadBufferForRedo(record, 3, &prevbuf) == BLK_NEEDS_REDO)
747  {
748  Page prevpage = BufferGetPage(prevbuf);
749  HashPageOpaque prevopaque = HashPageGetOpaque(prevpage);
750 
751  prevopaque->hasho_nextblkno = xldata->nextblkno;
752 
753  PageSetLSN(prevpage, lsn);
754  MarkBufferDirty(prevbuf);
755  }
756  if (BufferIsValid(prevbuf))
757  UnlockReleaseBuffer(prevbuf);
758 
759  /* replay the record for page next to the freed overflow page */
760  if (XLogRecHasBlockRef(record, 4))
761  {
762  Buffer nextbuf;
763 
764  if (XLogReadBufferForRedo(record, 4, &nextbuf) == BLK_NEEDS_REDO)
765  {
766  Page nextpage = BufferGetPage(nextbuf);
767  HashPageOpaque nextopaque = HashPageGetOpaque(nextpage);
768 
769  nextopaque->hasho_prevblkno = xldata->prevblkno;
770 
771  PageSetLSN(nextpage, lsn);
772  MarkBufferDirty(nextbuf);
773  }
774  if (BufferIsValid(nextbuf))
775  UnlockReleaseBuffer(nextbuf);
776  }
777 
778  if (BufferIsValid(writebuf))
779  UnlockReleaseBuffer(writebuf);
780 
781  if (BufferIsValid(bucketbuf))
782  UnlockReleaseBuffer(bucketbuf);
783 
784  /*
785  * Note: in normal operation, we'd update the bitmap and meta page while
786  * still holding lock on the primary bucket page and overflow pages. But
787  * during replay it's not necessary to hold those locks, since no other
788  * index updates can be happening concurrently.
789  */
790  /* replay the record for bitmap page */
791  if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO)
792  {
793  Page mappage = (Page) BufferGetPage(mapbuf);
794  uint32 *freep = NULL;
795  char *data;
796  uint32 *bitmap_page_bit;
797  Size datalen;
798 
799  freep = HashPageGetBitmap(mappage);
800 
801  data = XLogRecGetBlockData(record, 5, &datalen);
802  bitmap_page_bit = (uint32 *) data;
803 
804  CLRBIT(freep, *bitmap_page_bit);
805 
806  PageSetLSN(mappage, lsn);
807  MarkBufferDirty(mapbuf);
808  }
809  if (BufferIsValid(mapbuf))
810  UnlockReleaseBuffer(mapbuf);
811 
812  /* replay the record for meta page */
813  if (XLogRecHasBlockRef(record, 6))
814  {
815  Buffer metabuf;
816 
817  if (XLogReadBufferForRedo(record, 6, &metabuf) == BLK_NEEDS_REDO)
818  {
819  HashMetaPage metap;
820  Page page;
821  char *data;
822  uint32 *firstfree_ovflpage;
823  Size datalen;
824 
825  data = XLogRecGetBlockData(record, 6, &datalen);
826  firstfree_ovflpage = (uint32 *) data;
827 
828  page = BufferGetPage(metabuf);
829  metap = HashPageGetMeta(page);
830  metap->hashm_firstfree = *firstfree_ovflpage;
831 
832  PageSetLSN(page, lsn);
833  MarkBufferDirty(metabuf);
834  }
835  if (BufferIsValid(metabuf))
836  UnlockReleaseBuffer(metabuf);
837  }
838 }
#define CLRBIT(x, i)
Definition: blutils.c:31
static Size BufferGetPageSize(Buffer buffer)
Definition: bufmgr.h:339
#define HASHO_PAGE_ID
Definition: hash.h:101
#define InvalidBucket
Definition: hash.h:37
void _hash_pageinit(Page page, Size size)
Definition: hashpage.c:596
uint16 hasho_page_id
Definition: hash.h:83
Bucket hasho_bucket
Definition: hash.h:81
BlockNumber prevblkno
Definition: hash_xlog.h:155
bool is_prim_bucket_same_wrt
Definition: hash_xlog.h:158
bool is_prev_bucket_same_wrt
Definition: hash_xlog.h:161
BlockNumber nextblkno
Definition: hash_xlog.h:156
@ BLK_NOTFOUND
Definition: xlogutils.h:74

References _hash_pageinit(), generate_unaccent_rules::action, Assert(), BLK_NEEDS_REDO, BLK_NOTFOUND, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CLRBIT, data, elog, XLogReaderState::EndRecPtr, ERROR, HashMetaPageData::hashm_firstfree, HashPageOpaqueData::hasho_bucket, HashPageOpaqueData::hasho_flag, HashPageOpaqueData::hasho_nextblkno, HashPageOpaqueData::hasho_page_id, HASHO_PAGE_ID, HashPageOpaqueData::hasho_prevblkno, HashPageGetBitmap, HashPageGetMeta, HashPageGetOpaque, IndexTupleSize, InvalidBlockNumber, InvalidBucket, InvalidBuffer, InvalidOffsetNumber, xl_hash_squeeze_page::is_prev_bucket_same_wrt, xl_hash_squeeze_page::is_prim_bucket_same_wrt, LH_UNUSED_PAGE, MarkBufferDirty(), MAXALIGN, xl_hash_squeeze_page::nextblkno, xl_hash_squeeze_page::ntups, PageAddItem, PageSetLSN(), xl_hash_squeeze_page::prevblkno, RBM_NORMAL, UnlockReleaseBuffer(), while(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetData, and XLogRecHasBlockRef.

Referenced by hash_redo().
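
Blocks 3 and 4 above are the two halves of a doubly-linked-list unlink: the page before the freed overflow page gets its hasho_nextblkno pointed past it, and the page after (if any) gets its hasho_prevblkno pointed back. A plain-C sketch of that relinking on an in-memory list, with pointers standing in for block numbers:

#include <stdio.h>
#include <stddef.h>

typedef struct ToyOvflPage
{
    int                 blkno;
    struct ToyOvflPage *prev;
    struct ToyOvflPage *next;
} ToyOvflPage;

/* Unlink 'dead' from its chain, as squeeze does with prev/next blknos. */
static void
unlink_page(ToyOvflPage *dead)
{
    if (dead->prev)
        dead->prev->next = dead->next;  /* the block-3 half */
    if (dead->next)
        dead->next->prev = dead->prev;  /* the block-4 half */
    dead->prev = dead->next = NULL;     /* page is now LH_UNUSED-like */
}

int
main(void)
{
    ToyOvflPage a = {1, NULL, NULL}, b = {2, NULL, NULL}, c = {3, NULL, NULL};

    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
    unlink_page(&b);

    for (ToyOvflPage *p = &a; p; p = p->next)
        printf("blkno %d\n", p->blkno);     /* prints 1 then 3 */
    return 0;
}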

◆ hash_xlog_update_meta_page()

static void hash_xlog_update_meta_page ( XLogReaderState record)
static

Definition at line 947 of file hash_xlog.c.

948 {
949  HashMetaPage metap;
950  XLogRecPtr lsn = record->EndRecPtr;
951  xl_hash_update_meta_page *xldata = (xl_hash_update_meta_page *) XLogRecGetData(record);
952  Buffer metabuf;
953  Page page;
954 
955  if (XLogReadBufferForRedo(record, 0, &metabuf) == BLK_NEEDS_REDO)
956  {
957  page = BufferGetPage(metabuf);
958  metap = HashPageGetMeta(page);
959 
960  metap->hashm_ntuples = xldata->ntuples;
961 
962  PageSetLSN(page, lsn);
963  MarkBufferDirty(metabuf);
964  }
965  if (BufferIsValid(metabuf))
966  UnlockReleaseBuffer(metabuf);
967 }

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, HashMetaPageData::hashm_ntuples, HashPageGetMeta, MarkBufferDirty(), xl_hash_update_meta_page::ntuples, PageSetLSN(), UnlockReleaseBuffer(), XLogReadBufferForRedo(), and XLogRecGetData.

Referenced by hash_redo().

◆ hash_xlog_vacuum_one_page()

static void hash_xlog_vacuum_one_page ( XLogReaderState record)
static

Definition at line 974 of file hash_xlog.c.

975 {
976  XLogRecPtr lsn = record->EndRecPtr;
977  xl_hash_vacuum_one_page *xldata;
978  Buffer buffer;
979  Buffer metabuf;
980  Page page;
981  XLogRedoAction action;
982  HashPageOpaque pageopaque;
983  OffsetNumber *toDelete;
984 
985  xldata = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
986  toDelete = xldata->offsets;
987 
988  /*
989  * If we have any conflict processing to do, it must happen before we
990  * update the page.
991  *
992  * Hash index records that are marked as LP_DEAD and being removed during
993  * hash index tuple insertion can conflict with standby queries. You might
994  * think that vacuum records would conflict as well, but we've handled
995  * that already. XLOG_HEAP2_PRUNE_VACUUM_SCAN records provide the highest
996  * xid cleaned by the vacuum of the heap and so we can resolve any
997  * conflicts just once when that arrives. After that we know that no
998  * conflicts exist from individual hash index vacuum records on that
999  * index.
1000  */
1001  if (InHotStandby)
1002  {
1003  RelFileLocator rlocator;
1004 
1005  XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
1006  ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
1007  xldata->isCatalogRel,
1008  rlocator);
1009  }
1010 
1011  action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);
1012 
1013  if (action == BLK_NEEDS_REDO)
1014  {
1015  page = (Page) BufferGetPage(buffer);
1016 
1017  PageIndexMultiDelete(page, toDelete, xldata->ntuples);
1018 
1019  /*
1020  * Mark the page as not containing any LP_DEAD items. See comments in
1021  * _hash_vacuum_one_page() for details.
1022  */
1023  pageopaque = HashPageGetOpaque(page);
1024  pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
1025 
1026  PageSetLSN(page, lsn);
1027  MarkBufferDirty(buffer);
1028  }
1029  if (BufferIsValid(buffer))
1030  UnlockReleaseBuffer(buffer);
1031 
1032  if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
1033  {
1034  Page metapage;
1035  HashMetaPage metap;
1036 
1037  metapage = BufferGetPage(metabuf);
1038  metap = HashPageGetMeta(metapage);
1039 
1040  metap->hashm_ntuples -= xldata->ntuples;
1041 
1042  PageSetLSN(metapage, lsn);
1043  MarkBufferDirty(metabuf);
1044  }
1045  if (BufferIsValid(metabuf))
1046  UnlockReleaseBuffer(metabuf);
1047 }
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
Definition: standby.c:467
TransactionId snapshotConflictHorizon
Definition: hash_xlog.h:247
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
Definition: hash_xlog.h:253
#define InHotStandby
Definition: xlogutils.h:57

References generate_unaccent_rules::action, BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), XLogReaderState::EndRecPtr, HashMetaPageData::hashm_ntuples, HashPageOpaqueData::hasho_flag, HashPageGetMeta, HashPageGetOpaque, InHotStandby, xl_hash_vacuum_one_page::isCatalogRel, LH_PAGE_HAS_DEAD_TUPLES, MarkBufferDirty(), xl_hash_vacuum_one_page::ntuples, xl_hash_vacuum_one_page::offsets, PageIndexMultiDelete(), PageSetLSN(), RBM_NORMAL, ResolveRecoveryConflictWithSnapshot(), xl_hash_vacuum_one_page::snapshotConflictHorizon, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by hash_redo().