PostgreSQL Source Code  git master
xloginsert.c
1 /*-------------------------------------------------------------------------
2  *
3  * xloginsert.c
4  * Functions for constructing WAL records
5  *
6  * Constructing a WAL record begins with a call to XLogBeginInsert,
7  * followed by a number of XLogRegister* calls. The registered data is
8  * collected in private working memory, and finally assembled into a chain
9  * of XLogRecData structs by a call to XLogRecordAssemble(). See
10  * access/transam/README for details.
11  *
12  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
13  * Portions Copyright (c) 1994, Regents of the University of California
14  *
15  * src/backend/access/transam/xloginsert.c
16  *
17  *-------------------------------------------------------------------------
18  */
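As a quick orientation, a typical caller of this API follows the pattern sketched below. This is a minimal sketch, not code from this file: the rmgr id RM_FOO_ID, the info code XLOG_FOO_OP, the struct xl_foo_op and the already locked/dirtied Buffer "buffer" are hypothetical stand-ins; see access/transam/README and real rmgrs for authoritative usage.

	xl_foo_op	xlrec;
	XLogRecPtr	recptr;

	/* fill in the record's "main data" */
	xlrec.flags = 0;

	XLogBeginInsert();
	XLogRegisterData((char *) &xlrec, sizeof(xl_foo_op));

	/* register the buffer the operation modifies; standard page layout */
	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

	recptr = XLogInsert(RM_FOO_ID, XLOG_FOO_OP);

	/* stamp the page with the record's LSN: "write the log before the data" */
	PageSetLSN(BufferGetPage(buffer), recptr);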
19 
20 #include "postgres.h"
21 
22 #include "access/xact.h"
23 #include "access/xlog.h"
24 #include "access/xlog_internal.h"
25 #include "access/xloginsert.h"
26 #include "catalog/pg_control.h"
27 #include "common/pg_lzcompress.h"
28 #include "executor/instrument.h"
29 #include "miscadmin.h"
30 #include "pg_trace.h"
31 #include "replication/origin.h"
32 #include "storage/bufmgr.h"
33 #include "storage/proc.h"
34 #include "utils/memutils.h"
35 
36 /*
37  * Guess the maximum buffer size required to store a compressed version of
38  * a backup block image.
39  */
40 #ifdef USE_LZ4
41 #include <lz4.h>
42 #define LZ4_MAX_BLCKSZ LZ4_COMPRESSBOUND(BLCKSZ)
43 #else
44 #define LZ4_MAX_BLCKSZ 0
45 #endif
46 
47 #define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
48 
49 #define COMPRESS_BUFSIZE Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ)
50 
51 /*
52  * For each block reference registered with XLogRegisterBuffer, we fill in
53  * a registered_buffer struct.
54  */
55 typedef struct
56 {
57  bool in_use; /* is this slot in use? */
58  uint8 flags; /* REGBUF_* flags */
59  RelFileNode rnode; /* identifies the relation and block */
60  ForkNumber forkno;
61  BlockNumber block;
62  Page page; /* page content */
63  uint32 rdata_len; /* total length of data in rdata chain */
64  XLogRecData *rdata_head; /* head of the chain of data registered with
65  * this block */
66  XLogRecData *rdata_tail; /* last entry in the chain, or &rdata_head if
67  * empty */
68 
69  XLogRecData bkp_rdatas[2]; /* temporary rdatas used to hold references to
70  * backup block data in XLogRecordAssemble() */
71 
72  /* buffer to store a compressed version of backup block image */
73  char compressed_page[COMPRESS_BUFSIZE];
74 } registered_buffer;
75 
76 static registered_buffer *registered_buffers;
77 static int max_registered_buffers; /* allocated size */
78 static int max_registered_block_id = 0; /* highest block_id + 1 currently
79  * registered */
80 
81 /*
82  * A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
83  * with XLogRegisterData(...).
84  */
85 static XLogRecData *mainrdata_head;
86 static XLogRecData *mainrdata_last = (XLogRecData *) &mainrdata_head;
87 static uint32 mainrdata_len; /* total # of bytes in chain */
88 
89 /* flags for the in-progress insertion */
90 static uint8 curinsert_flags = 0;
91 
92 /*
93  * These are used to hold the record header while constructing a record.
94  * 'hdr_scratch' is not a plain variable, but is palloc'd at initialization,
95  * because we want it to be MAXALIGNed and padding bytes zeroed.
96  *
97  * For simplicity, it's allocated large enough to hold the headers for any
98  * WAL record.
99  */
100 static XLogRecData hdr_rdt;
101 static char *hdr_scratch = NULL;
102 
103 #define SizeOfXlogOrigin (sizeof(RepOriginId) + sizeof(char))
104 #define SizeOfXLogTransactionId (sizeof(TransactionId) + sizeof(char))
105 
106 #define HEADER_SCRATCH_SIZE \
107  (SizeOfXLogRecord + \
108  MaxSizeOfXLogRecordBlockHeader * (XLR_MAX_BLOCK_ID + 1) + \
109  SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin + \
110  SizeOfXLogTransactionId)
111 
112 /*
113  * An array of XLogRecData structs, to hold registered data.
114  */
115 static XLogRecData *rdatas;
116 static int num_rdatas; /* entries currently used */
117 static int max_rdatas; /* allocated size */
118 
119 static bool begininsert_called = false;
120 
121 /* Memory context to hold the registered buffer and data references. */
122 static MemoryContext xloginsert_cxt;
123 
124 static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
125  XLogRecPtr RedoRecPtr, bool doPageWrites,
126  XLogRecPtr *fpw_lsn, int *num_fpi);
127 static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
128  uint16 hole_length, char *dest, uint16 *dlen);
129 
130 /*
131  * Begin constructing a WAL record. This must be called before the
132  * XLogRegister* functions and XLogInsert().
133  */
134 void
135 XLogBeginInsert(void)
136 {
137  Assert(max_registered_block_id == 0);
138  Assert(mainrdata_last == (XLogRecData *) &mainrdata_head);
139  Assert(mainrdata_len == 0);
140 
141  /* cross-check on whether we should be here or not */
142  if (!XLogInsertAllowed())
143  elog(ERROR, "cannot make new WAL entries during recovery");
144 
145  if (begininsert_called)
146  elog(ERROR, "XLogBeginInsert was already called");
147 
148  begininsert_called = true;
149 }
150 
151 /*
152  * Ensure that there are enough buffer and data slots in the working area,
153  * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
154  * calls.
155  *
156  * There is always space for a small number of buffers and data chunks, enough
157  * for most record types. This function is for the exceptional cases that need
158  * more.
159  */
160 void
161 XLogEnsureRecordSpace(int max_block_id, int ndatas)
162 {
163  int nbuffers;
164 
165  /*
166  * This must be called before entering a critical section, because
167  * allocating memory inside a critical section can fail. repalloc() will
168  * check the same, but better to check it here too so that we fail
169  * consistently even if the arrays happen to be large enough already.
170  */
171  Assert(CritSectionCount == 0);
172 
173  /* the minimum values can't be decreased */
174  if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
175  max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
176  if (ndatas < XLR_NORMAL_RDATAS)
177  ndatas = XLR_NORMAL_RDATAS;
178 
179  if (max_block_id > XLR_MAX_BLOCK_ID)
180  elog(ERROR, "maximum number of WAL record block references exceeded");
181  nbuffers = max_block_id + 1;
182 
183  if (nbuffers > max_registered_buffers)
184  {
185  registered_buffers = (registered_buffer *)
186  repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
187 
188  /*
189  * At least the padding bytes in the structs must be zeroed, because
190  * they are included in WAL data, but initialize it all for tidiness.
191  */
192  MemSet(&registered_buffers[max_registered_buffers], 0,
193  (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
194  max_registered_buffers = nbuffers;
195  }
196 
197  if (ndatas > max_rdatas)
198  {
199  rdatas = (XLogRecData *) repalloc(rdatas, sizeof(XLogRecData) * ndatas);
200  max_rdatas = ndatas;
201  }
202 }
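For example, an operation that will reference more blocks or data chunks than the defaults might reserve space like this before entering its critical section (a sketch; the counts are purely illustrative):

	/* we are about to register up to 10 blocks and 20 data chunks */
	XLogEnsureRecordSpace(10, 20);

	START_CRIT_SECTION();
	/* ... modify pages, XLogBeginInsert(), XLogRegister*, XLogInsert() ... */
	END_CRIT_SECTION();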
203 
204 /*
205  * Reset WAL record construction buffers.
206  */
207 void
208 XLogResetInsertion(void)
209 {
210  int i;
211 
212  /* reset the subxact assignment flag (if needed) */
213  if (curinsert_flags & XLOG_INCLUDE_XID)
214  MarkSubTransactionAssigned();
215 
216  for (i = 0; i < max_registered_block_id; i++)
217  registered_buffers[i].in_use = false;
218 
219  num_rdatas = 0;
220  max_registered_block_id = 0;
221  mainrdata_len = 0;
222  mainrdata_last = (XLogRecData *) &mainrdata_head;
223  curinsert_flags = 0;
224  begininsert_called = false;
225 }
226 
227 /*
228  * Register a reference to a buffer with the WAL record being constructed.
229  * This must be called for every page that the WAL-logged operation modifies.
230  */
231 void
232 XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
233 {
234  registered_buffer *regbuf;
235 
236  /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
237  Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
238  Assert(begininsert_called);
239 
240  if (block_id >= max_registered_block_id)
241  {
242  if (block_id >= max_registered_buffers)
243  elog(ERROR, "too many registered buffers");
244  max_registered_block_id = block_id + 1;
245  }
246 
247  regbuf = &registered_buffers[block_id];
248 
249  BufferGetTag(buffer, &regbuf->rnode, &regbuf->forkno, &regbuf->block);
250  regbuf->page = BufferGetPage(buffer);
251  regbuf->flags = flags;
252  regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
253  regbuf->rdata_len = 0;
254 
255  /*
256  * Check that this page hasn't already been registered with some other
257  * block_id.
258  */
259 #ifdef USE_ASSERT_CHECKING
260  {
261  int i;
262 
263  for (i = 0; i < max_registered_block_id; i++)
264  {
265  registered_buffer *regbuf_old = &registered_buffers[i];
266 
267  if (i == block_id || !regbuf_old->in_use)
268  continue;
269 
270  Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
271  regbuf_old->forkno != regbuf->forkno ||
272  regbuf_old->block != regbuf->block);
273  }
274  }
275 #endif
276 
277  regbuf->in_use = true;
278 }
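The REGBUF_* flags passed here drive the full-page-image decisions made later in XLogRecordAssemble(). A hypothetical caller might register its buffers like this (sketch only; the buffer variables are assumptions):

	/* ordinary change to a page with a standard header: the hole can be elided */
	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

	/* page is rebuilt from the record at replay, so no image is needed */
	XLogRegisterBuffer(1, metabuf, REGBUF_WILL_INIT);

	/* always take a full-page image of this one */
	XLogRegisterBuffer(2, otherbuf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);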
279 
280 /*
281  * Like XLogRegisterBuffer, but for registering a block that's not in the
282  * shared buffer pool (i.e. when you don't have a Buffer for it).
283  */
284 void
285 XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
286  BlockNumber blknum, Page page, uint8 flags)
287 {
288  registered_buffer *regbuf;
289 
290  Assert(begininsert_called);
291 
292  if (block_id >= max_registered_block_id)
293  max_registered_block_id = block_id + 1;
294 
295  if (block_id >= max_registered_buffers)
296  elog(ERROR, "too many registered buffers");
297 
298  regbuf = &registered_buffers[block_id];
299 
300  regbuf->rnode = *rnode;
301  regbuf->forkno = forknum;
302  regbuf->block = blknum;
303  regbuf->page = page;
304  regbuf->flags = flags;
305  regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
306  regbuf->rdata_len = 0;
307 
308  /*
309  * Check that this page hasn't already been registered with some other
310  * block_id.
311  */
312 #ifdef USE_ASSERT_CHECKING
313  {
314  int i;
315 
316  for (i = 0; i < max_registered_block_id; i++)
317  {
318  registered_buffer *regbuf_old = &registered_buffers[i];
319 
320  if (i == block_id || !regbuf_old->in_use)
321  continue;
322 
323  Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
324  regbuf_old->forkno != regbuf->forkno ||
325  regbuf_old->block != regbuf->block);
326  }
327  }
328 #endif
329 
330  regbuf->in_use = true;
331 }
332 
333 /*
334  * Add data to the WAL record that's being constructed.
335  *
336  * The data is appended to the "main chunk", available at replay with
337  * XLogRecGetData().
338  */
339 void
340 XLogRegisterData(char *data, int len)
341 {
342  XLogRecData *rdata;
343 
344  Assert(begininsert_called);
345 
346  if (num_rdatas >= max_rdatas)
347  elog(ERROR, "too much WAL data");
348  rdata = &rdatas[num_rdatas++];
349 
350  rdata->data = data;
351  rdata->len = len;
352 
353  /*
354  * we use the mainrdata_last pointer to track the end of the chain, so no
355  * need to clear 'next' here.
356  */
357 
358  mainrdata_last->next = rdata;
359  mainrdata_last = rdata;
360 
361  mainrdata_len += len;
362 }
363 
364 /*
365  * Add buffer-specific data to the WAL record that's being constructed.
366  *
367  * Block_id must reference a block previously registered with
368  * XLogRegisterBuffer(). If this is called more than once for the same
369  * block_id, the data is appended.
370  *
371  * The maximum amount of data that can be registered per block is 65535
372  * bytes. That should be plenty; if you need more than BLCKSZ bytes to
373  * reconstruct the changes to the page, you might as well just log a full
374  * copy of it. (the "main data" that's not associated with a block is not
375  * limited)
376  */
377 void
378 XLogRegisterBufData(uint8 block_id, char *data, int len)
379 {
380  registered_buffer *regbuf;
381  XLogRecData *rdata;
382 
383  Assert(begininsert_called);
384 
385  /* find the registered buffer struct */
386  regbuf = &registered_buffers[block_id];
387  if (!regbuf->in_use)
388  elog(ERROR, "no block with id %d registered with WAL insertion",
389  block_id);
390 
391  if (num_rdatas >= max_rdatas)
392  elog(ERROR, "too much WAL data");
393  rdata = &rdatas[num_rdatas++];
394 
395  rdata->data = data;
396  rdata->len = len;
397 
398  regbuf->rdata_tail->next = rdata;
399  regbuf->rdata_tail = rdata;
400  regbuf->rdata_len += len;
401 }
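A caller typically pairs the block reference with whatever per-block payload replay will need when no full-page image is taken, along these lines (a sketch; offnum, tupledata and tuplelen are assumed caller locals):

	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

	/* data needed to redo the change if the block image is not logged */
	XLogRegisterBufData(0, (char *) &offnum, sizeof(OffsetNumber));
	XLogRegisterBufData(0, (char *) tupledata, tuplelen);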
402 
403 /*
404  * Set insert status flags for the upcoming WAL record.
405  *
406  * The flags that can be used here are:
407  * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
408  * included in the record.
409  * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
410  * durability, which allows the system to avoid triggering WAL archiving
411  * and other background activity.
412  * - XLOG_INCLUDE_XID, a message-passing hack between XLogRecordAssemble
413  * and XLogResetInsertion.
414  */
415 void
416 XLogSetRecordFlags(uint8 flags)
417 {
418  Assert(begininsert_called);
419  curinsert_flags |= flags;
420 }
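For instance, a record that should not, by itself, force an archive_timeout segment switch or similar background work could be flagged like this (sketch; RM_FOO_ID, XLOG_FOO_OP, xlrec and recptr are hypothetical):

	XLogBeginInsert();
	XLogRegisterData((char *) &xlrec, sizeof(xlrec));

	/* this record alone is not worth archiving activity */
	XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);

	recptr = XLogInsert(RM_FOO_ID, XLOG_FOO_OP);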
421 
422 /*
423  * Insert an XLOG record having the specified RMID and info bytes, with the
424  * body of the record being the data and buffer references registered earlier
425  * with XLogRegister* calls.
426  *
427  * Returns XLOG pointer to end of record (beginning of next record).
428  * This can be used as LSN for data pages affected by the logged action.
429  * (LSN is the XLOG point up to which the XLOG must be flushed to disk
430  * before the data page can be written out. This implements the basic
431  * WAL rule "write the log before the data".)
432  */
433 XLogRecPtr
434 XLogInsert(RmgrId rmid, uint8 info)
435 {
436  XLogRecPtr EndPos;
437 
438  /* XLogBeginInsert() must have been called. */
439  if (!begininsert_called)
440  elog(ERROR, "XLogBeginInsert was not called");
441 
442  /*
443  * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
444  * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
445  */
446  if ((info & ~(XLR_RMGR_INFO_MASK |
447  XLR_SPECIAL_REL_UPDATE |
448  XLR_CHECK_CONSISTENCY)) != 0)
449  elog(PANIC, "invalid xlog info mask %02X", info);
450 
451  TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
452 
453  /*
454  * In bootstrap mode, we don't actually log anything but XLOG resources;
455  * return a phony record pointer.
456  */
457  if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
458  {
459  XLogResetInsertion();
460  EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
461  return EndPos;
462  }
463 
464  do
465  {
466  XLogRecPtr RedoRecPtr;
467  bool doPageWrites;
468  XLogRecPtr fpw_lsn;
469  XLogRecData *rdt;
470  int num_fpi = 0;
471 
472  /*
473  * Get values needed to decide whether to do full-page writes. Since
474  * we don't yet have an insertion lock, these could change under us,
475  * but XLogInsertRecord will recheck them once it has a lock.
476  */
477  GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
478 
479  rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
480  &fpw_lsn, &num_fpi);
481 
482  EndPos = XLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi);
483  } while (EndPos == InvalidXLogRecPtr);
484 
485  XLogResetInsertion();
486 
487  return EndPos;
488 }
489 
490 /*
491  * Assemble a WAL record from the registered data and buffers into an
492  * XLogRecData chain, ready for insertion with XLogInsertRecord().
493  *
494  * The record header fields are filled in, except for the xl_prev field. The
495  * calculated CRC does not include the record header yet.
496  *
497  * If there are any registered buffers, and a full-page image was not taken
498  * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
499  * signals that the assembled record is only good for insertion on the
500  * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
501  */
502 static XLogRecData *
503 XLogRecordAssemble(RmgrId rmid, uint8 info,
504  XLogRecPtr RedoRecPtr, bool doPageWrites,
505  XLogRecPtr *fpw_lsn, int *num_fpi)
506 {
507  XLogRecData *rdt;
508  uint32 total_len = 0;
509  int block_id;
510  pg_crc32c rdata_crc;
511  registered_buffer *prev_regbuf = NULL;
512  XLogRecData *rdt_datas_last;
513  XLogRecord *rechdr;
514  char *scratch = hdr_scratch;
515 
516  /*
517  * Note: this function can be called multiple times for the same record.
518  * All the modifications we do to the rdata chains below must handle that.
519  */
520 
521  /* The record begins with the fixed-size header */
522  rechdr = (XLogRecord *) scratch;
523  scratch += SizeOfXLogRecord;
524 
525  hdr_rdt.next = NULL;
526  rdt_datas_last = &hdr_rdt;
527  hdr_rdt.data = hdr_scratch;
528 
529  /*
530  * Enforce consistency checks for this record if user is looking for it.
531  * Do this at the beginning of this routine so that callers of XLogInsert()
532  * can also request the check by passing XLR_CHECK_CONSISTENCY directly for
533  * a record.
534  */
535  if (wal_consistency_checking[rmid])
536  info |= XLR_CHECK_CONSISTENCY;
537 
538  /*
539  * Make an rdata chain containing all the data portions of all block
540  * references. This includes the data for full-page images. Also append
541  * the headers for the block references in the scratch buffer.
542  */
543  *fpw_lsn = InvalidXLogRecPtr;
544  for (block_id = 0; block_id < max_registered_block_id; block_id++)
545  {
546  registered_buffer *regbuf = &registered_buffers[block_id];
547  bool needs_backup;
548  bool needs_data;
549  XLogRecordBlockHeader bkpb;
550  XLogRecordBlockImageHeader bimg = {0};
551  XLogRecordBlockCompressHeader cbimg = {0};
552  bool samerel;
553  bool is_compressed = false;
554  bool include_image;
555 
556  if (!regbuf->in_use)
557  continue;
558 
559  /* Determine if this block needs to be backed up */
560  if (regbuf->flags & REGBUF_FORCE_IMAGE)
561  needs_backup = true;
562  else if (regbuf->flags & REGBUF_NO_IMAGE)
563  needs_backup = false;
564  else if (!doPageWrites)
565  needs_backup = false;
566  else
567  {
568  /*
569  * We assume page LSN is first data on *every* page that can be
570  * passed to XLogInsert, whether it has the standard page layout
571  * or not.
572  */
573  XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
574 
575  needs_backup = (page_lsn <= RedoRecPtr);
576  if (!needs_backup)
577  {
578  if (*fpw_lsn == InvalidXLogRecPtr || page_lsn < *fpw_lsn)
579  *fpw_lsn = page_lsn;
580  }
581  }
582 
583  /* Determine if the buffer data needs to be included */
584  if (regbuf->rdata_len == 0)
585  needs_data = false;
586  else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
587  needs_data = true;
588  else
589  needs_data = !needs_backup;
590 
591  bkpb.id = block_id;
592  bkpb.fork_flags = regbuf->forkno;
593  bkpb.data_length = 0;
594 
595  if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
596  bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
597 
598  /*
599  * If needs_backup is true or WAL checking is enabled for current
600  * resource manager, log a full-page write for the current block.
601  */
602  include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
603 
604  if (include_image)
605  {
606  Page page = regbuf->page;
607  uint16 compressed_len = 0;
608 
609  /*
610  * The page needs to be backed up, so calculate its hole length
611  * and offset.
612  */
613  if (regbuf->flags & REGBUF_STANDARD)
614  {
615  /* Assume we can omit data between pd_lower and pd_upper */
616  uint16 lower = ((PageHeader) page)->pd_lower;
617  uint16 upper = ((PageHeader) page)->pd_upper;
618 
619  if (lower >= SizeOfPageHeaderData &&
620  upper > lower &&
621  upper <= BLCKSZ)
622  {
623  bimg.hole_offset = lower;
624  cbimg.hole_length = upper - lower;
625  }
626  else
627  {
628  /* No "hole" to remove */
629  bimg.hole_offset = 0;
630  cbimg.hole_length = 0;
631  }
632  }
633  else
634  {
635  /* Not a standard page header, don't try to eliminate "hole" */
636  bimg.hole_offset = 0;
637  cbimg.hole_length = 0;
638  }
639 
640  /*
641  * Try to compress a block image if wal_compression is enabled
642  */
643  if (wal_compression != WAL_COMPRESSION_NONE)
644  {
645  is_compressed =
646  XLogCompressBackupBlock(page, bimg.hole_offset,
647  cbimg.hole_length,
648  regbuf->compressed_page,
649  &compressed_len);
650  }
651 
652  /*
653  * Fill in the remaining fields in the XLogRecordBlockHeader
654  * struct
655  */
656  bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
657 
658  /* Report a full page image constructed for the WAL record */
659  *num_fpi += 1;
660 
661  /*
662  * Construct XLogRecData entries for the page content.
663  */
664  rdt_datas_last->next = &regbuf->bkp_rdatas[0];
665  rdt_datas_last = rdt_datas_last->next;
666 
667  bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
668 
669  /*
670  * If WAL consistency checking is enabled for the resource manager
671  * of this WAL record, a full-page image is included in the record
672  * for the block modified. During redo, the full-page is replayed
673  * only if BKPIMAGE_APPLY is set.
674  */
675  if (needs_backup)
676  bimg.bimg_info |= BKPIMAGE_APPLY;
677 
678  if (is_compressed)
679  {
680  /* The current compression is stored in the WAL record */
681  bimg.length = compressed_len;
682 
683  /* Set the compression method used for this block */
684  switch ((WalCompression) wal_compression)
685  {
686  case WAL_COMPRESSION_PGLZ:
687  bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
688  break;
689 
690  case WAL_COMPRESSION_LZ4:
691 #ifdef USE_LZ4
692  bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
693 #else
694  elog(ERROR, "LZ4 is not supported by this build");
695 #endif
696  break;
697 
698  case WAL_COMPRESSION_NONE:
699  Assert(false); /* cannot happen */
700  break;
701  /* no default case, so that compiler will warn */
702  }
703 
704  rdt_datas_last->data = regbuf->compressed_page;
705  rdt_datas_last->len = compressed_len;
706  }
707  else
708  {
709  bimg.length = BLCKSZ - cbimg.hole_length;
710 
711  if (cbimg.hole_length == 0)
712  {
713  rdt_datas_last->data = page;
714  rdt_datas_last->len = BLCKSZ;
715  }
716  else
717  {
718  /* must skip the hole */
719  rdt_datas_last->data = page;
720  rdt_datas_last->len = bimg.hole_offset;
721 
722  rdt_datas_last->next = &regbuf->bkp_rdatas[1];
723  rdt_datas_last = rdt_datas_last->next;
724 
725  rdt_datas_last->data =
726  page + (bimg.hole_offset + cbimg.hole_length);
727  rdt_datas_last->len =
728  BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
729  }
730  }
731 
732  total_len += bimg.length;
733  }
734 
735  if (needs_data)
736  {
737  /*
738  * Link the caller-supplied rdata chain for this buffer to the
739  * overall list.
740  */
741  bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
742  bkpb.data_length = regbuf->rdata_len;
743  total_len += regbuf->rdata_len;
744 
745  rdt_datas_last->next = regbuf->rdata_head;
746  rdt_datas_last = regbuf->rdata_tail;
747  }
748 
749  if (prev_regbuf && RelFileNodeEquals(regbuf->rnode, prev_regbuf->rnode))
750  {
751  samerel = true;
752  bkpb.fork_flags |= BKPBLOCK_SAME_REL;
753  }
754  else
755  samerel = false;
756  prev_regbuf = regbuf;
757 
758  /* Ok, copy the header to the scratch buffer */
759  memcpy(scratch, &bkpb, SizeOfXLogRecordBlockHeader);
760  scratch += SizeOfXLogRecordBlockHeader;
761  if (include_image)
762  {
763  memcpy(scratch, &bimg, SizeOfXLogRecordBlockImageHeader);
764  scratch += SizeOfXLogRecordBlockImageHeader;
765  if (cbimg.hole_length != 0 && is_compressed)
766  {
767  memcpy(scratch, &cbimg,
768  SizeOfXLogRecordBlockCompressHeader);
769  scratch += SizeOfXLogRecordBlockCompressHeader;
770  }
771  }
772  if (!samerel)
773  {
774  memcpy(scratch, &regbuf->rnode, sizeof(RelFileNode));
775  scratch += sizeof(RelFileNode);
776  }
777  memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
778  scratch += sizeof(BlockNumber);
779  }
780 
781  /* followed by the record's origin, if any */
782  if ((curinsert_flags & XLOG_INCLUDE_ORIGIN) &&
783  replorigin_session_origin != InvalidRepOriginId)
784  {
785  *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
786  memcpy(scratch, &replorigin_session_origin, sizeof(replorigin_session_origin));
787  scratch += sizeof(replorigin_session_origin);
788  }
789 
790  /* followed by toplevel XID, if not already included in previous record */
791  if (IsSubTransactionAssignmentPending())
792  {
793  TransactionId xid = GetTopTransactionIdIfAny();
794 
795  /* update the flag (later used by XLogResetInsertion) */
796  XLogSetRecordFlags(XLOG_INCLUDE_XID);
797 
798  *(scratch++) = (char) XLR_BLOCK_ID_TOPLEVEL_XID;
799  memcpy(scratch, &xid, sizeof(TransactionId));
800  scratch += sizeof(TransactionId);
801  }
802 
803  /* followed by main data, if any */
804  if (mainrdata_len > 0)
805  {
806  if (mainrdata_len > 255)
807  {
808  *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
809  memcpy(scratch, &mainrdata_len, sizeof(uint32));
810  scratch += sizeof(uint32);
811  }
812  else
813  {
814  *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
815  *(scratch++) = (uint8) mainrdata_len;
816  }
817  rdt_datas_last->next = mainrdata_head;
818  rdt_datas_last = mainrdata_last;
819  total_len += mainrdata_len;
820  }
821  rdt_datas_last->next = NULL;
822 
823  hdr_rdt.len = (scratch - hdr_scratch);
824  total_len += hdr_rdt.len;
825 
826  /*
827  * Calculate CRC of the data
828  *
829  * Note that the record header isn't added into the CRC initially since we
830  * don't know the prev-link yet. Thus, the CRC will represent the CRC of
831  * the whole record in the order: rdata, then backup blocks, then record
832  * header.
833  */
834  INIT_CRC32C(rdata_crc);
835  COMP_CRC32C(rdata_crc, hdr_scratch + SizeOfXLogRecord, hdr_rdt.len - SizeOfXLogRecord);
836  for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
837  COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
838 
839  /*
840  * Fill in the fields in the record header. Prev-link is filled in later,
841  * once we know where in the WAL the record will be inserted. The CRC does
842  * not include the record header yet.
843  */
844  rechdr->xl_xid = GetCurrentTransactionIdIfAny();
845  rechdr->xl_tot_len = total_len;
846  rechdr->xl_info = info;
847  rechdr->xl_rmid = rmid;
848  rechdr->xl_prev = InvalidXLogRecPtr;
849  rechdr->xl_crc = rdata_crc;
850 
851  return &hdr_rdt;
852 }
853 
854 /*
855  * Create a compressed version of a backup block image.
856  *
857  * Returns false if compression fails (i.e., compressed result is actually
858  * bigger than original). Otherwise, returns true and sets 'dlen' to
859  * the length of compressed block image.
860  */
861 static bool
862 XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
863  char *dest, uint16 *dlen)
864 {
865  int32 orig_len = BLCKSZ - hole_length;
866  int32 len = -1;
867  int32 extra_bytes = 0;
868  char *source;
869  PGAlignedBlock tmp;
870 
871  if (hole_length != 0)
872  {
873  /* must skip the hole */
874  source = tmp.data;
875  memcpy(source, page, hole_offset);
876  memcpy(source + hole_offset,
877  page + (hole_offset + hole_length),
878  BLCKSZ - (hole_length + hole_offset));
879 
880  /*
881  * Extra data needs to be stored in WAL record for the compressed
882  * version of block image if the hole exists.
883  */
884  extra_bytes = SizeOfXLogRecordBlockCompressHeader;
885  }
886  else
887  source = page;
888 
889  switch ((WalCompression) wal_compression)
890  {
891  case WAL_COMPRESSION_PGLZ:
892  len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
893  break;
894 
895  case WAL_COMPRESSION_LZ4:
896 #ifdef USE_LZ4
897  len = LZ4_compress_default(source, dest, orig_len,
898  COMPRESS_BUFSIZE);
899  if (len <= 0)
900  len = -1; /* failure */
901 #else
902  elog(ERROR, "LZ4 is not supported by this build");
903 #endif
904  break;
905 
906  case WAL_COMPRESSION_NONE:
907  Assert(false); /* cannot happen */
908  break;
909  /* no default case, so that compiler will warn */
910  }
911 
912  /*
913  * We recheck the actual size even if compression reports success and see
914  * if the number of bytes saved by compression is larger than the length
915  * of extra data needed for the compressed version of block image.
916  */
917  if (len >= 0 &&
918  len + extra_bytes < orig_len)
919  {
920  *dlen = (uint16) len; /* successful compression */
921  return true;
922  }
923  return false;
924 }
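To make the payoff test concrete, here is an illustrative calculation under assumed, not measured, numbers (an 8 kB block with a 3000-byte hole and a pretend compressor result):

	uint16		hole_length = 3000;
	int32		orig_len = BLCKSZ - hole_length;	/* 5192 bytes to compress */
	int32		len = 2100;		/* pretend the compressor returned 2100 bytes */
	int32		extra_bytes = SizeOfXLogRecordBlockCompressHeader;
	/* kept only if the saving beats the extra header: 2100 + header < 5192 */
	bool		keep_compressed = (len >= 0 && len + extra_bytes < orig_len);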
925 
926 /*
927  * Determine whether the buffer referenced has to be backed up.
928  *
929  * Since we don't yet have the insert lock, fullPageWrites and forcePageWrites
930  * could change later, so the result should be used for optimization purposes
931  * only.
932  */
933 bool
935 {
937  bool doPageWrites;
938  Page page;
939 
940  GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
941 
942  page = BufferGetPage(buffer);
943 
944  if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
945  return true; /* buffer requires backup */
946 
947  return false; /* buffer does not need to be backed up */
948 }
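A caller can treat the answer purely as an optimization hint, for example to skip building per-block data that XLogRecordAssemble() would drop anyway if a full-page image ends up being taken (sketch; "buffer" is an assumed local, and correctness must not depend on the hint since it can be stale by insertion time):

	if (!XLogCheckBufferNeedsBackup(buffer))
	{
		/* an FPI is unlikely: worth assembling a compact per-block delta */
	}
	else
	{
		/* an FPI is likely anyway: skip work the image would make redundant */
	}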
949 
950 /*
951  * Write a backup block if needed when we are setting a hint. Note that
952  * this may be called for a variety of page types, not just heaps.
953  *
954  * Callable while holding just share lock on the buffer content.
955  *
956  * We can't use the plain backup block mechanism since that relies on the
957  * Buffer being exclusively locked. Some modifications (setting LSN, hint
958  * bits) are allowed while the buffer is only share-locked, and those can
959  * lead to WAL checksum failures. So instead we copy the page and insert the copied data as normal
960  * record data.
961  *
962  * We only need to do something if page has not yet been full page written in
963  * this checkpoint round. The LSN of the inserted wal record is returned if we
964  * had to write, InvalidXLogRecPtr otherwise.
965  *
966  * It is possible that multiple concurrent backends could attempt to write WAL
967  * records. In that case, multiple copies of the same block would be recorded
968  * in separate WAL records by different backends, though that is still OK from
969  * a correctness perspective.
970  */
971 XLogRecPtr
972 XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
973 {
974  XLogRecPtr recptr = InvalidXLogRecPtr;
975  XLogRecPtr lsn;
976  XLogRecPtr RedoRecPtr;
977 
978  /*
979  * Ensure no checkpoint can change our view of RedoRecPtr.
980  */
981  Assert(MyProc->delayChkpt);
982 
983  /*
984  * Update RedoRecPtr so that we can make the right decision
985  */
986  RedoRecPtr = GetRedoRecPtr();
987 
988  /*
989  * We assume page LSN is first data on *every* page that can be passed to
990  * XLogInsert, whether it has the standard page layout or not. Since we're
991  * only holding a share-lock on the page, we must take the buffer header
992  * lock when we look at the LSN.
993  */
994  lsn = BufferGetLSNAtomic(buffer);
995 
996  if (lsn <= RedoRecPtr)
997  {
998  int flags = 0;
999  PGAlignedBlock copied_buffer;
1000  char *origdata = (char *) BufferGetBlock(buffer);
1001  RelFileNode rnode;
1002  ForkNumber forkno;
1003  BlockNumber blkno;
1004 
1005  /*
1006  * Copy buffer so we don't have to worry about concurrent hint bit or
1007  * lsn updates. We assume pd_lower/upper cannot be changed without an
1008  * exclusive lock, so the copied contents are not racy.
1009  */
1010  if (buffer_std)
1011  {
1012  /* Assume we can omit data between pd_lower and pd_upper */
1013  Page page = BufferGetPage(buffer);
1014  uint16 lower = ((PageHeader) page)->pd_lower;
1015  uint16 upper = ((PageHeader) page)->pd_upper;
1016 
1017  memcpy(copied_buffer.data, origdata, lower);
1018  memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
1019  }
1020  else
1021  memcpy(copied_buffer.data, origdata, BLCKSZ);
1022 
1023  XLogBeginInsert();
1024 
1025  if (buffer_std)
1026  flags |= REGBUF_STANDARD;
1027 
1028  BufferGetTag(buffer, &rnode, &forkno, &blkno);
1029  XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags);
1030 
1031  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
1032  }
1033 
1034  return recptr;
1035 }
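The typical caller is the hint-bit path; a simplified sketch of such a caller (compare MarkBufferDirtyHint() in bufmgr.c, which is the real one, and assume buffer and buffer_std come from the surrounding code) looks roughly like this:

	/* torn-page protection only matters when checksums/wal_log_hints need it */
	if (XLogHintBitIsNeeded())
	{
		XLogRecPtr	lsn = XLogSaveBufferForHint(buffer, buffer_std);

		if (!XLogRecPtrIsInvalid(lsn))
			PageSetLSN(BufferGetPage(buffer), lsn);
	}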
1036 
1037 /*
1038  * Write a WAL record containing a full image of a page. Caller is responsible
1039  * for writing the page to disk after calling this routine.
1040  *
1041  * Note: If you're using this function, you should be building pages in private
1042  * memory and writing them directly to smgr. If you're using buffers, call
1043  * log_newpage_buffer instead.
1044  *
1045  * If the page follows the standard page layout, with a PageHeader and unused
1046  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1047  * the unused space to be left out from the WAL record, making it smaller.
1048  */
1049 XLogRecPtr
1050 log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
1051  Page page, bool page_std)
1052 {
1053  int flags;
1054  XLogRecPtr recptr;
1055 
1056  flags = REGBUF_FORCE_IMAGE;
1057  if (page_std)
1058  flags |= REGBUF_STANDARD;
1059 
1060  XLogBeginInsert();
1061  XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags);
1062  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
1063 
1064  /*
1065  * The page may be uninitialized. If so, we can't set the LSN because that
1066  * would corrupt the page.
1067  */
1068  if (!PageIsNew(page))
1069  {
1070  PageSetLSN(page, recptr);
1071  }
1072 
1073  return recptr;
1074 }
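A sketch of the intended usage, for a page built in private memory (illustrative only; rel and blkno are assumed caller state, and the caller must still write the page out through smgr afterwards):

	PGAlignedBlock buf;
	Page		page = (Page) buf.data;
	XLogRecPtr	recptr;

	PageInit(page, BLCKSZ, 0);
	/* ... fill in the new page's contents ... */

	recptr = log_newpage(&rel->rd_node, MAIN_FORKNUM, blkno, page, true);
	/* the page's LSN is now set; write the page to disk before relying on it */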
1075 
1076 /*
1077  * Like log_newpage(), but allows logging multiple pages in one operation.
1078  * It is more efficient than calling log_newpage() for each page separately,
1079  * because we can write multiple pages in a single WAL record.
1080  */
1081 void
1082 log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
1083  BlockNumber *blknos, Page *pages, bool page_std)
1084 {
1085  int flags;
1086  XLogRecPtr recptr;
1087  int i;
1088  int j;
1089 
1090  flags = REGBUF_FORCE_IMAGE;
1091  if (page_std)
1092  flags |= REGBUF_STANDARD;
1093 
1094  /*
1095  * Iterate over all the pages. They are collected into batches of
1096  * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1097  * batch.
1098  */
1099  XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);
1100 
1101  i = 0;
1102  while (i < num_pages)
1103  {
1104  int batch_start = i;
1105  int nbatch;
1106 
1107  XLogBeginInsert();
1108 
1109  nbatch = 0;
1110  while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1111  {
1112  XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags);
1113  i++;
1114  nbatch++;
1115  }
1116 
1117  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
1118 
1119  for (j = batch_start; j < i; j++)
1120  {
1121  /*
1122  * The page may be uninitialized. If so, we can't set the LSN
1123  * because that would corrupt the page.
1124  */
1125  if (!PageIsNew(pages[j]))
1126  {
1127  PageSetLSN(pages[j], recptr);
1128  }
1129  }
1130  }
1131 }
1132 
1133 /*
1134  * Write a WAL record containing a full image of a page.
1135  *
1136  * Caller should initialize the buffer and mark it dirty before calling this
1137  * function. This function will set the page LSN.
1138  *
1139  * If the page follows the standard page layout, with a PageHeader and unused
1140  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1141  * the unused space to be left out from the WAL record, making it smaller.
1142  */
1143 XLogRecPtr
1144 log_newpage_buffer(Buffer buffer, bool page_std)
1145 {
1146  Page page = BufferGetPage(buffer);
1147  RelFileNode rnode;
1148  ForkNumber forkNum;
1149  BlockNumber blkno;
1150 
1151  /* Shared buffers should be modified in a critical section. */
1152  Assert(CritSectionCount > 0);
1153 
1154  BufferGetTag(buffer, &rnode, &forkNum, &blkno);
1155 
1156  return log_newpage(&rnode, forkNum, blkno, page, page_std);
1157 }
1158 
1159 /*
1160  * WAL-log a range of blocks in a relation.
1161  *
1162  * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1163  * written to the WAL. If the range is large, this is done in multiple WAL
1164  * records.
1165  *
1166  * If all pages follow the standard page layout, with a PageHeader and unused
1167  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1168  * the unused space to be left out from the WAL records, making them smaller.
1169  *
1170  * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1171  * is used on a newly-built relation, and the caller is holding an
1172  * AccessExclusiveLock on it, so no other backend can be accessing it at the
1173  * same time. If that's not the case, you must ensure that this does not
1174  * cause a deadlock through some other means.
1175  */
1176 void
1177 log_newpage_range(Relation rel, ForkNumber forkNum,
1178  BlockNumber startblk, BlockNumber endblk,
1179  bool page_std)
1180 {
1181  int flags;
1182  BlockNumber blkno;
1183 
1184  flags = REGBUF_FORCE_IMAGE;
1185  if (page_std)
1186  flags |= REGBUF_STANDARD;
1187 
1188  /*
1189  * Iterate over all the pages in the range. They are collected into
1190  * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1191  * for each batch.
1192  */
1193  XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);
1194 
1195  blkno = startblk;
1196  while (blkno < endblk)
1197  {
1198  Buffer bufpack[XLR_MAX_BLOCK_ID];
1199  XLogRecPtr recptr;
1200  int nbufs;
1201  int i;
1202 
1203  START_CRIT_SECTION();
1204 
1205  /* Collect a batch of blocks. */
1206  nbufs = 0;
1207  while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1208  {
1209  Buffer buf = ReadBufferExtended(rel, forkNum, blkno,
1210  RBM_NORMAL, NULL);
1211 
1212  LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
1213 
1214  /*
1215  * Completely empty pages are not WAL-logged. Writing a WAL record
1216  * would change the LSN, and we don't want that. We want the page
1217  * to stay empty.
1218  */
1219  if (!PageIsNew(BufferGetPage(buf)))
1220  bufpack[nbufs++] = buf;
1221  else
1222  UnlockReleaseBuffer(buf);
1223  blkno++;
1224  }
1225 
1226  /* Write WAL record for this batch. */
1227  XLogBeginInsert();
1228 
1229  START_CRIT_SECTION();
1230  for (i = 0; i < nbufs; i++)
1231  {
1232  XLogRegisterBuffer(i, bufpack[i], flags);
1233  MarkBufferDirty(bufpack[i]);
1234  }
1235 
1236  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
1237 
1238  for (i = 0; i < nbufs; i++)
1239  {
1240  PageSetLSN(BufferGetPage(bufpack[i]), recptr);
1241  UnlockReleaseBuffer(bufpack[i]);
1242  }
1243  END_CRIT_SECTION();
1244  }
1245 }
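A typical use is WAL-logging every page of a newly built relation fork once construction is complete, e.g. (sketch; rel is an assumed Relation already opened and suitably locked by the caller):

	/* log images of all pages just built; batching happens internally */
	log_newpage_range(rel, MAIN_FORKNUM,
					  0, RelationGetNumberOfBlocks(rel),
					  true);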
1246 
1247 /*
1248  * Allocate working buffers needed for WAL record construction.
1249  */
1250 void
1251 InitXLogInsert(void)
1252 {
1253  /* Initialize the working areas */
1254  if (xloginsert_cxt == NULL)
1255  {
1256  xloginsert_cxt = AllocSetContextCreate(TopMemoryContext,
1257  "WAL record construction",
1258  ALLOCSET_DEFAULT_SIZES);
1259  }
1260 
1261  if (registered_buffers == NULL)
1262  {
1263  registered_buffers = (registered_buffer *)
1264  MemoryContextAllocZero(xloginsert_cxt,
1265  sizeof(registered_buffer) * (XLR_NORMAL_MAX_BLOCK_ID + 1));
1266  max_registered_buffers = XLR_NORMAL_MAX_BLOCK_ID + 1;
1267  }
1268  if (rdatas == NULL)
1269  {
1270  rdatas = MemoryContextAlloc(xloginsert_cxt,
1271  sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1272  max_rdatas = XLR_NORMAL_RDATAS;
1273  }
1274 
1275  /*
1276  * Allocate a buffer to hold the header information for a WAL record.
1277  */
1278  if (hdr_scratch == NULL)
1279  hdr_scratch = MemoryContextAllocZero(xloginsert_cxt,
1280  HEADER_SCRATCH_SIZE);
1281 }