PostgreSQL Source Code  git master
xloginsert.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * xloginsert.c
4  * Functions for constructing WAL records
5  *
6  * Constructing a WAL record begins with a call to XLogBeginInsert,
7  * followed by a number of XLogRegister* calls. The registered data is
8  * collected in private working memory, and finally assembled into a chain
9  * of XLogRecData structs by a call to XLogRecordAssemble(). See
10  * access/transam/README for details.
11  *
12  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
13  * Portions Copyright (c) 1994, Regents of the University of California
14  *
15  * src/backend/access/transam/xloginsert.c
16  *
17  *-------------------------------------------------------------------------
18  */
19 
20 #include "postgres.h"
21 
22 #include "access/xact.h"
23 #include "access/xlog.h"
24 #include "access/xlog_internal.h"
25 #include "access/xloginsert.h"
26 #include "catalog/pg_control.h"
27 #include "common/pg_lzcompress.h"
28 #include "executor/instrument.h"
29 #include "miscadmin.h"
30 #include "pg_trace.h"
31 #include "replication/origin.h"
32 #include "storage/bufmgr.h"
33 #include "storage/proc.h"
34 #include "utils/memutils.h"
35 
36 /* Buffer size required to store a compressed version of backup block image */
37 #define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
38 
39 /*
40  * For each block reference registered with XLogRegisterBuffer, we fill in
41  * a registered_buffer struct.
42  */
43 typedef struct
44 {
45  bool in_use; /* is this slot in use? */
46  uint8 flags; /* REGBUF_* flags */
47  RelFileNode rnode; /* identifies the relation and block */
50  Page page; /* page content */
51  uint32 rdata_len; /* total length of data in rdata chain */
52  XLogRecData *rdata_head; /* head of the chain of data registered with
53  * this block */
54  XLogRecData *rdata_tail; /* last entry in the chain, or &rdata_head if
55  * empty */
56 
57  XLogRecData bkp_rdatas[2]; /* temporary rdatas used to hold references to
58  * backup block data in XLogRecordAssemble() */
59 
60  /* buffer to store a compressed version of backup block image */
61  char compressed_page[PGLZ_MAX_BLCKSZ];
63 
65 static int max_registered_buffers; /* allocated size */
66 static int max_registered_block_id = 0; /* highest block_id + 1 currently
67  * registered */
68 
69 /*
70  * A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
71  * with XLogRegisterData(...).
72  */
74 static XLogRecData *mainrdata_last = (XLogRecData *) &mainrdata_head;
75 static uint32 mainrdata_len; /* total # of bytes in chain */
76 
77 /* flags for the in-progress insertion */
79 
80 /*
81  * These are used to hold the record header while constructing a record.
82  * 'hdr_scratch' is not a plain variable, but is palloc'd at initialization,
83  * because we want it to be MAXALIGNed and padding bytes zeroed.
84  *
85  * For simplicity, it's allocated large enough to hold the headers for any
86  * WAL record.
87  */
89 static char *hdr_scratch = NULL;
90 
91 #define SizeOfXlogOrigin (sizeof(RepOriginId) + sizeof(char))
92 #define SizeOfXLogTransactionId (sizeof(TransactionId) + sizeof(char))
93 
94 #define HEADER_SCRATCH_SIZE \
95  (SizeOfXLogRecord + \
96  MaxSizeOfXLogRecordBlockHeader * (XLR_MAX_BLOCK_ID + 1) + \
97  SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin + \
98  SizeOfXLogTransactionId)
99 
100 /*
101  * An array of XLogRecData structs, to hold registered data.
102  */
104 static int num_rdatas; /* entries currently used */
105 static int max_rdatas; /* allocated size */
106 
107 static bool begininsert_called = false;
108 
109 /* Memory context to hold the registered buffer and data references. */
111 
112 static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
114  XLogRecPtr *fpw_lsn, int *num_fpi);
115 static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
116  uint16 hole_length, char *dest, uint16 *dlen);
117 
118 /*
119  * Begin constructing a WAL record. This must be called before the
120  * XLogRegister* functions and XLogInsert().
121  */
122 void
124 {
126  Assert(mainrdata_last == (XLogRecData *) &mainrdata_head);
127  Assert(mainrdata_len == 0);
128 
129  /* cross-check on whether we should be here or not */
130  if (!XLogInsertAllowed())
131  elog(ERROR, "cannot make new WAL entries during recovery");
132 
133  if (begininsert_called)
134  elog(ERROR, "XLogBeginInsert was already called");
135 
136  begininsert_called = true;
137 }
138 
139 /*
140  * Ensure that there are enough buffer and data slots in the working area,
141  * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
142  * calls.
143  *
144  * There is always space for a small number of buffers and data chunks, enough
145  * for most record types. This function is for the exceptional cases that need
146  * more.
147  */
148 void
149 XLogEnsureRecordSpace(int max_block_id, int ndatas)
150 {
151  int nbuffers;
152 
153  /*
154  * This must be called before entering a critical section, because
155  * allocating memory inside a critical section can fail. repalloc() will
156  * check the same, but better to check it here too so that we fail
157  * consistently even if the arrays happen to be large enough already.
158  */
159  Assert(CritSectionCount == 0);
160 
161  /* the minimum values can't be decreased */
162  if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
163  max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
164  if (ndatas < XLR_NORMAL_RDATAS)
165  ndatas = XLR_NORMAL_RDATAS;
166 
167  if (max_block_id > XLR_MAX_BLOCK_ID)
168  elog(ERROR, "maximum number of WAL record block references exceeded");
169  nbuffers = max_block_id + 1;
170 
171  if (nbuffers > max_registered_buffers)
172  {
173  registered_buffers = (registered_buffer *)
174  repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
175 
176  /*
177  * At least the padding bytes in the structs must be zeroed, because
178  * they are included in WAL data, but initialize it all for tidiness.
179  */
180  MemSet(&registered_buffers[max_registered_buffers], 0,
181  (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
182  max_registered_buffers = nbuffers;
183  }
184 
185  if (ndatas > max_rdatas)
186  {
187  rdatas = (XLogRecData *) repalloc(rdatas, sizeof(XLogRecData) * ndatas);
188  max_rdatas = ndatas;
189  }
190 }
191 
192 /*
193  * Reset WAL record construction buffers.
194  */
195 void
197 {
198  int i;
199 
200  /* reset the subxact assignment flag (if needed) */
203 
204  for (i = 0; i < max_registered_block_id; i++)
205  registered_buffers[i].in_use = false;
206 
207  num_rdatas = 0;
208  max_registered_block_id = 0;
209  mainrdata_len = 0;
210  mainrdata_last = (XLogRecData *) &mainrdata_head;
211  curinsert_flags = 0;
212  begininsert_called = false;
213 }
214 
215 /*
216  * Register a reference to a buffer with the WAL record being constructed.
217  * This must be called for every page that the WAL-logged operation modifies.
218  */
void
XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
{
	registered_buffer *regbuf;

	/*
	 * NOTE(review): upstream also has Assert(begininsert_called) at this
	 * point; the line appears to have been lost in extraction -- confirm
	 * against the original source.
	 */

	/* NO_IMAGE doesn't make sense with FORCE_IMAGE */
	Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));

	/*
	 * Bump the high-water mark of block IDs in use.  The slot array itself
	 * must already be large enough (see XLogEnsureRecordSpace).
	 */
	if (block_id >= max_registered_block_id)
	{
		if (block_id >= max_registered_buffers)
			elog(ERROR, "too many registered buffers");
		max_registered_block_id = block_id + 1;
	}

	regbuf = &registered_buffers[block_id];

	/* Record the buffer's identity and page, and reset its rdata chain. */
	BufferGetTag(buffer, &regbuf->rnode, &regbuf->forkno, &regbuf->block);
	regbuf->page = BufferGetPage(buffer);
	regbuf->flags = flags;
	regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
	regbuf->rdata_len = 0;

	/*
	 * Check that this page hasn't already been registered with some other
	 * block_id.
	 */
#ifdef USE_ASSERT_CHECKING
	{
		int			i;

		for (i = 0; i < max_registered_block_id; i++)
		{
			registered_buffer *regbuf_old = &registered_buffers[i];

			if (i == block_id || !regbuf_old->in_use)
				continue;

			Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
				   regbuf_old->forkno != regbuf->forkno ||
				   regbuf_old->block != regbuf->block);
		}
	}
#endif

	regbuf->in_use = true;
}
267 
268 /*
269  * Like XLogRegisterBuffer, but for registering a block that's not in the
270  * shared buffer pool (i.e. when you don't have a Buffer for it).
271  */
void
XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
				  BlockNumber blknum, Page page, uint8 flags)
{
	registered_buffer *regbuf;

	/* This is currently only used to WAL-log a full-page image of a page */
	Assert(flags & REGBUF_FORCE_IMAGE);

	/*
	 * NOTE(review): upstream also has Assert(begininsert_called) at this
	 * point; the line appears to have been lost in extraction -- confirm
	 * against the original source.
	 */

	if (block_id >= max_registered_block_id)
		max_registered_block_id = block_id + 1;

	if (block_id >= max_registered_buffers)
		elog(ERROR, "too many registered buffers");

	regbuf = &registered_buffers[block_id];

	/* Fill in the caller-supplied identity directly; no Buffer involved. */
	regbuf->rnode = *rnode;
	regbuf->forkno = forknum;
	regbuf->block = blknum;
	regbuf->page = page;
	regbuf->flags = flags;
	regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
	regbuf->rdata_len = 0;

	/*
	 * Check that this page hasn't already been registered with some other
	 * block_id.
	 */
#ifdef USE_ASSERT_CHECKING
	{
		int			i;

		for (i = 0; i < max_registered_block_id; i++)
		{
			registered_buffer *regbuf_old = &registered_buffers[i];

			if (i == block_id || !regbuf_old->in_use)
				continue;

			Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
				   regbuf_old->forkno != regbuf->forkno ||
				   regbuf_old->block != regbuf->block);
		}
	}
#endif

	regbuf->in_use = true;
}
322 
323 /*
324  * Add data to the WAL record that's being constructed.
325  *
326  * The data is appended to the "main chunk", available at replay with
327  * XLogRecGetData().
328  */
void
XLogRegisterData(char *data, int len)
{
	XLogRecData *rdata;

	/*
	 * NOTE(review): upstream has Assert(begininsert_called) at this point;
	 * the line appears to have been lost in extraction -- confirm against
	 * the original source.
	 */

	if (num_rdatas >= max_rdatas)
		elog(ERROR, "too much WAL data");
	rdata = &rdatas[num_rdatas++];

	/* Only the pointer is stored; the caller's data is not copied. */
	rdata->data = data;
	rdata->len = len;

	/*
	 * we use the mainrdata_last pointer to track the end of the chain, so no
	 * need to clear 'next' here.
	 */

	/* Append this chunk to the "main data" chain. */
	mainrdata_last->next = rdata;
	mainrdata_last = rdata;

	mainrdata_len += len;
}
353 
354 /*
355  * Add buffer-specific data to the WAL record that's being constructed.
356  *
357  * Block_id must reference a block previously registered with
358  * XLogRegisterBuffer(). If this is called more than once for the same
359  * block_id, the data is appended.
360  *
361  * The maximum amount of data that can be registered per block is 65535
362  * bytes. That should be plenty; if you need more than BLCKSZ bytes to
363  * reconstruct the changes to the page, you might as well just log a full
364  * copy of it. (the "main data" that's not associated with a block is not
365  * limited)
366  */
void
XLogRegisterBufData(uint8 block_id, char *data, int len)
{
	registered_buffer *regbuf;
	XLogRecData *rdata;

	/*
	 * NOTE(review): upstream has Assert(begininsert_called) at this point;
	 * the line appears to have been lost in extraction -- confirm against
	 * the original source.
	 */

	/* find the registered buffer struct */
	regbuf = &registered_buffers[block_id];
	if (!regbuf->in_use)
		elog(ERROR, "no block with id %d registered with WAL insertion",
			 block_id);

	if (num_rdatas >= max_rdatas)
		elog(ERROR, "too much WAL data");
	rdata = &rdatas[num_rdatas++];

	/* Only the pointer is stored; the caller's data is not copied. */
	rdata->data = data;
	rdata->len = len;

	/* Append to this block's private rdata chain and track its length. */
	regbuf->rdata_tail->next = rdata;
	regbuf->rdata_tail = rdata;
	regbuf->rdata_len += len;
}
392 
393 /*
394  * Set insert status flags for the upcoming WAL record.
395  *
396  * The flags that can be used here are:
397  * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
398  * included in the record.
399  * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
400  * durability, which allows to avoid triggering WAL archiving and other
401  * background activity.
402  */
403 void
405 {
407  curinsert_flags |= flags;
408 }
409 
410 /*
411  * Insert an XLOG record having the specified RMID and info bytes, with the
412  * body of the record being the data and buffer references registered earlier
413  * with XLogRegister* calls.
414  *
415  * Returns XLOG pointer to end of record (beginning of next record).
416  * This can be used as LSN for data pages affected by the logged action.
417  * (LSN is the XLOG point up to which the XLOG must be flushed to disk
418  * before the data page can be written out. This implements the basic
419  * WAL rule "write the log before the data".)
420  */
423 {
424  XLogRecPtr EndPos;
425 
426  /* XLogBeginInsert() must have been called. */
427  if (!begininsert_called)
428  elog(ERROR, "XLogBeginInsert was not called");
429 
430  /*
431  * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
432  * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
433  */
434  if ((info & ~(XLR_RMGR_INFO_MASK |
436  XLR_CHECK_CONSISTENCY)) != 0)
437  elog(PANIC, "invalid xlog info mask %02X", info);
438 
439  TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
440 
441  /*
442  * In bootstrap mode, we don't actually log anything but XLOG resources;
443  * return a phony record pointer.
444  */
445  if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
446  {
448  EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
449  return EndPos;
450  }
451 
452  do
453  {
455  bool doPageWrites;
456  XLogRecPtr fpw_lsn;
457  XLogRecData *rdt;
458  int num_fpi = 0;
459 
460  /*
461  * Get values needed to decide whether to do full-page writes. Since
462  * we don't yet have an insertion lock, these could change under us,
463  * but XLogInsertRecord will recheck them once it has a lock.
464  */
465  GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
466 
467  rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
468  &fpw_lsn, &num_fpi);
469 
470  EndPos = XLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi);
471  } while (EndPos == InvalidXLogRecPtr);
472 
474 
475  return EndPos;
476 }
477 
478 /*
479  * Assemble a WAL record from the registered data and buffers into an
480  * XLogRecData chain, ready for insertion with XLogInsertRecord().
481  *
482  * The record header fields are filled in, except for the xl_prev field. The
483  * calculated CRC does not include the record header yet.
484  *
485  * If there are any registered buffers, and a full-page image was not taken
486  * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
487  * signals that the assembled record is only good for insertion on the
488  * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
489  */
490 static XLogRecData *
493  XLogRecPtr *fpw_lsn, int *num_fpi)
494 {
495  XLogRecData *rdt;
496  uint32 total_len = 0;
497  int block_id;
498  pg_crc32c rdata_crc;
499  registered_buffer *prev_regbuf = NULL;
500  XLogRecData *rdt_datas_last;
501  XLogRecord *rechdr;
502  char *scratch = hdr_scratch;
503 
504  /*
505  * Note: this function can be called multiple times for the same record.
506  * All the modifications we do to the rdata chains below must handle that.
507  */
508 
509  /* The record begins with the fixed-size header */
510  rechdr = (XLogRecord *) scratch;
511  scratch += SizeOfXLogRecord;
512 
513  hdr_rdt.next = NULL;
514  rdt_datas_last = &hdr_rdt;
515  hdr_rdt.data = hdr_scratch;
516 
517  /*
518  * Enforce consistency checks for this record if user is looking for it.
519  * Do this before at the beginning of this routine to give the possibility
520  * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
521  * a record.
522  */
523  if (wal_consistency_checking[rmid])
524  info |= XLR_CHECK_CONSISTENCY;
525 
526  /*
527  * Make an rdata chain containing all the data portions of all block
528  * references. This includes the data for full-page images. Also append
529  * the headers for the block references in the scratch buffer.
530  */
531  *fpw_lsn = InvalidXLogRecPtr;
532  for (block_id = 0; block_id < max_registered_block_id; block_id++)
533  {
534  registered_buffer *regbuf = &registered_buffers[block_id];
535  bool needs_backup;
536  bool needs_data;
539  XLogRecordBlockCompressHeader cbimg = {0};
540  bool samerel;
541  bool is_compressed = false;
542  bool include_image;
543 
544  if (!regbuf->in_use)
545  continue;
546 
547  /* Determine if this block needs to be backed up */
548  if (regbuf->flags & REGBUF_FORCE_IMAGE)
549  needs_backup = true;
550  else if (regbuf->flags & REGBUF_NO_IMAGE)
551  needs_backup = false;
552  else if (!doPageWrites)
553  needs_backup = false;
554  else
555  {
556  /*
557  * We assume page LSN is first data on *every* page that can be
558  * passed to XLogInsert, whether it has the standard page layout
559  * or not.
560  */
561  XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
562 
563  needs_backup = (page_lsn <= RedoRecPtr);
564  if (!needs_backup)
565  {
566  if (*fpw_lsn == InvalidXLogRecPtr || page_lsn < *fpw_lsn)
567  *fpw_lsn = page_lsn;
568  }
569  }
570 
571  /* Determine if the buffer data needs to included */
572  if (regbuf->rdata_len == 0)
573  needs_data = false;
574  else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
575  needs_data = true;
576  else
577  needs_data = !needs_backup;
578 
579  bkpb.id = block_id;
580  bkpb.fork_flags = regbuf->forkno;
581  bkpb.data_length = 0;
582 
583  if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
585 
586  /*
587  * If needs_backup is true or WAL checking is enabled for current
588  * resource manager, log a full-page write for the current block.
589  */
590  include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
591 
592  if (include_image)
593  {
594  Page page = regbuf->page;
595  uint16 compressed_len = 0;
596 
597  /*
598  * The page needs to be backed up, so calculate its hole length
599  * and offset.
600  */
601  if (regbuf->flags & REGBUF_STANDARD)
602  {
603  /* Assume we can omit data between pd_lower and pd_upper */
604  uint16 lower = ((PageHeader) page)->pd_lower;
605  uint16 upper = ((PageHeader) page)->pd_upper;
606 
607  if (lower >= SizeOfPageHeaderData &&
608  upper > lower &&
609  upper <= BLCKSZ)
610  {
611  bimg.hole_offset = lower;
612  cbimg.hole_length = upper - lower;
613  }
614  else
615  {
616  /* No "hole" to remove */
617  bimg.hole_offset = 0;
618  cbimg.hole_length = 0;
619  }
620  }
621  else
622  {
623  /* Not a standard page header, don't try to eliminate "hole" */
624  bimg.hole_offset = 0;
625  cbimg.hole_length = 0;
626  }
627 
628  /*
629  * Try to compress a block image if wal_compression is enabled
630  */
631  if (wal_compression)
632  {
633  is_compressed =
635  cbimg.hole_length,
636  regbuf->compressed_page,
637  &compressed_len);
638  }
639 
640  /*
641  * Fill in the remaining fields in the XLogRecordBlockHeader
642  * struct
643  */
645 
646  /* Report a full page image constructed for the WAL record */
647  *num_fpi += 1;
648 
649  /*
650  * Construct XLogRecData entries for the page content.
651  */
652  rdt_datas_last->next = &regbuf->bkp_rdatas[0];
653  rdt_datas_last = rdt_datas_last->next;
654 
655  bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
656 
657  /*
658  * If WAL consistency checking is enabled for the resource manager
659  * of this WAL record, a full-page image is included in the record
660  * for the block modified. During redo, the full-page is replayed
661  * only if BKPIMAGE_APPLY is set.
662  */
663  if (needs_backup)
664  bimg.bimg_info |= BKPIMAGE_APPLY;
665 
666  if (is_compressed)
667  {
668  bimg.length = compressed_len;
670 
671  rdt_datas_last->data = regbuf->compressed_page;
672  rdt_datas_last->len = compressed_len;
673  }
674  else
675  {
676  bimg.length = BLCKSZ - cbimg.hole_length;
677 
678  if (cbimg.hole_length == 0)
679  {
680  rdt_datas_last->data = page;
681  rdt_datas_last->len = BLCKSZ;
682  }
683  else
684  {
685  /* must skip the hole */
686  rdt_datas_last->data = page;
687  rdt_datas_last->len = bimg.hole_offset;
688 
689  rdt_datas_last->next = &regbuf->bkp_rdatas[1];
690  rdt_datas_last = rdt_datas_last->next;
691 
692  rdt_datas_last->data =
693  page + (bimg.hole_offset + cbimg.hole_length);
694  rdt_datas_last->len =
695  BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
696  }
697  }
698 
699  total_len += bimg.length;
700  }
701 
702  if (needs_data)
703  {
704  /*
705  * Link the caller-supplied rdata chain for this buffer to the
706  * overall list.
707  */
709  bkpb.data_length = regbuf->rdata_len;
710  total_len += regbuf->rdata_len;
711 
712  rdt_datas_last->next = regbuf->rdata_head;
713  rdt_datas_last = regbuf->rdata_tail;
714  }
715 
716  if (prev_regbuf && RelFileNodeEquals(regbuf->rnode, prev_regbuf->rnode))
717  {
718  samerel = true;
720  }
721  else
722  samerel = false;
723  prev_regbuf = regbuf;
724 
725  /* Ok, copy the header to the scratch buffer */
726  memcpy(scratch, &bkpb, SizeOfXLogRecordBlockHeader);
727  scratch += SizeOfXLogRecordBlockHeader;
728  if (include_image)
729  {
730  memcpy(scratch, &bimg, SizeOfXLogRecordBlockImageHeader);
732  if (cbimg.hole_length != 0 && is_compressed)
733  {
734  memcpy(scratch, &cbimg,
737  }
738  }
739  if (!samerel)
740  {
741  memcpy(scratch, &regbuf->rnode, sizeof(RelFileNode));
742  scratch += sizeof(RelFileNode);
743  }
744  memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
745  scratch += sizeof(BlockNumber);
746  }
747 
748  /* followed by the record's origin, if any */
751  {
752  *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
753  memcpy(scratch, &replorigin_session_origin, sizeof(replorigin_session_origin));
754  scratch += sizeof(replorigin_session_origin);
755  }
756 
757  /* followed by toplevel XID, if not already included in previous record */
759  {
761 
762  /* update the flag (later used by XLogResetInsertion) */
764 
765  *(scratch++) = (char) XLR_BLOCK_ID_TOPLEVEL_XID;
766  memcpy(scratch, &xid, sizeof(TransactionId));
767  scratch += sizeof(TransactionId);
768  }
769 
770  /* followed by main data, if any */
771  if (mainrdata_len > 0)
772  {
773  if (mainrdata_len > 255)
774  {
775  *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
776  memcpy(scratch, &mainrdata_len, sizeof(uint32));
777  scratch += sizeof(uint32);
778  }
779  else
780  {
781  *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
782  *(scratch++) = (uint8) mainrdata_len;
783  }
784  rdt_datas_last->next = mainrdata_head;
785  rdt_datas_last = mainrdata_last;
786  total_len += mainrdata_len;
787  }
788  rdt_datas_last->next = NULL;
789 
790  hdr_rdt.len = (scratch - hdr_scratch);
791  total_len += hdr_rdt.len;
792 
793  /*
794  * Calculate CRC of the data
795  *
796  * Note that the record header isn't added into the CRC initially since we
797  * don't know the prev-link yet. Thus, the CRC will represent the CRC of
798  * the whole record in the order: rdata, then backup blocks, then record
799  * header.
800  */
801  INIT_CRC32C(rdata_crc);
803  for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
804  COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
805 
806  /*
807  * Fill in the fields in the record header. Prev-link is filled in later,
808  * once we know where in the WAL the record will be inserted. The CRC does
809  * not include the record header yet.
810  */
812  rechdr->xl_tot_len = total_len;
813  rechdr->xl_info = info;
814  rechdr->xl_rmid = rmid;
815  rechdr->xl_prev = InvalidXLogRecPtr;
816  rechdr->xl_crc = rdata_crc;
817 
818  return &hdr_rdt;
819 }
820 
821 /*
822  * Create a compressed version of a backup block image.
823  *
824  * Returns false if compression fails (i.e., compressed result is actually
825  * bigger than original). Otherwise, returns true and sets 'dlen' to
826  * the length of compressed block image.
827  */
static bool
XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
						char *dest, uint16 *dlen)
{
	int32		orig_len = BLCKSZ - hole_length;	/* bytes actually compressed */
	int32		len;
	int32		extra_bytes = 0;
	char	   *source;
	PGAlignedBlock tmp;			/* scratch block for squeezing out the hole */

	if (hole_length != 0)
	{
		/* must skip the hole */
		source = tmp.data;
		memcpy(source, page, hole_offset);
		memcpy(source + hole_offset,
			   page + (hole_offset + hole_length),
			   BLCKSZ - (hole_length + hole_offset));

		/*
		 * Extra data needs to be stored in WAL record for the compressed
		 * version of block image if the hole exists.
		 *
		 * NOTE(review): extra_bytes is never updated in this branch even
		 * though the comment above says extra data is needed; upstream
		 * assigns the size of the block-compress header to extra_bytes
		 * here.  The assignment appears to have been lost in extraction --
		 * verify against the original source.
		 */
	}
	else
		source = page;

	/*
	 * We recheck the actual size even if pglz_compress() reports success and
	 * see if the number of bytes saved by compression is larger than the
	 * length of extra data needed for the compressed version of block image.
	 */
	len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
	if (len >= 0 &&
		len + extra_bytes < orig_len)
	{
		*dlen = (uint16) len;	/* successful compression */
		return true;
	}
	return false;
}
870 
871 /*
872  * Determine whether the buffer referenced has to be backed up.
873  *
874  * Since we don't yet have the insert lock, fullPageWrites and forcePageWrites
875  * could change later, so the result should be used for optimization purposes
876  * only.
877  */
878 bool
880 {
882  bool doPageWrites;
883  Page page;
884 
885  GetFullPageWriteInfo(&RedoRecPtr, &doPageWrites);
886 
887  page = BufferGetPage(buffer);
888 
889  if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
890  return true; /* buffer requires backup */
891 
892  return false; /* buffer does not need to be backed up */
893 }
894 
895 /*
896  * Write a backup block if needed when we are setting a hint. Note that
897  * this may be called for a variety of page types, not just heaps.
898  *
899  * Callable while holding just share lock on the buffer content.
900  *
901  * We can't use the plain backup block mechanism since that relies on the
902  * Buffer being exclusively locked. Since some modifications (setting LSN, hint
903  * bits) are allowed in a sharelocked buffer that can lead to wal checksum
904  * failures. So instead we copy the page and insert the copied data as normal
905  * record data.
906  *
907  * We only need to do something if page has not yet been full page written in
908  * this checkpoint round. The LSN of the inserted wal record is returned if we
909  * had to write, InvalidXLogRecPtr otherwise.
910  *
911  * It is possible that multiple concurrent backends could attempt to write WAL
912  * records. In that case, multiple copies of the same block would be recorded
913  * in separate WAL records by different backends, though that is still OK from
914  * a correctness perspective.
915  */
XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
{
	XLogRecPtr	recptr = InvalidXLogRecPtr;
	XLogRecPtr	lsn;

	/*
	 * NOTE(review): the 'XLogRecPtr' result-type line of this signature and
	 * the local declaration of RedoRecPtr (assigned below) are missing;
	 * they appear to have been lost in extraction -- restore from the
	 * original source.
	 */

	/*
	 * Ensure no checkpoint can change our view of RedoRecPtr.
	 *
	 * NOTE(review): the code line backing this comment is missing here;
	 * apparently lost in extraction -- verify against the original source.
	 */

	/*
	 * Update RedoRecPtr so that we can make the right decision
	 */
	RedoRecPtr = GetRedoRecPtr();

	/*
	 * We assume page LSN is first data on *every* page that can be passed to
	 * XLogInsert, whether it has the standard page layout or not. Since we're
	 * only holding a share-lock on the page, we must take the buffer header
	 * lock when we look at the LSN.
	 */
	lsn = BufferGetLSNAtomic(buffer);

	/* Only log if the page has not been full-page written since RedoRecPtr. */
	if (lsn <= RedoRecPtr)
	{
		int			flags;
		PGAlignedBlock copied_buffer;
		char	   *origdata = (char *) BufferGetBlock(buffer);
		RelFileNode rnode;
		ForkNumber	forkno;
		BlockNumber blkno;

		/*
		 * Copy buffer so we don't have to worry about concurrent hint bit or
		 * lsn updates. We assume pd_lower/upper cannot be changed without an
		 * exclusive lock, so the contents bkp are not racy.
		 */
		if (buffer_std)
		{
			/* Assume we can omit data between pd_lower and pd_upper */
			Page		page = BufferGetPage(buffer);
			uint16		lower = ((PageHeader) page)->pd_lower;
			uint16		upper = ((PageHeader) page)->pd_upper;

			memcpy(copied_buffer.data, origdata, lower);
			memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
		}
		else
			memcpy(copied_buffer.data, origdata, BLCKSZ);

		XLogBeginInsert();

		/* Register the private copy as a forced full-page image. */
		flags = REGBUF_FORCE_IMAGE;
		if (buffer_std)
			flags |= REGBUF_STANDARD;

		BufferGetTag(buffer, &rnode, &forkno, &blkno);
		XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags);

		recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
	}

	return recptr;
}
982 
983 /*
984  * Write a WAL record containing a full image of a page. Caller is responsible
985  * for writing the page to disk after calling this routine.
986  *
987  * Note: If you're using this function, you should be building pages in private
988  * memory and writing them directly to smgr. If you're using buffers, call
989  * log_newpage_buffer instead.
990  *
991  * If the page follows the standard page layout, with a PageHeader and unused
992  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
993  * the unused space to be left out from the WAL record, making it smaller.
994  */
997  Page page, bool page_std)
998 {
999  int flags;
1000  XLogRecPtr recptr;
1001 
1002  flags = REGBUF_FORCE_IMAGE;
1003  if (page_std)
1004  flags |= REGBUF_STANDARD;
1005 
1006  XLogBeginInsert();
1007  XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags);
1008  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
1009 
1010  /*
1011  * The page may be uninitialized. If so, we can't set the LSN because that
1012  * would corrupt the page.
1013  */
1014  if (!PageIsNew(page))
1015  {
1016  PageSetLSN(page, recptr);
1017  }
1018 
1019  return recptr;
1020 }
1021 
1022 /*
1023  * Like log_newpage(), but allows logging multiple pages in one operation.
1024  * It is more efficient than calling log_newpage() for each page separately,
1025  * because we can write multiple pages in a single WAL record.
1026  */
void
log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
			 BlockNumber *blknos, Page *pages, bool page_std)
{
	int			flags;
	XLogRecPtr	recptr;
	int			i;
	int			j;

	/* Every page is logged as a forced full-page image. */
	flags = REGBUF_FORCE_IMAGE;
	if (page_std)
		flags |= REGBUF_STANDARD;

	/*
	 * Iterate over all the pages. They are collected into batches of
	 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
	 * batch.
	 *
	 * NOTE(review): upstream calls XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID, 0)
	 * at this point so that a full batch of block references can actually be
	 * registered; the call appears to have been lost in extraction --
	 * restore from the original source.
	 */

	i = 0;
	while (i < num_pages)
	{
		int			batch_start = i;
		int			nbatch;

		XLogBeginInsert();

		/* Register up to XLR_MAX_BLOCK_ID pages into this record. */
		nbatch = 0;
		while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
		{
			XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags);
			i++;
			nbatch++;
		}

		recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);

		/* Stamp each page of the batch with the record's LSN. */
		for (j = batch_start; j < i; j++)
		{
			/*
			 * The page may be uninitialized. If so, we can't set the LSN
			 * because that would corrupt the page.
			 */
			if (!PageIsNew(pages[j]))
			{
				PageSetLSN(pages[j], recptr);
			}
		}
	}
}
1078 
1079 /*
1080  * Write a WAL record containing a full image of a page.
1081  *
1082  * Caller should initialize the buffer and mark it dirty before calling this
1083  * function. This function will set the page LSN.
1084  *
1085  * If the page follows the standard page layout, with a PageHeader and unused
1086  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1087  * the unused space to be left out from the WAL record, making it smaller.
1088  */
1089 XLogRecPtr
1090 log_newpage_buffer(Buffer buffer, bool page_std)
1091 {
1092  Page page = BufferGetPage(buffer);
1093  RelFileNode rnode;
1094  ForkNumber forkNum;
1095  BlockNumber blkno;
1096 
1097  /* Shared buffers should be modified in a critical section. */
1098  Assert(CritSectionCount > 0);
1099 
1100  BufferGetTag(buffer, &rnode, &forkNum, &blkno);
1101 
1102  return log_newpage(&rnode, forkNum, blkno, page, page_std);
1103 }
1104 
1105 /*
1106  * WAL-log a range of blocks in a relation.
1107  *
1108  * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1109  * written to the WAL. If the range is large, this is done in multiple WAL
1110  * records.
1111  *
1112  * If all page follows the standard page layout, with a PageHeader and unused
1113  * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1114  * the unused space to be left out from the WAL records, making them smaller.
1115  *
1116  * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1117  * is used on a newly-built relation, and the caller is holding a
1118  * AccessExclusiveLock on it, so no other backend can be accessing it at the
1119  * same time. If that's not the case, you must ensure that this does not
1120  * cause a deadlock through some other means.
1121  */
1122 void
1124  BlockNumber startblk, BlockNumber endblk,
1125  bool page_std)
1126 {
1127  int flags;
1128  BlockNumber blkno;
1129 
1130  flags = REGBUF_FORCE_IMAGE;
1131  if (page_std)
1132  flags |= REGBUF_STANDARD;
1133 
1134  /*
1135  * Iterate over all the pages in the range. They are collected into
1136  * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1137  * for each batch.
1138  */
1140 
1141  blkno = startblk;
1142  while (blkno < endblk)
1143  {
1144  Buffer bufpack[XLR_MAX_BLOCK_ID];
1145  XLogRecPtr recptr;
1146  int nbufs;
1147  int i;
1148 
1150 
1151  /* Collect a batch of blocks. */
1152  nbufs = 0;
1153  while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1154  {
1155  Buffer buf = ReadBufferExtended(rel, forkNum, blkno,
1156  RBM_NORMAL, NULL);
1157 
1159 
1160  /*
1161  * Completely empty pages are not WAL-logged. Writing a WAL record
1162  * would change the LSN, and we don't want that. We want the page
1163  * to stay empty.
1164  */
1165  if (!PageIsNew(BufferGetPage(buf)))
1166  bufpack[nbufs++] = buf;
1167  else
1168  UnlockReleaseBuffer(buf);
1169  blkno++;
1170  }
1171 
1172  /* Write WAL record for this batch. */
1173  XLogBeginInsert();
1174 
1176  for (i = 0; i < nbufs; i++)
1177  {
1178  XLogRegisterBuffer(i, bufpack[i], flags);
1179  MarkBufferDirty(bufpack[i]);
1180  }
1181 
1182  recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
1183 
1184  for (i = 0; i < nbufs; i++)
1185  {
1186  PageSetLSN(BufferGetPage(bufpack[i]), recptr);
1187  UnlockReleaseBuffer(bufpack[i]);
1188  }
1189  END_CRIT_SECTION();
1190  }
1191 }
1192 
1193 /*
1194  * Allocate working buffers needed for WAL record construction.
1195  */
1196 void
1198 {
1199  /* Initialize the working areas */
1200  if (xloginsert_cxt == NULL)
1201  {
1202  xloginsert_cxt = AllocSetContextCreate(TopMemoryContext,
1203  "WAL record construction",
1205  }
1206 
1207  if (registered_buffers == NULL)
1208  {
1209  registered_buffers = (registered_buffer *)
1210  MemoryContextAllocZero(xloginsert_cxt,
1211  sizeof(registered_buffer) * (XLR_NORMAL_MAX_BLOCK_ID + 1));
1213  }
1214  if (rdatas == NULL)
1215  {
1216  rdatas = MemoryContextAlloc(xloginsert_cxt,
1217  sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1219  }
1220 
1221  /*
1222  * Allocate a buffer to hold the header information for a WAL record.
1223  */
1224  if (hdr_scratch == NULL)
1225  hdr_scratch = MemoryContextAllocZero(xloginsert_cxt,
1227 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:368
static XLogRecData hdr_rdt
Definition: xloginsert.c:88
#define INIT_CRC32C(crc)
Definition: pg_crc32c.h:41
void MarkSubTransactionAssigned(void)
Definition: xact.c:6107
XLogRecPtr xl_prev
Definition: xlogrecord.h:45
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define AllocSetContextCreate
Definition: memutils.h:170
#define BKPIMAGE_HAS_HOLE
Definition: xlogrecord.h:146
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
Definition: xloginsert.c:1090
static uint32 mainrdata_len
Definition: xloginsert.c:75
#define XLR_SPECIAL_REL_UPDATE
Definition: xlogrecord.h:71
#define HEADER_SCRATCH_SIZE
Definition: xloginsert.c:94
uint32 TransactionId
Definition: c.h:520
Datum lower(PG_FUNCTION_ARGS)
Definition: oracle_compat.c:44
#define XLR_BLOCK_ID_DATA_LONG
Definition: xlogrecord.h:224
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1469
PGPROC * MyProc
Definition: proc.c:67
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
uint32 pg_crc32c
Definition: pg_crc32c.h:38
#define XLR_NORMAL_RDATAS
Definition: xloginsert.h:28
void InitXLogInsert(void)
Definition: xloginsert.c:1197
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:653
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:917
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
BlockNumber block
Definition: xloginsert.c:49
static bool begininsert_called
Definition: xloginsert.c:107
unsigned char uint8
Definition: c.h:372
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define MemSet(start, val, len)
Definition: c.h:949
RmgrId xl_rmid
Definition: xlogrecord.h:47
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:238
uint32 BlockNumber
Definition: block.h:31
Datum upper(PG_FUNCTION_ARGS)
Definition: oracle_compat.c:75
XLogRecData * rdata_head
Definition: xloginsert.c:52
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define SizeOfPageHeaderData
Definition: bufpage.h:216
#define PANIC
Definition: elog.h:53
#define XLR_CHECK_CONSISTENCY
Definition: xlogrecord.h:80
signed int int32
Definition: c.h:362
RelFileNode rnode
Definition: xloginsert.c:47
char data[BLCKSZ]
Definition: c.h:1082
void XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum, BlockNumber blknum, Page page, uint8 flags)
Definition: xloginsert.c:273
unsigned short uint16
Definition: c.h:373
static bool doPageWrites
Definition: xlog.c:381
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3534
#define ERROR
Definition: elog.h:43
#define XLR_BLOCK_ID_TOPLEVEL_XID
Definition: xlogrecord.h:226
bool delayChkpt
Definition: proc.h:176
static int max_registered_buffers
Definition: xloginsert.c:65
void XLogResetInsertion(void)
Definition: xloginsert.c:196
uint32 xl_tot_len
Definition: xlogrecord.h:43
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi)
Definition: xloginsert.c:491
#define SizeOfXLogRecordBlockImageHeader
Definition: xlogrecord.h:142
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:192
static XLogRecPtr RedoRecPtr
Definition: xlog.c:374
#define BKPIMAGE_APPLY
Definition: xlogrecord.h:148
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:2924
struct RelFileNode RelFileNode
static char * buf
Definition: pg_test_fsync.c:68
static XLogRecData * mainrdata_head
Definition: xloginsert.c:73
#define REGBUF_STANDARD
Definition: xloginsert.h:35
TransactionId GetCurrentTransactionIdIfAny(void)
Definition: xact.c:455
unsigned int uint32
Definition: c.h:374
TransactionId GetTopTransactionIdIfAny(void)
Definition: xact.c:425
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
volatile uint32 CritSectionCount
Definition: globals.c:38
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:182
static int max_rdatas
Definition: xloginsert.c:105
XLogRecData * rdata_tail
Definition: xloginsert.c:54
bool XLogInsertAllowed(void)
Definition: xlog.c:8187
MemoryContext TopMemoryContext
Definition: mcxt.c:44
ForkNumber
Definition: relpath.h:40
#define XLR_MAX_BLOCK_ID
Definition: xlogrecord.h:221
static registered_buffer * registered_buffers
Definition: xloginsert.c:64
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
#define SizeOfXLogRecord
Definition: xlogrecord.h:55
#define REGBUF_FORCE_IMAGE
Definition: xloginsert.h:31
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
bool XLogCheckBufferNeedsBackup(Buffer buffer)
Definition: xloginsert.c:879
#define BKPBLOCK_SAME_REL
Definition: xlogrecord.h:183
#define BKPIMAGE_IS_COMPRESSED
Definition: xlogrecord.h:147
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:38
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3750
#define BKPBLOCK_HAS_IMAGE
Definition: xlogrecord.h:180
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:840
bool IsSubTransactionAssignmentPending(void)
Definition: xact.c:6079
#define XLOG_INCLUDE_XID
Definition: xlog.h:240
uint8 RmgrId
Definition: rmgr.h:11
static XLogRecData * mainrdata_last
Definition: xloginsert.c:74
#define XLR_NORMAL_MAX_BLOCK_ID
Definition: xloginsert.h:27
#define PGLZ_MAX_BLCKSZ
Definition: xloginsert.c:37
PageHeaderData * PageHeader
Definition: bufpage.h:166
static int num_rdatas
Definition: xloginsert.c:104
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:745
RepOriginId replorigin_session_origin
Definition: origin.c:154
bool * wal_consistency_checking
Definition: xlog.c:102
#define XLOG_FPI_FOR_HINT
Definition: pg_control.h:77
uint8 xl_info
Definition: xlogrecord.h:46
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:32
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition: xloginsert.c:149
#define XLR_BLOCK_ID_ORIGIN
Definition: xlogrecord.h:225
ForkNumber forkno
Definition: xloginsert.c:48
XLogRecPtr GetRedoRecPtr(void)
Definition: xlog.c:8366
pg_crc32c xl_crc
Definition: xlogrecord.h:49
struct XLogRecData * next
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1070
#define InvalidRepOriginId
Definition: origin.h:33
#define PageGetLSN(page)
Definition: bufpage.h:366
#define XLR_BLOCK_ID_DATA_SHORT
Definition: xlogrecord.h:223
static char * hdr_scratch
Definition: xloginsert.c:89
void log_newpage_range(Relation rel, ForkNumber forkNum, BlockNumber startblk, BlockNumber endblk, bool page_std)
Definition: xloginsert.c:1123
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:393
void log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
Definition: xloginsert.c:1028
TransactionId xl_xid
Definition: xlogrecord.h:44
static int max_registered_block_id
Definition: xloginsert.c:66
#define XLOG_FPI
Definition: pg_control.h:78
#define PageIsNew(page)
Definition: bufpage.h:229
char compressed_page[PGLZ_MAX_BLCKSZ]
Definition: xloginsert.c:61
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:797
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition: xlog.c:8395
XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, Page page, bool page_std)
Definition: xloginsert.c:996
#define elog(elevel,...)
Definition: elog.h:214
int i
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi)
Definition: xlog.c:999
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
bool wal_compression
Definition: xlog.c:100
const PGLZ_Strategy *const PGLZ_strategy_default
static bool XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length, char *dest, uint16 *dlen)
Definition: xloginsert.c:829
static uint8 curinsert_flags
Definition: xloginsert.c:78
XLogRecData bkp_rdatas[2]
Definition: xloginsert.c:57
void BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: bufmgr.c:2682
#define COMP_CRC32C(crc, data, len)
Definition: pg_crc32c.h:89
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define BufferGetBlock(buffer)
Definition: bufmgr.h:136
#define SizeOfXLogRecordBlockCompressHeader
Definition: xlogrecord.h:160
static MemoryContext xloginsert_cxt
Definition: xloginsert.c:110
Pointer Page
Definition: bufpage.h:78
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static XLogRecData * rdatas
Definition: xloginsert.c:103
#define BKPBLOCK_HAS_DATA
Definition: xlogrecord.h:181
#define SizeOfXLogLongPHD
Definition: xlog_internal.h:69
#define XLR_RMGR_INFO_MASK
Definition: xlogrecord.h:63
#define SizeOfXLogRecordBlockHeader
Definition: xlogrecord.h:104