xloginsert.c — PostgreSQL source, captured from a doxygen-generated HTML page.
The per-line numeric prefixes below and any gaps in that numbering are
artifacts of the extraction (navigation chrome removed); consult the upstream
repository for the authoritative text.
1/*-------------------------------------------------------------------------
2 *
3 * xloginsert.c
4 * Functions for constructing WAL records
5 *
6 * Constructing a WAL record begins with a call to XLogBeginInsert,
7 * followed by a number of XLogRegister* calls. The registered data is
8 * collected in private working memory, and finally assembled into a chain
9 * of XLogRecData structs by a call to XLogRecordAssemble(). See
10 * access/transam/README for details.
11 *
12 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
13 * Portions Copyright (c) 1994, Regents of the University of California
14 *
15 * src/backend/access/transam/xloginsert.c
16 *
17 *-------------------------------------------------------------------------
18 */
19
20#include "postgres.h"
21
22#ifdef USE_LZ4
23#include <lz4.h>
24#endif
25
26#ifdef USE_ZSTD
27#include <zstd.h>
28#endif
29
30#include "access/xact.h"
31#include "access/xlog.h"
33#include "access/xloginsert.h"
34#include "catalog/pg_control.h"
36#include "executor/instrument.h"
37#include "miscadmin.h"
38#include "pg_trace.h"
39#include "replication/origin.h"
40#include "storage/bufmgr.h"
41#include "storage/proc.h"
42#include "utils/memutils.h"
44#include "utils/rel.h"
45
46/*
47 * Guess the maximum buffer size required to store a compressed version of
48 * backup block image.
49 */
50#ifdef USE_LZ4
51#define LZ4_MAX_BLCKSZ LZ4_COMPRESSBOUND(BLCKSZ)
52#else
53#define LZ4_MAX_BLCKSZ 0
54#endif
55
56#ifdef USE_ZSTD
57#define ZSTD_MAX_BLCKSZ ZSTD_COMPRESSBOUND(BLCKSZ)
58#else
59#define ZSTD_MAX_BLCKSZ 0
60#endif
61
62#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
63
64/* Buffer size required to store a compressed version of backup block image */
65#define COMPRESS_BUFSIZE Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
66
 67/*
 68 * For each block reference registered with XLogRegisterBuffer, we fill in
 69 * a registered_buffer struct.
 70 */
 71typedef struct
 72{
 73	bool		in_use;			/* is this slot in use? */
 74	uint8		flags;			/* REGBUF_* flags */
 75	RelFileLocator rlocator;	/* identifies the relation and block */
/*
 * NOTE(review): extraction gap — original lines 76-77 are missing here
 * (presumably the forkno/block fields referenced by the code below, e.g. in
 * BufferGetTag() calls); verify against upstream.
 */
 78	const PageData *page;		/* page content */
 79	uint32		rdata_len;		/* total length of data in rdata chain */
 80	XLogRecData *rdata_head;	/* head of the chain of data registered with
 81								 * this block */
 82	XLogRecData *rdata_tail;	/* last entry in the chain, or &rdata_head if
 83								 * empty */
 84
 85	XLogRecData bkp_rdatas[2];	/* temporary rdatas used to hold references to
 86								 * backup block data in XLogRecordAssemble() */
 87
 88	/* buffer to store a compressed version of backup block image */
 89	char		compressed_page[COMPRESS_BUFSIZE];
/* NOTE(review): line 90 (the closing "} registered_buffer;") was dropped by the extraction. */
 91
/* NOTE(review): line 92 (the registered_buffers array declaration) is missing. */
 93static int	max_registered_buffers; /* allocated size */
 94static int	max_registered_block_id = 0;	/* highest block_id + 1 currently
 95											 * registered */
 96
 97/*
 98 * A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
 99 * with XLogRegisterData(...).
100 */
/* NOTE(review): lines 101-102 (mainrdata_head/mainrdata_last declarations, per usage below) are missing. */
103static uint64 mainrdata_len;	/* total # of bytes in chain */
104
105/* flags for the in-progress insertion */
/* NOTE(review): line 106 (the curinsert_flags declaration used by XLogResetInsertion) is missing. */
107
108/*
109 * These are used to hold the record header while constructing a record.
110 * 'hdr_scratch' is not a plain variable, but is palloc'd at initialization,
111 * because we want it to be MAXALIGNed and padding bytes zeroed.
112 *
113 * For simplicity, it's allocated large enough to hold the headers for any
114 * WAL record.
115 */
117static char *hdr_scratch = NULL;
118
119#define SizeOfXlogOrigin	(sizeof(ReplOriginId) + sizeof(char))
120#define SizeOfXLogTransactionId	(sizeof(TransactionId) + sizeof(char))
121
122#define HEADER_SCRATCH_SIZE \
123	(SizeOfXLogRecord + \
124	 MaxSizeOfXLogRecordBlockHeader * (XLR_MAX_BLOCK_ID + 1) + \
125	 SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin + \
126	 SizeOfXLogTransactionId)
127
128/*
129 * An array of XLogRecData structs, to hold registered data.
130 */
/* NOTE(review): line 131 (the rdatas array declaration) is missing. */
132static int	num_rdatas;			/* entries currently used */
133static int	max_rdatas;			/* allocated size */
134
135static bool begininsert_called = false;
136
137/* Memory context to hold the registered buffer and data references. */
139
/* NOTE(review): lines 138 and 141-143 (context variable; remaining XLogRecordAssemble prototype parameters) are missing. */
140static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
144									   bool *topxid_included);
145static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset,
146									uint16 hole_length, void *dest, uint16 *dlen);
147
148/*
149 * Begin constructing a WAL record. This must be called before the
150 * XLogRegister* functions and XLogInsert().
151 */
152void
/*
 * NOTE(review): extraction gap — lines 153 and 155-156 are missing
 * (presumably the "XLogBeginInsert(void)" name line and initial state
 * assertions); verify against upstream.
 */
154{
157	Assert(mainrdata_len == 0);
158
159	/* cross-check on whether we should be here or not */
160	if (!XLogInsertAllowed())
161		elog(ERROR, "cannot make new WAL entries during recovery");
162
/* NOTE(review): line 163 (presumably "if (begininsert_called)") is missing before this elog. */
164		elog(ERROR, "XLogBeginInsert was already called");
165
166	begininsert_called = true;
167}
168
169/*
170 * Ensure that there are enough buffer and data slots in the working area,
171 * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
172 * calls.
173 *
174 * There is always space for a small number of buffers and data chunks, enough
175 * for most record types. This function is for the exceptional cases that need
176 * more.
177 */
178void
179XLogEnsureRecordSpace(int max_block_id, int ndatas)
180{
181	int			nbuffers;
182
183	/*
184	 * This must be called before entering a critical section, because
185	 * allocating memory inside a critical section can fail. repalloc() will
186	 * check the same, but better to check it here too so that we fail
187	 * consistently even if the arrays happen to be large enough already.
188	 */
/* NOTE(review): line 189-190 missing (presumably an Assert on CritSectionCount). */
190
191	/* the minimum values can't be decreased */
192	if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
193		max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
/* NOTE(review): lines 194-195 missing (presumably the analogous clamp of ndatas). */
196
197	if (max_block_id > XLR_MAX_BLOCK_ID)
198		elog(ERROR, "maximum number of WAL record block references exceeded");
199	nbuffers = max_block_id + 1;
200
201	if (nbuffers > max_registered_buffers)
202	{
/* NOTE(review): line 203 missing (presumably "registered_buffers =" receiving the repalloc result). */
204			repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
205
206		/*
207		 * At least the padding bytes in the structs must be zeroed, because
208		 * they are included in WAL data, but initialize it all for tidiness.
209		 */
/* NOTE(review): line 210 missing (presumably the MemSet call whose length argument follows). */
211			   (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
212		max_registered_buffers = nbuffers;
213	}
214
215	if (ndatas > max_rdatas)
216	{
/* NOTE(review): lines 217-218 missing (presumably repalloc of rdatas and max_rdatas update). */
219	}
220}
221
222/*
223 * Reset WAL record construction buffers.
224 */
225void
/* NOTE(review): line 226 missing (presumably "XLogResetInsertion(void)"). */
227{
228	int			i;
229
230	for (i = 0; i < max_registered_block_id; i++)
231		registered_buffers[i].in_use = false;
232
233	num_rdatas = 0;
/* NOTE(review): lines 234 and 236 missing (presumably resets of max_registered_block_id and mainrdata_last). */
235	mainrdata_len = 0;
237	curinsert_flags = 0;
238	begininsert_called = false;
239}
240
241/*
242 * Register a reference to a buffer with the WAL record being constructed.
243 * This must be called for every page that the WAL-logged operation modifies.
244 */
245void
/* NOTE(review): line 246 missing (presumably "XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)"). */
247{
/* NOTE(review): line 248 missing (presumably the registered_buffer *regbuf declaration used below). */
249
250	/* NO_IMAGE doesn't make sense with FORCE_IMAGE */
251	Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
/* NOTE(review): line 252 missing (presumably Assert(begininsert_called)). */
253
254	/*
255	 * Ordinarily, buffer should be exclusive-locked and marked dirty before
256	 * we get here, otherwise we could end up violating one of the rules in
257	 * access/transam/README.
258	 *
259	 * Some callers intentionally register a clean page and never update that
260	 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
261	 * bypass these checks.
262	 */
263#ifdef USE_ASSERT_CHECKING
264	if (!(flags & REGBUF_NO_CHANGE))
/* NOTE(review): line 265 missing (first half of the multi-line Assert ending below). */
266			   BufferIsDirty(buffer));
267#endif
268
/* NOTE(review): line 269 missing (presumably the "if (block_id >= max_registered_block_id)" guard). */
270	{
/* NOTE(review): lines 271 and 273 missing (bounds check against max_registered_buffers; max_registered_block_id update). */
272			elog(ERROR, "too many registered buffers");
274	}
275
/* NOTE(review): line 276 missing (presumably regbuf = &registered_buffers[block_id]). */
277
278	BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
279	regbuf->page = BufferGetPage(buffer);
280	regbuf->flags = flags;
281	regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
282	regbuf->rdata_len = 0;
283
284	/*
285	 * Check that this page hasn't already been registered with some other
286	 * block_id.
287	 */
288#ifdef USE_ASSERT_CHECKING
289	{
290		int			i;
291
292		for (i = 0; i < max_registered_block_id; i++)
293		{
/* NOTE(review): line 294 missing (presumably regbuf_old = &registered_buffers[i]). */
295
296			if (i == block_id || !regbuf_old->in_use)
297				continue;
298
299			Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
300				   regbuf_old->forkno != regbuf->forkno ||
301				   regbuf_old->block != regbuf->block);
302		}
303	}
304#endif
305
306	regbuf->in_use = true;
307}
308
309/*
310 * Like XLogRegisterBuffer, but for registering a block that's not in the
311 * shared buffer pool (i.e. when you don't have a Buffer for it).
312 */
313void
/* NOTE(review): line 314 missing (presumably "XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum,"). */
315				  BlockNumber blknum, const PageData *page, uint8 flags)
316{
/* NOTE(review): lines 317-324 partially missing (regbuf declaration, begininsert Assert, block_id bounds checks). */
318
320
323
325		elog(ERROR, "too many registered buffers");
326
/* NOTE(review): line 327 missing (presumably regbuf = &registered_buffers[block_id]). */
328
329	regbuf->rlocator = *rlocator;
330	regbuf->forkno = forknum;
331	regbuf->block = blknum;
332	regbuf->page = page;
333	regbuf->flags = flags;
334	regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
335	regbuf->rdata_len = 0;
336
337	/*
338	 * Check that this page hasn't already been registered with some other
339	 * block_id.
340	 */
341#ifdef USE_ASSERT_CHECKING
342	{
343		int			i;
344
345		for (i = 0; i < max_registered_block_id; i++)
346		{
/* NOTE(review): line 347 missing (presumably regbuf_old = &registered_buffers[i]). */
348
349			if (i == block_id || !regbuf_old->in_use)
350				continue;
351
352			Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
353				   regbuf_old->forkno != regbuf->forkno ||
354				   regbuf_old->block != regbuf->block);
355		}
356	}
357#endif
358
359	regbuf->in_use = true;
360}
361
362/*
363 * Add data to the WAL record that's being constructed.
364 *
365 * The data is appended to the "main chunk", available at replay with
366 * XLogRecGetData().
367 */
368void
/* NOTE(review): line 369 missing (presumably "XLogRegisterData(const void *data, uint32 len)"). */
370{
/* NOTE(review): lines 371-374 partially missing (rdata declaration; begininsert Assert). */
372
374
375	if (num_rdatas >= max_rdatas)
/* NOTE(review): line 376 missing (presumably the ereport(ERROR, ...) opener for this errmsg). */
377				(errmsg_internal("too much WAL data"),
378				 errdetail_internal("%d out of %d data segments are already in use.",
/* NOTE(review): line 379 missing (the num_rdatas/max_rdatas arguments closing the ereport). */
380	rdata = &rdatas[num_rdatas++];
381
382	rdata->data = data;
383	rdata->len = len;
384
385	/*
386	 * we use the mainrdata_last pointer to track the end of the chain, so no
387	 * need to clear 'next' here.
388	 */
389
/* NOTE(review): lines 390-393 missing (presumably linking rdata into the mainrdata chain and updating mainrdata_len). */
392
394}
395
396/*
397 * Add buffer-specific data to the WAL record that's being constructed.
398 *
399 * Block_id must reference a block previously registered with
400 * XLogRegisterBuffer(). If this is called more than once for the same
401 * block_id, the data is appended.
402 *
403 * The maximum amount of data that can be registered per block is 65535
404 * bytes. That should be plenty; if you need more than BLCKSZ bytes to
405 * reconstruct the changes to the page, you might as well just log a full
406 * copy of it. (the "main data" that's not associated with a block is not
407 * limited)
408 */
409void
/* NOTE(review): line 410 missing (presumably "XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)"). */
411{
/* NOTE(review): lines 412-418 partially missing (regbuf/rdata declarations; Asserts; regbuf lookup). */
414
416
417	/* find the registered buffer struct */
419	if (!regbuf->in_use)
420		elog(ERROR, "no block with id %d registered with WAL insertion",
421			 block_id);
422
423	/*
424	 * Check against max_rdatas and ensure we do not register more data per
425	 * buffer than can be handled by the physical data format; i.e. that
426	 * regbuf->rdata_len does not grow beyond what
427	 * XLogRecordBlockHeader->data_length can hold.
428	 */
429	if (num_rdatas >= max_rdatas)
/* NOTE(review): lines 430, 433 and 435 missing (ereport openers/argument lines for the two errors below). */
431				(errmsg_internal("too much WAL data"),
432				 errdetail_internal("%d out of %d data segments are already in use.",
434	if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
436				(errmsg_internal("too much WAL data"),
437				 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
438									UINT16_MAX, block_id, regbuf->rdata_len, len)));
439
440	rdata = &rdatas[num_rdatas++];
441
442	rdata->data = data;
443	rdata->len = len;
444
445	regbuf->rdata_tail->next = rdata;
446	regbuf->rdata_tail = rdata;
447	regbuf->rdata_len += len;
448}
449
450/*
451 * Set insert status flags for the upcoming WAL record.
452 *
453 * The flags that can be used here are:
454 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
455 *	 included in the record.
456 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
457 *	 durability, which allows to avoid triggering WAL archiving and other
458 *	 background activity.
459 */
460void
/*
 * NOTE(review): extraction gap — lines 461-465 are missing here, i.e. the
 * entire signature and body of this function (presumably XLogSetRecordFlags,
 * which the comment above documents). Restore from upstream.
 */
466
467/*
468 * Insert an XLOG record having the specified RMID and info bytes, with the
469 * body of the record being the data and buffer references registered earlier
470 * with XLogRegister* calls.
471 *
472 * Returns XLOG pointer to end of record (beginning of next record).
473 * This can be used as LSN for data pages affected by the logged action.
474 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
475 * before the data page can be written out. This implements the basic
476 * WAL rule "write the log before the data".)
477 */
/* NOTE(review): lines 478-479 missing (presumably "XLogRecPtr" return type and "XLogInsert(RmgrId rmid, uint8 info)"). */
480{
/* NOTE(review): line 481 missing (presumably the EndPos declaration used below). */
482
483	/* XLogBeginInsert() must have been called. */
/* NOTE(review): line 484 missing (presumably "if (!begininsert_called)"). */
485		elog(ERROR, "XLogBeginInsert was not called");
486
487	/*
488	 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
489	 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
490	 */
491	if ((info & ~(XLR_RMGR_INFO_MASK |
/* NOTE(review): lines 492-493 missing (remaining mask members of this condition). */
494		elog(PANIC, "invalid xlog info mask %02X", info);
495
496	TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
497
498	/*
499	 * In bootstrap mode, we don't actually log anything but XLOG resources;
500	 * return a phony record pointer.
501	 */
502	if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
503	{
/* NOTE(review): line 504 missing (presumably XLogResetInsertion()). */
505		EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
506		return EndPos;
507	}
508
509	do
510	{
/* NOTE(review): lines 511, 514-515 missing (RedoRecPtr/fpw_lsn/rdt declarations, per usage below). */
512		bool		doPageWrites;
513		bool		topxid_included = false;
516		int			num_fpi = 0;
517		uint64		fpi_bytes = 0;
518
519		/*
520		 * Get values needed to decide whether to do full-page writes. Since
521		 * we don't yet have an insertion lock, these could change under us,
522		 * but XLogInsertRecord will recheck them once it has a lock.
523		 */
/* NOTE(review): lines 524-531 missing (presumably GetFullPageWriteInfo, the XLogRecordAssemble call and XLogInsertRecord call). */
525
529
532	} while (!XLogRecPtrIsValid(EndPos));
533
/* NOTE(review): line 534 missing (presumably XLogResetInsertion()). */
535
536	return EndPos;
537}
538
539/*
540 * Simple wrapper to XLogInsert to insert a WAL record with elementary
541 * contents (only an int64 is supported as value currently).
542 */
/* NOTE(review): lines 543-544 missing (presumably "XLogRecPtr" and "XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)"). */
545{
/* NOTE(review): line 546 missing (presumably XLogBeginInsert()). */
547	XLogRegisterData(&value, sizeof(value));
548	return XLogInsert(rmid, info);
549}
550
551/*
552 * XLogGetFakeLSN - get a fake LSN for an index page that isn't WAL-logged.
553 *
554 * Some index AMs use LSNs to detect concurrent page modifications, but not
555 * all index pages are WAL-logged. This function provides a sequence of fake
556 * LSNs for that purpose.
557 */
/* NOTE(review): lines 558-559 missing (presumably "XLogRecPtr" and "XLogGetFakeLSN(Relation rel)"). */
560{
561	if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
562	{
563		/*
564		 * Temporary relations are only accessible in our session, so a simple
565		 * backend-local counter will do.
566		 */
567		static XLogRecPtr counter = FirstNormalUnloggedLSN;
568
569		return counter++;
570	}
571	else if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
572	{
573		/*
574		 * Unlogged relations are accessible from other backends, and survive
575		 * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us.
576		 */
/* NOTE(review): line 577 missing (presumably "return GetFakeLSNForUnloggedRel();" per the comment above). */
578	}
579	else
580	{
581		/*
582		 * WAL-logging on this relation will start after commit, so its LSNs
583		 * must be distinct numbers smaller than the LSN at the next commit.
584		 * Emit a dummy WAL record if insert-LSN hasn't advanced after the
585		 * last call.
586		 */
/* NOTE(review): lines 587-597 partially missing (static lastlsn, currlsn computation, the no-op record emission branch). */
589
592
593		/* No need for an actual record if we already have a distinct LSN */
596
598		return currlsn;
	}
600}
601
602/*
603 * Assemble a WAL record from the registered data and buffers into an
604 * XLogRecData chain, ready for insertion with XLogInsertRecord().
605 *
606 * The record header fields are filled in, except for the xl_prev field. The
607 * calculated CRC does not include the record header yet.
608 *
609 * If there are any registered buffers, and a full-page image was not taken
610 * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
611 * signals that the assembled record is only good for insertion on the
612 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
613 *
614 * *topxid_included is set if the topmost transaction ID is logged with the
615 * current subtransaction.
616 */
617static XLogRecData *
/*
 * NOTE(review): extraction gap — lines 618-620 of the signature are missing
 * (the remaining parameters: per the prototype earlier in the file and usage
 * below, presumably RedoRecPtr, doPageWrites, fpw_lsn, num_fpi, fpi_bytes).
 */
621				   bool *topxid_included)
622{
/* NOTE(review): lines 623, 626-629 missing (rechdr/rdt/rdata_crc/hdr_rdt and related declarations used below). */
624	uint64		total_len = 0;
625	int			block_id;
630	char	   *scratch = hdr_scratch;
631
632	/*
633	 * Note: this function can be called multiple times for the same record.
634	 * All the modifications we do to the rdata chains below must handle that.
635	 */
636
637	/* The record begins with the fixed-size header */
/* NOTE(review): lines 638-639, 642-643 missing (rechdr assignment; hdr_rdt data/len setup). */
640
641	hdr_rdt.next = NULL;
644
645	/*
646	 * Enforce consistency checks for this record if user is looking for it.
647	 * Do this before at the beginning of this routine to give the possibility
648	 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
649	 * a record.
650	 */
651	if (wal_consistency_checking[rmid])
652		info |= XLR_CHECK_CONSISTENCY;
653
654	/*
655	 * Make an rdata chain containing all the data portions of all block
656	 * references. This includes the data for full-page images. Also append
657	 * the headers for the block references in the scratch buffer.
658	 */
/* NOTE(review): lines 659-660 missing (presumably rdt_datas_last init and the for-loop over block_id). */
661	{
/* NOTE(review): lines 662, 665-667 missing (regbuf pointer; bkpb/bimg/cbimg header structs, per usage below). */
663		bool		needs_backup;
664		bool		needs_data;
668		bool		samerel;
669		bool		is_compressed = false;
670		bool		include_image;
671
672		if (!regbuf->in_use)
673			continue;
674
675		/* Determine if this block needs to be backed up */
676		if (regbuf->flags & REGBUF_FORCE_IMAGE)
677			needs_backup = true;
678		else if (regbuf->flags & REGBUF_NO_IMAGE)
679			needs_backup = false;
680		else if (!doPageWrites)
681			needs_backup = false;
682		else
683		{
684			/*
685			 * We assume page LSN is first data on *every* page that can be
686			 * passed to XLogInsert, whether it has the standard page layout
687			 * or not.
688			 */
689			XLogRecPtr	page_lsn = PageGetLSN(regbuf->page);
690
691			needs_backup = (page_lsn <= RedoRecPtr);
692			if (!needs_backup)
693			{
694				if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
695					*fpw_lsn = page_lsn;
696			}
697		}
698
699		/* Determine if the buffer data needs to included */
700		if (regbuf->rdata_len == 0)
701			needs_data = false;
702		else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
703			needs_data = true;
704		else
/* NOTE(review): line 705 missing (the default needs_data expression for this else branch). */
706
707		bkpb.id = block_id;
708		bkpb.fork_flags = regbuf->forkno;
709		bkpb.data_length = 0;
710
711		if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
712			bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
713
714		/*
715		 * If needs_backup is true or WAL checking is enabled for current
716		 * resource manager, log a full-page write for the current block.
717		 */
/* NOTE(review): line 718 missing (the include_image assignment the comment above describes). */
719
720		if (include_image)
721		{
722			const PageData *page = regbuf->page;
/* NOTE(review): line 723 missing (presumably the compressed_len declaration used below). */
724
725			/*
726			 * The page needs to be backed up, so calculate its hole length
727			 * and offset.
728			 */
729			if (regbuf->flags & REGBUF_STANDARD)
730			{
731				/* Assume we can omit data between pd_lower and pd_upper */
732				uint16		lower = ((const PageHeaderData *) page)->pd_lower;
733				uint16		upper = ((const PageHeaderData *) page)->pd_upper;
734
/* NOTE(review): line 735 missing (first clause of this hole-validity condition, presumably on 'lower'). */
736					upper > lower &&
737					upper <= BLCKSZ)
738				{
739					bimg.hole_offset = lower;
740					cbimg.hole_length = upper - lower;
741				}
742				else
743				{
744					/* No "hole" to remove */
745					bimg.hole_offset = 0;
746					cbimg.hole_length = 0;
747				}
748			}
749			else
750			{
751				/* Not a standard page header, don't try to eliminate "hole" */
752				bimg.hole_offset = 0;
753				cbimg.hole_length = 0;
754			}
755
756			/*
757			 * Try to compress a block image if wal_compression is enabled
758			 */
/* NOTE(review): lines 759, 761, 765 missing (the wal_compression guard and the is_compressed assignment around this call). */
760			{
762					XLogCompressBackupBlock(page, bimg.hole_offset,
763											cbimg.hole_length,
764											regbuf->compressed_page,
766			}
767
768			/*
769			 * Fill in the remaining fields in the XLogRecordBlockHeader
770			 * struct
771			 */
772			bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
773
774			/* Report a full page image constructed for the WAL record */
775			*num_fpi += 1;
776
777			/*
778			 * Construct XLogRecData entries for the page content.
779			 */
780			rdt_datas_last->next = &regbuf->bkp_rdatas[0];
/* NOTE(review): line 781 missing (presumably rdt_datas_last advance to that entry). */
782
783			bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
784
785			/*
786			 * If WAL consistency checking is enabled for the resource manager
787			 * of this WAL record, a full-page image is included in the record
788			 * for the block modified. During redo, the full-page is replayed
789			 * only if BKPIMAGE_APPLY is set.
790			 */
791			if (needs_backup)
792				bimg.bimg_info |= BKPIMAGE_APPLY;
793
794			if (is_compressed)
795			{
796				/* The current compression is stored in the WAL record */
797				bimg.length = compressed_len;
798
799				/* Set the compression method used for this block */
/* NOTE(review): lines 800, 802, 806, 814, 822 missing — the switch header and its case labels (per the BKPIMAGE_COMPRESS_* bodies below, presumably a switch over the configured wal_compression method). */
801				{
803						bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
804						break;
805
807#ifdef USE_LZ4
808						bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
809#else
810						elog(ERROR, "LZ4 is not supported by this build");
811#endif
812						break;
813
815#ifdef USE_ZSTD
816						bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
817#else
818						elog(ERROR, "zstd is not supported by this build");
819#endif
820						break;
821
823						Assert(false);	/* cannot happen */
824						break;
825						/* no default case, so that compiler will warn */
826				}
827
828				rdt_datas_last->data = regbuf->compressed_page;
/* NOTE(review): line 829 missing (presumably rdt_datas_last->len = compressed_len). */
830			}
831			else
832			{
833				bimg.length = BLCKSZ - cbimg.hole_length;
834
835				if (cbimg.hole_length == 0)
836				{
837					rdt_datas_last->data = page;
838					rdt_datas_last->len = BLCKSZ;
839				}
840				else
841				{
842					/* must skip the hole */
843					rdt_datas_last->data = page;
844					rdt_datas_last->len = bimg.hole_offset;
845
846					rdt_datas_last->next = &regbuf->bkp_rdatas[1];
/* NOTE(review): line 847 missing (presumably rdt_datas_last advance to that entry). */
848
849					rdt_datas_last->data =
850						page + (bimg.hole_offset + cbimg.hole_length);
851					rdt_datas_last->len =
852						BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
853				}
854			}
855
856			total_len += bimg.length;
857
858			/* Track the WAL full page images in bytes */
859			*fpi_bytes += bimg.length;
860		}
861
862		if (needs_data)
863		{
864			/*
865			 * When copying to XLogRecordBlockHeader, the length is narrowed
866			 * to an uint16. Double-check that it is still correct.
867			 */
868			Assert(regbuf->rdata_len <= UINT16_MAX);
869
870			/*
871			 * Link the caller-supplied rdata chain for this buffer to the
872			 * overall list.
873			 */
874			bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
875			bkpb.data_length = (uint16) regbuf->rdata_len;
876			total_len += regbuf->rdata_len;
877
878			rdt_datas_last->next = regbuf->rdata_head;
879			rdt_datas_last = regbuf->rdata_tail;
880		}
881
882		if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
883		{
884			samerel = true;
885			bkpb.fork_flags |= BKPBLOCK_SAME_REL;
886		}
887		else
888			samerel = false;
/* NOTE(review): line 889 missing (presumably prev_regbuf = regbuf). */
890
891		/* Ok, copy the header to the scratch buffer */
/* NOTE(review): lines 892-893, 896-897, 900-902 missing (the memcpy of bkpb / bimg / cbimg headers into scratch and the scratch pointer advances). */
894		if (include_image)
895		{
898			if (cbimg.hole_length != 0 && is_compressed)
899			{
903			}
904		}
905		if (!samerel)
906		{
907			memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
908			scratch += sizeof(RelFileLocator);
909		}
910		memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
911		scratch += sizeof(BlockNumber);
912	}
913
914	/* followed by the record's origin, if any */
/* NOTE(review): lines 915-916, 919-920 missing (the origin-inclusion condition and the memcpy of replorigin_session_origin into scratch). */
917	{
918		*(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
921	}
922
923	/* followed by toplevel XID, if not already included in previous record */
/* NOTE(review): lines 924, 926, 931 missing (the condition, the xid declaration/assignment, and the XLR_BLOCK_ID_TOPLEVEL_XID marker byte). */
925	{
927
928		/* Set the flag that the top xid is included in the WAL */
929		*topxid_included = true;
930
932		memcpy(scratch, &xid, sizeof(TransactionId));
933		scratch += sizeof(TransactionId);
934	}
935
936	/* followed by main data, if any */
937	if (mainrdata_len > 0)
938	{
939		if (mainrdata_len > 255)
940		{
/* NOTE(review): lines 941-944, 947, 950, 952 missing (mainrdata_len_4b narrowing, the overflow ereport opener/arguments, and the memcpy of the 4-byte length). */
942
945					(errmsg_internal("too much WAL data"),
946					 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
948										PG_UINT32_MAX)));
949
951			*(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
953			scratch += sizeof(uint32);
954		}
955		else
956		{
957			*(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
958			*(scratch++) = (uint8) mainrdata_len;
959		}
/* NOTE(review): lines 960-961 missing (linking the mainrdata chain to rdt_datas_last). */
962		total_len += mainrdata_len;
963	}
/* NOTE(review): lines 964-966 missing (terminating the chain and setting hdr_rdt.len from scratch - hdr_scratch). */
965
967	total_len += hdr_rdt.len;
968
969	/*
970	 * Calculate CRC of the data
971	 *
972	 * Note that the record header isn't added into the CRC initially since we
973	 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
974	 * the whole record in the order: rdata, then backup blocks, then record
975	 * header.
976	 */
/* NOTE(review): lines 977-978 missing (INIT_CRC32C and the header-portion COMP_CRC32C). */
979	for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
980		COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
981
982	/*
983	 * Ensure that the XLogRecord is not too large.
984	 *
985	 * XLogReader machinery is only able to handle records up to a certain
986	 * size (ignoring machine resource limitations), so make sure that we will
987	 * not emit records larger than the sizes advertised to be supported.
988	 */
989	if (total_len > XLogRecordMaxSize)
/* NOTE(review): line 990 missing (the ereport(ERROR, ...) opener). */
991				(errmsg_internal("oversized WAL record"),
992				 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
993									total_len, XLogRecordMaxSize, rmid, info)));
994
995	/*
996	 * Fill in the fields in the record header. Prev-link is filled in later,
997	 * once we know where in the WAL the record will be inserted. The CRC does
998	 * not include the record header yet.
999	 */
/* NOTE(review): line 1000 missing (presumably rechdr->xl_xid assignment). */
1001	rechdr->xl_tot_len = (uint32) total_len;
1002	rechdr->xl_info = info;
1003	rechdr->xl_rmid = rmid;
1004	rechdr->xl_prev = InvalidXLogRecPtr;
1005	rechdr->xl_crc = rdata_crc;
1006
1007	return &hdr_rdt;
1008}
1009
1010/*
1011 * Create a compressed version of a backup block image.
1012 *
1013 * Returns false if compression fails (i.e., compressed result is actually
1014 * bigger than original). Otherwise, returns true and sets 'dlen' to
1015 * the length of compressed block image.
1016 */
1017static bool
1018XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length,
1019						void *dest, uint16 *dlen)
1020{
1021	int32		orig_len = BLCKSZ - hole_length;
1022	int32		len = -1;
1023	int32		extra_bytes = 0;
1024	const void *source;
1025	PGAlignedBlock tmp;
1026
1027	if (hole_length != 0)
1028	{
1029		/* must skip the hole */
1030		memcpy(tmp.data, page, hole_offset);
1031		memcpy(tmp.data + hole_offset,
1032			   page + (hole_offset + hole_length),
1033			   BLCKSZ - (hole_length + hole_offset));
1034		source = tmp.data;
1035
1036		/*
1037		 * Extra data needs to be stored in WAL record for the compressed
1038		 * version of block image if the hole exists.
1039		 */
/* NOTE(review): line 1040 missing (the extra_bytes assignment the comment above describes). */
1041	}
1042	else
1043		source = page;
1044
/* NOTE(review): line 1045 missing (the switch header — presumably over the configured wal_compression method, matching the case bodies below). */
1046	{
/* NOTE(review): lines 1047-1048, 1051, 1053-1054, 1062, 1064-1065, 1073 missing (case labels and the pglz/LZ4/zstd compression calls assigning 'len'). */
1049			break;
1050
1052#ifdef USE_LZ4
1055			if (len <= 0)
1056				len = -1;		/* failure */
1057#else
1058			elog(ERROR, "LZ4 is not supported by this build");
1059#endif
1060			break;
1061
1063#ifdef USE_ZSTD
1066			if (ZSTD_isError(len))
1067				len = -1;		/* failure */
1068#else
1069			elog(ERROR, "zstd is not supported by this build");
1070#endif
1071			break;
1072
1074			Assert(false);		/* cannot happen */
1075			break;
1076			/* no default case, so that compiler will warn */
1077	}
1078
1079	/*
1080	 * We recheck the actual size even if compression reports success and see
1081	 * if the number of bytes saved by compression is larger than the length
1082	 * of extra data needed for the compressed version of block image.
1083	 */
1084	if (len >= 0 &&
/* NOTE(review): line 1085 missing (second clause of this condition, comparing len + extra_bytes against orig_len per the comment above). */
1086	{
1087		*dlen = (uint16) len;	/* successful compression */
1088		return true;
1089	}
1090	return false;
1091}
1092
1093/*
1094 * Determine whether the buffer referenced has to be backed up.
1095 *
1096 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
1097 * (which forces full-page writes) could change later, so the result should
1098 * be used for optimization purposes only.
1099 */
1100bool
/* NOTE(review): line 1101 missing (presumably "XLogCheckBufferNeedsBackup(Buffer buffer)"). */
1102{
/* NOTE(review): lines 1103 and 1107 missing (RedoRecPtr declaration and the call filling RedoRecPtr/doPageWrites, presumably GetFullPageWriteInfo). */
1104	bool		doPageWrites;
1105	Page		page;
1106
1108
1109	page = BufferGetPage(buffer);
1110
1111	if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1112		return true;			/* buffer requires backup */
1113
1114	return false;				/* buffer does not need to be backed up */
1115}
1116
1117/*
1118 * Write a backup block if needed when we are setting a hint. Note that
1119 * this may be called for a variety of page types, not just heaps.
1120 *
1121 * Callable while holding just a share-exclusive lock on the buffer
1122 * content. That suffices to prevent concurrent modifications of the
1123 * buffer. The buffer already needs to have been marked dirty by
1124 * MarkBufferDirtyHint().
1125 *
1126 * We can't use the plain backup block mechanism since that relies on the
1127 * Buffer being exclusively locked. Since some modifications (setting LSN, hint
1128 * bits) are allowed in a sharelocked buffer that can lead to wal checksum
1129 * failures. So instead we copy the page and insert the copied data as normal
1130 * record data.
1131 *
1132 * We only need to do something if page has not yet been full page written in
1133 * this checkpoint round. The LSN of the inserted wal record is returned if we
1134 * had to write, InvalidXLogRecPtr otherwise.
1135 */
/* NOTE(review): lines 1136-1137 missing (presumably "XLogRecPtr" and "XLogSaveBufferForHint(Buffer buffer, bool buffer_std)"). */
1138{
/* NOTE(review): lines 1139 and 1141 missing (presumably the recptr = InvalidXLogRecPtr and RedoRecPtr declarations used below). */
1140	XLogRecPtr	lsn;
1142
1143	/* this also verifies that we hold an appropriate lock */
1144	Assert(BufferIsDirty(buffer));
1145
1146	/*
1147	 * Update RedoRecPtr so that we can make the right decision. It's possible
1148	 * that a new checkpoint will start just after GetRedoRecPtr(), but that
1149	 * is ok, as the buffer is already dirty, ensuring that any BufferSync()
1150	 * started after the buffer was marked dirty cannot complete without
1151	 * flushing this buffer. If a checkpoint started between marking the
1152	 * buffer dirty and this check, we will emit an unnecessary WAL record (as
1153	 * the buffer will be written out as part of the checkpoint), but the
1154	 * window for that is not big.
1155	 */
/* NOTE(review): line 1156 missing (the GetRedoRecPtr() call the comment above describes). */
1157
1158	/*
1159	 * We assume page LSN is first data on *every* page that can be passed to
1160	 * XLogInsert, whether it has the standard page layout or not.
1161	 */
1162	lsn = PageGetLSN(BufferGetPage(buffer));
1163
1164	if (lsn <= RedoRecPtr)
1165	{
1166		int			flags = 0;
/* NOTE(review): line 1167 missing (presumably the copied_buffer PGAlignedBlock declaration used below). */
1168		char	   *origdata = (char *) BufferGetBlock(buffer);
1169		RelFileLocator rlocator;
1170		ForkNumber	forkno;
1171		BlockNumber blkno;
1172
1173		/*
1174		 * Copy buffer so we don't have to worry about concurrent hint bit or
1175		 * lsn updates. We assume pd_lower/upper cannot be changed without an
1176		 * exclusive lock, so the contents bkp are not racy.
1177		 */
1178		if (buffer_std)
1179		{
1180			/* Assume we can omit data between pd_lower and pd_upper */
1181			Page		page = BufferGetPage(buffer);
1182			uint16		lower = ((PageHeader) page)->pd_lower;
1183			uint16		upper = ((PageHeader) page)->pd_upper;
1184
/* NOTE(review): lines 1185-1186 missing (the two memcpy calls copying the page around the hole). */
1187		}
1188		else
/* NOTE(review): lines 1189 and 1191 missing (full-page memcpy into copied_buffer; XLogBeginInsert() call). */
1190
1192
1193		if (buffer_std)
1194			flags |= REGBUF_STANDARD;
1195
1196		BufferGetTag(buffer, &rlocator, &forkno, &blkno);
1197		XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
1198
/* NOTE(review): line 1199 missing (presumably recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT)). */
1200	}
1201
1202	return recptr;
1203}
1204
1205/*
1206 * Write a WAL record containing a full image of a page. Caller is responsible
1207 * for writing the page to disk after calling this routine.
1208 *
1209 * Note: If you're using this function, you should be building pages in private
1210 * memory and writing them directly to smgr. If you're using buffers, call
1211 * log_newpage_buffer instead.
1212 *
1213 * If the page follows the standard page layout, with a PageHeader and unused
1214 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1215 * the unused space to be left out from the WAL record, making it smaller.
1216 */
/* NOTE(review): lines 1217-1218 missing (presumably "XLogRecPtr" and "log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno,"). */
1219			Page page, bool page_std)
1220{
1221	int			flags;
/* NOTE(review): line 1222 missing (presumably the recptr declaration used below). */
1223
1224	flags = REGBUF_FORCE_IMAGE;
1225	if (page_std)
1226		flags |= REGBUF_STANDARD;
1227
/* NOTE(review): lines 1228 and 1230 missing (XLogBeginInsert(); recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI), per the bracketing XLogRegisterBlock call). */
1229	XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1231
1232	/*
1233	 * The page may be uninitialized. If so, we can't set the LSN because that
1234	 * would corrupt the page.
1235	 */
1236	if (!PageIsNew(page))
1237	{
1238		PageSetLSN(page, recptr);
1239	}
1240
1241	return recptr;
1242}
1243
1244/*
1245 * Like log_newpage(), but allows logging multiple pages in one operation.
1246 * It is more efficient than calling log_newpage() for each page separately,
1247 * because we can write multiple pages in a single WAL record.
1248 */
1249void
1250log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages,
1251			 BlockNumber *blknos, Page *pages, bool page_std)
1252{
1253	int			flags;
/* NOTE(review): line 1254 missing (presumably the recptr declaration used below). */
1255	int			i;
1256	int			j;
1257
1258	flags = REGBUF_FORCE_IMAGE;
1259	if (page_std)
1260		flags |= REGBUF_STANDARD;
1261
1262	/*
1263	 * Iterate over all the pages. They are collected into batches of
1264	 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1265	 * batch.
1266	 */
/* NOTE(review): line 1267 missing (presumably an Assert that insertion is allowed / not in a critical section). */
1268
1269	i = 0;
1270	while (i < num_pages)
1271	{
1272		int			batch_start = i;
1273		int			nbatch;
1274
/* NOTE(review): line 1275 missing (presumably XLogBeginInsert() at the top of each batch). */
1276
1277		nbatch = 0;
1278		while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1279		{
1280			XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1281			i++;
1282			nbatch++;
1283		}
1284
/* NOTE(review): line 1285 missing (presumably recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI) for the batch). */
1286
1287		for (j = batch_start; j < i; j++)
1288		{
1289			/*
1290			 * The page may be uninitialized. If so, we can't set the LSN
1291			 * because that would corrupt the page.
1292			 */
1293			if (!PageIsNew(pages[j]))
1294			{
1295				PageSetLSN(pages[j], recptr);
1296			}
1297		}
1298	}
1299}
1300
1301/*
1302 * Write a WAL record containing a full image of a page.
1303 *
1304 * Caller should initialize the buffer and mark it dirty before calling this
1305 * function. This function will set the page LSN.
1306 *
1307 * If the page follows the standard page layout, with a PageHeader and unused
1308 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1309 * the unused space to be left out from the WAL record, making it smaller.
1310 */
1312log_newpage_buffer(Buffer buffer, bool page_std)
1313{
1314 Page page = BufferGetPage(buffer);
1315 RelFileLocator rlocator;
1316 ForkNumber forknum;
1317 BlockNumber blkno;
1318
1319 /* Shared buffers should be modified in a critical section. */
1321
1322 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1323
1324 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1325}
1326
1327/*
1328 * WAL-log a range of blocks in a relation.
1329 *
1330 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1331 * written to the WAL. If the range is large, this is done in multiple WAL
1332 * records.
1333 *
1334 * If all page follows the standard page layout, with a PageHeader and unused
1335 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1336 * the unused space to be left out from the WAL records, making them smaller.
1337 *
1338 * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1339 * is used on a newly-built relation, and the caller is holding a
1340 * AccessExclusiveLock on it, so no other backend can be accessing it at the
1341 * same time. If that's not the case, you must ensure that this does not
1342 * cause a deadlock through some other means.
1343 */
1344void
1347 bool page_std)
1348{
1349 int flags;
1350 BlockNumber blkno;
1351
1352 flags = REGBUF_FORCE_IMAGE;
1353 if (page_std)
1354 flags |= REGBUF_STANDARD;
1355
1356 /*
1357 * Iterate over all the pages in the range. They are collected into
1358 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1359 * for each batch.
1360 */
1362
1363 blkno = startblk;
1364 while (blkno < endblk)
1365 {
1368 int nbufs;
1369 int i;
1370
1372
1373 /* Collect a batch of blocks. */
1374 nbufs = 0;
1375 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1376 {
1377 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1378 RBM_NORMAL, NULL);
1379
1381
1382 /*
1383 * Completely empty pages are not WAL-logged. Writing a WAL record
1384 * would change the LSN, and we don't want that. We want the page
1385 * to stay empty.
1386 */
1388 bufpack[nbufs++] = buf;
1389 else
1391 blkno++;
1392 }
1393
1394 /* Nothing more to do if all remaining blocks were empty. */
1395 if (nbufs == 0)
1396 break;
1397
1398 /* Write WAL record for this batch. */
1400
1402 for (i = 0; i < nbufs; i++)
1403 {
1405 XLogRegisterBuffer(i, bufpack[i], flags);
1406 }
1407
1409
1410 for (i = 0; i < nbufs; i++)
1412
1414
1415 for (i = 0; i < nbufs; i++)
1417 }
1418}
1419
1420/*
1421 * Allocate working buffers needed for WAL record construction.
1422 */
1423void
1425{
1426#ifdef USE_ASSERT_CHECKING
1427
1428 /*
1429 * Check that any records assembled can be decoded. This is capped based
1430 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1431 * addend covers the larger allocate_recordbuf() demand. This code path
1432 * is called once per backend, more than enough for this check.
1433 */
1434 size_t max_required =
1436
1438#endif
1439
1440 /* Initialize the working areas */
1441 if (xloginsert_cxt == NULL)
1442 {
1444 "WAL record construction",
1446 }
1447
1448 if (registered_buffers == NULL)
1449 {
1454 }
1455 if (rdatas == NULL)
1456 {
1458 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1460 }
1461
1462 /*
1463 * Allocate a buffer to hold the header information for a WAL record.
1464 */
1465 if (hdr_scratch == NULL)
1468}
uint32 BlockNumber
Definition block.h:31
int Buffer
Definition buf.h:23
bool BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
Definition bufmgr.c:3003
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4378
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3030
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5522
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3063
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition bufmgr.c:921
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:470
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:437
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:220
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:332
@ RBM_NORMAL
Definition bufmgr.h:46
PageHeaderData * PageHeader
Definition bufpage.h:199
char PageData
Definition bufpage.h:80
static bool PageIsNew(const PageData *page)
Definition bufpage.h:259
#define SizeOfPageHeaderData
Definition bufpage.h:242
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:417
PageData * Page
Definition bufpage.h:81
static XLogRecPtr PageGetLSN(const PageData *page)
Definition bufpage.h:411
uint8_t uint8
Definition c.h:616
#define PG_UINT32_MAX
Definition c.h:676
#define Assert(condition)
Definition c.h:945
int64_t int64
Definition c.h:615
int32_t int32
Definition c.h:614
uint64_t uint64
Definition c.h:619
uint16_t uint16
Definition c.h:617
uint32_t uint32
Definition c.h:618
#define MemSet(start, val, len)
Definition c.h:1109
uint32 TransactionId
Definition c.h:738
int int errdetail_internal(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define PANIC
Definition elog.h:42
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
volatile uint32 CritSectionCount
Definition globals.c:45
static struct @174 value
int j
Definition isn.c:78
int i
Definition isn.c:77
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
MemoryContext TopMemoryContext
Definition mcxt.c:166
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AllocSizeIsValid(size)
Definition memutils.h:42
#define IsBootstrapProcessingMode()
Definition miscadmin.h:477
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define END_CRIT_SECTION()
Definition miscadmin.h:152
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
ReplOriginXactState replorigin_xact_state
Definition origin.c:167
#define InvalidReplOriginId
Definition origin.h:33
#define XLOG_FPI
Definition pg_control.h:80
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:79
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:153
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
const void size_t len
const void * data
const PGLZ_Strategy *const PGLZ_strategy_default
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
static int fb(int x)
#define RelationNeedsWAL(relation)
Definition rel.h:637
#define RelationIsPermanent(relation)
Definition rel.h:626
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition relpath.h:56
uint8 RmgrId
Definition rmgr.h:11
char data[BLCKSZ]
Definition c.h:1206
Form_pg_class rd_rel
Definition rel.h:111
ReplOriginId origin
Definition origin.h:45
const void * data
struct XLogRecData * next
XLogRecData * rdata_tail
Definition xloginsert.c:82
BlockNumber block
Definition xloginsert.c:77
XLogRecData * rdata_head
Definition xloginsert.c:80
ForkNumber forkno
Definition xloginsert.c:76
RelFileLocator rlocator
Definition xloginsert.c:75
const PageData * page
Definition xloginsert.c:78
Datum batch_start(PG_FUNCTION_ARGS)
Definition test_aio.c:668
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:443
TransactionId GetCurrentTransactionIdIfAny(void)
Definition xact.c:473
bool IsSubxactTopXidLogPending(void)
Definition xact.c:561
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6577
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:750
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6547
static XLogRecPtr RedoRecPtr
Definition xlog.c:277
static bool doPageWrites
Definition xlog.c:290
int wal_compression
Definition xlog.c:128
bool XLogInsertAllowed(void)
Definition xlog.c:6499
XLogRecPtr GetFakeLSNForUnloggedRel(void)
Definition xlog.c:4660
XLogRecPtr GetXLogInsertEndRecPtr(void)
Definition xlog.c:9630
XLogRecPtr XLogAssignLSN(void)
Definition xlog.c:8237
bool * wal_consistency_checking
Definition xlog.c:130
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:165
WalCompression
Definition xlog.h:82
@ WAL_COMPRESSION_NONE
Definition xlog.h:83
@ WAL_COMPRESSION_LZ4
Definition xlog.h:85
@ WAL_COMPRESSION_PGLZ
Definition xlog.h:84
@ WAL_COMPRESSION_ZSTD
Definition xlog.h:86
#define SizeOfXLogLongPHD
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define FirstNormalUnloggedLSN
Definition xlogdefs.h:37
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
static XLogRecData * mainrdata_head
Definition xloginsert.c:101
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
XLogRecPtr XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
Definition xloginsert.c:544
static int max_registered_buffers
Definition xloginsert.c:93
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:479
static uint8 curinsert_flags
Definition xloginsert.c:106
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:410
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:369
static uint64 mainrdata_len
Definition xloginsert.c:103
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
static bool begininsert_called
Definition xloginsert.c:135
static int max_registered_block_id
Definition xloginsert.c:94
XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
void InitXLogInsert(void)
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:461
static int num_rdatas
Definition xloginsert.c:132
void log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:314
static XLogRecData * mainrdata_last
Definition xloginsert.c:102
static MemoryContext xloginsert_cxt
Definition xloginsert.c:138
void log_newpage_range(Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
void XLogResetInsertion(void)
Definition xloginsert.c:226
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
static XLogRecData hdr_rdt
Definition xloginsert.c:116
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
Definition xloginsert.c:618
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:246
static char * hdr_scratch
Definition xloginsert.c:117
static XLogRecData * rdatas
Definition xloginsert.c:131
void XLogBeginInsert(void)
Definition xloginsert.c:153
XLogRecPtr XLogGetFakeLSN(Relation rel)
Definition xloginsert.c:559
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition xloginsert.c:179
#define COMPRESS_BUFSIZE
Definition xloginsert.c:65
static registered_buffer * registered_buffers
Definition xloginsert.c:92
static int max_rdatas
Definition xloginsert.c:133
#define HEADER_SCRATCH_SIZE
Definition xloginsert.c:122
#define REGBUF_NO_CHANGE
Definition xloginsert.h:37
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define XLR_NORMAL_MAX_BLOCK_ID
Definition xloginsert.h:28
#define REGBUF_FORCE_IMAGE
Definition xloginsert.h:32
#define XLR_NORMAL_RDATAS
Definition xloginsert.h:29
#define REGBUF_NO_IMAGE
Definition xloginsert.h:33
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define SizeOfXLogRecordBlockImageHeader
Definition xlogrecord.h:153
#define XLogRecordMaxSize
Definition xlogrecord.h:74
#define BKPIMAGE_COMPRESS_ZSTD
Definition xlogrecord.h:162
#define BKPBLOCK_HAS_DATA
Definition xlogrecord.h:198
#define BKPIMAGE_APPLY
Definition xlogrecord.h:158
#define BKPIMAGE_HAS_HOLE
Definition xlogrecord.h:157
#define XLR_BLOCK_ID_DATA_LONG
Definition xlogrecord.h:242
#define BKPBLOCK_WILL_INIT
Definition xlogrecord.h:199
#define XLR_RMGR_INFO_MASK
Definition xlogrecord.h:63
#define BKPIMAGE_COMPRESS_LZ4
Definition xlogrecord.h:161
#define XLR_BLOCK_ID_TOPLEVEL_XID
Definition xlogrecord.h:244
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define XLR_MAX_BLOCK_ID
Definition xlogrecord.h:239
#define SizeOfXLogRecordBlockCompressHeader
Definition xlogrecord.h:177
#define BKPBLOCK_SAME_REL
Definition xlogrecord.h:200
#define XLR_SPECIAL_REL_UPDATE
Definition xlogrecord.h:82
#define SizeOfXLogRecordBlockHeader
Definition xlogrecord.h:115
#define BKPIMAGE_COMPRESS_PGLZ
Definition xlogrecord.h:160
#define XLR_BLOCK_ID_ORIGIN
Definition xlogrecord.h:243
#define SizeOfXLogRecord
Definition xlogrecord.h:55
#define BKPBLOCK_HAS_IMAGE
Definition xlogrecord.h:197
#define XLR_CHECK_CONSISTENCY
Definition xlogrecord.h:91