PostgreSQL Source Code git master
Loading...
Searching...
No Matches
xloginsert.c File Reference
#include "postgres.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xloginsert.h"
#include "catalog/pg_control.h"
#include "common/pg_lzcompress.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "replication/origin.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "utils/memutils.h"
#include "utils/pgstat_internal.h"
Include dependency graph for xloginsert.c:

Go to the source code of this file.

Data Structures

struct  registered_buffer
 

Macros

#define LZ4_MAX_BLCKSZ   0
 
#define ZSTD_MAX_BLCKSZ   0
 
#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)
 
#define COMPRESS_BUFSIZE   Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
 
#define SizeOfXlogOrigin   (sizeof(RepOriginId) + sizeof(char))
 
#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))
 
#define HEADER_SCRATCH_SIZE
 

Functions

static XLogRecDataXLogRecordAssemble (RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
 
static bool XLogCompressBackupBlock (const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
 
void XLogBeginInsert (void)
 
void XLogEnsureRecordSpace (int max_block_id, int ndatas)
 
void XLogResetInsertion (void)
 
void XLogRegisterBuffer (uint8 block_id, Buffer buffer, uint8 flags)
 
void XLogRegisterBlock (uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
 
void XLogRegisterData (const void *data, uint32 len)
 
void XLogRegisterBufData (uint8 block_id, const void *data, uint32 len)
 
void XLogSetRecordFlags (uint8 flags)
 
XLogRecPtr XLogInsert (RmgrId rmid, uint8 info)
 
XLogRecPtr XLogSimpleInsertInt64 (RmgrId rmid, uint8 info, int64 value)
 
bool XLogCheckBufferNeedsBackup (Buffer buffer)
 
XLogRecPtr XLogSaveBufferForHint (Buffer buffer, bool buffer_std)
 
XLogRecPtr log_newpage (RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
 
void log_newpages (RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
 
XLogRecPtr log_newpage_buffer (Buffer buffer, bool page_std)
 
void log_newpage_range (Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
 
void InitXLogInsert (void)
 

Variables

static registered_bufferregistered_buffers
 
static int max_registered_buffers
 
static int max_registered_block_id = 0
 
static XLogRecDatamainrdata_head
 
static XLogRecDatamainrdata_last = (XLogRecData *) &mainrdata_head
 
static uint64 mainrdata_len
 
static uint8 curinsert_flags = 0
 
static XLogRecData hdr_rdt
 
static charhdr_scratch = NULL
 
static XLogRecDatardatas
 
static int num_rdatas
 
static int max_rdatas
 
static bool begininsert_called = false
 
static MemoryContext xloginsert_cxt
 

Macro Definition Documentation

◆ COMPRESS_BUFSIZE

Definition at line 64 of file xloginsert.c.

◆ HEADER_SCRATCH_SIZE

#define HEADER_SCRATCH_SIZE
Value:
#define SizeOfXlogOrigin
Definition xloginsert.c:118
#define SizeOfXLogTransactionId
Definition xloginsert.c:119
#define XLR_MAX_BLOCK_ID
Definition xlogrecord.h:239
#define MaxSizeOfXLogRecordBlockHeader
Definition xlogrecord.h:184
#define SizeOfXLogRecordDataHeaderLong
Definition xlogrecord.h:225
#define SizeOfXLogRecord
Definition xlogrecord.h:55

Definition at line 121 of file xloginsert.c.

152{
155 Assert(mainrdata_len == 0);
156
157 /* cross-check on whether we should be here or not */
158 if (!XLogInsertAllowed())
159 elog(ERROR, "cannot make new WAL entries during recovery");
160
162 elog(ERROR, "XLogBeginInsert was already called");
163
164 begininsert_called = true;
165}
166
167/*
168 * Ensure that there are enough buffer and data slots in the working area,
169 * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
170 * calls.
171 *
172 * There is always space for a small number of buffers and data chunks, enough
173 * for most record types. This function is for the exceptional cases that need
174 * more.
175 */
176void
177XLogEnsureRecordSpace(int max_block_id, int ndatas)
178{
179 int nbuffers;
180
181 /*
182 * This must be called before entering a critical section, because
183 * allocating memory inside a critical section can fail. repalloc() will
184 * check the same, but better to check it here too so that we fail
185 * consistently even if the arrays happen to be large enough already.
186 */
188
189 /* the minimum values can't be decreased */
190 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
191 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
194
195 if (max_block_id > XLR_MAX_BLOCK_ID)
196 elog(ERROR, "maximum number of WAL record block references exceeded");
197 nbuffers = max_block_id + 1;
198
199 if (nbuffers > max_registered_buffers)
200 {
202 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
203
204 /*
205 * At least the padding bytes in the structs must be zeroed, because
206 * they are included in WAL data, but initialize it all for tidiness.
207 */
209 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
210 max_registered_buffers = nbuffers;
211 }
212
213 if (ndatas > max_rdatas)
214 {
217 }
218}
219
220/*
221 * Reset WAL record construction buffers.
222 */
223void
225{
226 int i;
227
228 for (i = 0; i < max_registered_block_id; i++)
229 registered_buffers[i].in_use = false;
230
231 num_rdatas = 0;
233 mainrdata_len = 0;
235 curinsert_flags = 0;
236 begininsert_called = false;
237}
238
239/*
240 * Register a reference to a buffer with the WAL record being constructed.
241 * This must be called for every page that the WAL-logged operation modifies.
242 */
243void
245{
247
248 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
249 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
251
252 /*
253 * Ordinarily, buffer should be exclusive-locked and marked dirty before
254 * we get here, otherwise we could end up violating one of the rules in
255 * access/transam/README.
256 *
257 * Some callers intentionally register a clean page and never update that
258 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
259 * bypass these checks.
260 */
261#ifdef USE_ASSERT_CHECKING
262 if (!(flags & REGBUF_NO_CHANGE))
264 BufferIsDirty(buffer));
265#endif
266
268 {
270 elog(ERROR, "too many registered buffers");
272 }
273
275
276 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
277 regbuf->page = BufferGetPage(buffer);
278 regbuf->flags = flags;
279 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
280 regbuf->rdata_len = 0;
281
282 /*
283 * Check that this page hasn't already been registered with some other
284 * block_id.
285 */
286#ifdef USE_ASSERT_CHECKING
287 {
288 int i;
289
290 for (i = 0; i < max_registered_block_id; i++)
291 {
293
294 if (i == block_id || !regbuf_old->in_use)
295 continue;
296
297 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
298 regbuf_old->forkno != regbuf->forkno ||
299 regbuf_old->block != regbuf->block);
300 }
301 }
302#endif
303
304 regbuf->in_use = true;
305}
306
307/*
308 * Like XLogRegisterBuffer, but for registering a block that's not in the
309 * shared buffer pool (i.e. when you don't have a Buffer for it).
310 */
311void
313 BlockNumber blknum, const PageData *page, uint8 flags)
314{
316
318
321
323 elog(ERROR, "too many registered buffers");
324
326
327 regbuf->rlocator = *rlocator;
328 regbuf->forkno = forknum;
329 regbuf->block = blknum;
330 regbuf->page = page;
331 regbuf->flags = flags;
332 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
333 regbuf->rdata_len = 0;
334
335 /*
336 * Check that this page hasn't already been registered with some other
337 * block_id.
338 */
339#ifdef USE_ASSERT_CHECKING
340 {
341 int i;
342
343 for (i = 0; i < max_registered_block_id; i++)
344 {
346
347 if (i == block_id || !regbuf_old->in_use)
348 continue;
349
350 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
351 regbuf_old->forkno != regbuf->forkno ||
352 regbuf_old->block != regbuf->block);
353 }
354 }
355#endif
356
357 regbuf->in_use = true;
358}
359
360/*
361 * Add data to the WAL record that's being constructed.
362 *
363 * The data is appended to the "main chunk", available at replay with
364 * XLogRecGetData().
365 */
366void
367XLogRegisterData(const void *data, uint32 len)
368{
370
372
373 if (num_rdatas >= max_rdatas)
375 (errmsg_internal("too much WAL data"),
376 errdetail_internal("%d out of %d data segments are already in use.",
378 rdata = &rdatas[num_rdatas++];
379
380 rdata->data = data;
381 rdata->len = len;
382
383 /*
384 * we use the mainrdata_last pointer to track the end of the chain, so no
385 * need to clear 'next' here.
386 */
387
390
392}
393
394/*
395 * Add buffer-specific data to the WAL record that's being constructed.
396 *
397 * Block_id must reference a block previously registered with
398 * XLogRegisterBuffer(). If this is called more than once for the same
399 * block_id, the data is appended.
400 *
401 * The maximum amount of data that can be registered per block is 65535
402 * bytes. That should be plenty; if you need more than BLCKSZ bytes to
403 * reconstruct the changes to the page, you might as well just log a full
404 * copy of it. (the "main data" that's not associated with a block is not
405 * limited)
406 */
407void
409{
412
414
415 /* find the registered buffer struct */
417 if (!regbuf->in_use)
418 elog(ERROR, "no block with id %d registered with WAL insertion",
419 block_id);
420
421 /*
422 * Check against max_rdatas and ensure we do not register more data per
423 * buffer than can be handled by the physical data format; i.e. that
424 * regbuf->rdata_len does not grow beyond what
425 * XLogRecordBlockHeader->data_length can hold.
426 */
427 if (num_rdatas >= max_rdatas)
429 (errmsg_internal("too much WAL data"),
430 errdetail_internal("%d out of %d data segments are already in use.",
432 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
434 (errmsg_internal("too much WAL data"),
435 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
436 UINT16_MAX, block_id, regbuf->rdata_len, len)));
437
438 rdata = &rdatas[num_rdatas++];
439
440 rdata->data = data;
441 rdata->len = len;
442
443 regbuf->rdata_tail->next = rdata;
444 regbuf->rdata_tail = rdata;
445 regbuf->rdata_len += len;
446}
447
448/*
449 * Set insert status flags for the upcoming WAL record.
450 *
451 * The flags that can be used here are:
452 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
453 * included in the record.
454 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
455 * durability, which allows to avoid triggering WAL archiving and other
456 * background activity.
457 */
458void
460{
462 curinsert_flags |= flags;
463}
464
465/*
466 * Insert an XLOG record having the specified RMID and info bytes, with the
467 * body of the record being the data and buffer references registered earlier
468 * with XLogRegister* calls.
469 *
470 * Returns XLOG pointer to end of record (beginning of next record).
471 * This can be used as LSN for data pages affected by the logged action.
472 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
473 * before the data page can be written out. This implements the basic
474 * WAL rule "write the log before the data".)
475 */
477XLogInsert(RmgrId rmid, uint8 info)
478{
480
481 /* XLogBeginInsert() must have been called. */
483 elog(ERROR, "XLogBeginInsert was not called");
484
485 /*
486 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
487 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
488 */
489 if ((info & ~(XLR_RMGR_INFO_MASK |
492 elog(PANIC, "invalid xlog info mask %02X", info);
493
494 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
495
496 /*
497 * In bootstrap mode, we don't actually log anything but XLOG resources;
498 * return a phony record pointer.
499 */
500 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
501 {
503 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
504 return EndPos;
505 }
506
507 do
508 {
510 bool doPageWrites;
511 bool topxid_included = false;
514 int num_fpi = 0;
515 uint64 fpi_bytes = 0;
516
517 /*
518 * Get values needed to decide whether to do full-page writes. Since
519 * we don't yet have an insertion lock, these could change under us,
520 * but XLogInsertRecord will recheck them once it has a lock.
521 */
523
527
530 } while (!XLogRecPtrIsValid(EndPos));
531
533
534 return EndPos;
535}
536
537/*
538 * Simple wrapper to XLogInsert to insert a WAL record with elementary
539 * contents (only an int64 is supported as value currently).
540 */
543{
545 XLogRegisterData(&value, sizeof(value));
546 return XLogInsert(rmid, info);
547}
548
549/*
550 * Assemble a WAL record from the registered data and buffers into an
551 * XLogRecData chain, ready for insertion with XLogInsertRecord().
552 *
553 * The record header fields are filled in, except for the xl_prev field. The
554 * calculated CRC does not include the record header yet.
555 *
556 * If there are any registered buffers, and a full-page image was not taken
557 * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
558 * signals that the assembled record is only good for insertion on the
559 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
560 *
561 * *topxid_included is set if the topmost transaction ID is logged with the
562 * current subtransaction.
563 */
564static XLogRecData *
568 bool *topxid_included)
569{
571 uint64 total_len = 0;
572 int block_id;
577 char *scratch = hdr_scratch;
578
579 /*
580 * Note: this function can be called multiple times for the same record.
581 * All the modifications we do to the rdata chains below must handle that.
582 */
583
584 /* The record begins with the fixed-size header */
587
588 hdr_rdt.next = NULL;
591
592 /*
593 * Enforce consistency checks for this record if user is looking for it.
594 * Do this before at the beginning of this routine to give the possibility
595 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
596 * a record.
597 */
598 if (wal_consistency_checking[rmid])
599 info |= XLR_CHECK_CONSISTENCY;
600
601 /*
602 * Make an rdata chain containing all the data portions of all block
603 * references. This includes the data for full-page images. Also append
604 * the headers for the block references in the scratch buffer.
605 */
608 {
610 bool needs_backup;
611 bool needs_data;
615 bool samerel;
616 bool is_compressed = false;
617 bool include_image;
618
619 if (!regbuf->in_use)
620 continue;
621
622 /* Determine if this block needs to be backed up */
623 if (regbuf->flags & REGBUF_FORCE_IMAGE)
624 needs_backup = true;
625 else if (regbuf->flags & REGBUF_NO_IMAGE)
626 needs_backup = false;
627 else if (!doPageWrites)
628 needs_backup = false;
629 else
630 {
631 /*
632 * We assume page LSN is first data on *every* page that can be
633 * passed to XLogInsert, whether it has the standard page layout
634 * or not.
635 */
636 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
637
638 needs_backup = (page_lsn <= RedoRecPtr);
639 if (!needs_backup)
640 {
641 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
642 *fpw_lsn = page_lsn;
643 }
644 }
645
646 /* Determine if the buffer data needs to included */
647 if (regbuf->rdata_len == 0)
648 needs_data = false;
649 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
650 needs_data = true;
651 else
653
654 bkpb.id = block_id;
655 bkpb.fork_flags = regbuf->forkno;
656 bkpb.data_length = 0;
657
658 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
659 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
660
661 /*
662 * If needs_backup is true or WAL checking is enabled for current
663 * resource manager, log a full-page write for the current block.
664 */
666
667 if (include_image)
668 {
669 const PageData *page = regbuf->page;
671
672 /*
673 * The page needs to be backed up, so calculate its hole length
674 * and offset.
675 */
676 if (regbuf->flags & REGBUF_STANDARD)
677 {
678 /* Assume we can omit data between pd_lower and pd_upper */
679 uint16 lower = ((PageHeader) page)->pd_lower;
680 uint16 upper = ((PageHeader) page)->pd_upper;
681
683 upper > lower &&
684 upper <= BLCKSZ)
685 {
686 bimg.hole_offset = lower;
687 cbimg.hole_length = upper - lower;
688 }
689 else
690 {
691 /* No "hole" to remove */
692 bimg.hole_offset = 0;
693 cbimg.hole_length = 0;
694 }
695 }
696 else
697 {
698 /* Not a standard page header, don't try to eliminate "hole" */
699 bimg.hole_offset = 0;
700 cbimg.hole_length = 0;
701 }
702
703 /*
704 * Try to compress a block image if wal_compression is enabled
705 */
707 {
709 XLogCompressBackupBlock(page, bimg.hole_offset,
710 cbimg.hole_length,
711 regbuf->compressed_page,
713 }
714
715 /*
716 * Fill in the remaining fields in the XLogRecordBlockHeader
717 * struct
718 */
719 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
720
721 /* Report a full page image constructed for the WAL record */
722 *num_fpi += 1;
723
724 /*
725 * Construct XLogRecData entries for the page content.
726 */
727 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
729
730 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
731
732 /*
733 * If WAL consistency checking is enabled for the resource manager
734 * of this WAL record, a full-page image is included in the record
735 * for the block modified. During redo, the full-page is replayed
736 * only if BKPIMAGE_APPLY is set.
737 */
738 if (needs_backup)
739 bimg.bimg_info |= BKPIMAGE_APPLY;
740
741 if (is_compressed)
742 {
743 /* The current compression is stored in the WAL record */
744 bimg.length = compressed_len;
745
746 /* Set the compression method used for this block */
748 {
750 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
751 break;
752
754#ifdef USE_LZ4
755 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
756#else
757 elog(ERROR, "LZ4 is not supported by this build");
758#endif
759 break;
760
762#ifdef USE_ZSTD
763 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
764#else
765 elog(ERROR, "zstd is not supported by this build");
766#endif
767 break;
768
770 Assert(false); /* cannot happen */
771 break;
772 /* no default case, so that compiler will warn */
773 }
774
775 rdt_datas_last->data = regbuf->compressed_page;
777 }
778 else
779 {
780 bimg.length = BLCKSZ - cbimg.hole_length;
781
782 if (cbimg.hole_length == 0)
783 {
784 rdt_datas_last->data = page;
785 rdt_datas_last->len = BLCKSZ;
786 }
787 else
788 {
789 /* must skip the hole */
790 rdt_datas_last->data = page;
791 rdt_datas_last->len = bimg.hole_offset;
792
793 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
795
796 rdt_datas_last->data =
797 page + (bimg.hole_offset + cbimg.hole_length);
798 rdt_datas_last->len =
799 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
800 }
801 }
802
803 total_len += bimg.length;
804
805 /* Track the WAL full page images in bytes */
806 *fpi_bytes += bimg.length;
807 }
808
809 if (needs_data)
810 {
811 /*
812 * When copying to XLogRecordBlockHeader, the length is narrowed
813 * to an uint16. Double-check that it is still correct.
814 */
815 Assert(regbuf->rdata_len <= UINT16_MAX);
816
817 /*
818 * Link the caller-supplied rdata chain for this buffer to the
819 * overall list.
820 */
821 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
822 bkpb.data_length = (uint16) regbuf->rdata_len;
823 total_len += regbuf->rdata_len;
824
825 rdt_datas_last->next = regbuf->rdata_head;
826 rdt_datas_last = regbuf->rdata_tail;
827 }
828
829 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
830 {
831 samerel = true;
832 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
833 }
834 else
835 samerel = false;
837
838 /* Ok, copy the header to the scratch buffer */
841 if (include_image)
842 {
845 if (cbimg.hole_length != 0 && is_compressed)
846 {
850 }
851 }
852 if (!samerel)
853 {
854 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
855 scratch += sizeof(RelFileLocator);
856 }
857 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
858 scratch += sizeof(BlockNumber);
859 }
860
861 /* followed by the record's origin, if any */
864 {
865 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
868 }
869
870 /* followed by toplevel XID, if not already included in previous record */
872 {
874
875 /* Set the flag that the top xid is included in the WAL */
876 *topxid_included = true;
877
879 memcpy(scratch, &xid, sizeof(TransactionId));
880 scratch += sizeof(TransactionId);
881 }
882
883 /* followed by main data, if any */
884 if (mainrdata_len > 0)
885 {
886 if (mainrdata_len > 255)
887 {
889
892 (errmsg_internal("too much WAL data"),
893 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
895 PG_UINT32_MAX)));
896
898 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
900 scratch += sizeof(uint32);
901 }
902 else
903 {
904 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
905 *(scratch++) = (uint8) mainrdata_len;
906 }
909 total_len += mainrdata_len;
910 }
912
914 total_len += hdr_rdt.len;
915
916 /*
917 * Calculate CRC of the data
918 *
919 * Note that the record header isn't added into the CRC initially since we
920 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
921 * the whole record in the order: rdata, then backup blocks, then record
922 * header.
923 */
926 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
927 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
928
929 /*
930 * Ensure that the XLogRecord is not too large.
931 *
932 * XLogReader machinery is only able to handle records up to a certain
933 * size (ignoring machine resource limitations), so make sure that we will
934 * not emit records larger than the sizes advertised to be supported.
935 */
936 if (total_len > XLogRecordMaxSize)
938 (errmsg_internal("oversized WAL record"),
939 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
940 total_len, XLogRecordMaxSize, rmid, info)));
941
942 /*
943 * Fill in the fields in the record header. Prev-link is filled in later,
944 * once we know where in the WAL the record will be inserted. The CRC does
945 * not include the record header yet.
946 */
948 rechdr->xl_tot_len = (uint32) total_len;
949 rechdr->xl_info = info;
950 rechdr->xl_rmid = rmid;
951 rechdr->xl_prev = InvalidXLogRecPtr;
952 rechdr->xl_crc = rdata_crc;
953
954 return &hdr_rdt;
955}
956
957/*
958 * Create a compressed version of a backup block image.
959 *
960 * Returns false if compression fails (i.e., compressed result is actually
961 * bigger than original). Otherwise, returns true and sets 'dlen' to
962 * the length of compressed block image.
963 */
964static bool
965XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length,
966 void *dest, uint16 *dlen)
967{
968 int32 orig_len = BLCKSZ - hole_length;
969 int32 len = -1;
970 int32 extra_bytes = 0;
971 const void *source;
972 PGAlignedBlock tmp;
973
974 if (hole_length != 0)
975 {
976 /* must skip the hole */
977 memcpy(tmp.data, page, hole_offset);
978 memcpy(tmp.data + hole_offset,
979 page + (hole_offset + hole_length),
980 BLCKSZ - (hole_length + hole_offset));
981 source = tmp.data;
982
983 /*
984 * Extra data needs to be stored in WAL record for the compressed
985 * version of block image if the hole exists.
986 */
988 }
989 else
990 source = page;
991
993 {
996 break;
997
999#ifdef USE_LZ4
1002 if (len <= 0)
1003 len = -1; /* failure */
1004#else
1005 elog(ERROR, "LZ4 is not supported by this build");
1006#endif
1007 break;
1008
1010#ifdef USE_ZSTD
1013 if (ZSTD_isError(len))
1014 len = -1; /* failure */
1015#else
1016 elog(ERROR, "zstd is not supported by this build");
1017#endif
1018 break;
1019
1021 Assert(false); /* cannot happen */
1022 break;
1023 /* no default case, so that compiler will warn */
1024 }
1025
1026 /*
1027 * We recheck the actual size even if compression reports success and see
1028 * if the number of bytes saved by compression is larger than the length
1029 * of extra data needed for the compressed version of block image.
1030 */
1031 if (len >= 0 &&
1033 {
1034 *dlen = (uint16) len; /* successful compression */
1035 return true;
1036 }
1037 return false;
1038}
1039
1040/*
1041 * Determine whether the buffer referenced has to be backed up.
1042 *
1043 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
1044 * (which forces full-page writes) could change later, so the result should
1045 * be used for optimization purposes only.
1046 */
1047bool
1049{
1051 bool doPageWrites;
1052 Page page;
1053
1055
1056 page = BufferGetPage(buffer);
1057
1058 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1059 return true; /* buffer requires backup */
1060
1061 return false; /* buffer does not need to be backed up */
1062}
1063
1064/*
1065 * Write a backup block if needed when we are setting a hint. Note that
1066 * this may be called for a variety of page types, not just heaps.
1067 *
1068 * Callable while holding just share lock on the buffer content.
1069 *
1070 * We can't use the plain backup block mechanism since that relies on the
1071 * Buffer being exclusively locked. Since some modifications (setting LSN, hint
1072 * bits) are allowed in a sharelocked buffer that can lead to wal checksum
1073 * failures. So instead we copy the page and insert the copied data as normal
1074 * record data.
1075 *
1076 * We only need to do something if page has not yet been full page written in
1077 * this checkpoint round. The LSN of the inserted wal record is returned if we
1078 * had to write, InvalidXLogRecPtr otherwise.
1079 *
1080 * It is possible that multiple concurrent backends could attempt to write WAL
1081 * records. In that case, multiple copies of the same block would be recorded
1082 * in separate WAL records by different backends, though that is still OK from
1083 * a correctness perspective.
1084 */
1087{
1089 XLogRecPtr lsn;
1091
1092 /*
1093 * Ensure no checkpoint can change our view of RedoRecPtr.
1094 */
1096
1097 /*
1098 * Update RedoRecPtr so that we can make the right decision
1099 */
1101
1102 /*
1103 * We assume page LSN is first data on *every* page that can be passed to
1104 * XLogInsert, whether it has the standard page layout or not. Since we're
1105 * only holding a share-lock on the page, we must take the buffer header
1106 * lock when we look at the LSN.
1107 */
1108 lsn = BufferGetLSNAtomic(buffer);
1109
1110 if (lsn <= RedoRecPtr)
1111 {
1112 int flags = 0;
1114 char *origdata = (char *) BufferGetBlock(buffer);
1115 RelFileLocator rlocator;
1116 ForkNumber forkno;
1117 BlockNumber blkno;
1118
1119 /*
1120 * Copy buffer so we don't have to worry about concurrent hint bit or
1121 * lsn updates. We assume pd_lower/upper cannot be changed without an
1122 * exclusive lock, so the contents bkp are not racy.
1123 */
1124 if (buffer_std)
1125 {
1126 /* Assume we can omit data between pd_lower and pd_upper */
1127 Page page = BufferGetPage(buffer);
1128 uint16 lower = ((PageHeader) page)->pd_lower;
1129 uint16 upper = ((PageHeader) page)->pd_upper;
1130
1133 }
1134 else
1136
1138
1139 if (buffer_std)
1140 flags |= REGBUF_STANDARD;
1141
1142 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
1143 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
1144
1146 }
1147
1148 return recptr;
1149}
1150
1151/*
1152 * Write a WAL record containing a full image of a page. Caller is responsible
1153 * for writing the page to disk after calling this routine.
1154 *
1155 * Note: If you're using this function, you should be building pages in private
1156 * memory and writing them directly to smgr. If you're using buffers, call
1157 * log_newpage_buffer instead.
1158 *
1159 * If the page follows the standard page layout, with a PageHeader and unused
1160 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1161 * the unused space to be left out from the WAL record, making it smaller.
1162 */
1164log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno,
1165 Page page, bool page_std)
1166{
1167 int flags;
1169
1170 flags = REGBUF_FORCE_IMAGE;
1171 if (page_std)
1172 flags |= REGBUF_STANDARD;
1173
1175 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1177
1178 /*
1179 * The page may be uninitialized. If so, we can't set the LSN because that
1180 * would corrupt the page.
1181 */
1182 if (!PageIsNew(page))
1183 {
1184 PageSetLSN(page, recptr);
1185 }
1186
1187 return recptr;
1188}
1189
1190/*
1191 * Like log_newpage(), but allows logging multiple pages in one operation.
1192 * It is more efficient than calling log_newpage() for each page separately,
1193 * because we can write multiple pages in a single WAL record.
1194 */
1195void
1196log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages,
1197 BlockNumber *blknos, Page *pages, bool page_std)
1198{
1199 int flags;
1201 int i;
1202 int j;
1203
1204 flags = REGBUF_FORCE_IMAGE;
1205 if (page_std)
1206 flags |= REGBUF_STANDARD;
1207
1208 /*
1209 * Iterate over all the pages. They are collected into batches of
1210 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1211 * batch.
1212 */
1214
1215 i = 0;
1216 while (i < num_pages)
1217 {
1218 int batch_start = i;
1219 int nbatch;
1220
1222
1223 nbatch = 0;
1224 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1225 {
1226 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1227 i++;
1228 nbatch++;
1229 }
1230
1232
1233 for (j = batch_start; j < i; j++)
1234 {
1235 /*
1236 * The page may be uninitialized. If so, we can't set the LSN
1237 * because that would corrupt the page.
1238 */
1239 if (!PageIsNew(pages[j]))
1240 {
1241 PageSetLSN(pages[j], recptr);
1242 }
1243 }
1244 }
1245}
1246
1247/*
1248 * Write a WAL record containing a full image of a page.
1249 *
1250 * Caller should initialize the buffer and mark it dirty before calling this
1251 * function. This function will set the page LSN.
1252 *
1253 * If the page follows the standard page layout, with a PageHeader and unused
1254 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1255 * the unused space to be left out from the WAL record, making it smaller.
1256 */
1258log_newpage_buffer(Buffer buffer, bool page_std)
1259{
1260 Page page = BufferGetPage(buffer);
1261 RelFileLocator rlocator;
1262 ForkNumber forknum;
1263 BlockNumber blkno;
1264
1265 /* Shared buffers should be modified in a critical section. */
1267
1268 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1269
1270 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1271}
1272
1273/*
1274 * WAL-log a range of blocks in a relation.
1275 *
1276 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1277 * written to the WAL. If the range is large, this is done in multiple WAL
1278 * records.
1279 *
1280 * If all page follows the standard page layout, with a PageHeader and unused
1281 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1282 * the unused space to be left out from the WAL records, making them smaller.
1283 *
1284 * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1285 * is used on a newly-built relation, and the caller is holding a
1286 * AccessExclusiveLock on it, so no other backend can be accessing it at the
1287 * same time. If that's not the case, you must ensure that this does not
1288 * cause a deadlock through some other means.
1289 */
1290void
1293 bool page_std)
1294{
1295 int flags;
1296 BlockNumber blkno;
1297
1298 flags = REGBUF_FORCE_IMAGE;
1299 if (page_std)
1300 flags |= REGBUF_STANDARD;
1301
1302 /*
1303 * Iterate over all the pages in the range. They are collected into
1304 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1305 * for each batch.
1306 */
1308
1309 blkno = startblk;
1310 while (blkno < endblk)
1311 {
1314 int nbufs;
1315 int i;
1316
1318
1319 /* Collect a batch of blocks. */
1320 nbufs = 0;
1321 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1322 {
1323 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1324 RBM_NORMAL, NULL);
1325
1327
1328 /*
1329 * Completely empty pages are not WAL-logged. Writing a WAL record
1330 * would change the LSN, and we don't want that. We want the page
1331 * to stay empty.
1332 */
1334 bufpack[nbufs++] = buf;
1335 else
1337 blkno++;
1338 }
1339
1340 /* Nothing more to do if all remaining blocks were empty. */
1341 if (nbufs == 0)
1342 break;
1343
1344 /* Write WAL record for this batch. */
1346
1348 for (i = 0; i < nbufs; i++)
1349 {
1351 XLogRegisterBuffer(i, bufpack[i], flags);
1352 }
1353
1355
1356 for (i = 0; i < nbufs; i++)
1357 {
1360 }
1362 }
1363}
1364
1365/*
1366 * Allocate working buffers needed for WAL record construction.
1367 */
1368void
1369InitXLogInsert(void)
1370{
1371#ifdef USE_ASSERT_CHECKING
1372
1373 /*
1374 * Check that any records assembled can be decoded. This is capped based
1375 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1376 * addend covers the larger allocate_recordbuf() demand. This code path
1377 * is called once per backend, more than enough for this check.
1378 */
1379 size_t max_required =
1381
1383#endif
1384
1385 /* Initialize the working areas */
1386 if (xloginsert_cxt == NULL)
1387 {
1389 "WAL record construction",
1391 }
1392
1393 if (registered_buffers == NULL)
1394 {
1399 }
1400 if (rdatas == NULL)
1401 {
1403 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1405 }
1406
1407 /*
1408 * Allocate a buffer to hold the header information for a WAL record.
1409 */
1410 if (hdr_scratch == NULL)
1413}
uint32 BlockNumber
Definition block.h:31
int Buffer
Definition buf.h:23
bool BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
Definition bufmgr.c:2997
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4377
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3024
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition bufmgr.c:4634
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5518
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3056
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition bufmgr.c:911
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:466
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:433
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:220
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:328
@ RBM_NORMAL
Definition bufmgr.h:46
PageHeaderData * PageHeader
Definition bufpage.h:173
char PageData
Definition bufpage.h:80
static bool PageIsNew(const PageData *page)
Definition bufpage.h:233
#define SizeOfPageHeaderData
Definition bufpage.h:216
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:390
PageData * Page
Definition bufpage.h:81
static XLogRecPtr PageGetLSN(const PageData *page)
Definition bufpage.h:385
uint8_t uint8
Definition c.h:554
#define PG_UINT32_MAX
Definition c.h:614
#define Assert(condition)
Definition c.h:883
int64_t int64
Definition c.h:553
int32_t int32
Definition c.h:552
uint64_t uint64
Definition c.h:557
uint16_t uint16
Definition c.h:555
uint32_t uint32
Definition c.h:556
#define MemSet(start, val, len)
Definition c.h:1023
uint32 TransactionId
Definition c.h:676
int errmsg_internal(const char *fmt,...)
Definition elog.c:1170
int errdetail_internal(const char *fmt,...)
Definition elog.c:1243
#define PANIC
Definition elog.h:42
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
volatile uint32 CritSectionCount
Definition globals.c:45
static struct @172 value
int j
Definition isn.c:78
int i
Definition isn.c:77
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
MemoryContext TopMemoryContext
Definition mcxt.c:166
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AllocSizeIsValid(size)
Definition memutils.h:42
#define IsBootstrapProcessingMode()
Definition miscadmin.h:477
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define END_CRIT_SECTION()
Definition miscadmin.h:152
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
RepOriginId replorigin_session_origin
Definition origin.c:166
#define InvalidRepOriginId
Definition origin.h:33
#define XLOG_FPI
Definition pg_control.h:80
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:79
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:153
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
const void size_t len
const void * data
const PGLZ_Strategy *const PGLZ_strategy_default
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
static int fb(int x)
#define DELAY_CHKPT_START
Definition proc.h:135
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition relpath.h:56
uint8 RmgrId
Definition rmgr.h:11
PGPROC * MyProc
Definition proc.c:67
char data[BLCKSZ]
Definition c.h:1120
int delayChkptFlags
Definition proc.h:263
const void * data
struct XLogRecData * next
RelFileLocator rlocator
Definition xloginsert.c:74
Datum batch_start(PG_FUNCTION_ARGS)
Definition test_aio.c:668
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:442
TransactionId GetCurrentTransactionIdIfAny(void)
Definition xact.c:472
bool IsSubxactTopXidLogPending(void)
Definition xact.c:560
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6594
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:750
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6564
static XLogRecPtr RedoRecPtr
Definition xlog.c:276
static bool doPageWrites
Definition xlog.c:289
int wal_compression
Definition xlog.c:127
bool XLogInsertAllowed(void)
Definition xlog.c:6516
bool * wal_consistency_checking
Definition xlog.c:129
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:165
WalCompression
Definition xlog.h:82
@ WAL_COMPRESSION_NONE
Definition xlog.h:83
@ WAL_COMPRESSION_LZ4
Definition xlog.h:85
@ WAL_COMPRESSION_PGLZ
Definition xlog.h:84
@ WAL_COMPRESSION_ZSTD
Definition xlog.h:86
#define SizeOfXLogLongPHD
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
static XLogRecData * mainrdata_head
Definition xloginsert.c:100
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
Definition xloginsert.c:966
XLogRecPtr XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
Definition xloginsert.c:543
static int max_registered_buffers
Definition xloginsert.c:92
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:478
static uint8 curinsert_flags
Definition xloginsert.c:105
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:409
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:368
static uint64 mainrdata_len
Definition xloginsert.c:102
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
static bool begininsert_called
Definition xloginsert.c:134
static int max_registered_block_id
Definition xloginsert.c:93
XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
void InitXLogInsert(void)
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:460
static int num_rdatas
Definition xloginsert.c:131
void log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:313
static XLogRecData * mainrdata_last
Definition xloginsert.c:101
static MemoryContext xloginsert_cxt
Definition xloginsert.c:137
void log_newpage_range(Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
void XLogResetInsertion(void)
Definition xloginsert.c:225
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
static XLogRecData hdr_rdt
Definition xloginsert.c:115
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
Definition xloginsert.c:566
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:245
static char * hdr_scratch
Definition xloginsert.c:116
static XLogRecData * rdatas
Definition xloginsert.c:130
void XLogBeginInsert(void)
Definition xloginsert.c:152
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition xloginsert.c:178
#define COMPRESS_BUFSIZE
Definition xloginsert.c:64
static registered_buffer * registered_buffers
Definition xloginsert.c:91
static int max_rdatas
Definition xloginsert.c:132
#define HEADER_SCRATCH_SIZE
Definition xloginsert.c:121
#define REGBUF_NO_CHANGE
Definition xloginsert.h:37
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define XLR_NORMAL_MAX_BLOCK_ID
Definition xloginsert.h:28
#define REGBUF_FORCE_IMAGE
Definition xloginsert.h:32
#define XLR_NORMAL_RDATAS
Definition xloginsert.h:29
#define REGBUF_NO_IMAGE
Definition xloginsert.h:33
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define SizeOfXLogRecordBlockImageHeader
Definition xlogrecord.h:153
#define XLogRecordMaxSize
Definition xlogrecord.h:74
#define BKPIMAGE_COMPRESS_ZSTD
Definition xlogrecord.h:162
#define BKPBLOCK_HAS_DATA
Definition xlogrecord.h:198
#define BKPIMAGE_APPLY
Definition xlogrecord.h:158
#define BKPIMAGE_HAS_HOLE
Definition xlogrecord.h:157
#define XLR_BLOCK_ID_DATA_LONG
Definition xlogrecord.h:242
#define BKPBLOCK_WILL_INIT
Definition xlogrecord.h:199
#define XLR_RMGR_INFO_MASK
Definition xlogrecord.h:63
#define BKPIMAGE_COMPRESS_LZ4
Definition xlogrecord.h:161
#define XLR_BLOCK_ID_TOPLEVEL_XID
Definition xlogrecord.h:244
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define SizeOfXLogRecordBlockCompressHeader
Definition xlogrecord.h:177
#define BKPBLOCK_SAME_REL
Definition xlogrecord.h:200
#define XLR_SPECIAL_REL_UPDATE
Definition xlogrecord.h:82
#define SizeOfXLogRecordBlockHeader
Definition xlogrecord.h:115
#define BKPIMAGE_COMPRESS_PGLZ
Definition xlogrecord.h:160
#define XLR_BLOCK_ID_ORIGIN
Definition xlogrecord.h:243
#define BKPBLOCK_HAS_IMAGE
Definition xlogrecord.h:197
#define XLR_CHECK_CONSISTENCY
Definition xlogrecord.h:91

◆ LZ4_MAX_BLCKSZ

#define LZ4_MAX_BLCKSZ   0

Definition at line 52 of file xloginsert.c.

◆ PGLZ_MAX_BLCKSZ

#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)

Definition at line 61 of file xloginsert.c.

◆ SizeOfXlogOrigin

#define SizeOfXlogOrigin   (sizeof(RepOriginId) + sizeof(char))

Definition at line 118 of file xloginsert.c.

◆ SizeOfXLogTransactionId

#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))

Definition at line 119 of file xloginsert.c.

◆ ZSTD_MAX_BLCKSZ

#define ZSTD_MAX_BLCKSZ   0

Definition at line 58 of file xloginsert.c.

Function Documentation

◆ InitXLogInsert()

void InitXLogInsert ( void  )

Definition at line 1370 of file xloginsert.c.

1371{
1372#ifdef USE_ASSERT_CHECKING
1373
1374 /*
1375 * Check that any records assembled can be decoded. This is capped based
1376 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1377 * addend covers the larger allocate_recordbuf() demand. This code path
1378 * is called once per backend, more than enough for this check.
1379 */
1380 size_t max_required =
1382
1384#endif
1385
1386 /* Initialize the working areas */
1387 if (xloginsert_cxt == NULL)
1388 {
1390 "WAL record construction",
1392 }
1393
1394 if (registered_buffers == NULL)
1395 {
1400 }
1401 if (rdatas == NULL)
1402 {
1404 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1406 }
1407
1408 /*
1409 * Allocate a buffer to hold the header information for a WAL record.
1410 */
1411 if (hdr_scratch == NULL)
1414}

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AllocSizeIsValid, Assert, DecodeXLogRecordRequiredSpace(), fb(), hdr_scratch, HEADER_SCRATCH_SIZE, max_rdatas, max_registered_buffers, MemoryContextAlloc(), MemoryContextAllocZero(), rdatas, registered_buffers, TopMemoryContext, xloginsert_cxt, XLogRecordMaxSize, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by BaseInit().

◆ log_newpage()

XLogRecPtr log_newpage ( RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blkno,
Page  page,
bool  page_std 
)

Definition at line 1165 of file xloginsert.c.

1167{
1168 int flags;
1170
1171 flags = REGBUF_FORCE_IMAGE;
1172 if (page_std)
1173 flags |= REGBUF_STANDARD;
1174
1176 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1178
1179 /*
1180 * The page may be uninitialized. If so, we can't set the LSN because that
1181 * would corrupt the page.
1182 */
1183 if (!PageIsNew(page))
1184 {
1185 PageSetLSN(page, recptr);
1186 }
1187
1188 return recptr;
1189}

References fb(), PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogInsert(), and XLogRegisterBlock().

Referenced by _hash_alloc_buckets(), _hash_init(), and log_newpage_buffer().

◆ log_newpage_buffer()

XLogRecPtr log_newpage_buffer ( Buffer  buffer,
bool  page_std 
)

Definition at line 1259 of file xloginsert.c.

1260{
1261 Page page = BufferGetPage(buffer);
1262 RelFileLocator rlocator;
1263 ForkNumber forknum;
1264 BlockNumber blkno;
1265
1266 /* Shared buffers should be modified in a critical section. */
1268
1269 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1270
1271 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1272}

References Assert, BufferGetPage(), BufferGetTag(), CritSectionCount, and log_newpage().

Referenced by brin_initialize_empty_new_buffer(), brinbuildempty(), FreeSpaceMapPrepareTruncateRel(), ginbuildempty(), gistbuildempty(), heap_force_common(), lazy_scan_new_or_empty(), RelationCopyStorageUsingBuffer(), and visibilitymap_prepare_truncate().

◆ log_newpage_range()

void log_newpage_range ( Relation  rel,
ForkNumber  forknum,
BlockNumber  startblk,
BlockNumber  endblk,
bool  page_std 
)

Definition at line 1292 of file xloginsert.c.

1295{
1296 int flags;
1297 BlockNumber blkno;
1298
1299 flags = REGBUF_FORCE_IMAGE;
1300 if (page_std)
1301 flags |= REGBUF_STANDARD;
1302
1303 /*
1304 * Iterate over all the pages in the range. They are collected into
1305 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1306 * for each batch.
1307 */
1309
1310 blkno = startblk;
1311 while (blkno < endblk)
1312 {
1315 int nbufs;
1316 int i;
1317
1319
1320 /* Collect a batch of blocks. */
1321 nbufs = 0;
1322 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1323 {
1324 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1325 RBM_NORMAL, NULL);
1326
1328
1329 /*
1330 * Completely empty pages are not WAL-logged. Writing a WAL record
1331 * would change the LSN, and we don't want that. We want the page
1332 * to stay empty.
1333 */
1335 bufpack[nbufs++] = buf;
1336 else
1338 blkno++;
1339 }
1340
1341 /* Nothing more to do if all remaining blocks were empty. */
1342 if (nbufs == 0)
1343 break;
1344
1345 /* Write WAL record for this batch. */
1347
1349 for (i = 0; i < nbufs; i++)
1350 {
1352 XLogRegisterBuffer(i, bufpack[i], flags);
1353 }
1354
1356
1357 for (i = 0; i < nbufs; i++)
1358 {
1361 }
1363 }
1364}

References buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), CHECK_FOR_INTERRUPTS, END_CRIT_SECTION, fb(), i, LockBuffer(), MarkBufferDirty(), PageIsNew(), PageSetLSN(), RBM_NORMAL, ReadBufferExtended(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBuffer(), and XLR_MAX_BLOCK_ID.

Referenced by ginbuild(), gistbuild(), smgrDoPendingSyncs(), and spgbuild().

◆ log_newpages()

void log_newpages ( RelFileLocator rlocator,
ForkNumber  forknum,
int  num_pages,
BlockNumber blknos,
Page pages,
bool  page_std 
)

Definition at line 1197 of file xloginsert.c.

1199{
1200 int flags;
1202 int i;
1203 int j;
1204
1205 flags = REGBUF_FORCE_IMAGE;
1206 if (page_std)
1207 flags |= REGBUF_STANDARD;
1208
1209 /*
1210 * Iterate over all the pages. They are collected into batches of
1211 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1212 * batch.
1213 */
1215
1216 i = 0;
1217 while (i < num_pages)
1218 {
1219 int batch_start = i;
1220 int nbatch;
1221
1223
1224 nbatch = 0;
1225 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1226 {
1227 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1228 i++;
1229 nbatch++;
1230 }
1231
1233
1234 for (j = batch_start; j < i; j++)
1235 {
1236 /*
1237 * The page may be uninitialized. If so, we can't set the LSN
1238 * because that would corrupt the page.
1239 */
1240 if (!PageIsNew(pages[j]))
1241 {
1242 PageSetLSN(pages[j], recptr);
1243 }
1244 }
1245 }
1246}

References batch_start(), fb(), i, j, PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBlock(), and XLR_MAX_BLOCK_ID.

Referenced by smgr_bulk_flush().

◆ XLogBeginInsert()

void XLogBeginInsert ( void  )

Definition at line 152 of file xloginsert.c.

153{
156 Assert(mainrdata_len == 0);
157
158 /* cross-check on whether we should be here or not */
159 if (!XLogInsertAllowed())
160 elog(ERROR, "cannot make new WAL entries during recovery");
161
163 elog(ERROR, "XLogBeginInsert was already called");
164
165 begininsert_called = true;
166}

References Assert, begininsert_called, elog, ERROR, mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, and XLogInsertAllowed().

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogAssignLSN(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), 
UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogCheckBufferNeedsBackup()

bool XLogCheckBufferNeedsBackup ( Buffer  buffer)

Definition at line 1049 of file xloginsert.c.

1050{
1052 bool doPageWrites;
1053 Page page;
1054
1056
1057 page = BufferGetPage(buffer);
1058
1059 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1060 return true; /* buffer requires backup */
1061
1062 return false; /* buffer does not need to be backed up */
1063}

References BufferGetPage(), doPageWrites, GetFullPageWriteInfo(), PageGetLSN(), and RedoRecPtr.

Referenced by heap_page_will_freeze(), and log_heap_update().

◆ XLogCompressBackupBlock()

static bool XLogCompressBackupBlock ( const PageData page,
uint16  hole_offset,
uint16  hole_length,
void dest,
uint16 dlen 
)
static

Definition at line 966 of file xloginsert.c.

968{
969 int32 orig_len = BLCKSZ - hole_length;
970 int32 len = -1;
971 int32 extra_bytes = 0;
972 const void *source;
973 PGAlignedBlock tmp;
974
975 if (hole_length != 0)
976 {
977 /* must skip the hole */
978 memcpy(tmp.data, page, hole_offset);
979 memcpy(tmp.data + hole_offset,
980 page + (hole_offset + hole_length),
981 BLCKSZ - (hole_length + hole_offset));
982 source = tmp.data;
983
984 /*
985 * Extra data needs to be stored in WAL record for the compressed
986 * version of block image if the hole exists.
987 */
989 }
990 else
991 source = page;
992
994 {
997 break;
998
1000#ifdef USE_LZ4
1003 if (len <= 0)
1004 len = -1; /* failure */
1005#else
1006 elog(ERROR, "LZ4 is not supported by this build");
1007#endif
1008 break;
1009
1011#ifdef USE_ZSTD
1014 if (ZSTD_isError(len))
1015 len = -1; /* failure */
1016#else
1017 elog(ERROR, "zstd is not supported by this build");
1018#endif
1019 break;
1020
1022 Assert(false); /* cannot happen */
1023 break;
1024 /* no default case, so that compiler will warn */
1025 }
1026
1027 /*
1028 * We recheck the actual size even if compression reports success and see
1029 * if the number of bytes saved by compression is larger than the length
1030 * of extra data needed for the compressed version of block image.
1031 */
1032 if (len >= 0 &&
1034 {
1035 *dlen = (uint16) len; /* successful compression */
1036 return true;
1037 }
1038 return false;
1039}

References Assert, COMPRESS_BUFSIZE, PGAlignedBlock::data, elog, ERROR, fb(), len, pglz_compress(), PGLZ_strategy_default, SizeOfXLogRecordBlockCompressHeader, source, wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, and WAL_COMPRESSION_ZSTD.

Referenced by XLogRecordAssemble().

◆ XLogEnsureRecordSpace()

void XLogEnsureRecordSpace ( int  max_block_id,
int  ndatas 
)

Definition at line 178 of file xloginsert.c.

179{
180 int nbuffers;
181
182 /*
183 * This must be called before entering a critical section, because
184 * allocating memory inside a critical section can fail. repalloc() will
185 * check the same, but better to check it here too so that we fail
186 * consistently even if the arrays happen to be large enough already.
187 */
189
190 /* the minimum values can't be decreased */
191 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
192 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
195
196 if (max_block_id > XLR_MAX_BLOCK_ID)
197 elog(ERROR, "maximum number of WAL record block references exceeded");
198 nbuffers = max_block_id + 1;
199
200 if (nbuffers > max_registered_buffers)
201 {
203 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
204
205 /*
206 * At least the padding bytes in the structs must be zeroed, because
207 * they are included in WAL data, but initialize it all for tidiness.
208 */
210 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
211 max_registered_buffers = nbuffers;
212 }
213
214 if (ndatas > max_rdatas)
215 {
218 }
219}

References Assert, CritSectionCount, elog, ERROR, fb(), max_rdatas, max_registered_buffers, MemSet, rdatas, registered_buffers, repalloc(), XLR_MAX_BLOCK_ID, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), EndPrepare(), gistplacetopage(), log_newpage_range(), log_newpages(), and shiftList().

◆ XLogInsert()

XLogRecPtr XLogInsert ( RmgrId  rmid,
uint8  info 
)

Definition at line 478 of file xloginsert.c.

479{
481
482 /* XLogBeginInsert() must have been called. */
484 elog(ERROR, "XLogBeginInsert was not called");
485
486 /*
487 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
488 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
489 */
490 if ((info & ~(XLR_RMGR_INFO_MASK |
493 elog(PANIC, "invalid xlog info mask %02X", info);
494
495 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
496
497 /*
498 * In bootstrap mode, we don't actually log anything but XLOG resources;
499 * return a phony record pointer.
500 */
501 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
502 {
504 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
505 return EndPos;
506 }
507
508 do
509 {
511 bool doPageWrites;
512 bool topxid_included = false;
515 int num_fpi = 0;
516 uint64 fpi_bytes = 0;
517
518 /*
519 * Get values needed to decide whether to do full-page writes. Since
520 * we don't yet have an insertion lock, these could change under us,
521 * but XLogInsertRecord will recheck them once it has a lock.
522 */
524
528
531 } while (!XLogRecPtrIsValid(EndPos));
532
534
535 return EndPos;
536}

References begininsert_called, curinsert_flags, doPageWrites, elog, ERROR, fb(), GetFullPageWriteInfo(), IsBootstrapProcessingMode, PANIC, RedoRecPtr, SizeOfXLogLongPHD, XLogInsertRecord(), XLogRecordAssemble(), XLogRecPtrIsValid, XLogResetInsertion(), XLR_CHECK_CONSISTENCY, XLR_RMGR_INFO_MASK, and XLR_SPECIAL_REL_UPDATE.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogAssignLSN(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), 
UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogRecordAssemble()

static XLogRecData * XLogRecordAssemble ( RmgrId  rmid,
uint8  info,
XLogRecPtr  RedoRecPtr,
bool  doPageWrites,
XLogRecPtr fpw_lsn,
int num_fpi,
uint64 fpi_bytes,
bool topxid_included 
)
static

Definition at line 566 of file xloginsert.c.

570{
572 uint64 total_len = 0;
573 int block_id;
578 char *scratch = hdr_scratch;
579
580 /*
581 * Note: this function can be called multiple times for the same record.
582 * All the modifications we do to the rdata chains below must handle that.
583 */
584
585 /* The record begins with the fixed-size header */
588
589 hdr_rdt.next = NULL;
592
593 /*
594 * Enforce consistency checks for this record if user is looking for it.
 595 * Do this at the beginning of this routine so that callers of
 596 * XLogInsert() can pass XLR_CHECK_CONSISTENCY directly for a
 597 * record.
598 */
599 if (wal_consistency_checking[rmid])
600 info |= XLR_CHECK_CONSISTENCY;
601
602 /*
603 * Make an rdata chain containing all the data portions of all block
604 * references. This includes the data for full-page images. Also append
605 * the headers for the block references in the scratch buffer.
606 */
609 {
611 bool needs_backup;
612 bool needs_data;
616 bool samerel;
617 bool is_compressed = false;
618 bool include_image;
619
620 if (!regbuf->in_use)
621 continue;
622
623 /* Determine if this block needs to be backed up */
624 if (regbuf->flags & REGBUF_FORCE_IMAGE)
625 needs_backup = true;
626 else if (regbuf->flags & REGBUF_NO_IMAGE)
627 needs_backup = false;
628 else if (!doPageWrites)
629 needs_backup = false;
630 else
631 {
632 /*
633 * We assume page LSN is first data on *every* page that can be
634 * passed to XLogInsert, whether it has the standard page layout
635 * or not.
636 */
637 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
638
639 needs_backup = (page_lsn <= RedoRecPtr);
640 if (!needs_backup)
641 {
642 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
643 *fpw_lsn = page_lsn;
644 }
645 }
646
 647 /* Determine if the buffer data needs to be included */
648 if (regbuf->rdata_len == 0)
649 needs_data = false;
650 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
651 needs_data = true;
652 else
654
655 bkpb.id = block_id;
656 bkpb.fork_flags = regbuf->forkno;
657 bkpb.data_length = 0;
658
659 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
660 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
661
662 /*
663 * If needs_backup is true or WAL checking is enabled for current
664 * resource manager, log a full-page write for the current block.
665 */
667
668 if (include_image)
669 {
670 const PageData *page = regbuf->page;
672
673 /*
674 * The page needs to be backed up, so calculate its hole length
675 * and offset.
676 */
677 if (regbuf->flags & REGBUF_STANDARD)
678 {
679 /* Assume we can omit data between pd_lower and pd_upper */
680 uint16 lower = ((PageHeader) page)->pd_lower;
681 uint16 upper = ((PageHeader) page)->pd_upper;
682
684 upper > lower &&
685 upper <= BLCKSZ)
686 {
687 bimg.hole_offset = lower;
688 cbimg.hole_length = upper - lower;
689 }
690 else
691 {
692 /* No "hole" to remove */
693 bimg.hole_offset = 0;
694 cbimg.hole_length = 0;
695 }
696 }
697 else
698 {
699 /* Not a standard page header, don't try to eliminate "hole" */
700 bimg.hole_offset = 0;
701 cbimg.hole_length = 0;
702 }
703
704 /*
705 * Try to compress a block image if wal_compression is enabled
706 */
708 {
710 XLogCompressBackupBlock(page, bimg.hole_offset,
711 cbimg.hole_length,
712 regbuf->compressed_page,
714 }
715
716 /*
717 * Fill in the remaining fields in the XLogRecordBlockHeader
718 * struct
719 */
720 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
721
722 /* Report a full page image constructed for the WAL record */
723 *num_fpi += 1;
724
725 /*
726 * Construct XLogRecData entries for the page content.
727 */
728 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
730
731 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
732
733 /*
734 * If WAL consistency checking is enabled for the resource manager
735 * of this WAL record, a full-page image is included in the record
736 * for the block modified. During redo, the full-page is replayed
737 * only if BKPIMAGE_APPLY is set.
738 */
739 if (needs_backup)
740 bimg.bimg_info |= BKPIMAGE_APPLY;
741
742 if (is_compressed)
743 {
744 /* The current compression is stored in the WAL record */
745 bimg.length = compressed_len;
746
747 /* Set the compression method used for this block */
749 {
751 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
752 break;
753
755#ifdef USE_LZ4
756 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
757#else
758 elog(ERROR, "LZ4 is not supported by this build");
759#endif
760 break;
761
763#ifdef USE_ZSTD
764 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
765#else
766 elog(ERROR, "zstd is not supported by this build");
767#endif
768 break;
769
771 Assert(false); /* cannot happen */
772 break;
773 /* no default case, so that compiler will warn */
774 }
775
776 rdt_datas_last->data = regbuf->compressed_page;
778 }
779 else
780 {
781 bimg.length = BLCKSZ - cbimg.hole_length;
782
783 if (cbimg.hole_length == 0)
784 {
785 rdt_datas_last->data = page;
786 rdt_datas_last->len = BLCKSZ;
787 }
788 else
789 {
790 /* must skip the hole */
791 rdt_datas_last->data = page;
792 rdt_datas_last->len = bimg.hole_offset;
793
794 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
796
797 rdt_datas_last->data =
798 page + (bimg.hole_offset + cbimg.hole_length);
799 rdt_datas_last->len =
800 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
801 }
802 }
803
804 total_len += bimg.length;
805
806 /* Track the WAL full page images in bytes */
807 *fpi_bytes += bimg.length;
808 }
809
810 if (needs_data)
811 {
812 /*
813 * When copying to XLogRecordBlockHeader, the length is narrowed
 814 * to a uint16. Double-check that it is still correct.
815 */
816 Assert(regbuf->rdata_len <= UINT16_MAX);
817
818 /*
819 * Link the caller-supplied rdata chain for this buffer to the
820 * overall list.
821 */
822 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
823 bkpb.data_length = (uint16) regbuf->rdata_len;
824 total_len += regbuf->rdata_len;
825
826 rdt_datas_last->next = regbuf->rdata_head;
827 rdt_datas_last = regbuf->rdata_tail;
828 }
829
830 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
831 {
832 samerel = true;
833 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
834 }
835 else
836 samerel = false;
838
839 /* Ok, copy the header to the scratch buffer */
842 if (include_image)
843 {
846 if (cbimg.hole_length != 0 && is_compressed)
847 {
851 }
852 }
853 if (!samerel)
854 {
855 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
856 scratch += sizeof(RelFileLocator);
857 }
858 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
859 scratch += sizeof(BlockNumber);
860 }
861
862 /* followed by the record's origin, if any */
865 {
866 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
869 }
870
871 /* followed by toplevel XID, if not already included in previous record */
873 {
875
876 /* Set the flag that the top xid is included in the WAL */
877 *topxid_included = true;
878
880 memcpy(scratch, &xid, sizeof(TransactionId));
881 scratch += sizeof(TransactionId);
882 }
883
884 /* followed by main data, if any */
885 if (mainrdata_len > 0)
886 {
887 if (mainrdata_len > 255)
888 {
890
893 (errmsg_internal("too much WAL data"),
894 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
896 PG_UINT32_MAX)));
897
899 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
901 scratch += sizeof(uint32);
902 }
903 else
904 {
905 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
906 *(scratch++) = (uint8) mainrdata_len;
907 }
910 total_len += mainrdata_len;
911 }
913
915 total_len += hdr_rdt.len;
916
917 /*
918 * Calculate CRC of the data
919 *
920 * Note that the record header isn't added into the CRC initially since we
921 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
922 * the whole record in the order: rdata, then backup blocks, then record
923 * header.
924 */
927 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
928 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
929
930 /*
931 * Ensure that the XLogRecord is not too large.
932 *
933 * XLogReader machinery is only able to handle records up to a certain
934 * size (ignoring machine resource limitations), so make sure that we will
935 * not emit records larger than the sizes advertised to be supported.
936 */
937 if (total_len > XLogRecordMaxSize)
939 (errmsg_internal("oversized WAL record"),
940 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
941 total_len, XLogRecordMaxSize, rmid, info)));
942
943 /*
944 * Fill in the fields in the record header. Prev-link is filled in later,
945 * once we know where in the WAL the record will be inserted. The CRC does
946 * not include the record header yet.
947 */
949 rechdr->xl_tot_len = (uint32) total_len;
950 rechdr->xl_info = info;
951 rechdr->xl_rmid = rmid;
952 rechdr->xl_prev = InvalidXLogRecPtr;
953 rechdr->xl_crc = rdata_crc;
954
955 return &hdr_rdt;
956}

References Assert, BKPBLOCK_HAS_DATA, BKPBLOCK_HAS_IMAGE, BKPBLOCK_SAME_REL, BKPBLOCK_WILL_INIT, BKPIMAGE_APPLY, BKPIMAGE_COMPRESS_LZ4, BKPIMAGE_COMPRESS_PGLZ, BKPIMAGE_COMPRESS_ZSTD, BKPIMAGE_HAS_HOLE, COMP_CRC32C, curinsert_flags, XLogRecData::data, doPageWrites, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), GetCurrentTransactionIdIfAny(), GetTopTransactionIdIfAny(), hdr_rdt, hdr_scratch, INIT_CRC32C, InvalidRepOriginId, InvalidXLogRecPtr, IsSubxactTopXidLogPending(), XLogRecData::len, lower(), mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, XLogRecData::next, PageGetLSN(), PG_UINT32_MAX, RedoRecPtr, REGBUF_FORCE_IMAGE, REGBUF_KEEP_DATA, REGBUF_NO_IMAGE, REGBUF_STANDARD, REGBUF_WILL_INIT, registered_buffers, RelFileLocatorEquals, replorigin_session_origin, SizeOfPageHeaderData, SizeOfXLogRecord, SizeOfXLogRecordBlockCompressHeader, SizeOfXLogRecordBlockHeader, SizeOfXLogRecordBlockImageHeader, upper(), wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, WAL_COMPRESSION_ZSTD, wal_consistency_checking, XLOG_INCLUDE_ORIGIN, XLogCompressBackupBlock(), XLogRecordMaxSize, XLogRecPtrIsValid, XLR_BLOCK_ID_DATA_LONG, XLR_BLOCK_ID_DATA_SHORT, XLR_BLOCK_ID_ORIGIN, XLR_BLOCK_ID_TOPLEVEL_XID, and XLR_CHECK_CONSISTENCY.

Referenced by XLogInsert().

◆ XLogRegisterBlock()

void XLogRegisterBlock ( uint8  block_id,
RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blknum,
const PageData page,
uint8  flags 
)

Definition at line 313 of file xloginsert.c.

315{
317
319
322
324 elog(ERROR, "too many registered buffers");
325
327
328 regbuf->rlocator = *rlocator;
329 regbuf->forkno = forknum;
330 regbuf->block = blknum;
331 regbuf->page = page;
332 regbuf->flags = flags;
333 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
334 regbuf->rdata_len = 0;
335
336 /*
337 * Check that this page hasn't already been registered with some other
338 * block_id.
339 */
340#ifdef USE_ASSERT_CHECKING
341 {
342 int i;
343
344 for (i = 0; i < max_registered_block_id; i++)
345 {
347
348 if (i == block_id || !regbuf_old->in_use)
349 continue;
350
351 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
352 regbuf_old->forkno != regbuf->forkno ||
353 regbuf_old->block != regbuf->block);
354 }
355 }
356#endif
357
358 regbuf->in_use = true;
359}

References Assert, begininsert_called, elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, registered_buffers, RelFileLocatorEquals, and registered_buffer::rlocator.

Referenced by heap_inplace_update_and_unlock(), log_newpage(), log_newpages(), and XLogSaveBufferForHint().

◆ XLogRegisterBufData()

void XLogRegisterBufData ( uint8  block_id,
const void data,
uint32  len 
)

Definition at line 409 of file xloginsert.c.

410{
413
415
416 /* find the registered buffer struct */
418 if (!regbuf->in_use)
419 elog(ERROR, "no block with id %d registered with WAL insertion",
420 block_id);
421
422 /*
423 * Check against max_rdatas and ensure we do not register more data per
424 * buffer than can be handled by the physical data format; i.e. that
425 * regbuf->rdata_len does not grow beyond what
426 * XLogRecordBlockHeader->data_length can hold.
427 */
428 if (num_rdatas >= max_rdatas)
430 (errmsg_internal("too much WAL data"),
431 errdetail_internal("%d out of %d data segments are already in use.",
433 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
435 (errmsg_internal("too much WAL data"),
436 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
437 UINT16_MAX, block_id, regbuf->rdata_len, len)));
438
439 rdata = &rdatas[num_rdatas++];
440
441 rdata->data = data;
442 rdata->len = len;
443
444 regbuf->rdata_tail->next = rdata;
445 regbuf->rdata_tail = rdata;
446 regbuf->rdata_len += len;
447}

References Assert, begininsert_called, XLogRecData::data, data, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, max_rdatas, num_rdatas, rdatas, and registered_buffers.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_squeezebucket(), brin_doinsert(), brin_doupdate(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), entryExecPlaceToPage(), GenericXLogFinish(), ginHeapTupleFastInsert(), ginVacuumPostingTreeLeaf(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), heap_inplace_update_and_unlock(), heap_insert(), heap_multi_insert(), log_heap_prune_and_freeze(), log_heap_update(), and writeListPage().

◆ XLogRegisterBuffer()

void XLogRegisterBuffer ( uint8  block_id,
Buffer  buffer,
uint8  flags 
)

Definition at line 245 of file xloginsert.c.

246{
248
249 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
250 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
252
253 /*
254 * Ordinarily, buffer should be exclusive-locked and marked dirty before
255 * we get here, otherwise we could end up violating one of the rules in
256 * access/transam/README.
257 *
258 * Some callers intentionally register a clean page and never update that
259 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
260 * bypass these checks.
261 */
262#ifdef USE_ASSERT_CHECKING
263 if (!(flags & REGBUF_NO_CHANGE))
265 BufferIsDirty(buffer));
266#endif
267
269 {
271 elog(ERROR, "too many registered buffers");
273 }
274
276
277 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
278 regbuf->page = BufferGetPage(buffer);
279 regbuf->flags = flags;
280 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
281 regbuf->rdata_len = 0;
282
283 /*
284 * Check that this page hasn't already been registered with some other
285 * block_id.
286 */
287#ifdef USE_ASSERT_CHECKING
288 {
289 int i;
290
291 for (i = 0; i < max_registered_block_id; i++)
292 {
294
295 if (i == block_id || !regbuf_old->in_use)
296 continue;
297
298 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
299 regbuf_old->forkno != regbuf->forkno ||
300 regbuf_old->block != regbuf->block);
301 }
302 }
303#endif
304
305 regbuf->in_use = true;
306}

References Assert, begininsert_called, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferGetTag(), BufferIsDirty(), BufferIsLockedByMeInMode(), elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, REGBUF_FORCE_IMAGE, REGBUF_NO_CHANGE, REGBUF_NO_IMAGE, registered_buffers, and RelFileLocatorEquals.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage_range(), log_split_page(), moveLeafs(), nextval_internal(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), writeListPage(), and xlogVacuumPage().

◆ XLogRegisterData()

void XLogRegisterData ( const void data,
uint32  len 
)

Definition at line 368 of file xloginsert.c.

369{
371
373
374 if (num_rdatas >= max_rdatas)
376 (errmsg_internal("too much WAL data"),
377 errdetail_internal("%d out of %d data segments are already in use.",
379 rdata = &rdatas[num_rdatas++];
380
381 rdata->data = data;
382 rdata->len = len;
383
384 /*
385 * we use the mainrdata_last pointer to track the end of the chain, so no
386 * need to clear 'next' here.
387 */
388
391
393}

References Assert, begininsert_called, XLogRecData::data, data, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, mainrdata_last, mainrdata_len, max_rdatas, XLogRecData::next, num_rdatas, and rdatas.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), gistXLogAssignLSN(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_smgrcreate(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), 
writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), and XLogSimpleInsertInt64().

◆ XLogResetInsertion()

void XLogResetInsertion ( void  )

◆ XLogSaveBufferForHint()

XLogRecPtr XLogSaveBufferForHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 1087 of file xloginsert.c.

1088{
1090 XLogRecPtr lsn;
1092
1093 /*
1094 * Ensure no checkpoint can change our view of RedoRecPtr.
1095 */
1097
1098 /*
1099 * Update RedoRecPtr so that we can make the right decision
1100 */
1102
1103 /*
1104 * We assume page LSN is first data on *every* page that can be passed to
1105 * XLogInsert, whether it has the standard page layout or not. Since we're
1106 * only holding a share-lock on the page, we must take the buffer header
1107 * lock when we look at the LSN.
1108 */
1109 lsn = BufferGetLSNAtomic(buffer);
1110
1111 if (lsn <= RedoRecPtr)
1112 {
1113 int flags = 0;
1115 char *origdata = (char *) BufferGetBlock(buffer);
1116 RelFileLocator rlocator;
1117 ForkNumber forkno;
1118 BlockNumber blkno;
1119
1120 /*
1121 * Copy buffer so we don't have to worry about concurrent hint bit or
1122 * lsn updates. We assume pd_lower/upper cannot be changed without an
 1123 * exclusive lock, so the contents of the backup image are not racy.
1124 */
1125 if (buffer_std)
1126 {
1127 /* Assume we can omit data between pd_lower and pd_upper */
1128 Page page = BufferGetPage(buffer);
1129 uint16 lower = ((PageHeader) page)->pd_lower;
1130 uint16 upper = ((PageHeader) page)->pd_upper;
1131
1134 }
1135 else
1137
1139
1140 if (buffer_std)
1141 flags |= REGBUF_STANDARD;
1142
1143 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
1144 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
1145
1147 }
1148
1149 return recptr;
1150}

References Assert, BufferGetBlock(), BufferGetLSNAtomic(), BufferGetPage(), BufferGetTag(), DELAY_CHKPT_START, PGPROC::delayChkptFlags, fb(), GetRedoRecPtr(), InvalidXLogRecPtr, lower(), MyProc, RedoRecPtr, REGBUF_STANDARD, upper(), XLOG_FPI_FOR_HINT, XLogBeginInsert(), XLogInsert(), and XLogRegisterBlock().

Referenced by MarkBufferDirtyHint().

◆ XLogSetRecordFlags()

◆ XLogSimpleInsertInt64()

XLogRecPtr XLogSimpleInsertInt64 ( RmgrId  rmid,
uint8  info,
int64  value 
)

Definition at line 543 of file xloginsert.c.

544{
546 XLogRegisterData(&value, sizeof(value));
547 return XLogInsert(rmid, info);
548}

References value, XLogBeginInsert(), XLogInsert(), and XLogRegisterData().

Referenced by ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), and ExtendMultiXactOffset().

Variable Documentation

◆ begininsert_called

◆ curinsert_flags

uint8 curinsert_flags = 0
static

◆ hdr_rdt

XLogRecData hdr_rdt
static

Definition at line 115 of file xloginsert.c.

Referenced by XLogRecordAssemble().

◆ hdr_scratch

char* hdr_scratch = NULL
static

Definition at line 116 of file xloginsert.c.

Referenced by InitXLogInsert(), and XLogRecordAssemble().

◆ mainrdata_head

XLogRecData* mainrdata_head
static

Definition at line 100 of file xloginsert.c.

Referenced by XLogBeginInsert(), XLogRecordAssemble(), and XLogResetInsertion().

◆ mainrdata_last

XLogRecData* mainrdata_last = (XLogRecData *) &mainrdata_head
static

◆ mainrdata_len

uint64 mainrdata_len
static

◆ max_rdatas

int max_rdatas
static

◆ max_registered_block_id

int max_registered_block_id = 0
static

◆ max_registered_buffers

int max_registered_buffers
static

◆ num_rdatas

int num_rdatas
static

Definition at line 131 of file xloginsert.c.

Referenced by XLogRegisterBufData(), XLogRegisterData(), and XLogResetInsertion().

◆ rdatas

XLogRecData* rdatas
static

◆ registered_buffers

◆ xloginsert_cxt

MemoryContext xloginsert_cxt
static

Definition at line 137 of file xloginsert.c.

Referenced by InitXLogInsert().