PostgreSQL Source Code git master
xloginsert.c File Reference
NOTE(review): this page is a partial HTML-to-text capture of a Doxygen rendering; the embedded source line numbers below are non-contiguous, so many original source lines are missing and the code excerpts are incomplete.
#include "postgres.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xloginsert.h"
#include "catalog/pg_control.h"
#include "common/pg_lzcompress.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "replication/origin.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "utils/memutils.h"
#include "utils/pgstat_internal.h"
#include "utils/rel.h"
Include dependency graph for xloginsert.c:

Go to the source code of this file.

Data Structures

struct  registered_buffer
 

Macros

#define LZ4_MAX_BLCKSZ   0
 
#define ZSTD_MAX_BLCKSZ   0
 
#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)
 
#define COMPRESS_BUFSIZE   Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
 
#define SizeOfXlogOrigin   (sizeof(ReplOriginId) + sizeof(char))
 
#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))
 
#define HEADER_SCRATCH_SIZE
 

Functions

static XLogRecDataXLogRecordAssemble (RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
 
static bool XLogCompressBackupBlock (const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
 
void XLogBeginInsert (void)
 
void XLogEnsureRecordSpace (int max_block_id, int ndatas)
 
void XLogResetInsertion (void)
 
void XLogRegisterBuffer (uint8 block_id, Buffer buffer, uint8 flags)
 
void XLogRegisterBlock (uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
 
void XLogRegisterData (const void *data, uint32 len)
 
void XLogRegisterBufData (uint8 block_id, const void *data, uint32 len)
 
void XLogSetRecordFlags (uint8 flags)
 
XLogRecPtr XLogInsert (RmgrId rmid, uint8 info)
 
XLogRecPtr XLogSimpleInsertInt64 (RmgrId rmid, uint8 info, int64 value)
 
XLogRecPtr XLogGetFakeLSN (Relation rel)
 
bool XLogCheckBufferNeedsBackup (Buffer buffer)
 
XLogRecPtr XLogSaveBufferForHint (Buffer buffer, bool buffer_std)
 
XLogRecPtr log_newpage (RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
 
void log_newpages (RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
 
XLogRecPtr log_newpage_buffer (Buffer buffer, bool page_std)
 
void log_newpage_range (Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
 
void InitXLogInsert (void)
 

Variables

static registered_bufferregistered_buffers
 
static int max_registered_buffers
 
static int max_registered_block_id = 0
 
static XLogRecDatamainrdata_head
 
static XLogRecDatamainrdata_last = (XLogRecData *) &mainrdata_head
 
static uint64 mainrdata_len
 
static uint8 curinsert_flags = 0
 
static XLogRecData hdr_rdt
 
static charhdr_scratch = NULL
 
static XLogRecDatardatas
 
static int num_rdatas
 
static int max_rdatas
 
static bool begininsert_called = false
 
static MemoryContext xloginsert_cxt
 

Macro Definition Documentation

◆ COMPRESS_BUFSIZE

Definition at line 65 of file xloginsert.c.

◆ HEADER_SCRATCH_SIZE

#define HEADER_SCRATCH_SIZE
Value:
#define SizeOfXlogOrigin
Definition xloginsert.c:119
#define SizeOfXLogTransactionId
Definition xloginsert.c:120
#define XLR_MAX_BLOCK_ID
Definition xlogrecord.h:239
#define MaxSizeOfXLogRecordBlockHeader
Definition xlogrecord.h:184
#define SizeOfXLogRecordDataHeaderLong
Definition xlogrecord.h:225
#define SizeOfXLogRecord
Definition xlogrecord.h:55

Definition at line 122 of file xloginsert.c.

153{
156 Assert(mainrdata_len == 0);
157
158 /* cross-check on whether we should be here or not */
159 if (!XLogInsertAllowed())
160 elog(ERROR, "cannot make new WAL entries during recovery");
161
163 elog(ERROR, "XLogBeginInsert was already called");
164
165 begininsert_called = true;
166}
167
168/*
169 * Ensure that there are enough buffer and data slots in the working area,
170 * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
171 * calls.
172 *
173 * There is always space for a small number of buffers and data chunks, enough
174 * for most record types. This function is for the exceptional cases that need
175 * more.
176 */
177void
178XLogEnsureRecordSpace(int max_block_id, int ndatas)
179{
180 int nbuffers;
181
182 /*
183 * This must be called before entering a critical section, because
184 * allocating memory inside a critical section can fail. repalloc() will
185 * check the same, but better to check it here too so that we fail
186 * consistently even if the arrays happen to be large enough already.
187 */
189
190 /* the minimum values can't be decreased */
191 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
192 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
195
196 if (max_block_id > XLR_MAX_BLOCK_ID)
197 elog(ERROR, "maximum number of WAL record block references exceeded");
198 nbuffers = max_block_id + 1;
199
200 if (nbuffers > max_registered_buffers)
201 {
203 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
204
205 /*
206 * At least the padding bytes in the structs must be zeroed, because
207 * they are included in WAL data, but initialize it all for tidiness.
208 */
210 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
211 max_registered_buffers = nbuffers;
212 }
213
214 if (ndatas > max_rdatas)
215 {
218 }
219}
220
221/*
222 * Reset WAL record construction buffers.
223 */
224void
226{
227 int i;
228
229 for (i = 0; i < max_registered_block_id; i++)
230 registered_buffers[i].in_use = false;
231
232 num_rdatas = 0;
234 mainrdata_len = 0;
236 curinsert_flags = 0;
237 begininsert_called = false;
238}
239
240/*
241 * Register a reference to a buffer with the WAL record being constructed.
242 * This must be called for every page that the WAL-logged operation modifies.
243 */
244void
246{
248
249 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
250 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
252
253 /*
254 * Ordinarily, buffer should be exclusive-locked and marked dirty before
255 * we get here, otherwise we could end up violating one of the rules in
256 * access/transam/README.
257 *
258 * Some callers intentionally register a clean page and never update that
259 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
260 * bypass these checks.
261 */
262#ifdef USE_ASSERT_CHECKING
263 if (!(flags & REGBUF_NO_CHANGE))
265 BufferIsDirty(buffer));
266#endif
267
269 {
271 elog(ERROR, "too many registered buffers");
273 }
274
276
277 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
278 regbuf->page = BufferGetPage(buffer);
279 regbuf->flags = flags;
280 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
281 regbuf->rdata_len = 0;
282
283 /*
284 * Check that this page hasn't already been registered with some other
285 * block_id.
286 */
287#ifdef USE_ASSERT_CHECKING
288 {
289 int i;
290
291 for (i = 0; i < max_registered_block_id; i++)
292 {
294
295 if (i == block_id || !regbuf_old->in_use)
296 continue;
297
298 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
299 regbuf_old->forkno != regbuf->forkno ||
300 regbuf_old->block != regbuf->block);
301 }
302 }
303#endif
304
305 regbuf->in_use = true;
306}
307
308/*
309 * Like XLogRegisterBuffer, but for registering a block that's not in the
310 * shared buffer pool (i.e. when you don't have a Buffer for it).
311 */
312void
314 BlockNumber blknum, const PageData *page, uint8 flags)
315{
317
319
322
324 elog(ERROR, "too many registered buffers");
325
327
328 regbuf->rlocator = *rlocator;
329 regbuf->forkno = forknum;
330 regbuf->block = blknum;
331 regbuf->page = page;
332 regbuf->flags = flags;
333 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
334 regbuf->rdata_len = 0;
335
336 /*
337 * Check that this page hasn't already been registered with some other
338 * block_id.
339 */
340#ifdef USE_ASSERT_CHECKING
341 {
342 int i;
343
344 for (i = 0; i < max_registered_block_id; i++)
345 {
347
348 if (i == block_id || !regbuf_old->in_use)
349 continue;
350
351 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
352 regbuf_old->forkno != regbuf->forkno ||
353 regbuf_old->block != regbuf->block);
354 }
355 }
356#endif
357
358 regbuf->in_use = true;
359}
360
361/*
362 * Add data to the WAL record that's being constructed.
363 *
364 * The data is appended to the "main chunk", available at replay with
365 * XLogRecGetData().
366 */
367void
368XLogRegisterData(const void *data, uint32 len)
369{
371
373
374 if (num_rdatas >= max_rdatas)
376 (errmsg_internal("too much WAL data"),
377 errdetail_internal("%d out of %d data segments are already in use.",
379 rdata = &rdatas[num_rdatas++];
380
381 rdata->data = data;
382 rdata->len = len;
383
384 /*
385 * we use the mainrdata_last pointer to track the end of the chain, so no
386 * need to clear 'next' here.
387 */
388
391
393}
394
395/*
396 * Add buffer-specific data to the WAL record that's being constructed.
397 *
398 * Block_id must reference a block previously registered with
399 * XLogRegisterBuffer(). If this is called more than once for the same
400 * block_id, the data is appended.
401 *
402 * The maximum amount of data that can be registered per block is 65535
403 * bytes. That should be plenty; if you need more than BLCKSZ bytes to
404 * reconstruct the changes to the page, you might as well just log a full
405 * copy of it. (the "main data" that's not associated with a block is not
406 * limited)
407 */
408void
410{
413
415
416 /* find the registered buffer struct */
418 if (!regbuf->in_use)
419 elog(ERROR, "no block with id %d registered with WAL insertion",
420 block_id);
421
422 /*
423 * Check against max_rdatas and ensure we do not register more data per
424 * buffer than can be handled by the physical data format; i.e. that
425 * regbuf->rdata_len does not grow beyond what
426 * XLogRecordBlockHeader->data_length can hold.
427 */
428 if (num_rdatas >= max_rdatas)
430 (errmsg_internal("too much WAL data"),
431 errdetail_internal("%d out of %d data segments are already in use.",
433 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
435 (errmsg_internal("too much WAL data"),
436 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
437 UINT16_MAX, block_id, regbuf->rdata_len, len)));
438
439 rdata = &rdatas[num_rdatas++];
440
441 rdata->data = data;
442 rdata->len = len;
443
444 regbuf->rdata_tail->next = rdata;
445 regbuf->rdata_tail = rdata;
446 regbuf->rdata_len += len;
447}
448
449/*
450 * Set insert status flags for the upcoming WAL record.
451 *
452 * The flags that can be used here are:
453 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
454 * included in the record.
455 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
456 * durability, which makes it possible to avoid triggering WAL archiving and other
457 * background activity.
458 */
459void
461{
463 curinsert_flags |= flags;
464}
465
466/*
467 * Insert an XLOG record having the specified RMID and info bytes, with the
468 * body of the record being the data and buffer references registered earlier
469 * with XLogRegister* calls.
470 *
471 * Returns XLOG pointer to end of record (beginning of next record).
472 * This can be used as LSN for data pages affected by the logged action.
473 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
474 * before the data page can be written out. This implements the basic
475 * WAL rule "write the log before the data".)
476 */
478XLogInsert(RmgrId rmid, uint8 info)
479{
481
482 /* XLogBeginInsert() must have been called. */
484 elog(ERROR, "XLogBeginInsert was not called");
485
486 /*
487 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
488 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
489 */
490 if ((info & ~(XLR_RMGR_INFO_MASK |
493 elog(PANIC, "invalid xlog info mask %02X", info);
494
495 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
496
497 /*
498 * In bootstrap mode, we don't actually log anything but XLOG resources;
499 * return a phony record pointer.
500 */
501 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
502 {
504 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
505 return EndPos;
506 }
507
508 do
509 {
511 bool doPageWrites;
512 bool topxid_included = false;
515 int num_fpi = 0;
516 uint64 fpi_bytes = 0;
517
518 /*
519 * Get values needed to decide whether to do full-page writes. Since
520 * we don't yet have an insertion lock, these could change under us,
521 * but XLogInsertRecord will recheck them once it has a lock.
522 */
524
528
531 } while (!XLogRecPtrIsValid(EndPos));
532
534
535 return EndPos;
536}
537
538/*
539 * Simple wrapper to XLogInsert to insert a WAL record with elementary
540 * contents (only an int64 is supported as value currently).
541 */
544{
546 XLogRegisterData(&value, sizeof(value));
547 return XLogInsert(rmid, info);
548}
549
550/*
551 * XLogGetFakeLSN - get a fake LSN for an index page that isn't WAL-logged.
552 *
553 * Some index AMs use LSNs to detect concurrent page modifications, but not
554 * all index pages are WAL-logged. This function provides a sequence of fake
555 * LSNs for that purpose.
556 */
559{
560 if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
561 {
562 /*
563 * Temporary relations are only accessible in our session, so a simple
564 * backend-local counter will do.
565 */
566 static XLogRecPtr counter = FirstNormalUnloggedLSN;
567
568 return counter++;
569 }
570 else if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
571 {
572 /*
573 * Unlogged relations are accessible from other backends, and survive
574 * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us.
575 */
577 }
578 else
579 {
580 /*
581 * WAL-logging on this relation will start after commit, so its LSNs
582 * must be distinct numbers smaller than the LSN at the next commit.
583 * Emit a dummy WAL record if insert-LSN hasn't advanced after the
584 * last call.
585 */
588
591
592 /* No need for an actual record if we already have a distinct LSN */
595
597 return currlsn;
598 }
599}
600
601/*
602 * Assemble a WAL record from the registered data and buffers into an
603 * XLogRecData chain, ready for insertion with XLogInsertRecord().
604 *
605 * The record header fields are filled in, except for the xl_prev field. The
606 * calculated CRC does not include the record header yet.
607 *
608 * If there are any registered buffers, and a full-page image was not taken
609 * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
610 * signals that the assembled record is only good for insertion on the
611 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
612 *
613 * *topxid_included is set if the topmost transaction ID is logged with the
614 * current subtransaction.
615 */
616static XLogRecData *
620 bool *topxid_included)
621{
623 uint64 total_len = 0;
624 int block_id;
629 char *scratch = hdr_scratch;
630
631 /*
632 * Note: this function can be called multiple times for the same record.
633 * All the modifications we do to the rdata chains below must handle that.
634 */
635
636 /* The record begins with the fixed-size header */
639
640 hdr_rdt.next = NULL;
643
644 /*
645 * Enforce consistency checks for this record if the user is looking for it.
646 * Do this at the beginning of this routine to give the possibility
647 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
648 * a record.
649 */
650 if (wal_consistency_checking[rmid])
651 info |= XLR_CHECK_CONSISTENCY;
652
653 /*
654 * Make an rdata chain containing all the data portions of all block
655 * references. This includes the data for full-page images. Also append
656 * the headers for the block references in the scratch buffer.
657 */
660 {
662 bool needs_backup;
663 bool needs_data;
667 bool samerel;
668 bool is_compressed = false;
669 bool include_image;
670
671 if (!regbuf->in_use)
672 continue;
673
674 /* Determine if this block needs to be backed up */
675 if (regbuf->flags & REGBUF_FORCE_IMAGE)
676 needs_backup = true;
677 else if (regbuf->flags & REGBUF_NO_IMAGE)
678 needs_backup = false;
679 else if (!doPageWrites)
680 needs_backup = false;
681 else
682 {
683 /*
684 * We assume page LSN is first data on *every* page that can be
685 * passed to XLogInsert, whether it has the standard page layout
686 * or not.
687 */
688 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
689
690 needs_backup = (page_lsn <= RedoRecPtr);
691 if (!needs_backup)
692 {
693 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
694 *fpw_lsn = page_lsn;
695 }
696 }
697
698 /* Determine if the buffer data needs to be included */
699 if (regbuf->rdata_len == 0)
700 needs_data = false;
701 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
702 needs_data = true;
703 else
705
706 bkpb.id = block_id;
707 bkpb.fork_flags = regbuf->forkno;
708 bkpb.data_length = 0;
709
710 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
711 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
712
713 /*
714 * If needs_backup is true or WAL checking is enabled for current
715 * resource manager, log a full-page write for the current block.
716 */
718
719 if (include_image)
720 {
721 const PageData *page = regbuf->page;
723
724 /*
725 * The page needs to be backed up, so calculate its hole length
726 * and offset.
727 */
728 if (regbuf->flags & REGBUF_STANDARD)
729 {
730 /* Assume we can omit data between pd_lower and pd_upper */
731 uint16 lower = ((const PageHeaderData *) page)->pd_lower;
732 uint16 upper = ((const PageHeaderData *) page)->pd_upper;
733
735 upper > lower &&
736 upper <= BLCKSZ)
737 {
738 bimg.hole_offset = lower;
739 cbimg.hole_length = upper - lower;
740 }
741 else
742 {
743 /* No "hole" to remove */
744 bimg.hole_offset = 0;
745 cbimg.hole_length = 0;
746 }
747 }
748 else
749 {
750 /* Not a standard page header, don't try to eliminate "hole" */
751 bimg.hole_offset = 0;
752 cbimg.hole_length = 0;
753 }
754
755 /*
756 * Try to compress a block image if wal_compression is enabled
757 */
759 {
761 XLogCompressBackupBlock(page, bimg.hole_offset,
762 cbimg.hole_length,
763 regbuf->compressed_page,
765 }
766
767 /*
768 * Fill in the remaining fields in the XLogRecordBlockHeader
769 * struct
770 */
771 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
772
773 /* Report a full page image constructed for the WAL record */
774 *num_fpi += 1;
775
776 /*
777 * Construct XLogRecData entries for the page content.
778 */
779 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
781
782 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
783
784 /*
785 * If WAL consistency checking is enabled for the resource manager
786 * of this WAL record, a full-page image is included in the record
787 * for the block modified. During redo, the full-page is replayed
788 * only if BKPIMAGE_APPLY is set.
789 */
790 if (needs_backup)
791 bimg.bimg_info |= BKPIMAGE_APPLY;
792
793 if (is_compressed)
794 {
795 /* The current compression is stored in the WAL record */
796 bimg.length = compressed_len;
797
798 /* Set the compression method used for this block */
800 {
802 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
803 break;
804
806#ifdef USE_LZ4
807 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
808#else
809 elog(ERROR, "LZ4 is not supported by this build");
810#endif
811 break;
812
814#ifdef USE_ZSTD
815 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
816#else
817 elog(ERROR, "zstd is not supported by this build");
818#endif
819 break;
820
822 Assert(false); /* cannot happen */
823 break;
824 /* no default case, so that compiler will warn */
825 }
826
827 rdt_datas_last->data = regbuf->compressed_page;
829 }
830 else
831 {
832 bimg.length = BLCKSZ - cbimg.hole_length;
833
834 if (cbimg.hole_length == 0)
835 {
836 rdt_datas_last->data = page;
837 rdt_datas_last->len = BLCKSZ;
838 }
839 else
840 {
841 /* must skip the hole */
842 rdt_datas_last->data = page;
843 rdt_datas_last->len = bimg.hole_offset;
844
845 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
847
848 rdt_datas_last->data =
849 page + (bimg.hole_offset + cbimg.hole_length);
850 rdt_datas_last->len =
851 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
852 }
853 }
854
855 total_len += bimg.length;
856
857 /* Track the WAL full page images in bytes */
858 *fpi_bytes += bimg.length;
859 }
860
861 if (needs_data)
862 {
863 /*
864 * When copying to XLogRecordBlockHeader, the length is narrowed
865 * to a uint16. Double-check that it is still correct.
866 */
867 Assert(regbuf->rdata_len <= UINT16_MAX);
868
869 /*
870 * Link the caller-supplied rdata chain for this buffer to the
871 * overall list.
872 */
873 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
874 bkpb.data_length = (uint16) regbuf->rdata_len;
875 total_len += regbuf->rdata_len;
876
877 rdt_datas_last->next = regbuf->rdata_head;
878 rdt_datas_last = regbuf->rdata_tail;
879 }
880
881 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
882 {
883 samerel = true;
884 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
885 }
886 else
887 samerel = false;
889
890 /* Ok, copy the header to the scratch buffer */
893 if (include_image)
894 {
897 if (cbimg.hole_length != 0 && is_compressed)
898 {
902 }
903 }
904 if (!samerel)
905 {
906 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
907 scratch += sizeof(RelFileLocator);
908 }
909 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
910 scratch += sizeof(BlockNumber);
911 }
912
913 /* followed by the record's origin, if any */
916 {
917 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
920 }
921
922 /* followed by toplevel XID, if not already included in previous record */
924 {
926
927 /* Set the flag that the top xid is included in the WAL */
928 *topxid_included = true;
929
931 memcpy(scratch, &xid, sizeof(TransactionId));
932 scratch += sizeof(TransactionId);
933 }
934
935 /* followed by main data, if any */
936 if (mainrdata_len > 0)
937 {
938 if (mainrdata_len > 255)
939 {
941
944 (errmsg_internal("too much WAL data"),
945 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
947 PG_UINT32_MAX)));
948
950 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
952 scratch += sizeof(uint32);
953 }
954 else
955 {
956 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
957 *(scratch++) = (uint8) mainrdata_len;
958 }
961 total_len += mainrdata_len;
962 }
964
966 total_len += hdr_rdt.len;
967
968 /*
969 * Calculate CRC of the data
970 *
971 * Note that the record header isn't added into the CRC initially since we
972 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
973 * the whole record in the order: rdata, then backup blocks, then record
974 * header.
975 */
978 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
979 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
980
981 /*
982 * Ensure that the XLogRecord is not too large.
983 *
984 * XLogReader machinery is only able to handle records up to a certain
985 * size (ignoring machine resource limitations), so make sure that we will
986 * not emit records larger than the sizes advertised to be supported.
987 */
988 if (total_len > XLogRecordMaxSize)
990 (errmsg_internal("oversized WAL record"),
991 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
992 total_len, XLogRecordMaxSize, rmid, info)));
993
994 /*
995 * Fill in the fields in the record header. Prev-link is filled in later,
996 * once we know where in the WAL the record will be inserted. The CRC does
997 * not include the record header yet.
998 */
1000 rechdr->xl_tot_len = (uint32) total_len;
1001 rechdr->xl_info = info;
1002 rechdr->xl_rmid = rmid;
1003 rechdr->xl_prev = InvalidXLogRecPtr;
1004 rechdr->xl_crc = rdata_crc;
1005
1006 return &hdr_rdt;
1007}
1008
1009/*
1010 * Create a compressed version of a backup block image.
1011 *
1012 * Returns false if compression fails (i.e., compressed result is actually
1013 * bigger than original). Otherwise, returns true and sets 'dlen' to
1014 * the length of compressed block image.
1015 */
1016static bool
1017XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length,
1018 void *dest, uint16 *dlen)
1019{
1020 int32 orig_len = BLCKSZ - hole_length;
1021 int32 len = -1;
1022 int32 extra_bytes = 0;
1023 const void *source;
1024 PGAlignedBlock tmp;
1025
1026 if (hole_length != 0)
1027 {
1028 /* must skip the hole */
1029 memcpy(tmp.data, page, hole_offset);
1030 memcpy(tmp.data + hole_offset,
1031 page + (hole_offset + hole_length),
1032 BLCKSZ - (hole_length + hole_offset));
1033 source = tmp.data;
1034
1035 /*
1036 * Extra data needs to be stored in WAL record for the compressed
1037 * version of block image if the hole exists.
1038 */
1040 }
1041 else
1042 source = page;
1043
1045 {
1048 break;
1049
1051#ifdef USE_LZ4
1054 if (len <= 0)
1055 len = -1; /* failure */
1056#else
1057 elog(ERROR, "LZ4 is not supported by this build");
1058#endif
1059 break;
1060
1062#ifdef USE_ZSTD
1065 if (ZSTD_isError(len))
1066 len = -1; /* failure */
1067#else
1068 elog(ERROR, "zstd is not supported by this build");
1069#endif
1070 break;
1071
1073 Assert(false); /* cannot happen */
1074 break;
1075 /* no default case, so that compiler will warn */
1076 }
1077
1078 /*
1079 * We recheck the actual size even if compression reports success and see
1080 * if the number of bytes saved by compression is larger than the length
1081 * of extra data needed for the compressed version of block image.
1082 */
1083 if (len >= 0 &&
1085 {
1086 *dlen = (uint16) len; /* successful compression */
1087 return true;
1088 }
1089 return false;
1090}
1091
1092/*
1093 * Determine whether the buffer referenced has to be backed up.
1094 *
1095 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
1096 * (which forces full-page writes) could change later, so the result should
1097 * be used for optimization purposes only.
1098 */
1099bool
1101{
1103 bool doPageWrites;
1104 Page page;
1105
1107
1108 page = BufferGetPage(buffer);
1109
1110 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1111 return true; /* buffer requires backup */
1112
1113 return false; /* buffer does not need to be backed up */
1114}
1115
1116/*
1117 * Write a backup block if needed when we are setting a hint. Note that
1118 * this may be called for a variety of page types, not just heaps.
1119 *
1120 * Callable while holding just a share lock on the buffer
1121 * content. That suffices to prevent concurrent modifications of the
1122 * buffer. The buffer already needs to have been marked dirty by
1123 * MarkBufferDirtyHint().
1124 *
1125 * We can't use the plain backup block mechanism since that relies on the
1126 * Buffer being exclusively locked. Since some modifications (setting LSN, hint
1127 * bits) are allowed in a sharelocked buffer that can lead to wal checksum
1128 * failures. So instead we copy the page and insert the copied data as normal
1129 * record data.
1130 *
1131 * We only need to do something if page has not yet been full page written in
1132 * this checkpoint round. The LSN of the inserted wal record is returned if we
1133 * had to write, InvalidXLogRecPtr otherwise.
1134 */
1137{
1139 XLogRecPtr lsn;
1141
1142 /* this also verifies that we hold an appropriate lock */
1143 Assert(BufferIsDirty(buffer));
1144
1145 /*
1146 * Update RedoRecPtr so that we can make the right decision. It's possible
1147 * that a new checkpoint will start just after GetRedoRecPtr(), but that
1148 * is ok, as the buffer is already dirty, ensuring that any BufferSync()
1149 * started after the buffer was marked dirty cannot complete without
1150 * flushing this buffer. If a checkpoint started between marking the
1151 * buffer dirty and this check, we will emit an unnecessary WAL record (as
1152 * the buffer will be written out as part of the checkpoint), but the
1153 * window for that is not big.
1154 */
1156
1157 /*
1158 * We assume page LSN is first data on *every* page that can be passed to
1159 * XLogInsert, whether it has the standard page layout or not.
1160 */
1161 lsn = PageGetLSN(BufferGetPage(buffer));
1162
1163 if (lsn <= RedoRecPtr)
1164 {
1165 int flags = 0;
1167 char *origdata = (char *) BufferGetBlock(buffer);
1168 RelFileLocator rlocator;
1169 ForkNumber forkno;
1170 BlockNumber blkno;
1171
1172 /*
1173 * Copy buffer so we don't have to worry about concurrent hint bit or
1174 * lsn updates. We assume pd_lower/upper cannot be changed without an
1175 * exclusive lock, so the copied contents are not racy.
1176 */
1177 if (buffer_std)
1178 {
1179 /* Assume we can omit data between pd_lower and pd_upper */
1180 Page page = BufferGetPage(buffer);
1181 uint16 lower = ((PageHeader) page)->pd_lower;
1182 uint16 upper = ((PageHeader) page)->pd_upper;
1183
1186 }
1187 else
1189
1191
1192 if (buffer_std)
1193 flags |= REGBUF_STANDARD;
1194
1195 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
1196 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
1197
1199 }
1200
1201 return recptr;
1202}
1203
1204/*
1205 * Write a WAL record containing a full image of a page. Caller is responsible
1206 * for writing the page to disk after calling this routine.
1207 *
1208 * Note: If you're using this function, you should be building pages in private
1209 * memory and writing them directly to smgr. If you're using buffers, call
1210 * log_newpage_buffer instead.
1211 *
1212 * If the page follows the standard page layout, with a PageHeader and unused
1213 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1214 * the unused space to be left out from the WAL record, making it smaller.
1215 */
1217log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno,
1218 Page page, bool page_std)
1219{
1220 int flags;
1222
1223 flags = REGBUF_FORCE_IMAGE;
1224 if (page_std)
1225 flags |= REGBUF_STANDARD;
1226
1228 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1230
1231 /*
1232 * The page may be uninitialized. If so, we can't set the LSN because that
1233 * would corrupt the page.
1234 */
1235 if (!PageIsNew(page))
1236 {
1237 PageSetLSN(page, recptr);
1238 }
1239
1240 return recptr;
1241}
1242
1243/*
1244 * Like log_newpage(), but allows logging multiple pages in one operation.
1245 * It is more efficient than calling log_newpage() for each page separately,
1246 * because we can write multiple pages in a single WAL record.
1247 */
1248void
1249log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages,
1250 BlockNumber *blknos, Page *pages, bool page_std)
1251{
1252 int flags;
1254 int i;
1255 int j;
1256
1257 flags = REGBUF_FORCE_IMAGE;
1258 if (page_std)
1259 flags |= REGBUF_STANDARD;
1260
1261 /*
1262 * Iterate over all the pages. They are collected into batches of
1263 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1264 * batch.
1265 */
1267
1268 i = 0;
1269 while (i < num_pages)
1270 {
1271 int batch_start = i;
1272 int nbatch;
1273
1275
1276 nbatch = 0;
1277 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1278 {
1279 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1280 i++;
1281 nbatch++;
1282 }
1283
1285
1286 for (j = batch_start; j < i; j++)
1287 {
1288 /*
1289 * The page may be uninitialized. If so, we can't set the LSN
1290 * because that would corrupt the page.
1291 */
1292 if (!PageIsNew(pages[j]))
1293 {
1294 PageSetLSN(pages[j], recptr);
1295 }
1296 }
1297 }
1298}
1299
1300/*
1301 * Write a WAL record containing a full image of a page.
1302 *
1303 * Caller should initialize the buffer and mark it dirty before calling this
1304 * function. This function will set the page LSN.
1305 *
1306 * If the page follows the standard page layout, with a PageHeader and unused
1307 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1308 * the unused space to be left out from the WAL record, making it smaller.
1309 */
1311log_newpage_buffer(Buffer buffer, bool page_std)
1312{
1313 Page page = BufferGetPage(buffer);
1314 RelFileLocator rlocator;
1315 ForkNumber forknum;
1316 BlockNumber blkno;
1317
1318 /* Shared buffers should be modified in a critical section. */
1320
1321 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1322
1323 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1324}
1325
1326/*
1327 * WAL-log a range of blocks in a relation.
1328 *
1329 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1330 * written to the WAL. If the range is large, this is done in multiple WAL
1331 * records.
1332 *
1333 * If all pages follow the standard page layout, with a PageHeader and unused
1334 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1335 * the unused space to be left out from the WAL records, making them smaller.
1336 *
1337 * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1338 * is used on a newly-built relation, and the caller is holding a
1339 * AccessExclusiveLock on it, so no other backend can be accessing it at the
1340 * same time. If that's not the case, you must ensure that this does not
1341 * cause a deadlock through some other means.
1342 */
1343void
1346 bool page_std)
1347{
1348 int flags;
1349 BlockNumber blkno;
1350
1351 flags = REGBUF_FORCE_IMAGE;
1352 if (page_std)
1353 flags |= REGBUF_STANDARD;
1354
1355 /*
1356 * Iterate over all the pages in the range. They are collected into
1357 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1358 * for each batch.
1359 */
1361
1362 blkno = startblk;
1363 while (blkno < endblk)
1364 {
1367 int nbufs;
1368 int i;
1369
1371
1372 /* Collect a batch of blocks. */
1373 nbufs = 0;
1374 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1375 {
1376 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1377 RBM_NORMAL, NULL);
1378
1380
1381 /*
1382 * Completely empty pages are not WAL-logged. Writing a WAL record
1383 * would change the LSN, and we don't want that. We want the page
1384 * to stay empty.
1385 */
1387 bufpack[nbufs++] = buf;
1388 else
1390 blkno++;
1391 }
1392
1393 /* Nothing more to do if all remaining blocks were empty. */
1394 if (nbufs == 0)
1395 break;
1396
1397 /* Write WAL record for this batch. */
1399
1401 for (i = 0; i < nbufs; i++)
1402 {
1404 XLogRegisterBuffer(i, bufpack[i], flags);
1405 }
1406
1408
1409 for (i = 0; i < nbufs; i++)
1411
1413
1414 for (i = 0; i < nbufs; i++)
1416 }
1417}
1418
1419/*
1420 * Allocate working buffers needed for WAL record construction.
1421 */
1422void
1423InitXLogInsert(void)
1424{
1425#ifdef USE_ASSERT_CHECKING
1426
1427 /*
1428 * Check that any records assembled can be decoded. This is capped based
1429 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1430 * addend covers the larger allocate_recordbuf() demand. This code path
1431 * is called once per backend, more than enough for this check.
1432 */
1433 size_t max_required =
1435
1437#endif
1438
1439 /* Initialize the working areas */
1440 if (xloginsert_cxt == NULL)
1441 {
1443 "WAL record construction",
1445 }
1446
1447 if (registered_buffers == NULL)
1448 {
1453 }
1454 if (rdatas == NULL)
1455 {
1457 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1459 }
1460
1461 /*
1462 * Allocate a buffer to hold the header information for a WAL record.
1463 */
1464 if (hdr_scratch == NULL)
1467}
uint32 BlockNumber
Definition block.h:31
int Buffer
Definition buf.h:23
bool BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
Definition bufmgr.c:3003
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4378
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3030
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5522
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3063
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition bufmgr.c:921
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:470
static Block BufferGetBlock(Buffer buffer)
Definition bufmgr.h:437
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:220
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:332
@ RBM_NORMAL
Definition bufmgr.h:46
PageHeaderData * PageHeader
Definition bufpage.h:199
char PageData
Definition bufpage.h:80
static bool PageIsNew(const PageData *page)
Definition bufpage.h:259
#define SizeOfPageHeaderData
Definition bufpage.h:242
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:417
PageData * Page
Definition bufpage.h:81
static XLogRecPtr PageGetLSN(const PageData *page)
Definition bufpage.h:411
uint8_t uint8
Definition c.h:616
#define PG_UINT32_MAX
Definition c.h:676
#define Assert(condition)
Definition c.h:945
int64_t int64
Definition c.h:615
int32_t int32
Definition c.h:614
uint64_t uint64
Definition c.h:619
uint16_t uint16
Definition c.h:617
uint32_t uint32
Definition c.h:618
#define MemSet(start, val, len)
Definition c.h:1109
uint32 TransactionId
Definition c.h:738
int int errdetail_internal(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define PANIC
Definition elog.h:42
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
volatile uint32 CritSectionCount
Definition globals.c:45
static struct @174 value
int j
Definition isn.c:78
int i
Definition isn.c:77
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
MemoryContext TopMemoryContext
Definition mcxt.c:166
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AllocSizeIsValid(size)
Definition memutils.h:42
#define IsBootstrapProcessingMode()
Definition miscadmin.h:477
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define END_CRIT_SECTION()
Definition miscadmin.h:152
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
ReplOriginXactState replorigin_xact_state
Definition origin.c:167
#define InvalidReplOriginId
Definition origin.h:33
#define XLOG_FPI
Definition pg_control.h:80
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:79
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:153
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
const void size_t len
const void * data
const PGLZ_Strategy *const PGLZ_strategy_default
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
static int fb(int x)
#define RelationNeedsWAL(relation)
Definition rel.h:637
#define RelationIsPermanent(relation)
Definition rel.h:626
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition relpath.h:56
uint8 RmgrId
Definition rmgr.h:11
char data[BLCKSZ]
Definition c.h:1206
Form_pg_class rd_rel
Definition rel.h:111
ReplOriginId origin
Definition origin.h:45
const void * data
struct XLogRecData * next
RelFileLocator rlocator
Definition xloginsert.c:75
Datum batch_start(PG_FUNCTION_ARGS)
Definition test_aio.c:668
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:443
TransactionId GetCurrentTransactionIdIfAny(void)
Definition xact.c:473
bool IsSubxactTopXidLogPending(void)
Definition xact.c:561
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6577
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:750
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6547
static XLogRecPtr RedoRecPtr
Definition xlog.c:277
static bool doPageWrites
Definition xlog.c:290
int wal_compression
Definition xlog.c:128
bool XLogInsertAllowed(void)
Definition xlog.c:6499
XLogRecPtr GetFakeLSNForUnloggedRel(void)
Definition xlog.c:4660
XLogRecPtr GetXLogInsertEndRecPtr(void)
Definition xlog.c:9630
XLogRecPtr XLogAssignLSN(void)
Definition xlog.c:8237
bool * wal_consistency_checking
Definition xlog.c:130
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:165
WalCompression
Definition xlog.h:82
@ WAL_COMPRESSION_NONE
Definition xlog.h:83
@ WAL_COMPRESSION_LZ4
Definition xlog.h:85
@ WAL_COMPRESSION_PGLZ
Definition xlog.h:84
@ WAL_COMPRESSION_ZSTD
Definition xlog.h:86
#define SizeOfXLogLongPHD
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define FirstNormalUnloggedLSN
Definition xlogdefs.h:37
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
static XLogRecData * mainrdata_head
Definition xloginsert.c:101
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
XLogRecPtr XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
Definition xloginsert.c:544
static int max_registered_buffers
Definition xloginsert.c:93
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:479
static uint8 curinsert_flags
Definition xloginsert.c:106
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:410
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:369
static uint64 mainrdata_len
Definition xloginsert.c:103
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
static bool begininsert_called
Definition xloginsert.c:135
static int max_registered_block_id
Definition xloginsert.c:94
XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
void InitXLogInsert(void)
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:461
static int num_rdatas
Definition xloginsert.c:132
void log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:314
static XLogRecData * mainrdata_last
Definition xloginsert.c:102
static MemoryContext xloginsert_cxt
Definition xloginsert.c:138
void log_newpage_range(Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
void XLogResetInsertion(void)
Definition xloginsert.c:226
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
static XLogRecData hdr_rdt
Definition xloginsert.c:116
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
Definition xloginsert.c:618
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:246
static char * hdr_scratch
Definition xloginsert.c:117
static XLogRecData * rdatas
Definition xloginsert.c:131
void XLogBeginInsert(void)
Definition xloginsert.c:153
XLogRecPtr XLogGetFakeLSN(Relation rel)
Definition xloginsert.c:559
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition xloginsert.c:179
#define COMPRESS_BUFSIZE
Definition xloginsert.c:65
static registered_buffer * registered_buffers
Definition xloginsert.c:92
static int max_rdatas
Definition xloginsert.c:133
#define HEADER_SCRATCH_SIZE
Definition xloginsert.c:122
#define REGBUF_NO_CHANGE
Definition xloginsert.h:37
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define XLR_NORMAL_MAX_BLOCK_ID
Definition xloginsert.h:28
#define REGBUF_FORCE_IMAGE
Definition xloginsert.h:32
#define XLR_NORMAL_RDATAS
Definition xloginsert.h:29
#define REGBUF_NO_IMAGE
Definition xloginsert.h:33
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define SizeOfXLogRecordBlockImageHeader
Definition xlogrecord.h:153
#define XLogRecordMaxSize
Definition xlogrecord.h:74
#define BKPIMAGE_COMPRESS_ZSTD
Definition xlogrecord.h:162
#define BKPBLOCK_HAS_DATA
Definition xlogrecord.h:198
#define BKPIMAGE_APPLY
Definition xlogrecord.h:158
#define BKPIMAGE_HAS_HOLE
Definition xlogrecord.h:157
#define XLR_BLOCK_ID_DATA_LONG
Definition xlogrecord.h:242
#define BKPBLOCK_WILL_INIT
Definition xlogrecord.h:199
#define XLR_RMGR_INFO_MASK
Definition xlogrecord.h:63
#define BKPIMAGE_COMPRESS_LZ4
Definition xlogrecord.h:161
#define XLR_BLOCK_ID_TOPLEVEL_XID
Definition xlogrecord.h:244
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define SizeOfXLogRecordBlockCompressHeader
Definition xlogrecord.h:177
#define BKPBLOCK_SAME_REL
Definition xlogrecord.h:200
#define XLR_SPECIAL_REL_UPDATE
Definition xlogrecord.h:82
#define SizeOfXLogRecordBlockHeader
Definition xlogrecord.h:115
#define BKPIMAGE_COMPRESS_PGLZ
Definition xlogrecord.h:160
#define XLR_BLOCK_ID_ORIGIN
Definition xlogrecord.h:243
#define BKPBLOCK_HAS_IMAGE
Definition xlogrecord.h:197
#define XLR_CHECK_CONSISTENCY
Definition xlogrecord.h:91

◆ LZ4_MAX_BLCKSZ

#define LZ4_MAX_BLCKSZ   0

Definition at line 53 of file xloginsert.c.

◆ PGLZ_MAX_BLCKSZ

#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)

Definition at line 62 of file xloginsert.c.

◆ SizeOfXlogOrigin

#define SizeOfXlogOrigin   (sizeof(ReplOriginId) + sizeof(char))

Definition at line 119 of file xloginsert.c.

◆ SizeOfXLogTransactionId

#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))

Definition at line 120 of file xloginsert.c.

◆ ZSTD_MAX_BLCKSZ

#define ZSTD_MAX_BLCKSZ   0

Definition at line 59 of file xloginsert.c.

Function Documentation

◆ InitXLogInsert()

void InitXLogInsert ( void  )

Definition at line 1424 of file xloginsert.c.

1425{
1426#ifdef USE_ASSERT_CHECKING
1427
1428 /*
1429 * Check that any records assembled can be decoded. This is capped based
1430 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1431 * addend covers the larger allocate_recordbuf() demand. This code path
1432 * is called once per backend, more than enough for this check.
1433 */
1434 size_t max_required =
1436
1438#endif
1439
1440 /* Initialize the working areas */
1441 if (xloginsert_cxt == NULL)
1442 {
1444 "WAL record construction",
1446 }
1447
1448 if (registered_buffers == NULL)
1449 {
1454 }
1455 if (rdatas == NULL)
1456 {
1458 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1460 }
1461
1462 /*
1463 * Allocate a buffer to hold the header information for a WAL record.
1464 */
1465 if (hdr_scratch == NULL)
1468}

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AllocSizeIsValid, Assert, DecodeXLogRecordRequiredSpace(), fb(), hdr_scratch, HEADER_SCRATCH_SIZE, max_rdatas, max_registered_buffers, MemoryContextAlloc(), MemoryContextAllocZero(), rdatas, registered_buffers, TopMemoryContext, xloginsert_cxt, XLogRecordMaxSize, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by BaseInit().

◆ log_newpage()

XLogRecPtr log_newpage ( RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blkno,
Page  page,
bool  page_std 
)

Definition at line 1218 of file xloginsert.c.

1220{
1221 int flags;
1223
1224 flags = REGBUF_FORCE_IMAGE;
1225 if (page_std)
1226 flags |= REGBUF_STANDARD;
1227
1229 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1231
1232 /*
1233 * The page may be uninitialized. If so, we can't set the LSN because that
1234 * would corrupt the page.
1235 */
1236 if (!PageIsNew(page))
1237 {
1238 PageSetLSN(page, recptr);
1239 }
1240
1241 return recptr;
1242}

References fb(), PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogInsert(), and XLogRegisterBlock().

Referenced by _hash_alloc_buckets(), _hash_init(), and log_newpage_buffer().

◆ log_newpage_buffer()

XLogRecPtr log_newpage_buffer ( Buffer  buffer,
bool  page_std 
)

Definition at line 1312 of file xloginsert.c.

1313{
1314 Page page = BufferGetPage(buffer);
1315 RelFileLocator rlocator;
1316 ForkNumber forknum;
1317 BlockNumber blkno;
1318
1319 /* Shared buffers should be modified in a critical section. */
1321
1322 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1323
1324 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1325}

References Assert, BufferGetPage(), BufferGetTag(), CritSectionCount, and log_newpage().

Referenced by brin_initialize_empty_new_buffer(), brinbuildempty(), FreeSpaceMapPrepareTruncateRel(), ginbuildempty(), gistbuildempty(), heap_force_common(), lazy_scan_new_or_empty(), RelationCopyStorageUsingBuffer(), and visibilitymap_prepare_truncate().

◆ log_newpage_range()

void log_newpage_range ( Relation  rel,
ForkNumber  forknum,
BlockNumber  startblk,
BlockNumber  endblk,
bool  page_std 
)

Definition at line 1345 of file xloginsert.c.

1348{
1349 int flags;
1350 BlockNumber blkno;
1351
1352 flags = REGBUF_FORCE_IMAGE;
1353 if (page_std)
1354 flags |= REGBUF_STANDARD;
1355
1356 /*
1357 * Iterate over all the pages in the range. They are collected into
1358 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1359 * for each batch.
1360 */
1362
1363 blkno = startblk;
1364 while (blkno < endblk)
1365 {
1368 int nbufs;
1369 int i;
1370
1372
1373 /* Collect a batch of blocks. */
1374 nbufs = 0;
1375 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1376 {
1377 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1378 RBM_NORMAL, NULL);
1379
1381
1382 /*
1383 * Completely empty pages are not WAL-logged. Writing a WAL record
1384 * would change the LSN, and we don't want that. We want the page
1385 * to stay empty.
1386 */
1388 bufpack[nbufs++] = buf;
1389 else
1391 blkno++;
1392 }
1393
1394 /* Nothing more to do if all remaining blocks were empty. */
1395 if (nbufs == 0)
1396 break;
1397
1398 /* Write WAL record for this batch. */
1400
1402 for (i = 0; i < nbufs; i++)
1403 {
1405 XLogRegisterBuffer(i, bufpack[i], flags);
1406 }
1407
1409
1410 for (i = 0; i < nbufs; i++)
1412
1414
1415 for (i = 0; i < nbufs; i++)
1417 }
1418}

References buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), CHECK_FOR_INTERRUPTS, END_CRIT_SECTION, fb(), i, LockBuffer(), MarkBufferDirty(), PageIsNew(), PageSetLSN(), RBM_NORMAL, ReadBufferExtended(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBuffer(), and XLR_MAX_BLOCK_ID.

Referenced by ginbuild(), gistbuild(), smgrDoPendingSyncs(), and spgbuild().

◆ log_newpages()

void log_newpages ( RelFileLocator rlocator,
ForkNumber  forknum,
int  num_pages,
BlockNumber blknos,
Page pages,
bool  page_std 
)

Definition at line 1250 of file xloginsert.c.

1252{
1253 int flags;
1255 int i;
1256 int j;
1257
1258 flags = REGBUF_FORCE_IMAGE;
1259 if (page_std)
1260 flags |= REGBUF_STANDARD;
1261
1262 /*
1263 * Iterate over all the pages. They are collected into batches of
1264 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1265 * batch.
1266 */
1268
1269 i = 0;
1270 while (i < num_pages)
1271 {
1272 int batch_start = i;
1273 int nbatch;
1274
1276
1277 nbatch = 0;
1278 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1279 {
1280 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1281 i++;
1282 nbatch++;
1283 }
1284
1286
1287 for (j = batch_start; j < i; j++)
1288 {
1289 /*
1290 * The page may be uninitialized. If so, we can't set the LSN
1291 * because that would corrupt the page.
1292 */
1293 if (!PageIsNew(pages[j]))
1294 {
1295 PageSetLSN(pages[j], recptr);
1296 }
1297 }
1298 }
1299}

References batch_start(), fb(), i, j, PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBlock(), and XLR_MAX_BLOCK_ID.

Referenced by smgr_bulk_flush().

◆ XLogBeginInsert()

void XLogBeginInsert ( void  )

Definition at line 153 of file xloginsert.c.

154{
157 Assert(mainrdata_len == 0);
158
159 /* cross-check on whether we should be here or not */
160 if (!XLogInsertAllowed())
161 elog(ERROR, "cannot make new WAL entries during recovery");
162
164 elog(ERROR, "XLogBeginInsert was already called");
165
166 begininsert_called = true;
167}

References Assert, begininsert_called, elog, ERROR, mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, and XLogInsertAllowed().

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), 
UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogCheckBufferNeedsBackup()

bool XLogCheckBufferNeedsBackup ( Buffer  buffer)

Definition at line 1101 of file xloginsert.c.

1102{
1104 bool doPageWrites;
1105 Page page;
1106
1108
1109 page = BufferGetPage(buffer);
1110
1111 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1112 return true; /* buffer requires backup */
1113
1114 return false; /* buffer does not need to be backed up */
1115}

References BufferGetPage(), doPageWrites, GetFullPageWriteInfo(), PageGetLSN(), and RedoRecPtr.

Referenced by heap_page_will_freeze(), and log_heap_update().

◆ XLogCompressBackupBlock()

static bool XLogCompressBackupBlock ( const PageData page,
uint16  hole_offset,
uint16  hole_length,
void dest,
uint16 dlen 
)
static

Definition at line 1018 of file xloginsert.c.

1020{
1021 int32 orig_len = BLCKSZ - hole_length;
1022 int32 len = -1;
1023 int32 extra_bytes = 0;
1024 const void *source;
1025 PGAlignedBlock tmp;
1026
1027 if (hole_length != 0)
1028 {
1029 /* must skip the hole */
1030 memcpy(tmp.data, page, hole_offset);
1031 memcpy(tmp.data + hole_offset,
1032 page + (hole_offset + hole_length),
1033 BLCKSZ - (hole_length + hole_offset));
1034 source = tmp.data;
1035
1036 /*
1037 * Extra data needs to be stored in WAL record for the compressed
1038 * version of block image if the hole exists.
1039 */
1041 }
1042 else
1043 source = page;
1044
1046 {
1049 break;
1050
1052#ifdef USE_LZ4
1055 if (len <= 0)
1056 len = -1; /* failure */
1057#else
1058 elog(ERROR, "LZ4 is not supported by this build");
1059#endif
1060 break;
1061
1063#ifdef USE_ZSTD
1066 if (ZSTD_isError(len))
1067 len = -1; /* failure */
1068#else
1069 elog(ERROR, "zstd is not supported by this build");
1070#endif
1071 break;
1072
1074 Assert(false); /* cannot happen */
1075 break;
1076 /* no default case, so that compiler will warn */
1077 }
1078
1079 /*
1080 * We recheck the actual size even if compression reports success and see
1081 * if the number of bytes saved by compression is larger than the length
1082 * of extra data needed for the compressed version of block image.
1083 */
1084 if (len >= 0 &&
1086 {
1087 *dlen = (uint16) len; /* successful compression */
1088 return true;
1089 }
1090 return false;
1091}

References Assert, COMPRESS_BUFSIZE, PGAlignedBlock::data, elog, ERROR, fb(), len, pglz_compress(), PGLZ_strategy_default, SizeOfXLogRecordBlockCompressHeader, source, wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, and WAL_COMPRESSION_ZSTD.

Referenced by XLogRecordAssemble().

◆ XLogEnsureRecordSpace()

void XLogEnsureRecordSpace ( int  max_block_id,
int  ndatas 
)

Definition at line 179 of file xloginsert.c.

180{
181 int nbuffers;
182
183 /*
184 * This must be called before entering a critical section, because
185 * allocating memory inside a critical section can fail. repalloc() will
186 * check the same, but better to check it here too so that we fail
187 * consistently even if the arrays happen to be large enough already.
188 */
190
191 /* the minimum values can't be decreased */
192 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
193 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
196
197 if (max_block_id > XLR_MAX_BLOCK_ID)
198 elog(ERROR, "maximum number of WAL record block references exceeded");
199 nbuffers = max_block_id + 1;
200
201 if (nbuffers > max_registered_buffers)
202 {
204 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
205
206 /*
207 * At least the padding bytes in the structs must be zeroed, because
208 * they are included in WAL data, but initialize it all for tidiness.
209 */
211 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
212 max_registered_buffers = nbuffers;
213 }
214
215 if (ndatas > max_rdatas)
216 {
219 }
220}

References Assert, CritSectionCount, elog, ERROR, fb(), max_rdatas, max_registered_buffers, MemSet, rdatas, registered_buffers, repalloc(), XLR_MAX_BLOCK_ID, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), EndPrepare(), gistplacetopage(), log_newpage_range(), log_newpages(), and shiftList().

◆ XLogGetFakeLSN()

XLogRecPtr XLogGetFakeLSN ( Relation  rel)

Definition at line 559 of file xloginsert.c.

560{
561 if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
562 {
563 /*
564 * Temporary relations are only accessible in our session, so a simple
565 * backend-local counter will do.
566 */
567 static XLogRecPtr counter = FirstNormalUnloggedLSN;
568
569 return counter++;
570 }
571 else if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
572 {
573 /*
574 * Unlogged relations are accessible from other backends, and survive
575 * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us.
576 */
578 }
579 else
580 {
581 /*
582 * WAL-logging on this relation will start after commit, so its LSNs
583 * must be distinct numbers smaller than the LSN at the next commit.
584 * Emit a dummy WAL record if insert-LSN hasn't advanced after the
585 * last call.
586 */
589
592
593 /* No need for an actual record if we already have a distinct LSN */
596
598 return currlsn;
599 }
600}

References Assert, fb(), FirstNormalUnloggedLSN, GetFakeLSNForUnloggedRel(), GetXLogInsertEndRecPtr(), InvalidXLogRecPtr, RelationData::rd_rel, RelationIsPermanent, RelationNeedsWAL, XLogAssignLSN(), and XLogRecPtrIsValid.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistvacuumpage(), and gistvacuumscan().

◆ XLogInsert()

XLogRecPtr XLogInsert ( RmgrId  rmid,
uint8  info 
)

Definition at line 479 of file xloginsert.c.

480{
482
483 /* XLogBeginInsert() must have been called. */
485 elog(ERROR, "XLogBeginInsert was not called");
486
487 /*
488 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
489 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
490 */
491 if ((info & ~(XLR_RMGR_INFO_MASK |
494 elog(PANIC, "invalid xlog info mask %02X", info);
495
496 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
497
498 /*
499 * In bootstrap mode, we don't actually log anything but XLOG resources;
500 * return a phony record pointer.
501 */
502 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
503 {
505 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
506 return EndPos;
507 }
508
509 do
510 {
512 bool doPageWrites;
513 bool topxid_included = false;
516 int num_fpi = 0;
517 uint64 fpi_bytes = 0;
518
519 /*
520 * Get values needed to decide whether to do full-page writes. Since
521 * we don't yet have an insertion lock, these could change under us,
522 * but XLogInsertRecord will recheck them once it has a lock.
523 */
525
529
532 } while (!XLogRecPtrIsValid(EndPos));
533
535
536 return EndPos;
537}

References begininsert_called, curinsert_flags, doPageWrites, elog, ERROR, fb(), GetFullPageWriteInfo(), IsBootstrapProcessingMode, PANIC, RedoRecPtr, SizeOfXLogLongPHD, XLogInsertRecord(), XLogRecordAssemble(), XLogRecPtrIsValid, XLogResetInsertion(), XLR_CHECK_CONSISTENCY, XLR_RMGR_INFO_MASK, and XLR_SPECIAL_REL_UPDATE.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), 
UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogRecordAssemble()

static XLogRecData * XLogRecordAssemble ( RmgrId  rmid,
uint8  info,
XLogRecPtr  RedoRecPtr,
bool  doPageWrites,
XLogRecPtr fpw_lsn,
int num_fpi,
uint64 fpi_bytes,
bool topxid_included 
)
static

Definition at line 618 of file xloginsert.c.

622{
624 uint64 total_len = 0;
625 int block_id;
630 char *scratch = hdr_scratch;
631
632 /*
633 * Note: this function can be called multiple times for the same record.
634 * All the modifications we do to the rdata chains below must handle that.
635 */
636
637 /* The record begins with the fixed-size header */
640
641 hdr_rdt.next = NULL;
644
645 /*
646 * Enforce consistency checks for this record if user is looking for it.
 647 * Do this at the beginning of this routine so that callers of
 648 * XLogInsert() have the possibility to pass XLR_CHECK_CONSISTENCY
 649 * directly for a record.
650 */
651 if (wal_consistency_checking[rmid])
652 info |= XLR_CHECK_CONSISTENCY;
653
654 /*
655 * Make an rdata chain containing all the data portions of all block
656 * references. This includes the data for full-page images. Also append
657 * the headers for the block references in the scratch buffer.
658 */
661 {
663 bool needs_backup;
664 bool needs_data;
668 bool samerel;
669 bool is_compressed = false;
670 bool include_image;
671
672 if (!regbuf->in_use)
673 continue;
674
675 /* Determine if this block needs to be backed up */
676 if (regbuf->flags & REGBUF_FORCE_IMAGE)
677 needs_backup = true;
678 else if (regbuf->flags & REGBUF_NO_IMAGE)
679 needs_backup = false;
680 else if (!doPageWrites)
681 needs_backup = false;
682 else
683 {
684 /*
685 * We assume page LSN is first data on *every* page that can be
686 * passed to XLogInsert, whether it has the standard page layout
687 * or not.
688 */
689 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
690
691 needs_backup = (page_lsn <= RedoRecPtr);
692 if (!needs_backup)
693 {
694 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
695 *fpw_lsn = page_lsn;
696 }
697 }
698
 699 /* Determine if the buffer data needs to be included */
700 if (regbuf->rdata_len == 0)
701 needs_data = false;
702 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
703 needs_data = true;
704 else
706
707 bkpb.id = block_id;
708 bkpb.fork_flags = regbuf->forkno;
709 bkpb.data_length = 0;
710
711 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
712 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
713
714 /*
715 * If needs_backup is true or WAL checking is enabled for current
716 * resource manager, log a full-page write for the current block.
717 */
719
720 if (include_image)
721 {
722 const PageData *page = regbuf->page;
724
725 /*
726 * The page needs to be backed up, so calculate its hole length
727 * and offset.
728 */
729 if (regbuf->flags & REGBUF_STANDARD)
730 {
731 /* Assume we can omit data between pd_lower and pd_upper */
732 uint16 lower = ((const PageHeaderData *) page)->pd_lower;
733 uint16 upper = ((const PageHeaderData *) page)->pd_upper;
734
736 upper > lower &&
737 upper <= BLCKSZ)
738 {
739 bimg.hole_offset = lower;
740 cbimg.hole_length = upper - lower;
741 }
742 else
743 {
744 /* No "hole" to remove */
745 bimg.hole_offset = 0;
746 cbimg.hole_length = 0;
747 }
748 }
749 else
750 {
751 /* Not a standard page header, don't try to eliminate "hole" */
752 bimg.hole_offset = 0;
753 cbimg.hole_length = 0;
754 }
755
756 /*
757 * Try to compress a block image if wal_compression is enabled
758 */
760 {
762 XLogCompressBackupBlock(page, bimg.hole_offset,
763 cbimg.hole_length,
764 regbuf->compressed_page,
766 }
767
768 /*
769 * Fill in the remaining fields in the XLogRecordBlockHeader
770 * struct
771 */
772 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
773
774 /* Report a full page image constructed for the WAL record */
775 *num_fpi += 1;
776
777 /*
778 * Construct XLogRecData entries for the page content.
779 */
780 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
782
783 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
784
785 /*
786 * If WAL consistency checking is enabled for the resource manager
787 * of this WAL record, a full-page image is included in the record
788 * for the block modified. During redo, the full-page is replayed
789 * only if BKPIMAGE_APPLY is set.
790 */
791 if (needs_backup)
792 bimg.bimg_info |= BKPIMAGE_APPLY;
793
794 if (is_compressed)
795 {
796 /* The current compression is stored in the WAL record */
797 bimg.length = compressed_len;
798
799 /* Set the compression method used for this block */
801 {
803 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
804 break;
805
807#ifdef USE_LZ4
808 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
809#else
810 elog(ERROR, "LZ4 is not supported by this build");
811#endif
812 break;
813
815#ifdef USE_ZSTD
816 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
817#else
818 elog(ERROR, "zstd is not supported by this build");
819#endif
820 break;
821
823 Assert(false); /* cannot happen */
824 break;
825 /* no default case, so that compiler will warn */
826 }
827
828 rdt_datas_last->data = regbuf->compressed_page;
830 }
831 else
832 {
833 bimg.length = BLCKSZ - cbimg.hole_length;
834
835 if (cbimg.hole_length == 0)
836 {
837 rdt_datas_last->data = page;
838 rdt_datas_last->len = BLCKSZ;
839 }
840 else
841 {
842 /* must skip the hole */
843 rdt_datas_last->data = page;
844 rdt_datas_last->len = bimg.hole_offset;
845
846 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
848
849 rdt_datas_last->data =
850 page + (bimg.hole_offset + cbimg.hole_length);
851 rdt_datas_last->len =
852 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
853 }
854 }
855
856 total_len += bimg.length;
857
858 /* Track the WAL full page images in bytes */
859 *fpi_bytes += bimg.length;
860 }
861
862 if (needs_data)
863 {
864 /*
 865 * When copying to XLogRecordBlockHeader, the length is narrowed
 866 * to a uint16. Double-check that it is still correct.
867 */
868 Assert(regbuf->rdata_len <= UINT16_MAX);
869
870 /*
871 * Link the caller-supplied rdata chain for this buffer to the
872 * overall list.
873 */
874 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
875 bkpb.data_length = (uint16) regbuf->rdata_len;
876 total_len += regbuf->rdata_len;
877
878 rdt_datas_last->next = regbuf->rdata_head;
879 rdt_datas_last = regbuf->rdata_tail;
880 }
881
882 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
883 {
884 samerel = true;
885 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
886 }
887 else
888 samerel = false;
890
891 /* Ok, copy the header to the scratch buffer */
894 if (include_image)
895 {
898 if (cbimg.hole_length != 0 && is_compressed)
899 {
903 }
904 }
905 if (!samerel)
906 {
907 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
908 scratch += sizeof(RelFileLocator);
909 }
910 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
911 scratch += sizeof(BlockNumber);
912 }
913
914 /* followed by the record's origin, if any */
917 {
918 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
921 }
922
923 /* followed by toplevel XID, if not already included in previous record */
925 {
927
928 /* Set the flag that the top xid is included in the WAL */
929 *topxid_included = true;
930
932 memcpy(scratch, &xid, sizeof(TransactionId));
933 scratch += sizeof(TransactionId);
934 }
935
936 /* followed by main data, if any */
937 if (mainrdata_len > 0)
938 {
939 if (mainrdata_len > 255)
940 {
942
945 (errmsg_internal("too much WAL data"),
946 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
948 PG_UINT32_MAX)));
949
951 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
953 scratch += sizeof(uint32);
954 }
955 else
956 {
957 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
958 *(scratch++) = (uint8) mainrdata_len;
959 }
962 total_len += mainrdata_len;
963 }
965
967 total_len += hdr_rdt.len;
968
969 /*
970 * Calculate CRC of the data
971 *
972 * Note that the record header isn't added into the CRC initially since we
973 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
974 * the whole record in the order: rdata, then backup blocks, then record
975 * header.
976 */
979 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
980 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
981
982 /*
983 * Ensure that the XLogRecord is not too large.
984 *
985 * XLogReader machinery is only able to handle records up to a certain
986 * size (ignoring machine resource limitations), so make sure that we will
987 * not emit records larger than the sizes advertised to be supported.
988 */
989 if (total_len > XLogRecordMaxSize)
991 (errmsg_internal("oversized WAL record"),
992 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
993 total_len, XLogRecordMaxSize, rmid, info)));
994
995 /*
996 * Fill in the fields in the record header. Prev-link is filled in later,
997 * once we know where in the WAL the record will be inserted. The CRC does
998 * not include the record header yet.
999 */
1001 rechdr->xl_tot_len = (uint32) total_len;
1002 rechdr->xl_info = info;
1003 rechdr->xl_rmid = rmid;
1004 rechdr->xl_prev = InvalidXLogRecPtr;
1005 rechdr->xl_crc = rdata_crc;
1006
1007 return &hdr_rdt;
1008}

References Assert, BKPBLOCK_HAS_DATA, BKPBLOCK_HAS_IMAGE, BKPBLOCK_SAME_REL, BKPBLOCK_WILL_INIT, BKPIMAGE_APPLY, BKPIMAGE_COMPRESS_LZ4, BKPIMAGE_COMPRESS_PGLZ, BKPIMAGE_COMPRESS_ZSTD, BKPIMAGE_HAS_HOLE, COMP_CRC32C, curinsert_flags, XLogRecData::data, doPageWrites, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), GetCurrentTransactionIdIfAny(), GetTopTransactionIdIfAny(), hdr_rdt, hdr_scratch, INIT_CRC32C, InvalidReplOriginId, InvalidXLogRecPtr, IsSubxactTopXidLogPending(), XLogRecData::len, lower(), mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, XLogRecData::next, ReplOriginXactState::origin, PageGetLSN(), PG_UINT32_MAX, RedoRecPtr, REGBUF_FORCE_IMAGE, REGBUF_KEEP_DATA, REGBUF_NO_IMAGE, REGBUF_STANDARD, REGBUF_WILL_INIT, registered_buffers, RelFileLocatorEquals, replorigin_xact_state, SizeOfPageHeaderData, SizeOfXLogRecord, SizeOfXLogRecordBlockCompressHeader, SizeOfXLogRecordBlockHeader, SizeOfXLogRecordBlockImageHeader, upper(), wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, WAL_COMPRESSION_ZSTD, wal_consistency_checking, XLOG_INCLUDE_ORIGIN, XLogCompressBackupBlock(), XLogRecordMaxSize, XLogRecPtrIsValid, XLR_BLOCK_ID_DATA_LONG, XLR_BLOCK_ID_DATA_SHORT, XLR_BLOCK_ID_ORIGIN, XLR_BLOCK_ID_TOPLEVEL_XID, and XLR_CHECK_CONSISTENCY.

Referenced by XLogInsert().

◆ XLogRegisterBlock()

void XLogRegisterBlock ( uint8  block_id,
RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blknum,
const PageData page,
uint8  flags 
)

Definition at line 314 of file xloginsert.c.

316{
318
320
323
325 elog(ERROR, "too many registered buffers");
326
328
329 regbuf->rlocator = *rlocator;
330 regbuf->forkno = forknum;
331 regbuf->block = blknum;
332 regbuf->page = page;
333 regbuf->flags = flags;
334 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
335 regbuf->rdata_len = 0;
336
337 /*
338 * Check that this page hasn't already been registered with some other
339 * block_id.
340 */
341#ifdef USE_ASSERT_CHECKING
342 {
343 int i;
344
345 for (i = 0; i < max_registered_block_id; i++)
346 {
348
349 if (i == block_id || !regbuf_old->in_use)
350 continue;
351
352 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
353 regbuf_old->forkno != regbuf->forkno ||
354 regbuf_old->block != regbuf->block);
355 }
356 }
357#endif
358
359 regbuf->in_use = true;
360}

References Assert, begininsert_called, elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, registered_buffers, RelFileLocatorEquals, and registered_buffer::rlocator.

Referenced by heap_inplace_update_and_unlock(), log_newpage(), log_newpages(), and XLogSaveBufferForHint().

◆ XLogRegisterBufData()

void XLogRegisterBufData ( uint8  block_id,
const void data,
uint32  len 
)

Definition at line 410 of file xloginsert.c.

411{
414
416
417 /* find the registered buffer struct */
419 if (!regbuf->in_use)
420 elog(ERROR, "no block with id %d registered with WAL insertion",
421 block_id);
422
423 /*
424 * Check against max_rdatas and ensure we do not register more data per
425 * buffer than can be handled by the physical data format; i.e. that
426 * regbuf->rdata_len does not grow beyond what
427 * XLogRecordBlockHeader->data_length can hold.
428 */
429 if (num_rdatas >= max_rdatas)
431 (errmsg_internal("too much WAL data"),
432 errdetail_internal("%d out of %d data segments are already in use.",
434 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
436 (errmsg_internal("too much WAL data"),
437 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
438 UINT16_MAX, block_id, regbuf->rdata_len, len)));
439
440 rdata = &rdatas[num_rdatas++];
441
442 rdata->data = data;
443 rdata->len = len;
444
445 regbuf->rdata_tail->next = rdata;
446 regbuf->rdata_tail = rdata;
447 regbuf->rdata_len += len;
448}

References Assert, begininsert_called, XLogRecData::data, data, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, max_rdatas, num_rdatas, rdatas, and registered_buffers.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_squeezebucket(), brin_doinsert(), brin_doupdate(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), entryExecPlaceToPage(), GenericXLogFinish(), ginHeapTupleFastInsert(), ginVacuumPostingTreeLeaf(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), heap_inplace_update_and_unlock(), heap_insert(), heap_multi_insert(), log_heap_prune_and_freeze(), log_heap_update(), and writeListPage().

◆ XLogRegisterBuffer()

void XLogRegisterBuffer ( uint8  block_id,
Buffer  buffer,
uint8  flags 
)

Definition at line 246 of file xloginsert.c.

247{
249
250 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
251 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
253
254 /*
255 * Ordinarily, buffer should be exclusive-locked and marked dirty before
256 * we get here, otherwise we could end up violating one of the rules in
257 * access/transam/README.
258 *
259 * Some callers intentionally register a clean page and never update that
260 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
261 * bypass these checks.
262 */
263#ifdef USE_ASSERT_CHECKING
264 if (!(flags & REGBUF_NO_CHANGE))
266 BufferIsDirty(buffer));
267#endif
268
270 {
272 elog(ERROR, "too many registered buffers");
274 }
275
277
278 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
279 regbuf->page = BufferGetPage(buffer);
280 regbuf->flags = flags;
281 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
282 regbuf->rdata_len = 0;
283
284 /*
285 * Check that this page hasn't already been registered with some other
286 * block_id.
287 */
288#ifdef USE_ASSERT_CHECKING
289 {
290 int i;
291
292 for (i = 0; i < max_registered_block_id; i++)
293 {
295
296 if (i == block_id || !regbuf_old->in_use)
297 continue;
298
299 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
300 regbuf_old->forkno != regbuf->forkno ||
301 regbuf_old->block != regbuf->block);
302 }
303 }
304#endif
305
306 regbuf->in_use = true;
307}

References Assert, begininsert_called, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferGetTag(), BufferIsDirty(), BufferIsLockedByMeInMode(), elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, REGBUF_FORCE_IMAGE, REGBUF_NO_CHANGE, REGBUF_NO_IMAGE, registered_buffers, and RelFileLocatorEquals.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_newpage_range(), log_split_page(), moveLeafs(), nextval_internal(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), writeListPage(), and xlogVacuumPage().

◆ XLogRegisterData()

void XLogRegisterData ( const void data,
uint32  len 
)

Definition at line 369 of file xloginsert.c.

370{
372
374
375 if (num_rdatas >= max_rdatas)
377 (errmsg_internal("too much WAL data"),
378 errdetail_internal("%d out of %d data segments are already in use.",
380 rdata = &rdatas[num_rdatas++];
381
382 rdata->data = data;
383 rdata->len = len;
384
385 /*
386 * we use the mainrdata_last pointer to track the end of the chain, so no
387 * need to clear 'next' here.
388 */
389
392
394}

References Assert, begininsert_called, XLogRecData::data, data, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, mainrdata_last, mainrdata_len, max_rdatas, XLogRecData::next, num_rdatas, and rdatas.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_heap_visible(), log_smgrcreate(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), 
WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), and XLogSimpleInsertInt64().

◆ XLogResetInsertion()

void XLogResetInsertion ( void  )

◆ XLogSaveBufferForHint()

XLogRecPtr XLogSaveBufferForHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 1137 of file xloginsert.c.

1138{
1140 XLogRecPtr lsn;
1142
1143 /* this also verifies that we hold an appropriate lock */
1144 Assert(BufferIsDirty(buffer));
1145
1146 /*
1147 * Update RedoRecPtr so that we can make the right decision. It's possible
1148 * that a new checkpoint will start just after GetRedoRecPtr(), but that
1149 * is ok, as the buffer is already dirty, ensuring that any BufferSync()
1150 * started after the buffer was marked dirty cannot complete without
1151 * flushing this buffer. If a checkpoint started between marking the
1152 * buffer dirty and this check, we will emit an unnecessary WAL record (as
1153 * the buffer will be written out as part of the checkpoint), but the
1154 * window for that is not big.
1155 */
1157
1158 /*
1159 * We assume page LSN is first data on *every* page that can be passed to
1160 * XLogInsert, whether it has the standard page layout or not.
1161 */
1162 lsn = PageGetLSN(BufferGetPage(buffer));
1163
1164 if (lsn <= RedoRecPtr)
1165 {
1166 int flags = 0;
1168 char *origdata = (char *) BufferGetBlock(buffer);
1169 RelFileLocator rlocator;
1170 ForkNumber forkno;
1171 BlockNumber blkno;
1172
1173 /*
1174 * Copy buffer so we don't have to worry about concurrent hint bit or
1175 * lsn updates. We assume pd_lower/upper cannot be changed without an
1177 * exclusive lock, so the contents of the backup image are not racy.
1177 */
1178 if (buffer_std)
1179 {
1180 /* Assume we can omit data between pd_lower and pd_upper */
1181 Page page = BufferGetPage(buffer);
1182 uint16 lower = ((PageHeader) page)->pd_lower;
1183 uint16 upper = ((PageHeader) page)->pd_upper;
1184
1187 }
1188 else
1190
1192
1193 if (buffer_std)
1194 flags |= REGBUF_STANDARD;
1195
1196 BufferGetTag(buffer, &rlocator, &forkno, &blkno);
1197 XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
1198
1200 }
1201
1202 return recptr;
1203}

References Assert, BufferGetBlock(), BufferGetPage(), BufferGetTag(), BufferIsDirty(), fb(), GetRedoRecPtr(), InvalidXLogRecPtr, lower(), PageGetLSN(), RedoRecPtr, REGBUF_STANDARD, upper(), XLOG_FPI_FOR_HINT, XLogBeginInsert(), XLogInsert(), and XLogRegisterBlock().

Referenced by MarkSharedBufferDirtyHint().

◆ XLogSetRecordFlags()

◆ XLogSimpleInsertInt64()

XLogRecPtr XLogSimpleInsertInt64 ( RmgrId  rmid,
uint8  info,
int64  value 
)

Definition at line 544 of file xloginsert.c.

545{
547 XLogRegisterData(&value, sizeof(value));
548 return XLogInsert(rmid, info);
549}

References value, XLogBeginInsert(), XLogInsert(), and XLogRegisterData().

Referenced by ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), and ExtendMultiXactOffset().

Variable Documentation

◆ begininsert_called

◆ curinsert_flags

uint8 curinsert_flags = 0
static

◆ hdr_rdt

XLogRecData hdr_rdt
static

Definition at line 116 of file xloginsert.c.

Referenced by XLogRecordAssemble().

◆ hdr_scratch

char* hdr_scratch = NULL
static

Definition at line 117 of file xloginsert.c.

Referenced by InitXLogInsert(), and XLogRecordAssemble().

◆ mainrdata_head

XLogRecData* mainrdata_head
static

Definition at line 101 of file xloginsert.c.

Referenced by XLogBeginInsert(), XLogRecordAssemble(), and XLogResetInsertion().

◆ mainrdata_last

XLogRecData* mainrdata_last = (XLogRecData *) &mainrdata_head
static

◆ mainrdata_len

uint64 mainrdata_len
static

◆ max_rdatas

int max_rdatas
static

◆ max_registered_block_id

int max_registered_block_id = 0
static

◆ max_registered_buffers

int max_registered_buffers
static

◆ num_rdatas

int num_rdatas
static

Definition at line 132 of file xloginsert.c.

Referenced by XLogRegisterBufData(), XLogRegisterData(), and XLogResetInsertion().

◆ rdatas

XLogRecData* rdatas
static

◆ registered_buffers

◆ xloginsert_cxt

MemoryContext xloginsert_cxt
static

Definition at line 138 of file xloginsert.c.

Referenced by InitXLogInsert().