PostgreSQL Source Code git master
Loading...
Searching...
No Matches
xloginsert.c File Reference
#include "postgres.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xloginsert.h"
#include "catalog/pg_control.h"
#include "common/pg_lzcompress.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "replication/origin.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "utils/memutils.h"
#include "utils/pgstat_internal.h"
#include "utils/rel.h"
Include dependency graph for xloginsert.c:

Go to the source code of this file.

Data Structures

struct  registered_buffer
 

Macros

#define LZ4_MAX_BLCKSZ   0
 
#define ZSTD_MAX_BLCKSZ   0
 
#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)
 
#define COMPRESS_BUFSIZE   Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ)
 
#define SizeOfXlogOrigin   (sizeof(ReplOriginId) + sizeof(char))
 
#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))
 
#define HEADER_SCRATCH_SIZE
 

Functions

static XLogRecDataXLogRecordAssemble (RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
 
static bool XLogCompressBackupBlock (const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
 
void XLogBeginInsert (void)
 
void XLogEnsureRecordSpace (int max_block_id, int ndatas)
 
void XLogResetInsertion (void)
 
void XLogRegisterBuffer (uint8 block_id, Buffer buffer, uint8 flags)
 
void XLogRegisterBlock (uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
 
void XLogRegisterData (const void *data, uint32 len)
 
void XLogRegisterBufData (uint8 block_id, const void *data, uint32 len)
 
void XLogSetRecordFlags (uint8 flags)
 
XLogRecPtr XLogInsert (RmgrId rmid, uint8 info)
 
XLogRecPtr XLogSimpleInsertInt64 (RmgrId rmid, uint8 info, int64 value)
 
XLogRecPtr XLogGetFakeLSN (Relation rel)
 
bool XLogCheckBufferNeedsBackup (Buffer buffer)
 
XLogRecPtr XLogSaveBufferForHint (Buffer buffer, bool buffer_std)
 
XLogRecPtr log_newpage (RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
 
void log_newpages (RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
 
XLogRecPtr log_newpage_buffer (Buffer buffer, bool page_std)
 
void log_newpage_range (Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
 
void InitXLogInsert (void)
 

Variables

static registered_bufferregistered_buffers
 
static int max_registered_buffers
 
static int max_registered_block_id = 0
 
static XLogRecDatamainrdata_head
 
static XLogRecDatamainrdata_last = (XLogRecData *) &mainrdata_head
 
static uint64 mainrdata_len
 
static uint8 curinsert_flags = 0
 
static XLogRecData hdr_rdt
 
static charhdr_scratch = NULL
 
static XLogRecDatardatas
 
static int num_rdatas
 
static int max_rdatas
 
static bool begininsert_called = false
 
static MemoryContext xloginsert_cxt
 

Macro Definition Documentation

◆ COMPRESS_BUFSIZE

Definition at line 65 of file xloginsert.c.

◆ HEADER_SCRATCH_SIZE

#define HEADER_SCRATCH_SIZE
Value:
#define SizeOfXlogOrigin
Definition xloginsert.c:119
#define SizeOfXLogTransactionId
Definition xloginsert.c:120
#define XLR_MAX_BLOCK_ID
Definition xlogrecord.h:239
#define MaxSizeOfXLogRecordBlockHeader
Definition xlogrecord.h:184
#define SizeOfXLogRecordDataHeaderLong
Definition xlogrecord.h:225
#define SizeOfXLogRecord
Definition xlogrecord.h:55

Definition at line 122 of file xloginsert.c.

153{
156 Assert(mainrdata_len == 0);
157
158 /* cross-check on whether we should be here or not */
159 if (!XLogInsertAllowed())
160 elog(ERROR, "cannot make new WAL entries during recovery");
161
163 elog(ERROR, "XLogBeginInsert was already called");
164
165 begininsert_called = true;
166}
167
168/*
169 * Ensure that there are enough buffer and data slots in the working area,
170 * for subsequent XLogRegisterBuffer, XLogRegisterData and XLogRegisterBufData
171 * calls.
172 *
173 * There is always space for a small number of buffers and data chunks, enough
174 * for most record types. This function is for the exceptional cases that need
175 * more.
176 */
177void
178XLogEnsureRecordSpace(int max_block_id, int ndatas)
179{
180 int nbuffers;
181
182 /*
183 * This must be called before entering a critical section, because
184 * allocating memory inside a critical section can fail. repalloc() will
185 * check the same, but better to check it here too so that we fail
186 * consistently even if the arrays happen to be large enough already.
187 */
189
190 /* the minimum values can't be decreased */
191 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
192 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
195
196 if (max_block_id > XLR_MAX_BLOCK_ID)
197 elog(ERROR, "maximum number of WAL record block references exceeded");
198 nbuffers = max_block_id + 1;
199
200 if (nbuffers > max_registered_buffers)
201 {
203 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
204
205 /*
206 * At least the padding bytes in the structs must be zeroed, because
207 * they are included in WAL data, but initialize it all for tidiness.
208 */
210 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
211 max_registered_buffers = nbuffers;
212 }
213
214 if (ndatas > max_rdatas)
215 {
218 }
219}
220
221/*
222 * Reset WAL record construction buffers.
223 */
224void
226{
227 int i;
228
229 for (i = 0; i < max_registered_block_id; i++)
230 registered_buffers[i].in_use = false;
231
232 num_rdatas = 0;
234 mainrdata_len = 0;
236 curinsert_flags = 0;
237 begininsert_called = false;
238}
239
240/*
241 * Register a reference to a buffer with the WAL record being constructed.
242 * This must be called for every page that the WAL-logged operation modifies.
243 */
244void
246{
248
249 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
250 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
252
253 /*
254 * Ordinarily, the buffer should be exclusive-locked (or share-exclusive
255 * in case of hint bits) and marked dirty before we get here, otherwise we
256 * could end up violating one of the rules in access/transam/README.
257 *
258 * Some callers intentionally register a clean page and never update that
259 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
260 * bypass these checks.
261 */
262#ifdef USE_ASSERT_CHECKING
263 if (!(flags & REGBUF_NO_CHANGE))
264 {
265 Assert(BufferIsDirty(buffer));
268 }
269#endif
270
272 {
274 elog(ERROR, "too many registered buffers");
276 }
277
279
280 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
281 regbuf->page = BufferGetPage(buffer);
282 regbuf->flags = flags;
283 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
284 regbuf->rdata_len = 0;
285
286 /*
287 * Check that this page hasn't already been registered with some other
288 * block_id.
289 */
290#ifdef USE_ASSERT_CHECKING
291 {
292 int i;
293
294 for (i = 0; i < max_registered_block_id; i++)
295 {
297
298 if (i == block_id || !regbuf_old->in_use)
299 continue;
300
301 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
302 regbuf_old->forkno != regbuf->forkno ||
303 regbuf_old->block != regbuf->block);
304 }
305 }
306#endif
307
308 regbuf->in_use = true;
309}
310
311/*
312 * Like XLogRegisterBuffer, but for registering a block that's not in the
313 * shared buffer pool (i.e. when you don't have a Buffer for it).
314 */
315void
317 BlockNumber blknum, const PageData *page, uint8 flags)
318{
320
322
325
327 elog(ERROR, "too many registered buffers");
328
330
331 regbuf->rlocator = *rlocator;
332 regbuf->forkno = forknum;
333 regbuf->block = blknum;
334 regbuf->page = page;
335 regbuf->flags = flags;
336 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
337 regbuf->rdata_len = 0;
338
339 /*
340 * Check that this page hasn't already been registered with some other
341 * block_id.
342 */
343#ifdef USE_ASSERT_CHECKING
344 {
345 int i;
346
347 for (i = 0; i < max_registered_block_id; i++)
348 {
350
351 if (i == block_id || !regbuf_old->in_use)
352 continue;
353
354 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
355 regbuf_old->forkno != regbuf->forkno ||
356 regbuf_old->block != regbuf->block);
357 }
358 }
359#endif
360
361 regbuf->in_use = true;
362}
363
364/*
365 * Add data to the WAL record that's being constructed.
366 *
367 * The data is appended to the "main chunk", available at replay with
368 * XLogRecGetData().
369 */
370void
371XLogRegisterData(const void *data, uint32 len)
372{
374
376
377 if (num_rdatas >= max_rdatas)
379 (errmsg_internal("too much WAL data"),
380 errdetail_internal("%d out of %d data segments are already in use.",
382 rdata = &rdatas[num_rdatas++];
383
384 rdata->data = data;
385 rdata->len = len;
386
387 /*
388 * we use the mainrdata_last pointer to track the end of the chain, so no
389 * need to clear 'next' here.
390 */
391
394
396}
397
398/*
399 * Add buffer-specific data to the WAL record that's being constructed.
400 *
401 * Block_id must reference a block previously registered with
402 * XLogRegisterBuffer(). If this is called more than once for the same
403 * block_id, the data is appended.
404 *
405 * The maximum amount of data that can be registered per block is 65535
406 * bytes. That should be plenty; if you need more than BLCKSZ bytes to
407 * reconstruct the changes to the page, you might as well just log a full
408 * copy of it. (the "main data" that's not associated with a block is not
409 * limited)
410 */
411void
413{
416
418
419 /* find the registered buffer struct */
421 if (!regbuf->in_use)
422 elog(ERROR, "no block with id %d registered with WAL insertion",
423 block_id);
424
425 /*
426 * Check against max_rdatas and ensure we do not register more data per
427 * buffer than can be handled by the physical data format; i.e. that
428 * regbuf->rdata_len does not grow beyond what
429 * XLogRecordBlockHeader->data_length can hold.
430 */
431 if (num_rdatas >= max_rdatas)
433 (errmsg_internal("too much WAL data"),
434 errdetail_internal("%d out of %d data segments are already in use.",
436 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
438 (errmsg_internal("too much WAL data"),
439 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
440 UINT16_MAX, block_id, regbuf->rdata_len, len)));
441
442 rdata = &rdatas[num_rdatas++];
443
444 rdata->data = data;
445 rdata->len = len;
446
447 regbuf->rdata_tail->next = rdata;
448 regbuf->rdata_tail = rdata;
449 regbuf->rdata_len += len;
450}
451
452/*
453 * Set insert status flags for the upcoming WAL record.
454 *
455 * The flags that can be used here are:
456 * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
457 * included in the record.
458 * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
 459 * durability, which makes it possible to avoid triggering WAL archiving
460 * background activity.
461 */
462void
464{
466 curinsert_flags |= flags;
467}
468
469/*
470 * Insert an XLOG record having the specified RMID and info bytes, with the
471 * body of the record being the data and buffer references registered earlier
472 * with XLogRegister* calls.
473 *
474 * Returns XLOG pointer to end of record (beginning of next record).
475 * This can be used as LSN for data pages affected by the logged action.
476 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
477 * before the data page can be written out. This implements the basic
478 * WAL rule "write the log before the data".)
479 */
481XLogInsert(RmgrId rmid, uint8 info)
482{
484
485 /* XLogBeginInsert() must have been called. */
487 elog(ERROR, "XLogBeginInsert was not called");
488
489 /*
490 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
491 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
492 */
493 if ((info & ~(XLR_RMGR_INFO_MASK |
496 elog(PANIC, "invalid xlog info mask %02X", info);
497
498 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
499
500 /*
501 * In bootstrap mode, we don't actually log anything but XLOG resources;
502 * return a phony record pointer.
503 */
504 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
505 {
507 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
508 return EndPos;
509 }
510
511 do
512 {
514 bool doPageWrites;
515 bool topxid_included = false;
518 int num_fpi = 0;
519 uint64 fpi_bytes = 0;
520
521 /*
522 * Get values needed to decide whether to do full-page writes. Since
523 * we don't yet have an insertion lock, these could change under us,
524 * but XLogInsertRecord will recheck them once it has a lock.
525 */
527
531
534 } while (!XLogRecPtrIsValid(EndPos));
535
537
538 return EndPos;
539}
540
541/*
542 * Simple wrapper to XLogInsert to insert a WAL record with elementary
543 * contents (only an int64 is supported as value currently).
544 */
547{
549 XLogRegisterData(&value, sizeof(value));
550 return XLogInsert(rmid, info);
551}
552
553/*
554 * XLogGetFakeLSN - get a fake LSN for an index page that isn't WAL-logged.
555 *
556 * Some index AMs use LSNs to detect concurrent page modifications, but not
557 * all index pages are WAL-logged. This function provides a sequence of fake
558 * LSNs for that purpose.
559 */
562{
563 if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
564 {
565 /*
566 * Temporary relations are only accessible in our session, so a simple
567 * backend-local counter will do.
568 */
569 static XLogRecPtr counter = FirstNormalUnloggedLSN;
570
571 return counter++;
572 }
573 else if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
574 {
575 /*
576 * Unlogged relations are accessible from other backends, and survive
577 * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us.
578 */
580 }
581 else
582 {
583 /*
584 * WAL-logging on this relation will start after commit, so its LSNs
585 * must be distinct numbers smaller than the LSN at the next commit.
586 * Emit a dummy WAL record if insert-LSN hasn't advanced after the
587 * last call.
588 */
591
594
595 /* No need for an actual record if we already have a distinct LSN */
598
600 return currlsn;
601 }
602}
603
604/*
605 * Assemble a WAL record from the registered data and buffers into an
606 * XLogRecData chain, ready for insertion with XLogInsertRecord().
607 *
608 * The record header fields are filled in, except for the xl_prev field. The
609 * calculated CRC does not include the record header yet.
610 *
611 * If there are any registered buffers, and a full-page image was not taken
612 * of all of them, *fpw_lsn is set to the lowest LSN among such pages. This
613 * signals that the assembled record is only good for insertion on the
614 * assumption that the RedoRecPtr and doPageWrites values were up-to-date.
615 *
616 * *topxid_included is set if the topmost transaction ID is logged with the
617 * current subtransaction.
618 */
619static XLogRecData *
623 bool *topxid_included)
624{
626 uint64 total_len = 0;
627 int block_id;
632 char *scratch = hdr_scratch;
633
634 /*
635 * Note: this function can be called multiple times for the same record.
636 * All the modifications we do to the rdata chains below must handle that.
637 */
638
639 /* The record begins with the fixed-size header */
642
643 hdr_rdt.next = NULL;
646
647 /*
648 * Enforce consistency checks for this record if user is looking for it.
 649 * Do this at the beginning of this routine to give callers of
 650 * XLogInsert() the possibility to pass XLR_CHECK_CONSISTENCY directly
 651 * for a record.
652 */
653 if (wal_consistency_checking[rmid])
654 info |= XLR_CHECK_CONSISTENCY;
655
656 /*
657 * Make an rdata chain containing all the data portions of all block
658 * references. This includes the data for full-page images. Also append
659 * the headers for the block references in the scratch buffer.
660 */
663 {
665 bool needs_backup;
666 bool needs_data;
670 bool samerel;
671 bool is_compressed = false;
672 bool include_image;
673
674 if (!regbuf->in_use)
675 continue;
676
677 /* Determine if this block needs to be backed up */
678 if (regbuf->flags & REGBUF_FORCE_IMAGE)
679 needs_backup = true;
680 else if (regbuf->flags & REGBUF_NO_IMAGE)
681 needs_backup = false;
682 else if (!doPageWrites)
683 needs_backup = false;
684 else
685 {
686 /*
687 * We assume page LSN is first data on *every* page that can be
688 * passed to XLogInsert, whether it has the standard page layout
689 * or not.
690 */
691 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
692
693 needs_backup = (page_lsn <= RedoRecPtr);
694 if (!needs_backup)
695 {
696 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
697 *fpw_lsn = page_lsn;
698 }
699 }
700
 701 /* Determine if the buffer data needs to be included */
702 if (regbuf->rdata_len == 0)
703 needs_data = false;
704 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
705 needs_data = true;
706 else
708
709 bkpb.id = block_id;
710 bkpb.fork_flags = regbuf->forkno;
711 bkpb.data_length = 0;
712
713 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
714 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
715
716 /*
717 * If needs_backup is true or WAL checking is enabled for current
718 * resource manager, log a full-page write for the current block.
719 */
721
722 if (include_image)
723 {
724 const PageData *page = regbuf->page;
726
727 /*
728 * The page needs to be backed up, so calculate its hole length
729 * and offset.
730 */
731 if (regbuf->flags & REGBUF_STANDARD)
732 {
733 /* Assume we can omit data between pd_lower and pd_upper */
734 uint16 lower = ((const PageHeaderData *) page)->pd_lower;
735 uint16 upper = ((const PageHeaderData *) page)->pd_upper;
736
738 upper > lower &&
739 upper <= BLCKSZ)
740 {
741 bimg.hole_offset = lower;
742 cbimg.hole_length = upper - lower;
743 }
744 else
745 {
746 /* No "hole" to remove */
747 bimg.hole_offset = 0;
748 cbimg.hole_length = 0;
749 }
750 }
751 else
752 {
753 /* Not a standard page header, don't try to eliminate "hole" */
754 bimg.hole_offset = 0;
755 cbimg.hole_length = 0;
756 }
757
758 /*
759 * Try to compress a block image if wal_compression is enabled
760 */
762 {
764 XLogCompressBackupBlock(page, bimg.hole_offset,
765 cbimg.hole_length,
766 regbuf->compressed_page,
768 }
769
770 /*
771 * Fill in the remaining fields in the XLogRecordBlockHeader
772 * struct
773 */
774 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
775
776 /* Report a full page image constructed for the WAL record */
777 *num_fpi += 1;
778
779 /*
780 * Construct XLogRecData entries for the page content.
781 */
782 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
784
785 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
786
787 /*
788 * If WAL consistency checking is enabled for the resource manager
789 * of this WAL record, a full-page image is included in the record
790 * for the block modified. During redo, the full-page is replayed
791 * only if BKPIMAGE_APPLY is set.
792 */
793 if (needs_backup)
794 bimg.bimg_info |= BKPIMAGE_APPLY;
795
796 if (is_compressed)
797 {
798 /* The current compression is stored in the WAL record */
799 bimg.length = compressed_len;
800
801 /* Set the compression method used for this block */
803 {
805 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
806 break;
807
809#ifdef USE_LZ4
810 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
811#else
812 elog(ERROR, "LZ4 is not supported by this build");
813#endif
814 break;
815
817#ifdef USE_ZSTD
818 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
819#else
820 elog(ERROR, "zstd is not supported by this build");
821#endif
822 break;
823
825 Assert(false); /* cannot happen */
826 break;
827 /* no default case, so that compiler will warn */
828 }
829
830 rdt_datas_last->data = regbuf->compressed_page;
832 }
833 else
834 {
835 bimg.length = BLCKSZ - cbimg.hole_length;
836
837 if (cbimg.hole_length == 0)
838 {
839 rdt_datas_last->data = page;
840 rdt_datas_last->len = BLCKSZ;
841 }
842 else
843 {
844 /* must skip the hole */
845 rdt_datas_last->data = page;
846 rdt_datas_last->len = bimg.hole_offset;
847
848 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
850
851 rdt_datas_last->data =
852 page + (bimg.hole_offset + cbimg.hole_length);
853 rdt_datas_last->len =
854 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
855 }
856 }
857
858 total_len += bimg.length;
859
860 /* Track the WAL full page images in bytes */
861 *fpi_bytes += bimg.length;
862 }
863
864 if (needs_data)
865 {
866 /*
867 * When copying to XLogRecordBlockHeader, the length is narrowed
868 * to an uint16. Double-check that it is still correct.
869 */
870 Assert(regbuf->rdata_len <= UINT16_MAX);
871
872 /*
873 * Link the caller-supplied rdata chain for this buffer to the
874 * overall list.
875 */
876 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
877 bkpb.data_length = (uint16) regbuf->rdata_len;
878 total_len += regbuf->rdata_len;
879
880 rdt_datas_last->next = regbuf->rdata_head;
881 rdt_datas_last = regbuf->rdata_tail;
882 }
883
884 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
885 {
886 samerel = true;
887 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
888 }
889 else
890 samerel = false;
892
893 /* Ok, copy the header to the scratch buffer */
896 if (include_image)
897 {
900 if (cbimg.hole_length != 0 && is_compressed)
901 {
905 }
906 }
907 if (!samerel)
908 {
909 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
910 scratch += sizeof(RelFileLocator);
911 }
912 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
913 scratch += sizeof(BlockNumber);
914 }
915
916 /* followed by the record's origin, if any */
919 {
920 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
923 }
924
925 /* followed by toplevel XID, if not already included in previous record */
927 {
929
930 /* Set the flag that the top xid is included in the WAL */
931 *topxid_included = true;
932
934 memcpy(scratch, &xid, sizeof(TransactionId));
935 scratch += sizeof(TransactionId);
936 }
937
938 /* followed by main data, if any */
939 if (mainrdata_len > 0)
940 {
941 if (mainrdata_len > 255)
942 {
944
947 (errmsg_internal("too much WAL data"),
948 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
950 PG_UINT32_MAX)));
951
953 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
955 scratch += sizeof(uint32);
956 }
957 else
958 {
959 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
960 *(scratch++) = (uint8) mainrdata_len;
961 }
964 total_len += mainrdata_len;
965 }
967
969 total_len += hdr_rdt.len;
970
971 /*
972 * Calculate CRC of the data
973 *
974 * Note that the record header isn't added into the CRC initially since we
975 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
976 * the whole record in the order: rdata, then backup blocks, then record
977 * header.
978 */
981 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
982 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
983
984 /*
985 * Ensure that the XLogRecord is not too large.
986 *
987 * XLogReader machinery is only able to handle records up to a certain
988 * size (ignoring machine resource limitations), so make sure that we will
989 * not emit records larger than the sizes advertised to be supported.
990 */
991 if (total_len > XLogRecordMaxSize)
993 (errmsg_internal("oversized WAL record"),
994 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
995 total_len, XLogRecordMaxSize, rmid, info)));
996
997 /*
998 * Fill in the fields in the record header. Prev-link is filled in later,
999 * once we know where in the WAL the record will be inserted. The CRC does
1000 * not include the record header yet.
1001 */
1003 rechdr->xl_tot_len = (uint32) total_len;
1004 rechdr->xl_info = info;
1005 rechdr->xl_rmid = rmid;
1006 rechdr->xl_prev = InvalidXLogRecPtr;
1007 rechdr->xl_crc = rdata_crc;
1008
1009 return &hdr_rdt;
1010}
1011
1012/*
1013 * Create a compressed version of a backup block image.
1014 *
1015 * Returns false if compression fails (i.e., compressed result is actually
1016 * bigger than original). Otherwise, returns true and sets 'dlen' to
1017 * the length of compressed block image.
1018 */
1019static bool
1020XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length,
1021 void *dest, uint16 *dlen)
1022{
1023 int32 orig_len = BLCKSZ - hole_length;
1024 int32 len = -1;
1025 int32 extra_bytes = 0;
1026 const void *source;
1027 PGAlignedBlock tmp;
1028
1029 if (hole_length != 0)
1030 {
1031 /* must skip the hole */
1032 memcpy(tmp.data, page, hole_offset);
1033 memcpy(tmp.data + hole_offset,
1034 page + (hole_offset + hole_length),
1035 BLCKSZ - (hole_length + hole_offset));
1036 source = tmp.data;
1037
1038 /*
1039 * Extra data needs to be stored in WAL record for the compressed
1040 * version of block image if the hole exists.
1041 */
1043 }
1044 else
1045 source = page;
1046
1048 {
1051 break;
1052
1054#ifdef USE_LZ4
1057 if (len <= 0)
1058 len = -1; /* failure */
1059#else
1060 elog(ERROR, "LZ4 is not supported by this build");
1061#endif
1062 break;
1063
1065#ifdef USE_ZSTD
1068 if (ZSTD_isError(len))
1069 len = -1; /* failure */
1070#else
1071 elog(ERROR, "zstd is not supported by this build");
1072#endif
1073 break;
1074
1076 Assert(false); /* cannot happen */
1077 break;
1078 /* no default case, so that compiler will warn */
1079 }
1080
1081 /*
1082 * We recheck the actual size even if compression reports success and see
1083 * if the number of bytes saved by compression is larger than the length
1084 * of extra data needed for the compressed version of block image.
1085 */
1086 if (len >= 0 &&
1088 {
1089 *dlen = (uint16) len; /* successful compression */
1090 return true;
1091 }
1092 return false;
1093}
1094
1095/*
1096 * Determine whether the buffer referenced has to be backed up.
1097 *
1098 * Since we don't yet have the insert lock, fullPageWrites and runningBackups
1099 * (which forces full-page writes) could change later, so the result should
1100 * be used for optimization purposes only.
1101 */
1102bool
1104{
1106 bool doPageWrites;
1107 Page page;
1108
1110
1111 page = BufferGetPage(buffer);
1112
1113 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1114 return true; /* buffer requires backup */
1115
1116 return false; /* buffer does not need to be backed up */
1117}
1118
1119/*
1120 * Write a backup block if needed when we are setting a hint. Note that
1121 * this may be called for a variety of page types, not just heaps.
1122 *
1123 * Callable while holding just a share-exclusive lock on the buffer
1124 * content. That suffices to prevent concurrent modifications of the
1125 * buffer. The buffer already needs to have been marked dirty by
1126 * MarkBufferDirtyHint().
1127 *
1128 * We only need to do something if page has not yet been full page written in
1129 * this checkpoint round. The LSN of the inserted wal record is returned if we
1130 * had to write, InvalidXLogRecPtr otherwise.
1131 */
1134{
1136 XLogRecPtr lsn;
1138
1139 /* this also verifies that we hold an appropriate lock */
1140 Assert(BufferIsDirty(buffer));
1141
1142 /*
1143 * Update RedoRecPtr so that we can make the right decision. It's possible
1144 * that a new checkpoint will start just after GetRedoRecPtr(), but that
1145 * is ok, as the buffer is already dirty, ensuring that any BufferSync()
1146 * started after the buffer was marked dirty cannot complete without
1147 * flushing this buffer. If a checkpoint started between marking the
1148 * buffer dirty and this check, we will emit an unnecessary WAL record (as
1149 * the buffer will be written out as part of the checkpoint), but the
1150 * window for that is not big.
1151 */
1153
1154 /*
1155 * We assume page LSN is first data on *every* page that can be passed to
1156 * XLogInsert, whether it has the standard page layout or not.
1157 */
1158 lsn = PageGetLSN(BufferGetPage(buffer));
1159
1160 if (lsn <= RedoRecPtr)
1161 {
1162 int flags = 0;
1163
1165
1166 if (buffer_std)
1167 flags |= REGBUF_STANDARD;
1168
1169 XLogRegisterBuffer(0, buffer, flags);
1170
1172 }
1173
1174 return recptr;
1175}
1176
1177/*
1178 * Write a WAL record containing a full image of a page. Caller is responsible
1179 * for writing the page to disk after calling this routine.
1180 *
1181 * Note: If you're using this function, you should be building pages in private
1182 * memory and writing them directly to smgr. If you're using buffers, call
1183 * log_newpage_buffer instead.
1184 *
1185 * If the page follows the standard page layout, with a PageHeader and unused
1186 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1187 * the unused space to be left out from the WAL record, making it smaller.
1188 */
1190log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno,
1191 Page page, bool page_std)
1192{
1193 int flags;
1195
1196 flags = REGBUF_FORCE_IMAGE;
1197 if (page_std)
1198 flags |= REGBUF_STANDARD;
1199
1201 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1203
1204 /*
1205 * The page may be uninitialized. If so, we can't set the LSN because that
1206 * would corrupt the page.
1207 */
1208 if (!PageIsNew(page))
1209 {
1210 PageSetLSN(page, recptr);
1211 }
1212
1213 return recptr;
1214}
1215
1216/*
1217 * Like log_newpage(), but allows logging multiple pages in one operation.
1218 * It is more efficient than calling log_newpage() for each page separately,
1219 * because we can write multiple pages in a single WAL record.
1220 */
1221void
1222log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages,
1223 BlockNumber *blknos, Page *pages, bool page_std)
1224{
1225 int flags;
1227 int i;
1228 int j;
1229
1230 flags = REGBUF_FORCE_IMAGE;
1231 if (page_std)
1232 flags |= REGBUF_STANDARD;
1233
1234 /*
1235 * Iterate over all the pages. They are collected into batches of
1236 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1237 * batch.
1238 */
1240
1241 i = 0;
1242 while (i < num_pages)
1243 {
1244 int batch_start = i;
1245 int nbatch;
1246
1248
1249 nbatch = 0;
1250 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1251 {
1252 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1253 i++;
1254 nbatch++;
1255 }
1256
1258
1259 for (j = batch_start; j < i; j++)
1260 {
1261 /*
1262 * The page may be uninitialized. If so, we can't set the LSN
1263 * because that would corrupt the page.
1264 */
1265 if (!PageIsNew(pages[j]))
1266 {
1267 PageSetLSN(pages[j], recptr);
1268 }
1269 }
1270 }
1271}
1272
1273/*
1274 * Write a WAL record containing a full image of a page.
1275 *
1276 * Caller should initialize the buffer and mark it dirty before calling this
1277 * function. This function will set the page LSN.
1278 *
1279 * If the page follows the standard page layout, with a PageHeader and unused
1280 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1281 * the unused space to be left out from the WAL record, making it smaller.
1282 */
1284log_newpage_buffer(Buffer buffer, bool page_std)
1285{
1286 Page page = BufferGetPage(buffer);
1287 RelFileLocator rlocator;
1288 ForkNumber forknum;
1289 BlockNumber blkno;
1290
1291 /* Shared buffers should be modified in a critical section. */
1293
1294 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1295
1296 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1297}
1298
1299/*
1300 * WAL-log a range of blocks in a relation.
1301 *
1302 * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
1303 * written to the WAL. If the range is large, this is done in multiple WAL
1304 * records.
1305 *
1306 * If all pages follow the standard page layout, with a PageHeader and unused
1307 * space between pd_lower and pd_upper, set 'page_std' to true. That allows
1308 * the unused space to be left out from the WAL records, making them smaller.
1309 *
1310 * NOTE: This function acquires exclusive-locks on the pages. Typically, this
1311 * is used on a newly-built relation, and the caller is holding a
1312 * AccessExclusiveLock on it, so no other backend can be accessing it at the
1313 * same time. If that's not the case, you must ensure that this does not
1314 * cause a deadlock through some other means.
1315 */
1316void
1319 bool page_std)
1320{
1321 int flags;
1322 BlockNumber blkno;
1323
1324 flags = REGBUF_FORCE_IMAGE;
1325 if (page_std)
1326 flags |= REGBUF_STANDARD;
1327
1328 /*
1329 * Iterate over all the pages in the range. They are collected into
1330 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1331 * for each batch.
1332 */
1334
1335 blkno = startblk;
1336 while (blkno < endblk)
1337 {
1340 int nbufs;
1341 int i;
1342
1344
1345 /* Collect a batch of blocks. */
1346 nbufs = 0;
1347 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1348 {
1349 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1350 RBM_NORMAL, NULL);
1351
1353
1354 /*
1355 * Completely empty pages are not WAL-logged. Writing a WAL record
1356 * would change the LSN, and we don't want that. We want the page
1357 * to stay empty.
1358 */
1360 bufpack[nbufs++] = buf;
1361 else
1363 blkno++;
1364 }
1365
1366 /* Nothing more to do if all remaining blocks were empty. */
1367 if (nbufs == 0)
1368 break;
1369
1370 /* Write WAL record for this batch. */
1372
1374 for (i = 0; i < nbufs; i++)
1375 {
1377 XLogRegisterBuffer(i, bufpack[i], flags);
1378 }
1379
1381
1382 for (i = 0; i < nbufs; i++)
1384
1386
1387 for (i = 0; i < nbufs; i++)
1389 }
1390}
1391
1392/*
1393 * Allocate working buffers needed for WAL record construction.
1394 */
1395void
1396InitXLogInsert(void)
1397{
1398#ifdef USE_ASSERT_CHECKING
1399
1400 /*
1401 * Check that any records assembled can be decoded. This is capped based
1402 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1403 * addend covers the larger allocate_recordbuf() demand. This code path
1404 * is called once per backend, more than enough for this check.
1405 */
1406 size_t max_required =
1408
1410#endif
1411
1412 /* Initialize the working areas */
1413 if (xloginsert_cxt == NULL)
1414 {
1416 "WAL record construction",
1418 }
1419
1420 if (registered_buffers == NULL)
1421 {
1426 }
1427 if (rdatas == NULL)
1428 {
1430 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1432 }
1433
1434 /*
1435 * Allocate a buffer to hold the header information for a WAL record.
1436 */
1437 if (hdr_scratch == NULL)
1440}
uint32 BlockNumber
Definition block.h:31
int Buffer
Definition buf.h:23
bool BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
Definition bufmgr.c:3087
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition bufmgr.c:4467
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3114
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5603
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3147
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition bufmgr.c:926
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:468
@ BUFFER_LOCK_SHARE_EXCLUSIVE
Definition bufmgr.h:217
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:222
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:334
@ RBM_NORMAL
Definition bufmgr.h:46
char PageData
Definition bufpage.h:80
static bool PageIsNew(const PageData *page)
Definition bufpage.h:258
#define SizeOfPageHeaderData
Definition bufpage.h:241
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:416
PageData * Page
Definition bufpage.h:81
static XLogRecPtr PageGetLSN(const PageData *page)
Definition bufpage.h:410
uint8_t uint8
Definition c.h:622
#define PG_UINT32_MAX
Definition c.h:674
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
int32_t int32
Definition c.h:620
uint64_t uint64
Definition c.h:625
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int int errdetail_internal(const char *fmt,...) pg_attribute_printf(1
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
volatile uint32 CritSectionCount
Definition globals.c:45
static struct @177 value
int j
Definition isn.c:78
int i
Definition isn.c:77
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
MemoryContext TopMemoryContext
Definition mcxt.c:166
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AllocSizeIsValid(size)
Definition memutils.h:42
#define IsBootstrapProcessingMode()
Definition miscadmin.h:495
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
#define END_CRIT_SECTION()
Definition miscadmin.h:154
Datum lower(PG_FUNCTION_ARGS)
Datum upper(PG_FUNCTION_ARGS)
ReplOriginXactState replorigin_xact_state
Definition origin.c:168
#define InvalidReplOriginId
Definition origin.h:33
#define XLOG_FPI
Definition pg_control.h:83
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:82
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:173
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
const void size_t len
const void * data
const PGLZ_Strategy *const PGLZ_strategy_default
int32 pglz_compress(const char *source, int32 slen, char *dest, const PGLZ_Strategy *strategy)
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
static int fb(int x)
#define RelationNeedsWAL(relation)
Definition rel.h:639
#define RelationIsPermanent(relation)
Definition rel.h:628
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition relpath.h:56
uint8 RmgrId
Definition rmgr.h:11
char data[BLCKSZ]
Definition c.h:1204
Form_pg_class rd_rel
Definition rel.h:111
ReplOriginId origin
Definition origin.h:45
const void * data
struct XLogRecData * next
RelFileLocator rlocator
Definition xloginsert.c:75
Datum batch_start(PG_FUNCTION_ARGS)
Definition test_aio.c:974
TransactionId GetTopTransactionIdIfAny(void)
Definition xact.c:443
TransactionId GetCurrentTransactionIdIfAny(void)
Definition xact.c:473
bool IsSubxactTopXidLogPending(void)
Definition xact.c:561
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6963
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:784
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6933
static XLogRecPtr RedoRecPtr
Definition xlog.c:280
static bool doPageWrites
Definition xlog.c:293
int wal_compression
Definition xlog.c:131
bool XLogInsertAllowed(void)
Definition xlog.c:6885
XLogRecPtr GetFakeLSNForUnloggedRel(void)
Definition xlog.c:5006
XLogRecPtr GetXLogInsertEndRecPtr(void)
Definition xlog.c:10124
XLogRecPtr XLogAssignLSN(void)
Definition xlog.c:8655
bool * wal_consistency_checking
Definition xlog.c:133
#define XLOG_INCLUDE_ORIGIN
Definition xlog.h:166
WalCompression
Definition xlog.h:83
@ WAL_COMPRESSION_NONE
Definition xlog.h:84
@ WAL_COMPRESSION_LZ4
Definition xlog.h:86
@ WAL_COMPRESSION_PGLZ
Definition xlog.h:85
@ WAL_COMPRESSION_ZSTD
Definition xlog.h:87
#define SizeOfXLogLongPHD
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define FirstNormalUnloggedLSN
Definition xlogdefs.h:37
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
static XLogRecData * mainrdata_head
Definition xloginsert.c:101
static bool XLogCompressBackupBlock(const PageData *page, uint16 hole_offset, uint16 hole_length, void *dest, uint16 *dlen)
XLogRecPtr XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
Definition xloginsert.c:547
static int max_registered_buffers
Definition xloginsert.c:93
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:482
static uint8 curinsert_flags
Definition xloginsert.c:106
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition xloginsert.c:413
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:372
static uint64 mainrdata_len
Definition xloginsert.c:103
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
static bool begininsert_called
Definition xloginsert.c:135
static int max_registered_block_id
Definition xloginsert.c:94
XLogRecPtr log_newpage(RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blkno, Page page, bool page_std)
void InitXLogInsert(void)
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:464
static int num_rdatas
Definition xloginsert.c:132
void log_newpages(RelFileLocator *rlocator, ForkNumber forknum, int num_pages, BlockNumber *blknos, Page *pages, bool page_std)
void XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum, BlockNumber blknum, const PageData *page, uint8 flags)
Definition xloginsert.c:317
static XLogRecData * mainrdata_last
Definition xloginsert.c:102
static MemoryContext xloginsert_cxt
Definition xloginsert.c:138
void log_newpage_range(Relation rel, ForkNumber forknum, BlockNumber startblk, BlockNumber endblk, bool page_std)
void XLogResetInsertion(void)
Definition xloginsert.c:226
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
static XLogRecData hdr_rdt
Definition xloginsert.c:116
static XLogRecData * XLogRecordAssemble(RmgrId rmid, uint8 info, XLogRecPtr RedoRecPtr, bool doPageWrites, XLogRecPtr *fpw_lsn, int *num_fpi, uint64 *fpi_bytes, bool *topxid_included)
Definition xloginsert.c:621
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition xloginsert.c:246
static char * hdr_scratch
Definition xloginsert.c:117
static XLogRecData * rdatas
Definition xloginsert.c:131
void XLogBeginInsert(void)
Definition xloginsert.c:153
XLogRecPtr XLogGetFakeLSN(Relation rel)
Definition xloginsert.c:562
void XLogEnsureRecordSpace(int max_block_id, int ndatas)
Definition xloginsert.c:179
#define COMPRESS_BUFSIZE
Definition xloginsert.c:65
static registered_buffer * registered_buffers
Definition xloginsert.c:92
static int max_rdatas
Definition xloginsert.c:133
#define HEADER_SCRATCH_SIZE
Definition xloginsert.c:122
#define REGBUF_NO_CHANGE
Definition xloginsert.h:37
#define REGBUF_STANDARD
Definition xloginsert.h:35
#define XLR_NORMAL_MAX_BLOCK_ID
Definition xloginsert.h:28
#define REGBUF_FORCE_IMAGE
Definition xloginsert.h:32
#define XLR_NORMAL_RDATAS
Definition xloginsert.h:29
#define REGBUF_NO_IMAGE
Definition xloginsert.h:33
#define REGBUF_KEEP_DATA
Definition xloginsert.h:36
#define REGBUF_WILL_INIT
Definition xloginsert.h:34
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define SizeOfXLogRecordBlockImageHeader
Definition xlogrecord.h:153
#define XLogRecordMaxSize
Definition xlogrecord.h:74
#define BKPIMAGE_COMPRESS_ZSTD
Definition xlogrecord.h:162
#define BKPBLOCK_HAS_DATA
Definition xlogrecord.h:198
#define BKPIMAGE_APPLY
Definition xlogrecord.h:158
#define BKPIMAGE_HAS_HOLE
Definition xlogrecord.h:157
#define XLR_BLOCK_ID_DATA_LONG
Definition xlogrecord.h:242
#define BKPBLOCK_WILL_INIT
Definition xlogrecord.h:199
#define XLR_RMGR_INFO_MASK
Definition xlogrecord.h:63
#define BKPIMAGE_COMPRESS_LZ4
Definition xlogrecord.h:161
#define XLR_BLOCK_ID_TOPLEVEL_XID
Definition xlogrecord.h:244
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define SizeOfXLogRecordBlockCompressHeader
Definition xlogrecord.h:177
#define BKPBLOCK_SAME_REL
Definition xlogrecord.h:200
#define XLR_SPECIAL_REL_UPDATE
Definition xlogrecord.h:82
#define SizeOfXLogRecordBlockHeader
Definition xlogrecord.h:115
#define BKPIMAGE_COMPRESS_PGLZ
Definition xlogrecord.h:160
#define XLR_BLOCK_ID_ORIGIN
Definition xlogrecord.h:243
#define BKPBLOCK_HAS_IMAGE
Definition xlogrecord.h:197
#define XLR_CHECK_CONSISTENCY
Definition xlogrecord.h:91

◆ LZ4_MAX_BLCKSZ

#define LZ4_MAX_BLCKSZ   0

Definition at line 53 of file xloginsert.c.

◆ PGLZ_MAX_BLCKSZ

#define PGLZ_MAX_BLCKSZ   PGLZ_MAX_OUTPUT(BLCKSZ)

Definition at line 62 of file xloginsert.c.

◆ SizeOfXlogOrigin

#define SizeOfXlogOrigin   (sizeof(ReplOriginId) + sizeof(char))

Definition at line 119 of file xloginsert.c.

◆ SizeOfXLogTransactionId

#define SizeOfXLogTransactionId   (sizeof(TransactionId) + sizeof(char))

Definition at line 120 of file xloginsert.c.

◆ ZSTD_MAX_BLCKSZ

#define ZSTD_MAX_BLCKSZ   0

Definition at line 59 of file xloginsert.c.

Function Documentation

◆ InitXLogInsert()

void InitXLogInsert ( void  )

Definition at line 1397 of file xloginsert.c.

1398{
1399#ifdef USE_ASSERT_CHECKING
1400
1401 /*
1402 * Check that any records assembled can be decoded. This is capped based
1403 * on what XLogReader would require at its maximum bound. The XLOG_BLCKSZ
1404 * addend covers the larger allocate_recordbuf() demand. This code path
1405 * is called once per backend, more than enough for this check.
1406 */
1407 size_t max_required =
1409
1411#endif
1412
1413 /* Initialize the working areas */
1414 if (xloginsert_cxt == NULL)
1415 {
1417 "WAL record construction",
1419 }
1420
1421 if (registered_buffers == NULL)
1422 {
1427 }
1428 if (rdatas == NULL)
1429 {
1431 sizeof(XLogRecData) * XLR_NORMAL_RDATAS);
1433 }
1434
1435 /*
1436 * Allocate a buffer to hold the header information for a WAL record.
1437 */
1438 if (hdr_scratch == NULL)
1441}

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AllocSizeIsValid, Assert, DecodeXLogRecordRequiredSpace(), fb(), hdr_scratch, HEADER_SCRATCH_SIZE, max_rdatas, max_registered_buffers, MemoryContextAlloc(), MemoryContextAllocZero(), rdatas, registered_buffers, TopMemoryContext, xloginsert_cxt, XLogRecordMaxSize, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by BaseInit().

◆ log_newpage()

XLogRecPtr log_newpage ( RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blkno,
Page  page,
bool  page_std 
)

Definition at line 1191 of file xloginsert.c.

1193{
1194 int flags;
1196
1197 flags = REGBUF_FORCE_IMAGE;
1198 if (page_std)
1199 flags |= REGBUF_STANDARD;
1200
1202 XLogRegisterBlock(0, rlocator, forknum, blkno, page, flags);
1204
1205 /*
1206 * The page may be uninitialized. If so, we can't set the LSN because that
1207 * would corrupt the page.
1208 */
1209 if (!PageIsNew(page))
1210 {
1211 PageSetLSN(page, recptr);
1212 }
1213
1214 return recptr;
1215}

References fb(), PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogInsert(), and XLogRegisterBlock().

Referenced by _hash_alloc_buckets(), _hash_init(), and log_newpage_buffer().

◆ log_newpage_buffer()

XLogRecPtr log_newpage_buffer ( Buffer  buffer,
bool  page_std 
)

Definition at line 1285 of file xloginsert.c.

1286{
1287 Page page = BufferGetPage(buffer);
1288 RelFileLocator rlocator;
1289 ForkNumber forknum;
1290 BlockNumber blkno;
1291
1292 /* Shared buffers should be modified in a critical section. */
1294
1295 BufferGetTag(buffer, &rlocator, &forknum, &blkno);
1296
1297 return log_newpage(&rlocator, forknum, blkno, page, page_std);
1298}

References Assert, BufferGetPage(), BufferGetTag(), CritSectionCount, and log_newpage().

Referenced by brin_initialize_empty_new_buffer(), brinbuildempty(), FreeSpaceMapPrepareTruncateRel(), ginbuildempty(), gistbuildempty(), heap_force_common(), ProcessSingleRelationFork(), RelationCopyStorageUsingBuffer(), and visibilitymap_prepare_truncate().

◆ log_newpage_range()

void log_newpage_range ( Relation  rel,
ForkNumber  forknum,
BlockNumber  startblk,
BlockNumber  endblk,
bool  page_std 
)

Definition at line 1318 of file xloginsert.c.

1321{
1322 int flags;
1323 BlockNumber blkno;
1324
1325 flags = REGBUF_FORCE_IMAGE;
1326 if (page_std)
1327 flags |= REGBUF_STANDARD;
1328
1329 /*
1330 * Iterate over all the pages in the range. They are collected into
1331 * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
1332 * for each batch.
1333 */
1335
1336 blkno = startblk;
1337 while (blkno < endblk)
1338 {
1341 int nbufs;
1342 int i;
1343
1345
1346 /* Collect a batch of blocks. */
1347 nbufs = 0;
1348 while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
1349 {
1350 Buffer buf = ReadBufferExtended(rel, forknum, blkno,
1351 RBM_NORMAL, NULL);
1352
1354
1355 /*
1356 * Completely empty pages are not WAL-logged. Writing a WAL record
1357 * would change the LSN, and we don't want that. We want the page
1358 * to stay empty.
1359 */
1361 bufpack[nbufs++] = buf;
1362 else
1364 blkno++;
1365 }
1366
1367 /* Nothing more to do if all remaining blocks were empty. */
1368 if (nbufs == 0)
1369 break;
1370
1371 /* Write WAL record for this batch. */
1373
1375 for (i = 0; i < nbufs; i++)
1376 {
1378 XLogRegisterBuffer(i, bufpack[i], flags);
1379 }
1380
1382
1383 for (i = 0; i < nbufs; i++)
1385
1387
1388 for (i = 0; i < nbufs; i++)
1390 }
1391}

References buf, BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), CHECK_FOR_INTERRUPTS, END_CRIT_SECTION, fb(), i, LockBuffer(), MarkBufferDirty(), PageIsNew(), PageSetLSN(), RBM_NORMAL, ReadBufferExtended(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBuffer(), and XLR_MAX_BLOCK_ID.

Referenced by ginbuild(), gistbuild(), smgrDoPendingSyncs(), and spgbuild().

◆ log_newpages()

void log_newpages ( RelFileLocator rlocator,
ForkNumber  forknum,
int  num_pages,
BlockNumber blknos,
Page pages,
bool  page_std 
)

Definition at line 1223 of file xloginsert.c.

1225{
1226 int flags;
1228 int i;
1229 int j;
1230
1231 flags = REGBUF_FORCE_IMAGE;
1232 if (page_std)
1233 flags |= REGBUF_STANDARD;
1234
1235 /*
1236 * Iterate over all the pages. They are collected into batches of
1237 * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each
1238 * batch.
1239 */
1241
1242 i = 0;
1243 while (i < num_pages)
1244 {
1245 int batch_start = i;
1246 int nbatch;
1247
1249
1250 nbatch = 0;
1251 while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
1252 {
1253 XLogRegisterBlock(nbatch, rlocator, forknum, blknos[i], pages[i], flags);
1254 i++;
1255 nbatch++;
1256 }
1257
1259
1260 for (j = batch_start; j < i; j++)
1261 {
1262 /*
1263 * The page may be uninitialized. If so, we can't set the LSN
1264 * because that would corrupt the page.
1265 */
1266 if (!PageIsNew(pages[j]))
1267 {
1268 PageSetLSN(pages[j], recptr);
1269 }
1270 }
1271 }
1272}

References batch_start(), fb(), i, j, PageIsNew(), PageSetLSN(), REGBUF_FORCE_IMAGE, REGBUF_STANDARD, XLOG_FPI, XLogBeginInsert(), XLogEnsureRecordSpace(), XLogInsert(), XLogRegisterBlock(), and XLR_MAX_BLOCK_ID.

Referenced by smgr_bulk_flush().

◆ XLogBeginInsert()

void XLogBeginInsert ( void  )

Definition at line 153 of file xloginsert.c.

154{
157 Assert(mainrdata_len == 0);
158
159 /* cross-check on whether we should be here or not */
160 if (!XLogInsertAllowed())
161 elog(ERROR, "cannot make new WAL entries during recovery");
162
164 elog(ERROR, "XLogBeginInsert was already called");
165
166 begininsert_called = true;
167}

References Assert, begininsert_called, elog, ERROR, mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, and XLogInsertAllowed().

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), UpdateFullPageWrites(), 
vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogChecksums(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogCheckBufferNeedsBackup()

bool XLogCheckBufferNeedsBackup ( Buffer  buffer)

Definition at line 1104 of file xloginsert.c.

1105{
1107 bool doPageWrites;
1108 Page page;
1109
1111
1112 page = BufferGetPage(buffer);
1113
1114 if (doPageWrites && PageGetLSN(page) <= RedoRecPtr)
1115 return true; /* buffer requires backup */
1116
1117 return false; /* buffer does not need to be backed up */
1118}

References BufferGetPage(), doPageWrites, GetFullPageWriteInfo(), PageGetLSN(), and RedoRecPtr.

Referenced by heap_page_will_freeze(), heap_page_will_set_vm(), and log_heap_update().

◆ XLogCompressBackupBlock()

static bool XLogCompressBackupBlock ( const PageData page,
uint16  hole_offset,
uint16  hole_length,
void dest,
uint16 dlen 
)
static

Definition at line 1021 of file xloginsert.c.

1023{
1024 int32 orig_len = BLCKSZ - hole_length;
1025 int32 len = -1;
1026 int32 extra_bytes = 0;
1027 const void *source;
1028 PGAlignedBlock tmp;
1029
1030 if (hole_length != 0)
1031 {
1032 /* must skip the hole */
1033 memcpy(tmp.data, page, hole_offset);
1034 memcpy(tmp.data + hole_offset,
1035 page + (hole_offset + hole_length),
1036 BLCKSZ - (hole_length + hole_offset));
1037 source = tmp.data;
1038
1039 /*
1040 * Extra data needs to be stored in WAL record for the compressed
1041 * version of block image if the hole exists.
1042 */
1044 }
1045 else
1046 source = page;
1047
1049 {
1052 break;
1053
1055#ifdef USE_LZ4
1058 if (len <= 0)
1059 len = -1; /* failure */
1060#else
1061 elog(ERROR, "LZ4 is not supported by this build");
1062#endif
1063 break;
1064
1066#ifdef USE_ZSTD
1069 if (ZSTD_isError(len))
1070 len = -1; /* failure */
1071#else
1072 elog(ERROR, "zstd is not supported by this build");
1073#endif
1074 break;
1075
1077 Assert(false); /* cannot happen */
1078 break;
1079 /* no default case, so that compiler will warn */
1080 }
1081
1082 /*
1083 * We recheck the actual size even if compression reports success and see
1084 * if the number of bytes saved by compression is larger than the length
1085 * of extra data needed for the compressed version of block image.
1086 */
1087 if (len >= 0 &&
1089 {
1090 *dlen = (uint16) len; /* successful compression */
1091 return true;
1092 }
1093 return false;
1094}

References Assert, COMPRESS_BUFSIZE, PGAlignedBlock::data, elog, ERROR, fb(), len, memcpy(), pglz_compress(), PGLZ_strategy_default, SizeOfXLogRecordBlockCompressHeader, source, wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, and WAL_COMPRESSION_ZSTD.

Referenced by XLogRecordAssemble().

◆ XLogEnsureRecordSpace()

void XLogEnsureRecordSpace ( int  max_block_id,
int  ndatas 
)

Definition at line 179 of file xloginsert.c.

180{
181 int nbuffers;
182
183 /*
184 * This must be called before entering a critical section, because
185 * allocating memory inside a critical section can fail. repalloc() will
186 * check the same, but better to check it here too so that we fail
187 * consistently even if the arrays happen to be large enough already.
188 */
190
191 /* the minimum values can't be decreased */
192 if (max_block_id < XLR_NORMAL_MAX_BLOCK_ID)
193 max_block_id = XLR_NORMAL_MAX_BLOCK_ID;
196
197 if (max_block_id > XLR_MAX_BLOCK_ID)
198 elog(ERROR, "maximum number of WAL record block references exceeded");
199 nbuffers = max_block_id + 1;
200
201 if (nbuffers > max_registered_buffers)
202 {
204 repalloc(registered_buffers, sizeof(registered_buffer) * nbuffers);
205
206 /*
207 * At least the padding bytes in the structs must be zeroed, because
208 * they are included in WAL data, but initialize it all for tidiness.
209 */
211 (nbuffers - max_registered_buffers) * sizeof(registered_buffer));
212 max_registered_buffers = nbuffers;
213 }
214
215 if (ndatas > max_rdatas)
216 {
219 }
220}

References Assert, CritSectionCount, elog, ERROR, fb(), max_rdatas, max_registered_buffers, MemSet, rdatas, registered_buffers, repalloc(), XLR_MAX_BLOCK_ID, XLR_NORMAL_MAX_BLOCK_ID, and XLR_NORMAL_RDATAS.

Referenced by _hash_freeovflpage(), _hash_squeezebucket(), EndPrepare(), gistplacetopage(), log_newpage_range(), log_newpages(), and shiftList().

◆ XLogGetFakeLSN()

XLogRecPtr XLogGetFakeLSN ( Relation  rel)

Definition at line 562 of file xloginsert.c.

563{
564 if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
565 {
566 /*
567 * Temporary relations are only accessible in our session, so a simple
568 * backend-local counter will do.
569 */
570 static XLogRecPtr counter = FirstNormalUnloggedLSN;
571
572 return counter++;
573 }
574 else if (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
575 {
576 /*
577 * Unlogged relations are accessible from other backends, and survive
578 * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us.
579 */
581 }
582 else
583 {
584 /*
585 * WAL-logging on this relation will start after commit, so its LSNs
586 * must be distinct numbers smaller than the LSN at the next commit.
587 * Emit a dummy WAL record if insert-LSN hasn't advanced after the
588 * last call.
589 */
592
595
596 /* No need for an actual record if we already have a distinct LSN */
599
601 return currlsn;
602 }
603}

References Assert, fb(), FirstNormalUnloggedLSN, GetFakeLSNForUnloggedRel(), GetXLogInsertEndRecPtr(), InvalidXLogRecPtr, RelationData::rd_rel, RelationIsPermanent, RelationNeedsWAL, XLogAssignLSN(), and XLogRecPtrIsValid.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistvacuumpage(), gistvacuumscan(), hashbucketcleanup(), and hashbulkdelete().

◆ XLogInsert()

XLogRecPtr XLogInsert ( RmgrId  rmid,
uint8  info 
)

Definition at line 482 of file xloginsert.c.

483{
485
486 /* XLogBeginInsert() must have been called. */
488 elog(ERROR, "XLogBeginInsert was not called");
489
490 /*
491 * The caller can set rmgr bits, XLR_SPECIAL_REL_UPDATE and
492 * XLR_CHECK_CONSISTENCY; the rest are reserved for use by me.
493 */
494 if ((info & ~(XLR_RMGR_INFO_MASK |
497 elog(PANIC, "invalid xlog info mask %02X", info);
498
499 TRACE_POSTGRESQL_WAL_INSERT(rmid, info);
500
501 /*
502 * In bootstrap mode, we don't actually log anything but XLOG resources;
503 * return a phony record pointer.
504 */
505 if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
506 {
508 EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
509 return EndPos;
510 }
511
512 do
513 {
515 bool doPageWrites;
516 bool topxid_included = false;
519 int num_fpi = 0;
520 uint64 fpi_bytes = 0;
521
522 /*
523 * Get values needed to decide whether to do full-page writes. Since
524 * we don't yet have an insertion lock, these could change under us,
525 * but XLogInsertRecord will recheck them once it has a lock.
526 */
528
532
535 } while (!XLogRecPtrIsValid(EndPos));
536
538
539 return EndPos;
540}

References begininsert_called, curinsert_flags, doPageWrites, elog, ERROR, fb(), GetFullPageWriteInfo(), IsBootstrapProcessingMode, PANIC, RedoRecPtr, SizeOfXLogLongPHD, XLogInsertRecord(), XLogRecordAssemble(), XLogRecPtrIsValid, XLogResetInsertion(), XLR_CHECK_CONSISTENCY, XLR_RMGR_INFO_MASK, and XLR_SPECIAL_REL_UPDATE.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_newpage(), log_newpage_range(), log_newpages(), log_smgrcreate(), log_split_page(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), RequestXLogSwitch(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), UpdateFullPageWrites(), 
vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogChecksums(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), XLogSaveBufferForHint(), XLogSimpleInsertInt64(), and xlogVacuumPage().

◆ XLogRecordAssemble()

static XLogRecData * XLogRecordAssemble ( RmgrId  rmid,
uint8  info,
XLogRecPtr  RedoRecPtr,
bool  doPageWrites,
XLogRecPtr fpw_lsn,
int num_fpi,
uint64 fpi_bytes,
bool topxid_included 
)
static

Definition at line 621 of file xloginsert.c.

625{
627 uint64 total_len = 0;
628 int block_id;
633 char *scratch = hdr_scratch;
634
635 /*
636 * Note: this function can be called multiple times for the same record.
637 * All the modifications we do to the rdata chains below must handle that.
638 */
639
640 /* The record begins with the fixed-size header */
643
644 hdr_rdt.next = NULL;
647
648 /*
649 * Enforce consistency checks for this record if user is looking for it.
 650 * Do this at the beginning of this routine to give the possibility
651 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
652 * a record.
653 */
654 if (wal_consistency_checking[rmid])
655 info |= XLR_CHECK_CONSISTENCY;
656
657 /*
658 * Make an rdata chain containing all the data portions of all block
659 * references. This includes the data for full-page images. Also append
660 * the headers for the block references in the scratch buffer.
661 */
664 {
666 bool needs_backup;
667 bool needs_data;
671 bool samerel;
672 bool is_compressed = false;
673 bool include_image;
674
675 if (!regbuf->in_use)
676 continue;
677
678 /* Determine if this block needs to be backed up */
679 if (regbuf->flags & REGBUF_FORCE_IMAGE)
680 needs_backup = true;
681 else if (regbuf->flags & REGBUF_NO_IMAGE)
682 needs_backup = false;
683 else if (!doPageWrites)
684 needs_backup = false;
685 else
686 {
687 /*
688 * We assume page LSN is first data on *every* page that can be
689 * passed to XLogInsert, whether it has the standard page layout
690 * or not.
691 */
692 XLogRecPtr page_lsn = PageGetLSN(regbuf->page);
693
694 needs_backup = (page_lsn <= RedoRecPtr);
695 if (!needs_backup)
696 {
697 if (!XLogRecPtrIsValid(*fpw_lsn) || page_lsn < *fpw_lsn)
698 *fpw_lsn = page_lsn;
699 }
700 }
701
 702 /* Determine if the buffer data needs to be included */
703 if (regbuf->rdata_len == 0)
704 needs_data = false;
705 else if ((regbuf->flags & REGBUF_KEEP_DATA) != 0)
706 needs_data = true;
707 else
709
710 bkpb.id = block_id;
711 bkpb.fork_flags = regbuf->forkno;
712 bkpb.data_length = 0;
713
714 if ((regbuf->flags & REGBUF_WILL_INIT) == REGBUF_WILL_INIT)
715 bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
716
717 /*
718 * If needs_backup is true or WAL checking is enabled for current
719 * resource manager, log a full-page write for the current block.
720 */
722
723 if (include_image)
724 {
725 const PageData *page = regbuf->page;
727
728 /*
729 * The page needs to be backed up, so calculate its hole length
730 * and offset.
731 */
732 if (regbuf->flags & REGBUF_STANDARD)
733 {
734 /* Assume we can omit data between pd_lower and pd_upper */
735 uint16 lower = ((const PageHeaderData *) page)->pd_lower;
736 uint16 upper = ((const PageHeaderData *) page)->pd_upper;
737
739 upper > lower &&
740 upper <= BLCKSZ)
741 {
742 bimg.hole_offset = lower;
743 cbimg.hole_length = upper - lower;
744 }
745 else
746 {
747 /* No "hole" to remove */
748 bimg.hole_offset = 0;
749 cbimg.hole_length = 0;
750 }
751 }
752 else
753 {
754 /* Not a standard page header, don't try to eliminate "hole" */
755 bimg.hole_offset = 0;
756 cbimg.hole_length = 0;
757 }
758
759 /*
760 * Try to compress a block image if wal_compression is enabled
761 */
763 {
765 XLogCompressBackupBlock(page, bimg.hole_offset,
766 cbimg.hole_length,
767 regbuf->compressed_page,
769 }
770
771 /*
772 * Fill in the remaining fields in the XLogRecordBlockHeader
773 * struct
774 */
775 bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
776
777 /* Report a full page image constructed for the WAL record */
778 *num_fpi += 1;
779
780 /*
781 * Construct XLogRecData entries for the page content.
782 */
783 rdt_datas_last->next = &regbuf->bkp_rdatas[0];
785
786 bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
787
788 /*
789 * If WAL consistency checking is enabled for the resource manager
790 * of this WAL record, a full-page image is included in the record
791 * for the block modified. During redo, the full-page is replayed
792 * only if BKPIMAGE_APPLY is set.
793 */
794 if (needs_backup)
795 bimg.bimg_info |= BKPIMAGE_APPLY;
796
797 if (is_compressed)
798 {
799 /* The current compression is stored in the WAL record */
800 bimg.length = compressed_len;
801
802 /* Set the compression method used for this block */
804 {
806 bimg.bimg_info |= BKPIMAGE_COMPRESS_PGLZ;
807 break;
808
810#ifdef USE_LZ4
811 bimg.bimg_info |= BKPIMAGE_COMPRESS_LZ4;
812#else
813 elog(ERROR, "LZ4 is not supported by this build");
814#endif
815 break;
816
818#ifdef USE_ZSTD
819 bimg.bimg_info |= BKPIMAGE_COMPRESS_ZSTD;
820#else
821 elog(ERROR, "zstd is not supported by this build");
822#endif
823 break;
824
826 Assert(false); /* cannot happen */
827 break;
828 /* no default case, so that compiler will warn */
829 }
830
831 rdt_datas_last->data = regbuf->compressed_page;
833 }
834 else
835 {
836 bimg.length = BLCKSZ - cbimg.hole_length;
837
838 if (cbimg.hole_length == 0)
839 {
840 rdt_datas_last->data = page;
841 rdt_datas_last->len = BLCKSZ;
842 }
843 else
844 {
845 /* must skip the hole */
846 rdt_datas_last->data = page;
847 rdt_datas_last->len = bimg.hole_offset;
848
849 rdt_datas_last->next = &regbuf->bkp_rdatas[1];
851
852 rdt_datas_last->data =
853 page + (bimg.hole_offset + cbimg.hole_length);
854 rdt_datas_last->len =
855 BLCKSZ - (bimg.hole_offset + cbimg.hole_length);
856 }
857 }
858
859 total_len += bimg.length;
860
861 /* Track the WAL full page images in bytes */
862 *fpi_bytes += bimg.length;
863 }
864
865 if (needs_data)
866 {
867 /*
868 * When copying to XLogRecordBlockHeader, the length is narrowed
869 * to an uint16. Double-check that it is still correct.
870 */
871 Assert(regbuf->rdata_len <= UINT16_MAX);
872
873 /*
874 * Link the caller-supplied rdata chain for this buffer to the
875 * overall list.
876 */
877 bkpb.fork_flags |= BKPBLOCK_HAS_DATA;
878 bkpb.data_length = (uint16) regbuf->rdata_len;
879 total_len += regbuf->rdata_len;
880
881 rdt_datas_last->next = regbuf->rdata_head;
882 rdt_datas_last = regbuf->rdata_tail;
883 }
884
885 if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
886 {
887 samerel = true;
888 bkpb.fork_flags |= BKPBLOCK_SAME_REL;
889 }
890 else
891 samerel = false;
893
894 /* Ok, copy the header to the scratch buffer */
897 if (include_image)
898 {
901 if (cbimg.hole_length != 0 && is_compressed)
902 {
906 }
907 }
908 if (!samerel)
909 {
910 memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
911 scratch += sizeof(RelFileLocator);
912 }
913 memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
914 scratch += sizeof(BlockNumber);
915 }
916
917 /* followed by the record's origin, if any */
920 {
921 *(scratch++) = (char) XLR_BLOCK_ID_ORIGIN;
924 }
925
926 /* followed by toplevel XID, if not already included in previous record */
928 {
930
931 /* Set the flag that the top xid is included in the WAL */
932 *topxid_included = true;
933
935 memcpy(scratch, &xid, sizeof(TransactionId));
936 scratch += sizeof(TransactionId);
937 }
938
939 /* followed by main data, if any */
940 if (mainrdata_len > 0)
941 {
942 if (mainrdata_len > 255)
943 {
945
948 (errmsg_internal("too much WAL data"),
949 errdetail_internal("Main data length is %" PRIu64 " bytes for a maximum of %u bytes.",
951 PG_UINT32_MAX)));
952
954 *(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
956 scratch += sizeof(uint32);
957 }
958 else
959 {
960 *(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
961 *(scratch++) = (uint8) mainrdata_len;
962 }
965 total_len += mainrdata_len;
966 }
968
970 total_len += hdr_rdt.len;
971
972 /*
973 * Calculate CRC of the data
974 *
975 * Note that the record header isn't added into the CRC initially since we
976 * don't know the prev-link yet. Thus, the CRC will represent the CRC of
977 * the whole record in the order: rdata, then backup blocks, then record
978 * header.
979 */
982 for (rdt = hdr_rdt.next; rdt != NULL; rdt = rdt->next)
983 COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
984
985 /*
986 * Ensure that the XLogRecord is not too large.
987 *
988 * XLogReader machinery is only able to handle records up to a certain
989 * size (ignoring machine resource limitations), so make sure that we will
990 * not emit records larger than the sizes advertised to be supported.
991 */
992 if (total_len > XLogRecordMaxSize)
994 (errmsg_internal("oversized WAL record"),
995 errdetail_internal("WAL record would be %" PRIu64 " bytes (of maximum %u bytes); rmid %u flags %u.",
996 total_len, XLogRecordMaxSize, rmid, info)));
997
998 /*
999 * Fill in the fields in the record header. Prev-link is filled in later,
1000 * once we know where in the WAL the record will be inserted. The CRC does
1001 * not include the record header yet.
1002 */
1004 rechdr->xl_tot_len = (uint32) total_len;
1005 rechdr->xl_info = info;
1006 rechdr->xl_rmid = rmid;
1007 rechdr->xl_prev = InvalidXLogRecPtr;
1008 rechdr->xl_crc = rdata_crc;
1009
1010 return &hdr_rdt;
1011}

References Assert, BKPBLOCK_HAS_DATA, BKPBLOCK_HAS_IMAGE, BKPBLOCK_SAME_REL, BKPBLOCK_WILL_INIT, BKPIMAGE_APPLY, BKPIMAGE_COMPRESS_LZ4, BKPIMAGE_COMPRESS_PGLZ, BKPIMAGE_COMPRESS_ZSTD, BKPIMAGE_HAS_HOLE, COMP_CRC32C, curinsert_flags, XLogRecData::data, doPageWrites, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), GetCurrentTransactionIdIfAny(), GetTopTransactionIdIfAny(), hdr_rdt, hdr_scratch, INIT_CRC32C, InvalidReplOriginId, InvalidXLogRecPtr, IsSubxactTopXidLogPending(), XLogRecData::len, lower(), mainrdata_head, mainrdata_last, mainrdata_len, max_registered_block_id, memcpy(), XLogRecData::next, ReplOriginXactState::origin, PageGetLSN(), PG_UINT32_MAX, RedoRecPtr, REGBUF_FORCE_IMAGE, REGBUF_KEEP_DATA, REGBUF_NO_IMAGE, REGBUF_STANDARD, REGBUF_WILL_INIT, registered_buffers, RelFileLocatorEquals, replorigin_xact_state, SizeOfPageHeaderData, SizeOfXLogRecord, SizeOfXLogRecordBlockCompressHeader, SizeOfXLogRecordBlockHeader, SizeOfXLogRecordBlockImageHeader, upper(), wal_compression, WAL_COMPRESSION_LZ4, WAL_COMPRESSION_NONE, WAL_COMPRESSION_PGLZ, WAL_COMPRESSION_ZSTD, wal_consistency_checking, XLOG_INCLUDE_ORIGIN, XLogCompressBackupBlock(), XLogRecordMaxSize, XLogRecPtrIsValid, XLR_BLOCK_ID_DATA_LONG, XLR_BLOCK_ID_DATA_SHORT, XLR_BLOCK_ID_ORIGIN, XLR_BLOCK_ID_TOPLEVEL_XID, and XLR_CHECK_CONSISTENCY.

Referenced by XLogInsert().

◆ XLogRegisterBlock()

void XLogRegisterBlock ( uint8  block_id,
RelFileLocator rlocator,
ForkNumber  forknum,
BlockNumber  blknum,
const PageData page,
uint8  flags 
)

Definition at line 317 of file xloginsert.c.

319{
321
323
326
328 elog(ERROR, "too many registered buffers");
329
331
332 regbuf->rlocator = *rlocator;
333 regbuf->forkno = forknum;
334 regbuf->block = blknum;
335 regbuf->page = page;
336 regbuf->flags = flags;
337 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
338 regbuf->rdata_len = 0;
339
340 /*
341 * Check that this page hasn't already been registered with some other
342 * block_id.
343 */
344#ifdef USE_ASSERT_CHECKING
345 {
346 int i;
347
348 for (i = 0; i < max_registered_block_id; i++)
349 {
351
352 if (i == block_id || !regbuf_old->in_use)
353 continue;
354
355 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
356 regbuf_old->forkno != regbuf->forkno ||
357 regbuf_old->block != regbuf->block);
358 }
359 }
360#endif
361
362 regbuf->in_use = true;
363}

References Assert, begininsert_called, elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, registered_buffers, RelFileLocatorEquals, and registered_buffer::rlocator.

Referenced by heap_inplace_update_and_unlock(), log_newpage(), and log_newpages().

◆ XLogRegisterBufData()

void XLogRegisterBufData ( uint8  block_id,
const void data,
uint32  len 
)

Definition at line 413 of file xloginsert.c.

414{
417
419
420 /* find the registered buffer struct */
422 if (!regbuf->in_use)
423 elog(ERROR, "no block with id %d registered with WAL insertion",
424 block_id);
425
426 /*
427 * Check against max_rdatas and ensure we do not register more data per
428 * buffer than can be handled by the physical data format; i.e. that
429 * regbuf->rdata_len does not grow beyond what
430 * XLogRecordBlockHeader->data_length can hold.
431 */
432 if (num_rdatas >= max_rdatas)
434 (errmsg_internal("too much WAL data"),
435 errdetail_internal("%d out of %d data segments are already in use.",
437 if (regbuf->rdata_len + len > UINT16_MAX || len > UINT16_MAX)
439 (errmsg_internal("too much WAL data"),
440 errdetail_internal("Registering more than maximum %u bytes allowed to block %u: current %u bytes, adding %u bytes.",
441 UINT16_MAX, block_id, regbuf->rdata_len, len)));
442
443 rdata = &rdatas[num_rdatas++];
444
445 rdata->data = data;
446 rdata->len = len;
447
448 regbuf->rdata_tail->next = rdata;
449 regbuf->rdata_tail = rdata;
450 regbuf->rdata_len += len;
451}

References Assert, begininsert_called, XLogRecData::data, data, elog, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, max_rdatas, num_rdatas, rdatas, and registered_buffers.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_squeezebucket(), brin_doinsert(), brin_doupdate(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), entryExecPlaceToPage(), GenericXLogFinish(), ginHeapTupleFastInsert(), ginVacuumPostingTreeLeaf(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), heap_inplace_update_and_unlock(), heap_insert(), heap_multi_insert(), log_heap_prune_and_freeze(), log_heap_update(), and writeListPage().

◆ XLogRegisterBuffer()

void XLogRegisterBuffer ( uint8  block_id,
Buffer  buffer,
uint8  flags 
)

Definition at line 246 of file xloginsert.c.

247{
249
250 /* NO_IMAGE doesn't make sense with FORCE_IMAGE */
251 Assert(!((flags & REGBUF_FORCE_IMAGE) && (flags & (REGBUF_NO_IMAGE))));
253
254 /*
255 * Ordinarily, the buffer should be exclusive-locked (or share-exclusive
256 * in case of hint bits) and marked dirty before we get here, otherwise we
257 * could end up violating one of the rules in access/transam/README.
258 *
259 * Some callers intentionally register a clean page and never update that
260 * page's LSN; in that case they can pass the flag REGBUF_NO_CHANGE to
261 * bypass these checks.
262 */
263#ifdef USE_ASSERT_CHECKING
264 if (!(flags & REGBUF_NO_CHANGE))
265 {
266 Assert(BufferIsDirty(buffer));
269 }
270#endif
271
273 {
275 elog(ERROR, "too many registered buffers");
277 }
278
280
281 BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
282 regbuf->page = BufferGetPage(buffer);
283 regbuf->flags = flags;
284 regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
285 regbuf->rdata_len = 0;
286
287 /*
288 * Check that this page hasn't already been registered with some other
289 * block_id.
290 */
291#ifdef USE_ASSERT_CHECKING
292 {
293 int i;
294
295 for (i = 0; i < max_registered_block_id; i++)
296 {
298
299 if (i == block_id || !regbuf_old->in_use)
300 continue;
301
302 Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
303 regbuf_old->forkno != regbuf->forkno ||
304 regbuf_old->block != regbuf->block);
305 }
306 }
307#endif
308
309 regbuf->in_use = true;
310}

References Assert, begininsert_called, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE_EXCLUSIVE, BufferGetPage(), BufferGetTag(), BufferIsDirty(), BufferIsLockedByMeInMode(), elog, ERROR, fb(), i, max_registered_block_id, max_registered_buffers, REGBUF_FORCE_IMAGE, REGBUF_NO_CHANGE, REGBUF_NO_IMAGE, registered_buffers, and RelFileLocatorEquals.

Referenced by _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), GenericXLogFinish(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistXLogDelete(), gistXLogPageDelete(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_prune_and_freeze(), log_heap_update(), log_newpage_range(), log_split_page(), moveLeafs(), nextval_internal(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), writeListPage(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ XLogRegisterData()

void XLogRegisterData ( const void data,
uint32  len 
)

Definition at line 372 of file xloginsert.c.

373{
375
377
378 if (num_rdatas >= max_rdatas)
380 (errmsg_internal("too much WAL data"),
381 errdetail_internal("%d out of %d data segments are already in use.",
383 rdata = &rdatas[num_rdatas++];
384
385 rdata->data = data;
386 rdata->len = len;
387
388 /*
389 * we use the mainrdata_last pointer to track the end of the chain, so no
390 * need to clear 'next' here.
391 */
392
395
397}

References Assert, begininsert_called, XLogRecData::data, data, ereport, errdetail_internal(), errmsg_internal(), ERROR, fb(), len, mainrdata_last, mainrdata_len, max_rdatas, XLogRecData::next, num_rdatas, and rdatas.

Referenced by _bt_allocbuf(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), AssignTransactionId(), brin_doinsert(), brin_doupdate(), brinbuild(), brinRevmapDesummarizeRange(), CreateCheckPoint(), CreateDatabaseUsingFileCopy(), CreateDirAndVersionFile(), CreateEndOfRecoveryRecord(), CreateOverwriteContrecordRecord(), createPostingTree(), CreateTableSpace(), do_pg_backup_stop(), doPickSplit(), DropTableSpace(), EndPrepare(), ExecuteTruncateGuts(), fill_seq_fork_with_data(), ginDeletePostingPage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginUpdateStats(), gistXLogDelete(), gistXLogPageDelete(), gistXLogPageReuse(), gistXLogSplit(), gistXLogUpdate(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), log_heap_new_cid(), log_heap_prune_and_freeze(), log_heap_update(), log_smgrcreate(), LogAccessExclusiveLocks(), LogCurrentRunningXacts(), logical_heap_rewrite_flush_mappings(), LogLogicalInvalidations(), LogLogicalMessage(), LogStandbyInvalidations(), movedb(), moveLeafs(), MultiXactIdCreateFromMembers(), nextval_internal(), pg_truncate_visibility_map(), RelationTruncate(), remove_dbtablespaces(), replorigin_advance(), replorigin_state_clear(), revmap_physical_extend(), SetSequence(), shiftList(), spgAddNodeAction(), spgSplitNodeAction(), test_custom_rmgrs_insert_wal_record(), UpdateFullPageWrites(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), write_logical_decoding_status_update_record(), write_relmap_file(), writeListPage(), 
WriteMTruncateXlogRec(), WriteTruncateXlogRec(), WriteTruncateXlogRec(), XactLogAbortRecord(), XactLogCommitRecord(), XLogAssignLSN(), XLogChecksums(), XLogPutNextOid(), XLogReportParameters(), XLogRestorePoint(), and XLogSimpleInsertInt64().

◆ XLogResetInsertion()

void XLogResetInsertion ( void  )

◆ XLogSaveBufferForHint()

XLogRecPtr XLogSaveBufferForHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 1134 of file xloginsert.c.

1135{
1137 XLogRecPtr lsn;
1139
1140 /* this also verifies that we hold an appropriate lock */
1141 Assert(BufferIsDirty(buffer));
1142
1143 /*
1144 * Update RedoRecPtr so that we can make the right decision. It's possible
1145 * that a new checkpoint will start just after GetRedoRecPtr(), but that
1146 * is ok, as the buffer is already dirty, ensuring that any BufferSync()
1147 * started after the buffer was marked dirty cannot complete without
1148 * flushing this buffer. If a checkpoint started between marking the
1149 * buffer dirty and this check, we will emit an unnecessary WAL record (as
1150 * the buffer will be written out as part of the checkpoint), but the
1151 * window for that is not big.
1152 */
1154
1155 /*
1156 * We assume page LSN is first data on *every* page that can be passed to
1157 * XLogInsert, whether it has the standard page layout or not.
1158 */
1159 lsn = PageGetLSN(BufferGetPage(buffer));
1160
1161 if (lsn <= RedoRecPtr)
1162 {
1163 int flags = 0;
1164
1166
1167 if (buffer_std)
1168 flags |= REGBUF_STANDARD;
1169
1170 XLogRegisterBuffer(0, buffer, flags);
1171
1173 }
1174
1175 return recptr;
1176}

References Assert, BufferGetPage(), BufferIsDirty(), fb(), GetRedoRecPtr(), InvalidXLogRecPtr, PageGetLSN(), RedoRecPtr, REGBUF_STANDARD, XLOG_FPI_FOR_HINT, XLogBeginInsert(), XLogInsert(), and XLogRegisterBuffer().

Referenced by MarkSharedBufferDirtyHint().

◆ XLogSetRecordFlags()

◆ XLogSimpleInsertInt64()

XLogRecPtr XLogSimpleInsertInt64 ( RmgrId  rmid,
uint8  info,
int64  value 
)

Definition at line 547 of file xloginsert.c.

548{
550 XLogRegisterData(&value, sizeof(value));
551 return XLogInsert(rmid, info);
552}

References value, XLogBeginInsert(), XLogInsert(), and XLogRegisterData().

Referenced by ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), and ExtendMultiXactOffset().

Variable Documentation

◆ begininsert_called

◆ curinsert_flags

uint8 curinsert_flags = 0
static

◆ hdr_rdt

XLogRecData hdr_rdt
static

Definition at line 116 of file xloginsert.c.

Referenced by XLogRecordAssemble().

◆ hdr_scratch

char* hdr_scratch = NULL
static

Definition at line 117 of file xloginsert.c.

Referenced by InitXLogInsert(), and XLogRecordAssemble().

◆ mainrdata_head

XLogRecData* mainrdata_head
static

Definition at line 101 of file xloginsert.c.

Referenced by XLogBeginInsert(), XLogRecordAssemble(), and XLogResetInsertion().

◆ mainrdata_last

XLogRecData* mainrdata_last = (XLogRecData *) &mainrdata_head
static

◆ mainrdata_len

uint64 mainrdata_len
static

◆ max_rdatas

int max_rdatas
static

◆ max_registered_block_id

int max_registered_block_id = 0
static

◆ max_registered_buffers

int max_registered_buffers
static

◆ num_rdatas

int num_rdatas
static

Definition at line 132 of file xloginsert.c.

Referenced by XLogRegisterBufData(), XLogRegisterData(), and XLogResetInsertion().

◆ rdatas

XLogRecData* rdatas
static

◆ registered_buffers

◆ xloginsert_cxt

MemoryContext xloginsert_cxt
static

Definition at line 138 of file xloginsert.c.

Referenced by InitXLogInsert().