PostgreSQL Source Code git master
Loading...
Searching...
No Matches
heapam_xlog.h
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * heapam_xlog.h
 *	  POSTGRES heap access XLOG definitions.
 *
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/heapam_xlog.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HEAPAM_XLOG_H
#define HEAPAM_XLOG_H

#include "access/htup.h"
#include "access/xlogreader.h"
#include "lib/stringinfo.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "storage/sinval.h"
#include "utils/relcache.h"
25
26
/*
 * WAL record definitions for heapam.c's WAL operations
 *
 * XLOG allows to store some information in high 4 bits of log
 * record xl_info field. We use 3 for opcode and one for init bit.
 */
#define XLOG_HEAP_INSERT		0x00
#define XLOG_HEAP_DELETE		0x10
#define XLOG_HEAP_UPDATE		0x20
#define XLOG_HEAP_TRUNCATE		0x30
#define XLOG_HEAP_HOT_UPDATE	0x40
#define XLOG_HEAP_CONFIRM		0x50
#define XLOG_HEAP_LOCK			0x60
#define XLOG_HEAP_INPLACE		0x70

#define XLOG_HEAP_OPMASK		0x70
/*
 * When we insert 1st item on new page in INSERT, UPDATE, HOT_UPDATE,
 * or MULTI_INSERT, we can (and we do) restore entire page in redo
 */
#define XLOG_HEAP_INIT_PAGE		0x80
/*
 * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
 * are associated with RM_HEAP2_ID, but are not logically different from
 * the ones above associated with RM_HEAP_ID. XLOG_HEAP_OPMASK applies to
 * these, too.
 *
 * There's no difference between XLOG_HEAP2_PRUNE_ON_ACCESS,
 * XLOG_HEAP2_PRUNE_VACUUM_SCAN and XLOG_HEAP2_PRUNE_VACUUM_CLEANUP records.
 * They have separate opcodes just for debugging and analysis purposes, to
 * indicate why the WAL record was emitted.
 */
#define XLOG_HEAP2_REWRITE		0x00
#define XLOG_HEAP2_PRUNE_ON_ACCESS	0x10
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN	0x20
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP	0x30
/* 0x40 was XLOG_HEAP2_VISIBLE */
#define XLOG_HEAP2_MULTI_INSERT 0x50
#define XLOG_HEAP2_LOCK_UPDATED 0x60
#define XLOG_HEAP2_NEW_CID		0x70
67
/*
 * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_INSERT_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_INSERT_LAST_IN_MULTI				(1<<1)
#define XLH_INSERT_IS_SPECULATIVE				(1<<2)
#define XLH_INSERT_CONTAINS_NEW_TUPLE			(1<<3)
#define XLH_INSERT_ON_TOAST_RELATION			(1<<4)

/* all_frozen_set always implies all_visible_set */
#define XLH_INSERT_ALL_FROZEN_SET				(1<<5)
80
/*
 * xl_heap_update flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED		(1<<0)
/* PD_ALL_VISIBLE was cleared in the 2nd page */
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED		(1<<1)
#define XLH_UPDATE_CONTAINS_OLD_TUPLE			(1<<2)
#define XLH_UPDATE_CONTAINS_OLD_KEY				(1<<3)
#define XLH_UPDATE_CONTAINS_NEW_TUPLE			(1<<4)
#define XLH_UPDATE_PREFIX_FROM_OLD				(1<<5)
#define XLH_UPDATE_SUFFIX_FROM_OLD				(1<<6)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_UPDATE_CONTAINS_OLD						\
	(XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY)
97
/*
 * xl_heap_delete flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_DELETE_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_DELETE_CONTAINS_OLD_TUPLE			(1<<1)
#define XLH_DELETE_CONTAINS_OLD_KEY				(1<<2)
#define XLH_DELETE_IS_SUPER						(1<<3)
#define XLH_DELETE_IS_PARTITION_MOVE			(1<<4)
/* See heap_delete() */
#define XLH_DELETE_NO_LOGICAL					(1<<5)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_DELETE_CONTAINS_OLD						\
	(XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY)
113
114/* This is what we need to know about delete */
115typedef struct xl_heap_delete
116{
117 TransactionId xmax; /* xmax of the deleted tuple */
118 OffsetNumber offnum; /* deleted tuple's offset */
119 uint8 infobits_set; /* infomask bits */
122
123#define SizeOfHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8))
124
125/*
126 * xl_heap_truncate flag values, 8 bits are available.
127 */
128#define XLH_TRUNCATE_CASCADE (1<<0)
129#define XLH_TRUNCATE_RESTART_SEQS (1<<1)
130
131/*
132 * For truncate we list all truncated relids in an array, followed by all
133 * sequence relids that need to be restarted, if any.
134 * All rels are always within the same database, so we just list dbid once.
135 */
143
144#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids))
145
146/*
147 * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted
148 * or updated tuple in WAL; we can save a few bytes by reconstructing the
149 * fields that are available elsewhere in the WAL record, or perhaps just
150 * plain needn't be reconstructed. These are the fields we must store.
151 */
158
159#define SizeOfHeapHeader (offsetof(xl_heap_header, t_hoff) + sizeof(uint8))
160
161/* This is what we need to know about insert */
162typedef struct xl_heap_insert
163{
164 OffsetNumber offnum; /* inserted tuple's offset */
166
167 /* xl_heap_header & TUPLE DATA in backup block 0 */
169
170#define SizeOfHeapInsert (offsetof(xl_heap_insert, flags) + sizeof(uint8))
171
172/*
173 * This is what we need to know about a multi-insert.
174 *
175 * The main data of the record consists of this xl_heap_multi_insert header.
176 * 'offsets' array is omitted if the whole page is reinitialized
177 * (XLOG_HEAP_INIT_PAGE).
178 *
179 * In block 0's data portion, there is an xl_multi_insert_tuple struct,
180 * followed by the tuple data for each tuple. There is padding to align
181 * each xl_multi_insert_tuple struct.
182 */
189
190#define SizeOfHeapMultiInsert offsetof(xl_heap_multi_insert, offsets)
191
193{
194 uint16 datalen; /* size of tuple data that follows */
198 /* TUPLE DATA FOLLOWS AT END OF STRUCT */
200
201#define SizeOfMultiInsertTuple (offsetof(xl_multi_insert_tuple, t_hoff) + sizeof(uint8))
202
203/*
204 * This is what we need to know about update|hot_update
205 *
206 * Backup blk 0: new page
207 *
208 * If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set,
209 * the prefix and/or suffix come first, as one or two uint16s.
210 *
211 * After that, xl_heap_header and new tuple data follow. The new tuple
212 * data doesn't include the prefix and suffix, which are copied from the
213 * old tuple on replay.
214 *
215 * If XLH_UPDATE_CONTAINS_NEW_TUPLE flag is given, the tuple data is
216 * included even if a full-page image was taken.
217 *
218 * Backup blk 1: old page, if different. (no data, just a reference to the blk)
219 */
220typedef struct xl_heap_update
221{
222 TransactionId old_xmax; /* xmax of the old tuple */
223 OffsetNumber old_offnum; /* old tuple's offset */
224 uint8 old_infobits_set; /* infomask bits to set on old tuple */
226 TransactionId new_xmax; /* xmax of the new tuple */
227 OffsetNumber new_offnum; /* new tuple's offset */
228
229 /*
230 * If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags
231 * are set, xl_heap_header and tuple data for the old tuple follow.
232 */
234
235#define SizeOfHeapUpdate (offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber))
236
237/*
238 * These structures and flags encode VACUUM pruning and freezing and on-access
239 * pruning page modifications.
240 *
241 * xl_heap_prune is the main record. The XLHP_HAS_* flags indicate which
242 * "sub-records" are included and the other XLHP_* flags provide additional
243 * information about the conditions for replay.
244 *
245 * The data for block reference 0 contains "sub-records" depending on which of
246 * the XLHP_HAS_* flags are set. See xlhp_* struct definitions below. The
247 * sub-records appear in the same order as the XLHP_* flags. An example
248 * record with every sub-record included:
249 *
250 *-----------------------------------------------------------------------------
251 * Main data section:
252 *
253 * xl_heap_prune
254 * uint16 flags
255 * TransactionId snapshot_conflict_horizon
256 *
257 * Block 0 data section:
258 *
259 * xlhp_freeze_plans
260 * uint16 nplans
261 * [2 bytes of padding]
262 * xlhp_freeze_plan plans[nplans]
263 *
264 * xlhp_prune_items
265 * uint16 nredirected
266 * OffsetNumber redirected[2 * nredirected]
267 *
268 * xlhp_prune_items
269 * uint16 ndead
270 * OffsetNumber nowdead[ndead]
271 *
272 * xlhp_prune_items
273 * uint16 nunused
274 * OffsetNumber nowunused[nunused]
275 *
276 * OffsetNumber frz_offsets[sum([plan.ntuples for plan in plans])]
277 *-----------------------------------------------------------------------------
278 *
279 * NOTE: because the record data is assembled from many optional parts, we
280 * have to pay close attention to alignment. In the main data section,
281 * 'snapshot_conflict_horizon' is stored unaligned after 'flags', to save
282 * space. In the block 0 data section, the freeze plans appear first, because
283 * they contain TransactionId fields that require 4-byte alignment. All the
284 * other fields require only 2-byte alignment. This is also the reason that
285 * 'frz_offsets' is stored separately from the xlhp_freeze_plan structs.
286 */
287typedef struct xl_heap_prune
288{
290
291 /*
292 * If XLHP_HAS_CONFLICT_HORIZON is set, the conflict horizon XID follows,
293 * unaligned
294 */
296
297#define SizeOfHeapPrune (offsetof(xl_heap_prune, flags) + sizeof(uint16))
298
/* to handle recovery conflict during logical decoding on standby */
/* (1 << 0) is currently unused */
#define XLHP_IS_CATALOG_REL			(1 << 1)

/*
 * Does replaying the record require a cleanup-lock?
 *
 * Pruning, in VACUUM's first pass or when otherwise accessing a page,
 * requires a cleanup lock. For freezing, and VACUUM's second pass which
 * marks LP_DEAD line pointers as unused without moving any tuple data, an
 * ordinary exclusive lock is sufficient.
 */
#define XLHP_CLEANUP_LOCK			(1 << 2)

/*
 * If we remove or freeze any entries that contain xids, we need to include a
 * snapshot conflict horizon. It's used in Hot Standby mode to ensure that
 * there are no queries running for which the removed tuples are still
 * visible, or which still consider the frozen XIDs as running.
 */
#define XLHP_HAS_CONFLICT_HORIZON	(1 << 3)

/*
 * Indicates that an xlhp_freeze_plans sub-record and one or more
 * xlhp_freeze_plan sub-records are present.
 */
#define XLHP_HAS_FREEZE_PLANS		(1 << 4)

/*
 * XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, and XLHP_HAS_NOW_UNUSED_ITEMS
 * indicate that xlhp_prune_items sub-records with redirected, dead, and
 * unused item offsets are present.
 */
#define XLHP_HAS_REDIRECTIONS		(1 << 5)
#define XLHP_HAS_DEAD_ITEMS			(1 << 6)
#define XLHP_HAS_NOW_UNUSED_ITEMS	(1 << 7)

/*
 * The xl_heap_prune record's flags may also contain which VM bits to set.
 * xl_heap_prune should always use the XLHP_VM_ALL_VISIBLE and
 * XLHP_VM_ALL_FROZEN flags and translate them to their visibilitymapdefs.h
 * equivalents, VISIBILITYMAP_ALL_VISIBLE and VISIBILITYMAP_ALL_FROZEN.
 */
#define XLHP_VM_ALL_VISIBLE			(1 << 8)
#define XLHP_VM_ALL_FROZEN			(1 << 9)
343
344/*
345 * xlhp_freeze_plan describes how to freeze a group of one or more heap tuples
346 * (appears in xl_heap_prune's xlhp_freeze_plans sub-record)
347 */
348/* 0x01 was XLH_FREEZE_XMIN */
349#define XLH_FREEZE_XVAC 0x02
350#define XLH_INVALID_XVAC 0x04
351
352typedef struct xlhp_freeze_plan
353{
358
359 /* Length of individual page offset numbers array for this plan */
362
363/*
364 * This is what we need to know about a block being frozen during vacuum
365 *
366 * The backup block's data contains an array of xlhp_freeze_plan structs (with
367 * nplans elements). The individual item offsets are located in an array at
368 * the end of the entire record with nplans * (each plan's ntuples) members
369 * Those offsets are in the same order as the plans. The REDO routine uses
370 * the offsets to freeze the corresponding heap tuples.
371 *
372 * (As of PostgreSQL 17, XLOG_HEAP2_PRUNE_VACUUM_SCAN records replace the
373 * separate XLOG_HEAP2_FREEZE_PAGE records.)
374 */
380
381/*
382 * Generic sub-record type contained in block reference 0 of an xl_heap_prune
383 * record and used for redirect, dead, and unused items if any of
384 * XLHP_HAS_REDIRECTIONS/XLHP_HAS_DEAD_ITEMS/XLHP_HAS_NOW_UNUSED_ITEMS are
385 * set. Note that in the XLHP_HAS_REDIRECTIONS variant, there are actually 2
386 * * length number of OffsetNumbers in the data.
387 */
393
394
/* flags for infobits_set */
#define XLHL_XMAX_IS_MULTI		0x01
#define XLHL_XMAX_LOCK_ONLY		0x02
#define XLHL_XMAX_EXCL_LOCK		0x04
#define XLHL_XMAX_KEYSHR_LOCK	0x08
#define XLHL_KEYS_UPDATED		0x10

/* flag bits for xl_heap_lock / xl_heap_lock_updated's flag field */
#define XLH_LOCK_ALL_FROZEN_CLEARED		0x01
404
405/* This is what we need to know about lock */
406typedef struct xl_heap_lock
407{
408 TransactionId xmax; /* might be a MultiXactId */
409 OffsetNumber offnum; /* locked tuple's offset on page */
410 uint8 infobits_set; /* infomask and infomask2 bits to set */
411 uint8 flags; /* XLH_LOCK_* flag bits */
413
414#define SizeOfHeapLock (offsetof(xl_heap_lock, flags) + sizeof(uint8))
415
416/* This is what we need to know about locking an updated version of a row */
424
425#define SizeOfHeapLockUpdated (offsetof(xl_heap_lock_updated, flags) + sizeof(uint8))
426
427/* This is what we need to know about confirmation of speculative insertion */
428typedef struct xl_heap_confirm
429{
430 OffsetNumber offnum; /* confirmed tuple's offset on page */
432
433#define SizeOfHeapConfirm (offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber))
434
435/* This is what we need to know about in-place update */
436typedef struct xl_heap_inplace
437{
438 OffsetNumber offnum; /* updated tuple's offset on page */
439 Oid dbId; /* MyDatabaseId */
440 Oid tsId; /* MyDatabaseTableSpace */
441 bool relcacheInitFileInval; /* invalidate relcache init files */
442 int nmsgs; /* number of shared inval msgs */
445
446#define MinSizeOfHeapInplace (offsetof(xl_heap_inplace, nmsgs) + sizeof(int))
447
448typedef struct xl_heap_new_cid
449{
450 /*
451 * store toplevel xid so we don't have to merge cids from different
452 * transactions
453 */
457 CommandId combocid; /* just for debugging */
458
459 /*
460 * Store the relfilelocator/ctid pair to facilitate lookups.
461 */
465
466#define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target_tid) + sizeof(ItemPointerData))
467
468/* logical rewrite xlog record header */
470{
471 TransactionId mapped_xid; /* xid that might need to see the row */
472 Oid mapped_db; /* DbOid or InvalidOid for shared rels */
473 Oid mapped_rel; /* Oid of the mapped relation */
474 off_t offset; /* How far have we written so far */
475 uint32 num_mappings; /* Number of in-memory mappings */
476 XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
478
480 TransactionId *snapshotConflictHorizon);
481
482extern void heap_redo(XLogReaderState *record);
483extern void heap_desc(StringInfo buf, XLogReaderState *record);
484extern const char *heap_identify(uint8 info);
485extern void heap_mask(char *pagedata, BlockNumber blkno);
486extern void heap2_redo(XLogReaderState *record);
487extern void heap2_desc(StringInfo buf, XLogReaderState *record);
488extern const char *heap2_identify(uint8 info);
490
491/* in heapdesc.c, so it can be shared between frontend/backend code */
493 int *nplans, xlhp_freeze_plan **plans,
495 int *nredirected, OffsetNumber **redirected,
496 int *ndead, OffsetNumber **nowdead,
497 int *nunused, OffsetNumber **nowunused);
498
499#endif /* HEAPAM_XLOG_H */
uint32 BlockNumber
Definition block.h:31
uint8_t uint8
Definition c.h:622
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:558
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
uint32 CommandId
Definition c.h:750
uint32 TransactionId
Definition c.h:736
void heap_desc(StringInfo buf, XLogReaderState *record)
Definition heapdesc.c:185
void heap_redo(XLogReaderState *record)
void heap2_desc(StringInfo buf, XLogReaderState *record)
Definition heapdesc.c:265
void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, OffsetNumber **frz_offsets, int *nredirected, OffsetNumber **redirected, int *ndead, OffsetNumber **nowdead, int *nunused, OffsetNumber **nowunused)
Definition heapdesc.c:106
void heap_mask(char *pagedata, BlockNumber blkno)
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition heapam.c:7954
void heap_xlog_logical_rewrite(XLogReaderState *r)
const char * heap2_identify(uint8 info)
Definition heapdesc.c:442
const char * heap_identify(uint8 info)
Definition heapdesc.c:397
void heap2_redo(XLogReaderState *record)
uint16 OffsetNumber
Definition off.h:24
static char buf[DEFAULT_XLOG_SEG_SIZE]
unsigned int Oid
static int fb(int x)
Definition type.h:138
OffsetNumber offnum
TransactionId xmax
OffsetNumber offnum
OffsetNumber offnum
SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]
bool relcacheInitFileInval
OffsetNumber offnum
TransactionId xmax
OffsetNumber offnum
uint8 infobits_set
OffsetNumber offnum
TransactionId xmax
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
CommandId combocid
ItemPointerData target_tid
TransactionId top_xid
RelFileLocator target_locator
TransactionId mapped_xid
Oid relids[FLEXIBLE_ARRAY_MEMBER]
TransactionId new_xmax
uint8 old_infobits_set
TransactionId old_xmax
OffsetNumber old_offnum
OffsetNumber new_offnum
TransactionId xmax
xlhp_freeze_plan plans[FLEXIBLE_ARRAY_MEMBER]
OffsetNumber data[FLEXIBLE_ARRAY_MEMBER]
uint64 XLogRecPtr
Definition xlogdefs.h:21