PostgreSQL Source Code git master
Loading...
Searching...
No Matches
heapam_xlog.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "storage/freespace.h"
#include "storage/standby.h"
Include dependency graph for heapam_xlog.c:

Go to the source code of this file.

Functions

static void heap_xlog_prune_freeze (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Function Documentation

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 262 of file heapam_xlog.c.

263{
267
274 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
277
280}
#define XLHL_XMAX_KEYSHR_LOCK
#define XLHL_XMAX_IS_MULTI
#define XLHL_XMAX_LOCK_ONLY
#define XLHL_XMAX_EXCL_LOCK
#define XLHL_KEYS_UPDATED
#define HEAP_KEYS_UPDATED
#define HEAP_XMAX_LOCK_ONLY
#define HEAP_XMAX_IS_MULTI
#define HEAP_XMAX_EXCL_LOCK
#define HEAP_XMAX_KEYSHR_LOCK
static int fb(int x)

References fb(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 1241 of file heapam_xlog.c.

1242{
1243 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1244
1245 switch (info & XLOG_HEAP_OPMASK)
1246 {
1250 heap_xlog_prune_freeze(record);
1251 break;
1253 heap_xlog_multi_insert(record);
1254 break;
1256 heap_xlog_lock_updated(record);
1257 break;
1258 case XLOG_HEAP2_NEW_CID:
1259
1260 /*
1261 * Nothing to do on a real replay, only used during logical
1262 * decoding.
1263 */
1264 break;
1265 case XLOG_HEAP2_REWRITE:
1267 break;
1268 default:
1269 elog(PANIC, "heap2_redo: unknown op code %u", info);
1270 }
1271}
uint8_t uint8
Definition c.h:622
#define PANIC
Definition elog.h:44
#define elog(elevel,...)
Definition elog.h:228
static void heap_xlog_prune_freeze(XLogReaderState *record)
Definition heapam_xlog.c:30
static void heap_xlog_lock_updated(XLogReaderState *record)
static void heap_xlog_multi_insert(XLogReaderState *record)
#define XLOG_HEAP2_MULTI_INSERT
Definition heapam_xlog.h:64
#define XLOG_HEAP2_REWRITE
Definition heapam_xlog.h:59
#define XLOG_HEAP_OPMASK
Definition heapam_xlog.h:42
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition heapam_xlog.h:61
#define XLOG_HEAP2_LOCK_UPDATED
Definition heapam_xlog.h:65
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition heapam_xlog.h:60
#define XLOG_HEAP2_NEW_CID
Definition heapam_xlog.h:66
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition heapam_xlog.h:62
void heap_xlog_logical_rewrite(XLogReaderState *r)
#define XLogRecGetInfo(decoder)
Definition xlogreader.h:410

References elog, fb(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), PANIC, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLOG_HEAP2_REWRITE, XLOG_HEAP_OPMASK, and XLogRecGetInfo.

◆ heap_mask()

void heap_mask ( char pagedata,
BlockNumber  blkno 
)

Definition at line 1277 of file heapam_xlog.c.

1278{
1279 Page page = (Page) pagedata;
1280 OffsetNumber off;
1281
1283
1284 mask_page_hint_bits(page);
1285 mask_unused_space(page);
1286
1287 for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
1288 {
1289 ItemId iid = PageGetItemId(page, off);
1290 char *page_item;
1291
1292 page_item = (char *) (page + ItemIdGetOffset(iid));
1293
1294 if (ItemIdIsNormal(iid))
1295 {
1297
1298 /*
1299 * If xmin of a tuple is not yet frozen, we should ignore
1300 * differences in hint bits, since they can be set without
1301 * emitting WAL.
1302 */
1305 else
1306 {
1307 /* Still we need to mask xmax hint bits. */
1308 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
1309 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
1310 }
1311
1312 /*
1313 * During replay, we set Command Id to FirstCommandId. Hence, mask
1314 * it. See heap_xlog_insert() for details.
1315 */
1316 page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
1317
1318 /*
1319 * For a speculative tuple, heap_insert() does not set ctid in the
1320 * caller-passed heap tuple itself, leaving the ctid field to
1321 * contain a speculative token value - a per-backend monotonically
1322 * increasing identifier. Besides, it does not WAL-log ctid under
1323 * any circumstances.
1324 *
1325 * During redo, heap_xlog_insert() sets t_ctid to current block
1326 * number and self offset number. It doesn't care about any
1327 * speculative insertions on the primary. Hence, we set t_ctid to
1328 * current block number and self offset number to ignore any
1329 * inconsistency.
1330 */
1332 ItemPointerSet(&page_htup->t_ctid, blkno, off);
1333
1334 /*
1335 * NB: Not ignoring ctid changes due to the tuple having moved
1336 * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
1337 * important information that needs to be in-sync between primary
1338 * and standby, and thus is WAL logged.
1339 */
1340 }
1341
1342 /*
1343 * Ignore any padding bytes after the tuple, when the length of the
1344 * item is not MAXALIGNed.
1345 */
1346 if (ItemIdHasStorage(iid))
1347 {
1348 int len = ItemIdGetLength(iid);
1349 int padlen = MAXALIGN(len) - len;
1350
1351 if (padlen > 0)
1353 }
1354 }
1355}
void mask_page_lsn_and_checksum(Page page)
Definition bufmask.c:31
void mask_unused_space(Page page)
Definition bufmask.c:70
void mask_page_hint_bits(Page page)
Definition bufmask.c:46
#define MASK_MARKER
Definition bufmask.h:24
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:268
PageData * Page
Definition bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:396
#define MAXALIGN(LEN)
Definition c.h:896
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
#define HEAP_XACT_MASK
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdIsNormal(itemId)
Definition itemid.h:99
#define ItemIdGetOffset(itemId)
Definition itemid.h:65
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition itemptr.h:135
uint16 OffsetNumber
Definition off.h:24
const void size_t len

References fb(), HEAP_XACT_MASK, HeapTupleHeaderIsSpeculative(), HeapTupleHeaderXminFrozen(), ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdIsNormal, ItemPointerSet(), len, MASK_MARKER, mask_page_hint_bits(), mask_page_lsn_and_checksum(), mask_unused_space(), MAXALIGN, PageGetItemId(), PageGetMaxOffsetNumber(), and HeapTupleHeaderData::t_infomask.

◆ heap_redo()

void heap_redo ( XLogReaderState record)

Definition at line 1195 of file heapam_xlog.c.

1196{
1197 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1198
1199 /*
1200 * These operations don't overwrite MVCC data so no conflict processing is
1201 * required. The ones in heap2 rmgr do.
1202 */
1203
1204 switch (info & XLOG_HEAP_OPMASK)
1205 {
1206 case XLOG_HEAP_INSERT:
1207 heap_xlog_insert(record);
1208 break;
1209 case XLOG_HEAP_DELETE:
1210 heap_xlog_delete(record);
1211 break;
1212 case XLOG_HEAP_UPDATE:
1213 heap_xlog_update(record, false);
1214 break;
1215 case XLOG_HEAP_TRUNCATE:
1216
1217 /*
1218 * TRUNCATE is a no-op because the actions are already logged as
1219 * SMGR WAL records. TRUNCATE WAL record only exists for logical
1220 * decoding.
1221 */
1222 break;
1224 heap_xlog_update(record, true);
1225 break;
1226 case XLOG_HEAP_CONFIRM:
1227 heap_xlog_confirm(record);
1228 break;
1229 case XLOG_HEAP_LOCK:
1230 heap_xlog_lock(record);
1231 break;
1232 case XLOG_HEAP_INPLACE:
1233 heap_xlog_inplace(record);
1234 break;
1235 default:
1236 elog(PANIC, "heap_redo: unknown op code %u", info);
1237 }
1238}
static void heap_xlog_insert(XLogReaderState *record)
static void heap_xlog_update(XLogReaderState *record, bool hot_update)
static void heap_xlog_delete(XLogReaderState *record)
static void heap_xlog_lock(XLogReaderState *record)
static void heap_xlog_inplace(XLogReaderState *record)
static void heap_xlog_confirm(XLogReaderState *record)
#define XLOG_HEAP_HOT_UPDATE
Definition heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition heapam_xlog.h:34
#define XLOG_HEAP_TRUNCATE
Definition heapam_xlog.h:36
#define XLOG_HEAP_UPDATE
Definition heapam_xlog.h:35
#define XLOG_HEAP_INPLACE
Definition heapam_xlog.h:40
#define XLOG_HEAP_LOCK
Definition heapam_xlog.h:39
#define XLOG_HEAP_INSERT
Definition heapam_xlog.h:33
#define XLOG_HEAP_CONFIRM
Definition heapam_xlog.h:38

References elog, fb(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_update(), PANIC, XLOG_HEAP_CONFIRM, XLOG_HEAP_DELETE, XLOG_HEAP_HOT_UPDATE, XLOG_HEAP_INPLACE, XLOG_HEAP_INSERT, XLOG_HEAP_LOCK, XLOG_HEAP_OPMASK, XLOG_HEAP_TRUNCATE, XLOG_HEAP_UPDATE, and XLogRecGetInfo.

◆ heap_xlog_confirm()

static void heap_xlog_confirm ( XLogReaderState record)
static

Definition at line 972 of file heapam_xlog.c.

973{
974 XLogRecPtr lsn = record->EndRecPtr;
976 Buffer buffer;
977 Page page;
978 OffsetNumber offnum;
979 ItemId lp;
980 HeapTupleHeader htup;
981
982 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
983 {
984 page = BufferGetPage(buffer);
985
986 offnum = xlrec->offnum;
988 elog(PANIC, "offnum out of range");
989 lp = PageGetItemId(page, offnum);
990 if (!ItemIdIsNormal(lp))
991 elog(PANIC, "invalid lp");
992
993 htup = (HeapTupleHeader) PageGetItem(page, lp);
994
995 /*
996 * Confirm tuple as actually inserted
997 */
998 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
999
1000 PageSetLSN(page, lsn);
1001 MarkBufferDirty(buffer);
1002 }
1003 if (BufferIsValid(buffer))
1004 UnlockReleaseBuffer(buffer);
1005}
int Buffer
Definition buf.h:23
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4446
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5603
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3147
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:468
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:419
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:378
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:416
ItemPointerData t_ctid
XLogRecPtr EndRecPtr
Definition xlogreader.h:206
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define XLogRecGetData(decoder)
Definition xlogreader.h:415
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition xlogutils.c:303
@ BLK_NEEDS_REDO
Definition xlogutils.h:74

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, fb(), ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLogReadBufferForRedo(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_delete()

static void heap_xlog_delete ( XLogReaderState record)
static

Definition at line 286 of file heapam_xlog.c.

287{
288 XLogRecPtr lsn = record->EndRecPtr;
290 Buffer buffer;
291 Page page;
292 ItemId lp;
293 HeapTupleHeader htup;
294 BlockNumber blkno;
295 RelFileLocator target_locator;
296 ItemPointerData target_tid;
297
298 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
299 ItemPointerSetBlockNumber(&target_tid, blkno);
300 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
301
302 /*
303 * The visibility map may need to be fixed even if the heap page is
304 * already up-to-date.
305 */
307 {
308 Relation reln = CreateFakeRelcacheEntry(target_locator);
309 Buffer vmbuffer = InvalidBuffer;
310
311 visibilitymap_pin(reln, blkno, &vmbuffer);
313 ReleaseBuffer(vmbuffer);
315 }
316
317 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
318 {
319 page = BufferGetPage(buffer);
320
321 if (xlrec->offnum < 1 || xlrec->offnum > PageGetMaxOffsetNumber(page))
322 elog(PANIC, "offnum out of range");
323 lp = PageGetItemId(page, xlrec->offnum);
324 if (!ItemIdIsNormal(lp))
325 elog(PANIC, "invalid lp");
326
327 htup = (HeapTupleHeader) PageGetItem(page, lp);
328
332 fix_infomask_from_infobits(xlrec->infobits_set,
333 &htup->t_infomask, &htup->t_infomask2);
334 if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
335 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
336 else
339
340 /* Mark the page as a candidate for pruning */
341 PageSetPrunable(page, XLogRecGetXid(record));
342
345
346 /* Make sure t_ctid is set correctly */
349 else
350 htup->t_ctid = target_tid;
351 PageSetLSN(page, lsn);
352 MarkBufferDirty(buffer);
353 }
354 if (BufferIsValid(buffer))
355 UnlockReleaseBuffer(buffer);
356}
uint32 BlockNumber
Definition block.h:31
#define InvalidBuffer
Definition buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5586
static void PageClearAllVisible(Page page)
Definition bufpage.h:464
#define PageSetPrunable(page, xid)
Definition bufpage.h:478
#define FirstCommandId
Definition c.h:752
static void fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLH_DELETE_IS_PARTITION_MOVE
#define XLH_DELETE_IS_SUPER
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
#define HEAP_XMAX_BITS
#define HEAP_MOVED
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition itemptr.h:147
#define InvalidTransactionId
Definition transam.h:31
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
#define XLogRecGetXid(decoder)
Definition xlogreader.h:412
void FreeFakeRelcacheEntry(Relation fakerel)
Definition xlogutils.c:618
Relation CreateFakeRelcacheEntry(RelFileLocator rlocator)
Definition xlogutils.c:571

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetMovedPartitions(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidTransactionId, ItemIdIsNormal, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), PageClearAllVisible(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_IS_PARTITION_MOVE, XLH_DELETE_IS_SUPER, XLogReadBufferForRedo(), XLogRecGetBlockTag(), XLogRecGetData, and XLogRecGetXid.

Referenced by heap_redo().

◆ heap_xlog_inplace()

static void heap_xlog_inplace ( XLogReaderState record)
static

Definition at line 1148 of file heapam_xlog.c.

1149{
1150 XLogRecPtr lsn = record->EndRecPtr;
1152 Buffer buffer;
1153 Page page;
1154 OffsetNumber offnum;
1155 ItemId lp;
1156 HeapTupleHeader htup;
1157 uint32 oldlen;
1158 Size newlen;
1159
1160 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1161 {
1162 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
1163
1164 page = BufferGetPage(buffer);
1165
1166 offnum = xlrec->offnum;
1168 elog(PANIC, "offnum out of range");
1169 lp = PageGetItemId(page, offnum);
1170 if (!ItemIdIsNormal(lp))
1171 elog(PANIC, "invalid lp");
1172
1173 htup = (HeapTupleHeader) PageGetItem(page, lp);
1174
1175 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
1176 if (oldlen != newlen)
1177 elog(PANIC, "wrong tuple length");
1178
1179 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
1180
1181 PageSetLSN(page, lsn);
1182 MarkBufferDirty(buffer);
1183 }
1184 if (BufferIsValid(buffer))
1185 UnlockReleaseBuffer(buffer);
1186
1188 xlrec->nmsgs,
1189 xlrec->relcacheInitFileInval,
1190 xlrec->dbId,
1191 xlrec->tsId);
1192}
uint32_t uint32
Definition c.h:624
size_t Size
Definition c.h:689
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition inval.c:1135
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, fb(), ItemIdGetLength, ItemIdIsNormal, MarkBufferDirty(), memcpy(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ProcessCommittedInvalidationMessages(), HeapTupleHeaderData::t_hoff, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_insert()

static void heap_xlog_insert ( XLogReaderState record)
static

Definition at line 362 of file heapam_xlog.c.

363{
364 XLogRecPtr lsn = record->EndRecPtr;
366 Buffer buffer;
367 Page page;
368 union
369 {
372 } tbuf;
373 HeapTupleHeader htup;
376 Size freespace = 0;
377 RelFileLocator target_locator;
378 BlockNumber blkno;
379 ItemPointerData target_tid;
381
382 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
383 ItemPointerSetBlockNumber(&target_tid, blkno);
384 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
385
386 /* No freezing in the heap_insert() code path */
388
389 /*
390 * The visibility map may need to be fixed even if the heap page is
391 * already up-to-date.
392 */
394 {
395 Relation reln = CreateFakeRelcacheEntry(target_locator);
396 Buffer vmbuffer = InvalidBuffer;
397
398 visibilitymap_pin(reln, blkno, &vmbuffer);
400 ReleaseBuffer(vmbuffer);
402 }
403
404 /*
405 * If we inserted the first and only tuple on the page, re-initialize the
406 * page from scratch.
407 */
409 {
410 buffer = XLogInitBufferForRedo(record, 0);
411 page = BufferGetPage(buffer);
412 PageInit(page, BufferGetPageSize(buffer), 0);
414 }
415 else
416 action = XLogReadBufferForRedo(record, 0, &buffer);
417 if (action == BLK_NEEDS_REDO)
418 {
419 Size datalen;
420 char *data;
421
422 page = BufferGetPage(buffer);
423
424 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
425 elog(PANIC, "invalid max offset number");
426
427 data = XLogRecGetBlockData(record, 0, &datalen);
428
429 newlen = datalen - SizeOfHeapHeader;
433
434 htup = &tbuf.hdr;
436 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
437 memcpy((char *) htup + SizeofHeapTupleHeader,
438 data,
439 newlen);
441 htup->t_infomask2 = xlhdr.t_infomask2;
442 htup->t_infomask = xlhdr.t_infomask;
443 htup->t_hoff = xlhdr.t_hoff;
446 htup->t_ctid = target_tid;
447
448 if (PageAddItem(page, htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber)
449 elog(PANIC, "failed to add tuple");
450
451 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
452
453 /*
454 * Set the page prunable to trigger on-access pruning later, which may
455 * set the page all-visible in the VM. See comments in heap_insert().
456 */
459 PageSetPrunable(page, XLogRecGetXid(record));
460
461 PageSetLSN(page, lsn);
462
465
466 MarkBufferDirty(buffer);
467 }
468 if (BufferIsValid(buffer))
469 UnlockReleaseBuffer(buffer);
470
471 /*
472 * If the page is running low on free space, update the FSM as well.
473 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
474 * better than that without knowing the fill-factor for the table.
475 *
476 * XXX: Don't do this if the page was restored from full page image. We
477 * don't bother to update the FSM in that case, it doesn't need to be
478 * totally accurate anyway.
479 */
480 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
481 XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
482}
static Size BufferGetPageSize(Buffer buffer)
Definition bufmgr.h:457
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:1000
void PageInit(Page page, Size pageSize, Size specialSize)
Definition bufpage.c:42
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition bufpage.h:504
#define Assert(condition)
Definition c.h:943
#define MemSet(start, val, len)
Definition c.h:1107
void XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, Size spaceAvail)
Definition freespace.c:211
#define XLH_INSERT_ALL_FROZEN_SET
Definition heapam_xlog.h:79
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:72
#define SizeOfHeapHeader
#define XLOG_HEAP_INIT_PAGE
Definition heapam_xlog.h:47
#define SizeofHeapTupleHeader
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
#define MaxHeapTupleSize
#define InvalidOffsetNumber
Definition off.h:26
const void * data
#define TransactionIdIsNormal(xid)
Definition transam.h:42
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Definition xlogutils.c:315
XLogRedoAction
Definition xlogutils.h:73

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), HeapTupleHeaderXminFrozen(), InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, TransactionIdIsNormal, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().

◆ heap_xlog_lock()

static void heap_xlog_lock ( XLogReaderState record)
static

Definition at line 1011 of file heapam_xlog.c.

1012{
1013 XLogRecPtr lsn = record->EndRecPtr;
1015 Buffer buffer;
1016 Page page;
1017 OffsetNumber offnum;
1018 ItemId lp;
1019 HeapTupleHeader htup;
1020
1021 /*
1022 * The visibility map may need to be fixed even if the heap page is
1023 * already up-to-date.
1024 */
1026 {
1027 RelFileLocator rlocator;
1028 Buffer vmbuffer = InvalidBuffer;
1029 BlockNumber block;
1030 Relation reln;
1031
1032 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1033 reln = CreateFakeRelcacheEntry(rlocator);
1034
1035 visibilitymap_pin(reln, block, &vmbuffer);
1037
1038 ReleaseBuffer(vmbuffer);
1040 }
1041
1042 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1043 {
1044 page = BufferGetPage(buffer);
1045
1046 offnum = xlrec->offnum;
1048 elog(PANIC, "offnum out of range");
1049 lp = PageGetItemId(page, offnum);
1050 if (!ItemIdIsNormal(lp))
1051 elog(PANIC, "invalid lp");
1052
1053 htup = (HeapTupleHeader) PageGetItem(page, lp);
1054
1055 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1057 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1058 &htup->t_infomask2);
1059
1060 /*
1061 * Clear relevant update flags, but only if the modified infomask says
1062 * there's no update.
1063 */
1065 {
1067 /* Make sure there is no forward chain link in t_ctid */
1068 ItemPointerSet(&htup->t_ctid,
1069 BufferGetBlockNumber(buffer),
1070 offnum);
1071 }
1072 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1074 PageSetLSN(page, lsn);
1075 MarkBufferDirty(buffer);
1076 }
1077 if (BufferIsValid(buffer))
1078 UnlockReleaseBuffer(buffer);
1079}
#define XLH_LOCK_ALL_FROZEN_CLEARED
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
#define VISIBILITYMAP_ALL_FROZEN

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_IS_LOCKED_ONLY(), HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetXmax(), InvalidBuffer, ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_lock_updated()

static void heap_xlog_lock_updated ( XLogReaderState record)
static

Definition at line 1085 of file heapam_xlog.c.

1086{
1087 XLogRecPtr lsn = record->EndRecPtr;
1089 Buffer buffer;
1090 Page page;
1091 OffsetNumber offnum;
1092 ItemId lp;
1093 HeapTupleHeader htup;
1094
1096
1097 /*
1098 * The visibility map may need to be fixed even if the heap page is
1099 * already up-to-date.
1100 */
1102 {
1103 RelFileLocator rlocator;
1104 Buffer vmbuffer = InvalidBuffer;
1105 BlockNumber block;
1106 Relation reln;
1107
1108 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1109 reln = CreateFakeRelcacheEntry(rlocator);
1110
1111 visibilitymap_pin(reln, block, &vmbuffer);
1113
1114 ReleaseBuffer(vmbuffer);
1116 }
1117
1118 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1119 {
1120 page = BufferGetPage(buffer);
1121
1122 offnum = xlrec->offnum;
1124 elog(PANIC, "offnum out of range");
1125 lp = PageGetItemId(page, offnum);
1126 if (!ItemIdIsNormal(lp))
1127 elog(PANIC, "invalid lp");
1128
1129 htup = (HeapTupleHeader) PageGetItem(page, lp);
1130
1131 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1133 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1134 &htup->t_infomask2);
1135 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1136
1137 PageSetLSN(page, lsn);
1138 MarkBufferDirty(buffer);
1139 }
1140 if (BufferIsValid(buffer))
1141 UnlockReleaseBuffer(buffer);
1142}

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderSetXmax(), InvalidBuffer, ItemIdIsNormal, MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by heap2_redo().

◆ heap_xlog_multi_insert()

static void heap_xlog_multi_insert ( XLogReaderState record)
static

Definition at line 488 of file heapam_xlog.c.

489{
490 XLogRecPtr lsn = record->EndRecPtr;
492 RelFileLocator rlocator;
493 BlockNumber blkno;
494 Buffer buffer;
495 Page page;
496 union
497 {
500 } tbuf;
501 HeapTupleHeader htup;
503 Size freespace = 0;
504 int i;
505 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
507 Buffer vmbuffer = InvalidBuffer;
508
509 /*
510 * Insertion doesn't overwrite MVCC data, so no conflict processing is
511 * required.
512 */
514
515 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
516
517 /* check that the mutually exclusive flags are not both set */
519 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
520
521 /*
522 * The visibility map may need to be fixed even if the heap page is
523 * already up-to-date.
524 */
526 {
528
529 visibilitymap_pin(reln, blkno, &vmbuffer);
531 ReleaseBuffer(vmbuffer);
532 vmbuffer = InvalidBuffer;
534 }
535
536 if (isinit)
537 {
538 buffer = XLogInitBufferForRedo(record, 0);
539 page = BufferGetPage(buffer);
540 PageInit(page, BufferGetPageSize(buffer), 0);
542 }
543 else
544 action = XLogReadBufferForRedo(record, 0, &buffer);
545 if (action == BLK_NEEDS_REDO)
546 {
547 char *tupdata;
548 char *endptr;
549 Size len;
550
551 /* Tuples are stored as block data */
552 tupdata = XLogRecGetBlockData(record, 0, &len);
553 endptr = tupdata + len;
554
555 page = BufferGetPage(buffer);
556
557 for (i = 0; i < xlrec->ntuples; i++)
558 {
559 OffsetNumber offnum;
561
562 /*
563 * If we're reinitializing the page, the tuples are stored in
564 * order from FirstOffsetNumber. Otherwise there's an array of
565 * offsets in the WAL record, and the tuples come after that.
566 */
567 if (isinit)
568 offnum = FirstOffsetNumber + i;
569 else
570 offnum = xlrec->offsets[i];
571 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
572 elog(PANIC, "invalid max offset number");
573
575 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
576
577 newlen = xlhdr->datalen;
579 htup = &tbuf.hdr;
581 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
582 memcpy((char *) htup + SizeofHeapTupleHeader,
583 tupdata,
584 newlen);
585 tupdata += newlen;
586
588 htup->t_infomask2 = xlhdr->t_infomask2;
589 htup->t_infomask = xlhdr->t_infomask;
590 htup->t_hoff = xlhdr->t_hoff;
593 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
594 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
595
596 offnum = PageAddItem(page, htup, newlen, offnum, true, true);
597 if (offnum == InvalidOffsetNumber)
598 elog(PANIC, "failed to add tuple");
599 }
600 if (tupdata != endptr)
601 elog(PANIC, "total tuple length mismatch");
602
603 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
604
605 PageSetLSN(page, lsn);
606
609
610 /*
611 * XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible. If
612 * we are not setting the page frozen, then set the page's prunable
613 * hint so that we trigger on-access pruning later which may set the
614 * page all-visible in the VM.
615 */
616 if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
617 {
618 PageSetAllVisible(page);
619 PageClearPrunable(page);
620 }
621 else
622 PageSetPrunable(page, XLogRecGetXid(record));
623
624 MarkBufferDirty(buffer);
625 }
626 if (BufferIsValid(buffer))
627 UnlockReleaseBuffer(buffer);
628
629 buffer = InvalidBuffer;
630
631 /*
632 * Read and update the visibility map (VM) block.
633 *
634 * We must always redo VM changes, even if the corresponding heap page
635 * update was skipped due to the LSN interlock. Each VM block covers
636 * multiple heap pages, so later WAL records may update other bits in the
637 * same block. If this record includes an FPI (full-page image),
638 * subsequent WAL records may depend on it to guard against torn pages.
639 *
640 * Heap page changes are replayed first to preserve the invariant:
641 * PD_ALL_VISIBLE must be set on the heap page if the VM bit is set.
642 *
643 * Note that we released the heap page lock above. During normal
644 * operation, this would be unsafe — a concurrent modification could
645 * clear PD_ALL_VISIBLE while the VM bit remained set, violating the
646 * invariant.
647 *
648 * During recovery, however, no concurrent writers exist. Therefore,
649 * updating the VM without holding the heap page lock is safe enough. This
650 * same approach is taken when replaying XLOG_HEAP2_PRUNE* records (see
651 * heap_xlog_prune_freeze()).
652 */
653 if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
655 &vmbuffer) == BLK_NEEDS_REDO)
656 {
657 Page vmpage = BufferGetPage(vmbuffer);
658
659 /* initialize the page if it was read as zeros */
660 if (PageIsNew(vmpage))
662
663 visibilitymap_set(blkno,
664 vmbuffer,
667 rlocator);
668
669 Assert(BufferIsDirty(vmbuffer));
670 PageSetLSN(vmpage, lsn);
671 }
672
673 if (BufferIsValid(vmbuffer))
674 UnlockReleaseBuffer(vmbuffer);
675
676 /*
677 * If the page is running low on free space, update the FSM as well.
678 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
679 * better than that without knowing the fill-factor for the table.
680 *
681 * XXX: Don't do this if the page was restored from full page image. We
682 * don't bother to update the FSM in that case, it doesn't need to be
683 * totally accurate anyway.
684 */
685 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
686 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
687}
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3114
@ RBM_ZERO_ON_ERROR
Definition bufmgr.h:51
static bool PageIsNew(const PageData *page)
Definition bufpage.h:258
static void PageSetAllVisible(Page page)
Definition bufpage.h:459
#define PageClearPrunable(page)
Definition bufpage.h:485
#define SHORTALIGN(LEN)
Definition c.h:892
#define SizeOfMultiInsertTuple
int i
Definition isn.c:77
#define FirstOffsetNumber
Definition off.h:27
void visibilitymap_set(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
#define VISIBILITYMAP_ALL_VISIBLE
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition xlogutils.c:340

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsDirty(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, FirstOffsetNumber, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), i, InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), len, MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageClearPrunable, PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), PageSetPrunable, PANIC, RBM_ZERO_ON_ERROR, ReleaseBuffer(), SHORTALIGN, SizeofHeapTupleHeader, SizeOfMultiInsertTuple, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap2_redo().

◆ heap_xlog_prune_freeze()

static void heap_xlog_prune_freeze ( XLogReaderState record)
static

Definition at line 30 of file heapam_xlog.c.

31{
/*
 * NOTE(review): this listing is a lossy documentation extraction — several
 * interior source lines (e.g. 34, 43-44, 53, 57, 59, 74, 90, 104, 108) were
 * dropped by the HTML-to-text conversion, which is why identifiers such as
 * xlrec and frz_offsets appear without their declarations. Consult the real
 * src/backend/access/heap/heapam_xlog.c before acting on anything below.
 *
 * Purpose (from the visible code): replay an XLOG_HEAP2_PRUNE* record —
 * prune/redirect line pointers, apply freeze plans to tuples, optionally set
 * PD_ALL_VISIBLE and the visibility-map bits, and refresh the FSM entry.
 */
32 XLogRecPtr lsn = record->EndRecPtr;
33 char *maindataptr = XLogRecGetData(record);
35 Buffer buffer;
36 RelFileLocator rlocator;
37 BlockNumber blkno;
38 Buffer vmbuffer = InvalidBuffer;
39 uint8 vmflags = 0;
40 Size freespace = 0;
41
42 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
45
46 /*
47 * We will take an ordinary exclusive lock or a cleanup lock depending on
48 * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
49 * lock, we better not be doing anything that requires moving existing
50 * tuple data.
51 */
52 Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
54
/*
 * Translate the record's VM-related flags into visibilitymap bits. The two
 * assignment lines (57 and 59) are elided in this extraction; presumably
 * they OR VISIBILITYMAP_ALL_VISIBLE / VISIBILITYMAP_ALL_FROZEN into
 * vmflags — TODO confirm against the original source.
 */
55 if (xlrec.flags & XLHP_VM_ALL_VISIBLE)
56 {
58 if (xlrec.flags & XLHP_VM_ALL_FROZEN)
60 }
61
62 /*
63 * After xl_heap_prune is the optional snapshot conflict horizon.
64 *
65 * In Hot Standby mode, we must ensure that there are no running queries
66 * which would conflict with the changes in this record. That means we
67 * can't replay this record if it removes tuples that are still visible to
68 * transactions on the standby, freeze tuples with xids that are still
69 * considered running on the standby, or set a page as all-visible in the
70 * VM if it isn't all-visible to all transactions on the standby.
71 */
72 if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
73 {
75
76 /* memcpy() because snapshot_conflict_horizon is stored unaligned */
78 maindataptr += sizeof(TransactionId);
79
80 if (InHotStandby)
82 (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
83 rlocator);
84 }
85
86 /*
87 * If we have a full-page image of the heap block, restore it and we're
88 * done with the heap block.
89 */
91 (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
92 &buffer) == BLK_NEEDS_REDO)
93 {
94 Page page = BufferGetPage(buffer);
95 OffsetNumber *redirected;
96 OffsetNumber *nowdead;
97 OffsetNumber *nowunused;
98 int nredirected;
99 int ndead;
100 int nunused;
101 int nplans;
102 Size datalen;
103 xlhp_freeze_plan *plans;
105 char *dataptr = XLogRecGetBlockData(record, 0, &datalen);
106 bool do_prune;
107
109 &nplans, &plans, &frz_offsets,
110 &nredirected, &redirected,
111 &ndead, &nowdead,
112 &nunused, &nowunused);
113
114 do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
115
116 /* Ensure the record does something */
118
119 /*
120 * Update all line pointers per the record, and repair fragmentation
121 * if needed.
122 */
123 if (do_prune)
125 (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
126 redirected, nredirected,
127 nowdead, ndead,
128 nowunused, nunused);
129
130 /* Freeze tuples */
131 for (int p = 0; p < nplans; p++)
132 {
134
135 /*
136 * Convert freeze plan representation from WAL record into
137 * per-tuple format used by heap_execute_freeze_tuple
138 */
139 frz.xmax = plans[p].xmax;
140 frz.t_infomask2 = plans[p].t_infomask2;
141 frz.t_infomask = plans[p].t_infomask;
142 frz.frzflags = plans[p].frzflags;
143 frz.offset = InvalidOffsetNumber; /* unused, but be tidy */
144
145 for (int i = 0; i < plans[p].ntuples; i++)
146 {
147 OffsetNumber offset = *(frz_offsets++);
148 ItemId lp;
149 HeapTupleHeader tuple;
150
151 lp = PageGetItemId(page, offset);
152 tuple = (HeapTupleHeader) PageGetItem(page, lp);
154 }
155 }
156
157 /* There should be no more data */
158 Assert((char *) frz_offsets == dataptr + datalen);
159
160 /*
161 * The critical integrity requirement here is that we must never end
162 * up with the visibility map bit set and the page-level
163 * PD_ALL_VISIBLE bit unset. If that were to occur, a subsequent page
164 * modification would fail to clear the visibility map bit.
165 */
167 {
168 PageSetAllVisible(page);
169 PageClearPrunable(page);
170 }
171
172 MarkBufferDirty(buffer);
173
174 /*
175 * See log_heap_prune_and_freeze() for commentary on when we set the
176 * heap page LSN.
177 */
178 if (do_prune || nplans > 0 ||
180 PageSetLSN(page, lsn);
181
182 /*
183 * Note: we don't worry about updating the page's prunability hints.
184 * At worst this will cause an extra prune cycle to occur soon.
185 */
186 }
187
188 /*
189 * If we 1) released any space or line pointers or 2) set PD_ALL_VISIBLE
190 * or the VM, update the freespace map.
191 *
192 * Even when no actual space is freed (when only marking the page
193 * all-visible or frozen), we still update the FSM. Because the FSM is
194 * unlogged and maintained heuristically, it often becomes stale on
195 * standbys. If such a standby is later promoted and runs VACUUM, it will
196 * skip recalculating free space for pages that were marked
197 * all-visible/all-frozen. FreeSpaceMapVacuum() can then propagate overly
198 * optimistic free space values upward, causing future insertions to
199 * select pages that turn out to be unusable. In bulk, this can lead to
200 * long stalls.
201 *
202 * To prevent this, always update the FSM even when only marking a page
203 * all-visible/all-frozen.
204 *
205 * Do this regardless of whether a full-page image is logged, since FSM
206 * data is not part of the page itself.
207 */
208 if (BufferIsValid(buffer))
209 {
210 if ((xlrec.flags & (XLHP_HAS_REDIRECTIONS |
214 freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
215
216 /*
217 * We want to avoid holding an exclusive lock on the heap buffer while
218 * doing IO (either of the FSM or the VM), so we'll release it now.
219 */
220 UnlockReleaseBuffer(buffer);
221 }
222
223 /*
224 * Now read and update the VM block.
225 *
226 * We must redo changes to the VM even if the heap page was skipped due to
227 * LSN interlock. See comment in heap_xlog_multi_insert() for more details
228 * on replaying changes to the VM.
229 */
/* NOTE(review): lines 230-232 (the XLogReadBufferForRedoExtended() call on
 * the VM block, guarded by vmflags per the cross-references) are elided in
 * this extraction — verify against the original source. */
233 false,
234 &vmbuffer) == BLK_NEEDS_REDO)
235 {
236 Page vmpage = BufferGetPage(vmbuffer);
237
238 /* initialize the page if it was read as zeros */
239 if (PageIsNew(vmpage))
241
242 visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);
243
244 Assert(BufferIsDirty(vmbuffer));
245 PageSetLSN(vmpage, lsn);
246 }
247
248 if (BufferIsValid(vmbuffer))
249 UnlockReleaseBuffer(vmbuffer);
250
251 if (freespace > 0)
252 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
253}
@ RBM_NORMAL
Definition bufmgr.h:46
uint32 TransactionId
Definition c.h:736
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition heapam.h:533
#define XLHP_HAS_CONFLICT_HORIZON
#define XLHP_VM_ALL_VISIBLE
#define SizeOfHeapPrune
#define XLHP_HAS_NOW_UNUSED_ITEMS
#define XLHP_VM_ALL_FROZEN
#define XLHP_HAS_REDIRECTIONS
#define XLHP_CLEANUP_LOCK
#define XLHP_HAS_DEAD_ITEMS
#define XLHP_IS_CATALOG_REL
void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, OffsetNumber **frz_offsets, int *nredirected, OffsetNumber **redirected, int *ndead, OffsetNumber **nowdead, int *nunused, OffsetNumber **nowunused)
Definition heapdesc.c:106
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition pruneheap.c:2065
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
Definition standby.c:470
TransactionId xmax
Definition heapam.h:156
TransactionId xmax
#define XLogHintBitIsNeeded()
Definition xlog.h:123
#define InHotStandby
Definition xlogutils.h:60

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferIsDirty(), BufferIsValid(), XLogReaderState::EndRecPtr, fb(), xlhp_freeze_plan::frzflags, heap_execute_freeze_tuple(), heap_page_prune_execute(), heap_xlog_deserialize_prune_and_freeze(), i, InHotStandby, InvalidBuffer, InvalidOffsetNumber, MarkBufferDirty(), memcpy(), xlhp_freeze_plan::ntuples, PageClearPrunable, PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), RBM_NORMAL, RBM_ZERO_ON_ERROR, ResolveRecoveryConflictWithSnapshot(), SizeOfHeapPrune, xlhp_freeze_plan::t_infomask, xlhp_freeze_plan::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLogHintBitIsNeeded, XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecordPageWithFreeSpace(), HeapTupleFreeze::xmax, and xlhp_freeze_plan::xmax.

Referenced by heap2_redo().

◆ heap_xlog_update()

static void heap_xlog_update ( XLogReaderState record,
bool  hot_update 
)
static

Definition at line 693 of file heapam_xlog.c.

694{
/*
 * NOTE(review): this listing is a lossy documentation extraction — many
 * interior source lines (e.g. 696-701, 707, 713-718, 720-721, 766, 768,
 * 776, 781-782, 784-786, 790, 795, 797-798, 801) were dropped by the
 * HTML-to-text conversion, which is why several locals (xlrec, oldtup,
 * obuffer, oldblk, newblk, newtid, xlhdr, newlen, oldaction, newaction)
 * appear without declarations. Consult the real heapam_xlog.c before
 * acting on anything below.
 *
 * Purpose (from the visible code): replay a heap UPDATE / HOT_UPDATE
 * record — mark the old tuple version dead (or hot-updated), reconstruct
 * the new tuple from WAL data plus optional prefix/suffix copied from the
 * old tuple, insert it on the target page, and maintain VM/FSM state.
 */
695 XLogRecPtr lsn = record->EndRecPtr;
697 RelFileLocator rlocator;
702 nbuffer;
703 Page opage,
704 npage;
705 OffsetNumber offnum;
706 ItemId lp;
708 HeapTupleHeader htup;
709 uint16 prefixlen = 0,
710 suffixlen = 0;
711 char *newp;
712 union
713 {
716 } tbuf;
719 Size freespace = 0;
722
723 /* initialize to keep the compiler quiet */
724 oldtup.t_data = NULL;
725 oldtup.t_len = 0;
726
727 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
728 if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
729 {
730 /* HOT updates are never done across pages */
732 }
733 else
734 oldblk = newblk;
735
736 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
737
738 /*
739 * The visibility map may need to be fixed even if the heap page is
740 * already up-to-date.
741 */
743 {
745 Buffer vmbuffer = InvalidBuffer;
746
747 visibilitymap_pin(reln, oldblk, &vmbuffer);
749 ReleaseBuffer(vmbuffer);
751 }
752
753 /*
754 * In normal operation, it is important to lock the two pages in
755 * page-number order, to avoid possible deadlocks against other update
756 * operations going the other way. However, during WAL replay there can
757 * be no other update happening, so we don't need to worry about that. But
758 * we *do* need to worry that we don't expose an inconsistent state to Hot
759 * Standby queries --- so the original page can't be unlocked before we've
760 * added the new tuple to the new page.
761 */
762
763 /* Deal with old tuple version */
764 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
765 &obuffer);
767 {
769 offnum = xlrec->old_offnum;
771 elog(PANIC, "offnum out of range");
772 lp = PageGetItemId(opage, offnum);
773 if (!ItemIdIsNormal(lp))
774 elog(PANIC, "invalid lp");
775
777
778 oldtup.t_data = htup;
779 oldtup.t_len = ItemIdGetLength(lp);
780
/* Set/clear HOT-updated state on the old tuple per the record type
 * (the actual HeapTupleHeaderSetHotUpdated/ClearHotUpdated calls on
 * lines 784 and 786 are elided in this extraction). */
783 if (hot_update)
785 else
787 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
788 &htup->t_infomask2);
789 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
791 /* Set forward chain link in t_ctid */
792 htup->t_ctid = newtid;
793
794 /* Mark the page as a candidate for pruning */
796
799
800 PageSetLSN(opage, lsn);
802 }
803
804 /*
805 * Read the page the new tuple goes into, if different from old.
806 */
807 if (oldblk == newblk)
808 {
811 }
812 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
813 {
814 nbuffer = XLogInitBufferForRedo(record, 0);
815 npage = BufferGetPage(nbuffer);
818 }
819 else
821
822 /*
823 * The visibility map may need to be fixed even if the heap page is
824 * already up-to-date.
825 */
827 {
829 Buffer vmbuffer = InvalidBuffer;
830
831 visibilitymap_pin(reln, newblk, &vmbuffer);
833 ReleaseBuffer(vmbuffer);
835 }
836
837 /* Deal with new tuple */
839 {
840 char *recdata;
841 char *recdata_end;
842 Size datalen;
843 Size tuplen;
844
845 recdata = XLogRecGetBlockData(record, 0, &datalen);
846 recdata_end = recdata + datalen;
847
848 npage = BufferGetPage(nbuffer);
849
850 offnum = xlrec->new_offnum;
851 if (PageGetMaxOffsetNumber(npage) + 1 < offnum)
852 elog(PANIC, "invalid max offset number");
853
/* Optional prefix/suffix lengths precede the tuple data when the record
 * reuses bytes from the old tuple (only possible for same-page updates,
 * hence the Assert below). */
855 {
856 Assert(newblk == oldblk);
857 memcpy(&prefixlen, recdata, sizeof(uint16));
858 recdata += sizeof(uint16);
859 }
861 {
862 Assert(newblk == oldblk);
863 memcpy(&suffixlen, recdata, sizeof(uint16));
864 recdata += sizeof(uint16);
865 }
866
869
870 tuplen = recdata_end - recdata;
871 Assert(tuplen <= MaxHeapTupleSize);
872
873 htup = &tbuf.hdr;
875
876 /*
877 * Reconstruct the new tuple using the prefix and/or suffix from the
878 * old tuple, and the data stored in the WAL record.
879 */
880 newp = (char *) htup + SizeofHeapTupleHeader;
881 if (prefixlen > 0)
882 {
883 int len;
884
885 /* copy bitmap [+ padding] [+ oid] from WAL record */
888 recdata += len;
889 newp += len;
890
891 /* copy prefix from old tuple */
892 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
893 newp += prefixlen;
894
895 /* copy new tuple data from WAL record */
896 len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
898 recdata += len;
899 newp += len;
900 }
901 else
902 {
903 /*
904 * copy bitmap [+ padding] [+ oid] + data from record, all in one
905 * go
906 */
907 memcpy(newp, recdata, tuplen);
908 recdata += tuplen;
909 newp += tuplen;
910 }
912
913 /* copy suffix from old tuple */
914 if (suffixlen > 0)
915 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
916
918 htup->t_infomask2 = xlhdr.t_infomask2;
919 htup->t_infomask = xlhdr.t_infomask;
920 htup->t_hoff = xlhdr.t_hoff;
921
924 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
925 /* Make sure there is no forward chain link in t_ctid */
926 htup->t_ctid = newtid;
927
928 offnum = PageAddItem(npage, htup, newlen, offnum, true, true);
929 if (offnum == InvalidOffsetNumber)
930 elog(PANIC, "failed to add tuple");
931
933 PageClearAllVisible(npage);
934
935 /* needed to update FSM below */
936 freespace = PageGetHeapFreeSpace(npage);
937
938 PageSetLSN(npage, lsn);
939 /* See heap_insert() for why we set pd_prune_xid on insert */
940 PageSetPrunable(npage, XLogRecGetXid(record));
942 }
943
/* NOTE(review): lines 944-947 (releasing nbuffer/obuffer) are elided in
 * this extraction — verify against the original source. */
948
949 /*
950 * If the new page is running low on free space, update the FSM as well.
951 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
952 * better than that without knowing the fill-factor for the table.
953 *
954 * However, don't update the FSM on HOT updates, because after crash
955 * recovery, either the old or the new tuple will certainly be dead and
956 * prunable. After pruning, the page will have roughly as much free space
957 * as it did before the update, assuming the new tuple is about the same
958 * size as the old one.
959 *
960 * XXX: Don't do this if the page was restored from full page image. We
961 * don't bother to update the FSM in that case, it doesn't need to be
962 * totally accurate anyway.
963 */
964 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
965 XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
966}
uint16_t uint16
Definition c.h:623
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:87
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:85
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition heapam_xlog.h:91
static void HeapTupleHeaderSetHotUpdated(HeapTupleHeaderData *tup)
bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum, Buffer *prefetch_buffer)

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetHotUpdated(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), len, MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED, XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED, XLH_UPDATE_PREFIX_FROM_OLD, XLH_UPDATE_SUFFIX_FROM_OLD, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetBlockTagExtended(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().