PostgreSQL Source Code git master
Loading...
Searching...
No Matches
heapam_xlog.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "storage/freespace.h"
#include "storage/standby.h"
Include dependency graph for heapam_xlog.c:

Go to the source code of this file.

Functions

static void heap_xlog_prune_freeze (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Function Documentation

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 266 of file heapam_xlog.c.

267{
271
278 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
281
284}
#define XLHL_XMAX_KEYSHR_LOCK
#define XLHL_XMAX_IS_MULTI
#define XLHL_XMAX_LOCK_ONLY
#define XLHL_XMAX_EXCL_LOCK
#define XLHL_KEYS_UPDATED
#define HEAP_KEYS_UPDATED
#define HEAP_XMAX_LOCK_ONLY
#define HEAP_XMAX_IS_MULTI
#define HEAP_XMAX_EXCL_LOCK
#define HEAP_XMAX_KEYSHR_LOCK

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 1245 of file heapam_xlog.c.

1246{
1247 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1248
1249 switch (info & XLOG_HEAP_OPMASK)
1250 {
1254 heap_xlog_prune_freeze(record);
1255 break;
1257 heap_xlog_multi_insert(record);
1258 break;
1260 heap_xlog_lock_updated(record);
1261 break;
1262 case XLOG_HEAP2_NEW_CID:
1263
1264 /*
1265 * Nothing to do on a real replay, only used during logical
1266 * decoding.
1267 */
1268 break;
1269 case XLOG_HEAP2_REWRITE:
1271 break;
1272 default:
1273 elog(PANIC, "heap2_redo: unknown op code %u", info);
1274 }
1275}
uint8_t uint8
Definition c.h:622
#define PANIC
Definition elog.h:44
#define elog(elevel,...)
Definition elog.h:228
static void heap_xlog_prune_freeze(XLogReaderState *record)
Definition heapam_xlog.c:30
static void heap_xlog_lock_updated(XLogReaderState *record)
static void heap_xlog_multi_insert(XLogReaderState *record)
#define XLOG_HEAP2_MULTI_INSERT
Definition heapam_xlog.h:64
#define XLOG_HEAP2_REWRITE
Definition heapam_xlog.h:59
#define XLOG_HEAP_OPMASK
Definition heapam_xlog.h:42
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition heapam_xlog.h:61
#define XLOG_HEAP2_LOCK_UPDATED
Definition heapam_xlog.h:65
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition heapam_xlog.h:60
#define XLOG_HEAP2_NEW_CID
Definition heapam_xlog.h:66
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition heapam_xlog.h:62
void heap_xlog_logical_rewrite(XLogReaderState *r)
#define XLogRecGetInfo(decoder)
Definition xlogreader.h:410

References elog, heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), PANIC, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLOG_HEAP2_REWRITE, XLOG_HEAP_OPMASK, and XLogRecGetInfo.

◆ heap_mask()

void heap_mask ( char pagedata,
BlockNumber  blkno 
)

Definition at line 1281 of file heapam_xlog.c.

1282{
1283 Page page = (Page) pagedata;
1284 OffsetNumber off;
1285
1287
1288 mask_page_hint_bits(page);
1289 mask_unused_space(page);
1290
1291 for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
1292 {
1293 ItemId iid = PageGetItemId(page, off);
1294 char *page_item;
1295
1296 page_item = (char *) (page + ItemIdGetOffset(iid));
1297
1298 if (ItemIdIsNormal(iid))
1299 {
1301
1302 /*
1303 * If xmin of a tuple is not yet frozen, we should ignore
1304 * differences in hint bits, since they can be set without
1305 * emitting WAL.
1306 */
1309 else
1310 {
1311 /* Still we need to mask xmax hint bits. */
1312 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
1313 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
1314 }
1315
1316 /*
1317 * During replay, we set Command Id to FirstCommandId. Hence, mask
1318 * it. See heap_xlog_insert() for details.
1319 */
1320 page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
1321
1322 /*
1323 * For a speculative tuple, heap_insert() does not set ctid in the
1324 * caller-passed heap tuple itself, leaving the ctid field to
1325 * contain a speculative token value - a per-backend monotonically
1326 * increasing identifier. Besides, it does not WAL-log ctid under
1327 * any circumstances.
1328 *
1329 * During redo, heap_xlog_insert() sets t_ctid to current block
1330 * number and self offset number. It doesn't care about any
1331 * speculative insertions on the primary. Hence, we set t_ctid to
1332 * current block number and self offset number to ignore any
1333 * inconsistency.
1334 */
1336 ItemPointerSet(&page_htup->t_ctid, blkno, off);
1337
1338 /*
1339 * NB: Not ignoring ctid changes due to the tuple having moved
1340 * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
1341 * important information that needs to be in-sync between primary
1342 * and standby, and thus is WAL logged.
1343 */
1344 }
1345
1346 /*
1347 * Ignore any padding bytes after the tuple, when the length of the
1348 * item is not MAXALIGNed.
1349 */
1350 if (ItemIdHasStorage(iid))
1351 {
1352 int len = ItemIdGetLength(iid);
1353 int padlen = MAXALIGN(len) - len;
1354
1355 if (padlen > 0)
1357 }
1358 }
1359}
void mask_page_lsn_and_checksum(Page page)
Definition bufmask.c:31
void mask_unused_space(Page page)
Definition bufmask.c:70
void mask_page_hint_bits(Page page)
Definition bufmask.c:46
#define MASK_MARKER
Definition bufmask.h:24
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:268
PageData * Page
Definition bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:396
#define MAXALIGN(LEN)
Definition c.h:896
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
#define HEAP_XACT_MASK
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdIsNormal(itemId)
Definition itemid.h:99
#define ItemIdGetOffset(itemId)
Definition itemid.h:65
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition itemptr.h:135
uint16 OffsetNumber
Definition off.h:24
const void size_t len

References fb(), HEAP_XACT_MASK, HeapTupleHeaderIsSpeculative(), HeapTupleHeaderXminFrozen(), ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdIsNormal, ItemPointerSet(), len, MASK_MARKER, mask_page_hint_bits(), mask_page_lsn_and_checksum(), mask_unused_space(), MAXALIGN, PageGetItemId(), PageGetMaxOffsetNumber(), and HeapTupleHeaderData::t_infomask.

◆ heap_redo()

void heap_redo ( XLogReaderState record)

Definition at line 1199 of file heapam_xlog.c.

1200{
1201 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1202
1203 /*
1204 * These operations don't overwrite MVCC data so no conflict processing is
1205 * required. The ones in heap2 rmgr do.
1206 */
1207
1208 switch (info & XLOG_HEAP_OPMASK)
1209 {
1210 case XLOG_HEAP_INSERT:
1211 heap_xlog_insert(record);
1212 break;
1213 case XLOG_HEAP_DELETE:
1214 heap_xlog_delete(record);
1215 break;
1216 case XLOG_HEAP_UPDATE:
1217 heap_xlog_update(record, false);
1218 break;
1219 case XLOG_HEAP_TRUNCATE:
1220
1221 /*
1222 * TRUNCATE is a no-op because the actions are already logged as
1223 * SMGR WAL records. TRUNCATE WAL record only exists for logical
1224 * decoding.
1225 */
1226 break;
1228 heap_xlog_update(record, true);
1229 break;
1230 case XLOG_HEAP_CONFIRM:
1231 heap_xlog_confirm(record);
1232 break;
1233 case XLOG_HEAP_LOCK:
1234 heap_xlog_lock(record);
1235 break;
1236 case XLOG_HEAP_INPLACE:
1237 heap_xlog_inplace(record);
1238 break;
1239 default:
1240 elog(PANIC, "heap_redo: unknown op code %u", info);
1241 }
1242}
static void heap_xlog_insert(XLogReaderState *record)
static void heap_xlog_update(XLogReaderState *record, bool hot_update)
static void heap_xlog_delete(XLogReaderState *record)
static void heap_xlog_lock(XLogReaderState *record)
static void heap_xlog_inplace(XLogReaderState *record)
static void heap_xlog_confirm(XLogReaderState *record)
#define XLOG_HEAP_HOT_UPDATE
Definition heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition heapam_xlog.h:34
#define XLOG_HEAP_TRUNCATE
Definition heapam_xlog.h:36
#define XLOG_HEAP_UPDATE
Definition heapam_xlog.h:35
#define XLOG_HEAP_INPLACE
Definition heapam_xlog.h:40
#define XLOG_HEAP_LOCK
Definition heapam_xlog.h:39
#define XLOG_HEAP_INSERT
Definition heapam_xlog.h:33
#define XLOG_HEAP_CONFIRM
Definition heapam_xlog.h:38

References elog, heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_update(), PANIC, XLOG_HEAP_CONFIRM, XLOG_HEAP_DELETE, XLOG_HEAP_HOT_UPDATE, XLOG_HEAP_INPLACE, XLOG_HEAP_INSERT, XLOG_HEAP_LOCK, XLOG_HEAP_OPMASK, XLOG_HEAP_TRUNCATE, XLOG_HEAP_UPDATE, and XLogRecGetInfo.

◆ heap_xlog_confirm()

static void heap_xlog_confirm ( XLogReaderState record)
static

Definition at line 976 of file heapam_xlog.c.

977{
978 XLogRecPtr lsn = record->EndRecPtr;
980 Buffer buffer;
981 Page page;
982 OffsetNumber offnum;
983 ItemId lp;
984 HeapTupleHeader htup;
985
986 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
987 {
988 page = BufferGetPage(buffer);
989
990 offnum = xlrec->offnum;
992 elog(PANIC, "offnum out of range");
993 lp = PageGetItemId(page, offnum);
994 if (!ItemIdIsNormal(lp))
995 elog(PANIC, "invalid lp");
996
997 htup = (HeapTupleHeader) PageGetItem(page, lp);
998
999 /*
1000 * Confirm tuple as actually inserted
1001 */
1002 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
1003
1004 PageSetLSN(page, lsn);
1005 MarkBufferDirty(buffer);
1006 }
1007 if (BufferIsValid(buffer))
1008 UnlockReleaseBuffer(buffer);
1009}
int Buffer
Definition buf.h:23
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4446
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5603
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3147
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:468
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:419
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:378
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:416
ItemPointerData t_ctid
XLogRecPtr EndRecPtr
Definition xlogreader.h:206
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define XLogRecGetData(decoder)
Definition xlogreader.h:415
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition xlogutils.c:303
@ BLK_NEEDS_REDO
Definition xlogutils.h:74

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLogReadBufferForRedo(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_delete()

static void heap_xlog_delete ( XLogReaderState record)
static

Definition at line 290 of file heapam_xlog.c.

291{
292 XLogRecPtr lsn = record->EndRecPtr;
294 Buffer buffer;
295 Page page;
296 ItemId lp;
297 HeapTupleHeader htup;
298 BlockNumber blkno;
299 RelFileLocator target_locator;
300 ItemPointerData target_tid;
301
302 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
303 ItemPointerSetBlockNumber(&target_tid, blkno);
304 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
305
306 /*
307 * The visibility map may need to be fixed even if the heap page is
308 * already up-to-date.
309 */
311 {
312 Relation reln = CreateFakeRelcacheEntry(target_locator);
313 Buffer vmbuffer = InvalidBuffer;
314
315 visibilitymap_pin(reln, blkno, &vmbuffer);
317 ReleaseBuffer(vmbuffer);
319 }
320
321 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
322 {
323 page = BufferGetPage(buffer);
324
325 if (xlrec->offnum < 1 || xlrec->offnum > PageGetMaxOffsetNumber(page))
326 elog(PANIC, "offnum out of range");
327 lp = PageGetItemId(page, xlrec->offnum);
328 if (!ItemIdIsNormal(lp))
329 elog(PANIC, "invalid lp");
330
331 htup = (HeapTupleHeader) PageGetItem(page, lp);
332
336 fix_infomask_from_infobits(xlrec->infobits_set,
337 &htup->t_infomask, &htup->t_infomask2);
338 if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
339 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
340 else
343
344 /* Mark the page as a candidate for pruning */
345 PageSetPrunable(page, XLogRecGetXid(record));
346
349
350 /* Make sure t_ctid is set correctly */
353 else
354 htup->t_ctid = target_tid;
355 PageSetLSN(page, lsn);
356 MarkBufferDirty(buffer);
357 }
358 if (BufferIsValid(buffer))
359 UnlockReleaseBuffer(buffer);
360}
uint32 BlockNumber
Definition block.h:31
#define InvalidBuffer
Definition buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5586
static void PageClearAllVisible(Page page)
Definition bufpage.h:464
#define PageSetPrunable(page, xid)
Definition bufpage.h:478
#define FirstCommandId
Definition c.h:752
static void fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLH_DELETE_IS_PARTITION_MOVE
#define XLH_DELETE_IS_SUPER
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
#define HEAP_XMAX_BITS
#define HEAP_MOVED
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition itemptr.h:147
#define InvalidTransactionId
Definition transam.h:31
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
#define XLogRecGetXid(decoder)
Definition xlogreader.h:412
void FreeFakeRelcacheEntry(Relation fakerel)
Definition xlogutils.c:618
Relation CreateFakeRelcacheEntry(RelFileLocator rlocator)
Definition xlogutils.c:571

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetMovedPartitions(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidTransactionId, ItemIdIsNormal, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), PageClearAllVisible(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_IS_PARTITION_MOVE, XLH_DELETE_IS_SUPER, XLogReadBufferForRedo(), XLogRecGetBlockTag(), XLogRecGetData, and XLogRecGetXid.

Referenced by heap_redo().

◆ heap_xlog_inplace()

static void heap_xlog_inplace ( XLogReaderState record)
static

Definition at line 1152 of file heapam_xlog.c.

1153{
1154 XLogRecPtr lsn = record->EndRecPtr;
1156 Buffer buffer;
1157 Page page;
1158 OffsetNumber offnum;
1159 ItemId lp;
1160 HeapTupleHeader htup;
1161 uint32 oldlen;
1162 Size newlen;
1163
1164 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1165 {
1166 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
1167
1168 page = BufferGetPage(buffer);
1169
1170 offnum = xlrec->offnum;
1172 elog(PANIC, "offnum out of range");
1173 lp = PageGetItemId(page, offnum);
1174 if (!ItemIdIsNormal(lp))
1175 elog(PANIC, "invalid lp");
1176
1177 htup = (HeapTupleHeader) PageGetItem(page, lp);
1178
1179 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
1180 if (oldlen != newlen)
1181 elog(PANIC, "wrong tuple length");
1182
1183 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
1184
1185 PageSetLSN(page, lsn);
1186 MarkBufferDirty(buffer);
1187 }
1188 if (BufferIsValid(buffer))
1189 UnlockReleaseBuffer(buffer);
1190
1192 xlrec->nmsgs,
1193 xlrec->relcacheInitFileInval,
1194 xlrec->dbId,
1195 xlrec->tsId);
1196}
uint32_t uint32
Definition c.h:624
size_t Size
Definition c.h:689
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition inval.c:1135
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, fb(), ItemIdGetLength, ItemIdIsNormal, MarkBufferDirty(), memcpy(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ProcessCommittedInvalidationMessages(), HeapTupleHeaderData::t_hoff, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_insert()

static void heap_xlog_insert ( XLogReaderState record)
static

Definition at line 366 of file heapam_xlog.c.

367{
368 XLogRecPtr lsn = record->EndRecPtr;
370 Buffer buffer;
371 Page page;
372 union
373 {
376 } tbuf;
377 HeapTupleHeader htup;
380 Size freespace = 0;
381 RelFileLocator target_locator;
382 BlockNumber blkno;
383 ItemPointerData target_tid;
385
386 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
387 ItemPointerSetBlockNumber(&target_tid, blkno);
388 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
389
390 /* No freezing in the heap_insert() code path */
392
393 /*
394 * The visibility map may need to be fixed even if the heap page is
395 * already up-to-date.
396 */
398 {
399 Relation reln = CreateFakeRelcacheEntry(target_locator);
400 Buffer vmbuffer = InvalidBuffer;
401
402 visibilitymap_pin(reln, blkno, &vmbuffer);
404 ReleaseBuffer(vmbuffer);
406 }
407
408 /*
409 * If we inserted the first and only tuple on the page, re-initialize the
410 * page from scratch.
411 */
413 {
414 buffer = XLogInitBufferForRedo(record, 0);
415 page = BufferGetPage(buffer);
416 PageInit(page, BufferGetPageSize(buffer), 0);
418 }
419 else
420 action = XLogReadBufferForRedo(record, 0, &buffer);
421 if (action == BLK_NEEDS_REDO)
422 {
423 Size datalen;
424 char *data;
425
426 page = BufferGetPage(buffer);
427
428 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
429 elog(PANIC, "invalid max offset number");
430
431 data = XLogRecGetBlockData(record, 0, &datalen);
432
433 newlen = datalen - SizeOfHeapHeader;
437
438 htup = &tbuf.hdr;
440 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
441 memcpy((char *) htup + SizeofHeapTupleHeader,
442 data,
443 newlen);
445 htup->t_infomask2 = xlhdr.t_infomask2;
446 htup->t_infomask = xlhdr.t_infomask;
447 htup->t_hoff = xlhdr.t_hoff;
450 htup->t_ctid = target_tid;
451
452 if (PageAddItem(page, htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber)
453 elog(PANIC, "failed to add tuple");
454
455 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
456
457 /*
458 * Set the page prunable to trigger on-access pruning later, which may
459 * set the page all-visible in the VM. See comments in heap_insert().
460 */
463 PageSetPrunable(page, XLogRecGetXid(record));
464
465 PageSetLSN(page, lsn);
466
469
470 MarkBufferDirty(buffer);
471 }
472 if (BufferIsValid(buffer))
473 UnlockReleaseBuffer(buffer);
474
475 /*
476 * If the page is running low on free space, update the FSM as well.
477 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
478 * better than that without knowing the fill-factor for the table.
479 *
480 * XXX: Don't do this if the page was restored from full page image. We
481 * don't bother to update the FSM in that case, it doesn't need to be
482 * totally accurate anyway.
483 */
484 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
485 XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
486}
static Size BufferGetPageSize(Buffer buffer)
Definition bufmgr.h:457
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:1000
void PageInit(Page page, Size pageSize, Size specialSize)
Definition bufpage.c:42
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition bufpage.h:504
#define Assert(condition)
Definition c.h:943
#define MemSet(start, val, len)
Definition c.h:1107
void XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, Size spaceAvail)
Definition freespace.c:211
#define XLH_INSERT_ALL_FROZEN_SET
Definition heapam_xlog.h:79
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:72
#define SizeOfHeapHeader
#define XLOG_HEAP_INIT_PAGE
Definition heapam_xlog.h:47
#define SizeofHeapTupleHeader
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
#define MaxHeapTupleSize
#define InvalidOffsetNumber
Definition off.h:26
const void * data
#define TransactionIdIsNormal(xid)
Definition transam.h:42
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Definition xlogutils.c:315
XLogRedoAction
Definition xlogutils.h:73

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), HeapTupleHeaderXminFrozen(), InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, TransactionIdIsNormal, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().

◆ heap_xlog_lock()

static void heap_xlog_lock ( XLogReaderState record)
static

Definition at line 1015 of file heapam_xlog.c.

1016{
1017 XLogRecPtr lsn = record->EndRecPtr;
1019 Buffer buffer;
1020 Page page;
1021 OffsetNumber offnum;
1022 ItemId lp;
1023 HeapTupleHeader htup;
1024
1025 /*
1026 * The visibility map may need to be fixed even if the heap page is
1027 * already up-to-date.
1028 */
1030 {
1031 RelFileLocator rlocator;
1032 Buffer vmbuffer = InvalidBuffer;
1033 BlockNumber block;
1034 Relation reln;
1035
1036 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1037 reln = CreateFakeRelcacheEntry(rlocator);
1038
1039 visibilitymap_pin(reln, block, &vmbuffer);
1041
1042 ReleaseBuffer(vmbuffer);
1044 }
1045
1046 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1047 {
1048 page = BufferGetPage(buffer);
1049
1050 offnum = xlrec->offnum;
1052 elog(PANIC, "offnum out of range");
1053 lp = PageGetItemId(page, offnum);
1054 if (!ItemIdIsNormal(lp))
1055 elog(PANIC, "invalid lp");
1056
1057 htup = (HeapTupleHeader) PageGetItem(page, lp);
1058
1059 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1061 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1062 &htup->t_infomask2);
1063
1064 /*
1065 * Clear relevant update flags, but only if the modified infomask says
1066 * there's no update.
1067 */
1069 {
1071 /* Make sure there is no forward chain link in t_ctid */
1072 ItemPointerSet(&htup->t_ctid,
1073 BufferGetBlockNumber(buffer),
1074 offnum);
1075 }
1076 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1078 PageSetLSN(page, lsn);
1079 MarkBufferDirty(buffer);
1080 }
1081 if (BufferIsValid(buffer))
1082 UnlockReleaseBuffer(buffer);
1083}
#define XLH_LOCK_ALL_FROZEN_CLEARED
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
#define VISIBILITYMAP_ALL_FROZEN

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_IS_LOCKED_ONLY(), HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetXmax(), InvalidBuffer, ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_lock_updated()

static void heap_xlog_lock_updated ( XLogReaderState record)
static

Definition at line 1089 of file heapam_xlog.c.

1090{
1091 XLogRecPtr lsn = record->EndRecPtr;
1093 Buffer buffer;
1094 Page page;
1095 OffsetNumber offnum;
1096 ItemId lp;
1097 HeapTupleHeader htup;
1098
1100
1101 /*
1102 * The visibility map may need to be fixed even if the heap page is
1103 * already up-to-date.
1104 */
1106 {
1107 RelFileLocator rlocator;
1108 Buffer vmbuffer = InvalidBuffer;
1109 BlockNumber block;
1110 Relation reln;
1111
1112 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1113 reln = CreateFakeRelcacheEntry(rlocator);
1114
1115 visibilitymap_pin(reln, block, &vmbuffer);
1117
1118 ReleaseBuffer(vmbuffer);
1120 }
1121
1122 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1123 {
1124 page = BufferGetPage(buffer);
1125
1126 offnum = xlrec->offnum;
1128 elog(PANIC, "offnum out of range");
1129 lp = PageGetItemId(page, offnum);
1130 if (!ItemIdIsNormal(lp))
1131 elog(PANIC, "invalid lp");
1132
1133 htup = (HeapTupleHeader) PageGetItem(page, lp);
1134
1135 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1137 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1138 &htup->t_infomask2);
1139 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1140
1141 PageSetLSN(page, lsn);
1142 MarkBufferDirty(buffer);
1143 }
1144 if (BufferIsValid(buffer))
1145 UnlockReleaseBuffer(buffer);
1146}

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fb(), fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderSetXmax(), InvalidBuffer, ItemIdIsNormal, MarkBufferDirty(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), and XLogRecGetData.

Referenced by heap2_redo().

◆ heap_xlog_multi_insert()

static void heap_xlog_multi_insert ( XLogReaderState record)
static

Definition at line 492 of file heapam_xlog.c.

493{
494 XLogRecPtr lsn = record->EndRecPtr;
496 RelFileLocator rlocator;
497 BlockNumber blkno;
498 Buffer buffer;
499 Page page;
500 union
501 {
504 } tbuf;
505 HeapTupleHeader htup;
507 Size freespace = 0;
508 int i;
509 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
511 Buffer vmbuffer = InvalidBuffer;
512
513 /*
514 * Insertion doesn't overwrite MVCC data, so no conflict processing is
515 * required.
516 */
518
519 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
520
521 /* check that the mutually exclusive flags are not both set */
523 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
524
525 /*
526 * The visibility map may need to be fixed even if the heap page is
527 * already up-to-date.
528 */
530 {
532
533 visibilitymap_pin(reln, blkno, &vmbuffer);
535 ReleaseBuffer(vmbuffer);
536 vmbuffer = InvalidBuffer;
538 }
539
540 if (isinit)
541 {
542 buffer = XLogInitBufferForRedo(record, 0);
543 page = BufferGetPage(buffer);
544 PageInit(page, BufferGetPageSize(buffer), 0);
546 }
547 else
548 action = XLogReadBufferForRedo(record, 0, &buffer);
549 if (action == BLK_NEEDS_REDO)
550 {
551 char *tupdata;
552 char *endptr;
553 Size len;
554
555 /* Tuples are stored as block data */
556 tupdata = XLogRecGetBlockData(record, 0, &len);
557 endptr = tupdata + len;
558
559 page = BufferGetPage(buffer);
560
561 for (i = 0; i < xlrec->ntuples; i++)
562 {
563 OffsetNumber offnum;
565
566 /*
567 * If we're reinitializing the page, the tuples are stored in
568 * order from FirstOffsetNumber. Otherwise there's an array of
569 * offsets in the WAL record, and the tuples come after that.
570 */
571 if (isinit)
572 offnum = FirstOffsetNumber + i;
573 else
574 offnum = xlrec->offsets[i];
575 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
576 elog(PANIC, "invalid max offset number");
577
579 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
580
581 newlen = xlhdr->datalen;
583 htup = &tbuf.hdr;
585 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
586 memcpy((char *) htup + SizeofHeapTupleHeader,
587 tupdata,
588 newlen);
589 tupdata += newlen;
590
592 htup->t_infomask2 = xlhdr->t_infomask2;
593 htup->t_infomask = xlhdr->t_infomask;
594 htup->t_hoff = xlhdr->t_hoff;
597 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
598 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
599
600 offnum = PageAddItem(page, htup, newlen, offnum, true, true);
601 if (offnum == InvalidOffsetNumber)
602 elog(PANIC, "failed to add tuple");
603 }
604 if (tupdata != endptr)
605 elog(PANIC, "total tuple length mismatch");
606
607 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
608
609 PageSetLSN(page, lsn);
610
613
614 /*
615 * XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible. If
616 * we are not setting the page frozen, then set the page's prunable
617 * hint so that we trigger on-access pruning later which may set the
618 * page all-visible in the VM.
619 */
620 if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
621 {
622 PageSetAllVisible(page);
623 PageClearPrunable(page);
624 }
625 else
626 PageSetPrunable(page, XLogRecGetXid(record));
627
628 MarkBufferDirty(buffer);
629 }
630 if (BufferIsValid(buffer))
631 UnlockReleaseBuffer(buffer);
632
633 buffer = InvalidBuffer;
634
635 /*
636 * Read and update the visibility map (VM) block.
637 *
638 * We must always redo VM changes, even if the corresponding heap page
639 * update was skipped due to the LSN interlock. Each VM block covers
640 * multiple heap pages, so later WAL records may update other bits in the
641 * same block. If this record includes an FPI (full-page image),
642 * subsequent WAL records may depend on it to guard against torn pages.
643 *
644 * Heap page changes are replayed first to preserve the invariant:
645 * PD_ALL_VISIBLE must be set on the heap page if the VM bit is set.
646 *
647 * Note that we released the heap page lock above. During normal
648 * operation, this would be unsafe — a concurrent modification could
649 * clear PD_ALL_VISIBLE while the VM bit remained set, violating the
650 * invariant.
651 *
652 * During recovery, however, no concurrent writers exist. Therefore,
653 * updating the VM without holding the heap page lock is safe enough. This
654 * same approach is taken when replaying XLOG_HEAP2_PRUNE* records (see
655 * heap_xlog_prune_freeze()).
656 */
657 if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
659 &vmbuffer) == BLK_NEEDS_REDO)
660 {
661 Page vmpage = BufferGetPage(vmbuffer);
662
663 /* initialize the page if it was read as zeros */
664 if (PageIsNew(vmpage))
666
667 visibilitymap_set(blkno,
668 vmbuffer,
671 rlocator);
672
673 Assert(BufferIsDirty(vmbuffer));
674 PageSetLSN(vmpage, lsn);
675 }
676
677 if (BufferIsValid(vmbuffer))
678 UnlockReleaseBuffer(vmbuffer);
679
680 /*
681 * If the page is running low on free space, update the FSM as well.
682 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
683 * better than that without knowing the fill-factor for the table.
684 *
685 * XXX: Don't do this if the page was restored from full page image. We
686 * don't bother to update the FSM in that case, it doesn't need to be
687 * totally accurate anyway.
688 */
689 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
690 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
691}
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3114
@ RBM_ZERO_ON_ERROR
Definition bufmgr.h:51
static bool PageIsNew(const PageData *page)
Definition bufpage.h:258
static void PageSetAllVisible(Page page)
Definition bufpage.h:459
#define PageClearPrunable(page)
Definition bufpage.h:485
#define SHORTALIGN(LEN)
Definition c.h:892
#define SizeOfMultiInsertTuple
int i
Definition isn.c:77
#define FirstOffsetNumber
Definition off.h:27
void visibilitymap_set(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
#define VISIBILITYMAP_ALL_VISIBLE
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition xlogutils.c:340

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsDirty(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, FirstOffsetNumber, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), i, InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), len, MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageClearPrunable, PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), PageSetPrunable, PANIC, RBM_ZERO_ON_ERROR, ReleaseBuffer(), SHORTALIGN, SizeofHeapTupleHeader, SizeOfMultiInsertTuple, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_pin(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap2_redo().

◆ heap_xlog_prune_freeze()

static void heap_xlog_prune_freeze ( XLogReaderState * record)
static

Definition at line 30 of file heapam_xlog.c.

 31{
/*
 * heap_xlog_prune_freeze: redo routine for XLOG_HEAP2_PRUNE* records.
 *
 * Applies pruning (redirected/now-dead/now-unused line pointers) and any
 * freeze plans to the heap page, optionally sets PD_ALL_VISIBLE plus the
 * visibility-map bits, resolves Hot Standby snapshot conflicts, and
 * refreshes the free space map.
 *
 * NOTE(review): this listing is a documentation extraction; several
 * statements (e.g. the xl_heap_prune header copy around listed lines
 * 44-45 and the vmflags assignments at 58-60) lost tokens in extraction.
 * Consult the original heapam_xlog.c before relying on exact statements.
 */
 32 XLogRecPtr lsn = record->EndRecPtr;
 33 char *maindataptr = XLogRecGetData(record);
 35 Buffer buffer;
 36 RelFileLocator rlocator;
 37 BlockNumber blkno;
 38 Buffer vmbuffer = InvalidBuffer;
 39 uint8 vmflags = 0;
 40 Size freespace = 0;
 41 bool do_update_fsm = false;
 42
 43 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
 46
 47 /*
 48 * We will take an ordinary exclusive lock or a cleanup lock depending on
 49 * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
 50 * lock, we better not be doing anything that requires moving existing
 51 * tuple data.
 52 */
 53 Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
 55
/*
 * Accumulate the VM bits to set: ALL_VISIBLE always when flagged,
 * plus ALL_FROZEN when the record says so. (Assignments elided by
 * extraction at listed lines 58/60 — TODO confirm against upstream.)
 */
 56 if (xlrec.flags & XLHP_VM_ALL_VISIBLE)
 57 {
 59 if (xlrec.flags & XLHP_VM_ALL_FROZEN)
 61 }
 62
 63 /*
 64 * After xl_heap_prune is the optional snapshot conflict horizon.
 65 *
 66 * In Hot Standby mode, we must ensure that there are no running queries
 67 * which would conflict with the changes in this record. That means we
 68 * can't replay this record if it removes tuples that are still visible to
 69 * transactions on the standby, freeze tuples with xids that are still
 70 * considered running on the standby, or set a page as all-visible in the
 71 * VM if it isn't all-visible to all transactions on the standby.
 72 */
 73 if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
 74 {
 76
 77 /* memcpy() because snapshot_conflict_horizon is stored unaligned */
 79 maindataptr += sizeof(TransactionId);
 80
 81 if (InHotStandby)
 83 (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
 84 rlocator);
 85 }
 86
 87 /*
 88 * If we have a full-page image of the heap block, restore it and we're
 89 * done with the heap block.
 90 */
 92 (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
 93 &buffer) == BLK_NEEDS_REDO)
 94 {
 95 Page page = BufferGetPage(buffer);
 96 OffsetNumber *redirected;
 97 OffsetNumber *nowdead;
 98 OffsetNumber *nowunused;
 99 int nredirected;
 100 int ndead;
 101 int nunused;
 102 int nplans;
 103 Size datalen;
 104 xlhp_freeze_plan *plans;
 106 char *dataptr = XLogRecGetBlockData(record, 0, &datalen);
 107 bool do_prune;
 108
 110 &nplans, &plans, &frz_offsets,
 111 &nredirected, &redirected,
 112 &ndead, &nowdead,
 113 &nunused, &nowunused);
 114
 115 do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
 116
 117 /* Ensure the record does something */
 119
 120 /*
 121 * Update all line pointers per the record, and repair fragmentation
 122 * if needed.
 123 */
 124 if (do_prune)
 126 (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
 127 redirected, nredirected,
 128 nowdead, ndead,
 129 nowunused, nunused);
 130
 131 /* Freeze tuples */
 132 for (int p = 0; p < nplans; p++)
 133 {
 135
 136 /*
 137 * Convert freeze plan representation from WAL record into
 138 * per-tuple format used by heap_execute_freeze_tuple
 139 */
 140 frz.xmax = plans[p].xmax;
 141 frz.t_infomask2 = plans[p].t_infomask2;
 142 frz.t_infomask = plans[p].t_infomask;
 143 frz.frzflags = plans[p].frzflags;
 144 frz.offset = InvalidOffsetNumber; /* unused, but be tidy */
 145
 146 for (int i = 0; i < plans[p].ntuples; i++)
 147 {
 148 OffsetNumber offset = *(frz_offsets++);
 149 ItemId lp;
 150 HeapTupleHeader tuple;
 151
 152 lp = PageGetItemId(page, offset);
 153 tuple = (HeapTupleHeader) PageGetItem(page, lp);
/* heap_execute_freeze_tuple(tuple, &frz) call elided by extraction here */
 155 }
 156 }
 157
 158 /* There should be no more data */
 159 Assert((char *) frz_offsets == dataptr + datalen);
 160
 161 /*
 162 * The critical integrity requirement here is that we must never end
 163 * up with the visibility map bit set and the page-level
 164 * PD_ALL_VISIBLE bit unset. If that were to occur, a subsequent page
 165 * modification would fail to clear the visibility map bit.
 166 */
 168 {
 169 PageSetAllVisible(page);
 170 PageClearPrunable(page);
 171 }
 172
 173 MarkBufferDirty(buffer);
 174
 175 /*
 176 * See log_heap_prune_and_freeze() for commentary on when we set the
 177 * heap page LSN.
 178 */
 179 if (do_prune || nplans > 0 ||
 181 PageSetLSN(page, lsn);
 182
 183 /*
 184 * Note: we don't worry about updating the page's prunability hints.
 185 * At worst this will cause an extra prune cycle to occur soon.
 186 */
 187 }
 188
 189 /*
 190 * If we 1) released any space or line pointers or 2) set PD_ALL_VISIBLE
 191 * or the VM, update the freespace map.
 192 *
 193 * Even when no actual space is freed (when only marking the page
 194 * all-visible or frozen), we still update the FSM. Because the FSM is
 195 * unlogged and maintained heuristically, it often becomes stale on
 196 * standbys. If such a standby is later promoted and runs VACUUM, it will
 197 * skip recalculating free space for pages that were marked
 198 * all-visible/all-frozen. FreeSpaceMapVacuum() can then propagate overly
 199 * optimistic free space values upward, causing future insertions to
 200 * select pages that turn out to be unusable. In bulk, this can lead to
 201 * long stalls.
 202 *
 203 * To prevent this, always update the FSM even when only marking a page
 204 * all-visible/all-frozen.
 205 *
 206 * Do this regardless of whether a full-page image is logged, since FSM
 207 * data is not part of the page itself.
 208 */
 209 if (BufferIsValid(buffer))
 210 {
 211 if ((xlrec.flags & (XLHP_HAS_REDIRECTIONS |
 215 {
 216 freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
 217 do_update_fsm = true;
 218 }
 219
 220 /*
 221 * We want to avoid holding an exclusive lock on the heap buffer while
 222 * doing IO (either of the FSM or the VM), so we'll release it now.
 223 */
 224 UnlockReleaseBuffer(buffer);
 225 }
 226
 227 /*
 228 * Now read and update the VM block.
 229 *
 230 * We must redo changes to the VM even if the heap page was skipped due to
 231 * LSN interlock. See comment in heap_xlog_multi_insert() for more details
 232 * on replaying changes to the VM.
 233 */
 237 false,
 238 &vmbuffer) == BLK_NEEDS_REDO)
 239 {
 240 Page vmpage = BufferGetPage(vmbuffer);
 241
 242 /* initialize the page if it was read as zeros */
 243 if (PageIsNew(vmpage))
 245
 246 visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);
 247
 248 Assert(BufferIsDirty(vmbuffer));
 249 PageSetLSN(vmpage, lsn);
 250 }
 251
 252 if (BufferIsValid(vmbuffer))
 253 UnlockReleaseBuffer(vmbuffer);
 254
 255 if (do_update_fsm)
 256 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
 257}
@ RBM_NORMAL
Definition bufmgr.h:46
uint32 TransactionId
Definition c.h:736
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition heapam.h:533
#define XLHP_HAS_CONFLICT_HORIZON
#define XLHP_VM_ALL_VISIBLE
#define SizeOfHeapPrune
#define XLHP_HAS_NOW_UNUSED_ITEMS
#define XLHP_VM_ALL_FROZEN
#define XLHP_HAS_REDIRECTIONS
#define XLHP_CLEANUP_LOCK
#define XLHP_HAS_DEAD_ITEMS
#define XLHP_IS_CATALOG_REL
void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, OffsetNumber **frz_offsets, int *nredirected, OffsetNumber **redirected, int *ndead, OffsetNumber **nowdead, int *nunused, OffsetNumber **nowunused)
Definition heapdesc.c:106
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition pruneheap.c:2065
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
Definition standby.c:470
TransactionId xmax
Definition heapam.h:156
TransactionId xmax
#define XLogHintBitIsNeeded()
Definition xlog.h:123
#define InHotStandby
Definition xlogutils.h:60

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferIsDirty(), BufferIsValid(), XLogReaderState::EndRecPtr, fb(), xlhp_freeze_plan::frzflags, heap_execute_freeze_tuple(), heap_page_prune_execute(), heap_xlog_deserialize_prune_and_freeze(), i, InHotStandby, InvalidBuffer, InvalidOffsetNumber, MarkBufferDirty(), memcpy(), xlhp_freeze_plan::ntuples, PageClearPrunable, PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), RBM_NORMAL, RBM_ZERO_ON_ERROR, ResolveRecoveryConflictWithSnapshot(), SizeOfHeapPrune, xlhp_freeze_plan::t_infomask, xlhp_freeze_plan::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set(), VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLogHintBitIsNeeded, XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecordPageWithFreeSpace(), HeapTupleFreeze::xmax, and xlhp_freeze_plan::xmax.

Referenced by heap2_redo().

◆ heap_xlog_update()

static void heap_xlog_update ( XLogReaderState * record,
bool  hot_update 
)
static

Definition at line 697 of file heapam_xlog.c.

 698{
/*
 * heap_xlog_update: redo routine for heap UPDATE and HOT-UPDATE records
 * (hot_update distinguishes the two).
 *
 * Marks the old tuple version as updated (setting xmax, infomask bits,
 * and the forward t_ctid chain link), then reconstructs and inserts the
 * new tuple version — possibly on a different page — using the prefix/
 * suffix compression data carried in the WAL record. Also clears
 * visibility-map bits for both pages when flagged, and updates the FSM.
 *
 * NOTE(review): this listing is a documentation extraction; a number of
 * statements lost tokens (e.g. several declarations near the top, the
 * visibility-map clearing calls around listed lines 746-755/830-839,
 * and the xlhdr copy around 871-872). Consult the original
 * heapam_xlog.c before relying on exact statements.
 */
 699 XLogRecPtr lsn = record->EndRecPtr;
 701 RelFileLocator rlocator;
 706 nbuffer;
 707 Page opage,
 708 npage;
 709 OffsetNumber offnum;
 710 ItemId lp;
 712 HeapTupleHeader htup;
 713 uint16 prefixlen = 0,
 714 suffixlen = 0;
 715 char *newp;
 716 union
 717 {
 720 } tbuf;
 723 Size freespace = 0;
 726
 727 /* initialize to keep the compiler quiet */
 728 oldtup.t_data = NULL;
 729 oldtup.t_len = 0;
 730
 731 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
 732 if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
 733 {
 734 /* HOT updates are never done across pages */
 736 }
 737 else
 738 oldblk = newblk;
 739
 740 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
 741
 742 /*
 743 * The visibility map may need to be fixed even if the heap page is
 744 * already up-to-date.
 745 */
 747 {
 749 Buffer vmbuffer = InvalidBuffer;
 750
 751 visibilitymap_pin(reln, oldblk, &vmbuffer);
 753 ReleaseBuffer(vmbuffer);
 755 }
 756
 757 /*
 758 * In normal operation, it is important to lock the two pages in
 759 * page-number order, to avoid possible deadlocks against other update
 760 * operations going the other way. However, during WAL replay there can
 761 * be no other update happening, so we don't need to worry about that. But
 762 * we *do* need to worry that we don't expose an inconsistent state to Hot
 763 * Standby queries --- so the original page can't be unlocked before we've
 764 * added the new tuple to the new page.
 765 */
 766
 767 /* Deal with old tuple version */
 768 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
 769 &obuffer);
 771 {
 773 offnum = xlrec->old_offnum;
 775 elog(PANIC, "offnum out of range");
 776 lp = PageGetItemId(opage, offnum);
 777 if (!ItemIdIsNormal(lp))
 778 elog(PANIC, "invalid lp");
 779
 781
 782 oldtup.t_data = htup;
 783 oldtup.t_len = ItemIdGetLength(lp);
 784
 787 if (hot_update)
 789 else
 791 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
 792 &htup->t_infomask2);
 793 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
 795 /* Set forward chain link in t_ctid */
 796 htup->t_ctid = newtid;
 797
 798 /* Mark the page as a candidate for pruning */
 800
 803
 804 PageSetLSN(opage, lsn);
 806 }
 807
 808 /*
 809 * Read the page the new tuple goes into, if different from old.
 810 */
 811 if (oldblk == newblk)
 812 {
 815 }
 816 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
 817 {
 818 nbuffer = XLogInitBufferForRedo(record, 0);
 819 npage = BufferGetPage(nbuffer);
 822 }
 823 else
 825
 826 /*
 827 * The visibility map may need to be fixed even if the heap page is
 828 * already up-to-date.
 829 */
 831 {
 833 Buffer vmbuffer = InvalidBuffer;
 834
 835 visibilitymap_pin(reln, newblk, &vmbuffer);
 837 ReleaseBuffer(vmbuffer);
 839 }
 840
 841 /* Deal with new tuple */
 843 {
 844 char *recdata;
 845 char *recdata_end;
 846 Size datalen;
 847 Size tuplen;
 848
 849 recdata = XLogRecGetBlockData(record, 0, &datalen);
 850 recdata_end = recdata + datalen;
 851
 852 npage = BufferGetPage(nbuffer);
 853
 854 offnum = xlrec->new_offnum;
 855 if (PageGetMaxOffsetNumber(npage) + 1 < offnum)
 856 elog(PANIC, "invalid max offset number");
 857
/*
 * Prefix/suffix lengths are only present in the record when the
 * corresponding XLH_UPDATE_{PREFIX,SUFFIX}_FROM_OLD flag is set;
 * both imply the old and new tuples share a page.
 */
 859 {
 860 Assert(newblk == oldblk);
 861 memcpy(&prefixlen, recdata, sizeof(uint16));
 862 recdata += sizeof(uint16);
 863 }
 865 {
 866 Assert(newblk == oldblk);
 867 memcpy(&suffixlen, recdata, sizeof(uint16));
 868 recdata += sizeof(uint16);
 869 }
 870
 873
 874 tuplen = recdata_end - recdata;
 875 Assert(tuplen <= MaxHeapTupleSize);
 876
 877 htup = &tbuf.hdr;
 879
 880 /*
 881 * Reconstruct the new tuple using the prefix and/or suffix from the
 882 * old tuple, and the data stored in the WAL record.
 883 */
 884 newp = (char *) htup + SizeofHeapTupleHeader;
 885 if (prefixlen > 0)
 886 {
 887 int len;
 888
 889 /* copy bitmap [+ padding] [+ oid] from WAL record */
 892 recdata += len;
 893 newp += len;
 894
 895 /* copy prefix from old tuple */
 896 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
 897 newp += prefixlen;
 898
 899 /* copy new tuple data from WAL record */
 900 len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
 902 recdata += len;
 903 newp += len;
 904 }
 905 else
 906 {
 907 /*
 908 * copy bitmap [+ padding] [+ oid] + data from record, all in one
 909 * go
 910 */
 911 memcpy(newp, recdata, tuplen);
 912 recdata += tuplen;
 913 newp += tuplen;
 914 }
 916
 917 /* copy suffix from old tuple */
 918 if (suffixlen > 0)
 919 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
 920
 922 htup->t_infomask2 = xlhdr.t_infomask2;
 923 htup->t_infomask = xlhdr.t_infomask;
 924 htup->t_hoff = xlhdr.t_hoff;
 925
 928 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
 929 /* Make sure there is no forward chain link in t_ctid */
 930 htup->t_ctid = newtid;
 931
 932 offnum = PageAddItem(npage, htup, newlen, offnum, true, true);
 933 if (offnum == InvalidOffsetNumber)
 934 elog(PANIC, "failed to add tuple");
 935
 937 PageClearAllVisible(npage);
 938
 939 /* needed to update FSM below */
 940 freespace = PageGetHeapFreeSpace(npage);
 941
 942 PageSetLSN(npage, lsn);
 943 /* See heap_insert() for why we set pd_prune_xid on insert */
 944 PageSetPrunable(npage, XLogRecGetXid(record));
 946 }
 947
 952
 953 /*
 954 * If the new page is running low on free space, update the FSM as well.
 955 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
 956 * better than that without knowing the fill-factor for the table.
 957 *
 958 * However, don't update the FSM on HOT updates, because after crash
 959 * recovery, either the old or the new tuple will certainly be dead and
 960 * prunable. After pruning, the page will have roughly as much free space
 961 * as it did before the update, assuming the new tuple is about the same
 962 * size as the old one.
 963 *
 964 * XXX: Don't do this if the page was restored from full page image. We
 965 * don't bother to update the FSM in that case, it doesn't need to be
 966 * totally accurate anyway.
 967 */
 968 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
 969 XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
 970}
uint16_t uint16
Definition c.h:623
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:87
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:85
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition heapam_xlog.h:91
static void HeapTupleHeaderSetHotUpdated(HeapTupleHeaderData *tup)
bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum, Buffer *prefetch_buffer)

References Assert, BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, fb(), FirstCommandId, fix_infomask_from_infobits(), FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetHotUpdated(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), len, MarkBufferDirty(), MaxHeapTupleSize, memcpy(), MemSet, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_hoff, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED, XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED, XLH_UPDATE_PREFIX_FROM_OLD, XLH_UPDATE_SUFFIX_FROM_OLD, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetBlockTagExtended(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().