77 bool all_visible_cleared,
bool new_all_visible_cleared);
85 bool *have_tuple_lock);
/*
 * Map a multixact member status to the heavyweight lock mode used when
 * locking a tuple with that status: the status is first translated to a
 * LockTupleMode (TUPLOCK_from_mxstatus) and then looked up in the
 * tupleLockExtraInfo table's hwlock field.
 */
157 #define LOCKMODE_from_mxstatus(status) \
158 (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
/*
 * Acquire/release/conditionally-acquire the heavyweight tuple lock whose
 * lock mode corresponds to the given LockTupleMode, via the
 * tupleLockExtraInfo lookup table.  These are thin wrappers around
 * LockTuple / UnlockTuple / ConditionalLockTuple.
 * NOTE(review): "mode" is evaluated as a bare macro argument (no extra
 * parentheses), so callers presumably pass a simple LockTupleMode value —
 * confirm against call sites.
 */
165 #define LockTupleTuplock(rel, tup, mode) \
166 LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
167 #define UnlockTupleTuplock(rel, tup, mode) \
168 UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
169 #define ConditionalLockTupleTuplock(rel, tup, mode) \
170 ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
183 } IndexDeletePrefetchState;
/*
 * Tuning constants for bottom-up index deletion (see
 * bottomup_sort_and_shrink / bottomup_nblocksfavorable).
 * NOTE(review): from the names these appear to cap the number of heap
 * blocks a bottom-up deletion pass will visit (MAX) and the slack allowed
 * beyond the favorable-block count (TOLERANCE) — the usage sites are not
 * visible here, so confirm against the full file before relying on this.
 */
187 #define BOTTOMUP_MAX_NBLOCKS 6
188 #define BOTTOMUP_TOLERANCE_NBLOCKS 3
/*
 * Translate a multixact member status into the corresponding
 * LockTupleMode by indexing the MultiXactStatusLock mapping array.
 */
216 #define TUPLOCK_from_mxstatus(status) \
217 (MultiXactStatusLock[(status)])
273 allow_strat = allow_sync =
false;
296 else if (keep_startblock)
359 Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
384 Assert(block < scan->rs_nblocks);
469 &loctup, buffer, snapshot);
607 *linesleft = *lineoff;
766 for (; linesleft > 0; linesleft--, lineoff += dir)
783 tuple, scan->rs_cbuf,
797 scan->rs_coffset = lineoff;
877 linesleft = scan->rs_ntuples;
883 for (; linesleft > 0; linesleft--, lineindex += dir)
888 lineoff = scan->rs_vistuples[lineindex];
902 scan->rs_cindex = lineindex;
992 if (parallel_scan != NULL)
1013 bool allow_strat,
bool allow_sync,
bool allow_pagemode)
1095 (
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1106 elog(
ERROR,
"unexpected heap_getnext call during logical decoding");
1463 bool *all_dead,
bool first_call)
1469 bool at_chain_start;
1476 *all_dead = first_call;
1480 at_chain_start = first_call;
1506 at_chain_start =
false;
1573 if (all_dead && *all_dead)
1591 at_chain_start =
false;
1831 bool all_visible_cleared =
false;
1879 all_visible_cleared =
true;
1930 if (all_visible_cleared)
2024 (
errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2025 errmsg(
"cannot insert tuples in a parallel worker")));
2042 if (relation->
rd_rel->relkind != RELKIND_RELATION &&
2043 relation->
rd_rel->relkind != RELKIND_MATVIEW)
2066 for (
int i = done;
i < ntuples;
i++)
2070 if (page_avail < tup_sz)
2075 page_avail -= tup_sz;
2107 bool starting_with_empty_page =
false;
2109 int npages_used = 0;
2120 for (
i = 0;
i < ntuples;
i++)
2157 while (ndone < ntuples)
2160 bool all_visible_cleared =
false;
2161 bool all_frozen_set =
false;
2176 if (ndone == 0 || !starting_with_empty_page)
2195 npages - npages_used);
2201 all_frozen_set =
true;
2216 if (needwal && need_cids)
2219 for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2221 HeapTuple heaptup = heaptuples[ndone + nthispage];
2232 if (needwal && need_cids)
2245 all_visible_cleared =
true;
2251 else if (all_frozen_set)
2268 char *scratchptr = scratch.
data;
2276 init = starting_with_empty_page;
2292 tupledata = scratchptr;
2295 Assert(!(all_visible_cleared && all_frozen_set));
2298 if (all_visible_cleared)
2309 for (
i = 0;
i < nthispage;
i++)
2331 scratchptr += datalen;
2333 totaldatalen = scratchptr - tupledata;
2334 Assert((scratchptr - scratch.
data) < BLCKSZ);
2336 if (need_tuple_data)
2344 if (ndone + nthispage == ntuples)
2357 if (need_tuple_data)
2434 for (
i = 0;
i < ntuples;
i++)
2439 for (
i = 0;
i < ntuples;
i++)
2440 slots[
i]->tts_tid = heaptuples[
i]->t_self;
2492 const uint16 interesting =
2495 if ((new_infomask & interesting) != (old_infomask & interesting))
2528 bool have_tuple_lock =
false;
2530 bool all_visible_cleared =
false;
2532 bool old_key_copied =
false;
2543 (
errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2544 errmsg(
"cannot delete tuples during a parallel operation")));
2590 (
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2591 errmsg(
"attempted to delete invisible tuple")));
2617 bool current_is_member =
false;
2628 if (!current_is_member)
2708 if (result !=
TM_Ok)
2726 if (result !=
TM_Ok)
2735 if (have_tuple_lock)
2775 &new_xmax, &new_infomask, &new_infomask2);
2790 all_visible_cleared =
true;
2833 if (all_visible_cleared)
2840 xlrec.
xmax = new_xmax;
2842 if (old_key_tuple != NULL)
2844 if (relation->
rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2858 if (old_key_tuple != NULL)
2867 old_key_tuple->
t_len
2892 if (relation->
rd_rel->relkind != RELKIND_RELATION &&
2893 relation->
rd_rel->relkind != RELKIND_MATVIEW)
2914 if (have_tuple_lock)
2919 if (old_key_tuple != NULL && old_key_copied)
2947 elog(
ERROR,
"tuple already updated by self");
2955 elog(
ERROR,
"tuple concurrently updated");
2959 elog(
ERROR,
"tuple concurrently deleted");
2963 elog(
ERROR,
"unrecognized heap_delete status: %u", result);
2997 bool old_key_copied =
false;
3008 bool have_tuple_lock =
false;
3010 bool use_hot_update =
false;
3011 bool summarized_update =
false;
3013 bool all_visible_cleared =
false;
3014 bool all_visible_cleared_new =
false;
3015 bool checked_lockers;
3016 bool locker_remains;
3017 bool id_has_external =
false;
3020 uint16 infomask_old_tuple,
3021 infomask2_old_tuple,
3023 infomask2_new_tuple;
3038 (
errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3039 errmsg(
"cannot update tuples during a parallel operation")));
3064 interesting_attrs = NULL;
3110 newtup, &id_has_external);
3155 checked_lockers =
false;
3156 locker_remains =
false;
3166 (
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3167 errmsg(
"attempted to update invisible tuple")));
3173 bool can_continue =
false;
3218 bool current_is_member =
false;
3221 *lockmode, ¤t_is_member))
3229 if (!current_is_member)
3237 checked_lockers =
true;
3238 locker_remains = remain != 0;
3285 can_continue =
true;
3293 checked_lockers =
true;
3294 locker_remains =
true;
3295 can_continue =
true;
3304 checked_lockers =
true;
3305 locker_remains =
true;
3306 can_continue =
true;
3319 checked_lockers =
true;
3335 can_continue =
true;
3347 if (result !=
TM_Ok)
3365 if (result !=
TM_Ok)
3374 if (have_tuple_lock)
3415 xid, *lockmode,
true,
3416 &xmax_old_tuple, &infomask_old_tuple,
3417 &infomask2_old_tuple);
3428 (checked_lockers && !locker_remains))
3436 infomask2_new_tuple = 0;
3449 &infomask2_new_tuple);
3454 infomask2_new_tuple = 0;
3487 if (relation->
rd_rel->relkind != RELKIND_RELATION &&
3488 relation->
rd_rel->relkind != RELKIND_MATVIEW)
3504 if (need_toast || newtupsize > pagefree)
3507 uint16 infomask_lock_old_tuple,
3508 infomask2_lock_old_tuple;
3509 bool cleared_all_frozen =
false;
3532 xid, *lockmode,
false,
3533 &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3534 &infomask2_lock_old_tuple);
3563 cleared_all_frozen =
true;
3576 xlrec.
xmax = xmax_lock_old_tuple;
3631 if (newtupsize > pagefree)
3636 &vmbuffer_new, &vmbuffer,
3648 if (newtupsize > pagefree ||
3696 if (newbuf == buffer)
3705 use_hot_update =
true;
3715 summarized_update =
true;
3789 all_visible_cleared =
true;
3796 all_visible_cleared_new =
true;
3802 if (newbuf != buffer)
3822 newbuf, &oldtup, heaptup,
3824 all_visible_cleared,
3825 all_visible_cleared_new);
3826 if (newbuf != buffer)
3835 if (newbuf != buffer)
3850 if (newbuf != buffer)
3861 if (have_tuple_lock)
3870 if (heaptup != newtup)
3884 if (summarized_update)
3890 *update_indexes =
TU_All;
3892 if (old_key_tuple != NULL && old_key_copied)
3911 bool isnull1,
bool isnull2)
3919 if (isnull1 != isnull2)
3944 Assert(attrnum <= tupdesc->natts);
3946 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4012 value1 =
heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4013 value2 =
heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4016 value2, isnull1, isnull2))
4026 if (attrnum < 0 || isnull1 ||
4036 *has_external =
true;
4061 &tmfd, &lockmode, update_indexes);
4066 elog(
ERROR,
"tuple already updated by self");
4074 elog(
ERROR,
"tuple concurrently updated");
4078 elog(
ERROR,
"tuple concurrently deleted");
4082 elog(
ERROR,
"unrecognized heap_update status: %u", result);
4103 is_update ?
"true" :
"false");
4142 bool follow_updates,
4156 bool first_time =
true;
4157 bool skip_tuple_lock =
false;
4158 bool have_tuple_lock =
false;
4159 bool cleared_all_frozen =
false;
4246 for (
i = 0;
i < nmembers;
i++)
4271 skip_tuple_lock =
true;
4320 require_sleep =
true;
4354 if (follow_updates && updated)
4385 require_sleep =
false;
4414 require_sleep =
false;
4440 require_sleep =
false;
4453 require_sleep =
false;
4479 require_sleep =
false;
4498 else if (require_sleep)
4510 if (!skip_tuple_lock &&
4530 elog(
ERROR,
"invalid lock mode in heap_lock_tuple");
4533 switch (wait_policy)
4541 status, infomask, relation,
4552 status, infomask, relation,
4555 (
errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4556 errmsg(
"could not obtain lock on row in relation \"%s\"",
4575 switch (wait_policy)
4593 (
errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4594 errmsg(
"could not obtain lock on row in relation \"%s\"",
4650 if (!require_sleep ||
4662 if (result !=
TM_Ok)
4724 &xid, &new_infomask, &new_infomask2);
4760 cleared_all_frozen =
true;
4819 if (have_tuple_lock)
4841 if (*have_tuple_lock)
4844 switch (wait_policy)
4858 (
errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4859 errmsg(
"could not obtain lock on row in relation \"%s\"",
4863 *have_tuple_lock =
true;
4891 uint16 *result_infomask2)
4914 new_xmax = add_to_xmax;
4924 new_xmax = add_to_xmax;
4928 new_xmax = add_to_xmax;
4932 new_xmax = add_to_xmax;
4936 new_xmax = add_to_xmax;
5060 elog(
WARNING,
"LOCK_ONLY found for Xid in progress %u", xmax);
5082 if (xmax == add_to_xmax)
5096 if (
mode < old_mode)
5107 add_to_xmax, new_status);
5147 *result_infomask = new_infomask;
5148 *result_infomask2 = new_infomask2;
5149 *result_xmax = new_xmax;
5273 bool cleared_all_frozen =
false;
5274 bool pinned_desired_page;
5312 pinned_desired_page =
true;
5315 pinned_desired_page =
false;
5395 for (
i = 0;
i < nmembers;
i++)
5428 if (result !=
TM_Ok)
5465 elog(
ERROR,
"invalid lock status in tuple");
5499 if (result !=
TM_Ok)
5509 &new_xmax, &new_infomask, &new_infomask2);
5514 cleared_all_frozen =
true;
5538 xlrec.
xmax = new_xmax;
5773 elog(
ERROR,
"attempted to kill a tuple inserted by another transaction");
5775 elog(
ERROR,
"attempted to kill a non-speculative tuple");
5798 prune_xid = relation->
rd_rel->relfrozenxid;
5907 (
errcode(ERRCODE_INVALID_TRANSACTION_STATE),
5908 errmsg(
"cannot update tuples during a parallel operation")));
5931 memcpy((
char *) htup + htup->
t_hoff,
/*
 * Bit flags reported via the *flags output parameter of
 * FreezeMultiXactId, describing what the caller must do with the tuple's
 * xmax:
 *   FRM_NOOP             - nothing to change
 *   FRM_INVALIDATE_XMAX  - xmax should be cleared to InvalidTransactionId
 *   FRM_RETURN_IS_XID    - returned value is a plain TransactionId
 *   FRM_RETURN_IS_MULTI  - returned value is a new MultiXactId
 *   FRM_MARK_COMMITTED   - additionally set HEAP_XMAX_COMMITTED
 * NOTE(review): meanings inferred from the flag names and the
 * FreezeMultiXactId signature; the consuming code is not visible in this
 * excerpt — verify against heap_prepare_freeze_tuple.
 */
5972 #define FRM_NOOP 0x0001
5973 #define FRM_INVALIDATE_XMAX 0x0002
5974 #define FRM_RETURN_IS_XID 0x0004
5975 #define FRM_RETURN_IS_MULTI 0x0008
5976 #define FRM_MARK_COMMITTED 0x0010
6037 bool update_committed;
6071 errmsg_internal(
"multixact %u from before multi freeze cutoff %u found to be still running",
6086 errmsg_internal(
"multixact %u contains update XID %u from before relfrozenxid %u",
6099 errmsg_internal(
"multixact %u contains committed update XID %u from before removable cutoff %u",
6147 need_replace =
false;
6149 for (
int i = 0;
i < nmembers;
i++)
6158 need_replace =
true;
6162 FreezePageRelfrozenXid = xid;
6194 has_lockers =
false;
6196 update_committed =
false;
6201 for (
int i = 0;
i < nmembers;
i++)
6220 errmsg_internal(
"multixact %u contains running locker XID %u from before removable cutoff %u",
6223 newmembers[nnewmembers++] = members[
i];
6264 update_committed =
true;
6283 errmsg_internal(
"multixact %u contains committed update XID %u from before removable cutoff %u",
6285 newmembers[nnewmembers++] = members[
i];
6294 if (nnewmembers == 0)
6309 Assert(nnewmembers == 1);
6311 if (update_committed)
6313 newxmax = update_xid;
6380 bool xmin_already_frozen =
false,
6381 xmax_already_frozen =
false;
6382 bool freeze_xmin =
false,
6383 replace_xvac =
false,
6384 replace_xmax =
false,
6385 freeze_xmax =
false;
6401 xmin_already_frozen =
true;
6494 frz->
xmax = newxmax;
6497 replace_xmax =
true;
6521 frz->
xmax = newxmax;
6522 replace_xmax =
true;
6564 xmax_already_frozen =
true;
6569 errmsg_internal(
"found raw xmax %u (infomask 0x%04x) not invalid and not multi",
6574 Assert(!xmin_already_frozen);
6593 Assert(!xmax_already_frozen && !freeze_xmax);
6600 Assert(!xmax_already_frozen && !replace_xmax);
6619 *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
6620 (freeze_xmax || xmax_already_frozen));
6623 xmax_already_frozen))
6636 return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
6693 for (
int i = 0;
i < ntuples;
i++)
6734 for (
int i = 0;
i < ntuples;
i++)
6881 for (
int i = 0;
i < ntuples;
i++)
6917 Assert(nplans > 0 && nplans <= ntuples);
6935 bool totally_frozen;
6946 pagefrz.freeze_required =
true;
6953 &pagefrz, &frz, &totally_frozen);
6981 bool has_update =
false;
6990 for (
i = 0;
i < nmembers;
i++)
6999 if (
mode > strongest)
7003 switch (members[
i].status)
7039 *new_infomask = bits;
7040 *new_infomask2 = bits2;
7073 for (
i = 0;
i < nmembers;
i++)
7081 update_xact = members[
i].
xid;
7082 #ifndef USE_ASSERT_CHECKING
7127 bool result =
false;
7139 for (
i = 0;
i < nmembers;
i++)
7144 if (result && (current_is_member == NULL || *current_is_member))
7150 memxid = members[
i].
xid;
7153 if (current_is_member != NULL)
7154 *current_is_member =
true;
7222 uint16 infomask,
bool nowait,
7240 for (
i = 0;
i < nmembers;
i++)
7397 bool freeze =
false;
7405 *NoFreezePageRelfrozenXid = xid;
7423 *NoFreezePageRelfrozenXid = xid;
7435 *NoFreezePageRelminMxid = multi;
7447 *NoFreezePageRelminMxid = multi;
7455 for (
int i = 0;
i < nmembers;
i++)
7457 xid = members[
i].
xid;
7460 *NoFreezePageRelfrozenXid = xid;
7475 *NoFreezePageRelfrozenXid = xid;
7508 *snapshotConflictHorizon = xvac;
7522 *snapshotConflictHorizon = xmax;
7538 index_delete_prefetch_buffer(
Relation rel,
7539 IndexDeletePrefetchState *prefetch_state,
7542 BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
7545 int ndeltids = prefetch_state->ndeltids;
7548 for (
i = prefetch_state->next_item;
7549 i < ndeltids && count < prefetch_count;
7567 prefetch_state->next_item =
i;
7568 prefetch_state->cur_hblkno = cur_hblkno;
7592 if (
unlikely(indexpagehoffnum > maxoff))
7594 (
errcode(ERRCODE_INDEX_CORRUPTED),
7595 errmsg_internal(
"heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
7604 (
errcode(ERRCODE_INDEX_CORRUPTED),
7605 errmsg_internal(
"heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
7620 (
errcode(ERRCODE_INDEX_CORRUPTED),
7621 errmsg_internal(
"heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
7653 IndexDeletePrefetchState prefetch_state;
7654 int prefetch_distance;
7657 int finalndeltids = 0,
7658 nblocksaccessed = 0;
7661 int nblocksfavorable = 0;
7664 actualfreespace = 0;
7665 bool bottomup_final_block =
false;
7685 prefetch_state.next_item = 0;
7686 prefetch_state.ndeltids = delstate->
ndeltids;
7687 prefetch_state.deltids = delstate->
deltids;
7705 Assert(nblocksfavorable >= 1);
7707 prefetch_distance =
Min(prefetch_distance, nblocksfavorable);
7711 index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7754 if (bottomup_final_block)
7763 if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
7765 lastfreespace = actualfreespace;
7791 Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
7792 if (nblocksfavorable > 0)
7795 curtargetfreespace /= 2;
7814 index_delete_prefetch_buffer(rel, &prefetch_state, 1);
7840 &heapTuple, NULL,
true))
7851 if (actualfreespace >= curtargetfreespace)
7852 bottomup_final_block =
true;
7876 if (offnum > maxoff)
7913 &snapshotConflictHorizon);
7931 finalndeltids =
i + 1;
7943 delstate->
ndeltids = finalndeltids;
7945 return snapshotConflictHorizon;
7962 return (blk1 < blk2) ? -1 : 1;
7969 return (pos1 < pos2) ? -1 : 1;
8000 const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8004 "element size exceeds 8 bytes");
8006 for (
int g = 0; g <
lengthof(gaps); g++)
8008 for (
int hi = gaps[g],
i = low + hi;
i < ndeltids;
i++)
8015 deltids[
j] = deltids[
j - hi];
8088 int64 lastblock = -1;
8089 int nblocksfavorable = 0;
8091 Assert(nblockgroups >= 1);
8103 for (
int b = 0;
b < nblockgroups;
b++)
8109 if (lastblock != -1 &&
8119 Assert(nblocksfavorable >= 1);
8121 return nblocksfavorable;
8156 if (ntids1 > ntids2)
8158 if (ntids1 < ntids2)
8206 int nblockgroups = 0;
8208 int nblocksfavorable = 0;
8232 blockgroups[nblockgroups - 1].
ntids = 1;
8237 blockgroups[nblockgroups - 1].
ntids++;
8272 for (
int b = 0;
b < nblockgroups;
b++)
8294 for (
int b = 0;
b < nblockgroups;
b++)
8299 memcpy(reordereddeltids + ncopied, firstdtid,
8301 ncopied += group->
ntids;
8305 memcpy(delstate->
deltids, reordereddeltids,
8309 pfree(reordereddeltids);
8312 return nblocksfavorable;
8341 xlrec.
flags = vmflags;
8367 bool all_visible_cleared,
bool new_all_visible_cleared)
8412 if (oldbuf == newbuf && !need_tuple_data &&
8421 for (prefixlen = 0; prefixlen <
Min(oldlen, newlen); prefixlen++)
8423 if (newp[prefixlen] != oldp[prefixlen])
8435 for (suffixlen = 0; suffixlen <
Min(oldlen, newlen) - prefixlen; suffixlen++)
8437 if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8446 if (all_visible_cleared)
8448 if (new_all_visible_cleared)
8454 if (need_tuple_data)
8459 if (reln->
rd_rel->relreplident == REPLICA_IDENTITY_FULL)
8489 if (need_tuple_data)
8493 if (oldbuf != newbuf)
8501 if (prefixlen > 0 || suffixlen > 0)
8503 if (prefixlen > 0 && suffixlen > 0)
8505 prefix_suffix[0] = prefixlen;
8506 prefix_suffix[1] = suffixlen;
8509 else if (prefixlen > 0)
8557 if (need_tuple_data && old_key_tuple)
8671 char replident = relation->
rd_rel->relreplident;
8682 if (replident == REPLICA_IDENTITY_NOTHING)
8685 if (replident == REPLICA_IDENTITY_FULL)
8723 for (
int i = 0;
i < desc->
natts;
i++)
8802 ndead = xlrec->
ndead;
8803 end = (
OffsetNumber *) ((
char *) redirected + datalen);
8804 nowdead = redirected + (nredirected * 2);
8805 nowunused = nowdead + ndead;
8806 nunused = (end - nowunused);
8811 redirected, nredirected,
8813 nowunused, nunused);
9087 for (
int p = 0; p < xlrec->
nplans; p++)
9212 htup->
t_ctid = target_tid;
9281 elog(
PANIC,
"invalid max offset number");
9297 htup->t_infomask2 = xlhdr.t_infomask2;
9298 htup->t_infomask = xlhdr.t_infomask;
9299 htup->t_hoff = xlhdr.t_hoff;
9302 htup->t_ctid = target_tid;
9405 endptr = tupdata +
len;
9424 elog(
PANIC,
"invalid max offset number");
9442 htup->t_hoff = xlhdr->
t_hoff;
9452 if (tupdata != endptr)
9453 elog(
PANIC,
"total tuple length mismatch");
9601 if (oldblk == newblk)
9604 newaction = oldaction;
9640 recdata_end = recdata + datalen;
9646 elog(
PANIC,
"invalid max offset number");
9650 Assert(newblk == oldblk);
9651 memcpy(&prefixlen, recdata,
sizeof(
uint16));
9652 recdata +=
sizeof(
uint16);
9656 Assert(newblk == oldblk);
9657 memcpy(&suffixlen, recdata,
sizeof(
uint16));
9658 recdata +=
sizeof(
uint16);
9664 tuplen = recdata_end - recdata;
9681 memcpy(newp, recdata,
len);
9691 memcpy(newp, recdata,
len);
9701 memcpy(newp, recdata, tuplen);
9705 Assert(recdata == recdata_end);
9709 memcpy(newp, (
char *) oldtup.
t_data + oldtup.
t_len - suffixlen, suffixlen);
9712 htup->t_infomask2 = xlhdr.t_infomask2;
9713 htup->t_infomask = xlhdr.t_infomask;
9714 htup->t_hoff = xlhdr.t_hoff;
9720 htup->t_ctid = newtid;
9755 if (newaction ==
BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
9955 if (oldlen != newlen)
9958 memcpy((
char *) htup + htup->
t_hoff, newtup, newlen);
10009 elog(
PANIC,
"heap_redo: unknown op code %u", info);
10049 elog(
PANIC,
"heap2_redo: unknown op code %u", info);
10096 page_htup->t_choice.t_heap.t_field3.t_cid =
MASK_MARKER;
10175 switch (htsvResult)
10209 elog(
ERROR,
"unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
int bms_next_member(const Bitmapset *a, int prevbit)
void bms_free(Bitmapset *a)
bool bms_is_member(int x, const Bitmapset *a)
Bitmapset * bms_add_member(Bitmapset *a, int x)
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
#define InvalidBlockNumber
static bool BlockNumberIsValid(BlockNumber blockNumber)
static Datum values[MAXATTR]
void mask_page_lsn_and_checksum(Page page)
void mask_unused_space(Page page)
void mask_page_hint_bits(Page page)
BlockNumber BufferGetBlockNumber(Buffer buffer)
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
void ReleaseBuffer(Buffer buffer)
void UnlockReleaseBuffer(Buffer buffer)
void MarkBufferDirty(Buffer buffer)
void LockBuffer(Buffer buffer, int mode)
int maintenance_io_concurrency
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
#define BUFFER_LOCK_UNLOCK
#define BUFFER_LOCK_SHARE
#define RelationGetNumberOfBlocks(reln)
static Page BufferGetPage(Buffer buffer)
static Size BufferGetPageSize(Buffer buffer)
#define BUFFER_LOCK_EXCLUSIVE
static bool BufferIsValid(Buffer bufnum)
Size PageGetHeapFreeSpace(Page page)
void PageTruncateLinePointerArray(Page page)
void PageInit(Page page, Size pageSize, Size specialSize)
Size PageGetFreeSpace(Page page)
static Item PageGetItem(Page page, ItemId itemId)
static void PageClearAllVisible(Page page)
#define SizeOfPageHeaderData
static void PageSetAllVisible(Page page)
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
static bool PageIsNew(Page page)
static bool PageIsAllVisible(Page page)
static void PageSetFull(Page page)
static void PageSetLSN(Page page, XLogRecPtr lsn)
static OffsetNumber PageGetMaxOffsetNumber(Page page)
#define PageSetPrunable(page, xid)
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
TransactionId MultiXactId
#define MemSet(start, val, len)
#define StaticAssertDecl(condition, errmessage)
bool IsToastRelation(Relation relation)
bool IsCatalogRelation(Relation relation)
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup)
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
bool datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
static void PGresult * res
int errmsg_internal(const char *fmt,...)
int errdetail_internal(const char *fmt,...)
int errcode(int sqlerrcode)
int errmsg(const char *fmt,...)
#define ereport(elevel,...)
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
HeapTuple ExecFetchSlotHeapTuple(TupleTableSlot *slot, bool materialize, bool *shouldFree)
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
void FreeAccessStrategy(BufferAccessStrategy strategy)
void XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, Size spaceAvail)
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
void heap_finish_speculative(Relation relation, ItemPointer tid)
void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
static void heap_xlog_prune(XLogReaderState *record)
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
XLogRecPtr log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer, TransactionId snapshotConflictHorizon, uint8 vmflags)
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
void heap_redo(XLogReaderState *record)
struct IndexDeleteCounts IndexDeleteCounts
static int heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, xl_heap_freeze_plan *plans_out, OffsetNumber *offsets_out)
bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf)
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
#define BOTTOMUP_TOLERANCE_NBLOCKS
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
void heap_mask(char *pagedata, BlockNumber blkno)
static int heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
void simple_heap_delete(Relation relation, ItemPointer tid)
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
bool heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
#define LOCKMODE_from_mxstatus(status)
void heap_endscan(TableScanDesc sscan)
static void heap_xlog_insert(XLogReaderState *record)
TransactionId HeapTupleGetUpdateXid(HeapTupleHeader tuple)
static void index_delete_check_htid(TM_IndexDeleteOp *delstate, Page page, OffsetNumber maxoff, ItemPointer htid, TM_IndexStatus *istatus)
#define FRM_RETURN_IS_XID
#define TUPLOCK_from_mxstatus(status)
void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
static int index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
TM_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
#define ConditionalLockTupleTuplock(rel, tup, mode)
static void fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, const struct VacuumCutoffs *cutoffs, uint16 *flags, HeapPageFreeze *pagefrz)
static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, bool *copy)
void heap_inplace_update(Relation relation, HeapTuple tuple)
#define LockTupleTuplock(rel, tup, mode)
bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid)
static void heap_xlog_update(XLogReaderState *record, bool hot_update)
static void heap_xlog_vacuum(XLogReaderState *record)
bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId FreezeLimit, TransactionId MultiXactCutoff)
static BlockNumber heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
#define BOTTOMUP_MAX_NBLOCKS
static int heap_log_freeze_cmp(const void *arg1, const void *arg2)
void ReleaseBulkInsertStatePin(BulkInsertState bistate)
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
#define FRM_MARK_COMMITTED
HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
static void heap_log_freeze_new_plan(xl_heap_freeze_plan *plan, HeapTupleFreeze *frz)
void simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup, TU_UpdateIndexes *update_indexes)
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
bool heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
static void heap_xlog_delete(XLogReaderState *record)
void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid)
static void heap_xlog_lock_updated(XLogReaderState *record)
static void heap_xlog_lock(XLogReaderState *record)
static void heap_xlog_multi_insert(XLogReaderState *record)
void heap_abort_speculative(Relation relation, ItemPointer tid)
static void heap_xlog_visible(XLogReaderState *record)
TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
static Page heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
void heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId snapshotConflictHorizon, HeapTupleFreeze *tuples, int ntuples)
#define FRM_RETURN_IS_MULTI
static void heap_xlog_inplace(XLogReaderState *record)
#define FRM_INVALIDATE_XMAX
static const struct @12 tupleLockExtraInfo[MaxLockTupleMode+1]
static void heap_xlog_confirm(XLogReaderState *record)
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
static bool heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2, bool isnull1, bool isnull2)
static void index_delete_sort(TM_IndexDeleteOp *delstate)
static Bitmapset * HeapDetermineColumnsInfo(Relation relation, Bitmapset *interesting_cols, Bitmapset *external_cols, HeapTuple oldtup, HeapTuple newtup, bool *has_external)
static const int MultiXactStatusLock[MaxMultiXactStatus+1]
void simple_heap_insert(Relation relation, HeapTuple tup)
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
void heap2_redo(XLogReaderState *record)
#define UnlockTupleTuplock(rel, tup, mode)
static bool heap_log_freeze_eq(xl_heap_freeze_plan *plan, HeapTupleFreeze *frz)
static TM_Result test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
static BlockNumber heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
void heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
static int bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
static int bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid)
void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
void heapgetpage(TableScanDesc sscan, BlockNumber block)
static Page heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff)
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
BulkInsertState GetBulkInsertState(void)
void FreeBulkInsertState(BulkInsertState bistate)
static TM_Result heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
static void heap_xlog_freeze_page(XLogReaderState *record)
#define HEAP_INSERT_SPECULATIVE
#define HEAP_FREEZE_CHECK_XMAX_ABORTED
struct HeapScanDescData * HeapScanDesc
@ HEAPTUPLE_RECENTLY_DEAD
@ HEAPTUPLE_INSERT_IN_PROGRESS
@ HEAPTUPLE_DELETE_IN_PROGRESS
#define HEAP_INSERT_FROZEN
#define HEAP_FREEZE_CHECK_XMIN_COMMITTED
#define HEAP_INSERT_NO_LOGICAL
struct BulkInsertStateData * BulkInsertState
const TableAmRoutine * GetHeapamTableAmRoutine(void)
void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer, uint16 infomask, TransactionId xid)
bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
bool HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer)
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define XLH_INSERT_ON_TOAST_RELATION
#define SizeOfHeapMultiInsert
#define XLOG_HEAP2_MULTI_INSERT
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
#define SizeOfHeapVisible
#define XLOG_HEAP_HOT_UPDATE
#define XLOG_HEAP2_VACUUM
#define XLH_INSERT_IS_SPECULATIVE
#define XLOG_HEAP2_REWRITE
#define XLH_LOCK_ALL_FROZEN_CLEARED
#define XLH_DELETE_CONTAINS_OLD_KEY
#define SizeOfHeapInplace
#define XLOG_HEAP_TRUNCATE
#define XLH_UPDATE_CONTAINS_NEW_TUPLE
#define XLH_INSERT_LAST_IN_MULTI
#define SizeOfHeapFreezePage
#define XLH_INSERT_ALL_FROZEN_SET
#define XLHL_XMAX_KEYSHR_LOCK
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLH_UPDATE_CONTAINS_OLD_TUPLE
#define SizeOfHeapLockUpdated
#define XLHL_XMAX_IS_MULTI
struct xl_heap_freeze_plan xl_heap_freeze_plan
#define XLH_INSERT_ALL_VISIBLE_CLEARED
#define XLH_DELETE_IS_PARTITION_MOVE
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
#define XLHL_XMAX_LOCK_ONLY
#define XLOG_HEAP_INPLACE
#define XLOG_HEAP2_LOCK_UPDATED
#define XLH_UPDATE_SUFFIX_FROM_OLD
#define XLOG_HEAP2_FREEZE_PAGE
#define XLH_UPDATE_PREFIX_FROM_OLD
#define SizeOfMultiInsertTuple
#define XLHL_XMAX_EXCL_LOCK
#define XLOG_HEAP2_NEW_CID
#define XLH_DELETE_CONTAINS_OLD_TUPLE
#define XLH_DELETE_IS_SUPER
#define XLH_UPDATE_CONTAINS_OLD_KEY
#define XLHL_KEYS_UPDATED
#define XLOG_HEAP2_VISIBLE
#define XLH_INSERT_CONTAINS_NEW_TUPLE
#define XLOG_HEAP_INIT_PAGE
#define SizeOfHeapConfirm
#define XLOG_HEAP_CONFIRM
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
HeapTuple heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, int options)
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
#define TOAST_TUPLE_THRESHOLD
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, const Datum *values, const bool *isnull)
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
void heap_freetuple(HeapTuple htup)
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other, int num_pages)
HeapTupleHeaderData * HeapTupleHeader
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
#define HeapTupleHeaderSetXminFrozen(tup)
#define HEAP_XMAX_SHR_LOCK
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
#define HeapTupleHeaderGetNatts(tup)
#define SizeofHeapTupleHeader
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
#define HEAP_KEYS_UPDATED
#define HeapTupleHeaderIsHeapOnly(tup)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
#define HeapTupleSetHotUpdated(tuple)
#define HeapTupleHeaderGetXvac(tup)
#define HeapTupleHeaderSetXmin(tup, xid)
#define HeapTupleHeaderSetXmax(tup, xid)
#define HEAP_XMAX_LOCK_ONLY
#define HeapTupleHeaderGetXmin(tup)
#define HeapTupleHasExternal(tuple)
#define HeapTupleHeaderClearHotUpdated(tup)
#define HeapTupleHeaderSetCmin(tup, cid)
#define HeapTupleHeaderSetHotUpdated(tup)
#define HeapTupleHeaderSetXvac(tup, xid)
#define HEAP_XMAX_IS_MULTI
#define HEAP_XMAX_COMMITTED
#define HeapTupleIsHeapOnly(tuple)
#define HeapTupleHeaderGetRawXmin(tup)
#define HeapTupleHeaderXminFrozen(tup)
#define HEAP_XMAX_EXCL_LOCK
#define HEAP_XMAX_INVALID
#define HeapTupleHeaderXminCommitted(tup)
#define MaxHeapAttributeNumber
#define HeapTupleHeaderSetMovedPartitions(tup)
#define HeapTupleIsHotUpdated(tuple)
#define HeapTupleHeaderGetRawXmax(tup)
#define MaxHeapTuplesPerPage
#define HeapTupleSetHeapOnly(tuple)
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
#define HeapTupleClearHeapOnly(tuple)
#define HEAP_XMAX_KEYSHR_LOCK
#define HeapTupleHeaderGetUpdateXid(tup)
#define HeapTupleClearHotUpdated(tuple)
#define HeapTupleHeaderGetRawCommandId(tup)
#define HEAP_LOCKED_UPGRADED(infomask)
#define HeapTupleHeaderIsSpeculative(tup)
#define HeapTupleHeaderIsHotUpdated(tup)
#define HeapTupleHeaderXminInvalid(tup)
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
#define IsParallelWorker()
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
if(TABLE==NULL||TABLE_index==NULL)
#define ItemIdGetLength(itemId)
#define ItemIdIsNormal(itemId)
#define ItemIdGetOffset(itemId)
struct ItemIdData ItemIdData
#define ItemIdGetRedirect(itemId)
#define ItemIdIsDead(itemId)
#define ItemIdIsUsed(itemId)
#define ItemIdSetUnused(itemId)
#define ItemIdIsRedirected(itemId)
#define ItemIdHasStorage(itemId)
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
static void ItemPointerSetInvalid(ItemPointerData *pointer)
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
static BlockNumber ItemPointerGetBlockNumberNoCheck(const ItemPointerData *pointer)
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Assert(fmt[strlen(fmt) - 1] !='\n')
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
bool ConditionalXactLockTableWait(TransactionId xid)
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
#define AccessExclusiveLock
@ LockTupleNoKeyExclusive
void pfree(void *pointer)
#define IsBootstrapProcessingMode()
#define START_CRIT_SECTION()
#define CHECK_FOR_INTERRUPTS()
#define END_CRIT_SECTION()
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2)
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
void MultiXactIdSetOldestMember(void)
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool isLockOnly)
#define MultiXactIdIsValid(multi)
@ MultiXactStatusForShare
@ MultiXactStatusForNoKeyUpdate
@ MultiXactStatusNoKeyUpdate
@ MultiXactStatusForUpdate
@ MultiXactStatusForKeyShare
#define ISUPDATE_from_mxstatus(status)
#define InvalidMultiXactId
#define MaxMultiXactStatus
#define InvalidOffsetNumber
#define OffsetNumberIsValid(offsetNumber)
#define OffsetNumberNext(offsetNumber)
#define FirstOffsetNumber
#define OffsetNumberPrev(offsetNumber)
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
FormData_pg_attribute * Form_pg_attribute
#define ERRCODE_DATA_CORRUPTED
static uint32 pg_nextpower2_32(uint32 num)
static PgChecksumMode mode
static const struct exclude_list_item skip[]
#define pgstat_count_heap_getnext(rel)
#define pgstat_count_heap_scan(rel)
void pgstat_count_heap_update(Relation rel, bool hot, bool newpage)
void pgstat_count_heap_delete(Relation rel)
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
#define qsort(a, b, c, d)
static Oid DatumGetObjectId(Datum X)
static Pointer DatumGetPointer(Datum X)
void CheckForSerializableConflictOut(Relation relation, TransactionId xid, Snapshot snapshot)
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
void PredicateLockRelation(Relation relation, Snapshot snapshot)
bool CheckForSerializableConflictOutNeeded(Relation relation, Snapshot snapshot)
GlobalVisState * GlobalVisTestFor(Relation rel)
bool TransactionIdIsInProgress(TransactionId xid)
void heap_page_prune_opt(Relation relation, Buffer buffer)
void heap_page_prune_execute(Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
#define RelationGetRelid(relation)
#define RelationIsLogicallyLogged(relation)
#define RelationGetTargetPageFreeSpace(relation, defaultff)
#define RelationGetDescr(relation)
#define RelationGetNumberOfAttributes(relation)
#define RelationGetRelationName(relation)
#define RelationIsAccessibleInLogicalDecoding(relation)
#define RelationNeedsWAL(relation)
#define RelationUsesLocalBuffers(relation)
#define HEAP_DEFAULT_FILLFACTOR
void RelationDecrementReferenceCount(Relation rel)
void RelationIncrementReferenceCount(Relation rel)
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
@ INDEX_ATTR_BITMAP_HOT_BLOCKING
@ INDEX_ATTR_BITMAP_SUMMARIZED
@ INDEX_ATTR_BITMAP_IDENTITY_KEY
struct ParallelBlockTableScanDescData * ParallelBlockTableScanDesc
void heap_xlog_logical_rewrite(XLogReaderState *r)
#define ScanDirectionIsForward(direction)
#define ScanDirectionIsBackward(direction)
void UnregisterSnapshot(Snapshot snapshot)
TransactionId TransactionXmin
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
#define IsMVCCSnapshot(snapshot)
int get_tablespace_maintenance_io_concurrency(Oid spcid)
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
BufferAccessStrategy strategy
uint32 already_extended_by
MultiXactId NoFreezePageRelminMxid
TransactionId FreezePageRelfrozenXid
MultiXactId FreezePageRelminMxid
TransactionId NoFreezePageRelfrozenXid
BufferAccessStrategy rs_strategy
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
BlockNumber rs_startblock
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]
TableScanDescData rs_base
const struct TableAmRoutine * rd_tableam
RelFileLocator rd_locator
ItemPointerData rs_mintid
ItemPointerData rs_maxtid
struct ScanKeyData * rs_key
struct SnapshotData * rs_snapshot
struct ParallelTableScanDescData * rs_parallel
TransactionId FreezeLimit
TransactionId relfrozenxid
MultiXactId MultiXactCutoff
TransactionId snapshotConflictHorizon
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
ItemPointerData target_tid
RelFileLocator target_locator
TransactionId snapshotConflictHorizon
TransactionId snapshotConflictHorizon
TransactionId SubTransGetTopmostTransaction(TransactionId xid)
void ss_report_location(Relation rel, BlockNumber location)
BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks)
#define FirstLowInvalidHeapAttributeNumber
#define TableOidAttributeNumber
void table_block_parallelscan_startblock_init(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
BlockNumber table_block_parallelscan_nextpage(Relation rel, ParallelBlockTableScanWorker pbscanwork, ParallelBlockTableScanDesc pbscan)
bool synchronize_seqscans
bool TransactionIdDidCommit(TransactionId transactionId)
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2)
bool TransactionIdDidAbort(TransactionId transactionId)
bool TransactionIdFollows(TransactionId id1, TransactionId id2)
bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2)
#define FrozenTransactionId
#define InvalidTransactionId
#define TransactionIdEquals(id1, id2)
#define TransactionIdIsValid(xid)
#define TransactionIdIsNormal(xid)
#define TupleDescAttr(tupdesc, i)
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
static bool HeapKeyTest(HeapTuple tuple, TupleDesc tupdesc, int nkeys, ScanKey keys)
#define VARATT_IS_EXTERNAL(PTR)
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_XLOG_VALID_BITS
#define VISIBILITYMAP_XLOG_CATALOG_REL
#define VISIBILITYMAP_ALL_VISIBLE
TransactionId GetTopTransactionId(void)
TransactionId CheckXidAlive
TransactionId GetTopTransactionIdIfAny(void)
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
bool IsInParallelMode(void)
TransactionId GetCurrentTransactionId(void)
CommandId GetCurrentCommandId(bool used)
#define XLOG_INCLUDE_ORIGIN
#define XLogHintBitIsNeeded()
#define InvalidXLogRecPtr
void XLogRegisterData(char *data, uint32 len)
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
bool XLogCheckBufferNeedsBackup(Buffer buffer)
void XLogSetRecordFlags(uint8 flags)
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
void XLogBeginInsert(void)
bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum, Buffer *prefetch_buffer)
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)
#define XLogRecGetInfo(decoder)
#define XLogRecGetData(decoder)
#define XLogRecGetXid(decoder)
void FreeFakeRelcacheEntry(Relation fakerel)
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Relation CreateFakeRelcacheEntry(RelFileLocator rlocator)
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)