PostgreSQL Source Code git master
predicate.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * predicate.c
4 * POSTGRES predicate locking
5 * to support full serializable transaction isolation
6 *
7 *
8 * The approach taken is to implement Serializable Snapshot Isolation (SSI)
9 * as initially described in this paper:
10 *
11 * Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
12 * Serializable isolation for snapshot databases.
13 * In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
14 * international conference on Management of data,
15 * pages 729-738, New York, NY, USA. ACM.
16 * http://doi.acm.org/10.1145/1376616.1376690
17 *
18 * and further elaborated in Cahill's doctoral thesis:
19 *
20 * Michael James Cahill. 2009.
21 * Serializable Isolation for Snapshot Databases.
22 * Sydney Digital Theses.
23 * University of Sydney, School of Information Technologies.
24 * http://hdl.handle.net/2123/5353
25 *
26 *
27 * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
28 * locks, which are so different from normal locks that a distinct set of
29 * structures is required to handle them. They are needed to detect
30 * rw-conflicts when the read happens before the write. (When the write
31 * occurs first, the reading transaction can check for a conflict by
32 * examining the MVCC data.)
33 *
34 * (1) Besides tuples actually read, they must cover ranges of tuples
35 * which would have been read based on the predicate. This will
36 * require modelling the predicates through locks against database
37 * objects such as pages, index ranges, or entire tables.
38 *
39 * (2) They must be kept in RAM for quick access. Because of this, it
40 * isn't possible to always maintain tuple-level granularity -- when
41 * the space allocated to store these approaches exhaustion, a
42 * request for a lock may need to scan for situations where a single
43 * transaction holds many fine-grained locks which can be coalesced
44 * into a single coarser-grained lock.
45 *
46 * (3) They never block anything; they are more like flags than locks
47 * in that regard; although they refer to database objects and are
48 * used to identify rw-conflicts with normal write locks.
49 *
50 * (4) While they are associated with a transaction, they must survive
51 * a successful COMMIT of that transaction, and remain until all
52 * overlapping transactions complete. This even means that they
53 * must survive termination of the transaction's process. If a
54 * top level transaction is rolled back, however, it is immediately
55 * flagged so that it can be ignored, and its SIREAD locks can be
56 * released any time after that.
57 *
58 * (5) The only transactions which create SIREAD locks or check for
59 * conflicts with them are serializable transactions.
60 *
61 * (6) When a write lock for a top level transaction is found to cover
62 * an existing SIREAD lock for the same transaction, the SIREAD lock
63 * can be deleted.
64 *
65 * (7) A write from a serializable transaction must ensure that an xact
66 * record exists for the transaction, with the same lifespan (until
 67 * all concurrent transactions complete or the transaction is rolled
68 * back) so that rw-dependencies to that transaction can be
69 * detected.
70 *
71 * We use an optimization for read-only transactions. Under certain
72 * circumstances, a read-only transaction's snapshot can be shown to
73 * never have conflicts with other transactions. This is referred to
74 * as a "safe" snapshot (and one known not to be is "unsafe").
75 * However, it can't be determined whether a snapshot is safe until
76 * all concurrent read/write transactions complete.
77 *
78 * Once a read-only transaction is known to have a safe snapshot, it
79 * can release its predicate locks and exempt itself from further
80 * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
81 * on safe snapshots, waiting as necessary for one to be available.
82 *
83 *
84 * Lightweight locks to manage access to the predicate locking shared
85 * memory objects must be taken in this order, and should be released in
86 * reverse order:
87 *
88 * SerializableFinishedListLock
89 * - Protects the list of transactions which have completed but which
90 * may yet matter because they overlap still-active transactions.
91 *
92 * SerializablePredicateListLock
93 * - Protects the linked list of locks held by a transaction. Note
94 * that the locks themselves are also covered by the partition
95 * locks of their respective lock targets; this lock only affects
96 * the linked list connecting the locks related to a transaction.
97 * - All transactions share this single lock (with no partitioning).
98 * - There is never a need for a process other than the one running
99 * an active transaction to walk the list of locks held by that
100 * transaction, except parallel query workers sharing the leader's
101 * transaction. In the parallel case, an extra per-sxact lock is
102 * taken; see below.
103 * - It is relatively infrequent that another process needs to
104 * modify the list for a transaction, but it does happen for such
105 * things as index page splits for pages with predicate locks and
106 * freeing of predicate locked pages by a vacuum process. When
107 * removing a lock in such cases, the lock itself contains the
108 * pointers needed to remove it from the list. When adding a
109 * lock in such cases, the lock can be added using the anchor in
110 * the transaction structure. Neither requires walking the list.
111 * - Cleaning up the list for a terminated transaction is sometimes
112 * not done on a retail basis, in which case no lock is required.
113 * - Due to the above, a process accessing its active transaction's
114 * list always uses a shared lock, regardless of whether it is
115 * walking or maintaining the list. This improves concurrency
116 * for the common access patterns.
117 * - A process which needs to alter the list of a transaction other
118 * than its own active transaction must acquire an exclusive
119 * lock.
120 *
121 * SERIALIZABLEXACT's member 'perXactPredicateListLock'
122 * - Protects the linked list of predicate locks held by a transaction.
123 * Only needed for parallel mode, where multiple backends share the
124 * same SERIALIZABLEXACT object. Not needed if
125 * SerializablePredicateListLock is held exclusively.
126 *
127 * PredicateLockHashPartitionLock(hashcode)
128 * - The same lock protects a target, all locks on that target, and
129 * the linked list of locks on the target.
130 * - When more than one is needed, acquire in ascending address order.
131 * - When all are needed (rare), acquire in ascending index order with
132 * PredicateLockHashPartitionLockByIndex(index).
133 *
134 * SerializableXactHashLock
135 * - Protects both PredXact and SerializableXidHash.
136 *
137 * SerialControlLock
138 * - Protects SerialControlData members
139 *
140 * SLRU per-bank locks
141 * - Protects SerialSlruCtl
142 *
143 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
144 * Portions Copyright (c) 1994, Regents of the University of California
145 *
146 *
147 * IDENTIFICATION
148 * src/backend/storage/lmgr/predicate.c
149 *
150 *-------------------------------------------------------------------------
151 */
152/*
153 * INTERFACE ROUTINES
154 *
155 * housekeeping for setting up shared memory predicate lock structures
156 * PredicateLockShmemInit(void)
157 * PredicateLockShmemSize(void)
158 *
159 * predicate lock reporting
160 * GetPredicateLockStatusData(void)
161 * PageIsPredicateLocked(Relation relation, BlockNumber blkno)
162 *
163 * predicate lock maintenance
164 * GetSerializableTransactionSnapshot(Snapshot snapshot)
165 * SetSerializableTransactionSnapshot(Snapshot snapshot,
166 * VirtualTransactionId *sourcevxid)
167 * RegisterPredicateLockingXid(void)
168 * PredicateLockRelation(Relation relation, Snapshot snapshot)
169 * PredicateLockPage(Relation relation, BlockNumber blkno,
170 * Snapshot snapshot)
171 * PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
172 * TransactionId tuple_xid)
173 * PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
174 * BlockNumber newblkno)
175 * PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
176 * BlockNumber newblkno)
177 * TransferPredicateLocksToHeapRelation(Relation relation)
178 * ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
179 *
180 * conflict detection (may also trigger rollback)
181 * CheckForSerializableConflictOut(Relation relation, TransactionId xid,
182 * Snapshot snapshot)
183 * CheckForSerializableConflictIn(Relation relation, ItemPointer tid,
184 * BlockNumber blkno)
185 * CheckTableForSerializableConflictIn(Relation relation)
186 *
187 * final rollback checking
188 * PreCommit_CheckForSerializationFailure(void)
189 *
190 * two-phase commit support
191 * AtPrepare_PredicateLocks(void);
192 * PostPrepare_PredicateLocks(TransactionId xid);
193 * PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
194 * predicatelock_twophase_recover(TransactionId xid, uint16 info,
195 * void *recdata, uint32 len);
196 */
197
198#include "postgres.h"
199
200#include "access/parallel.h"
201#include "access/slru.h"
202#include "access/transam.h"
203#include "access/twophase.h"
204#include "access/twophase_rmgr.h"
205#include "access/xact.h"
206#include "access/xlog.h"
207#include "miscadmin.h"
208#include "pgstat.h"
209#include "port/pg_lfind.h"
210#include "storage/predicate.h"
212#include "storage/proc.h"
213#include "storage/procarray.h"
214#include "utils/guc_hooks.h"
215#include "utils/rel.h"
216#include "utils/snapmgr.h"
217
218/* Uncomment the next line to test the graceful degradation code. */
219/* #define TEST_SUMMARIZE_SERIAL */
220
221/*
222 * Test the most selective fields first, for performance.
223 *
224 * a is covered by b if all of the following hold:
225 * 1) a.database = b.database
226 * 2) a.relation = b.relation
227 * 3) b.offset is invalid (b is page-granularity or higher)
228 * 4) either of the following:
229 * 4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
230 * or 4b) a.offset is invalid and b.page is invalid (a is
 231 * page-granularity and b is relation-granularity)
232 */
/*
 * True if a lock on covered_target would be redundant given a lock on
 * covering_target; the numbered inline comments refer to the conditions
 * listed in the comment above.  Evaluates both arguments multiple times.
 */
 233#define TargetTagIsCoveredBy(covered_target, covering_target) \
 234 ((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */ \
 235 GET_PREDICATELOCKTARGETTAG_RELATION(covering_target)) \
 236 && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) == \
 237 InvalidOffsetNumber) /* (3) */ \
 238 && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) != \
 239 InvalidOffsetNumber) /* (4a) */ \
 240 && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
 241 GET_PREDICATELOCKTARGETTAG_PAGE(covered_target))) \
 242 || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
 243 InvalidBlockNumber) /* (4b) */ \
 244 && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target) \
 245 != InvalidBlockNumber))) \
 246 && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) == /* (1) */ \
 247 GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
248
249/*
250 * The predicate locking target and lock shared hash tables are partitioned to
251 * reduce contention. To determine which partition a given target belongs to,
252 * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
253 * apply one of these macros.
254 * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
255 */
/* Map a target tag's hash code to its hash-table partition number. */
 256#define PredicateLockHashPartition(hashcode) \
 257 ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
/* The LWLock protecting the partition that 'hashcode' maps to. */
 258#define PredicateLockHashPartitionLock(hashcode) \
 259 (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
 260 PredicateLockHashPartition(hashcode)].lock)
/* Partition LWLock addressed directly by partition index (used when all
 * partitions must be taken in ascending index order). */
 261#define PredicateLockHashPartitionLockByIndex(i) \
 262 (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
263
/*
 * Number of entries to size the predicate lock target hash table for:
 * the per-transaction limit times the number of possible lock holders
 * (regular backends plus prepared transactions), with overflow-checked
 * shmem-size arithmetic.
 */
 264#define NPREDICATELOCKTARGETENTS() \
 265 mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 266
/* An sxact is on the finished list iff its finishedLink node is attached. */
 267#define SxactIsOnFinishedList(sxact) (!dlist_node_is_detached(&(sxact)->finishedLink))
268
269/*
270 * Note that a sxact is marked "prepared" once it has passed
271 * PreCommit_CheckForSerializationFailure, even if it isn't using
272 * 2PC. This is the point at which it can no longer be aborted.
273 *
274 * The PREPARED flag remains set after commit, so SxactIsCommitted
275 * implies SxactIsPrepared.
276 */
/* Flag-test helpers over a SERIALIZABLEXACT's 'flags' bitmask. */
 277#define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
 278#define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
 279#define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
 280#define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
 281#define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
 282#define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
 283#define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
 284/*
 285 * The following macro actually means that the specified transaction has a
 286 * conflict out *to a transaction which committed ahead of it*. It's hard
 287 * to get that into a name of a reasonable length.
 288 */
 289#define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
 290#define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
 291#define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
 292#define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
 293#define SxactIsPartiallyReleased(sxact) (((sxact)->flags & SXACT_FLAG_PARTIALLY_RELEASED) != 0)
294
295/*
296 * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
297 *
298 * To avoid unnecessary recomputations of the hash code, we try to do this
299 * just once per function, and then pass it around as needed. Aside from
300 * passing the hashcode to hash_search_with_hash_value(), we can extract
301 * the lock partition number from the hashcode.
302 */
303#define PredicateLockTargetTagHashCode(predicatelocktargettag) \
304 get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
305
306/*
307 * Given a predicate lock tag, and the hash for its target,
308 * compute the lock hash.
309 *
310 * To make the hash code also depend on the transaction, we xor the sxid
311 * struct's address into the hash code, left-shifted so that the
312 * partition-number bits don't change. Since this is only a hash, we
313 * don't care if we lose high-order bits of the address; use an
314 * intermediate variable to suppress cast-pointer-to-int warnings.
315 */
316#define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
317 ((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
318 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
319
320
321/*
322 * The SLRU buffer area through which we access the old xids.
323 */
325
326#define SerialSlruCtl (&SerialSlruCtlData)
327
328#define SERIAL_PAGESIZE BLCKSZ
329#define SERIAL_ENTRYSIZE sizeof(SerCommitSeqNo)
330#define SERIAL_ENTRIESPERPAGE (SERIAL_PAGESIZE / SERIAL_ENTRYSIZE)
331
332/*
333 * Set maximum pages based on the number needed to track all transactions.
334 */
335#define SERIAL_MAX_PAGE (MaxTransactionId / SERIAL_ENTRIESPERPAGE)
336
337#define SerialNextPage(page) (((page) >= SERIAL_MAX_PAGE) ? 0 : (page) + 1)
338
339#define SerialValue(slotno, xid) (*((SerCommitSeqNo *) \
340 (SerialSlruCtl->shared->page_buffer[slotno] + \
341 ((((uint32) (xid)) % SERIAL_ENTRIESPERPAGE) * SERIAL_ENTRYSIZE))))
342
343#define SerialPage(xid) (((uint32) (xid)) / SERIAL_ENTRIESPERPAGE)
344
345typedef struct SerialControlData
346{
347 int64 headPage; /* newest initialized page */
348 TransactionId headXid; /* newest valid Xid in the SLRU */
349 TransactionId tailXid; /* oldest xmin we might be interested in */
351
353
355
356/*
357 * When the oldest committed transaction on the "finished" list is moved to
358 * SLRU, its predicate locks will be moved to this "dummy" transaction,
359 * collapsing duplicate targets. When a duplicate is found, the later
360 * commitSeqNo is used.
361 */
363
364
365/*
366 * These configuration variables are used to set the predicate lock table size
367 * and to control promotion of predicate locks to coarser granularity in an
368 * attempt to degrade performance (mostly as false positive serialization
369 * failure) gracefully in the face of memory pressure.
370 */
371int max_predicate_locks_per_xact; /* in guc_tables.c */
372int max_predicate_locks_per_relation; /* in guc_tables.c */
373int max_predicate_locks_per_page; /* in guc_tables.c */
374
375/*
376 * This provides a list of objects in order to track transactions
377 * participating in predicate locking. Entries in the list are fixed size,
378 * and reside in shared memory. The memory address of an entry must remain
379 * fixed during its lifetime. The list will be protected from concurrent
380 * update externally; no provision is made in this code to manage that. The
381 * number of entries in the list, and the size allowed for each entry is
382 * fixed upon creation.
383 */
385
386/*
387 * This provides a pool of RWConflict data elements to use in conflict lists
388 * between transactions.
389 */
391
392/*
393 * The predicate locking hash tables are in shared memory.
394 * Each backend keeps pointers to them.
395 */
400
401/*
402 * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
403 * this entry, you can ensure that there's enough scratch space available for
404 * inserting one entry in the hash table. This is an otherwise-invalid tag.
405 */
406static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
409
410/*
411 * The local hash table used to determine when to combine multiple fine-
412 * grained locks into a single courser-grained lock.
413 */
415
416/*
417 * Keep a pointer to the currently-running serializable transaction (if any)
418 * for quick reference. Also, remember if we have written anything that could
419 * cause a rw-conflict.
420 */
422static bool MyXactDidWrite = false;
423
424/*
425 * The SXACT_FLAG_RO_UNSAFE optimization might lead us to release
426 * MySerializableXact early. If that happens in a parallel query, the leader
427 * needs to defer the destruction of the SERIALIZABLEXACT until end of
428 * transaction, because the workers still have a reference to it. In that
429 * case, the leader stores it here.
430 */
432
433/* local functions */
434
435static SERIALIZABLEXACT *CreatePredXact(void);
436static void ReleasePredXact(SERIALIZABLEXACT *sxact);
437
438static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
439static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
440static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
441static void ReleaseRWConflict(RWConflict conflict);
442static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);
443
444static bool SerialPagePrecedesLogically(int64 page1, int64 page2);
445static void SerialInit(void);
446static void SerialAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
449
450static uint32 predicatelock_hash(const void *key, Size keysize);
451static void SummarizeOldestCommittedSxact(void);
452static Snapshot GetSafeSnapshot(Snapshot origSnapshot);
454 VirtualTransactionId *sourcevxid,
455 int sourcepid);
456static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
458 PREDICATELOCKTARGETTAG *parent);
459static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
460static void RemoveScratchTarget(bool lockheld);
461static void RestoreScratchTarget(bool lockheld);
463 uint32 targettaghash);
464static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
467static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
468static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
469 uint32 targettaghash,
470 SERIALIZABLEXACT *sxact);
471static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
473 PREDICATELOCKTARGETTAG newtargettag,
474 bool removeOld);
475static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
476static void DropAllPredicateLocksFromTable(Relation relation,
477 bool transfer);
478static void SetNewSxactGlobalXmin(void);
479static void ClearOldPredicateLocks(void);
480static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
481 bool summarize);
482static bool XidIsConcurrent(TransactionId xid);
484static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
486 SERIALIZABLEXACT *writer);
487static void CreateLocalPredicateLockHash(void);
488static void ReleasePredicateLocksLocal(void);
489
490
491/*------------------------------------------------------------------------*/
492
493/*
494 * Does this relation participate in predicate locking? Temporary and system
495 * relations are exempt.
496 */
497static inline bool
499{
500 return !(relation->rd_id < FirstUnpinnedObjectId ||
501 RelationUsesLocalBuffers(relation));
502}
503
504/*
505 * When a public interface method is called for a read, this is the test to
506 * see if we should do a quick return.
507 *
508 * Note: this function has side-effects! If this transaction has been flagged
509 * as RO-safe since the last call, we release all predicate locks and reset
510 * MySerializableXact. That makes subsequent calls to return quickly.
511 *
512 * This is marked as 'inline' to eliminate the function call overhead in the
513 * common case that serialization is not needed.
514 */
515static inline bool
517{
518 /* Nothing to do if this is not a serializable transaction */
520 return false;
521
522 /*
523 * Don't acquire locks or conflict when scanning with a special snapshot.
524 * This excludes things like CLUSTER and REINDEX. They use the wholesale
525 * functions TransferPredicateLocksToHeapRelation() and
526 * CheckTableForSerializableConflictIn() to participate in serialization,
527 * but the scans involved don't need serialization.
528 */
529 if (!IsMVCCSnapshot(snapshot))
530 return false;
531
532 /*
533 * Check if we have just become "RO-safe". If we have, immediately release
534 * all locks as they're not needed anymore. This also resets
535 * MySerializableXact, so that subsequent calls to this function can exit
536 * quickly.
537 *
538 * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
539 * commit without having conflicts out to an earlier snapshot, thus
540 * ensuring that no conflicts are possible for this transaction.
541 */
543 {
544 ReleasePredicateLocks(false, true);
545 return false;
546 }
547
548 /* Check if the relation doesn't participate in predicate locking */
550 return false;
551
552 return true; /* no excuse to skip predicate locking */
553}
554
555/*
556 * Like SerializationNeededForRead(), but called on writes.
557 * The logic is the same, but there is no snapshot and we can't be RO-safe.
558 */
559static inline bool
561{
562 /* Nothing to do if this is not a serializable transaction */
564 return false;
565
566 /* Check if the relation doesn't participate in predicate locking */
568 return false;
569
570 return true; /* no excuse to skip predicate locking */
571}
572
573
574/*------------------------------------------------------------------------*/
575
576/*
577 * These functions are a simple implementation of a list for this specific
578 * type of struct. If there is ever a generalized shared memory list, we
579 * should probably switch to that.
580 */
581static SERIALIZABLEXACT *
583{
584 SERIALIZABLEXACT *sxact;
585
587 return NULL;
588
589 sxact = dlist_container(SERIALIZABLEXACT, xactLink,
592 return sxact;
593}
594
595static void
597{
598 Assert(ShmemAddrIsValid(sxact));
599
600 dlist_delete(&sxact->xactLink);
602}
603
604/*------------------------------------------------------------------------*/
605
606/*
607 * These functions manage primitive access to the RWConflict pool and lists.
608 */
609static bool
611{
612 dlist_iter iter;
613
614 Assert(reader != writer);
615
616 /* Check the ends of the purported conflict first. */
617 if (SxactIsDoomed(reader)
618 || SxactIsDoomed(writer)
619 || dlist_is_empty(&reader->outConflicts)
620 || dlist_is_empty(&writer->inConflicts))
621 return false;
622
623 /*
624 * A conflict is possible; walk the list to find out.
625 *
626 * The unconstify is needed as we have no const version of
627 * dlist_foreach().
628 */
629 dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts)
630 {
631 RWConflict conflict =
632 dlist_container(RWConflictData, outLink, iter.cur);
633
634 if (conflict->sxactIn == writer)
635 return true;
636 }
637
638 /* No conflict found. */
639 return false;
640}
641
642static void
644{
645 RWConflict conflict;
646
647 Assert(reader != writer);
648 Assert(!RWConflictExists(reader, writer));
649
652 (errcode(ERRCODE_OUT_OF_MEMORY),
653 errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
654 errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
655
657 dlist_delete(&conflict->outLink);
658
659 conflict->sxactOut = reader;
660 conflict->sxactIn = writer;
661 dlist_push_tail(&reader->outConflicts, &conflict->outLink);
662 dlist_push_tail(&writer->inConflicts, &conflict->inLink);
663}
664
665static void
667 SERIALIZABLEXACT *activeXact)
668{
669 RWConflict conflict;
670
671 Assert(roXact != activeXact);
672 Assert(SxactIsReadOnly(roXact));
673 Assert(!SxactIsReadOnly(activeXact));
674
677 (errcode(ERRCODE_OUT_OF_MEMORY),
678 errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
679 errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
680
682 dlist_delete(&conflict->outLink);
683
684 conflict->sxactOut = activeXact;
685 conflict->sxactIn = roXact;
686 dlist_push_tail(&activeXact->possibleUnsafeConflicts, &conflict->outLink);
687 dlist_push_tail(&roXact->possibleUnsafeConflicts, &conflict->inLink);
688}
689
690static void
692{
693 dlist_delete(&conflict->inLink);
694 dlist_delete(&conflict->outLink);
696}
697
698static void
700{
702
703 Assert(SxactIsReadOnly(sxact));
704 Assert(!SxactIsROSafe(sxact));
705
706 sxact->flags |= SXACT_FLAG_RO_UNSAFE;
707
708 /*
709 * We know this isn't a safe snapshot, so we can stop looking for other
710 * potential conflicts.
711 */
713 {
714 RWConflict conflict =
715 dlist_container(RWConflictData, inLink, iter.cur);
716
717 Assert(!SxactIsReadOnly(conflict->sxactOut));
718 Assert(sxact == conflict->sxactIn);
719
720 ReleaseRWConflict(conflict);
721 }
722}
723
724/*------------------------------------------------------------------------*/
725
726/*
727 * Decide whether a Serial page number is "older" for truncation purposes.
728 * Analogous to CLOGPagePrecedes().
729 */
730static bool
732{
733 TransactionId xid1;
734 TransactionId xid2;
735
736 xid1 = ((TransactionId) page1) * SERIAL_ENTRIESPERPAGE;
737 xid1 += FirstNormalTransactionId + 1;
738 xid2 = ((TransactionId) page2) * SERIAL_ENTRIESPERPAGE;
739 xid2 += FirstNormalTransactionId + 1;
740
741 return (TransactionIdPrecedes(xid1, xid2) &&
743}
744
745#ifdef USE_ASSERT_CHECKING
746static void
747SerialPagePrecedesLogicallyUnitTests(void)
748{
749 int per_page = SERIAL_ENTRIESPERPAGE,
750 offset = per_page / 2;
751 int64 newestPage,
752 oldestPage,
753 headPage,
754 targetPage;
755 TransactionId newestXact,
756 oldestXact;
757
758 /* GetNewTransactionId() has assigned the last XID it can safely use. */
759 newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1; /* nothing special */
760 newestXact = newestPage * per_page + offset;
761 Assert(newestXact / per_page == newestPage);
762 oldestXact = newestXact + 1;
763 oldestXact -= 1U << 31;
764 oldestPage = oldestXact / per_page;
765
766 /*
767 * In this scenario, the SLRU headPage pertains to the last ~1000 XIDs
768 * assigned. oldestXact finishes, ~2B XIDs having elapsed since it
769 * started. Further transactions cause us to summarize oldestXact to
770 * tailPage. Function must return false so SerialAdd() doesn't zero
771 * tailPage (which may contain entries for other old, recently-finished
772 * XIDs) and half the SLRU. Reaching this requires burning ~2B XIDs in
773 * single-user mode, a negligible possibility.
774 */
775 headPage = newestPage;
776 targetPage = oldestPage;
778
779 /*
780 * In this scenario, the SLRU headPage pertains to oldestXact. We're
781 * summarizing an XID near newestXact. (Assume few other XIDs used
782 * SERIALIZABLE, hence the minimal headPage advancement. Assume
783 * oldestXact was long-running and only recently reached the SLRU.)
784 * Function must return true to make SerialAdd() create targetPage.
785 *
786 * Today's implementation mishandles this case, but it doesn't matter
787 * enough to fix. Verify that the defect affects just one page by
788 * asserting correct treatment of its prior page. Reaching this case
789 * requires burning ~2B XIDs in single-user mode, a negligible
790 * possibility. Moreover, if it does happen, the consequence would be
791 * mild, namely a new transaction failing in SimpleLruReadPage().
792 */
793 headPage = oldestPage;
794 targetPage = newestPage;
796#if 0
798#endif
799}
800#endif
801
802/*
803 * Initialize for the tracking of old serializable committed xids.
804 */
805static void
807{
808 bool found;
809
810 /*
811 * Set up SLRU management of the pg_serial data.
812 */
814 SimpleLruInit(SerialSlruCtl, "serializable",
815 serializable_buffers, 0, "pg_serial",
817 SYNC_HANDLER_NONE, false);
818#ifdef USE_ASSERT_CHECKING
819 SerialPagePrecedesLogicallyUnitTests();
820#endif
822
823 /*
824 * Create or attach to the SerialControl structure.
825 */
827 ShmemInitStruct("SerialControlData", sizeof(SerialControlData), &found);
828
829 Assert(found == IsUnderPostmaster);
830 if (!found)
831 {
832 /*
833 * Set control information to reflect empty SLRU.
834 */
835 LWLockAcquire(SerialControlLock, LW_EXCLUSIVE);
839 LWLockRelease(SerialControlLock);
840 }
841}
842
843/*
844 * GUC check_hook for serializable_buffers
845 */
846bool
848{
849 return check_slru_buffers("serializable_buffers", newval);
850}
851
852/*
853 * Record a committed read write serializable xid and the minimum
854 * commitSeqNo of any transactions to which this xid had a rw-conflict out.
855 * An invalid commitSeqNo means that there were no conflicts out from xid.
856 */
857static void
858SerialAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
859{
861 int64 targetPage;
862 int slotno;
863 int64 firstZeroPage;
864 bool isNewPage;
865 LWLock *lock;
866
868
869 targetPage = SerialPage(xid);
870 lock = SimpleLruGetBankLock(SerialSlruCtl, targetPage);
871
872 /*
873 * In this routine, we must hold both SerialControlLock and the SLRU bank
874 * lock simultaneously while making the SLRU data catch up with the new
875 * state that we determine.
876 */
877 LWLockAcquire(SerialControlLock, LW_EXCLUSIVE);
878
879 /*
880 * If 'xid' is older than the global xmin (== tailXid), there's no need to
881 * store it, after all. This can happen if the oldest transaction holding
882 * back the global xmin just finished, making 'xid' uninteresting, but
883 * ClearOldPredicateLocks() has not yet run.
884 */
887 {
888 LWLockRelease(SerialControlLock);
889 return;
890 }
891
892 /*
893 * If the SLRU is currently unused, zero out the whole active region from
894 * tailXid to headXid before taking it into use. Otherwise zero out only
895 * any new pages that enter the tailXid-headXid range as we advance
896 * headXid.
897 */
898 if (serialControl->headPage < 0)
899 {
900 firstZeroPage = SerialPage(tailXid);
901 isNewPage = true;
902 }
903 else
904 {
905 firstZeroPage = SerialNextPage(serialControl->headPage);
907 targetPage);
908 }
909
912 serialControl->headXid = xid;
913 if (isNewPage)
914 serialControl->headPage = targetPage;
915
916 if (isNewPage)
917 {
918 /* Initialize intervening pages; might involve trading locks */
919 for (;;)
920 {
921 lock = SimpleLruGetBankLock(SerialSlruCtl, firstZeroPage);
923 slotno = SimpleLruZeroPage(SerialSlruCtl, firstZeroPage);
924 if (firstZeroPage == targetPage)
925 break;
926 firstZeroPage = SerialNextPage(firstZeroPage);
927 LWLockRelease(lock);
928 }
929 }
930 else
931 {
933 slotno = SimpleLruReadPage(SerialSlruCtl, targetPage, true, xid);
934 }
935
936 SerialValue(slotno, xid) = minConflictCommitSeqNo;
937 SerialSlruCtl->shared->page_dirty[slotno] = true;
938
939 LWLockRelease(lock);
940 LWLockRelease(SerialControlLock);
941}
942
943/*
944 * Get the minimum commitSeqNo for any conflict out for the given xid. For
945 * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
946 * will be returned.
947 */
948static SerCommitSeqNo
950{
954 int slotno;
955
957
958 LWLockAcquire(SerialControlLock, LW_SHARED);
961 LWLockRelease(SerialControlLock);
962
964 return 0;
965
967
970 return 0;
971
972 /*
973 * The following function must be called without holding SLRU bank lock,
974 * but will return with that lock held, which must then be released.
975 */
977 SerialPage(xid), xid);
978 val = SerialValue(slotno, xid);
980 return val;
981}
982
983/*
984 * Call this whenever there is a new xmin for active serializable
985 * transactions. We don't need to keep information on transactions which
986 * precede that. InvalidTransactionId means none active, so everything in
987 * the SLRU can be discarded.
988 */
989static void
991{
992 LWLockAcquire(SerialControlLock, LW_EXCLUSIVE);
993
994 /*
995 * When no sxacts are active, nothing overlaps, set the xid values to
996 * invalid to show that there are no valid entries. Don't clear headPage,
997 * though. A new xmin might still land on that page, and we don't want to
998 * repeatedly zero out the same page.
999 */
1000 if (!TransactionIdIsValid(xid))
1001 {
1004 LWLockRelease(SerialControlLock);
1005 return;
1006 }
1007
1008 /*
1009 * When we're recovering prepared transactions, the global xmin might move
1010 * backwards depending on the order they're recovered. Normally that's not
1011 * OK, but during recovery no serializable transactions will commit, so
1012 * the SLRU is empty and we can get away with it.
1013 */
1014 if (RecoveryInProgress())
1015 {
1019 {
1020 serialControl->tailXid = xid;
1021 }
1022 LWLockRelease(SerialControlLock);
1023 return;
1024 }
1025
1028
1029 serialControl->tailXid = xid;
1030
1031 LWLockRelease(SerialControlLock);
1032}
1033
1034/*
1035 * Perform a checkpoint --- either during shutdown, or on-the-fly
1036 *
1037 * We don't have any data that needs to survive a restart, but this is a
1038 * convenient place to truncate the SLRU.
1039 */
1040void
1042{
1043 int64 truncateCutoffPage;
1044
1045 LWLockAcquire(SerialControlLock, LW_EXCLUSIVE);
1046
1047 /* Exit quickly if the SLRU is currently not in use. */
1048 if (serialControl->headPage < 0)
1049 {
1050 LWLockRelease(SerialControlLock);
1051 return;
1052 }
1053
1055 {
1056 int64 tailPage;
1057
1058 tailPage = SerialPage(serialControl->tailXid);
1059
1060 /*
1061 * It is possible for the tailXid to be ahead of the headXid. This
1062 * occurs if we checkpoint while there are in-progress serializable
1063 * transaction(s) advancing the tail but we are yet to summarize the
1064 * transactions. In this case, we cutoff up to the headPage and the
1065 * next summary will advance the headXid.
1066 */
1068 {
1069 /* We can truncate the SLRU up to the page containing tailXid */
1070 truncateCutoffPage = tailPage;
1071 }
1072 else
1073 truncateCutoffPage = serialControl->headPage;
1074 }
1075 else
1076 {
1077 /*----------
1078 * The SLRU is no longer needed. Truncate to head before we set head
1079 * invalid.
1080 *
1081 * XXX: It's possible that the SLRU is not needed again until XID
1082 * wrap-around has happened, so that the segment containing headPage
1083 * that we leave behind will appear to be new again. In that case it
1084 * won't be removed until XID horizon advances enough to make it
1085 * current again.
1086 *
1087 * XXX: This should happen in vac_truncate_clog(), not in checkpoints.
1088 * Consider this scenario, starting from a system with no in-progress
1089 * transactions and VACUUM FREEZE having maximized oldestXact:
1090 * - Start a SERIALIZABLE transaction.
1091 * - Start, finish, and summarize a SERIALIZABLE transaction, creating
1092 * one SLRU page.
1093 * - Consume XIDs to reach xidStopLimit.
1094 * - Finish all transactions. Due to the long-running SERIALIZABLE
1095 * transaction, earlier checkpoints did not touch headPage. The
1096 * next checkpoint will change it, but that checkpoint happens after
1097 * the end of the scenario.
1098 * - VACUUM to advance XID limits.
1099 * - Consume ~2M XIDs, crossing the former xidWrapLimit.
1100 * - Start, finish, and summarize a SERIALIZABLE transaction.
1101 * SerialAdd() declines to create the targetPage, because headPage
1102 * is not regarded as in the past relative to that targetPage. The
1103 * transaction instigating the summarize fails in
1104 * SimpleLruReadPage().
1105 */
1106 truncateCutoffPage = serialControl->headPage;
1107 serialControl->headPage = -1;
1108 }
1109
1110 LWLockRelease(SerialControlLock);
1111
1112 /*
1113 * Truncate away pages that are no longer required. Note that no
1114 * additional locking is required, because this is only called as part of
1115 * a checkpoint, and the validity limits have already been determined.
1116 */
1117 SimpleLruTruncate(SerialSlruCtl, truncateCutoffPage);
1118
1119 /*
1120 * Write dirty SLRU pages to disk
1121 *
1122 * This is not actually necessary from a correctness point of view. We do
1123 * it merely as a debugging aid.
1124 *
1125 * We're doing this after the truncation to avoid writing pages right
1126 * before deleting the file in which they sit, which would be completely
1127 * pointless.
1128 */
1130}
1131
1132/*------------------------------------------------------------------------*/
1133
1134/*
1135 * PredicateLockShmemInit -- Initialize the predicate locking data structures.
1136 *
1137 * This is called from CreateSharedMemoryAndSemaphores(), which see for
1138 * more comments. In the normal postmaster case, the shared hash tables
1139 * are created here. Backends inherit the pointers
1140 * to the shared tables via fork(). In the EXEC_BACKEND case, each
1141 * backend re-executes this code to obtain pointers to the already existing
1142 * shared hash tables.
1143 */
1144void
1146{
1147 HASHCTL info;
1148 long max_table_size;
1149 Size requestSize;
1150 bool found;
1151
1152#ifndef EXEC_BACKEND
1154#endif
1155
1156 /*
1157 * Compute size of predicate lock target hashtable. Note these
1158 * calculations must agree with PredicateLockShmemSize!
1159 */
1160 max_table_size = NPREDICATELOCKTARGETENTS();
1161
1162 /*
1163 * Allocate hash table for PREDICATELOCKTARGET structs. This stores
1164 * per-predicate-lock-target information.
1165 */
1166 info.keysize = sizeof(PREDICATELOCKTARGETTAG);
1167 info.entrysize = sizeof(PREDICATELOCKTARGET);
1169
1170 PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
1171 max_table_size,
1172 max_table_size,
1173 &info,
1176
1177 /*
1178 * Reserve a dummy entry in the hash table; we use it to make sure there's
1179 * always one entry available when we need to split or combine a page,
1180 * because running out of space there could mean aborting a
1181 * non-serializable transaction.
1182 */
1183 if (!IsUnderPostmaster)
1184 {
1186 HASH_ENTER, &found);
1187 Assert(!found);
1188 }
1189
1190 /* Pre-calculate the hash and partition lock of the scratch entry */
1193
1194 /*
1195 * Allocate hash table for PREDICATELOCK structs. This stores per
1196 * xact-lock-of-a-target information.
1197 */
1198 info.keysize = sizeof(PREDICATELOCKTAG);
1199 info.entrysize = sizeof(PREDICATELOCK);
1200 info.hash = predicatelock_hash;
1202
1203 /* Assume an average of 2 xacts per target */
1204 max_table_size *= 2;
1205
1206 PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
1207 max_table_size,
1208 max_table_size,
1209 &info,
1212
1213 /*
1214 * Compute size for serializable transaction hashtable. Note these
1215 * calculations must agree with PredicateLockShmemSize!
1216 */
1217 max_table_size = (MaxBackends + max_prepared_xacts);
1218
1219 /*
1220 * Allocate a list to hold information on transactions participating in
1221 * predicate locking.
1222 *
1223 * Assume an average of 10 predicate locking transactions per backend.
1224 * This allows aggressive cleanup while detail is present before data must
1225 * be summarized for storage in SLRU and the "dummy" transaction.
1226 */
1227 max_table_size *= 10;
1228
1229 PredXact = ShmemInitStruct("PredXactList",
1231 &found);
1232 Assert(found == IsUnderPostmaster);
1233 if (!found)
1234 {
1235 int i;
1236
1245 requestSize = mul_size((Size) max_table_size,
1246 sizeof(SERIALIZABLEXACT));
1247 PredXact->element = ShmemAlloc(requestSize);
1248 /* Add all elements to available list, clean. */
1249 memset(PredXact->element, 0, requestSize);
1250 for (i = 0; i < max_table_size; i++)
1251 {
1255 }
1272 }
1273 /* This never changes, so let's keep a local copy. */
1275
1276 /*
1277 * Allocate hash table for SERIALIZABLEXID structs. This stores per-xid
1278 * information for serializable transactions which have accessed data.
1279 */
1280 info.keysize = sizeof(SERIALIZABLEXIDTAG);
1281 info.entrysize = sizeof(SERIALIZABLEXID);
1282
1283 SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
1284 max_table_size,
1285 max_table_size,
1286 &info,
1289
1290 /*
1291 * Allocate space for tracking rw-conflicts in lists attached to the
1292 * transactions.
1293 *
1294 * Assume an average of 5 conflicts per transaction. Calculations suggest
1295 * that this will prevent resource exhaustion in even the most pessimal
1296 * loads up to max_connections = 200 with all 200 connections pounding the
1297 * database with serializable transactions. Beyond that, there may be
1298 * occasional transactions canceled when trying to flag conflicts. That's
1299 * probably OK.
1300 */
1301 max_table_size *= 5;
1302
1303 RWConflictPool = ShmemInitStruct("RWConflictPool",
1305 &found);
1306 Assert(found == IsUnderPostmaster);
1307 if (!found)
1308 {
1309 int i;
1310
1312 requestSize = mul_size((Size) max_table_size,
1314 RWConflictPool->element = ShmemAlloc(requestSize);
1315 /* Add all elements to available list, clean. */
1316 memset(RWConflictPool->element, 0, requestSize);
1317 for (i = 0; i < max_table_size; i++)
1318 {
1321 }
1322 }
1323
1324 /*
1325 * Create or attach to the header for the list of finished serializable
1326 * transactions.
1327 */
1329 ShmemInitStruct("FinishedSerializableTransactions",
1330 sizeof(dlist_head),
1331 &found);
1332 Assert(found == IsUnderPostmaster);
1333 if (!found)
1335
1336 /*
1337 * Initialize the SLRU storage for old committed serializable
1338 * transactions.
1339 */
1340 SerialInit();
1341}
1342
1343/*
1344 * Estimate shared-memory space used for predicate lock table
1345 */
1346Size
1348{
1349 Size size = 0;
1350 long max_table_size;
1351
1352 /* predicate lock target hash table */
1353 max_table_size = NPREDICATELOCKTARGETENTS();
1354 size = add_size(size, hash_estimate_size(max_table_size,
1355 sizeof(PREDICATELOCKTARGET)));
1356
1357 /* predicate lock hash table */
1358 max_table_size *= 2;
1359 size = add_size(size, hash_estimate_size(max_table_size,
1360 sizeof(PREDICATELOCK)));
1361
1362 /*
1363 * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1364 * margin.
1365 */
1366 size = add_size(size, size / 10);
1367
1368 /* transaction list */
1369 max_table_size = MaxBackends + max_prepared_xacts;
1370 max_table_size *= 10;
1372 size = add_size(size, mul_size((Size) max_table_size,
1373 sizeof(SERIALIZABLEXACT)));
1374
1375 /* transaction xid table */
1376 size = add_size(size, hash_estimate_size(max_table_size,
1377 sizeof(SERIALIZABLEXID)));
1378
1379 /* rw-conflict pool */
1380 max_table_size *= 5;
1382 size = add_size(size, mul_size((Size) max_table_size,
1384
1385 /* Head for list of finished serializable transactions. */
1386 size = add_size(size, sizeof(dlist_head));
1387
1388 /* Shared memory structures for SLRU tracking of old committed xids. */
1389 size = add_size(size, sizeof(SerialControlData));
1391
1392 return size;
1393}
1394
1395
1396/*
1397 * Compute the hash code associated with a PREDICATELOCKTAG.
1398 *
1399 * Because we want to use just one set of partition locks for both the
1400 * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1401 * that PREDICATELOCKs fall into the same partition number as their
1402 * associated PREDICATELOCKTARGETs. dynahash.c expects the partition number
1403 * to be the low-order bits of the hash code, and therefore a
1404 * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1405 * associated PREDICATELOCKTARGETTAG's hash code. We achieve this with this
1406 * specialized hash function.
1407 */
1408static uint32
1409predicatelock_hash(const void *key, Size keysize)
1410{
1411 const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1412 uint32 targethash;
1413
1414 Assert(keysize == sizeof(PREDICATELOCKTAG));
1415
1416 /* Look into the associated target object, and compute its hash code */
1417 targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1418
1419 return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1420}
1421
1422
1423/*
1424 * GetPredicateLockStatusData
1425 * Return a table containing the internal state of the predicate
1426 * lock manager for use in pg_lock_status.
1427 *
1428 * Like GetLockStatusData, this function tries to hold the partition LWLocks
1429 * for as short a time as possible by returning two arrays that simply
1430 * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1431 * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1432 * SERIALIZABLEXACT will likely appear.
1433 */
1436{
1438 int i;
1439 int els,
1440 el;
1441 HASH_SEQ_STATUS seqstat;
1442 PREDICATELOCK *predlock;
1443
1445
1446 /*
1447 * To ensure consistency, take simultaneous locks on all partition locks
1448 * in ascending order, then SerializableXactHashLock.
1449 */
1450 for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
1452 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1453
1454 /* Get number of locks and allocate appropriately-sized arrays. */
1456 data->nelements = els;
1457 data->locktags = (PREDICATELOCKTARGETTAG *)
1458 palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
1459 data->xacts = (SERIALIZABLEXACT *)
1460 palloc(sizeof(SERIALIZABLEXACT) * els);
1461
1462
1463 /* Scan through PredicateLockHash and copy contents */
1465
1466 el = 0;
1467
1468 while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
1469 {
1470 data->locktags[el] = predlock->tag.myTarget->tag;
1471 data->xacts[el] = *predlock->tag.myXact;
1472 el++;
1473 }
1474
1475 Assert(el == els);
1476
1477 /* Release locks in reverse order */
1478 LWLockRelease(SerializableXactHashLock);
1479 for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
1481
1482 return data;
1483}
1484
1485/*
1486 * Free up shared memory structures by pushing the oldest sxact (the one at
1487 * the front of the SummarizeOldestCommittedSxact queue) into summary form.
1488 * Each call will free exactly one SERIALIZABLEXACT structure and may also
1489 * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1490 * PREDICATELOCKTARGET, RWConflictData.
1491 */
1492static void
1494{
1495 SERIALIZABLEXACT *sxact;
1496
1497 LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
1498
1499 /*
1500 * This function is only called if there are no sxact slots available.
1501 * Some of them must belong to old, already-finished transactions, so
1502 * there should be something in FinishedSerializableTransactions list that
1503 * we can summarize. However, there's a race condition: while we were not
1504 * holding any locks, a transaction might have ended and cleaned up all
1505 * the finished sxact entries already, freeing up their sxact slots. In
1506 * that case, we have nothing to do here. The caller will find one of the
1507 * slots released by the other backend when it retries.
1508 */
1510 {
1511 LWLockRelease(SerializableFinishedListLock);
1512 return;
1513 }
1514
1515 /*
1516 * Grab the first sxact off the finished list -- this will be the earliest
1517 * commit. Remove it from the list.
1518 */
1519 sxact = dlist_head_element(SERIALIZABLEXACT, finishedLink,
1522
1523 /* Add to SLRU summary information. */
1524 if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
1525 SerialAdd(sxact->topXid, SxactHasConflictOut(sxact)
1527
1528 /* Summarize and release the detail. */
1529 ReleaseOneSerializableXact(sxact, false, true);
1530
1531 LWLockRelease(SerializableFinishedListLock);
1532}
1533
1534/*
1535 * GetSafeSnapshot
1536 * Obtain and register a snapshot for a READ ONLY DEFERRABLE
1537 * transaction. Ensures that the snapshot is "safe", i.e. a
1538 * read-only transaction running on it can execute serializably
1539 * without further checks. This requires waiting for concurrent
1540 * transactions to complete, and retrying with a new snapshot if
1541 * one of them could possibly create a conflict.
1542 *
1543 * As with GetSerializableTransactionSnapshot (which this is a subroutine
1544 * for), the passed-in Snapshot pointer should reference a static data
1545 * area that can safely be passed to GetSnapshotData.
1546 */
1547static Snapshot
1549{
1550 Snapshot snapshot;
1551
1553
1554 while (true)
1555 {
1556 /*
1557 * GetSerializableTransactionSnapshotInt is going to call
1558 * GetSnapshotData, so we need to provide it the static snapshot area
1559 * our caller passed to us. The pointer returned is actually the same
1560 * one passed to it, but we avoid assuming that here.
1561 */
1562 snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
1563 NULL, InvalidPid);
1564
1566 return snapshot; /* no concurrent r/w xacts; it's safe */
1567
1568 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1569
1570 /*
1571 * Wait for concurrent transactions to finish. Stop early if one of
1572 * them marked us as conflicted.
1573 */
1577 {
1578 LWLockRelease(SerializableXactHashLock);
1579 ProcWaitForSignal(WAIT_EVENT_SAFE_SNAPSHOT);
1580 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1581 }
1582 MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
1583
1585 {
1586 LWLockRelease(SerializableXactHashLock);
1587 break; /* success */
1588 }
1589
1590 LWLockRelease(SerializableXactHashLock);
1591
1592 /* else, need to retry... */
1595 errmsg_internal("deferrable snapshot was unsafe; trying a new one")));
1596 ReleasePredicateLocks(false, false);
1597 }
1598
1599 /*
1600 * Now we have a safe snapshot, so we don't need to do any further checks.
1601 */
1603 ReleasePredicateLocks(false, true);
1604
1605 return snapshot;
1606}
1607
1608/*
1609 * GetSafeSnapshotBlockingPids
1610 * If the specified process is currently blocked in GetSafeSnapshot,
1611 * write the process IDs of all processes that it is blocked by
1612 * into the caller-supplied buffer output[]. The list is truncated at
1613 * output_size, and the number of PIDs written into the buffer is
1614 * returned. Returns zero if the given PID is not currently blocked
1615 * in GetSafeSnapshot.
1616 */
1617int
1618GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
1619{
1620 int num_written = 0;
1621 dlist_iter iter;
1622 SERIALIZABLEXACT *blocking_sxact = NULL;
1623
1624 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1625
1626 /* Find blocked_pid's SERIALIZABLEXACT by linear search. */
1628 {
1629 SERIALIZABLEXACT *sxact =
1630 dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
1631
1632 if (sxact->pid == blocked_pid)
1633 {
1634 blocking_sxact = sxact;
1635 break;
1636 }
1637 }
1638
1639 /* Did we find it, and is it currently waiting in GetSafeSnapshot? */
1640 if (blocking_sxact != NULL && SxactIsDeferrableWaiting(blocking_sxact))
1641 {
1642 /* Traverse the list of possible unsafe conflicts collecting PIDs. */
1643 dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts)
1644 {
1645 RWConflict possibleUnsafeConflict =
1646 dlist_container(RWConflictData, inLink, iter.cur);
1647
1648 output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
1649
1650 if (num_written >= output_size)
1651 break;
1652 }
1653 }
1654
1655 LWLockRelease(SerializableXactHashLock);
1656
1657 return num_written;
1658}
1659
1660/*
1661 * Acquire a snapshot that can be used for the current transaction.
1662 *
1663 * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1664 * It should be current for this process and be contained in PredXact.
1665 *
1666 * The passed-in Snapshot pointer should reference a static data area that
1667 * can safely be passed to GetSnapshotData. The return value is actually
1668 * always this same pointer; no new snapshot data structure is allocated
1669 * within this function.
1670 */
1673{
1675
1676 /*
1677 * Can't use serializable mode while recovery is still active, as it is,
1678 * for example, on a hot standby. We could get here despite the check in
1679 * check_transaction_isolation() if default_transaction_isolation is set
1680 * to serializable, so phrase the hint accordingly.
1681 */
1682 if (RecoveryInProgress())
1683 ereport(ERROR,
1684 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1685 errmsg("cannot use serializable mode in a hot standby"),
1686 errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1687 errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1688
1689 /*
1690 * A special optimization is available for SERIALIZABLE READ ONLY
1691 * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1692 * thereby avoid all SSI overhead once it's running.
1693 */
1695 return GetSafeSnapshot(snapshot);
1696
1698 NULL, InvalidPid);
1699}
1700
1701/*
1702 * Import a snapshot to be used for the current transaction.
1703 *
1704 * This is nearly the same as GetSerializableTransactionSnapshot, except that
1705 * we don't take a new snapshot, but rather use the data we're handed.
1706 *
1707 * The caller must have verified that the snapshot came from a serializable
1708 * transaction; and if we're read-write, the source transaction must not be
1709 * read-only.
1710 */
1711void
1713 VirtualTransactionId *sourcevxid,
1714 int sourcepid)
1715{
1717
1718 /*
1719 * If this is called by parallel.c in a parallel worker, we don't want to
1720 * create a SERIALIZABLEXACT just yet because the leader's
1721 * SERIALIZABLEXACT will be installed with AttachSerializableXact(). We
1722 * also don't want to reject SERIALIZABLE READ ONLY DEFERRABLE in this
1723 * case, because the leader has already determined that the snapshot it
1724 * has passed us is safe. So there is nothing for us to do.
1725 */
1726 if (IsParallelWorker())
1727 return;
1728
1729 /*
1730 * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1731 * import snapshots, since there's no way to wait for a safe snapshot when
1732 * we're using the snap we're told to. (XXX instead of throwing an error,
1733 * we could just ignore the XactDeferrable flag?)
1734 */
1736 ereport(ERROR,
1737 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1738 errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1739
1740 (void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
1741 sourcepid);
1742}
1743
1744/*
1745 * Guts of GetSerializableTransactionSnapshot
1746 *
1747 * If sourcevxid is valid, this is actually an import operation and we should
1748 * skip calling GetSnapshotData, because the snapshot contents are already
1749 * loaded up. HOWEVER: to avoid race conditions, we must check that the
1750 * source xact is still running after we acquire SerializableXactHashLock.
1751 * We do that by calling ProcArrayInstallImportedXmin.
1752 */
1753static Snapshot
1755 VirtualTransactionId *sourcevxid,
1756 int sourcepid)
1757{
1758 PGPROC *proc;
1760 SERIALIZABLEXACT *sxact,
1761 *othersxact;
1762
1763 /* We only do this for serializable transactions. Once. */
1765
1767
1768 /*
1769 * Since all parts of a serializable transaction must use the same
1770 * snapshot, it is too late to establish one after a parallel operation
1771 * has begun.
1772 */
1773 if (IsInParallelMode())
1774 elog(ERROR, "cannot establish serializable snapshot during a parallel operation");
1775
1776 proc = MyProc;
1777 Assert(proc != NULL);
1778 GET_VXID_FROM_PGPROC(vxid, *proc);
1779
1780 /*
1781 * First we get the sxact structure, which may involve looping and access
1782 * to the "finished" list to free a structure for use.
1783 *
1784 * We must hold SerializableXactHashLock when taking/checking the snapshot
1785 * to avoid race conditions, for much the same reasons that
1786 * GetSnapshotData takes the ProcArrayLock. Since we might have to
1787 * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
1788 * this means we have to create the sxact first, which is a bit annoying
1789 * (in particular, an elog(ERROR) in procarray.c would cause us to leak
1790 * the sxact). Consider refactoring to avoid this.
1791 */
1792#ifdef TEST_SUMMARIZE_SERIAL
1794#endif
1795 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1796 do
1797 {
1798 sxact = CreatePredXact();
1799 /* If null, push out committed sxact to SLRU summary & retry. */
1800 if (!sxact)
1801 {
1802 LWLockRelease(SerializableXactHashLock);
1804 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1805 }
1806 } while (!sxact);
1807
1808 /* Get the snapshot, or check that it's safe to use */
1809 if (!sourcevxid)
1810 snapshot = GetSnapshotData(snapshot);
1811 else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
1812 {
1813 ReleasePredXact(sxact);
1814 LWLockRelease(SerializableXactHashLock);
1815 ereport(ERROR,
1816 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1817 errmsg("could not import the requested snapshot"),
1818 errdetail("The source process with PID %d is not running anymore.",
1819 sourcepid)));
1820 }
1821
1822 /*
1823 * If there are no serializable transactions which are not read-only, we
1824 * can "opt out" of predicate locking and conflict checking for a
1825 * read-only transaction.
1826 *
1827 * The reason this is safe is that a read-only transaction can only become
1828 * part of a dangerous structure if it overlaps a writable transaction
1829 * which in turn overlaps a writable transaction which committed before
1830 * the read-only transaction started. A new writable transaction can
1831 * overlap this one, but it can't meet the other condition of overlapping
1832 * a transaction which committed before this one started.
1833 */
1835 {
1836 ReleasePredXact(sxact);
1837 LWLockRelease(SerializableXactHashLock);
1838 return snapshot;
1839 }
1840
1841 /* Initialize the structure. */
1842 sxact->vxid = vxid;
1846 dlist_init(&(sxact->outConflicts));
1847 dlist_init(&(sxact->inConflicts));
1851 sxact->xmin = snapshot->xmin;
1852 sxact->pid = MyProcPid;
1853 sxact->pgprocno = MyProcNumber;
1854 dlist_init(&sxact->predicateLocks);
1856 sxact->flags = 0;
1857 if (XactReadOnly)
1858 {
1859 dlist_iter iter;
1860
1861 sxact->flags |= SXACT_FLAG_READ_ONLY;
1862
1863 /*
1864 * Register all concurrent r/w transactions as possible conflicts; if
1865 * all of them commit without any outgoing conflicts to earlier
1866 * transactions then this snapshot can be deemed safe (and we can run
1867 * without tracking predicate locks).
1868 */
1870 {
1871 othersxact = dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
1872
1873 if (!SxactIsCommitted(othersxact)
1874 && !SxactIsDoomed(othersxact)
1875 && !SxactIsReadOnly(othersxact))
1876 {
1877 SetPossibleUnsafeConflict(sxact, othersxact);
1878 }
1879 }
1880
1881 /*
1882 * If we didn't find any possibly unsafe conflicts because every
1883 * uncommitted writable transaction turned out to be doomed, then we
1884 * can "opt out" immediately. See comments above the earlier check
1885 * for PredXact->WritableSxactCount == 0.
1886 */
1888 {
1889 ReleasePredXact(sxact);
1890 LWLockRelease(SerializableXactHashLock);
1891 return snapshot;
1892 }
1893 }
1894 else
1895 {
1899 }
1900
1901 /* Maintain serializable global xmin info. */
1903 {
1905 PredXact->SxactGlobalXmin = snapshot->xmin;
1907 SerialSetActiveSerXmin(snapshot->xmin);
1908 }
1909 else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
1910 {
1913 }
1914 else
1915 {
1917 }
1918
1919 MySerializableXact = sxact;
1920 MyXactDidWrite = false; /* haven't written anything yet */
1921
1922 LWLockRelease(SerializableXactHashLock);
1923
1925
1926 return snapshot;
1927}
1928
1929static void
1931{
1932 HASHCTL hash_ctl;
1933
1934 /* Initialize the backend-local hash table of parent locks */
1936 hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1937 hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1938 LocalPredicateLockHash = hash_create("Local predicate lock",
1940 &hash_ctl,
1942}
1943
1944/*
1945 * Register the top level XID in SerializableXidHash.
1946 * Also store it for easy reference in MySerializableXact.
1947 */
1948void
1950{
1951 SERIALIZABLEXIDTAG sxidtag;
1952 SERIALIZABLEXID *sxid;
1953 bool found;
1954
1955 /*
1956 * If we're not tracking predicate lock data for this transaction, we
1957 * should ignore the request and return quickly.
1958 */
1960 return;
1961
1962 /* We should have a valid XID and be at the top level. */
1964
1965 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1966
1967 /* This should only be done once per transaction. */
1969
1971
1972 sxidtag.xid = xid;
1974 &sxidtag,
1975 HASH_ENTER, &found);
1976 Assert(!found);
1977
1978 /* Initialize the structure. */
1979 sxid->myXact = MySerializableXact;
1980 LWLockRelease(SerializableXactHashLock);
1981}
1982
1983
1984/*
1985 * Check whether there are any predicate locks held by any transaction
1986 * for the page at the given block number.
1987 *
1988 * Note that the transaction may be completed but not yet subject to
1989 * cleanup due to overlapping serializable transactions. This must
1990 * return valid information regardless of transaction isolation level.
1991 *
1992 * Also note that this doesn't check for a conflicting relation lock,
1993 * just a lock specifically on the given page.
1994 *
1995 * One use is to support proper behavior during GiST index vacuum.
1996 */
1997bool
1999{
2000 PREDICATELOCKTARGETTAG targettag;
2001 uint32 targettaghash;
2002 LWLock *partitionLock;
2003 PREDICATELOCKTARGET *target;
2004
2006 relation->rd_locator.dbOid,
2007 relation->rd_id,
2008 blkno);
2009
2010 targettaghash = PredicateLockTargetTagHashCode(&targettag);
2011 partitionLock = PredicateLockHashPartitionLock(targettaghash);
2012 LWLockAcquire(partitionLock, LW_SHARED);
2013 target = (PREDICATELOCKTARGET *)
2015 &targettag, targettaghash,
2016 HASH_FIND, NULL);
2017 LWLockRelease(partitionLock);
2018
2019 return (target != NULL);
2020}
2021
2022
2023/*
2024 * Check whether a particular lock is held by this transaction.
2025 *
2026 * Important note: this function may return false even if the lock is
2027 * being held, because it uses the local lock table which is not
2028 * updated if another transaction modifies our lock list (e.g. to
2029 * split an index page). It can also return true when a coarser
2030 * granularity lock that covers this target is being held. Be careful
2031 * to only use this function in circumstances where such errors are
2032 * acceptable!
2033 */
2034static bool
2036{
2037 LOCALPREDICATELOCK *lock;
2038
2039 /* check local hash table */
2041 targettag,
2042 HASH_FIND, NULL);
2043
2044 if (!lock)
2045 return false;
2046
2047 /*
2048 * Found entry in the table, but still need to check whether it's actually
2049 * held -- it could just be a parent of some held lock.
2050 */
2051 return lock->held;
2052}
2053
2054/*
2055 * Return the parent lock tag in the lock hierarchy: the next coarser
2056 * lock that covers the provided tag.
2057 *
2058 * Returns true and sets *parent to the parent tag if one exists,
2059 * returns false if none exists.
2060 */
2061static bool
2063 PREDICATELOCKTARGETTAG *parent)
2064{
2065 switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2066 {
2068 /* relation locks have no parent lock */
2069 return false;
2070
2071 case PREDLOCKTAG_PAGE:
2072 /* parent lock is relation lock */
2076
2077 return true;
2078
2079 case PREDLOCKTAG_TUPLE:
2080 /* parent lock is page lock */
2085 return true;
2086 }
2087
2088 /* not reachable */
2089 Assert(false);
2090 return false;
2091}
2092
2093/*
2094 * Check whether the lock we are considering is already covered by a
2095 * coarser lock for our transaction.
2096 *
2097 * Like PredicateLockExists, this function might return a false
2098 * negative, but it will never return a false positive.
2099 */
2100static bool
2102{
2103 PREDICATELOCKTARGETTAG targettag,
2104 parenttag;
2105
2106 targettag = *newtargettag;
2107
2108 /* check parents iteratively until no more */
2109 while (GetParentPredicateLockTag(&targettag, &parenttag))
2110 {
2111 targettag = parenttag;
2112 if (PredicateLockExists(&targettag))
2113 return true;
2114 }
2115
2116 /* no more parents to check; lock is not covered */
2117 return false;
2118}
2119
2120/*
2121 * Remove the dummy entry from the predicate lock target hash, to free up some
2122 * scratch space. The caller must be holding SerializablePredicateListLock,
2123 * and must restore the entry with RestoreScratchTarget() before releasing the
2124 * lock.
2125 *
2126 * If lockheld is true, the caller is already holding the partition lock
2127 * of the partition containing the scratch entry.
2128 */
2129static void
2131{
2132 bool found;
2133
2134 Assert(LWLockHeldByMe(SerializablePredicateListLock));
2135
2136 if (!lockheld)
2141 HASH_REMOVE, &found);
2142 Assert(found);
2143 if (!lockheld)
2145}
2146
2147/*
2148 * Re-insert the dummy entry in predicate lock target hash.
2149 */
2150static void
2152{
2153 bool found;
2154
2155 Assert(LWLockHeldByMe(SerializablePredicateListLock));
2156
2157 if (!lockheld)
2162 HASH_ENTER, &found);
2163 Assert(!found);
2164 if (!lockheld)
2166}
2167
2168/*
2169 * Check whether the list of related predicate locks is empty for a
2170 * predicate lock target, and remove the target if it is.
2171 */
2172static void
2174{
2176
2177 Assert(LWLockHeldByMe(SerializablePredicateListLock));
2178
2179 /* Can't remove it until no locks at this target. */
2180 if (!dlist_is_empty(&target->predicateLocks))
2181 return;
2182
2183 /* Actually remove the target. */
2185 &target->tag,
2186 targettaghash,
2187 HASH_REMOVE, NULL);
2188 Assert(rmtarget == target);
2189}
2190
2191/*
2192 * Delete child target locks owned by this process.
2193 * This implementation is assuming that the usage of each target tag field
2194 * is uniform. No need to make this hard if we don't have to.
2195 *
2196 * We acquire an LWLock in the case of parallel mode, because worker
2197 * backends have access to the leader's SERIALIZABLEXACT. Otherwise,
2198 * we aren't acquiring LWLocks for the predicate lock or lock
2199 * target structures associated with this transaction unless we're going
2200 * to modify them, because no other process is permitted to modify our
2201 * locks.
2202 */
2203static void
2205{
2206 SERIALIZABLEXACT *sxact;
2207 PREDICATELOCK *predlock;
2208 dlist_mutable_iter iter;
2209
2210 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
2211 sxact = MySerializableXact;
2212 if (IsInParallelMode())
2214
2216 {
2217 PREDICATELOCKTAG oldlocktag;
2218 PREDICATELOCKTARGET *oldtarget;
2219 PREDICATELOCKTARGETTAG oldtargettag;
2220
2221 predlock = dlist_container(PREDICATELOCK, xactLink, iter.cur);
2222
2223 oldlocktag = predlock->tag;
2224 Assert(oldlocktag.myXact == sxact);
2225 oldtarget = oldlocktag.myTarget;
2226 oldtargettag = oldtarget->tag;
2227
2228 if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
2229 {
2230 uint32 oldtargettaghash;
2231 LWLock *partitionLock;
2233
2234 oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2235 partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2236
2237 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2238
2239 dlist_delete(&predlock->xactLink);
2240 dlist_delete(&predlock->targetLink);
2241 rmpredlock = hash_search_with_hash_value
2243 &oldlocktag,
2245 oldtargettaghash),
2246 HASH_REMOVE, NULL);
2247 Assert(rmpredlock == predlock);
2248
2249 RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2250
2251 LWLockRelease(partitionLock);
2252
2253 DecrementParentLocks(&oldtargettag);
2254 }
2255 }
2256 if (IsInParallelMode())
2258 LWLockRelease(SerializablePredicateListLock);
2259}
2260
2261/*
2262 * Returns the promotion limit for a given predicate lock target. This is the
2263 * max number of descendant locks allowed before promoting to the specified
2264 * tag. Note that the limit includes non-direct descendants (e.g., both tuples
2265 * and pages for a relation lock).
2266 *
2267 * Currently the default limit is 2 for a page lock, and half of the value of
2268 * max_pred_locks_per_transaction - 1 for a relation lock, to match behavior
2269 * of earlier releases when upgrading.
2270 *
2271 * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
2272 * of page and tuple locks based on the pages in a relation, and the maximum
2273 * ratio of tuple locks to tuples in a page. This would provide more
2274 * generally "balanced" allocation of locks to where they are most useful,
2275 * while still allowing the absolute numbers to prevent one relation from
2276 * tying up all predicate lock resources.
2277 */
2278static int
2280{
2281 switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2282 {
2288
2289 case PREDLOCKTAG_PAGE:
2291
2292 case PREDLOCKTAG_TUPLE:
2293
2294 /*
2295 * not reachable: nothing is finer-granularity than a tuple, so we
2296 * should never try to promote to it.
2297 */
2298 Assert(false);
2299 return 0;
2300 }
2301
2302 /* not reachable */
2303 Assert(false);
2304 return 0;
2305}
2306
2307/*
2308 * For all ancestors of a newly-acquired predicate lock, increment
2309 * their child count in the parent hash table. If any of them have
2310 * more descendants than their promotion threshold, acquire the
2311 * coarsest such lock.
2312 *
2313 * Returns true if a parent lock was acquired and false otherwise.
2314 */
2315static bool
2317{
2318 PREDICATELOCKTARGETTAG targettag,
2319 nexttag,
2320 promotiontag;
2321 LOCALPREDICATELOCK *parentlock;
2322 bool found,
2323 promote;
2324
2325 promote = false;
2326
2327 targettag = *reqtag;
2328
2329 /* check parents iteratively */
2330 while (GetParentPredicateLockTag(&targettag, &nexttag))
2331 {
2332 targettag = nexttag;
2334 &targettag,
2335 HASH_ENTER,
2336 &found);
2337 if (!found)
2338 {
2339 parentlock->held = false;
2340 parentlock->childLocks = 1;
2341 }
2342 else
2343 parentlock->childLocks++;
2344
2345 if (parentlock->childLocks >
2346 MaxPredicateChildLocks(&targettag))
2347 {
2348 /*
2349 * We should promote to this parent lock. Continue to check its
2350 * ancestors, however, both to get their child counts right and to
2351 * check whether we should just go ahead and promote to one of
2352 * them.
2353 */
2354 promotiontag = targettag;
2355 promote = true;
2356 }
2357 }
2358
2359 if (promote)
2360 {
2361 /* acquire coarsest ancestor eligible for promotion */
2362 PredicateLockAcquire(&promotiontag);
2363 return true;
2364 }
2365 else
2366 return false;
2367}
2368
2369/*
2370 * When releasing a lock, decrement the child count on all ancestor
2371 * locks.
2372 *
2373 * This is called only when releasing a lock via
2374 * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
2375 * we've acquired its parent, possibly due to promotion) or when a new
2376 * MVCC write lock makes the predicate lock unnecessary. There's no
2377 * point in calling it when locks are released at transaction end, as
2378 * this information is no longer needed.
2379 */
2380static void
2382{
2383 PREDICATELOCKTARGETTAG parenttag,
2384 nexttag;
2385
2386 parenttag = *targettag;
2387
2388 while (GetParentPredicateLockTag(&parenttag, &nexttag))
2389 {
2390 uint32 targettaghash;
2391 LOCALPREDICATELOCK *parentlock,
2393
2394 parenttag = nexttag;
2395 targettaghash = PredicateLockTargetTagHashCode(&parenttag);
2396 parentlock = (LOCALPREDICATELOCK *)
2398 &parenttag, targettaghash,
2399 HASH_FIND, NULL);
2400
2401 /*
2402 * There's a small chance the parent lock doesn't exist in the lock
2403 * table. This can happen if we prematurely removed it because an
2404 * index split caused the child refcount to be off.
2405 */
2406 if (parentlock == NULL)
2407 continue;
2408
2409 parentlock->childLocks--;
2410
2411 /*
2412 * Under similar circumstances the parent lock's refcount might be
2413 * zero. This only happens if we're holding that lock (otherwise we
2414 * would have removed the entry).
2415 */
2416 if (parentlock->childLocks < 0)
2417 {
2418 Assert(parentlock->held);
2419 parentlock->childLocks = 0;
2420 }
2421
2422 if ((parentlock->childLocks == 0) && (!parentlock->held))
2423 {
2424 rmlock = (LOCALPREDICATELOCK *)
2426 &parenttag, targettaghash,
2427 HASH_REMOVE, NULL);
2428 Assert(rmlock == parentlock);
2429 }
2430 }
2431}
2432
2433/*
2434 * Indicate that a predicate lock on the given target is held by the
2435 * specified transaction. Has no effect if the lock is already held.
2436 *
2437 * This updates the lock table and the sxact's lock list, and creates
2438 * the lock target if necessary, but does *not* do anything related to
2439 * granularity promotion or the local lock table. See
2440 * PredicateLockAcquire for that.
2441 */
2442static void
2444 uint32 targettaghash,
2445 SERIALIZABLEXACT *sxact)
2446{
2447 PREDICATELOCKTARGET *target;
2448 PREDICATELOCKTAG locktag;
2449 PREDICATELOCK *lock;
2450 LWLock *partitionLock;
2451 bool found;
2452
2453 partitionLock = PredicateLockHashPartitionLock(targettaghash);
2454
2455 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
2456 if (IsInParallelMode())
2458 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2459
2460 /* Make sure that the target is represented. */
2461 target = (PREDICATELOCKTARGET *)
2463 targettag, targettaghash,
2464 HASH_ENTER_NULL, &found);
2465 if (!target)
2466 ereport(ERROR,
2467 (errcode(ERRCODE_OUT_OF_MEMORY),
2468 errmsg("out of shared memory"),
2469 errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
2470 if (!found)
2471 dlist_init(&target->predicateLocks);
2472
2473 /* We've got the sxact and target, make sure they're joined. */
2474 locktag.myTarget = target;
2475 locktag.myXact = sxact;
2476 lock = (PREDICATELOCK *)
2478 PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
2479 HASH_ENTER_NULL, &found);
2480 if (!lock)
2481 ereport(ERROR,
2482 (errcode(ERRCODE_OUT_OF_MEMORY),
2483 errmsg("out of shared memory"),
2484 errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
2485
2486 if (!found)
2487 {
2488 dlist_push_tail(&target->predicateLocks, &lock->targetLink);
2489 dlist_push_tail(&sxact->predicateLocks, &lock->xactLink);
2491 }
2492
2493 LWLockRelease(partitionLock);
2494 if (IsInParallelMode())
2496 LWLockRelease(SerializablePredicateListLock);
2497}
2498
2499/*
2500 * Acquire a predicate lock on the specified target for the current
2501 * connection if not already held. This updates the local lock table
2502 * and uses it to implement granularity promotion. It will consolidate
2503 * multiple locks into a coarser lock if warranted, and will release
2504 * any finer-grained locks covered by the new one.
2505 */
2506static void
2508{
2509 uint32 targettaghash;
2510 bool found;
2511 LOCALPREDICATELOCK *locallock;
2512
2513 /* Do we have the lock already, or a covering lock? */
2514 if (PredicateLockExists(targettag))
2515 return;
2516
2517 if (CoarserLockCovers(targettag))
2518 return;
2519
2520 /* the same hash and LW lock apply to the lock target and the local lock. */
2521 targettaghash = PredicateLockTargetTagHashCode(targettag);
2522
2523 /* Acquire lock in local table */
2524 locallock = (LOCALPREDICATELOCK *)
2526 targettag, targettaghash,
2527 HASH_ENTER, &found);
2528 locallock->held = true;
2529 if (!found)
2530 locallock->childLocks = 0;
2531
2532 /* Actually create the lock */
2533 CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2534
2535 /*
2536 * Lock has been acquired. Check whether it should be promoted to a
2537 * coarser granularity, or whether there are finer-granularity locks to
2538 * clean up.
2539 */
2541 {
2542 /*
2543 * Lock request was promoted to a coarser-granularity lock, and that
2544 * lock was acquired. It will delete this lock and any of its
2545 * children, so we're done.
2546 */
2547 }
2548 else
2549 {
2550 /* Clean up any finer-granularity locks */
2552 DeleteChildTargetLocks(targettag);
2553 }
2554}
2555
2556
2557/*
2558 * PredicateLockRelation
2559 *
2560 * Gets a predicate lock at the relation level.
2561 * Skip if not in full serializable transaction isolation level.
2562 * Skip if this is a temporary table.
2563 * Clear any finer-grained predicate locks this session has on the relation.
2564 */
2565void
2567{
2569
2570 if (!SerializationNeededForRead(relation, snapshot))
2571 return;
2572
2574 relation->rd_locator.dbOid,
2575 relation->rd_id);
2577}
2578
2579/*
2580 * PredicateLockPage
2581 *
2582 * Gets a predicate lock at the page level.
2583 * Skip if not in full serializable transaction isolation level.
2584 * Skip if this is a temporary table.
2585 * Skip if a coarser predicate lock already covers this page.
2586 * Clear any finer-grained predicate locks this session has on the relation.
2587 */
2588void
2590{
2592
2593 if (!SerializationNeededForRead(relation, snapshot))
2594 return;
2595
2597 relation->rd_locator.dbOid,
2598 relation->rd_id,
2599 blkno);
2601}
2602
2603/*
2604 * PredicateLockTID
2605 *
2606 * Gets a predicate lock at the tuple level.
2607 * Skip if not in full serializable transaction isolation level.
2608 * Skip if this is a temporary table.
2609 */
2610void
2612 TransactionId tuple_xid)
2613{
2615
2616 if (!SerializationNeededForRead(relation, snapshot))
2617 return;
2618
2619 /*
2620 * Return if this xact wrote it.
2621 */
2622 if (relation->rd_index == NULL)
2623 {
2624 /* If we wrote it; we already have a write lock. */
2626 return;
2627 }
2628
2629 /*
2630 * Do quick-but-not-definitive test for a relation lock first. This will
2631 * never cause a return when the relation is *not* locked, but will
2632 * occasionally let the check continue when there really *is* a relation
2633 * level lock.
2634 */
2636 relation->rd_locator.dbOid,
2637 relation->rd_id);
2638 if (PredicateLockExists(&tag))
2639 return;
2640
2642 relation->rd_locator.dbOid,
2643 relation->rd_id,
2647}
2648
2649
2650/*
2651 * DeleteLockTarget
2652 *
2653 * Remove a predicate lock target along with any locks held for it.
2654 *
2655 * Caller must hold SerializablePredicateListLock and the
2656 * appropriate hash partition lock for the target.
2657 */
2658static void
2660{
2661 dlist_mutable_iter iter;
2662
2663 Assert(LWLockHeldByMeInMode(SerializablePredicateListLock,
2664 LW_EXCLUSIVE));
2666
2667 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2668
2669 dlist_foreach_modify(iter, &target->predicateLocks)
2670 {
2671 PREDICATELOCK *predlock =
2672 dlist_container(PREDICATELOCK, targetLink, iter.cur);
2673 bool found;
2674
2675 dlist_delete(&(predlock->xactLink));
2676 dlist_delete(&(predlock->targetLink));
2677
2680 &predlock->tag,
2682 targettaghash),
2683 HASH_REMOVE, &found);
2684 Assert(found);
2685 }
2686 LWLockRelease(SerializableXactHashLock);
2687
2688 /* Remove the target itself, if possible. */
2689 RemoveTargetIfNoLongerUsed(target, targettaghash);
2690}
2691
2692
2693/*
2694 * TransferPredicateLocksToNewTarget
2695 *
2696 * Move or copy all the predicate locks for a lock target, for use by
2697 * index page splits/combines and other things that create or replace
2698 * lock targets. If 'removeOld' is true, the old locks and the target
2699 * will be removed.
2700 *
2701 * Returns true on success, or false if we ran out of shared memory to
2702 * allocate the new target or locks. Guaranteed to always succeed if
2703 * removeOld is set (by using the scratch entry in PredicateLockTargetHash
2704 * for scratch space).
2705 *
2706 * Warning: the "removeOld" option should be used only with care,
2707 * because this function does not (indeed, can not) update other
2708 * backends' LocalPredicateLockHash. If we are only adding new
2709 * entries, this is not a problem: the local lock table is used only
2710 * as a hint, so missing entries for locks that are held are
2711 * OK. Having entries for locks that are no longer held, as can happen
2712 * when using "removeOld", is not in general OK. We can only use it
2713 * safely when replacing a lock with a coarser-granularity lock that
2714 * covers it, or if we are absolutely certain that no one will need to
2715 * refer to that lock in the future.
2716 *
2717 * Caller must hold SerializablePredicateListLock exclusively.
2718 */
2719static bool
2721 PREDICATELOCKTARGETTAG newtargettag,
2722 bool removeOld)
2723{
2724 uint32 oldtargettaghash;
2725 LWLock *oldpartitionLock;
2726 PREDICATELOCKTARGET *oldtarget;
2727 uint32 newtargettaghash;
2728 LWLock *newpartitionLock;
2729 bool found;
2730 bool outOfShmem = false;
2731
2732 Assert(LWLockHeldByMeInMode(SerializablePredicateListLock,
2733 LW_EXCLUSIVE));
2734
2735 oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2736 newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
2737 oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2738 newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
2739
2740 if (removeOld)
2741 {
2742 /*
2743 * Remove the dummy entry to give us scratch space, so we know we'll
2744 * be able to create the new lock target.
2745 */
2746 RemoveScratchTarget(false);
2747 }
2748
2749 /*
2750 * We must get the partition locks in ascending sequence to avoid
2751 * deadlocks. If old and new partitions are the same, we must request the
2752 * lock only once.
2753 */
2754 if (oldpartitionLock < newpartitionLock)
2755 {
2756 LWLockAcquire(oldpartitionLock,
2757 (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2758 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2759 }
2760 else if (oldpartitionLock > newpartitionLock)
2761 {
2762 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2763 LWLockAcquire(oldpartitionLock,
2764 (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2765 }
2766 else
2767 LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2768
2769 /*
2770 * Look for the old target. If not found, that's OK; no predicate locks
2771 * are affected, so we can just clean up and return. If it does exist,
2772 * walk its list of predicate locks and move or copy them to the new
2773 * target.
2774 */
2776 &oldtargettag,
2777 oldtargettaghash,
2778 HASH_FIND, NULL);
2779
2780 if (oldtarget)
2781 {
2782 PREDICATELOCKTARGET *newtarget;
2783 PREDICATELOCKTAG newpredlocktag;
2784 dlist_mutable_iter iter;
2785
2787 &newtargettag,
2788 newtargettaghash,
2789 HASH_ENTER_NULL, &found);
2790
2791 if (!newtarget)
2792 {
2793 /* Failed to allocate due to insufficient shmem */
2794 outOfShmem = true;
2795 goto exit;
2796 }
2797
2798 /* If we created a new entry, initialize it */
2799 if (!found)
2800 dlist_init(&newtarget->predicateLocks);
2801
2802 newpredlocktag.myTarget = newtarget;
2803
2804 /*
2805 * Loop through all the locks on the old target, replacing them with
2806 * locks on the new target.
2807 */
2808 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2809
2810 dlist_foreach_modify(iter, &oldtarget->predicateLocks)
2811 {
2812 PREDICATELOCK *oldpredlock =
2813 dlist_container(PREDICATELOCK, targetLink, iter.cur);
2814 PREDICATELOCK *newpredlock;
2815 SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
2816
2817 newpredlocktag.myXact = oldpredlock->tag.myXact;
2818
2819 if (removeOld)
2820 {
2821 dlist_delete(&(oldpredlock->xactLink));
2822 dlist_delete(&(oldpredlock->targetLink));
2823
2826 &oldpredlock->tag,
2828 oldtargettaghash),
2829 HASH_REMOVE, &found);
2830 Assert(found);
2831 }
2832
2833 newpredlock = (PREDICATELOCK *)
2835 &newpredlocktag,
2837 newtargettaghash),
2839 &found);
2840 if (!newpredlock)
2841 {
2842 /* Out of shared memory. Undo what we've done so far. */
2843 LWLockRelease(SerializableXactHashLock);
2844 DeleteLockTarget(newtarget, newtargettaghash);
2845 outOfShmem = true;
2846 goto exit;
2847 }
2848 if (!found)
2849 {
2850 dlist_push_tail(&(newtarget->predicateLocks),
2851 &(newpredlock->targetLink));
2852 dlist_push_tail(&(newpredlocktag.myXact->predicateLocks),
2853 &(newpredlock->xactLink));
2854 newpredlock->commitSeqNo = oldCommitSeqNo;
2855 }
2856 else
2857 {
2858 if (newpredlock->commitSeqNo < oldCommitSeqNo)
2859 newpredlock->commitSeqNo = oldCommitSeqNo;
2860 }
2861
2862 Assert(newpredlock->commitSeqNo != 0);
2863 Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2864 || (newpredlock->tag.myXact == OldCommittedSxact));
2865 }
2866 LWLockRelease(SerializableXactHashLock);
2867
2868 if (removeOld)
2869 {
2870 Assert(dlist_is_empty(&oldtarget->predicateLocks));
2871 RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2872 }
2873 }
2874
2875
2876exit:
2877 /* Release partition locks in reverse order of acquisition. */
2878 if (oldpartitionLock < newpartitionLock)
2879 {
2880 LWLockRelease(newpartitionLock);
2881 LWLockRelease(oldpartitionLock);
2882 }
2883 else if (oldpartitionLock > newpartitionLock)
2884 {
2885 LWLockRelease(oldpartitionLock);
2886 LWLockRelease(newpartitionLock);
2887 }
2888 else
2889 LWLockRelease(newpartitionLock);
2890
2891 if (removeOld)
2892 {
2893 /* We shouldn't run out of memory if we're moving locks */
2894 Assert(!outOfShmem);
2895
2896 /* Put the scratch entry back */
2897 RestoreScratchTarget(false);
2898 }
2899
2900 return !outOfShmem;
2901}
2902
2903/*
2904 * Drop all predicate locks of any granularity from the specified relation,
2905 * which can be a heap relation or an index relation. If 'transfer' is true,
2906 * acquire a relation lock on the heap for any transactions with any lock(s)
2907 * on the specified relation.
2908 *
2909 * This requires grabbing a lot of LW locks and scanning the entire lock
2910 * target table for matches. That makes this more expensive than most
2911 * predicate lock management functions, but it will only be called for DDL
2912 * type commands that are expensive anyway, and there are fast returns when
2913 * no serializable transactions are active or the relation is temporary.
2914 *
2915 * We don't use the TransferPredicateLocksToNewTarget function because it
2916 * acquires its own locks on the partitions of the two targets involved,
2917 * and we'll already be holding all partition locks.
2918 *
2919 * We can't throw an error from here, because the call could be from a
2920 * transaction which is not serializable.
2921 *
2922 * NOTE: This is currently only called with transfer set to true, but that may
2923 * change. If we decide to clean up the locks from a table on commit of a
2924 * transaction which executed DROP TABLE, the false condition will be useful.
2925 */
2926static void
2928{
2929 HASH_SEQ_STATUS seqstat;
2930 PREDICATELOCKTARGET *oldtarget;
2931 PREDICATELOCKTARGET *heaptarget;
2932 Oid dbId;
2933 Oid relId;
2934 Oid heapId;
2935 int i;
2936 bool isIndex;
2937 bool found;
2938 uint32 heaptargettaghash;
2939
2940 /*
2941 * Bail out quickly if there are no serializable transactions running.
2942 * It's safe to check this without taking locks because the caller is
2943 * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
2944 * would matter here can be acquired while that is held.
2945 */
2947 return;
2948
2949 if (!PredicateLockingNeededForRelation(relation))
2950 return;
2951
2952 dbId = relation->rd_locator.dbOid;
2953 relId = relation->rd_id;
2954 if (relation->rd_index == NULL)
2955 {
2956 isIndex = false;
2957 heapId = relId;
2958 }
2959 else
2960 {
2961 isIndex = true;
2962 heapId = relation->rd_index->indrelid;
2963 }
2964 Assert(heapId != InvalidOid);
2965 Assert(transfer || !isIndex); /* index OID only makes sense with
2966 * transfer */
2967
2968 /* Retrieve first time needed, then keep. */
2969 heaptargettaghash = 0;
2970 heaptarget = NULL;
2971
2972 /* Acquire locks on all lock partitions */
2973 LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
2974 for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
2976 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2977
2978 /*
2979 * Remove the dummy entry to give us scratch space, so we know we'll be
2980 * able to create the new lock target.
2981 */
2982 if (transfer)
2983 RemoveScratchTarget(true);
2984
2985 /* Scan through target map */
2987
2988 while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
2989 {
2990 dlist_mutable_iter iter;
2991
2992 /*
2993 * Check whether this is a target which needs attention.
2994 */
2995 if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
2996 continue; /* wrong relation id */
2997 if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
2998 continue; /* wrong database id */
2999 if (transfer && !isIndex
3001 continue; /* already the right lock */
3002
3003 /*
3004 * If we made it here, we have work to do. We make sure the heap
3005 * relation lock exists, then we walk the list of predicate locks for
3006 * the old target we found, moving all locks to the heap relation lock
3007 * -- unless they already hold that.
3008 */
3009
3010 /*
3011 * First make sure we have the heap relation target. We only need to
3012 * do this once.
3013 */
3014 if (transfer && heaptarget == NULL)
3015 {
3016 PREDICATELOCKTARGETTAG heaptargettag;
3017
3018 SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
3019 heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
3021 &heaptargettag,
3022 heaptargettaghash,
3023 HASH_ENTER, &found);
3024 if (!found)
3025 dlist_init(&heaptarget->predicateLocks);
3026 }
3027
3028 /*
3029 * Loop through all the locks on the old target, replacing them with
3030 * locks on the new target.
3031 */
3032 dlist_foreach_modify(iter, &oldtarget->predicateLocks)
3033 {
3034 PREDICATELOCK *oldpredlock =
3035 dlist_container(PREDICATELOCK, targetLink, iter.cur);
3036 PREDICATELOCK *newpredlock;
3037 SerCommitSeqNo oldCommitSeqNo;
3038 SERIALIZABLEXACT *oldXact;
3039
3040 /*
3041 * Remove the old lock first. This avoids the chance of running
3042 * out of lock structure entries for the hash table.
3043 */
3044 oldCommitSeqNo = oldpredlock->commitSeqNo;
3045 oldXact = oldpredlock->tag.myXact;
3046
3047 dlist_delete(&(oldpredlock->xactLink));
3048
3049 /*
3050 * No need for retail delete from oldtarget list, we're removing
3051 * the whole target anyway.
3052 */
3054 &oldpredlock->tag,
3055 HASH_REMOVE, &found);
3056 Assert(found);
3057
3058 if (transfer)
3059 {
3060 PREDICATELOCKTAG newpredlocktag;
3061
3062 newpredlocktag.myTarget = heaptarget;
3063 newpredlocktag.myXact = oldXact;
3064 newpredlock = (PREDICATELOCK *)
3066 &newpredlocktag,
3068 heaptargettaghash),
3069 HASH_ENTER,
3070 &found);
3071 if (!found)
3072 {
3073 dlist_push_tail(&(heaptarget->predicateLocks),
3074 &(newpredlock->targetLink));
3075 dlist_push_tail(&(newpredlocktag.myXact->predicateLocks),
3076 &(newpredlock->xactLink));
3077 newpredlock->commitSeqNo = oldCommitSeqNo;
3078 }
3079 else
3080 {
3081 if (newpredlock->commitSeqNo < oldCommitSeqNo)
3082 newpredlock->commitSeqNo = oldCommitSeqNo;
3083 }
3084
3085 Assert(newpredlock->commitSeqNo != 0);
3086 Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
3087 || (newpredlock->tag.myXact == OldCommittedSxact));
3088 }
3089 }
3090
3092 &found);
3093 Assert(found);
3094 }
3095
3096 /* Put the scratch entry back */
3097 if (transfer)
3099
3100 /* Release locks in reverse order */
3101 LWLockRelease(SerializableXactHashLock);
3102 for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
3104 LWLockRelease(SerializablePredicateListLock);
3105}
3106
3107/*
3108 * TransferPredicateLocksToHeapRelation
3109 * For all transactions, transfer all predicate locks for the given
3110 * relation to a single relation lock on the heap.
3111 */
3112void
3114{
3115 DropAllPredicateLocksFromTable(relation, true);
3116}
3117
3118
3119/*
3120 * PredicateLockPageSplit
3121 *
3122 * Copies any predicate locks for the old page to the new page.
3123 * Skip if this is a temporary table or toast table.
3124 *
3125 * NOTE: A page split (or overflow) affects all serializable transactions,
3126 * even if it occurs in the context of another transaction isolation level.
3127 *
3128 * NOTE: This currently leaves the local copy of the locks without
3129 * information on the new lock which is in shared memory. This could cause
3130 * problems if enough page splits occur on locked pages without the processes
3131 * which hold the locks getting in and noticing.
3132 */
3133void
3135 BlockNumber newblkno)
3136{
3137 PREDICATELOCKTARGETTAG oldtargettag;
3138 PREDICATELOCKTARGETTAG newtargettag;
3139 bool success;
3140
3141 /*
3142 * Bail out quickly if there are no serializable transactions running.
3143 *
3144 * It's safe to do this check without taking any additional locks. Even if
3145 * a serializable transaction starts concurrently, we know it can't take
3146 * any SIREAD locks on the page being split because the caller is holding
3147 * the associated buffer page lock. Memory reordering isn't an issue; the
3148 * memory barrier in the LWLock acquisition guarantees that this read
3149 * occurs while the buffer page lock is held.
3150 */
3152 return;
3153
3154 if (!PredicateLockingNeededForRelation(relation))
3155 return;
3156
3157 Assert(oldblkno != newblkno);
3158 Assert(BlockNumberIsValid(oldblkno));
3159 Assert(BlockNumberIsValid(newblkno));
3160
3162 relation->rd_locator.dbOid,
3163 relation->rd_id,
3164 oldblkno);
3166 relation->rd_locator.dbOid,
3167 relation->rd_id,
3168 newblkno);
3169
3170 LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
3171
3172 /*
3173 * Try copying the locks over to the new page's tag, creating it if
3174 * necessary.
3175 */
3177 newtargettag,
3178 false);
3179
3180 if (!success)
3181 {
3182 /*
3183 * No more predicate lock entries are available. Failure isn't an
3184 * option here, so promote the page lock to a relation lock.
3185 */
3186
3187 /* Get the parent relation lock's lock tag */
3188 success = GetParentPredicateLockTag(&oldtargettag,
3189 &newtargettag);
3190 Assert(success);
3191
3192 /*
3193 * Move the locks to the parent. This shouldn't fail.
3194 *
3195 * Note that here we are removing locks held by other backends,
3196 * leading to a possible inconsistency in their local lock hash table.
3197 * This is OK because we're replacing it with a lock that covers the
3198 * old one.
3199 */
3201 newtargettag,
3202 true);
3203 Assert(success);
3204 }
3205
3206 LWLockRelease(SerializablePredicateListLock);
3207}
3208
3209/*
3210 * PredicateLockPageCombine
3211 *
3212 * Combines predicate locks for two existing pages.
3213 * Skip if this is a temporary table or toast table.
3214 *
3215 * NOTE: A page combine affects all serializable transactions, even if it
3216 * occurs in the context of another transaction isolation level.
3217 */
3218void
/*
 * NOTE(review): the extraction dropped original line 3219, which carried the
 * function name and leading parameters -- presumably
 * "PredicateLockPageCombine(Relation relation, BlockNumber oldblkno," given
 * the header comment above and the continuation line below; confirm against
 * upstream predicate.c before relying on this.
 */
3220 BlockNumber newblkno)
3221{
3222 /*
3223 * Page combines differ from page splits in that we ought to be able to
3224 * remove the locks on the old page after transferring them to the new
3225 * page, instead of duplicating them. However, because we can't edit other
3226 * backends' local lock tables, removing the old lock would leave them
3227 * with an entry in their LocalPredicateLockHash for a lock they're not
3228 * holding, which isn't acceptable. So we wind up having to do the same
3229 * work as a page split, acquiring a lock on the new page and keeping the
3230 * old page locked too. That can lead to some false positives, but should
3231 * be rare in practice.
3232 */
 /* Delegate: a combine is implemented exactly as a split (see above). */
3233 PredicateLockPageSplit(relation, oldblkno, newblkno);
3234}
3235
3236/*
3237 * Walk the list of in-progress serializable transactions and find the new
3238 * xmin.
3239 */
3240static void
/*
 * NOTE(review): original line 3241 (the function name line, presumably
 * "SetNewSxactGlobalXmin(void)") was dropped by the extraction -- confirm
 * against upstream predicate.c.
 */
3242{
3243 dlist_iter iter;
3244
 /* Caller must already hold SerializableXactHashLock (asserted here). */
3245 Assert(LWLockHeldByMe(SerializableXactHashLock));
3246
/*
 * NOTE(review): original lines 3247-3248 and 3250 are missing; they
 * presumably reset the shared global-xmin state and open the dlist_foreach
 * loop over the list of active SERIALIZABLEXACTs -- TODO confirm upstream.
 */
3249
3251 {
3252 SERIALIZABLEXACT *sxact =
3253 dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
3254
 /* Only transactions still in progress count toward the new xmin. */
3255 if (!SxactIsRolledBack(sxact)
3256 && !SxactIsCommitted(sxact)
3257 && sxact != OldCommittedSxact)
3258 {
3259 Assert(sxact->xmin != InvalidTransactionId);
/*
 * NOTE(review): lines 3260, 3262, 3265 and 3268-3269 are missing. The
 * surviving fragments show an if/else-if that either adopts sxact->xmin as
 * the new PredXact->SxactGlobalXmin (when it precedes the current value) or,
 * when equal, presumably bumps an associated count -- verify upstream.
 */
3261 || TransactionIdPrecedes(sxact->xmin,
3263 {
3264 PredXact->SxactGlobalXmin = sxact->xmin;
3266 }
3267 else if (TransactionIdEquals(sxact->xmin,
3270 }
3271 }
3272
3274}
3275
3276/*
3277 * ReleasePredicateLocks
3278 *
3279 * Releases predicate locks based on completion of the current transaction,
3280 * whether committed or rolled back. It can also be called for a read only
3281 * transaction when it becomes impossible for the transaction to become
3282 * part of a dangerous structure.
3283 *
3284 * We do nothing unless this is a serializable transaction.
3285 *
3286 * This method must ensure that shared memory hash tables are cleaned
3287 * up in some relatively timely fashion.
3288 *
3289 * If this transaction is committing and is holding any predicate locks,
3290 * it must be added to a list of completed serializable transactions still
3291 * holding locks.
3292 *
3293 * If isReadOnlySafe is true, then predicate locks are being released before
3294 * the end of the transaction because MySerializableXact has been determined
3295 * to be RO_SAFE. In non-parallel mode we can release it completely, but
3296 * in parallel mode we partially release the SERIALIZABLEXACT and keep it
3297 * around until the end of the transaction, allowing each backend to clear its
3298 * MySerializableXact variable and benefit from the optimization in its own
3299 * time.
3300 */
3301void
3302ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
3303{
3304 bool partiallyReleasing = false;
3305 bool needToClear;
3306 SERIALIZABLEXACT *roXact;
3307 dlist_mutable_iter iter;
3308
3309 /*
3310 * We can't trust XactReadOnly here, because a transaction which started
3311 * as READ WRITE can show as READ ONLY later, e.g., within
3312 * subtransactions. We want to flag a transaction as READ ONLY if it
3313 * commits without writing so that de facto READ ONLY transactions get the
3314 * benefit of some RO optimizations, so we will use this local variable to
3315 * get some cleanup logic right which is based on whether the transaction
3316 * was declared READ ONLY at the top level.
3317 */
3318 bool topLevelIsDeclaredReadOnly;
3319
3320 /* We can't be both committing and releasing early due to RO_SAFE. */
3321 Assert(!(isCommit && isReadOnlySafe));
3322
3323 /* Are we at the end of a transaction, that is, a commit or abort? */
3324 if (!isReadOnlySafe)
3325 {
3326 /*
3327 * Parallel workers mustn't release predicate locks at the end of
3328 * their transaction. The leader will do that at the end of its
3329 * transaction.
3330 */
3331 if (IsParallelWorker())
3332 {
3334 return;
3335 }
3336
3337 /*
3338 * By the time the leader in a parallel query reaches end of
3339 * transaction, it has waited for all workers to exit.
3340 */
3342
3343 /*
3344 * If the leader in a parallel query earlier stashed a partially
3345 * released SERIALIZABLEXACT for final clean-up at end of transaction
3346 * (because workers might still have been accessing it), then it's
3347 * time to restore it.
3348 */
3350 {
3355 }
3356 }
3357
3359 {
3361 return;
3362 }
3363
3364 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3365
3366 /*
3367 * If the transaction is committing, but it has been partially released
3368 * already, then treat this as a roll back. It was marked as rolled back.
3369 */
3371 isCommit = false;
3372
3373 /*
3374 * If we're called in the middle of a transaction because we discovered
3375 * that the SXACT_FLAG_RO_SAFE flag was set, then we'll partially release
3376 * it (that is, release the predicate locks and conflicts, but not the
3377 * SERIALIZABLEXACT itself) if we're the first backend to have noticed.
3378 */
3379 if (isReadOnlySafe && IsInParallelMode())
3380 {
3381 /*
3382 * The leader needs to stash a pointer to it, so that it can
3383 * completely release it at end-of-transaction.
3384 */
3385 if (!IsParallelWorker())
3387
3388 /*
3389 * The first backend to reach this condition will partially release
3390 * the SERIALIZABLEXACT. All others will just clear their
3391 * backend-local state so that they stop doing SSI checks for the rest
3392 * of the transaction.
3393 */
3395 {
3396 LWLockRelease(SerializableXactHashLock);
3398 return;
3399 }
3400 else
3401 {
3403 partiallyReleasing = true;
3404 /* ... and proceed to perform the partial release below. */
3405 }
3406 }
3408 Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
3412
3413 /* may not be serializable during COMMIT/ROLLBACK PREPARED */
3415
3416 /* We'd better not already be on the cleanup list. */
3418
3419 topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
3420
3421 /*
3422 * We don't hold XidGenLock lock here, assuming that TransactionId is
3423 * atomic!
3424 *
3425 * If this value is changing, we don't care that much whether we get the
3426 * old or new value -- it is just used to determine how far
3427 * SxactGlobalXmin must advance before this transaction can be fully
3428 * cleaned up. The worst that could happen is we wait for one more
3429 * transaction to complete before freeing some RAM; correctness of visible
3430 * behavior is not affected.
3431 */
3433
3434 /*
3435 * If it's not a commit it's either a rollback or a read-only transaction
3436 * flagged SXACT_FLAG_RO_SAFE, and we can clear our locks immediately.
3437 */
3438 if (isCommit)
3439 {
3442 /* Recognize implicit read-only transaction (commit without write). */
3443 if (!MyXactDidWrite)
3445 }
3446 else
3447 {
3448 /*
3449 * The DOOMED flag indicates that we intend to roll back this
3450 * transaction and so it should not cause serialization failures for
3451 * other transactions that conflict with it. Note that this flag might
3452 * already be set, if another backend marked this transaction for
3453 * abort.
3454 *
3455 * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
3456 * has been called, and so the SerializableXact is eligible for
3457 * cleanup. This means it should not be considered when calculating
3458 * SxactGlobalXmin.
3459 */
3462
3463 /*
3464 * If the transaction was previously prepared, but is now failing due
3465 * to a ROLLBACK PREPARED or (hopefully very rare) error after the
3466 * prepare, clear the prepared flag. This simplifies conflict
3467 * checking.
3468 */
3469 MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
3470 }
3471
3472 if (!topLevelIsDeclaredReadOnly)
3473 {
3475 if (--(PredXact->WritableSxactCount) == 0)
3476 {
3477 /*
3478 * Release predicate locks and rw-conflicts in for all committed
3479 * transactions. There are no longer any transactions which might
3480 * conflict with the locks and no chance for new transactions to
3481 * overlap. Similarly, existing conflicts in can't cause pivots,
3482 * and any conflicts in which could have completed a dangerous
3483 * structure would already have caused a rollback, so any
3484 * remaining ones must be benign.
3485 */
3487 }
3488 }
3489 else
3490 {
3491 /*
3492 * Read-only transactions: clear the list of transactions that might
3493 * make us unsafe. Note that we use 'inLink' for the iteration as
3494 * opposed to 'outLink' for the r/w xacts.
3495 */
3497 {
3498 RWConflict possibleUnsafeConflict =
3499 dlist_container(RWConflictData, inLink, iter.cur);
3500
3501 Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
3502 Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
3503
3504 ReleaseRWConflict(possibleUnsafeConflict);
3505 }
3506 }
3507
3508 /* Check for conflict out to old committed transactions. */
3509 if (isCommit
3512 {
3513 /*
3514 * we don't know which old committed transaction we conflicted with,
3515 * so be conservative and use FirstNormalSerCommitSeqNo here
3516 */
3520 }
3521
3522 /*
3523 * Release all outConflicts to committed transactions. If we're rolling
3524 * back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
3525 * previously committed transactions.
3526 */
3528 {
3529 RWConflict conflict =
3530 dlist_container(RWConflictData, outLink, iter.cur);
3531
3532 if (isCommit
3534 && SxactIsCommitted(conflict->sxactIn))
3535 {
3540 }
3541
3542 if (!isCommit
3543 || SxactIsCommitted(conflict->sxactIn)
3545 ReleaseRWConflict(conflict);
3546 }
3547
3548 /*
3549 * Release all inConflicts from committed and read-only transactions. If
3550 * we're rolling back, clear them all.
3551 */
3553 {
3554 RWConflict conflict =
3555 dlist_container(RWConflictData, inLink, iter.cur);
3556
3557 if (!isCommit
3558 || SxactIsCommitted(conflict->sxactOut)
3559 || SxactIsReadOnly(conflict->sxactOut))
3560 ReleaseRWConflict(conflict);
3561 }
3562
3563 if (!topLevelIsDeclaredReadOnly)
3564 {
3565 /*
3566 * Remove ourselves from the list of possible conflicts for concurrent
3567 * READ ONLY transactions, flagging them as unsafe if we have a
3568 * conflict out. If any are waiting DEFERRABLE transactions, wake them
3569 * up if they are known safe or known unsafe.
3570 */
3572 {
3573 RWConflict possibleUnsafeConflict =
3574 dlist_container(RWConflictData, outLink, iter.cur);
3575
3576 roXact = possibleUnsafeConflict->sxactIn;
3577 Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
3578 Assert(SxactIsReadOnly(roXact));
3579
3580 /* Mark conflicted if necessary. */
3581 if (isCommit
3585 <= roXact->SeqNo.lastCommitBeforeSnapshot))
3586 {
3587 /*
3588 * This releases possibleUnsafeConflict (as well as all other
3589 * possible conflicts for roXact)
3590 */
3591 FlagSxactUnsafe(roXact);
3592 }
3593 else
3594 {
3595 ReleaseRWConflict(possibleUnsafeConflict);
3596
3597 /*
3598 * If we were the last possible conflict, flag it safe. The
3599 * transaction can now safely release its predicate locks (but
3600 * that transaction's backend has to do that itself).
3601 */
3603 roXact->flags |= SXACT_FLAG_RO_SAFE;
3604 }
3605
3606 /*
3607 * Wake up the process for a waiting DEFERRABLE transaction if we
3608 * now know it's either safe or conflicted.
3609 */
3610 if (SxactIsDeferrableWaiting(roXact) &&
3611 (SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
3612 ProcSendSignal(roXact->pgprocno);
3613 }
3614 }
3615
3616 /*
3617 * Check whether it's time to clean up old transactions. This can only be
3618 * done when the last serializable transaction with the oldest xmin among
3619 * serializable transactions completes. We then find the "new oldest"
3620 * xmin and purge any transactions which finished before this transaction
3621 * was launched.
3622 *
3623 * For parallel queries in read-only transactions, it might run twice. We
3624 * only release the reference on the first call.
3625 */
3626 needToClear = false;
3627 if ((partiallyReleasing ||
3631 {
3633 if (--(PredXact->SxactGlobalXminCount) == 0)
3634 {
3636 needToClear = true;
3637 }
3638 }
3639
3640 LWLockRelease(SerializableXactHashLock);
3641
3642 LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3643
3644 /* Add this to the list of transactions to check for later cleanup. */
3645 if (isCommit)
3648
3649 /*
3650 * If we're releasing a RO_SAFE transaction in parallel mode, we'll only
3651 * partially release it. That's necessary because other backends may have
3652 * a reference to it. The leader will release the SERIALIZABLEXACT itself
3653 * at the end of the transaction after workers have stopped running.
3654 */
3655 if (!isCommit)
3657 isReadOnlySafe && IsInParallelMode(),
3658 false);
3659
3660 LWLockRelease(SerializableFinishedListLock);
3661
3662 if (needToClear)
3664
3666}
3667
3668static void
/*
 * NOTE(review): original line 3669 (the function name line, presumably
 * "ReleasePredicateLocksLocal(void)") and line 3671 (likely the reset of the
 * backend-local MySerializableXact pointer) were dropped by the extraction --
 * confirm against upstream predicate.c.
 */
3670{
 /* Forget that this backend wrote anything in the current transaction. */
3672 MyXactDidWrite = false;
3673
3674 /* Delete per-transaction lock table */
3675 if (LocalPredicateLockHash != NULL)
3676 {
/*
 * NOTE(review): lines 3677-3678 are missing; presumably they destroy
 * LocalPredicateLockHash and reset the pointer to NULL -- verify upstream.
 */
3679 }
3680}
3681
3682/*
3683 * Clear old predicate locks, belonging to committed transactions that are no
3684 * longer interesting to any in-progress transaction.
3685 */
3686static void
3688{
3689 dlist_mutable_iter iter;
3690
3691 /*
3692 * Loop through finished transactions. They are in commit order, so we can
3693 * stop as soon as we find one that's still interesting.
3694 */
3695 LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3696 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3698 {
3699 SERIALIZABLEXACT *finishedSxact =
3700 dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
3701
3705 {
3706 /*
3707 * This transaction committed before any in-progress transaction
3708 * took its snapshot. It's no longer interesting.
3709 */
3710 LWLockRelease(SerializableXactHashLock);
3711 dlist_delete_thoroughly(&finishedSxact->finishedLink);
3712 ReleaseOneSerializableXact(finishedSxact, false, false);
3713 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3714 }
3715 else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
3716 && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
3717 {
3718 /*
3719 * Any active transactions that took their snapshot before this
3720 * transaction committed are read-only, so we can clear part of
3721 * its state.
3722 */
3723 LWLockRelease(SerializableXactHashLock);
3724
3725 if (SxactIsReadOnly(finishedSxact))
3726 {
3727 /* A read-only transaction can be removed entirely */
3728 dlist_delete_thoroughly(&(finishedSxact->finishedLink));
3729 ReleaseOneSerializableXact(finishedSxact, false, false);
3730 }
3731 else
3732 {
3733 /*
3734 * A read-write transaction can only be partially cleared. We
3735 * need to keep the SERIALIZABLEXACT but can release the
3736 * SIREAD locks and conflicts in.
3737 */
3738 ReleaseOneSerializableXact(finishedSxact, true, false);
3739 }
3740
3742 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3743 }
3744 else
3745 {
3746 /* Still interesting. */
3747 break;
3748 }
3749 }
3750 LWLockRelease(SerializableXactHashLock);
3751
3752 /*
3753 * Loop through predicate locks on dummy transaction for summarized data.
3754 */
3755 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
3757 {
3758 PREDICATELOCK *predlock =
3759 dlist_container(PREDICATELOCK, xactLink, iter.cur);
3760 bool canDoPartialCleanup;
3761
3762 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3763 Assert(predlock->commitSeqNo != 0);
3765 canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
3766 LWLockRelease(SerializableXactHashLock);
3767
3768 /*
3769 * If this lock originally belonged to an old enough transaction, we
3770 * can release it.
3771 */
3772 if (canDoPartialCleanup)
3773 {
3774 PREDICATELOCKTAG tag;
3775 PREDICATELOCKTARGET *target;
3776 PREDICATELOCKTARGETTAG targettag;
3777 uint32 targettaghash;
3778 LWLock *partitionLock;
3779
3780 tag = predlock->tag;
3781 target = tag.myTarget;
3782 targettag = target->tag;
3783 targettaghash = PredicateLockTargetTagHashCode(&targettag);
3784 partitionLock = PredicateLockHashPartitionLock(targettaghash);
3785
3786 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3787
3788 dlist_delete(&(predlock->targetLink));
3789 dlist_delete(&(predlock->xactLink));
3790
3793 targettaghash),
3794 HASH_REMOVE, NULL);
3795 RemoveTargetIfNoLongerUsed(target, targettaghash);
3796
3797 LWLockRelease(partitionLock);
3798 }
3799 }
3800
3801 LWLockRelease(SerializablePredicateListLock);
3802 LWLockRelease(SerializableFinishedListLock);
3803}
3804
3805/*
3806 * This is the normal way to delete anything from any of the predicate
3807 * locking hash tables. Given a transaction which we know can be deleted:
3808 * delete all predicate locks held by that transaction and any predicate
3809 * lock targets which are now unreferenced by a lock; delete all conflicts
3810 * for the transaction; delete all xid values for the transaction; then
3811 * delete the transaction.
3812 *
3813 * When the partial flag is set, we can release all predicate locks and
3814 * in-conflict information -- we've established that there are no longer
3815 * any overlapping read write transactions for which this transaction could
3816 * matter -- but keep the transaction entry itself and any outConflicts.
3817 *
3818 * When the summarize flag is set, we've run short of room for sxact data
3819 * and must summarize to the SLRU. Predicate locks are transferred to a
3820 * dummy "old" transaction, with duplicate locks on a single target
3821 * collapsing to a single lock with the "latest" commitSeqNo from among
3822 * the conflicting locks.
3823 */
3824static void
3826 bool summarize)
3827{
3828 SERIALIZABLEXIDTAG sxidtag;
3829 dlist_mutable_iter iter;
3830
3831 Assert(sxact != NULL);
3832 Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
3833 Assert(partial || !SxactIsOnFinishedList(sxact));
3834 Assert(LWLockHeldByMe(SerializableFinishedListLock));
3835
3836 /*
3837 * First release all the predicate locks held by this xact (or transfer
3838 * them to OldCommittedSxact if summarize is true)
3839 */
3840 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
3841 if (IsInParallelMode())
3844 {
3845 PREDICATELOCK *predlock =
3846 dlist_container(PREDICATELOCK, xactLink, iter.cur);
3847 PREDICATELOCKTAG tag;
3848 PREDICATELOCKTARGET *target;
3849 PREDICATELOCKTARGETTAG targettag;
3850 uint32 targettaghash;
3851 LWLock *partitionLock;
3852
3853 tag = predlock->tag;
3854 target = tag.myTarget;
3855 targettag = target->tag;
3856 targettaghash = PredicateLockTargetTagHashCode(&targettag);
3857 partitionLock = PredicateLockHashPartitionLock(targettaghash);
3858
3859 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3860
3861 dlist_delete(&predlock->targetLink);
3862
3865 targettaghash),
3866 HASH_REMOVE, NULL);
3867 if (summarize)
3868 {
3869 bool found;
3870
3871 /* Fold into dummy transaction list. */
3875 targettaghash),
3876 HASH_ENTER_NULL, &found);
3877 if (!predlock)
3878 ereport(ERROR,
3879 (errcode(ERRCODE_OUT_OF_MEMORY),
3880 errmsg("out of shared memory"),
3881 errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
3882 if (found)
3883 {
3884 Assert(predlock->commitSeqNo != 0);
3886 if (predlock->commitSeqNo < sxact->commitSeqNo)
3887 predlock->commitSeqNo = sxact->commitSeqNo;
3888 }
3889 else
3890 {
3892 &predlock->targetLink);
3894 &predlock->xactLink);
3895 predlock->commitSeqNo = sxact->commitSeqNo;
3896 }
3897 }
3898 else
3899 RemoveTargetIfNoLongerUsed(target, targettaghash);
3900
3901 LWLockRelease(partitionLock);
3902 }
3903
3904 /*
3905 * Rather than retail removal, just re-init the head after we've run
3906 * through the list.
3907 */
3908 dlist_init(&sxact->predicateLocks);
3909
3910 if (IsInParallelMode())
3912 LWLockRelease(SerializablePredicateListLock);
3913
3914 sxidtag.xid = sxact->topXid;
3915 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3916
3917 /* Release all outConflicts (unless 'partial' is true) */
3918 if (!partial)
3919 {
3920 dlist_foreach_modify(iter, &sxact->outConflicts)
3921 {
3922 RWConflict conflict =
3923 dlist_container(RWConflictData, outLink, iter.cur);
3924
3925 if (summarize)
3927 ReleaseRWConflict(conflict);
3928 }
3929 }
3930
3931 /* Release all inConflicts. */
3932 dlist_foreach_modify(iter, &sxact->inConflicts)
3933 {
3934 RWConflict conflict =
3935 dlist_container(RWConflictData, inLink, iter.cur);
3936
3937 if (summarize)
3939 ReleaseRWConflict(conflict);
3940 }
3941
3942 /* Finally, get rid of the xid and the record of the transaction itself. */
3943 if (!partial)
3944 {
3945 if (sxidtag.xid != InvalidTransactionId)
3947 ReleasePredXact(sxact);
3948 }
3949
3950 LWLockRelease(SerializableXactHashLock);
3951}
3952
3953/*
3954 * Tests whether the given top level transaction is concurrent with
3955 * (overlaps) our current transaction.
3956 *
3957 * We need to identify the top level transaction for SSI, anyway, so pass
3958 * that to this function to save the overhead of checking the snapshot's
3959 * subxip array.
3960 */
3961static bool
/*
 * NOTE(review): line 3962 (the name line, presumably
 * "XidIsConcurrent(TransactionId xid)") and lines 3966-3967 (presumably
 * sanity Asserts on the passed xid) were dropped by the extraction --
 * confirm against upstream predicate.c.
 */
3963{
3964 Snapshot snap;
3965
3968
3969 snap = GetTransactionSnapshot();
3970
 /* Committed before our snapshot's xmin: visible to us, so not concurrent. */
3971 if (TransactionIdPrecedes(xid, snap->xmin))
3972 return false;
3973
 /* At or after our snapshot's xmax: started after snapshot, so concurrent. */
3974 if (TransactionIdFollowsOrEquals(xid, snap->xmax))
3975 return true;
3976
 /* In between: concurrent iff it was in progress when we took our snapshot. */
3977 return pg_lfind32(xid, snap->xip, snap->xcnt);
3978}
3979
3980bool
/*
 * NOTE(review): line 3981 (the name line, presumably
 * "CheckForSerializableConflictOutNeeded(Relation relation, Snapshot
 * snapshot)"), line 3987 (the condition guarding the block below, presumably
 * a SxactIsDoomed(MySerializableXact) test per the comment) and line 3990
 * (the errcode line of the ereport) were dropped by the extraction --
 * confirm against upstream predicate.c.
 */
3982{
 /* Fast exit when SSI conflict-out checking does not apply to this read. */
3983 if (!SerializationNeededForRead(relation, snapshot))
3984 return false;
3985
3986 /* Check if someone else has already decided that we need to die */
3988 {
3989 ereport(ERROR,
3991 errmsg("could not serialize access due to read/write dependencies among transactions"),
3992 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
3993 errhint("The transaction might succeed if retried.")));
3994 }
3995
 /* Caller should proceed with the full conflict-out check. */
3996 return true;
3997}
3998
3999/*
4000 * CheckForSerializableConflictOut
4001 * A table AM is reading a tuple that has been modified. If it determines
4002 * that the tuple version it is reading is not visible to us, it should
4003 * pass in the top level xid of the transaction that created it.
4004 * Otherwise, if it determines that it is visible to us but it has been
4005 * deleted or there is a newer version available due to an update, it
4006 * should pass in the top level xid of the modifying transaction.
4007 *
4008 * This function will check for overlap with our own transaction. If the given
4009 * xid is also serializable and the transactions overlap (i.e., they cannot see
4010 * each other's writes), then we have a conflict out.
4011 */
4012void
4014{
4015 SERIALIZABLEXIDTAG sxidtag;
4016 SERIALIZABLEXID *sxid;
4017 SERIALIZABLEXACT *sxact;
4018
4019 if (!SerializationNeededForRead(relation, snapshot))
4020 return;
4021
4022 /* Check if someone else has already decided that we need to die */
4024 {
4025 ereport(ERROR,
4027 errmsg("could not serialize access due to read/write dependencies among transactions"),
4028 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
4029 errhint("The transaction might succeed if retried.")));
4030 }
4032
4034 return;
4035
4036 /*
4037 * Find sxact or summarized info for the top level xid.
4038 */
4039 sxidtag.xid = xid;
4040 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4041 sxid = (SERIALIZABLEXID *)
4042 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4043 if (!sxid)
4044 {
4045 /*
4046 * Transaction not found in "normal" SSI structures. Check whether it
4047 * got pushed out to SLRU storage for "old committed" transactions.
4048 */
4049 SerCommitSeqNo conflictCommitSeqNo;
4050
4051 conflictCommitSeqNo = SerialGetMinConflictCommitSeqNo(xid);
4052 if (conflictCommitSeqNo != 0)
4053 {
4054 if (conflictCommitSeqNo != InvalidSerCommitSeqNo
4056 || conflictCommitSeqNo
4058 ereport(ERROR,
4060 errmsg("could not serialize access due to read/write dependencies among transactions"),
4061 errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
4062 errhint("The transaction might succeed if retried.")));
4063
4066 ereport(ERROR,
4068 errmsg("could not serialize access due to read/write dependencies among transactions"),
4069 errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
4070 errhint("The transaction might succeed if retried.")));
4071
4073 }
4074
4075 /* It's not serializable or otherwise not important. */
4076 LWLockRelease(SerializableXactHashLock);
4077 return;
4078 }
4079 sxact = sxid->myXact;
4080 Assert(TransactionIdEquals(sxact->topXid, xid));
4081 if (sxact == MySerializableXact || SxactIsDoomed(sxact))
4082 {
4083 /* Can't conflict with ourself or a transaction that will roll back. */
4084 LWLockRelease(SerializableXactHashLock);
4085 return;
4086 }
4087
4088 /*
4089 * We have a conflict out to a transaction which has a conflict out to a
4090 * summarized transaction. That summarized transaction must have
4091 * committed first, and we can't tell when it committed in relation to our
4092 * snapshot acquisition, so something needs to be canceled.
4093 */
4094 if (SxactHasSummaryConflictOut(sxact))
4095 {
4096 if (!SxactIsPrepared(sxact))
4097 {
4098 sxact->flags |= SXACT_FLAG_DOOMED;
4099 LWLockRelease(SerializableXactHashLock);
4100 return;
4101 }
4102 else
4103 {
4104 LWLockRelease(SerializableXactHashLock);
4105 ereport(ERROR,
4107 errmsg("could not serialize access due to read/write dependencies among transactions"),
4108 errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
4109 errhint("The transaction might succeed if retried.")));
4110 }
4111 }
4112
4113 /*
4114 * If this is a read-only transaction and the writing transaction has
4115 * committed, and it doesn't have a rw-conflict to a transaction which
4116 * committed before it, no conflict.
4117 */
4119 && SxactIsCommitted(sxact)
4121 && (!SxactHasConflictOut(sxact)
4123 {
4124 /* Read-only transaction will appear to run first. No conflict. */
4125 LWLockRelease(SerializableXactHashLock);
4126 return;
4127 }
4128
4129 if (!XidIsConcurrent(xid))
4130 {
4131 /* This write was already in our snapshot; no conflict. */
4132 LWLockRelease(SerializableXactHashLock);
4133 return;
4134 }
4135
4137 {
4138 /* We don't want duplicate conflict records in the list. */
4139 LWLockRelease(SerializableXactHashLock);
4140 return;
4141 }
4142
4143 /*
4144 * Flag the conflict. But first, if this conflict creates a dangerous
4145 * structure, ereport an error.
4146 */
4148 LWLockRelease(SerializableXactHashLock);
4149}
4150
4151/*
4152 * Check a particular target for rw-dependency conflict in. A subroutine of
4153 * CheckForSerializableConflictIn().
4154 */
4155static void
4157{
4158 uint32 targettaghash;
4159 LWLock *partitionLock;
4160 PREDICATELOCKTARGET *target;
4161 PREDICATELOCK *mypredlock = NULL;
4162 PREDICATELOCKTAG mypredlocktag;
4163 dlist_mutable_iter iter;
4164
4166
4167 /*
4168 * The same hash and LW lock apply to the lock target and the lock itself.
4169 */
4170 targettaghash = PredicateLockTargetTagHashCode(targettag);
4171 partitionLock = PredicateLockHashPartitionLock(targettaghash);
4172 LWLockAcquire(partitionLock, LW_SHARED);
4173 target = (PREDICATELOCKTARGET *)
4175 targettag, targettaghash,
4176 HASH_FIND, NULL);
4177 if (!target)
4178 {
4179 /* Nothing has this target locked; we're done here. */
4180 LWLockRelease(partitionLock);
4181 return;
4182 }
4183
4184 /*
4185 * Each lock for an overlapping transaction represents a conflict: a
4186 * rw-dependency in to this transaction.
4187 */
4188 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4189
4190 dlist_foreach_modify(iter, &target->predicateLocks)
4191 {
4192 PREDICATELOCK *predlock =
4193 dlist_container(PREDICATELOCK, targetLink, iter.cur);
4194 SERIALIZABLEXACT *sxact = predlock->tag.myXact;
4195
4196 if (sxact == MySerializableXact)
4197 {
4198 /*
4199 * If we're getting a write lock on a tuple, we don't need a
4200 * predicate (SIREAD) lock on the same tuple. We can safely remove
4201 * our SIREAD lock, but we'll defer doing so until after the loop
4202 * because that requires upgrading to an exclusive partition lock.
4203 *
4204 * We can't use this optimization within a subtransaction because
4205 * the subtransaction could roll back, and we would be left
4206 * without any lock at the top level.
4207 */
4208 if (!IsSubTransaction()
4209 && GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
4210 {
4211 mypredlock = predlock;
4212 mypredlocktag = predlock->tag;
4213 }
4214 }
4215 else if (!SxactIsDoomed(sxact)
4216 && (!SxactIsCommitted(sxact)
4218 sxact->finishedBefore))
4220 {
4221 LWLockRelease(SerializableXactHashLock);
4222 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4223
4224 /*
4225 * Re-check after getting exclusive lock because the other
4226 * transaction may have flagged a conflict.
4227 */
4228 if (!SxactIsDoomed(sxact)
4229 && (!SxactIsCommitted(sxact)
4231 sxact->finishedBefore))
4233 {
4235 }
4236
4237 LWLockRelease(SerializableXactHashLock);
4238 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4239 }
4240 }
4241 LWLockRelease(SerializableXactHashLock);
4242 LWLockRelease(partitionLock);
4243
4244 /*
4245 * If we found one of our own SIREAD locks to remove, remove it now.
4246 *
4247 * At this point our transaction already has a RowExclusiveLock on the
4248 * relation, so we are OK to drop the predicate lock on the tuple, if
4249 * found, without fearing that another write against the tuple will occur
4250 * before the MVCC information makes it to the buffer.
4251 */
4252 if (mypredlock != NULL)
4253 {
4254 uint32 predlockhashcode;
4255 PREDICATELOCK *rmpredlock;
4256
4257 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
4258 if (IsInParallelMode())
4260 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4261 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4262
4263 /*
4264 * Remove the predicate lock from shared memory, if it wasn't removed
4265 * while the locks were released. One way that could happen is from
4266 * autovacuum cleaning up an index.
4267 */
4269 (&mypredlocktag, targettaghash);
4270 rmpredlock = (PREDICATELOCK *)
4272 &mypredlocktag,
4273 predlockhashcode,
4274 HASH_FIND, NULL);
4275 if (rmpredlock != NULL)
4276 {
4277 Assert(rmpredlock == mypredlock);
4278
4279 dlist_delete(&(mypredlock->targetLink));
4280 dlist_delete(&(mypredlock->xactLink));
4281
4282 rmpredlock = (PREDICATELOCK *)
4284 &mypredlocktag,
4285 predlockhashcode,
4286 HASH_REMOVE, NULL);
4287 Assert(rmpredlock == mypredlock);
4288
4289 RemoveTargetIfNoLongerUsed(target, targettaghash);
4290 }
4291
4292 LWLockRelease(SerializableXactHashLock);
4293 LWLockRelease(partitionLock);
4294 if (IsInParallelMode())
4296 LWLockRelease(SerializablePredicateListLock);
4297
4298 if (rmpredlock != NULL)
4299 {
4300 /*
4301 * Remove entry in local lock table if it exists. It's OK if it
4302 * doesn't exist; that means the lock was transferred to a new
4303 * target by a different backend.
4304 */
4306 targettag, targettaghash,
4307 HASH_REMOVE, NULL);
4308
4309 DecrementParentLocks(targettag);
4310 }
4311 }
4312}
4313
4314/*
4315 * CheckForSerializableConflictIn
4316 * We are writing the given tuple. If that indicates a rw-conflict
4317 * in from another serializable transaction, take appropriate action.
4318 *
4319 * Skip checking for any granularity for which a parameter is missing.
4320 *
4321 * A tuple update or delete is in conflict if we have a predicate lock
4322 * against the relation or page in which the tuple exists, or against the
4323 * tuple itself.
4324 */
4325void
4327{
4328 PREDICATELOCKTARGETTAG targettag;
4329
4330 if (!SerializationNeededForWrite(relation))
4331 return;
4332
4333 /* Check if someone else has already decided that we need to die */
4335 ereport(ERROR,
4337 errmsg("could not serialize access due to read/write dependencies among transactions"),
4338 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
4339 errhint("The transaction might succeed if retried.")));
4340
4341 /*
4342 * We're doing a write which might cause rw-conflicts now or later.
4343 * Memorize that fact.
4344 */
4345 MyXactDidWrite = true;
4346
4347 /*
4348 * It is important that we check for locks from the finest granularity to
4349 * the coarsest granularity, so that granularity promotion doesn't cause
4350 * us to miss a lock. The new (coarser) lock will be acquired before the
4351 * old (finer) locks are released.
4352 *
4353 * It is not possible to take and hold a lock across the checks for all
4354 * granularities because each target could be in a separate partition.
4355 */
4356 if (tid != NULL)
4357 {
4359 relation->rd_locator.dbOid,
4360 relation->rd_id,
4363 CheckTargetForConflictsIn(&targettag);
4364 }
4365
4366 if (blkno != InvalidBlockNumber)
4367 {
4369 relation->rd_locator.dbOid,
4370 relation->rd_id,
4371 blkno);
4372 CheckTargetForConflictsIn(&targettag);
4373 }
4374
4376 relation->rd_locator.dbOid,
4377 relation->rd_id);
4378 CheckTargetForConflictsIn(&targettag);
4379}
4380
4381/*
4382 * CheckTableForSerializableConflictIn
4383 * The entire table is going through a DDL-style logical mass delete
4384 * like TRUNCATE or DROP TABLE. If that causes a rw-conflict in from
4385 * another serializable transaction, take appropriate action.
4386 *
4387 * While these operations do not operate entirely within the bounds of
4388 * snapshot isolation, they can occur inside a serializable transaction, and
4389 * will logically occur after any reads which saw rows which were destroyed
4390 * by these operations, so we do what we can to serialize properly under
4391 * SSI.
4392 *
4393 * The relation passed in must be a heap relation. Any predicate lock of any
4394 * granularity on the heap will cause a rw-conflict in to this transaction.
4395 * Predicate locks on indexes do not matter because they only exist to guard
4396 * against conflicting inserts into the index, and this is a mass *delete*.
4397 * When a table is truncated or dropped, the index will also be truncated
4398 * or dropped, and we'll deal with locks on the index when that happens.
4399 *
4400 * Dropping or truncating a table also needs to drop any existing predicate
4401 * locks on heap tuples or pages, because they're about to go away. This
4402 * should be done before altering the predicate locks because the transaction
4403 * could be rolled back because of a conflict, in which case the lock changes
4404 * are not needed. (At the moment, we don't actually bother to drop the
4405 * existing locks on a dropped or truncated table at the moment. That might
4406 * lead to some false positives, but it doesn't seem worth the trouble.)
4407 */
4408void
4410{
4411 HASH_SEQ_STATUS seqstat;
4412 PREDICATELOCKTARGET *target;
4413 Oid dbId;
4414 Oid heapId;
4415 int i;
4416
4417 /*
4418 * Bail out quickly if there are no serializable transactions running.
4419 * It's safe to check this without taking locks because the caller is
4420 * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
4421 * would matter here can be acquired while that is held.
4422 */
4424 return;
4425
4426 if (!SerializationNeededForWrite(relation))
4427 return;
4428
4429 /*
4430 * We're doing a write which might cause rw-conflicts now or later.
4431 * Memorize that fact.
4432 */
4433 MyXactDidWrite = true;
4434
4435 Assert(relation->rd_index == NULL); /* not an index relation */
4436
4437 dbId = relation->rd_locator.dbOid;
4438 heapId = relation->rd_id;
4439
4440 LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
4441 for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
4443 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4444
4445 /* Scan through target list */
4447
4448 while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
4449 {
4450 dlist_mutable_iter iter;
4451
4452 /*
4453 * Check whether this is a target which needs attention.
4454 */
4455 if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
4456 continue; /* wrong relation id */
4457 if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
4458 continue; /* wrong database id */
4459
4460 /*
4461 * Loop through locks for this target and flag conflicts.
4462 */
4463 dlist_foreach_modify(iter, &target->predicateLocks)
4464 {
4465 PREDICATELOCK *predlock =
4466 dlist_container(PREDICATELOCK, targetLink, iter.cur);
4467
4468 if (predlock->tag.myXact != MySerializableXact
4470 {
4472 }
4473 }
4474 }
4475
4476 /* Release locks in reverse order */
4477 LWLockRelease(SerializableXactHashLock);
4478 for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
4480 LWLockRelease(SerializablePredicateListLock);
4481}
4482
4483
4484/*
4485 * Flag a rw-dependency between two serializable transactions.
4486 *
4487 * The caller is responsible for ensuring that we have a LW lock on
4488 * the transaction hash table.
4489 */
4490static void
4492{
4493 Assert(reader != writer);
4494
4495 /* First, see if this conflict causes failure. */
4497
4498 /* Actually do the conflict flagging. */
4499 if (reader == OldCommittedSxact)
4501 else if (writer == OldCommittedSxact)
4503 else
4504 SetRWConflict(reader, writer);
4505}
4506
4507/*----------------------------------------------------------------------------
4508 * We are about to add a RW-edge to the dependency graph - check that we don't
4509 * introduce a dangerous structure by doing so, and abort one of the
4510 * transactions if so.
4511 *
4512 * A serialization failure can only occur if there is a dangerous structure
4513 * in the dependency graph:
4514 *
4515 * Tin ------> Tpivot ------> Tout
4516 * rw rw
4517 *
4518 * Furthermore, Tout must commit first.
4519 *
4520 * One more optimization is that if Tin is declared READ ONLY (or commits
4521 * without writing), we can only have a problem if Tout committed before Tin
4522 * acquired its snapshot.
4523 *----------------------------------------------------------------------------
4524 */
4525static void
4527 SERIALIZABLEXACT *writer)
4528{
4529 bool failure;
4530
4531 Assert(LWLockHeldByMe(SerializableXactHashLock));
4532
4533 failure = false;
4534
4535 /*------------------------------------------------------------------------
4536 * Check for already-committed writer with rw-conflict out flagged
4537 * (conflict-flag on W means that T2 committed before W):
4538 *
4539 * R ------> W ------> T2
4540 * rw rw
4541 *
4542 * That is a dangerous structure, so we must abort. (Since the writer
4543 * has already committed, we must be the reader)
4544 *------------------------------------------------------------------------
4545 */
4546 if (SxactIsCommitted(writer)
4547 && (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
4548 failure = true;
4549
4550 /*------------------------------------------------------------------------
4551 * Check whether the writer has become a pivot with an out-conflict
4552 * committed transaction (T2), and T2 committed first:
4553 *
4554 * R ------> W ------> T2
4555 * rw rw
4556 *
4557 * Because T2 must've committed first, there is no anomaly if:
4558 * - the reader committed before T2
4559 * - the writer committed before T2
4560 * - the reader is a READ ONLY transaction and the reader was concurrent
4561 * with T2 (= reader acquired its snapshot before T2 committed)
4562 *
4563 * We also handle the case that T2 is prepared but not yet committed
4564 * here. In that case T2 has already checked for conflicts, so if it
4565 * commits first, making the above conflict real, it's too late for it
4566 * to abort.
4567 *------------------------------------------------------------------------
4568 */
4569 if (!failure && SxactHasSummaryConflictOut(writer))
4570 failure = true;
4571 else if (!failure)
4572 {
4573 dlist_iter iter;
4574
4575 dlist_foreach(iter, &writer->outConflicts)
4576 {
4577 RWConflict conflict =
4578 dlist_container(RWConflictData, outLink, iter.cur);
4579 SERIALIZABLEXACT *t2 = conflict->sxactIn;
4580
4581 if (SxactIsPrepared(t2)
4582 && (!SxactIsCommitted(reader)
4583 || t2->prepareSeqNo <= reader->commitSeqNo)
4584 && (!SxactIsCommitted(writer)
4585 || t2->prepareSeqNo <= writer->commitSeqNo)
4586 && (!SxactIsReadOnly(reader)
4587 || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
4588 {
4589 failure = true;
4590 break;
4591 }
4592 }
4593 }
4594
4595 /*------------------------------------------------------------------------
4596 * Check whether the reader has become a pivot with a writer
4597 * that's committed (or prepared):
4598 *
4599 * T0 ------> R ------> W
4600 * rw rw
4601 *
4602 * Because W must've committed first for an anomaly to occur, there is no
4603 * anomaly if:
4604 * - T0 committed before the writer
4605 * - T0 is READ ONLY, and overlaps the writer
4606 *------------------------------------------------------------------------
4607 */
4608 if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
4609 {
4610 if (SxactHasSummaryConflictIn(reader))
4611 {
4612 failure = true;
4613 }
4614 else
4615 {
4616 dlist_iter iter;
4617
4618 /*
4619 * The unconstify is needed as we have no const version of
4620 * dlist_foreach().
4621 */
4622 dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts)
4623 {
4624 const RWConflict conflict =
4625 dlist_container(RWConflictData, inLink, iter.cur);
4626 const SERIALIZABLEXACT *t0 = conflict->sxactOut;
4627
4628 if (!SxactIsDoomed(t0)
4629 && (!SxactIsCommitted(t0)
4630 || t0->commitSeqNo >= writer->prepareSeqNo)
4631 && (!SxactIsReadOnly(t0)
4632 || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
4633 {
4634 failure = true;
4635 break;
4636 }
4637 }
4638 }
4639 }
4640
4641 if (failure)
4642 {
4643 /*
4644 * We have to kill a transaction to avoid a possible anomaly from
4645 * occurring. If the writer is us, we can just ereport() to cause a
4646 * transaction abort. Otherwise we flag the writer for termination,
4647 * causing it to abort when it tries to commit. However, if the writer
4648 * is a prepared transaction, already prepared, we can't abort it
4649 * anymore, so we have to kill the reader instead.
4650 */
4651 if (MySerializableXact == writer)
4652 {
4653 LWLockRelease(SerializableXactHashLock);
4654 ereport(ERROR,
4656 errmsg("could not serialize access due to read/write dependencies among transactions"),
4657 errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
4658 errhint("The transaction might succeed if retried.")));
4659 }
4660 else if (SxactIsPrepared(writer))
4661 {
4662 LWLockRelease(SerializableXactHashLock);
4663
4664 /* if we're not the writer, we have to be the reader */
4665 Assert(MySerializableXact == reader);
4666 ereport(ERROR,
4668 errmsg("could not serialize access due to read/write dependencies among transactions"),
4669 errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
4670 errhint("The transaction might succeed if retried.")));
4671 }
4672 writer->flags |= SXACT_FLAG_DOOMED;
4673 }
4674}
4675
4676/*
4677 * PreCommit_CheckForSerializationFailure
4678 * Check for dangerous structures in a serializable transaction
4679 * at commit.
4680 *
4681 * We're checking for a dangerous structure as each conflict is recorded.
4682 * The only way we could have a problem at commit is if this is the "out"
4683 * side of a pivot, and neither the "in" side nor the pivot has yet
4684 * committed.
4685 *
4686 * If a dangerous structure is found, the pivot (the near conflict) is
4687 * marked for death, because rolling back another transaction might mean
4688 * that we fail without ever making progress. This transaction is
4689 * committing writes, so letting it commit ensures progress. If we
4690 * canceled the far conflict, it might immediately fail again on retry.
4691 */
4692void
4694{
4695 dlist_iter near_iter;
4696
4698 return;
4699
4701
4702 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4703
4704 /*
4705 * Check if someone else has already decided that we need to die. Since
4706 * we set our own DOOMED flag when partially releasing, ignore in that
4707 * case.
4708 */
4711 {
4712 LWLockRelease(SerializableXactHashLock);
4713 ereport(ERROR,
4715 errmsg("could not serialize access due to read/write dependencies among transactions"),
4716 errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
4717 errhint("The transaction might succeed if retried.")));
4718 }
4719
4721 {
4722 RWConflict nearConflict =
4723 dlist_container(RWConflictData, inLink, near_iter.cur);
4724
4725 if (!SxactIsCommitted(nearConflict->sxactOut)
4726 && !SxactIsDoomed(nearConflict->sxactOut))
4727 {
4728 dlist_iter far_iter;
4729
4730 dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts)
4731 {
4732 RWConflict farConflict =
4733 dlist_container(RWConflictData, inLink, far_iter.cur);
4734
4735 if (farConflict->sxactOut == MySerializableXact
4736 || (!SxactIsCommitted(farConflict->sxactOut)
4737 && !SxactIsReadOnly(farConflict->sxactOut)
4738 && !SxactIsDoomed(farConflict->sxactOut)))
4739 {
4740 /*
4741 * Normally, we kill the pivot transaction to make sure we
4742 * make progress if the failing transaction is retried.
4743 * However, we can't kill it if it's already prepared, so
4744 * in that case we commit suicide instead.
4745 */
4746 if (SxactIsPrepared(nearConflict->sxactOut))
4747 {
4748 LWLockRelease(SerializableXactHashLock);
4749 ereport(ERROR,
4751 errmsg("could not serialize access due to read/write dependencies among transactions"),
4752 errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
4753 errhint("The transaction might succeed if retried.")));
4754 }
4755 nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
4756 break;
4757 }
4758 }
4759 }
4760 }
4761
4764
4765 LWLockRelease(SerializableXactHashLock);
4766}
4767
4768/*------------------------------------------------------------------------*/
4769
4770/*
4771 * Two-phase commit support
4772 */
4773
4774/*
4775 * AtPrepare_Locks
4776 * Do the preparatory work for a PREPARE: make 2PC state file
4777 * records for all predicate locks currently held.
4778 */
4779void
4781{
4782 SERIALIZABLEXACT *sxact;
4784 TwoPhasePredicateXactRecord *xactRecord;
4785 TwoPhasePredicateLockRecord *lockRecord;
4786 dlist_iter iter;
4787
4788 sxact = MySerializableXact;
4789 xactRecord = &(record.data.xactRecord);
4790 lockRecord = &(record.data.lockRecord);
4791
4793 return;
4794
4795 /* Generate an xact record for our SERIALIZABLEXACT */
4797 xactRecord->xmin = MySerializableXact->xmin;
4798 xactRecord->flags = MySerializableXact->flags;
4799
4800 /*
4801 * Note that we don't include the list of conflicts in our out in the
4802 * statefile, because new conflicts can be added even after the
4803 * transaction prepares. We'll just make a conservative assumption during
4804 * recovery instead.
4805 */
4806
4808 &record, sizeof(record));
4809
4810 /*
4811 * Generate a lock record for each lock.
4812 *
4813 * To do this, we need to walk the predicate lock list in our sxact rather
4814 * than using the local predicate lock table because the latter is not
4815 * guaranteed to be accurate.
4816 */
4817 LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
4818
4819 /*
4820 * No need to take sxact->perXactPredicateListLock in parallel mode
4821 * because there cannot be any parallel workers running while we are
4822 * preparing a transaction.
4823 */
4825
4826 dlist_foreach(iter, &sxact->predicateLocks)
4827 {
4828 PREDICATELOCK *predlock =
4829 dlist_container(PREDICATELOCK, xactLink, iter.cur);
4830
4832 lockRecord->target = predlock->tag.myTarget->tag;
4833
4835 &record, sizeof(record));
4836 }
4837
4838 LWLockRelease(SerializablePredicateListLock);
4839}
4840
4841/*
4842 * PostPrepare_Locks
4843 * Clean up after successful PREPARE. Unlike the non-predicate
4844 * lock manager, we do not need to transfer locks to a dummy
4845 * PGPROC because our SERIALIZABLEXACT will stay around
4846 * anyway. We only need to clean up our local state.
4847 */
4848void
4850{
4852 return;
4853
4855
4858
4861
4863 MyXactDidWrite = false;
4864}
4865
4866/*
4867 * PredicateLockTwoPhaseFinish
4868 * Release a prepared transaction's predicate locks once it
4869 * commits or aborts.
4870 */
4871void
4873{
4874 SERIALIZABLEXID *sxid;
4875 SERIALIZABLEXIDTAG sxidtag;
4876
4877 sxidtag.xid = xid;
4878
4879 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4880 sxid = (SERIALIZABLEXID *)
4881 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4882 LWLockRelease(SerializableXactHashLock);
4883
4884 /* xid will not be found if it wasn't a serializable transaction */
4885 if (sxid == NULL)
4886 return;
4887
4888 /* Release its locks */
4889 MySerializableXact = sxid->myXact;
4890 MyXactDidWrite = true; /* conservatively assume that we wrote
4891 * something */
4892 ReleasePredicateLocks(isCommit, false);
4893}
4894
4895/*
4896 * Re-acquire a predicate lock belonging to a transaction that was prepared.
4897 */
4898void
4900 void *recdata, uint32 len)
4901{
4903
4905
4906 record = (TwoPhasePredicateRecord *) recdata;
4907
4909 (record->type == TWOPHASEPREDICATERECORD_LOCK));
4910
4911 if (record->type == TWOPHASEPREDICATERECORD_XACT)
4912 {
4913 /* Per-transaction record. Set up a SERIALIZABLEXACT. */
4914 TwoPhasePredicateXactRecord *xactRecord;
4915 SERIALIZABLEXACT *sxact;
4916 SERIALIZABLEXID *sxid;
4917 SERIALIZABLEXIDTAG sxidtag;
4918 bool found;
4919
4920 xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;
4921
4922 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4923 sxact = CreatePredXact();
4924 if (!sxact)
4925 ereport(ERROR,
4926 (errcode(ERRCODE_OUT_OF_MEMORY),
4927 errmsg("out of shared memory")));
4928
4929 /* vxid for a prepared xact is INVALID_PROC_NUMBER/xid; no pid */
4932 sxact->pid = 0;
4934
4935 /* a prepared xact hasn't committed yet */
4939
4941
4942 /*
4943 * Don't need to track this; no transactions running at the time the
4944 * recovered xact started are still active, except possibly other
4945 * prepared xacts and we don't care whether those are RO_SAFE or not.
4946 */
4948
4949 dlist_init(&(sxact->predicateLocks));
4951
4952 sxact->topXid = xid;
4953 sxact->xmin = xactRecord->xmin;
4954 sxact->flags = xactRecord->flags;
4955 Assert(SxactIsPrepared(sxact));
4956 if (!SxactIsReadOnly(sxact))
4957 {
4961 }
4962
4963 /*
4964 * We don't know whether the transaction had any conflicts or not, so
4965 * we'll conservatively assume that it had both a conflict in and a
4966 * conflict out, and represent that with the summary conflict flags.
4967 */
4968 dlist_init(&(sxact->outConflicts));
4969 dlist_init(&(sxact->inConflicts));
4972
4973 /* Register the transaction's xid */
4974 sxidtag.xid = xid;
4976 &sxidtag,
4977 HASH_ENTER, &found);
4978 Assert(sxid != NULL);
4979 Assert(!found);
4980 sxid->myXact = (SERIALIZABLEXACT *) sxact;
4981
4982 /*
4983 * Update global xmin. Note that this is a special case compared to
4984 * registering a normal transaction, because the global xmin might go
4985 * backwards. That's OK, because until recovery is over we're not
4986 * going to complete any transactions or create any non-prepared
4987 * transactions, so there's no danger of throwing away.
4988 */
4991 {
4992 PredXact->SxactGlobalXmin = sxact->xmin;
4995 }
4997 {
5000 }
5001
5002 LWLockRelease(SerializableXactHashLock);
5003 }
5004 else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
5005 {
5006 /* Lock record. Recreate the PREDICATELOCK */
5007 TwoPhasePredicateLockRecord *lockRecord;
5008 SERIALIZABLEXID *sxid;
5009 SERIALIZABLEXACT *sxact;
5010 SERIALIZABLEXIDTAG sxidtag;
5011 uint32 targettaghash;
5012
5013 lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
5014 targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);
5015
5016 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
5017 sxidtag.xid = xid;
5018 sxid = (SERIALIZABLEXID *)
5019 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
5020 LWLockRelease(SerializableXactHashLock);
5021
5022 Assert(sxid != NULL);
5023 sxact = sxid->myXact;
5025
5026 CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
5027 }
5028}
5029
5030/*
5031 * Prepare to share the current SERIALIZABLEXACT with parallel workers.
5032 * Return a handle object that can be used by AttachSerializableXact() in a
5033 * parallel worker.
5034 */
5037{
5038 return MySerializableXact;
5039}
5040
5041/*
5042 * Allow parallel workers to import the leader's SERIALIZABLEXACT.
5043 */
5044void
5046{
5047
5049
5053}
bool ParallelContextActive(void)
Definition: parallel.c:1020
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define unconstify(underlying_type, expr)
Definition: c.h:1202
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:204
#define Assert(condition)
Definition: c.h:815
int64_t int64
Definition: c.h:485
uint16_t uint16
Definition: c.h:487
uint32_t uint32
Definition: c.h:488
uint32 LocalTransactionId
Definition: c.h:611
uint32 TransactionId
Definition: c.h:609
size_t Size
Definition: c.h:562
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1341
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:783
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1157
int errdetail_internal(const char *fmt,...)
Definition: elog.c:1230
int errdetail(const char *fmt,...)
Definition: elog.c:1203
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define DEBUG2
Definition: elog.h:29
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
int MyProcPid
Definition: globals.c:46
ProcNumber MyProcNumber
Definition: globals.c:89
bool IsUnderPostmaster
Definition: globals.c:119
int MaxBackends
Definition: globals.c:145
int serializable_buffers
Definition: globals.c:164
#define newval
GucSource
Definition: guc.h:108
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
@ HASH_ENTER_NULL
Definition: hsearch.h:116
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_FIXED_SIZE
Definition: hsearch.h:105
#define HASH_PARTITION
Definition: hsearch.h:92
static dlist_node * dlist_pop_head_node(dlist_head *head)
Definition: ilist.h:450
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
#define dlist_head_element(type, membername, lhead)
Definition: ilist.h:603
static void dlist_delete_thoroughly(dlist_node *node)
Definition: ilist.h:416
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dlist_node_init(dlist_node *node)
Definition: ilist.h:325
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
#define IsParallelWorker()
Definition: parallel.h:60
FILE * output
long val
Definition: informix.c:689
static bool success
Definition: initdb.c:186
int i
Definition: isn.c:72
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
exit(1)
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:77
#define SetInvalidVirtualTransactionId(vxid)
Definition: lock.h:74
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1893
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1937
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
@ LWTRANCHE_SERIAL_SLRU
Definition: lwlock.h:214
@ LWTRANCHE_SERIAL_BUFFER
Definition: lwlock.h:185
@ LWTRANCHE_PER_XACT_PREDICATE_LIST
Definition: lwlock.h:202
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
#define NUM_PREDICATELOCK_PARTITIONS
Definition: lwlock.h:101
void * palloc(Size size)
Definition: mcxt.c:1317
#define InvalidPid
Definition: miscadmin.h:32
const void size_t len
const void * data
static bool pg_lfind32(uint32 key, const uint32 *base, uint32 nelem)
Definition: pg_lfind.h:153
static rewind_source * source
Definition: pg_rewind.c:89
#define ERRCODE_T_R_SERIALIZATION_FAILURE
Definition: pgbench.c:76
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
PredicateLockData * GetPredicateLockStatusData(void)
Definition: predicate.c:1435
void CheckPointPredicate(void)
Definition: predicate.c:1041
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3134
static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
Definition: predicate.c:2381
static HTAB * PredicateLockHash
Definition: predicate.c:398
static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact)
Definition: predicate.c:666
#define PredicateLockTargetTagHashCode(predicatelocktargettag)
Definition: predicate.c:303
static void SetNewSxactGlobalXmin(void)
Definition: predicate.c:3241
void PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
Definition: predicate.c:4872
#define SerialPage(xid)
Definition: predicate.c:343
static void ReleasePredXact(SERIALIZABLEXACT *sxact)
Definition: predicate.c:596
void SetSerializableTransactionSnapshot(Snapshot snapshot, VirtualTransactionId *sourcevxid, int sourcepid)
Definition: predicate.c:1712
static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
Definition: predicate.c:610
static bool PredicateLockingNeededForRelation(Relation relation)
Definition: predicate.c:498
static bool SerializationNeededForRead(Relation relation, Snapshot snapshot)
Definition: predicate.c:516
static Snapshot GetSafeSnapshot(Snapshot origSnapshot)
Definition: predicate.c:1548
#define SxactIsCommitted(sxact)
Definition: predicate.c:277
static SerialControl serialControl
Definition: predicate.c:354
void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
Definition: predicate.c:2589