predicate.c
1 /*-------------------------------------------------------------------------
2  *
3  * predicate.c
4  * POSTGRES predicate locking
5  * to support full serializable transaction isolation
6  *
7  *
8  * The approach taken is to implement Serializable Snapshot Isolation (SSI)
9  * as initially described in this paper:
10  *
11  * Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
12  * Serializable isolation for snapshot databases.
13  * In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
14  * international conference on Management of data,
15  * pages 729-738, New York, NY, USA. ACM.
16  * http://doi.acm.org/10.1145/1376616.1376690
17  *
18  * and further elaborated in Cahill's doctoral thesis:
19  *
20  * Michael James Cahill. 2009.
21  * Serializable Isolation for Snapshot Databases.
22  * Sydney Digital Theses.
23  * University of Sydney, School of Information Technologies.
24  * http://hdl.handle.net/2123/5353
25  *
26  *
27  * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
28  * locks, which are so different from normal locks that a distinct set of
29  * structures is required to handle them. They are needed to detect
30  * rw-conflicts when the read happens before the write. (When the write
31  * occurs first, the reading transaction can check for a conflict by
32  * examining the MVCC data.)
33  *
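 * For example, suppose transaction T1 reads a row and a concurrent
 * transaction T2 later updates it.  T2 cannot discover T1's read by
 * examining the row's MVCC data, so the SIREAD lock left behind by T1 is
 * what allows the rw-conflict from T1 to T2 to be detected.
 *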
34  * (1) Besides tuples actually read, they must cover ranges of tuples
35  * which would have been read based on the predicate. This will
36  * require modelling the predicates through locks against database
37  * objects such as pages, index ranges, or entire tables.
38  *
39  * (2) They must be kept in RAM for quick access. Because of this, it
40  * isn't possible to always maintain tuple-level granularity -- when
41  * the space allocated to store these approaches exhaustion, a
42  * request for a lock may need to scan for situations where a single
43  * transaction holds many fine-grained locks which can be coalesced
44  * into a single coarser-grained lock.
45  *
46  * (3) They never block anything; they are more like flags than locks
47  * in that regard, although they refer to database objects and are
48  * used to identify rw-conflicts with normal write locks.
49  *
50  * (4) While they are associated with a transaction, they must survive
51  * a successful COMMIT of that transaction, and remain until all
52  * overlapping transactions complete. This even means that they
53  * must survive termination of the transaction's process. If a
54  * top level transaction is rolled back, however, it is immediately
55  * flagged so that it can be ignored, and its SIREAD locks can be
56  * released any time after that.
57  *
58  * (5) The only transactions which create SIREAD locks or check for
59  * conflicts with them are serializable transactions.
60  *
61  * (6) When a write lock for a top level transaction is found to cover
62  * an existing SIREAD lock for the same transaction, the SIREAD lock
63  * can be deleted.
64  *
65  * (7) A write from a serializable transaction must ensure that an xact
66  * record exists for the transaction, with the same lifespan (until
67  * all concurrent transactions complete or the transaction is rolled
68  * back) so that rw-dependencies to that transaction can be
69  * detected.
70  *
71  * We use an optimization for read-only transactions. Under certain
72  * circumstances, a read-only transaction's snapshot can be shown to
73  * never have conflicts with other transactions. This is referred to
74  * as a "safe" snapshot (and one known not to be is "unsafe").
75  * However, it can't be determined whether a snapshot is safe until
76  * all concurrent read/write transactions complete.
77  *
78  * Once a read-only transaction is known to have a safe snapshot, it
79  * can release its predicate locks and exempt itself from further
80  * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
81  * on safe snapshots, waiting as necessary for one to be available.
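 *
 * For example, a transaction started with
 *     BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;
 * may block in GetSafeSnapshot() below until all concurrent read/write
 * serializable transactions complete, but once running it is exempt from
 * all further SSI bookkeeping.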
82  *
83  *
84  * Lightweight locks to manage access to the predicate locking shared
85  * memory objects must be taken in this order, and should be released in
86  * reverse order:
87  *
88  * SerializableFinishedListLock
89  * - Protects the list of transactions which have completed but which
90  * may yet matter because they overlap still-active transactions.
91  *
92  * SerializablePredicateLockListLock
93  * - Protects the linked list of locks held by a transaction. Note
94  * that the locks themselves are also covered by the partition
95  * locks of their respective lock targets; this lock only affects
96  * the linked list connecting the locks related to a transaction.
97  * - All transactions share this single lock (with no partitioning).
98  * - There is never a need for a process other than the one running
99  * an active transaction to walk the list of locks held by that
100  * transaction.
101  * - It is relatively infrequent that another process needs to
102  * modify the list for a transaction, but it does happen for such
103  * things as index page splits for pages with predicate locks and
104  * freeing of predicate locked pages by a vacuum process. When
105  * removing a lock in such cases, the lock itself contains the
106  * pointers needed to remove it from the list. When adding a
107  * lock in such cases, the lock can be added using the anchor in
108  * the transaction structure. Neither requires walking the list.
109  * - Cleaning up the list for a terminated transaction is sometimes
110  * not done on a retail basis, in which case no lock is required.
111  * - Due to the above, a process accessing its active transaction's
112  * list always uses a shared lock, regardless of whether it is
113  * walking or maintaining the list. This improves concurrency
114  * for the common access patterns.
115  * - A process which needs to alter the list of a transaction other
116  * than its own active transaction must acquire an exclusive
117  * lock.
118  *
119  * PredicateLockHashPartitionLock(hashcode)
120  * - The same lock protects a target, all locks on that target, and
121  * the linked list of locks on the target.
122  * - When more than one is needed, acquire in ascending address order.
123  * - When all are needed (rare), acquire in ascending index order with
124  * PredicateLockHashPartitionLockByIndex(index).
125  *
126  * SerializableXactHashLock
127  * - Protects both PredXact and SerializableXidHash.
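 *
 * For example, a code path that needs both SerializablePredicateLockListLock
 * and a PredicateLockHashPartitionLock must take the former first and
 * release it last; and when several partition locks are needed at once,
 * they are taken in ascending address order as noted above.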
128  *
129  *
130  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
131  * Portions Copyright (c) 1994, Regents of the University of California
132  *
133  *
134  * IDENTIFICATION
135  * src/backend/storage/lmgr/predicate.c
136  *
137  *-------------------------------------------------------------------------
138  */
139 /*
140  * INTERFACE ROUTINES
141  *
142  * housekeeping for setting up shared memory predicate lock structures
143  * InitPredicateLocks(void)
144  * PredicateLockShmemSize(void)
145  *
146  * predicate lock reporting
147  * GetPredicateLockStatusData(void)
148  * PageIsPredicateLocked(Relation relation, BlockNumber blkno)
149  *
150  * predicate lock maintenance
151  * GetSerializableTransactionSnapshot(Snapshot snapshot)
152  * SetSerializableTransactionSnapshot(Snapshot snapshot,
153  * VirtualTransactionId *sourcevxid)
154  * RegisterPredicateLockingXid(void)
155  * PredicateLockRelation(Relation relation, Snapshot snapshot)
156  * PredicateLockPage(Relation relation, BlockNumber blkno,
157  * Snapshot snapshot)
158  * PredicateLockTuple(Relation relation, HeapTuple tuple,
159  * Snapshot snapshot)
160  * PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
161  * BlockNumber newblkno)
162  * PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
163  * BlockNumber newblkno)
164  * TransferPredicateLocksToHeapRelation(Relation relation)
165  * ReleasePredicateLocks(bool isCommit)
166  *
167  * conflict detection (may also trigger rollback)
168  * CheckForSerializableConflictOut(bool visible, Relation relation,
169  * HeapTupleData *tup, Buffer buffer,
170  * Snapshot snapshot)
171  * CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
172  * Buffer buffer)
173  * CheckTableForSerializableConflictIn(Relation relation)
174  *
175  * final rollback checking
176  * PreCommit_CheckForSerializationFailure(void)
177  *
178  * two-phase commit support
179  * AtPrepare_PredicateLocks(void);
180  * PostPrepare_PredicateLocks(TransactionId xid);
181  * PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
182  * predicatelock_twophase_recover(TransactionId xid, uint16 info,
183  * void *recdata, uint32 len);
184  */
185 
186 #include "postgres.h"
187 
188 #include "access/htup_details.h"
189 #include "access/slru.h"
190 #include "access/subtrans.h"
191 #include "access/transam.h"
192 #include "access/twophase.h"
193 #include "access/twophase_rmgr.h"
194 #include "access/xact.h"
195 #include "access/xlog.h"
196 #include "miscadmin.h"
197 #include "pgstat.h"
198 #include "storage/bufmgr.h"
199 #include "storage/predicate.h"
200 #include "storage/predicate_internals.h"
201 #include "storage/proc.h"
202 #include "storage/procarray.h"
203 #include "utils/rel.h"
204 #include "utils/snapmgr.h"
205 #include "utils/tqual.h"
206 
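/*
 * Illustrative sketch (not part of the original source): roughly how a
 * reader and a writer use the interface routines listed above.  The reader
 * records its read with an SIREAD lock and checks for rw-conflicts out; the
 * writer checks for rw-conflicts in against existing SIREAD locks.  The
 * helper names are hypothetical and shown for exposition only.
 */
#ifdef NOT_USED
static void
ExampleReadTuple(Relation relation, HeapTuple tuple, Buffer buffer,
				 Snapshot snapshot, bool visible)
{
	/* Did a concurrent serializable xact already write this tuple? */
	CheckForSerializableConflictOut(visible, relation, tuple, buffer, snapshot);

	if (visible)
		PredicateLockTuple(relation, tuple, snapshot);	/* leave SIREAD lock */
}

static void
ExampleWriteTuple(Relation relation, HeapTuple tuple, Buffer buffer)
{
	/* Did a concurrent serializable xact already read this tuple? */
	CheckForSerializableConflictIn(relation, tuple, buffer);
}
#endif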
207 /* Uncomment the next line to test the graceful degradation code. */
208 /* #define TEST_OLDSERXID */
209 
210 /*
211  * Test the most selective fields first, for performance.
212  *
213  * a is covered by b if all of the following hold:
214  * 1) a.database = b.database
215  * 2) a.relation = b.relation
216  * 3) b.offset is invalid (b is page-granularity or higher)
217  * 4) either of the following:
218  * 4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
219  * or 4b) a.offset is invalid and b.page is invalid (a is
220  * page-granularity and b is relation-granularity)
221  */
222 #define TargetTagIsCoveredBy(covered_target, covering_target) \
223  ((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */ \
224  GET_PREDICATELOCKTARGETTAG_RELATION(covering_target)) \
225  && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) == \
226  InvalidOffsetNumber) /* (3) */ \
227  && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) != \
228  InvalidOffsetNumber) /* (4a) */ \
229  && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
230  GET_PREDICATELOCKTARGETTAG_PAGE(covered_target))) \
231  || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
232  InvalidBlockNumber) /* (4b) */ \
233  && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target) \
234  != InvalidBlockNumber))) \
235  && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) == /* (1) */ \
236  GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
237 
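/*
 * Illustrative sketch (not part of the original source): under the rules
 * above, a tuple-granularity target is covered by a page-granularity target
 * on the same page, but not vice versa.  This assumes the
 * SET_PREDICATELOCKTARGETTAG_* setter macros from
 * storage/predicate_internals.h; the helper name is hypothetical.
 */
#ifdef NOT_USED
static void
ExampleTargetTagCoverage(void)
{
	PREDICATELOCKTARGETTAG tuple_tag;
	PREDICATELOCKTARGETTAG page_tag;

	/* tuple (7,2) and its containing page 7, in relation 24576 of DB 16384 */
	SET_PREDICATELOCKTARGETTAG_TUPLE(tuple_tag, 16384, 24576, 7, 2);
	SET_PREDICATELOCKTARGETTAG_PAGE(page_tag, 16384, 24576, 7);

	Assert(TargetTagIsCoveredBy(tuple_tag, page_tag)); /* case (4a) applies */
	Assert(!TargetTagIsCoveredBy(page_tag, tuple_tag)); /* fails test (3) */
}
#endif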
238 /*
239  * The predicate locking target and lock shared hash tables are partitioned to
240  * reduce contention. To determine which partition a given target belongs to,
241  * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
242  * apply one of these macros.
243  * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
244  */
245 #define PredicateLockHashPartition(hashcode) \
246  ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
247 #define PredicateLockHashPartitionLock(hashcode) \
248  (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
249  PredicateLockHashPartition(hashcode)].lock)
250 #define PredicateLockHashPartitionLockByIndex(i) \
251  (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
252 
253 #define NPREDICATELOCKTARGETENTS() \
254  mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
255 
256 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
257 
258 /*
259  * Note that a sxact is marked "prepared" once it has passed
260  * PreCommit_CheckForSerializationFailure, even if it isn't using
261  * 2PC. This is the point at which it can no longer be aborted.
262  *
263  * The PREPARED flag remains set after commit, so SxactIsCommitted
264  * implies SxactIsPrepared.
265  */
266 #define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
267 #define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
268 #define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
269 #define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
270 #define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
271 #define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
272 #define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
273 /*
274  * The following macro actually means that the specified transaction has a
275  * conflict out *to a transaction which committed ahead of it*. It's hard
276  * to get that into a name of a reasonable length.
277  */
278 #define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
279 #define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
280 #define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
281 #define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
282 
283 /*
284  * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
285  *
286  * To avoid unnecessary recomputations of the hash code, we try to do this
287  * just once per function, and then pass it around as needed. Aside from
288  * passing the hashcode to hash_search_with_hash_value(), we can extract
289  * the lock partition number from the hashcode.
290  */
291 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
292  get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
293 
294 /*
295  * Given a predicate lock tag, and the hash for its target,
296  * compute the lock hash.
297  *
298  * To make the hash code also depend on the transaction, we xor the sxid
299  * struct's address into the hash code, left-shifted so that the
300  * partition-number bits don't change. Since this is only a hash, we
301  * don't care if we lose high-order bits of the address; use an
302  * intermediate variable to suppress cast-pointer-to-int warnings.
303  */
304 #define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
305  ((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
306  << LOG2_NUM_PREDICATELOCK_PARTITIONS)
307 
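/*
 * Illustrative sketch (not part of the original source): how the macros
 * above cooperate.  The target tag's hash selects the partition lock, and
 * the per-lock hash xors the owning sxact's address in above the partition
 * bits, so a PREDICATELOCK always lands in the same partition as its
 * PREDICATELOCKTARGET.  The helper name is hypothetical.
 */
#ifdef NOT_USED
static void
ExamplePartitionLookup(const PREDICATELOCKTARGETTAG *targettag,
					   const PREDICATELOCKTAG *locktag)
{
	uint32		targettaghash = PredicateLockTargetTagHashCode(targettag);
	uint32		lockhash;

	lockhash = PredicateLockHashCodeFromTargetHashCode(locktag, targettaghash);

	/* Both hash codes select the same partition ... */
	Assert(PredicateLockHashPartition(lockhash) ==
		   PredicateLockHashPartition(targettaghash));

	/* ... so one partition LWLock covers the target and all locks on it. */
	LWLockAcquire(PredicateLockHashPartitionLock(targettaghash), LW_SHARED);
	/* look up the target and walk its list of locks here */
	LWLockRelease(PredicateLockHashPartitionLock(targettaghash));
}
#endif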
308 
309 /*
310  * The SLRU buffer area through which we access the old xids.
311  */
312 static SlruCtlData OldSerXidSlruCtlData;
313 
314 #define OldSerXidSlruCtl (&OldSerXidSlruCtlData)
315 
316 #define OLDSERXID_PAGESIZE BLCKSZ
317 #define OLDSERXID_ENTRYSIZE sizeof(SerCommitSeqNo)
318 #define OLDSERXID_ENTRIESPERPAGE (OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)
319 
320 /*
321  * Set maximum pages based on the number needed to track all transactions.
322  */
323 #define OLDSERXID_MAX_PAGE (MaxTransactionId / OLDSERXID_ENTRIESPERPAGE)
324 
325 #define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)
326 
327 #define OldSerXidValue(slotno, xid) (*((SerCommitSeqNo *) \
328  (OldSerXidSlruCtl->shared->page_buffer[slotno] + \
329  ((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))
330 
331 #define OldSerXidPage(xid) (((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE)
332 
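/*
 * Worked example (illustrative; assumes the default BLCKSZ of 8192): a
 * SerCommitSeqNo is 8 bytes, so OLDSERXID_ENTRIESPERPAGE is 8192 / 8 = 1024
 * entries.  Xid 10000 is therefore stored on page 10000 / 1024 = 9, at
 * entry 10000 % 1024 = 784 within that page's buffer.
 */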
333 typedef struct OldSerXidControlData
334 {
335  int headPage; /* newest initialized page */
336  TransactionId headXid; /* newest valid Xid in the SLRU */
337  TransactionId tailXid; /* oldest xmin we might be interested in */
338 } OldSerXidControlData;
339 
340 typedef struct OldSerXidControlData *OldSerXidControl;
341 
342 static OldSerXidControl oldSerXidControl;
343 
344 /*
345  * When the oldest committed transaction on the "finished" list is moved to
346  * SLRU, its predicate locks will be moved to this "dummy" transaction,
347  * collapsing duplicate targets. When a duplicate is found, the later
348  * commitSeqNo is used.
349  */
351 
352 
353 /*
354  * These configuration variables are used to set the predicate lock table size
355  * and to control promotion of predicate locks to coarser granularity in an
356  * attempt to degrade performance (mostly as false positive serialization
357  * failures) gracefully in the face of memory pressure.
358  */
359 int max_predicate_locks_per_xact; /* set by guc.c */
360 int max_predicate_locks_per_relation; /* set by guc.c */
361 int max_predicate_locks_per_page; /* set by guc.c */
362 
363 /*
364  * This provides a list of objects in order to track transactions
365  * participating in predicate locking. Entries in the list are fixed size,
366  * and reside in shared memory. The memory address of an entry must remain
367  * fixed during its lifetime. The list will be protected from concurrent
368  * update externally; no provision is made in this code to manage that. The
369  * number of entries in the list, and the size allowed for each entry is
370  * fixed upon creation.
371  */
373 
374 /*
375  * This provides a pool of RWConflict data elements to use in conflict lists
376  * between transactions.
377  */
379 
380 /*
381  * The predicate locking hash tables are in shared memory.
382  * Each backend keeps pointers to them.
383  */
388 
389 /*
390  * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
391  * this entry, you can ensure that there's enough scratch space available for
392  * inserting one entry in the hash table. This is an otherwise-invalid tag.
393  */
394 static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
397 
398 /*
399  * The local hash table used to determine when to combine multiple fine-
400  * grained locks into a single coarser-grained lock.
401  */
403 
404 /*
405  * Keep a pointer to the currently-running serializable transaction (if any)
406  * for quick reference. Also, remember if we have written anything that could
407  * cause a rw-conflict.
408  */
409 static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
410 static bool MyXactDidWrite = false;
411 
412 /* local functions */
413 
414 static SERIALIZABLEXACT *CreatePredXact(void);
415 static void ReleasePredXact(SERIALIZABLEXACT *sxact);
416 static SERIALIZABLEXACT *FirstPredXact(void);
418 
419 static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
420 static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
421 static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
422 static void ReleaseRWConflict(RWConflict conflict);
423 static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);
424 
425 static bool OldSerXidPagePrecedesLogically(int p, int q);
426 static void OldSerXidInit(void);
427 static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
430 
431 static uint32 predicatelock_hash(const void *key, Size keysize);
432 static void SummarizeOldestCommittedSxact(void);
433 static Snapshot GetSafeSnapshot(Snapshot snapshot);
435  VirtualTransactionId *sourcevxid,
436  int sourcepid);
437 static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
439  PREDICATELOCKTARGETTAG *parent);
440 static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
441 static void RemoveScratchTarget(bool lockheld);
442 static void RestoreScratchTarget(bool lockheld);
444  uint32 targettaghash);
445 static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
446 static int MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag);
448 static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
449 static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
450  uint32 targettaghash,
451  SERIALIZABLEXACT *sxact);
452 static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
454  PREDICATELOCKTARGETTAG newtargettag,
455  bool removeOld);
456 static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
457 static void DropAllPredicateLocksFromTable(Relation relation,
458  bool transfer);
459 static void SetNewSxactGlobalXmin(void);
460 static void ClearOldPredicateLocks(void);
461 static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
462  bool summarize);
463 static bool XidIsConcurrent(TransactionId xid);
464 static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
465 static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
467  SERIALIZABLEXACT *writer);
468 
469 
470 /*------------------------------------------------------------------------*/
471 
472 /*
473  * Does this relation participate in predicate locking? Temporary and system
474  * relations are exempt, as are materialized views.
475  */
476 static inline bool
478 {
479  return !(relation->rd_id < FirstBootstrapObjectId ||
480  RelationUsesLocalBuffers(relation) ||
481  relation->rd_rel->relkind == RELKIND_MATVIEW);
482 }
483 
484 /*
485  * When a public interface method is called for a read, this is the test to
486  * see if we should do a quick return.
487  *
488  * Note: this function has side-effects! If this transaction has been flagged
489  * as RO-safe since the last call, we release all predicate locks and reset
490  * MySerializableXact. That makes subsequent calls return quickly.
491  *
492  * This is marked as 'inline' to eliminate the function call overhead in the
493  * common case that serialization is not needed.
494  */
495 static inline bool
497 {
498  /* Nothing to do if this is not a serializable transaction */
499  if (MySerializableXact == InvalidSerializableXact)
500  return false;
501 
502  /*
503  * Don't acquire locks or conflict when scanning with a special snapshot.
504  * This excludes things like CLUSTER and REINDEX. They use the wholesale
505  * functions TransferPredicateLocksToHeapRelation() and
506  * CheckTableForSerializableConflictIn() to participate in serialization,
507  * but the scans involved don't need serialization.
508  */
509  if (!IsMVCCSnapshot(snapshot))
510  return false;
511 
512  /*
513  * Check if we have just become "RO-safe". If we have, immediately release
514  * all locks as they're not needed anymore. This also resets
515  * MySerializableXact, so that subsequent calls to this function can exit
516  * quickly.
517  *
518  * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
519  * commit without having conflicts out to an earlier snapshot, thus
520  * ensuring that no conflicts are possible for this transaction.
521  */
522  if (SxactIsROSafe(MySerializableXact))
523  {
524  ReleasePredicateLocks(false);
525  return false;
526  }
527 
528  /* Check if the relation doesn't participate in predicate locking */
529  if (!PredicateLockingNeededForRelation(relation))
530  return false;
531 
532  return true; /* no excuse to skip predicate locking */
533 }
534 
535 /*
536  * Like SerializationNeededForRead(), but called on writes.
537  * The logic is the same, but there is no snapshot and we can't be RO-safe.
538  */
539 static inline bool
541 {
542  /* Nothing to do if this is not a serializable transaction */
543  if (MySerializableXact == InvalidSerializableXact)
544  return false;
545 
546  /* Check if the relation doesn't participate in predicate locking */
547  if (!PredicateLockingNeededForRelation(relation))
548  return false;
549 
550  return true; /* no excuse to skip predicate locking */
551 }
552 
553 
554 /*------------------------------------------------------------------------*/
555 
556 /*
557  * These functions are a simple implementation of a list for this specific
558  * type of struct. If there is ever a generalized shared memory list, we
559  * should probably switch to that.
560  */
561 static SERIALIZABLEXACT *
563 {
564  PredXactListElement ptle;
565 
566  ptle = (PredXactListElement)
567  SHMQueueNext(&PredXact->availableList,
568  &PredXact->availableList,
570  if (!ptle)
571  return NULL;
572 
573  SHMQueueDelete(&ptle->link);
574  SHMQueueInsertBefore(&PredXact->activeList, &ptle->link);
575  return &ptle->sxact;
576 }
577 
578 static void
580 {
581  PredXactListElement ptle;
582 
583  Assert(ShmemAddrIsValid(sxact));
584 
585  ptle = (PredXactListElement)
586  (((char *) sxact)
589  SHMQueueDelete(&ptle->link);
590  SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
591 }
592 
593 static SERIALIZABLEXACT *
595 {
596  PredXactListElement ptle;
597 
598  ptle = (PredXactListElement)
599  SHMQueueNext(&PredXact->activeList,
600  &PredXact->activeList,
602  if (!ptle)
603  return NULL;
604 
605  return &ptle->sxact;
606 }
607 
608 static SERIALIZABLEXACT *
610 {
611  PredXactListElement ptle;
612 
613  Assert(ShmemAddrIsValid(sxact));
614 
615  ptle = (PredXactListElement)
616  (((char *) sxact)
619  ptle = (PredXactListElement)
620  SHMQueueNext(&PredXact->activeList,
621  &ptle->link,
623  if (!ptle)
624  return NULL;
625 
626  return &ptle->sxact;
627 }
628 
629 /*------------------------------------------------------------------------*/
630 
631 /*
632  * These functions manage primitive access to the RWConflict pool and lists.
633  */
634 static bool
636 {
637  RWConflict conflict;
638 
639  Assert(reader != writer);
640 
641  /* Check the ends of the purported conflict first. */
642  if (SxactIsDoomed(reader)
643  || SxactIsDoomed(writer)
644  || SHMQueueEmpty(&reader->outConflicts)
645  || SHMQueueEmpty(&writer->inConflicts))
646  return false;
647 
648  /* A conflict is possible; walk the list to find out. */
649  conflict = (RWConflict)
650  SHMQueueNext(&reader->outConflicts,
651  &reader->outConflicts,
652  offsetof(RWConflictData, outLink));
653  while (conflict)
654  {
655  if (conflict->sxactIn == writer)
656  return true;
657  conflict = (RWConflict)
658  SHMQueueNext(&reader->outConflicts,
659  &conflict->outLink,
660  offsetof(RWConflictData, outLink));
661  }
662 
663  /* No conflict found. */
664  return false;
665 }
666 
667 static void
669 {
670  RWConflict conflict;
671 
672  Assert(reader != writer);
673  Assert(!RWConflictExists(reader, writer));
674 
675  conflict = (RWConflict)
676  SHMQueueNext(&RWConflictPool->availableList,
677  &RWConflictPool->availableList,
678  offsetof(RWConflictData, outLink));
679  if (!conflict)
680  ereport(ERROR,
681  (errcode(ERRCODE_OUT_OF_MEMORY),
682  errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
683  errhint("You might need to run fewer transactions at a time or increase max_connections.")));
684 
685  SHMQueueDelete(&conflict->outLink);
686 
687  conflict->sxactOut = reader;
688  conflict->sxactIn = writer;
689  SHMQueueInsertBefore(&reader->outConflicts, &conflict->outLink);
690  SHMQueueInsertBefore(&writer->inConflicts, &conflict->inLink);
691 }
692 
693 static void
695  SERIALIZABLEXACT *activeXact)
696 {
697  RWConflict conflict;
698 
699  Assert(roXact != activeXact);
700  Assert(SxactIsReadOnly(roXact));
701  Assert(!SxactIsReadOnly(activeXact));
702 
703  conflict = (RWConflict)
704  SHMQueueNext(&RWConflictPool->availableList,
705  &RWConflictPool->availableList,
706  offsetof(RWConflictData, outLink));
707  if (!conflict)
708  ereport(ERROR,
709  (errcode(ERRCODE_OUT_OF_MEMORY),
710  errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
711  errhint("You might need to run fewer transactions at a time or increase max_connections.")));
712 
713  SHMQueueDelete(&conflict->outLink);
714 
715  conflict->sxactOut = activeXact;
716  conflict->sxactIn = roXact;
718  &conflict->outLink);
720  &conflict->inLink);
721 }
722 
723 static void
725 {
726  SHMQueueDelete(&conflict->inLink);
727  SHMQueueDelete(&conflict->outLink);
728  SHMQueueInsertBefore(&RWConflictPool->availableList, &conflict->outLink);
729 }
730 
731 static void
733 {
734  RWConflict conflict,
735  nextConflict;
736 
737  Assert(SxactIsReadOnly(sxact));
738  Assert(!SxactIsROSafe(sxact));
739 
740  sxact->flags |= SXACT_FLAG_RO_UNSAFE;
741 
742  /*
743  * We know this isn't a safe snapshot, so we can stop looking for other
744  * potential conflicts.
745  */
746  conflict = (RWConflict)
748  &sxact->possibleUnsafeConflicts,
749  offsetof(RWConflictData, inLink));
750  while (conflict)
751  {
752  nextConflict = (RWConflict)
754  &conflict->inLink,
755  offsetof(RWConflictData, inLink));
756 
757  Assert(!SxactIsReadOnly(conflict->sxactOut));
758  Assert(sxact == conflict->sxactIn);
759 
760  ReleaseRWConflict(conflict);
761 
762  conflict = nextConflict;
763  }
764 }
765 
766 /*------------------------------------------------------------------------*/
767 
768 /*
769  * We will work on the page range of 0..OLDSERXID_MAX_PAGE.
770  * Compares using wraparound logic, as is required by slru.c.
771  */
772 static bool
774 {
775  int diff;
776 
777  /*
778  * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
779  * be in the range 0..OLDSERXID_MAX_PAGE.
780  */
781  Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
782  Assert(q >= 0 && q <= OLDSERXID_MAX_PAGE);
783 
784  diff = p - q;
785  if (diff >= ((OLDSERXID_MAX_PAGE + 1) / 2))
786  diff -= OLDSERXID_MAX_PAGE + 1;
787  else if (diff < -((int) (OLDSERXID_MAX_PAGE + 1) / 2))
788  diff += OLDSERXID_MAX_PAGE + 1;
789  return diff < 0;
790 }
791 
792 /*
793  * Initialize for the tracking of old serializable committed xids.
794  */
795 static void
797 {
798  bool found;
799 
800  /*
801  * Set up SLRU management of the pg_serial data.
802  */
804  SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
805  NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial",
807  /* Override default assumption that writes should be fsync'd */
808  OldSerXidSlruCtl->do_fsync = false;
809 
810  /*
811  * Create or attach to the OldSerXidControl structure.
812  */
813  oldSerXidControl = (OldSerXidControl)
814  ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);
815 
816  Assert(found == IsUnderPostmaster);
817  if (!found)
818  {
819  /*
820  * Set control information to reflect empty SLRU.
821  */
822  oldSerXidControl->headPage = -1;
823  oldSerXidControl->headXid = InvalidTransactionId;
824  oldSerXidControl->tailXid = InvalidTransactionId;
825  }
826 }
827 
828 /*
829  * Record a committed read write serializable xid and the minimum
830  * commitSeqNo of any transactions to which this xid had a rw-conflict out.
831  * An invalid seqNo means that there were no conflicts out from xid.
832  */
833 static void
834 OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
835 {
837  int targetPage;
838  int slotno;
839  int firstZeroPage;
840  bool isNewPage;
841 
843 
844  targetPage = OldSerXidPage(xid);
845 
846  LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
847 
848  /*
849  * If no serializable transactions are active, there shouldn't be anything
850  * to push out to the SLRU. Hitting this assert would mean there's
851  * something wrong with the earlier cleanup logic.
852  */
853  tailXid = oldSerXidControl->tailXid;
854  Assert(TransactionIdIsValid(tailXid));
855 
856  /*
857  * If the SLRU is currently unused, zero out the whole active region from
858  * tailXid to headXid before taking it into use. Otherwise zero out only
859  * any new pages that enter the tailXid-headXid range as we advance
860  * headXid.
861  */
862  if (oldSerXidControl->headPage < 0)
863  {
864  firstZeroPage = OldSerXidPage(tailXid);
865  isNewPage = true;
866  }
867  else
868  {
869  firstZeroPage = OldSerXidNextPage(oldSerXidControl->headPage);
870  isNewPage = OldSerXidPagePrecedesLogically(oldSerXidControl->headPage,
871  targetPage);
872  }
873 
874  if (!TransactionIdIsValid(oldSerXidControl->headXid)
875  || TransactionIdFollows(xid, oldSerXidControl->headXid))
876  oldSerXidControl->headXid = xid;
877  if (isNewPage)
878  oldSerXidControl->headPage = targetPage;
879 
880  if (isNewPage)
881  {
882  /* Initialize intervening pages. */
883  while (firstZeroPage != targetPage)
884  {
885  (void) SimpleLruZeroPage(OldSerXidSlruCtl, firstZeroPage);
886  firstZeroPage = OldSerXidNextPage(firstZeroPage);
887  }
888  slotno = SimpleLruZeroPage(OldSerXidSlruCtl, targetPage);
889  }
890  else
891  slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);
892 
893  OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
894  OldSerXidSlruCtl->shared->page_dirty[slotno] = true;
895 
896  LWLockRelease(OldSerXidLock);
897 }
898 
899 /*
900  * Get the minimum commitSeqNo for any conflict out for the given xid. For
901  * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
902  * will be returned.
903  */
904 static SerCommitSeqNo
906 {
910  int slotno;
911 
913 
914  LWLockAcquire(OldSerXidLock, LW_SHARED);
915  headXid = oldSerXidControl->headXid;
916  tailXid = oldSerXidControl->tailXid;
917  LWLockRelease(OldSerXidLock);
918 
919  if (!TransactionIdIsValid(headXid))
920  return 0;
921 
922  Assert(TransactionIdIsValid(tailXid));
923 
924  if (TransactionIdPrecedes(xid, tailXid)
925  || TransactionIdFollows(xid, headXid))
926  return 0;
927 
928  /*
929  * The following function must be called without holding OldSerXidLock,
930  * but will return with that lock held, which must then be released.
931  */
933  OldSerXidPage(xid), xid);
934  val = OldSerXidValue(slotno, xid);
935  LWLockRelease(OldSerXidLock);
936  return val;
937 }
938 
939 /*
940  * Call this whenever there is a new xmin for active serializable
941  * transactions. We don't need to keep information on transactions which
942  * precede that. InvalidTransactionId means none active, so everything in
943  * the SLRU can be discarded.
944  */
945 static void
947 {
948  LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
949 
950  /*
951  * When no sxacts are active, nothing overlaps, so set the xid values to
952  * invalid to show that there are no valid entries. Don't clear headPage,
953  * though. A new xmin might still land on that page, and we don't want to
954  * repeatedly zero out the same page.
955  */
956  if (!TransactionIdIsValid(xid))
957  {
958  oldSerXidControl->tailXid = InvalidTransactionId;
959  oldSerXidControl->headXid = InvalidTransactionId;
960  LWLockRelease(OldSerXidLock);
961  return;
962  }
963 
964  /*
965  * When we're recovering prepared transactions, the global xmin might move
966  * backwards depending on the order they're recovered. Normally that's not
967  * OK, but during recovery no serializable transactions will commit, so
968  * the SLRU is empty and we can get away with it.
969  */
970  if (RecoveryInProgress())
971  {
972  Assert(oldSerXidControl->headPage < 0);
973  if (!TransactionIdIsValid(oldSerXidControl->tailXid)
974  || TransactionIdPrecedes(xid, oldSerXidControl->tailXid))
975  {
976  oldSerXidControl->tailXid = xid;
977  }
978  LWLockRelease(OldSerXidLock);
979  return;
980  }
981 
982  Assert(!TransactionIdIsValid(oldSerXidControl->tailXid)
983  || TransactionIdFollows(xid, oldSerXidControl->tailXid));
984 
985  oldSerXidControl->tailXid = xid;
986 
987  LWLockRelease(OldSerXidLock);
988 }
989 
990 /*
991  * Perform a checkpoint --- either during shutdown, or on-the-fly
992  *
993  * We don't have any data that needs to survive a restart, but this is a
994  * convenient place to truncate the SLRU.
995  */
996 void
998 {
999  int tailPage;
1000 
1001  LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
1002 
1003  /* Exit quickly if the SLRU is currently not in use. */
1004  if (oldSerXidControl->headPage < 0)
1005  {
1006  LWLockRelease(OldSerXidLock);
1007  return;
1008  }
1009 
1010  if (TransactionIdIsValid(oldSerXidControl->tailXid))
1011  {
1012  /* We can truncate the SLRU up to the page containing tailXid */
1013  tailPage = OldSerXidPage(oldSerXidControl->tailXid);
1014  }
1015  else
1016  {
1017  /*
1018  * The SLRU is no longer needed. Truncate to head before we set head
1019  * invalid.
1020  *
1021  * XXX: It's possible that the SLRU is not needed again until XID
1022  * wrap-around has happened, so that the segment containing headPage
1023  * that we leave behind will appear to be new again. In that case it
1024  * won't be removed until XID horizon advances enough to make it
1025  * current again.
1026  */
1027  tailPage = oldSerXidControl->headPage;
1028  oldSerXidControl->headPage = -1;
1029  }
1030 
1031  LWLockRelease(OldSerXidLock);
1032 
1033  /* Truncate away pages that are no longer required */
1035 
1036  /*
1037  * Flush dirty SLRU pages to disk
1038  *
1039  * This is not actually necessary from a correctness point of view. We do
1040  * it merely as a debugging aid.
1041  *
1042  * We're doing this after the truncation to avoid writing pages right
1043  * before deleting the file in which they sit, which would be completely
1044  * pointless.
1045  */
1047 }
1048 
1049 /*------------------------------------------------------------------------*/
1050 
1051 /*
1052  * InitPredicateLocks -- Initialize the predicate locking data structures.
1053  *
1054  * This is called from CreateSharedMemoryAndSemaphores(), which see for
1055  * more comments. In the normal postmaster case, the shared hash tables
1056  * are created here. Backends inherit the pointers
1057  * to the shared tables via fork(). In the EXEC_BACKEND case, each
1058  * backend re-executes this code to obtain pointers to the already existing
1059  * shared hash tables.
1060  */
1061 void
1063 {
1064  HASHCTL info;
1065  long max_table_size;
1066  Size requestSize;
1067  bool found;
1068 
1069 #ifndef EXEC_BACKEND
1071 #endif
1072 
1073  /*
1074  * Compute size of predicate lock target hashtable. Note these
1075  * calculations must agree with PredicateLockShmemSize!
1076  */
1077  max_table_size = NPREDICATELOCKTARGETENTS();
1078 
1079  /*
1080  * Allocate hash table for PREDICATELOCKTARGET structs. This stores
1081  * per-predicate-lock-target information.
1082  */
1083  MemSet(&info, 0, sizeof(info));
1084  info.keysize = sizeof(PREDICATELOCKTARGETTAG);
1085  info.entrysize = sizeof(PREDICATELOCKTARGET);
1087 
1088  PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
1089  max_table_size,
1090  max_table_size,
1091  &info,
1092  HASH_ELEM | HASH_BLOBS |
1094 
1095  /*
1096  * Reserve a dummy entry in the hash table; we use it to make sure there's
1097  * always one entry available when we need to split or combine a page,
1098  * because running out of space there could mean aborting a
1099  * non-serializable transaction.
1100  */
1101  if (!IsUnderPostmaster)
1102  {
1103  (void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
1104  HASH_ENTER, &found);
1105  Assert(!found);
1106  }
1107 
1108  /* Pre-calculate the hash and partition lock of the scratch entry */
1110  ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
1111 
1112  /*
1113  * Allocate hash table for PREDICATELOCK structs. This stores per
1114  * xact-lock-of-a-target information.
1115  */
1116  MemSet(&info, 0, sizeof(info));
1117  info.keysize = sizeof(PREDICATELOCKTAG);
1118  info.entrysize = sizeof(PREDICATELOCK);
1119  info.hash = predicatelock_hash;
1121 
1122  /* Assume an average of 2 xacts per target */
1123  max_table_size *= 2;
1124 
1125  PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
1126  max_table_size,
1127  max_table_size,
1128  &info,
1131 
1132  /*
1133  * Compute size for serializable transaction hashtable. Note these
1134  * calculations must agree with PredicateLockShmemSize!
1135  */
1136  max_table_size = (MaxBackends + max_prepared_xacts);
1137 
1138  /*
1139  * Allocate a list to hold information on transactions participating in
1140  * predicate locking.
1141  *
1142  * Assume an average of 10 predicate locking transactions per backend.
1143  * This allows aggressive cleanup while detail is present before data must
1144  * be summarized for storage in SLRU and the "dummy" transaction.
1145  */
1146  max_table_size *= 10;
1147 
1148  PredXact = ShmemInitStruct("PredXactList",
1150  &found);
1151  Assert(found == IsUnderPostmaster);
1152  if (!found)
1153  {
1154  int i;
1155 
1156  SHMQueueInit(&PredXact->availableList);
1157  SHMQueueInit(&PredXact->activeList);
1159  PredXact->SxactGlobalXminCount = 0;
1160  PredXact->WritableSxactCount = 0;
1162  PredXact->CanPartialClearThrough = 0;
1163  PredXact->HavePartialClearedThrough = 0;
1164  requestSize = mul_size((Size) max_table_size,
1166  PredXact->element = ShmemAlloc(requestSize);
1167  /* Add all elements to available list, clean. */
1168  memset(PredXact->element, 0, requestSize);
1169  for (i = 0; i < max_table_size; i++)
1170  {
1171  SHMQueueInsertBefore(&(PredXact->availableList),
1172  &(PredXact->element[i].link));
1173  }
1174  PredXact->OldCommittedSxact = CreatePredXact();
1176  PredXact->OldCommittedSxact->prepareSeqNo = 0;
1177  PredXact->OldCommittedSxact->commitSeqNo = 0;
1188  PredXact->OldCommittedSxact->pid = 0;
1189  }
1190  /* This never changes, so let's keep a local copy. */
1191  OldCommittedSxact = PredXact->OldCommittedSxact;
1192 
1193  /*
1194  * Allocate hash table for SERIALIZABLEXID structs. This stores per-xid
1195  * information for serializable transactions which have accessed data.
1196  */
1197  MemSet(&info, 0, sizeof(info));
1198  info.keysize = sizeof(SERIALIZABLEXIDTAG);
1199  info.entrysize = sizeof(SERIALIZABLEXID);
1200 
1201  SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
1202  max_table_size,
1203  max_table_size,
1204  &info,
1205  HASH_ELEM | HASH_BLOBS |
1206  HASH_FIXED_SIZE);
1207 
1208  /*
1209  * Allocate space for tracking rw-conflicts in lists attached to the
1210  * transactions.
1211  *
1212  * Assume an average of 5 conflicts per transaction. Calculations suggest
1213  * that this will prevent resource exhaustion in even the most pessimal
1214  * loads up to max_connections = 200 with all 200 connections pounding the
1215  * database with serializable transactions. Beyond that, there may be
1216  * occasional transactions canceled when trying to flag conflicts. That's
1217  * probably OK.
1218  */
1219  max_table_size *= 5;
1220 
1221  RWConflictPool = ShmemInitStruct("RWConflictPool",
1223  &found);
1224  Assert(found == IsUnderPostmaster);
1225  if (!found)
1226  {
1227  int i;
1228 
1229  SHMQueueInit(&RWConflictPool->availableList);
1230  requestSize = mul_size((Size) max_table_size,
1232  RWConflictPool->element = ShmemAlloc(requestSize);
1233  /* Add all elements to available list, clean. */
1234  memset(RWConflictPool->element, 0, requestSize);
1235  for (i = 0; i < max_table_size; i++)
1236  {
1237  SHMQueueInsertBefore(&(RWConflictPool->availableList),
1238  &(RWConflictPool->element[i].outLink));
1239  }
1240  }
1241 
1242  /*
1243  * Create or attach to the header for the list of finished serializable
1244  * transactions.
1245  */
1246  FinishedSerializableTransactions = (SHM_QUEUE *)
1247  ShmemInitStruct("FinishedSerializableTransactions",
1248  sizeof(SHM_QUEUE),
1249  &found);
1250  Assert(found == IsUnderPostmaster);
1251  if (!found)
1252  SHMQueueInit(FinishedSerializableTransactions);
1253 
1254  /*
1255  * Initialize the SLRU storage for old committed serializable
1256  * transactions.
1257  */
1258  OldSerXidInit();
1259 }
1260 
1261 /*
1262  * Estimate shared-memory space used for predicate lock table
1263  */
1264 Size
1266 {
1267  Size size = 0;
1268  long max_table_size;
1269 
1270  /* predicate lock target hash table */
1271  max_table_size = NPREDICATELOCKTARGETENTS();
1272  size = add_size(size, hash_estimate_size(max_table_size,
1273  sizeof(PREDICATELOCKTARGET)));
1274 
1275  /* predicate lock hash table */
1276  max_table_size *= 2;
1277  size = add_size(size, hash_estimate_size(max_table_size,
1278  sizeof(PREDICATELOCK)));
1279 
1280  /*
1281  * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1282  * margin.
1283  */
1284  size = add_size(size, size / 10);
1285 
1286  /* transaction list */
1287  max_table_size = MaxBackends + max_prepared_xacts;
1288  max_table_size *= 10;
1289  size = add_size(size, PredXactListDataSize);
1290  size = add_size(size, mul_size((Size) max_table_size,
1292 
1293  /* transaction xid table */
1294  size = add_size(size, hash_estimate_size(max_table_size,
1295  sizeof(SERIALIZABLEXID)));
1296 
1297  /* rw-conflict pool */
1298  max_table_size *= 5;
1299  size = add_size(size, RWConflictPoolHeaderDataSize);
1300  size = add_size(size, mul_size((Size) max_table_size,
1302 
1303  /* Head for list of finished serializable transactions. */
1304  size = add_size(size, sizeof(SHM_QUEUE));
1305 
1306  /* Shared memory structures for SLRU tracking of old committed xids. */
1307  size = add_size(size, sizeof(OldSerXidControlData));
1309 
1310  return size;
1311 }
1312 
1313 
1314 /*
1315  * Compute the hash code associated with a PREDICATELOCKTAG.
1316  *
1317  * Because we want to use just one set of partition locks for both the
1318  * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1319  * that PREDICATELOCKs fall into the same partition number as their
1320  * associated PREDICATELOCKTARGETs. dynahash.c expects the partition number
1321  * to be the low-order bits of the hash code, and therefore a
1322  * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1323  * associated PREDICATELOCKTARGETTAG's hash code. We achieve this with this
1324  * specialized hash function.
1325  */
1326 static uint32
1327 predicatelock_hash(const void *key, Size keysize)
1328 {
1329  const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1330  uint32 targethash;
1331 
1332  Assert(keysize == sizeof(PREDICATELOCKTAG));
1333 
1334  /* Look into the associated target object, and compute its hash code */
1335  targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1336 
1337  return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1338 }
1339 
1340 
1341 /*
1342  * GetPredicateLockStatusData
1343  * Return a table containing the internal state of the predicate
1344  * lock manager for use in pg_lock_status.
1345  *
1346  * Like GetLockStatusData, this function tries to hold the partition LWLocks
1347  * for as short a time as possible by returning two arrays that simply
1348  * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1349  * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1350  * SERIALIZABLEXACT will likely appear.
1351  */
1354 {
1355  PredicateLockData *data;
1356  int i;
1357  int els,
1358  el;
1359  HASH_SEQ_STATUS seqstat;
1360  PREDICATELOCK *predlock;
1361 
1362  data = (PredicateLockData *) palloc(sizeof(PredicateLockData));
1363 
1364  /*
1365  * To ensure consistency, take simultaneous locks on all partition locks
1366  * in ascending order, then SerializableXactHashLock.
1367  */
1368  for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
1370  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1371 
1372  /* Get number of locks and allocate appropriately-sized arrays. */
1373  els = hash_get_num_entries(PredicateLockHash);
1374  data->nelements = els;
1375  data->locktags = (PREDICATELOCKTARGETTAG *)
1376  palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
1377  data->xacts = (SERIALIZABLEXACT *)
1378  palloc(sizeof(SERIALIZABLEXACT) * els);
1379 
1380 
1381  /* Scan through PredicateLockHash and copy contents */
1382  hash_seq_init(&seqstat, PredicateLockHash);
1383 
1384  el = 0;
1385 
1386  while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
1387  {
1388  data->locktags[el] = predlock->tag.myTarget->tag;
1389  data->xacts[el] = *predlock->tag.myXact;
1390  el++;
1391  }
1392 
1393  Assert(el == els);
1394 
1395  /* Release locks in reverse order */
1396  LWLockRelease(SerializableXactHashLock);
1397  for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
1399 
1400  return data;
1401 }
1402 
1403 /*
1404  * Free up shared memory structures by pushing the oldest sxact (the one at
1405  * the front of the FinishedSerializableTransactions list) into summary form.
1406  * Each call will free exactly one SERIALIZABLEXACT structure and may also
1407  * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1408  * PREDICATELOCKTARGET, RWConflictData.
1409  */
1410 static void
1412 {
1413  SERIALIZABLEXACT *sxact;
1414 
1415  LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
1416 
1417  /*
1418  * This function is only called if there are no sxact slots available.
1419  * Some of them must belong to old, already-finished transactions, so
1420  * there should be something in FinishedSerializableTransactions list that
1421  * we can summarize. However, there's a race condition: while we were not
1422  * holding any locks, a transaction might have ended and cleaned up all
1423  * the finished sxact entries already, freeing up their sxact slots. In
1424  * that case, we have nothing to do here. The caller will find one of the
1425  * slots released by the other backend when it retries.
1426  */
1427  if (SHMQueueEmpty(FinishedSerializableTransactions))
1428  {
1429  LWLockRelease(SerializableFinishedListLock);
1430  return;
1431  }
1432 
1433  /*
1434  * Grab the first sxact off the finished list -- this will be the earliest
1435  * commit. Remove it from the list.
1436  */
1437  sxact = (SERIALIZABLEXACT *)
1438  SHMQueueNext(FinishedSerializableTransactions,
1439  FinishedSerializableTransactions,
1440  offsetof(SERIALIZABLEXACT, finishedLink));
1441  SHMQueueDelete(&(sxact->finishedLink));
1442 
1443  /* Add to SLRU summary information. */
1444  if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
1445  OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
1447 
1448  /* Summarize and release the detail. */
1449  ReleaseOneSerializableXact(sxact, false, true);
1450 
1451  LWLockRelease(SerializableFinishedListLock);
1452 }
1453 
1454 /*
1455  * GetSafeSnapshot
1456  * Obtain and register a snapshot for a READ ONLY DEFERRABLE
1457  * transaction. Ensures that the snapshot is "safe", i.e. a
1458  * read-only transaction running on it can execute serializably
1459  * without further checks. This requires waiting for concurrent
1460  * transactions to complete, and retrying with a new snapshot if
1461  * one of them could possibly create a conflict.
1462  *
1463  * As with GetSerializableTransactionSnapshot (which this is a subroutine
1464  * for), the passed-in Snapshot pointer should reference a static data
1465  * area that can safely be passed to GetSnapshotData.
1466  */
1467 static Snapshot
1469 {
1470  Snapshot snapshot;
1471 
1473 
1474  while (true)
1475  {
1476  /*
1477  * GetSerializableTransactionSnapshotInt is going to call
1478  * GetSnapshotData, so we need to provide it the static snapshot area
1479  * our caller passed to us. The pointer returned is actually the same
1480  * one passed to it, but we avoid assuming that here.
1481  */
1482  snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
1483  NULL, InvalidPid);
1484 
1485  if (MySerializableXact == InvalidSerializableXact)
1486  return snapshot; /* no concurrent r/w xacts; it's safe */
1487 
1488  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1489 
1490  /*
1491  * Wait for concurrent transactions to finish. Stop early if one of
1492  * them marked us as conflicted.
1493  */
1494  MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
1495  while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
1496  SxactIsROUnsafe(MySerializableXact)))
1497  {
1498  LWLockRelease(SerializableXactHashLock);
1500  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1501  }
1502  MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
1503 
1504  if (!SxactIsROUnsafe(MySerializableXact))
1505  {
1506  LWLockRelease(SerializableXactHashLock);
1507  break; /* success */
1508  }
1509 
1510  LWLockRelease(SerializableXactHashLock);
1511 
1512  /* else, need to retry... */
1513  ereport(DEBUG2,
1514  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1515  errmsg("deferrable snapshot was unsafe; trying a new one")));
1516  ReleasePredicateLocks(false);
1517  }
1518 
1519  /*
1520  * Now we have a safe snapshot, so we don't need to do any further checks.
1521  */
1522  Assert(SxactIsROSafe(MySerializableXact));
1523  ReleasePredicateLocks(false);
1524 
1525  return snapshot;
1526 }
1527 
1528 /*
1529  * GetSafeSnapshotBlockingPids
1530  * If the specified process is currently blocked in GetSafeSnapshot,
1531  * write the process IDs of all processes that it is blocked by
1532  * into the caller-supplied buffer output[]. The list is truncated at
1533  * output_size, and the number of PIDs written into the buffer is
1534  * returned. Returns zero if the given PID is not currently blocked
1535  * in GetSafeSnapshot.
1536  */
1537 int
1538 GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
1539 {
1540  int num_written = 0;
1541  SERIALIZABLEXACT *sxact;
1542 
1543  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1544 
1545  /* Find blocked_pid's SERIALIZABLEXACT by linear search. */
1546  for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
1547  {
1548  if (sxact->pid == blocked_pid)
1549  break;
1550  }
1551 
1552  /* Did we find it, and is it currently waiting in GetSafeSnapshot? */
1553  if (sxact != NULL && SxactIsDeferrableWaiting(sxact))
1554  {
1555  RWConflict possibleUnsafeConflict;
1556 
1557  /* Traverse the list of possible unsafe conflicts collecting PIDs. */
1558  possibleUnsafeConflict = (RWConflict)
1560  &sxact->possibleUnsafeConflicts,
1561  offsetof(RWConflictData, inLink));
1562 
1563  while (possibleUnsafeConflict != NULL && num_written < output_size)
1564  {
1565  output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
1566  possibleUnsafeConflict = (RWConflict)
1568  &possibleUnsafeConflict->inLink,
1569  offsetof(RWConflictData, inLink));
1570  }
1571  }
1572 
1573  LWLockRelease(SerializableXactHashLock);
1574 
1575  return num_written;
1576 }
1577 
1578 /*
1579  * Acquire a snapshot that can be used for the current transaction.
1580  *
1581  * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1582  * It should be current for this process and be contained in PredXact.
1583  *
1584  * The passed-in Snapshot pointer should reference a static data area that
1585  * can safely be passed to GetSnapshotData. The return value is actually
1586  * always this same pointer; no new snapshot data structure is allocated
1587  * within this function.
1588  */
1589 Snapshot
1591 {
1593 
1594  /*
1595  * Can't use serializable mode while recovery is still active, as it is,
1596  * for example, on a hot standby. We could get here despite the check in
1597  * check_XactIsoLevel() if default_transaction_isolation is set to
1598  * serializable, so phrase the hint accordingly.
1599  */
1600  if (RecoveryInProgress())
1601  ereport(ERROR,
1602  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1603  errmsg("cannot use serializable mode in a hot standby"),
1604  errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1605  errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1606 
1607  /*
1608  * A special optimization is available for SERIALIZABLE READ ONLY
1609  * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1610  * thereby avoid all SSI overhead once it's running.
1611  */
1613  return GetSafeSnapshot(snapshot);
1614 
1615  return GetSerializableTransactionSnapshotInt(snapshot,
1616  NULL, InvalidPid);
1617 }
1618 
1619 /*
1620  * Import a snapshot to be used for the current transaction.
1621  *
1622  * This is nearly the same as GetSerializableTransactionSnapshot, except that
1623  * we don't take a new snapshot, but rather use the data we're handed.
1624  *
1625  * The caller must have verified that the snapshot came from a serializable
1626  * transaction; and if we're read-write, the source transaction must not be
1627  * read-only.
1628  */
1629 void
1631  VirtualTransactionId *sourcevxid,
1632  int sourcepid)
1633 {
1635 
1636  /*
1637  * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1638  * import snapshots, since there's no way to wait for a safe snapshot when
1639  * we're using the snap we're told to. (XXX instead of throwing an error,
1640  * we could just ignore the XactDeferrable flag?)
1641  */
1643  ereport(ERROR,
1644  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1645  errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1646 
1647  (void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
1648  sourcepid);
1649 }
1650 
1651 /*
1652  * Guts of GetSerializableTransactionSnapshot
1653  *
1654  * If sourcevxid is valid, this is actually an import operation and we should
1655  * skip calling GetSnapshotData, because the snapshot contents are already
1656  * loaded up. HOWEVER: to avoid race conditions, we must check that the
1657  * source xact is still running after we acquire SerializableXactHashLock.
1658  * We do that by calling ProcArrayInstallImportedXmin.
1659  */
1660 static Snapshot
1661 GetSerializableTransactionSnapshotInt(Snapshot snapshot,
1662  VirtualTransactionId *sourcevxid,
1663  int sourcepid)
1664 {
1665  PGPROC *proc;
1666  VirtualTransactionId vxid;
1667  SERIALIZABLEXACT *sxact,
1668  *othersxact;
1669  HASHCTL hash_ctl;
1670 
1671  /* We only do this for serializable transactions. Once. */
1672  Assert(MySerializableXact == InvalidSerializableXact);
1673 
1674  Assert(!RecoveryInProgress());
1675 
1676  /*
1677  * Since all parts of a serializable transaction must use the same
1678  * snapshot, it is too late to establish one after a parallel operation
1679  * has begun.
1680  */
1681  if (IsInParallelMode())
1682  elog(ERROR, "cannot establish serializable snapshot during a parallel operation");
1683 
1684  proc = MyProc;
1685  Assert(proc != NULL);
1686  GET_VXID_FROM_PGPROC(vxid, *proc);
1687 
1688  /*
1689  * First we get the sxact structure, which may involve looping and access
1690  * to the "finished" list to free a structure for use.
1691  *
1692  * We must hold SerializableXactHashLock when taking/checking the snapshot
1693  * to avoid race conditions, for much the same reasons that
1694  * GetSnapshotData takes the ProcArrayLock. Since we might have to
1695  * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
1696  * this means we have to create the sxact first, which is a bit annoying
1697  * (in particular, an elog(ERROR) in procarray.c would cause us to leak
1698  * the sxact). Consider refactoring to avoid this.
1699  */
1700 #ifdef TEST_OLDSERXID
1701  SummarizeOldestCommittedSxact();
1702 #endif
1703  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1704  do
1705  {
1706  sxact = CreatePredXact();
1707  /* If null, push out committed sxact to SLRU summary & retry. */
1708  if (!sxact)
1709  {
1710  LWLockRelease(SerializableXactHashLock);
1711  SummarizeOldestCommittedSxact();
1712  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1713  }
1714  } while (!sxact);
1715 
1716  /* Get the snapshot, or check that it's safe to use */
1717  if (!sourcevxid)
1718  snapshot = GetSnapshotData(snapshot);
1719  else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
1720  {
1721  ReleasePredXact(sxact);
1722  LWLockRelease(SerializableXactHashLock);
1723  ereport(ERROR,
1724  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1725  errmsg("could not import the requested snapshot"),
1726  errdetail("The source process with PID %d is not running anymore.",
1727  sourcepid)));
1728  }
1729 
1730  /*
1731  * If there are no serializable transactions which are not read-only, we
1732  * can "opt out" of predicate locking and conflict checking for a
1733  * read-only transaction.
1734  *
1735  * The reason this is safe is that a read-only transaction can only become
1736  * part of a dangerous structure if it overlaps a writable transaction
1737  * which in turn overlaps a writable transaction which committed before
1738  * the read-only transaction started. A new writable transaction can
1739  * overlap this one, but it can't meet the other condition of overlapping
1740  * a transaction which committed before this one started.
1741  */
1742  if (XactReadOnly && PredXact->WritableSxactCount == 0)
1743  {
1744  ReleasePredXact(sxact);
1745  LWLockRelease(SerializableXactHashLock);
1746  return snapshot;
1747  }
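/*
 * Worked example of the argument above: suppose this read-only transaction
 * Tro could be part of a dangerous structure Tro -rw-> T1 -rw-> T0, with T0
 * committing before Tro took its snapshot.  T1 must overlap both Tro and T0.
 * But WritableSxactCount was zero when Tro's snapshot was taken, so any
 * writable T1 must begin after that point, while T0 must have committed
 * before it; the two can never overlap, hence no such structure can form.
 */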
1748 
1749  /* Maintain serializable global xmin info. */
1750  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
1751  {
1752  Assert(PredXact->SxactGlobalXminCount == 0);
1753  PredXact->SxactGlobalXmin = snapshot->xmin;
1754  PredXact->SxactGlobalXminCount = 1;
1755  OldSerXidSetActiveSerXmin(snapshot->xmin);
1756  }
1757  else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
1758  {
1759  Assert(PredXact->SxactGlobalXminCount > 0);
1760  PredXact->SxactGlobalXminCount++;
1761  }
1762  else
1763  {
1764  Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
1765  }
1766 
1767  /* Initialize the structure. */
1768  sxact->vxid = vxid;
1769  sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
1770  sxact->prepareSeqNo = InvalidSerCommitSeqNo;
1771  sxact->commitSeqNo = InvalidSerCommitSeqNo;
1772  SHMQueueInit(&(sxact->outConflicts));
1773  SHMQueueInit(&(sxact->inConflicts));
1774  SHMQueueInit(&(sxact->possibleUnsafeConflicts));
1775  sxact->topXid = GetTopTransactionIdIfAny();
1776  sxact->finishedBefore = InvalidTransactionId;
1777  sxact->xmin = snapshot->xmin;
1778  sxact->pid = MyProcPid;
1779  SHMQueueInit(&(sxact->predicateLocks));
1780  SHMQueueElemInit(&(sxact->finishedLink));
1781  sxact->flags = 0;
1782  if (XactReadOnly)
1783  {
1784  sxact->flags |= SXACT_FLAG_READ_ONLY;
1785 
1786  /*
1787  * Register all concurrent r/w transactions as possible conflicts; if
1788  * all of them commit without any outgoing conflicts to earlier
1789  * transactions then this snapshot can be deemed safe (and we can run
1790  * without tracking predicate locks).
1791  */
1792  for (othersxact = FirstPredXact();
1793  othersxact != NULL;
1794  othersxact = NextPredXact(othersxact))
1795  {
1796  if (!SxactIsCommitted(othersxact)
1797  && !SxactIsDoomed(othersxact)
1798  && !SxactIsReadOnly(othersxact))
1799  {
1800  SetPossibleUnsafeConflict(sxact, othersxact);
1801  }
1802  }
1803  }
1804  else
1805  {
1806  ++(PredXact->WritableSxactCount);
1807  Assert(PredXact->WritableSxactCount <=
1808  (MaxBackends + max_prepared_xacts));
1809  }
1810 
1811  MySerializableXact = sxact;
1812  MyXactDidWrite = false; /* haven't written anything yet */
1813 
1814  LWLockRelease(SerializableXactHashLock);
1815 
1816  /* Initialize the backend-local hash table of parent locks */
1817  Assert(LocalPredicateLockHash == NULL);
1818  MemSet(&hash_ctl, 0, sizeof(hash_ctl));
1819  hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1820  hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1821  LocalPredicateLockHash = hash_create("Local predicate lock",
1822  max_predicate_locks_per_xact,
1823  &hash_ctl,
1824  HASH_ELEM | HASH_BLOBS);
1825 
1826  return snapshot;
1827 }
1828 
1829 /*
1830  * Register the top level XID in SerializableXidHash.
1831  * Also store it for easy reference in MySerializableXact.
1832  */
1833 void
1834 RegisterPredicateLockingXid(TransactionId xid)
1835 {
1836  SERIALIZABLEXIDTAG sxidtag;
1837  SERIALIZABLEXID *sxid;
1838  bool found;
1839 
1840  /*
1841  * If we're not tracking predicate lock data for this transaction, we
1842  * should ignore the request and return quickly.
1843  */
1844  if (MySerializableXact == InvalidSerializableXact)
1845  return;
1846 
1847  /* We should have a valid XID and be at the top level. */
1848  Assert(TransactionIdIsValid(xid));
1849 
1850  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1851 
1852  /* This should only be done once per transaction. */
1853  Assert(MySerializableXact->topXid == InvalidTransactionId);
1854 
1855  MySerializableXact->topXid = xid;
1856 
1857  sxidtag.xid = xid;
1858  sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
1859  &sxidtag,
1860  HASH_ENTER, &found);
1861  Assert(!found);
1862 
1863  /* Initialize the structure. */
1864  sxid->myXact = MySerializableXact;
1865  LWLockRelease(SerializableXactHashLock);
1866 }
1867 
1868 
1869 /*
1870  * Check whether there are any predicate locks held by any transaction
1871  * for the page at the given block number.
1872  *
1873  * Note that the transaction may be completed but not yet subject to
1874  * cleanup due to overlapping serializable transactions. This must
1875  * return valid information regardless of transaction isolation level.
1876  *
1877  * Also note that this doesn't check for a conflicting relation lock,
1878  * just a lock specifically on the given page.
1879  *
1880  * One use is to support proper behavior during GiST index vacuum.
1881  */
1882 bool
1883 PageIsPredicateLocked(Relation relation, BlockNumber blkno)
1884 {
1885  PREDICATELOCKTARGETTAG targettag;
1886  uint32 targettaghash;
1887  LWLock *partitionLock;
1888  PREDICATELOCKTARGET *target;
1889 
1890  SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
1891  relation->rd_node.dbNode,
1892  relation->rd_id,
1893  blkno);
1894 
1895  targettaghash = PredicateLockTargetTagHashCode(&targettag);
1896  partitionLock = PredicateLockHashPartitionLock(targettaghash);
1897  LWLockAcquire(partitionLock, LW_SHARED);
1898  target = (PREDICATELOCKTARGET *)
1899  hash_search_with_hash_value(PredicateLockTargetHash,
1900  &targettag, targettaghash,
1901  HASH_FIND, NULL);
1902  LWLockRelease(partitionLock);
1903 
1904  return (target != NULL);
1905 }
1906 
1907 
1908 /*
1909  * Check whether a particular lock is held by this transaction.
1910  *
1911  * Important note: this function may return false even if the lock is
1912  * being held, because it uses the local lock table which is not
1913  * updated if another transaction modifies our lock list (e.g. to
1914  * split an index page). It can also return true when a coarser
1915  * granularity lock that covers this target is being held. Be careful
1916  * to only use this function in circumstances where such errors are
1917  * acceptable!
1918  */
1919 static bool
1920 PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag)
1921 {
1922  LOCALPREDICATELOCK *lock;
1923 
1924  /* check local hash table */
1925  lock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
1926  targettag,
1927  HASH_FIND, NULL);
1928 
1929  if (!lock)
1930  return false;
1931 
1932  /*
1933  * Found entry in the table, but still need to check whether it's actually
1934  * held -- it could just be a parent of some held lock.
1935  */
1936  return lock->held;
1937 }
1938 
1939 /*
1940  * Return the parent lock tag in the lock hierarchy: the next coarser
1941  * lock that covers the provided tag.
1942  *
1943  * Returns true and sets *parent to the parent tag if one exists,
1944  * returns false if none exists.
1945  */
1946 static bool
1947 GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
1948  PREDICATELOCKTARGETTAG *parent)
1949 {
1950  switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
1951  {
1952  case PREDLOCKTAG_RELATION:
1953  /* relation locks have no parent lock */
1954  return false;
1955 
1956  case PREDLOCKTAG_PAGE:
1957  /* parent lock is relation lock */
1958  SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
1959  GET_PREDICATELOCKTARGETTAG_DB(*tag),
1960  GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
1961 
1962  return true;
1963 
1964  case PREDLOCKTAG_TUPLE:
1965  /* parent lock is page lock */
1966  SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
1967  GET_PREDICATELOCKTARGETTAG_DB(*tag),
1968  GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
1969  GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
1970  return true;
1971  }
1972 
1973  /* not reachable */
1974  Assert(false);
1975  return false;
1976 }
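/*
 * Illustration (hypothetical OIDs): the tag hierarchy walked above is
 *
 *     tuple    (db 16384, rel 16385, block 7, offset 42)
 *       -> page     (db 16384, rel 16385, block 7)
 *         -> relation (db 16384, rel 16385)
 *
 * so two GetParentPredicateLockTag() calls take any tuple tag up to its
 * relation tag, after which the function returns false.
 */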
1977 
1978 /*
1979  * Check whether the lock we are considering is already covered by a
1980  * coarser lock for our transaction.
1981  *
1982  * Like PredicateLockExists, this function might return a false
1983  * negative, but it will never return a false positive.
1984  */
1985 static bool
1986 CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
1987 {
1988  PREDICATELOCKTARGETTAG targettag,
1989  parenttag;
1990 
1991  targettag = *newtargettag;
1992 
1993  /* check parents iteratively until no more */
1994  while (GetParentPredicateLockTag(&targettag, &parenttag))
1995  {
1996  targettag = parenttag;
1997  if (PredicateLockExists(&targettag))
1998  return true;
1999  }
2000 
2001  /* no more parents to check; lock is not covered */
2002  return false;
2003 }
2004 
2005 /*
2006  * Remove the dummy entry from the predicate lock target hash, to free up some
2007  * scratch space. The caller must be holding SerializablePredicateLockListLock,
2008  * and must restore the entry with RestoreScratchTarget() before releasing the
2009  * lock.
2010  *
2011  * If lockheld is true, the caller is already holding the partition lock
2012  * of the partition containing the scratch entry.
2013  */
2014 static void
2015 RemoveScratchTarget(bool lockheld)
2016 {
2017  bool found;
2018 
2019  Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2020 
2021  if (!lockheld)
2022  LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2023  hash_search_with_hash_value(PredicateLockTargetHash,
2024  &ScratchTargetTag,
2025  ScratchTargetTagHash,
2026  HASH_REMOVE, &found);
2027  Assert(found);
2028  if (!lockheld)
2029  LWLockRelease(ScratchPartitionLock);
2030 }
2031 
2032 /*
2033  * Re-insert the dummy entry in predicate lock target hash.
2034  */
2035 static void
2036 RestoreScratchTarget(bool lockheld)
2037 {
2038  bool found;
2039 
2040  Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2041 
2042  if (!lockheld)
2043  LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2044  hash_search_with_hash_value(PredicateLockTargetHash,
2045  &ScratchTargetTag,
2046  ScratchTargetTagHash,
2047  HASH_ENTER, &found);
2048  Assert(!found);
2049  if (!lockheld)
2050  LWLockRelease(ScratchPartitionLock);
2051 }
2052 
2053 /*
2054  * Check whether the list of related predicate locks is empty for a
2055  * predicate lock target, and remove the target if it is.
2056  */
2057 static void
2058 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
2059 {
2060  PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
2061 
2062  Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2063 
2064  /* Can't remove it until no locks at this target. */
2065  if (!SHMQueueEmpty(&target->predicateLocks))
2066  return;
2067 
2068  /* Actually remove the target. */
2069  rmtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2070  &target->tag,
2071  targettaghash,
2072  HASH_REMOVE, NULL);
2073  Assert(rmtarget == target);
2074 }
2075 
2076 /*
2077  * Delete child target locks owned by this process.
2078  * This implementation is assuming that the usage of each target tag field
2079  * is uniform. No need to make this hard if we don't have to.
2080  *
2081  * We aren't acquiring lightweight locks for the predicate lock or lock
2082  * target structures associated with this transaction unless we're going
2083  * to modify them, because no other process is permitted to modify our
2084  * locks.
2085  */
2086 static void
2087 DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
2088 {
2089  SERIALIZABLEXACT *sxact;
2090  PREDICATELOCK *predlock;
2091 
2092  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2093  sxact = MySerializableXact;
2094  predlock = (PREDICATELOCK *)
2095  SHMQueueNext(&(sxact->predicateLocks),
2096  &(sxact->predicateLocks),
2097  offsetof(PREDICATELOCK, xactLink));
2098  while (predlock)
2099  {
2100  SHM_QUEUE *predlocksxactlink;
2101  PREDICATELOCK *nextpredlock;
2102  PREDICATELOCKTAG oldlocktag;
2103  PREDICATELOCKTARGET *oldtarget;
2104  PREDICATELOCKTARGETTAG oldtargettag;
2105 
2106  predlocksxactlink = &(predlock->xactLink);
2107  nextpredlock = (PREDICATELOCK *)
2108  SHMQueueNext(&(sxact->predicateLocks),
2109  predlocksxactlink,
2110  offsetof(PREDICATELOCK, xactLink));
2111 
2112  oldlocktag = predlock->tag;
2113  Assert(oldlocktag.myXact == sxact);
2114  oldtarget = oldlocktag.myTarget;
2115  oldtargettag = oldtarget->tag;
2116 
2117  if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
2118  {
2119  uint32 oldtargettaghash;
2120  LWLock *partitionLock;
2121  PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
2122 
2123  oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2124  partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2125 
2126  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2127 
2128  SHMQueueDelete(predlocksxactlink);
2129  SHMQueueDelete(&(predlock->targetLink));
2130  rmpredlock = hash_search_with_hash_value
2131  (PredicateLockHash,
2132  &oldlocktag,
2133  PredicateLockHashCodeFromTargetHashCode(&oldlocktag,
2134  oldtargettaghash),
2135  HASH_REMOVE, NULL);
2136  Assert(rmpredlock == predlock);
2137 
2138  RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2139 
2140  LWLockRelease(partitionLock);
2141 
2142  DecrementParentLocks(&oldtargettag);
2143  }
2144 
2145  predlock = nextpredlock;
2146  }
2147  LWLockRelease(SerializablePredicateLockListLock);
2148 }
2149 
2150 /*
2151  * Returns the promotion limit for a given predicate lock target. This is the
2152  * max number of descendant locks allowed before promoting to the specified
2153  * tag. Note that the limit includes non-direct descendants (e.g., both tuples
2154  * and pages for a relation lock).
2155  *
2156  * Currently the default limit is 2 for a page lock, and half of the value of
2157  * max_pred_locks_per_transaction - 1 for a relation lock, to match behavior
2158  * of earlier releases when upgrading.
2159  *
2160  * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
2161  * of page and tuple locks based on the pages in a relation, and the maximum
2162  * ratio of tuple locks to tuples in a page. This would provide more
2163  * generally "balanced" allocation of locks to where they are most useful,
2164  * while still allowing the absolute numbers to prevent one relation from
2165  * tying up all predicate lock resources.
2166  */
2167 static int
2168 MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
2169 {
2170  switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2171  {
2172  case PREDLOCKTAG_RELATION:
2173  return max_predicate_locks_per_relation < 0
2174  ? (max_predicate_locks_per_xact
2175  / (-max_predicate_locks_per_relation)) - 1
2176  : max_predicate_locks_per_relation;
2177 
2178  case PREDLOCKTAG_PAGE:
2179  return max_predicate_locks_per_page;
2180 
2181  case PREDLOCKTAG_TUPLE:
2182 
2183  /*
2184  * not reachable: nothing is finer-granularity than a tuple, so we
2185  * should never try to promote to it.
2186  */
2187  Assert(false);
2188  return 0;
2189  }
2190 
2191  /* not reachable */
2192  Assert(false);
2193  return 0;
2194 }
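/*
 * Example with the default settings (max_pred_locks_per_transaction = 64,
 * max_pred_locks_per_relation = -2, max_pred_locks_per_page = 2): a
 * transaction's locks within one relation are promoted to a relation lock
 * once it holds more than 64 / 2 - 1 = 31 page or tuple locks there, and its
 * tuple locks on one page are promoted to a page lock once more than 2 are
 * held.
 */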
2195 
2196 /*
2197  * For all ancestors of a newly-acquired predicate lock, increment
2198  * their child count in the parent hash table. If any of them have
2199  * more descendants than their promotion threshold, acquire the
2200  * coarsest such lock.
2201  *
2202  * Returns true if a parent lock was acquired and false otherwise.
2203  */
2204 static bool
2205 CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
2206 {
2207  PREDICATELOCKTARGETTAG targettag,
2208  nexttag,
2209  promotiontag;
2210  LOCALPREDICATELOCK *parentlock;
2211  bool found,
2212  promote;
2213 
2214  promote = false;
2215 
2216  targettag = *reqtag;
2217 
2218  /* check parents iteratively */
2219  while (GetParentPredicateLockTag(&targettag, &nexttag))
2220  {
2221  targettag = nexttag;
2222  parentlock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2223  &targettag,
2224  HASH_ENTER,
2225  &found);
2226  if (!found)
2227  {
2228  parentlock->held = false;
2229  parentlock->childLocks = 1;
2230  }
2231  else
2232  parentlock->childLocks++;
2233 
2234  if (parentlock->childLocks >
2235  MaxPredicateChildLocks(&targettag))
2236  {
2237  /*
2238  * We should promote to this parent lock. Continue to check its
2239  * ancestors, however, both to get their child counts right and to
2240  * check whether we should just go ahead and promote to one of
2241  * them.
2242  */
2243  promotiontag = targettag;
2244  promote = true;
2245  }
2246  }
2247 
2248  if (promote)
2249  {
2250  /* acquire coarsest ancestor eligible for promotion */
2251  PredicateLockAcquire(&promotiontag);
2252  return true;
2253  }
2254  else
2255  return false;
2256 }
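/*
 * Illustration: with a page promotion threshold of 2, requesting a third
 * tuple lock on the same heap page pushes that page's childLocks count over
 * the threshold, so we re-enter PredicateLockAcquire() with the page tag;
 * the finer tuple locks are then removed by DeleteChildTargetLocks() on
 * behalf of the coarser lock.
 */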
2257 
2258 /*
2259  * When releasing a lock, decrement the child count on all ancestor
2260  * locks.
2261  *
2262  * This is called only when releasing a lock via
2263  * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
2264  * we've acquired its parent, possibly due to promotion) or when a new
2265  * MVCC write lock makes the predicate lock unnecessary. There's no
2266  * point in calling it when locks are released at transaction end, as
2267  * this information is no longer needed.
2268  */
2269 static void
2270 DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
2271 {
2272  PREDICATELOCKTARGETTAG parenttag,
2273  nexttag;
2274 
2275  parenttag = *targettag;
2276 
2277  while (GetParentPredicateLockTag(&parenttag, &nexttag))
2278  {
2279  uint32 targettaghash;
2280  LOCALPREDICATELOCK *parentlock,
2281  *rmlock PG_USED_FOR_ASSERTS_ONLY;
2282 
2283  parenttag = nexttag;
2284  targettaghash = PredicateLockTargetTagHashCode(&parenttag);
2285  parentlock = (LOCALPREDICATELOCK *)
2286  hash_search_with_hash_value(LocalPredicateLockHash,
2287  &parenttag, targettaghash,
2288  HASH_FIND, NULL);
2289 
2290  /*
2291  * There's a small chance the parent lock doesn't exist in the lock
2292  * table. This can happen if we prematurely removed it because an
2293  * index split caused the child refcount to be off.
2294  */
2295  if (parentlock == NULL)
2296  continue;
2297 
2298  parentlock->childLocks--;
2299 
2300  /*
2301  * Under similar circumstances the parent lock's refcount might be
2302  * zero. This only happens if we're holding that lock (otherwise we
2303  * would have removed the entry).
2304  */
2305  if (parentlock->childLocks < 0)
2306  {
2307  Assert(parentlock->held);
2308  parentlock->childLocks = 0;
2309  }
2310 
2311  if ((parentlock->childLocks == 0) && (!parentlock->held))
2312  {
2313  rmlock = (LOCALPREDICATELOCK *)
2314  hash_search_with_hash_value(LocalPredicateLockHash,
2315  &parenttag, targettaghash,
2316  HASH_REMOVE, NULL);
2317  Assert(rmlock == parentlock);
2318  }
2319  }
2320 }
2321 
2322 /*
2323  * Indicate that a predicate lock on the given target is held by the
2324  * specified transaction. Has no effect if the lock is already held.
2325  *
2326  * This updates the lock table and the sxact's lock list, and creates
2327  * the lock target if necessary, but does *not* do anything related to
2328  * granularity promotion or the local lock table. See
2329  * PredicateLockAcquire for that.
2330  */
2331 static void
2332 CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
2333  uint32 targettaghash,
2334  SERIALIZABLEXACT *sxact)
2335 {
2336  PREDICATELOCKTARGET *target;
2337  PREDICATELOCKTAG locktag;
2338  PREDICATELOCK *lock;
2339  LWLock *partitionLock;
2340  bool found;
2341 
2342  partitionLock = PredicateLockHashPartitionLock(targettaghash);
2343 
2344  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2345  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2346 
2347  /* Make sure that the target is represented. */
2348  target = (PREDICATELOCKTARGET *)
2349  hash_search_with_hash_value(PredicateLockTargetHash,
2350  targettag, targettaghash,
2351  HASH_ENTER_NULL, &found);
2352  if (!target)
2353  ereport(ERROR,
2354  (errcode(ERRCODE_OUT_OF_MEMORY),
2355  errmsg("out of shared memory"),
2356  errhint("You might need to increase max_pred_locks_per_transaction.")));
2357  if (!found)
2358  SHMQueueInit(&(target->predicateLocks));
2359 
2360  /* We've got the sxact and target, make sure they're joined. */
2361  locktag.myTarget = target;
2362  locktag.myXact = sxact;
2363  lock = (PREDICATELOCK *)
2364  hash_search_with_hash_value(PredicateLockHash, &locktag,
2365  PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
2366  HASH_ENTER_NULL, &found);
2367  if (!lock)
2368  ereport(ERROR,
2369  (errcode(ERRCODE_OUT_OF_MEMORY),
2370  errmsg("out of shared memory"),
2371  errhint("You might need to increase max_pred_locks_per_transaction.")));
2372 
2373  if (!found)
2374  {
2375  SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
2376  SHMQueueInsertBefore(&(sxact->predicateLocks),
2377  &(lock->xactLink));
2378  lock->commitSeqNo = InvalidSerCommitSeqNo;
2379  }
2380 
2381  LWLockRelease(partitionLock);
2382  LWLockRelease(SerializablePredicateLockListLock);
2383 }
2384 
2385 /*
2386  * Acquire a predicate lock on the specified target for the current
2387  * connection if not already held. This updates the local lock table
2388  * and uses it to implement granularity promotion. It will consolidate
2389  * multiple locks into a coarser lock if warranted, and will release
2390  * any finer-grained locks covered by the new one.
2391  */
2392 static void
2393 PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
2394 {
2395  uint32 targettaghash;
2396  bool found;
2397  LOCALPREDICATELOCK *locallock;
2398 
2399  /* Do we have the lock already, or a covering lock? */
2400  if (PredicateLockExists(targettag))
2401  return;
2402 
2403  if (CoarserLockCovers(targettag))
2404  return;
2405 
2406  /* the same hash and LW lock apply to the lock target and the local lock. */
2407  targettaghash = PredicateLockTargetTagHashCode(targettag);
2408 
2409  /* Acquire lock in local table */
2410  locallock = (LOCALPREDICATELOCK *)
2411  hash_search_with_hash_value(LocalPredicateLockHash,
2412  targettag, targettaghash,
2413  HASH_ENTER, &found);
2414  locallock->held = true;
2415  if (!found)
2416  locallock->childLocks = 0;
2417 
2418  /* Actually create the lock */
2419  CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2420 
2421  /*
2422  * Lock has been acquired. Check whether it should be promoted to a
2423  * coarser granularity, or whether there are finer-granularity locks to
2424  * clean up.
2425  */
2426  if (CheckAndPromotePredicateLockRequest(targettag))
2427  {
2428  /*
2429  * Lock request was promoted to a coarser-granularity lock, and that
2430  * lock was acquired. It will delete this lock and any of its
2431  * children, so we're done.
2432  */
2433  }
2434  else
2435  {
2436  /* Clean up any finer-granularity locks */
2437  if (GET_PREDICATELOCKTARGETTAG_TYPE(*targettag) != PREDLOCKTAG_TUPLE)
2438  DeleteChildTargetLocks(targettag);
2439  }
2440 }
2441 
2442 
2443 /*
2444  * PredicateLockRelation
2445  *
2446  * Gets a predicate lock at the relation level.
2447  * Skip if not in full serializable transaction isolation level.
2448  * Skip if this is a temporary table.
2449  * Clear any finer-grained predicate locks this session has on the relation.
2450  */
2451 void
2452 PredicateLockRelation(Relation relation, Snapshot snapshot)
2453 {
2454  PREDICATELOCKTARGETTAG tag;
2455 
2456  if (!SerializationNeededForRead(relation, snapshot))
2457  return;
2458 
2459  SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2460  relation->rd_node.dbNode,
2461  relation->rd_id);
2462  PredicateLockAcquire(&tag);
2463 }
2464 
2465 /*
2466  * PredicateLockPage
2467  *
2468  * Gets a predicate lock at the page level.
2469  * Skip if not in full serializable transaction isolation level.
2470  * Skip if this is a temporary table.
2471  * Skip if a coarser predicate lock already covers this page.
2472  * Clear any finer-grained predicate locks this session has on the relation.
2473  */
2474 void
2475 PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
2476 {
2477  PREDICATELOCKTARGETTAG tag;
2478 
2479  if (!SerializationNeededForRead(relation, snapshot))
2480  return;
2481 
2482  SET_PREDICATELOCKTARGETTAG_PAGE(tag,
2483  relation->rd_node.dbNode,
2484  relation->rd_id,
2485  blkno);
2486  PredicateLockAcquire(&tag);
2487 }
2488 
2489 /*
2490  * PredicateLockTuple
2491  *
2492  * Gets a predicate lock at the tuple level.
2493  * Skip if not in full serializable transaction isolation level.
2494  * Skip if this is a temporary table.
2495  */
2496 void
2497 PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
2498 {
2499  PREDICATELOCKTARGETTAG tag;
2500  ItemPointer tid;
2501  TransactionId targetxmin;
2502 
2503  if (!SerializationNeededForRead(relation, snapshot))
2504  return;
2505 
2506  /*
2507  * If it's a heap tuple, return if this xact wrote it.
2508  */
2509  if (relation->rd_index == NULL)
2510  {
2511  TransactionId myxid;
2512 
2513  targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
2514 
2515  myxid = GetTopTransactionIdIfAny();
2516  if (TransactionIdIsValid(myxid))
2517  {
2518  if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
2519  {
2520  TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
2521 
2522  if (TransactionIdEquals(xid, myxid))
2523  {
2524  /* We wrote it; we already have a write lock. */
2525  return;
2526  }
2527  }
2528  }
2529  }
2530 
2531  /*
2532  * Do quick-but-not-definitive test for a relation lock first. This will
2533  * never cause a return when the relation is *not* locked, but will
2534  * occasionally let the check continue when there really *is* a relation
2535  * level lock.
2536  */
2537  SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2538  relation->rd_node.dbNode,
2539  relation->rd_id);
2540  if (PredicateLockExists(&tag))
2541  return;
2542 
2543  tid = &(tuple->t_self);
2544  SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
2545  relation->rd_node.dbNode,
2546  relation->rd_id,
2547  ItemPointerGetBlockNumber(tid),
2548  ItemPointerGetOffsetNumber(tid));
2549  PredicateLockAcquire(&tag);
2550 }
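/*
 * For orientation: heap scans and fetches take tuple-level SIREAD locks
 * through this function, while index AMs generally lock at page or relation
 * granularity via PredicateLockPage() or PredicateLockRelation().
 */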
2551 
2552 
2553 /*
2554  * DeleteLockTarget
2555  *
2556  * Remove a predicate lock target along with any locks held for it.
2557  *
2558  * Caller must hold SerializablePredicateLockListLock and the
2559  * appropriate hash partition lock for the target.
2560  */
2561 static void
2562 DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
2563 {
2564  PREDICATELOCK *predlock;
2565  SHM_QUEUE *predlocktargetlink;
2566  PREDICATELOCK *nextpredlock;
2567  bool found;
2568 
2569  Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2570  Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
2571 
2572  predlock = (PREDICATELOCK *)
2573  SHMQueueNext(&(target->predicateLocks),
2574  &(target->predicateLocks),
2575  offsetof(PREDICATELOCK, targetLink));
2576  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2577  while (predlock)
2578  {
2579  predlocktargetlink = &(predlock->targetLink);
2580  nextpredlock = (PREDICATELOCK *)
2581  SHMQueueNext(&(target->predicateLocks),
2582  predlocktargetlink,
2583  offsetof(PREDICATELOCK, targetLink));
2584 
2585  SHMQueueDelete(&(predlock->xactLink));
2586  SHMQueueDelete(&(predlock->targetLink));
2587 
2588  hash_search_with_hash_value
2589  (PredicateLockHash,
2590  &predlock->tag,
2591  PredicateLockHashCodeFromTargetHashCode(&predlock->tag,
2592  targettaghash),
2593  HASH_REMOVE, &found);
2594  Assert(found);
2595 
2596  predlock = nextpredlock;
2597  }
2598  LWLockRelease(SerializableXactHashLock);
2599 
2600  /* Remove the target itself, if possible. */
2601  RemoveTargetIfNoLongerUsed(target, targettaghash);
2602 }
2603 
2604 
2605 /*
2606  * TransferPredicateLocksToNewTarget
2607  *
2608  * Move or copy all the predicate locks for a lock target, for use by
2609  * index page splits/combines and other things that create or replace
2610  * lock targets. If 'removeOld' is true, the old locks and the target
2611  * will be removed.
2612  *
2613  * Returns true on success, or false if we ran out of shared memory to
2614  * allocate the new target or locks. Guaranteed to always succeed if
2615  * removeOld is set (by using the scratch entry in PredicateLockTargetHash
2616  * for scratch space).
2617  *
2618  * Warning: the "removeOld" option should be used only with care,
2619  * because this function does not (indeed, can not) update other
2620  * backends' LocalPredicateLockHash. If we are only adding new
2621  * entries, this is not a problem: the local lock table is used only
2622  * as a hint, so missing entries for locks that are held are
2623  * OK. Having entries for locks that are no longer held, as can happen
2624  * when using "removeOld", is not in general OK. We can only use it
2625  * safely when replacing a lock with a coarser-granularity lock that
2626  * covers it, or if we are absolutely certain that no one will need to
2627  * refer to that lock in the future.
2628  *
2629  * Caller must hold SerializablePredicateLockListLock.
2630  */
2631 static bool
2632 TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
2633  PREDICATELOCKTARGETTAG newtargettag,
2634  bool removeOld)
2635 {
2636  uint32 oldtargettaghash;
2637  LWLock *oldpartitionLock;
2638  PREDICATELOCKTARGET *oldtarget;
2639  uint32 newtargettaghash;
2640  LWLock *newpartitionLock;
2641  bool found;
2642  bool outOfShmem = false;
2643 
2644  Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2645 
2646  oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2647  newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
2648  oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2649  newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
2650 
2651  if (removeOld)
2652  {
2653  /*
2654  * Remove the dummy entry to give us scratch space, so we know we'll
2655  * be able to create the new lock target.
2656  */
2657  RemoveScratchTarget(false);
2658  }
2659 
2660  /*
2661  * We must get the partition locks in ascending sequence to avoid
2662  * deadlocks. If old and new partitions are the same, we must request the
2663  * lock only once.
2664  */
2665  if (oldpartitionLock < newpartitionLock)
2666  {
2667  LWLockAcquire(oldpartitionLock,
2668  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2669  LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2670  }
2671  else if (oldpartitionLock > newpartitionLock)
2672  {
2673  LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2674  LWLockAcquire(oldpartitionLock,
2675  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2676  }
2677  else
2678  LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2679 
2680  /*
2681  * Look for the old target. If not found, that's OK; no predicate locks
2682  * are affected, so we can just clean up and return. If it does exist,
2683  * walk its list of predicate locks and move or copy them to the new
2684  * target.
2685  */
2686  oldtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2687  &oldtargettag,
2688  oldtargettaghash,
2689  HASH_FIND, NULL);
2690 
2691  if (oldtarget)
2692  {
2693  PREDICATELOCKTARGET *newtarget;
2694  PREDICATELOCK *oldpredlock;
2695  PREDICATELOCKTAG newpredlocktag;
2696 
2697  newtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2698  &newtargettag,
2699  newtargettaghash,
2700  HASH_ENTER_NULL, &found);
2701 
2702  if (!newtarget)
2703  {
2704  /* Failed to allocate due to insufficient shmem */
2705  outOfShmem = true;
2706  goto exit;
2707  }
2708 
2709  /* If we created a new entry, initialize it */
2710  if (!found)
2711  SHMQueueInit(&(newtarget->predicateLocks));
2712 
2713  newpredlocktag.myTarget = newtarget;
2714 
2715  /*
2716  * Loop through all the locks on the old target, replacing them with
2717  * locks on the new target.
2718  */
2719  oldpredlock = (PREDICATELOCK *)
2720  SHMQueueNext(&(oldtarget->predicateLocks),
2721  &(oldtarget->predicateLocks),
2722  offsetof(PREDICATELOCK, targetLink));
2723  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2724  while (oldpredlock)
2725  {
2726  SHM_QUEUE *predlocktargetlink;
2727  PREDICATELOCK *nextpredlock;
2728  PREDICATELOCK *newpredlock;
2729  SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
2730 
2731  predlocktargetlink = &(oldpredlock->targetLink);
2732  nextpredlock = (PREDICATELOCK *)
2733  SHMQueueNext(&(oldtarget->predicateLocks),
2734  predlocktargetlink,
2735  offsetof(PREDICATELOCK, targetLink));
2736  newpredlocktag.myXact = oldpredlock->tag.myXact;
2737 
2738  if (removeOld)
2739  {
2740  SHMQueueDelete(&(oldpredlock->xactLink));
2741  SHMQueueDelete(&(oldpredlock->targetLink));
2742 
2743  hash_search_with_hash_value
2744  (PredicateLockHash,
2745  &oldpredlock->tag,
2746  PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
2747  oldtargettaghash),
2748  HASH_REMOVE, &found);
2749  Assert(found);
2750  }
2751 
2752  newpredlock = (PREDICATELOCK *)
2753  hash_search_with_hash_value(PredicateLockHash,
2754  &newpredlocktag,
2755  PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2756  newtargettaghash),
2757  HASH_ENTER_NULL,
2758  &found);
2759  if (!newpredlock)
2760  {
2761  /* Out of shared memory. Undo what we've done so far. */
2762  LWLockRelease(SerializableXactHashLock);
2763  DeleteLockTarget(newtarget, newtargettaghash);
2764  outOfShmem = true;
2765  goto exit;
2766  }
2767  if (!found)
2768  {
2769  SHMQueueInsertBefore(&(newtarget->predicateLocks),
2770  &(newpredlock->targetLink));
2771  SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
2772  &(newpredlock->xactLink));
2773  newpredlock->commitSeqNo = oldCommitSeqNo;
2774  }
2775  else
2776  {
2777  if (newpredlock->commitSeqNo < oldCommitSeqNo)
2778  newpredlock->commitSeqNo = oldCommitSeqNo;
2779  }
2780 
2781  Assert(newpredlock->commitSeqNo != 0);
2782  Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2783  || (newpredlock->tag.myXact == OldCommittedSxact));
2784 
2785  oldpredlock = nextpredlock;
2786  }
2787  LWLockRelease(SerializableXactHashLock);
2788 
2789  if (removeOld)
2790  {
2791  Assert(SHMQueueEmpty(&oldtarget->predicateLocks));
2792  RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2793  }
2794  }
2795 
2796 
2797 exit:
2798  /* Release partition locks in reverse order of acquisition. */
2799  if (oldpartitionLock < newpartitionLock)
2800  {
2801  LWLockRelease(newpartitionLock);
2802  LWLockRelease(oldpartitionLock);
2803  }
2804  else if (oldpartitionLock > newpartitionLock)
2805  {
2806  LWLockRelease(oldpartitionLock);
2807  LWLockRelease(newpartitionLock);
2808  }
2809  else
2810  LWLockRelease(newpartitionLock);
2811 
2812  if (removeOld)
2813  {
2814  /* We shouldn't run out of memory if we're moving locks */
2815  Assert(!outOfShmem);
2816 
2817  /* Put the scratch entry back */
2818  RestoreScratchTarget(false);
2819  }
2820 
2821  return !outOfShmem;
2822 }
2823 
2824 /*
2825  * Drop all predicate locks of any granularity from the specified relation,
2826  * which can be a heap relation or an index relation. If 'transfer' is true,
2827  * acquire a relation lock on the heap for any transactions with any lock(s)
2828  * on the specified relation.
2829  *
2830  * This requires grabbing a lot of LW locks and scanning the entire lock
2831  * target table for matches. That makes this more expensive than most
2832  * predicate lock management functions, but it will only be called for DDL
2833  * type commands that are expensive anyway, and there are fast returns when
2834  * no serializable transactions are active or the relation is temporary.
2835  *
2836  * We don't use the TransferPredicateLocksToNewTarget function because it
2837  * acquires its own locks on the partitions of the two targets involved,
2838  * and we'll already be holding all partition locks.
2839  *
2840  * We can't throw an error from here, because the call could be from a
2841  * transaction which is not serializable.
2842  *
2843  * NOTE: This is currently only called with transfer set to true, but that may
2844  * change. If we decide to clean up the locks from a table on commit of a
2845  * transaction which executed DROP TABLE, the false condition will be useful.
2846  */
2847 static void
2848 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
2849 {
2850  HASH_SEQ_STATUS seqstat;
2851  PREDICATELOCKTARGET *oldtarget;
2852  PREDICATELOCKTARGET *heaptarget;
2853  Oid dbId;
2854  Oid relId;
2855  Oid heapId;
2856  int i;
2857  bool isIndex;
2858  bool found;
2859  uint32 heaptargettaghash;
2860 
2861  /*
2862  * Bail out quickly if there are no serializable transactions running.
2863  * It's safe to check this without taking locks because the caller is
2864  * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
2865  * would matter here can be acquired while that is held.
2866  */
2867  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
2868  return;
2869 
2870  if (!PredicateLockingNeededForRelation(relation))
2871  return;
2872 
2873  dbId = relation->rd_node.dbNode;
2874  relId = relation->rd_id;
2875  if (relation->rd_index == NULL)
2876  {
2877  isIndex = false;
2878  heapId = relId;
2879  }
2880  else
2881  {
2882  isIndex = true;
2883  heapId = relation->rd_index->indrelid;
2884  }
2885  Assert(heapId != InvalidOid);
2886  Assert(transfer || !isIndex); /* index OID only makes sense with
2887  * transfer */
2888 
2889  /* Retrieve first time needed, then keep. */
2890  heaptargettaghash = 0;
2891  heaptarget = NULL;
2892 
2893  /* Acquire locks on all lock partitions */
2894  LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
2895  for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
2896  LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
2897  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2898 
2899  /*
2900  * Remove the dummy entry to give us scratch space, so we know we'll be
2901  * able to create the new lock target.
2902  */
2903  if (transfer)
2904  RemoveScratchTarget(true);
2905 
2906  /* Scan through target map */
2907  hash_seq_init(&seqstat, PredicateLockTargetHash);
2908 
2909  while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
2910  {
2911  PREDICATELOCK *oldpredlock;
2912 
2913  /*
2914  * Check whether this is a target which needs attention.
2915  */
2916  if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
2917  continue; /* wrong relation id */
2918  if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
2919  continue; /* wrong database id */
2920  if (transfer && !isIndex
2921  && GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
2922  continue; /* already the right lock */
2923 
2924  /*
2925  * If we made it here, we have work to do. We make sure the heap
2926  * relation lock exists, then we walk the list of predicate locks for
2927  * the old target we found, moving all locks to the heap relation lock
2928  * -- unless they already hold that.
2929  */
2930 
2931  /*
2932  * First make sure we have the heap relation target. We only need to
2933  * do this once.
2934  */
2935  if (transfer && heaptarget == NULL)
2936  {
2937  PREDICATELOCKTARGETTAG heaptargettag;
2938 
2939  SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
2940  heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
2941  heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
2942  &heaptargettag,
2943  heaptargettaghash,
2944  HASH_ENTER, &found);
2945  if (!found)
2946  SHMQueueInit(&heaptarget->predicateLocks);
2947  }
2948 
2949  /*
2950  * Loop through all the locks on the old target, replacing them with
2951  * locks on the new target.
2952  */
2953  oldpredlock = (PREDICATELOCK *)
2954  SHMQueueNext(&(oldtarget->predicateLocks),
2955  &(oldtarget->predicateLocks),
2956  offsetof(PREDICATELOCK, targetLink));
2957  while (oldpredlock)
2958  {
2959  PREDICATELOCK *nextpredlock;
2960  PREDICATELOCK *newpredlock;
2961  SerCommitSeqNo oldCommitSeqNo;
2962  SERIALIZABLEXACT *oldXact;
2963 
2964  nextpredlock = (PREDICATELOCK *)
2965  SHMQueueNext(&(oldtarget->predicateLocks),
2966  &(oldpredlock->targetLink),
2967  offsetof(PREDICATELOCK, targetLink));
2968 
2969  /*
2970  * Remove the old lock first. This avoids the chance of running
2971  * out of lock structure entries for the hash table.
2972  */
2973  oldCommitSeqNo = oldpredlock->commitSeqNo;
2974  oldXact = oldpredlock->tag.myXact;
2975 
2976  SHMQueueDelete(&(oldpredlock->xactLink));
2977 
2978  /*
2979  * No need for retail delete from oldtarget list, we're removing
2980  * the whole target anyway.
2981  */
2982  hash_search(PredicateLockHash,
2983  &oldpredlock->tag,
2984  HASH_REMOVE, &found);
2985  Assert(found);
2986 
2987  if (transfer)
2988  {
2989  PREDICATELOCKTAG newpredlocktag;
2990 
2991  newpredlocktag.myTarget = heaptarget;
2992  newpredlocktag.myXact = oldXact;
2993  newpredlock = (PREDICATELOCK *)
2994  hash_search_with_hash_value(PredicateLockHash,
2995  &newpredlocktag,
2996  PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2997  heaptargettaghash),
2998  HASH_ENTER,
2999  &found);
3000  if (!found)
3001  {
3002  SHMQueueInsertBefore(&(heaptarget->predicateLocks),
3003  &(newpredlock->targetLink));
3004  SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
3005  &(newpredlock->xactLink));
3006  newpredlock->commitSeqNo = oldCommitSeqNo;
3007  }
3008  else
3009  {
3010  if (newpredlock->commitSeqNo < oldCommitSeqNo)
3011  newpredlock->commitSeqNo = oldCommitSeqNo;
3012  }
3013 
3014  Assert(newpredlock->commitSeqNo != 0);
3015  Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
3016  || (newpredlock->tag.myXact == OldCommittedSxact));
3017  }
3018 
3019  oldpredlock = nextpredlock;
3020  }
3021 
3022  hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
3023  &found);
3024  Assert(found);
3025  }
3026 
3027  /* Put the scratch entry back */
3028  if (transfer)
3029  RestoreScratchTarget(true);
3030 
3031  /* Release locks in reverse order */
3032  LWLockRelease(SerializableXactHashLock);
3033  for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
3034  LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
3035  LWLockRelease(SerializablePredicateLockListLock);
3036 }
3037 
3038 /*
3039  * TransferPredicateLocksToHeapRelation
3040  * For all transactions, transfer all predicate locks for the given
3041  * relation to a single relation lock on the heap.
3042  */
3043 void
3044 TransferPredicateLocksToHeapRelation(Relation relation)
3045 {
3046  DropAllPredicateLocksFromTable(relation, true);
3047 }
3048 
3049 
3050 /*
3051  * PredicateLockPageSplit
3052  *
3053  * Copies any predicate locks for the old page to the new page.
3054  * Skip if this is a temporary table or toast table.
3055  *
3056  * NOTE: A page split (or overflow) affects all serializable transactions,
3057  * even if it occurs in the context of another transaction isolation level.
3058  *
3059  * NOTE: This currently leaves the local copy of the locks without
3060  * information on the new lock which is in shared memory. This could cause
3061  * problems if enough page splits occur on locked pages without the processes
3062  * which hold the locks getting in and noticing.
3063  */
3064 void
3065 PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
3066  BlockNumber newblkno)
3067 {
3068  PREDICATELOCKTARGETTAG oldtargettag;
3069  PREDICATELOCKTARGETTAG newtargettag;
3070  bool success;
3071 
3072  /*
3073  * Bail out quickly if there are no serializable transactions running.
3074  *
3075  * It's safe to do this check without taking any additional locks. Even if
3076  * a serializable transaction starts concurrently, we know it can't take
3077  * any SIREAD locks on the page being split because the caller is holding
3078  * the associated buffer page lock. Memory reordering isn't an issue; the
3079  * memory barrier in the LWLock acquisition guarantees that this read
3080  * occurs while the buffer page lock is held.
3081  */
3082  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
3083  return;
3084 
3085  if (!PredicateLockingNeededForRelation(relation))
3086  return;
3087 
3088  Assert(oldblkno != newblkno);
3089  Assert(BlockNumberIsValid(oldblkno));
3090  Assert(BlockNumberIsValid(newblkno));
3091 
3092  SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
3093  relation->rd_node.dbNode,
3094  relation->rd_id,
3095  oldblkno);
3096  SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
3097  relation->rd_node.dbNode,
3098  relation->rd_id,
3099  newblkno);
3100 
3101  LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
3102 
3103  /*
3104  * Try copying the locks over to the new page's tag, creating it if
3105  * necessary.
3106  */
3107  success = TransferPredicateLocksToNewTarget(oldtargettag,
3108  newtargettag,
3109  false);
3110 
3111  if (!success)
3112  {
3113  /*
3114  * No more predicate lock entries are available. Failure isn't an
3115  * option here, so promote the page lock to a relation lock.
3116  */
3117 
3118  /* Get the parent relation lock's lock tag */
3119  success = GetParentPredicateLockTag(&oldtargettag,
3120  &newtargettag);
3121  Assert(success);
3122 
3123  /*
3124  * Move the locks to the parent. This shouldn't fail.
3125  *
3126  * Note that here we are removing locks held by other backends,
3127  * leading to a possible inconsistency in their local lock hash table.
3128  * This is OK because we're replacing it with a lock that covers the
3129  * old one.
3130  */
3131  success = TransferPredicateLocksToNewTarget(oldtargettag,
3132  newtargettag,
3133  true);
3134  Assert(success);
3135  }
3136 
3137  LWLockRelease(SerializablePredicateLockListLock);
3138 }
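/*
 * For orientation: index AMs call this from their page-split paths (for
 * example nbtree's split code) while the buffer lock on the page being split
 * is still held, which is what makes the quick SxactGlobalXmin check above
 * safe.
 */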
3139 
3140 /*
3141  * PredicateLockPageCombine
3142  *
3143  * Combines predicate locks for two existing pages.
3144  * Skip if this is a temporary table or toast table.
3145  *
3146  * NOTE: A page combine affects all serializable transactions, even if it
3147  * occurs in the context of another transaction isolation level.
3148  */
3149 void
3150 PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
3151  BlockNumber newblkno)
3152 {
3153  /*
3154  * Page combines differ from page splits in that we ought to be able to
3155  * remove the locks on the old page after transferring them to the new
3156  * page, instead of duplicating them. However, because we can't edit other
3157  * backends' local lock tables, removing the old lock would leave them
3158  * with an entry in their LocalPredicateLockHash for a lock they're not
3159  * holding, which isn't acceptable. So we wind up having to do the same
3160  * work as a page split, acquiring a lock on the new page and keeping the
3161  * old page locked too. That can lead to some false positives, but should
3162  * be rare in practice.
3163  */
3164  PredicateLockPageSplit(relation, oldblkno, newblkno);
3165 }
3166 
3167 /*
3168  * Walk the list of in-progress serializable transactions and find the new
3169  * xmin.
3170  */
3171 static void
3172 SetNewSxactGlobalXmin(void)
3173 {
3174  SERIALIZABLEXACT *sxact;
3175 
3176  Assert(LWLockHeldByMe(SerializableXactHashLock));
3177 
3178  PredXact->SxactGlobalXmin = InvalidTransactionId;
3179  PredXact->SxactGlobalXminCount = 0;
3180 
3181  for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
3182  {
3183  if (!SxactIsRolledBack(sxact)
3184  && !SxactIsCommitted(sxact)
3185  && sxact != OldCommittedSxact)
3186  {
3187  Assert(sxact->xmin != InvalidTransactionId);
3188  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3189  || TransactionIdPrecedes(sxact->xmin,
3190  PredXact->SxactGlobalXmin))
3191  {
3192  PredXact->SxactGlobalXmin = sxact->xmin;
3193  PredXact->SxactGlobalXminCount = 1;
3194  }
3195  else if (TransactionIdEquals(sxact->xmin,
3196  PredXact->SxactGlobalXmin))
3197  PredXact->SxactGlobalXminCount++;
3198  }
3199  }
3200 
3201  OldSerXidSetActiveSerXmin(PredXact->SxactGlobalXmin);
3202 }
3203 
3204 /*
3205  * ReleasePredicateLocks
3206  *
3207  * Releases predicate locks based on completion of the current transaction,
3208  * whether committed or rolled back. It can also be called for a read only
3209  * transaction when it becomes impossible for the transaction to become
3210  * part of a dangerous structure.
3211  *
3212  * We do nothing unless this is a serializable transaction.
3213  *
3214  * This method must ensure that shared memory hash tables are cleaned
3215  * up in some relatively timely fashion.
3216  *
3217  * If this transaction is committing and is holding any predicate locks,
3218  * it must be added to a list of completed serializable transactions still
3219  * holding locks.
3220  */
3221 void
3222 ReleasePredicateLocks(bool isCommit)
3223 {
3224  bool needToClear;
3225  RWConflict conflict,
3226  nextConflict,
3227  possibleUnsafeConflict;
3228  SERIALIZABLEXACT *roXact;
3229 
3230  /*
3231  * We can't trust XactReadOnly here, because a transaction which started
3232  * as READ WRITE can show as READ ONLY later, e.g., within
3233  * subtransactions. We want to flag a transaction as READ ONLY if it
3234  * commits without writing so that de facto READ ONLY transactions get the
3235  * benefit of some RO optimizations, so we will use this local variable to
3236  * get some cleanup logic right which is based on whether the transaction
3237  * was declared READ ONLY at the top level.
3238  */
3239  bool topLevelIsDeclaredReadOnly;
3240 
3241  if (MySerializableXact == InvalidSerializableXact)
3242  {
3243  Assert(LocalPredicateLockHash == NULL);
3244  return;
3245  }
3246 
3247  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3248 
3249  Assert(!isCommit || SxactIsPrepared(MySerializableXact));
3250  Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
3251  Assert(!SxactIsCommitted(MySerializableXact));
3252  Assert(!SxactIsRolledBack(MySerializableXact));
3253 
3254  /* may not be serializable during COMMIT/ROLLBACK PREPARED */
3255  Assert(MySerializableXact->pid == 0 || IsolationIsSerializable());
3256 
3257  /* We'd better not already be on the cleanup list. */
3258  Assert(!SxactIsOnFinishedList(MySerializableXact));
3259 
3260  topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
3261 
3262  /*
3263  * We don't hold XidGenLock lock here, assuming that TransactionId is
3264  * atomic!
3265  *
3266  * If this value is changing, we don't care that much whether we get the
3267  * old or new value -- it is just used to determine how far
3268  * GlobalSerializableXmin must advance before this transaction can be
3269  * fully cleaned up. The worst that could happen is we wait for one more
3270  * transaction to complete before freeing some RAM; correctness of visible
3271  * behavior is not affected.
3272  */
3273  MySerializableXact->finishedBefore = ShmemVariableCache->nextXid;
3274 
3275  /*
3276  * If it's not a commit it's a rollback, and we can clear our locks
3277  * immediately.
3278  */
3279  if (isCommit)
3280  {
3281  MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
3282  MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
3283  /* Recognize implicit read-only transaction (commit without write). */
3284  if (!MyXactDidWrite)
3285  MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
3286  }
3287  else
3288  {
3289  /*
3290  * The DOOMED flag indicates that we intend to roll back this
3291  * transaction and so it should not cause serialization failures for
3292  * other transactions that conflict with it. Note that this flag might
3293  * already be set, if another backend marked this transaction for
3294  * abort.
3295  *
3296  * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
3297  * has been called, and so the SerializableXact is eligible for
3298  * cleanup. This means it should not be considered when calculating
3299  * SxactGlobalXmin.
3300  */
3301  MySerializableXact->flags |= SXACT_FLAG_DOOMED;
3302  MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
3303 
3304  /*
3305  * If the transaction was previously prepared, but is now failing due
3306  * to a ROLLBACK PREPARED or (hopefully very rare) error after the
3307  * prepare, clear the prepared flag. This simplifies conflict
3308  * checking.
3309  */
3310  MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
3311  }
3312 
3313  if (!topLevelIsDeclaredReadOnly)
3314  {
3315  Assert(PredXact->WritableSxactCount > 0);
3316  if (--(PredXact->WritableSxactCount) == 0)
3317  {
3318  /*
3319  * Release predicate locks and rw-conflicts in for all committed
3320  * transactions. There are no longer any transactions which might
3321  * conflict with the locks and no chance for new transactions to
3322  * overlap. Similarly, existing conflicts in can't cause pivots,
3323  * and any conflicts in which could have completed a dangerous
3324  * structure would already have caused a rollback, so any
3325  * remaining ones must be benign.
3326  */
3327  PredXact->CanPartialClearThrough = PredXact->LastSxactCommitSeqNo;
3328  }
3329  }
3330  else
3331  {
3332  /*
3333  * Read-only transactions: clear the list of transactions that might
3334  * make us unsafe. Note that we use 'inLink' for the iteration as
3335  * opposed to 'outLink' for the r/w xacts.
3336  */
3337  possibleUnsafeConflict = (RWConflict)
3338  SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3339  &MySerializableXact->possibleUnsafeConflicts,
3340  offsetof(RWConflictData, inLink));
3341  while (possibleUnsafeConflict)
3342  {
3343  nextConflict = (RWConflict)
3344  SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3345  &possibleUnsafeConflict->inLink,
3346  offsetof(RWConflictData, inLink));
3347 
3348  Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
3349  Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
3350 
3351  ReleaseRWConflict(possibleUnsafeConflict);
3352 
3353  possibleUnsafeConflict = nextConflict;
3354  }
3355  }
3356 
3357  /* Check for conflict out to old committed transactions. */
3358  if (isCommit
3359  && !SxactIsReadOnly(MySerializableXact)
3360  && SxactHasSummaryConflictOut(MySerializableXact))
3361  {
3362  /*
3363  * we don't know which old committed transaction we conflicted with,
3364  * so be conservative and use FirstNormalSerCommitSeqNo here
3365  */
3366  MySerializableXact->SeqNo.earliestOutConflictCommit =
3367  FirstNormalSerCommitSeqNo;
3368  MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3369  }
3370 
3371  /*
3372  * Release all outConflicts to committed transactions. If we're rolling
3373  * back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
3374  * previously committed transactions.
3375  */
3376  conflict = (RWConflict)
3377  SHMQueueNext(&MySerializableXact->outConflicts,
3378  &MySerializableXact->outConflicts,
3379  offsetof(RWConflictData, outLink));
3380  while (conflict)
3381  {
3382  nextConflict = (RWConflict)
3383  SHMQueueNext(&MySerializableXact->outConflicts,
3384  &conflict->outLink,
3385  offsetof(RWConflictData, outLink));
3386 
3387  if (isCommit
3388  && !SxactIsReadOnly(MySerializableXact)
3389  && SxactIsCommitted(conflict->sxactIn))
3390  {
3391  if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
3392  || conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
3393  MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
3394  MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3395  }
3396 
3397  if (!isCommit
3398  || SxactIsCommitted(conflict->sxactIn)
3399  || (conflict->sxactIn->SeqNo.lastCommitBeforeSnapshot >= PredXact->LastSxactCommitSeqNo))
3400  ReleaseRWConflict(conflict);
3401 
3402  conflict = nextConflict;
3403  }
3404 
3405  /*
3406  * Release all inConflicts from committed and read-only transactions. If
3407  * we're rolling back, clear them all.
3408  */
3409  conflict = (RWConflict)
3410  SHMQueueNext(&MySerializableXact->inConflicts,
3411  &MySerializableXact->inConflicts,
3412  offsetof(RWConflictData, inLink));
3413  while (conflict)
3414  {
3415  nextConflict = (RWConflict)
3416  SHMQueueNext(&MySerializableXact->inConflicts,
3417  &conflict->inLink,
3418  offsetof(RWConflictData, inLink));
3419 
3420  if (!isCommit
3421  || SxactIsCommitted(conflict->sxactOut)
3422  || SxactIsReadOnly(conflict->sxactOut))
3423  ReleaseRWConflict(conflict);
3424 
3425  conflict = nextConflict;
3426  }
3427 
3428  if (!topLevelIsDeclaredReadOnly)
3429  {
3430  /*
3431  * Remove ourselves from the list of possible conflicts for concurrent
3432  * READ ONLY transactions, flagging them as unsafe if we have a
3433  * conflict out. If any are waiting DEFERRABLE transactions, wake them
3434  * up if they are known safe or known unsafe.
3435  */
3436  possibleUnsafeConflict = (RWConflict)
3437  SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3438  &MySerializableXact->possibleUnsafeConflicts,
3439  offsetof(RWConflictData, outLink));
3440  while (possibleUnsafeConflict)
3441  {
3442  nextConflict = (RWConflict)
3443  SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3444  &possibleUnsafeConflict->outLink,
3445  offsetof(RWConflictData, outLink));
3446 
3447  roXact = possibleUnsafeConflict->sxactIn;
3448  Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
3449  Assert(SxactIsReadOnly(roXact));
3450 
3451  /* Mark conflicted if necessary. */
3452  if (isCommit
3453  && MyXactDidWrite
3454  && SxactHasConflictOut(MySerializableXact)
3455  && (MySerializableXact->SeqNo.earliestOutConflictCommit
3456  <= roXact->SeqNo.lastCommitBeforeSnapshot))
3457  {
3458  /*
3459  * This releases possibleUnsafeConflict (as well as all other
3460  * possible conflicts for roXact)
3461  */
3462  FlagSxactUnsafe(roXact);
3463  }
3464  else
3465  {
3466  ReleaseRWConflict(possibleUnsafeConflict);
3467 
3468  /*
3469  * If we were the last possible conflict, flag it safe. The
3470  * transaction can now safely release its predicate locks (but
3471  * that transaction's backend has to do that itself).
3472  */
3473  if (SHMQueueEmpty(&roXact->possibleUnsafeConflicts))
3474  roXact->flags |= SXACT_FLAG_RO_SAFE;
3475  }
3476 
3477  /*
3478  * Wake up the process for a waiting DEFERRABLE transaction if we
3479  * now know it's either safe or conflicted.
3480  */
3481  if (SxactIsDeferrableWaiting(roXact) &&
3482  (SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
3483  ProcSendSignal(roXact->pid);
3484 
3485  possibleUnsafeConflict = nextConflict;
3486  }
3487  }
3488 
3489  /*
3490  * Check whether it's time to clean up old transactions. This can only be
3491  * done when the last serializable transaction with the oldest xmin among
3492  * serializable transactions completes. We then find the "new oldest"
3493  * xmin and purge any transactions which finished before this transaction
3494  * was launched.
3495  */
3496  needToClear = false;
3497  if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
3498  {
3499  Assert(PredXact->SxactGlobalXminCount > 0);
3500  if (--(PredXact->SxactGlobalXminCount) == 0)
3501  {
3502  SetNewSxactGlobalXmin();
3503  needToClear = true;
3504  }
3505  }
3506 
3507  LWLockRelease(SerializableXactHashLock);
3508 
3509  LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3510 
3511  /* Add this to the list of transactions to check for later cleanup. */
3512  if (isCommit)
3513  SHMQueueInsertBefore(FinishedSerializableTransactions,
3514  &MySerializableXact->finishedLink);
3515 
3516  if (!isCommit)
3517  ReleaseOneSerializableXact(MySerializableXact, false, false);
3518 
3519  LWLockRelease(SerializableFinishedListLock);
3520 
3521  if (needToClear)
3522  ClearOldPredicateLocks();
3523 
3524  MySerializableXact = InvalidSerializableXact;
3525  MyXactDidWrite = false;
3526 
3527  /* Delete per-transaction lock table */
3528  if (LocalPredicateLockHash != NULL)
3529  {
3530  hash_destroy(LocalPredicateLockHash);
3531  LocalPredicateLockHash = NULL;
3532  }
3533 }
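
The cleanup above is triggered only when the last finishing transaction that shares the oldest serializable xmin drops the shared count to zero. The following is a minimal standalone sketch of that trigger, assuming hypothetical names (XminTracker, finish_transaction) in place of the shared PredXact fields; it is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): the "last holder of the
 * oldest xmin" trigger, with hypothetical names for the shared state.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    unsigned    global_xmin;        /* oldest xmin among serializable xacts */
    int         global_xmin_count;  /* how many active xacts share that xmin */
} XminTracker;

/* Returns true when the caller should run old-transaction cleanup. */
static bool
finish_transaction(XminTracker *t, unsigned my_xmin)
{
    if (my_xmin != t->global_xmin)
        return false;                   /* we were not holding the oldest xmin */
    return --t->global_xmin_count == 0; /* last holder: cleanup time */
}

int
main(void)
{
    XminTracker t = {.global_xmin = 500, .global_xmin_count = 2};

    printf("%d\n", finish_transaction(&t, 500));    /* 0: one holder remains */
    printf("%d\n", finish_transaction(&t, 500));    /* 1: cleanup can run */
    return 0;
}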
3534 
3535 /*
3536  * Clear old predicate locks, belonging to committed transactions that are no
3537  * longer interesting to any in-progress transaction.
3538  */
3539 static void
3540  ClearOldPredicateLocks(void)
3541  {
3542  SERIALIZABLEXACT *finishedSxact;
3543  PREDICATELOCK *predlock;
3544 
3545  /*
3546  * Loop through finished transactions. They are in commit order, so we can
3547  * stop as soon as we find one that's still interesting.
3548  */
3549  LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3550  finishedSxact = (SERIALIZABLEXACT *)
3551  SHMQueueNext(FinishedSerializableTransactions,
3552  FinishedSerializableTransactions,
3553  offsetof(SERIALIZABLEXACT, finishedLink));
3554  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3555  while (finishedSxact)
3556  {
3557  SERIALIZABLEXACT *nextSxact;
3558 
3559  nextSxact = (SERIALIZABLEXACT *)
3560  SHMQueueNext(FinishedSerializableTransactions,
3561  &(finishedSxact->finishedLink),
3562  offsetof(SERIALIZABLEXACT, finishedLink));
3563  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3564  || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
3565  PredXact->SxactGlobalXmin))
3566  {
3567  /*
3568  * This transaction committed before any in-progress transaction
3569  * took its snapshot. It's no longer interesting.
3570  */
3571  LWLockRelease(SerializableXactHashLock);
3572  SHMQueueDelete(&(finishedSxact->finishedLink));
3573  ReleaseOneSerializableXact(finishedSxact, false, false);
3574  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3575  }
3576  else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
3577  && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
3578  {
3579  /*
3580  * Any active transactions that took their snapshot before this
3581  * transaction committed are read-only, so we can clear part of
3582  * its state.
3583  */
3584  LWLockRelease(SerializableXactHashLock);
3585 
3586  if (SxactIsReadOnly(finishedSxact))
3587  {
3588  /* A read-only transaction can be removed entirely */
3589  SHMQueueDelete(&(finishedSxact->finishedLink));
3590  ReleaseOneSerializableXact(finishedSxact, false, false);
3591  }
3592  else
3593  {
3594  /*
3595  * A read-write transaction can only be partially cleared. We
3596  * need to keep the SERIALIZABLEXACT but can release the
3597  * SIREAD locks and conflicts in.
3598  */
3599  ReleaseOneSerializableXact(finishedSxact, true, false);
3600  }
3601 
3602  PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
3603  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3604  }
3605  else
3606  {
3607  /* Still interesting. */
3608  break;
3609  }
3610  finishedSxact = nextSxact;
3611  }
3612  LWLockRelease(SerializableXactHashLock);
3613 
3614  /*
3615  * Loop through predicate locks on dummy transaction for summarized data.
3616  */
3617  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3618  predlock = (PREDICATELOCK *)
3619  SHMQueueNext(&OldCommittedSxact->predicateLocks,
3620  &OldCommittedSxact->predicateLocks,
3621  offsetof(PREDICATELOCK, xactLink));
3622  while (predlock)
3623  {
3624  PREDICATELOCK *nextpredlock;
3625  bool canDoPartialCleanup;
3626 
3627  nextpredlock = (PREDICATELOCK *)
3628  SHMQueueNext(&OldCommittedSxact->predicateLocks,
3629  &predlock->xactLink,
3630  offsetof(PREDICATELOCK, xactLink));
3631 
3632  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3633  Assert(predlock->commitSeqNo != 0);
3634  Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3635  canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
3636  LWLockRelease(SerializableXactHashLock);
3637 
3638  /*
3639  * If this lock originally belonged to an old enough transaction, we
3640  * can release it.
3641  */
3642  if (canDoPartialCleanup)
3643  {
3644  PREDICATELOCKTAG tag;
3645  PREDICATELOCKTARGET *target;
3646  PREDICATELOCKTARGETTAG targettag;
3647  uint32 targettaghash;
3648  LWLock *partitionLock;
3649 
3650  tag = predlock->tag;
3651  target = tag.myTarget;
3652  targettag = target->tag;
3653  targettaghash = PredicateLockTargetTagHashCode(&targettag);
3654  partitionLock = PredicateLockHashPartitionLock(targettaghash);
3655 
3656  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3657 
3658  SHMQueueDelete(&(predlock->targetLink));
3659  SHMQueueDelete(&(predlock->xactLink));
3660 
3661  hash_search_with_hash_value(PredicateLockHash, &tag,
3662  PredicateLockHashCodeFromTargetHashCode(&tag,
3663  targettaghash),
3664  HASH_REMOVE, NULL);
3665  RemoveTargetIfNoLongerUsed(target, targettaghash);
3666 
3667  LWLockRelease(partitionLock);
3668  }
3669 
3670  predlock = nextpredlock;
3671  }
3672 
3673  LWLockRelease(SerializablePredicateLockListLock);
3674  LWLockRelease(SerializableFinishedListLock);
3675 }
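
The first loop above relies on the finished list being kept in commit order, so the scan can stop at the first entry that is still interesting. Below is a standalone sketch of that early-exit pattern, with hypothetical FinishedXact/global_xmin names standing in for the shared list and PredXact->SxactGlobalXmin; it is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): scan finished transactions in
 * commit order and stop at the first one still visible to an active snapshot.
 */
#include <stdio.h>

typedef struct
{
    unsigned    finishedBefore; /* next xid at the moment this xact finished */
} FinishedXact;

int
main(void)
{
    /* finished transactions in commit order, oldest first */
    FinishedXact finished[] = {{90}, {95}, {120}};
    unsigned     global_xmin = 100; /* oldest xmin of any active serializable xact */
    int          released = 0;

    for (int i = 0; i < 3; i++)
    {
        /* finishedBefore <= global_xmin: finished before any active snapshot */
        if (finished[i].finishedBefore > global_xmin)
            break;              /* still interesting, and so is everything later */
        released++;
    }
    printf("released %d of 3 finished transactions\n", released);  /* prints 2 */
    return 0;
}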
3676 
3677 /*
3678  * This is the normal way to delete anything from any of the predicate
3679  * locking hash tables. Given a transaction which we know can be deleted:
3680  * delete all predicate locks held by that transaction and any predicate
3681  * lock targets which are now unreferenced by a lock; delete all conflicts
3682  * for the transaction; delete all xid values for the transaction; then
3683  * delete the transaction.
3684  *
3685  * When the partial flag is set, we can release all predicate locks and
3686  * in-conflict information -- we've established that there are no longer
3687  * any overlapping read write transactions for which this transaction could
3688  * matter -- but keep the transaction entry itself and any outConflicts.
3689  *
3690  * When the summarize flag is set, we've run short of room for sxact data
3691  * and must summarize to the SLRU. Predicate locks are transferred to a
3692  * dummy "old" transaction, with duplicate locks on a single target
3693  * collapsing to a single lock with the "latest" commitSeqNo from among
3694  * the conflicting locks.
3695  */
3696 static void
3697  ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
3698  bool summarize)
3699 {
3700  PREDICATELOCK *predlock;
3701  SERIALIZABLEXIDTAG sxidtag;
3702  RWConflict conflict,
3703  nextConflict;
3704 
3705  Assert(sxact != NULL);
3706  Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
3707  Assert(partial || !SxactIsOnFinishedList(sxact));
3708  Assert(LWLockHeldByMe(SerializableFinishedListLock));
3709 
3710  /*
3711  * First release all the predicate locks held by this xact (or transfer
3712  * them to OldCommittedSxact if summarize is true)
3713  */
3714  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3715  predlock = (PREDICATELOCK *)
3716  SHMQueueNext(&(sxact->predicateLocks),
3717  &(sxact->predicateLocks),
3718  offsetof(PREDICATELOCK, xactLink));
3719  while (predlock)
3720  {
3721  PREDICATELOCK *nextpredlock;
3722  PREDICATELOCKTAG tag;
3723  SHM_QUEUE *targetLink;
3724  PREDICATELOCKTARGET *target;
3725  PREDICATELOCKTARGETTAG targettag;
3726  uint32 targettaghash;
3727  LWLock *partitionLock;
3728 
3729  nextpredlock = (PREDICATELOCK *)
3730  SHMQueueNext(&(sxact->predicateLocks),
3731  &(predlock->xactLink),
3732  offsetof(PREDICATELOCK, xactLink));
3733 
3734  tag = predlock->tag;
3735  targetLink = &(predlock->targetLink);
3736  target = tag.myTarget;
3737  targettag = target->tag;
3738  targettaghash = PredicateLockTargetTagHashCode(&targettag);
3739  partitionLock = PredicateLockHashPartitionLock(targettaghash);
3740 
3741  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3742 
3743  SHMQueueDelete(targetLink);
3744 
3745  hash_search_with_hash_value(PredicateLockHash, &tag,
3746  PredicateLockHashCodeFromTargetHashCode(&tag,
3747  targettaghash),
3748  HASH_REMOVE, NULL);
3749  if (summarize)
3750  {
3751  bool found;
3752 
3753  /* Fold into dummy transaction list. */
3754  tag.myXact = OldCommittedSxact;
3755  predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
3756  PredicateLockHashCodeFromTargetHashCode(&tag,
3757  targettaghash),
3758  HASH_ENTER_NULL, &found);
3759  if (!predlock)
3760  ereport(ERROR,
3761  (errcode(ERRCODE_OUT_OF_MEMORY),
3762  errmsg("out of shared memory"),
3763  errhint("You might need to increase max_pred_locks_per_transaction.")));
3764  if (found)
3765  {
3766  Assert(predlock->commitSeqNo != 0);
3767  Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3768  if (predlock->commitSeqNo < sxact->commitSeqNo)
3769  predlock->commitSeqNo = sxact->commitSeqNo;
3770  }
3771  else
3772  {
3773  SHMQueueInsertBefore(&(target->predicateLocks),
3774  &(predlock->targetLink));
3775  SHMQueueInsertBefore(&(OldCommittedSxact->predicateLocks),
3776  &(predlock->xactLink));
3777  predlock->commitSeqNo = sxact->commitSeqNo;
3778  }
3779  }
3780  else
3781  RemoveTargetIfNoLongerUsed(target, targettaghash);
3782 
3783  LWLockRelease(partitionLock);
3784 
3785  predlock = nextpredlock;
3786  }
3787 
3788  /*
3789  * Rather than retail removal, just re-init the head after we've run
3790  * through the list.
3791  */
3792  SHMQueueInit(&sxact->predicateLocks);
3793 
3794  LWLockRelease(SerializablePredicateLockListLock);
3795 
3796  sxidtag.xid = sxact->topXid;
3797  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3798 
3799  /* Release all outConflicts (unless 'partial' is true) */
3800  if (!partial)
3801  {
3802  conflict = (RWConflict)
3803  SHMQueueNext(&sxact->outConflicts,
3804  &sxact->outConflicts,
3805  offsetof(RWConflictData, outLink));
3806  while (conflict)
3807  {
3808  nextConflict = (RWConflict)
3809  SHMQueueNext(&sxact->outConflicts,
3810  &conflict->outLink,
3811  offsetof(RWConflictData, outLink));
3812  if (summarize)
3813  conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
3814  ReleaseRWConflict(conflict);
3815  conflict = nextConflict;
3816  }
3817  }
3818 
3819  /* Release all inConflicts. */
3820  conflict = (RWConflict)
3821  SHMQueueNext(&sxact->inConflicts,
3822  &sxact->inConflicts,
3823  offsetof(RWConflictData, inLink));
3824  while (conflict)
3825  {
3826  nextConflict = (RWConflict)
3827  SHMQueueNext(&sxact->inConflicts,
3828  &conflict->inLink,
3829  offsetof(RWConflictData, inLink));
3830  if (summarize)
3831  conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
3832  ReleaseRWConflict(conflict);
3833  conflict = nextConflict;
3834  }
3835 
3836  /* Finally, get rid of the xid and the record of the transaction itself. */
3837  if (!partial)
3838  {
3839  if (sxidtag.xid != InvalidTransactionId)
3840  hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
3841  ReleasePredXact(sxact);
3842  }
3843 
3844  LWLockRelease(SerializableXactHashLock);
3845 }
3846 
3847 /*
3848  * Tests whether the given top level transaction is concurrent with
3849  * (overlaps) our current transaction.
3850  *
3851  * We need to identify the top level transaction for SSI, anyway, so pass
3852  * that to this function to save the overhead of checking the snapshot's
3853  * subxip array.
3854  */
3855 static bool
3856  XidIsConcurrent(TransactionId xid)
3857  {
3858  Snapshot snap;
3859  uint32 i;
3860 
3861  Assert(TransactionIdIsValid(xid));
3862  Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));
3863 
3864  snap = GetTransactionSnapshot();
3865 
3866  if (TransactionIdPrecedes(xid, snap->xmin))
3867  return false;
3868 
3869  if (TransactionIdFollowsOrEquals(xid, snap->xmax))
3870  return true;
3871 
3872  for (i = 0; i < snap->xcnt; i++)
3873  {
3874  if (xid == snap->xip[i])
3875  return true;
3876  }
3877 
3878  return false;
3879 }
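
The overlap test above reduces to three checks against the snapshot: below xmin means the writer already finished, at or above xmax means it started later, and otherwise membership in the in-progress array decides. The standalone sketch below uses a hypothetical SimpleSnapshot struct and plain integer comparisons; the real code goes through TransactionIdPrecedes() and friends, which also handle xid wraparound. It is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): snapshot-overlap test with a
 * hypothetical SimpleSnapshot struct standing in for Snapshot.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;

typedef struct
{
    Xid     xmin;       /* every xid below this had finished */
    Xid     xmax;       /* every xid at or above this had not started */
    Xid     xip[4];     /* xids in [xmin, xmax) still in progress */
    int     xcnt;
} SimpleSnapshot;

static bool
xid_is_concurrent(const SimpleSnapshot *snap, Xid xid)
{
    if (xid < snap->xmin)
        return false;           /* finished before our snapshot */
    if (xid >= snap->xmax)
        return true;            /* started after our snapshot */
    for (int i = 0; i < snap->xcnt; i++)
        if (xid == snap->xip[i])
            return true;        /* was still running at snapshot time */
    return false;
}

int
main(void)
{
    SimpleSnapshot snap = {.xmin = 100, .xmax = 110, .xip = {103, 107}, .xcnt = 2};

    printf("%d %d %d\n",
           xid_is_concurrent(&snap, 95),    /* 0: committed before snapshot */
           xid_is_concurrent(&snap, 103),   /* 1: concurrent, in progress */
           xid_is_concurrent(&snap, 120));  /* 1: concurrent, started later */
    return 0;
}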
3880 
3881 /*
3882  * CheckForSerializableConflictOut
3883  * We are reading a tuple which has been modified. If it is visible to
3884  * us but has been deleted, that indicates a rw-conflict out. If it's
3885  * not visible and was created by a concurrent (overlapping)
3886  * serializable transaction, that is also a rw-conflict out.
3887  *
3888  * We will determine the top level xid of the writing transaction with which
3889  * we may be in conflict, and check for overlap with our own transaction.
3890  * If the transactions overlap (i.e., they cannot see each other's writes),
3891  * then we have a conflict out.
3892  *
3893  * This function should be called just about anywhere in heapam.c where a
3894  * tuple has been read. The caller must hold at least a shared lock on the
3895  * buffer, because this function might set hint bits on the tuple. There is
3896  * currently no known reason to call this function from an index AM.
3897  */
3898 void
3899  CheckForSerializableConflictOut(bool visible, Relation relation,
3900  HeapTuple tuple, Buffer buffer,
3901  Snapshot snapshot)
3902 {
3903  TransactionId xid;
3904  SERIALIZABLEXIDTAG sxidtag;
3905  SERIALIZABLEXID *sxid;
3906  SERIALIZABLEXACT *sxact;
3907  HTSV_Result htsvResult;
3908 
3909  if (!SerializationNeededForRead(relation, snapshot))
3910  return;
3911 
3912  /* Check if someone else has already decided that we need to die */
3913  if (SxactIsDoomed(MySerializableXact))
3914  {
3915  ereport(ERROR,
3916  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3917  errmsg("could not serialize access due to read/write dependencies among transactions"),
3918  errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
3919  errhint("The transaction might succeed if retried.")));
3920  }
3921 
3922  /*
3923  * Check to see whether the tuple has been written to by a concurrent
3924  * transaction, either to create it not visible to us, or to delete it
3925  * while it is visible to us. The "visible" bool indicates whether the
3926  * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
3927  * is going on with it.
3928  */
3929  htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
3930  switch (htsvResult)
3931  {
3932  case HEAPTUPLE_LIVE:
3933  if (visible)
3934  return;
3935  xid = HeapTupleHeaderGetXmin(tuple->t_data);
3936  break;
3937  case HEAPTUPLE_RECENTLY_DEAD:
3938  if (!visible)
3939  return;
3940  xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
3941  break;
3942  case HEAPTUPLE_DELETE_IN_PROGRESS:
3943  xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
3944  break;
3945  case HEAPTUPLE_INSERT_IN_PROGRESS:
3946  xid = HeapTupleHeaderGetXmin(tuple->t_data);
3947  break;
3948  case HEAPTUPLE_DEAD:
3949  return;
3950  default:
3951 
3952  /*
3953  * The only way to get to this default clause is if a new value is
3954  * added to the enum type without adding it to this switch
3955  * statement. That's a bug, so elog.
3956  */
3957  elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
3958 
3959  /*
3960  * In spite of having all enum values covered and calling elog on
3961  * this default, some compilers think this is a code path which
3962  * allows xid to be used below without initialization. Silence
3963  * that warning.
3964  */
3965  xid = InvalidTransactionId;
3966  }
3967  Assert(TransactionIdIsValid(xid));
3968  Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
3969 
3970  /*
3971  * Find top level xid. Bail out if xid is too early to be a conflict, or
3972  * if it's our own xid.
3973  */
3974  if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
3975  return;
3976  xid = SubTransGetTopmostTransaction(xid);
3977  if (TransactionIdPrecedes(xid, TransactionXmin))
3978  return;
3979  if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
3980  return;
3981 
3982  /*
3983  * Find sxact or summarized info for the top level xid.
3984  */
3985  sxidtag.xid = xid;
3986  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3987  sxid = (SERIALIZABLEXID *)
3988  hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
3989  if (!sxid)
3990  {
3991  /*
3992  * Transaction not found in "normal" SSI structures. Check whether it
3993  * got pushed out to SLRU storage for "old committed" transactions.
3994  */
3995  SerCommitSeqNo conflictCommitSeqNo;
3996 
3997  conflictCommitSeqNo = OldSerXidGetMinConflictCommitSeqNo(xid);
3998  if (conflictCommitSeqNo != 0)
3999  {
4000  if (conflictCommitSeqNo != InvalidSerCommitSeqNo
4001  && (!SxactIsReadOnly(MySerializableXact)
4002  || conflictCommitSeqNo
4003  <= MySerializableXact->SeqNo.lastCommitBeforeSnapshot))
4004  ereport(ERROR,
4005  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4006  errmsg("could not serialize access due to read/write dependencies among transactions"),
4007  errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
4008  errhint("The transaction might succeed if retried.")));
4009 
4010  if (SxactHasSummaryConflictIn(MySerializableXact)
4011  || !SHMQueueEmpty(&MySerializableXact->inConflicts))
4012  ereport(ERROR,
4013  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4014  errmsg("could not serialize access due to read/write dependencies among transactions"),
4015  errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
4016  errhint("The transaction might succeed if retried.")));
4017 
4018  MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4019  }
4020 
4021  /* It's not serializable or otherwise not important. */
4022  LWLockRelease(SerializableXactHashLock);
4023  return;
4024  }
4025  sxact = sxid->myXact;
4026  Assert(TransactionIdEquals(sxact->topXid, xid));
4027  if (sxact == MySerializableXact || SxactIsDoomed(sxact))
4028  {
4029  /* Can't conflict with ourself or a transaction that will roll back. */
4030  LWLockRelease(SerializableXactHashLock);
4031  return;
4032  }
4033 
4034  /*
4035  * We have a conflict out to a transaction which has a conflict out to a
4036  * summarized transaction. That summarized transaction must have
4037  * committed first, and we can't tell when it committed in relation to our
4038  * snapshot acquisition, so something needs to be canceled.
4039  */
4040  if (SxactHasSummaryConflictOut(sxact))
4041  {
4042  if (!SxactIsPrepared(sxact))
4043  {
4044  sxact->flags |= SXACT_FLAG_DOOMED;
4045  LWLockRelease(SerializableXactHashLock);
4046  return;
4047  }
4048  else
4049  {
4050  LWLockRelease(SerializableXactHashLock);
4051  ereport(ERROR,
4052  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4053  errmsg("could not serialize access due to read/write dependencies among transactions"),
4054  errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
4055  errhint("The transaction might succeed if retried.")));
4056  }
4057  }
4058 
4059  /*
4060  * If this is a read-only transaction and the writing transaction has
4061  * committed, and it doesn't have a rw-conflict to a transaction which
4062  * committed before it, no conflict.
4063  */
4064  if (SxactIsReadOnly(MySerializableXact)
4065  && SxactIsCommitted(sxact)
4066  && !SxactHasSummaryConflictOut(sxact)
4067  && (!SxactHasConflictOut(sxact)
4068  || MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
4069  {
4070  /* Read-only transaction will appear to run first. No conflict. */
4071  LWLockRelease(SerializableXactHashLock);
4072  return;
4073  }
4074 
4075  if (!XidIsConcurrent(xid))
4076  {
4077  /* This write was already in our snapshot; no conflict. */
4078  LWLockRelease(SerializableXactHashLock);
4079  return;
4080  }
4081 
4082  if (RWConflictExists(MySerializableXact, sxact))
4083  {
4084  /* We don't want duplicate conflict records in the list. */
4085  LWLockRelease(SerializableXactHashLock);
4086  return;
4087  }
4088 
4089  /*
4090  * Flag the conflict. But first, if this conflict creates a dangerous
4091  * structure, ereport an error.
4092  */
4093  FlagRWConflict(MySerializableXact, sxact);
4094  LWLockRelease(SerializableXactHashLock);
4095 }
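
One branch above lets a read-only transaction skip recording a conflict out to an already-committed writer unless that writer has (or summarizes) a conflict out to a third transaction that committed before the reader's snapshot. Here is a standalone sketch of that decision, with hypothetical parameter names in place of the SERIALIZABLEXACT fields; it is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): read-only short-circuit.  The
 * parameters stand in for SeqNo.lastCommitBeforeSnapshot, SxactHasConflictOut()
 * and SeqNo.earliestOutConflictCommit.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
readonly_needs_conflict_out(long reader_last_commit_before_snapshot,
                            bool writer_has_conflict_out,
                            long writer_earliest_out_conflict_commit)
{
    if (!writer_has_conflict_out)
        return false;       /* reader can simply be ordered before the writer */

    /* only a problem if that third transaction committed before our snapshot */
    return writer_earliest_out_conflict_commit
        <= reader_last_commit_before_snapshot;
}

int
main(void)
{
    printf("%d\n", readonly_needs_conflict_out(100, true, 90));    /* 1 */
    printf("%d\n", readonly_needs_conflict_out(100, true, 150));   /* 0 */
    printf("%d\n", readonly_needs_conflict_out(100, false, 0));    /* 0 */
    return 0;
}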
4096 
4097 /*
4098  * Check a particular target for rw-dependency conflict in. A subroutine of
4099  * CheckForSerializableConflictIn().
4100  */
4101 static void
4102  CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
4103  {
4104  uint32 targettaghash;
4105  LWLock *partitionLock;
4106  PREDICATELOCKTARGET *target;
4107  PREDICATELOCK *predlock;
4108  PREDICATELOCK *mypredlock = NULL;
4109  PREDICATELOCKTAG mypredlocktag;
4110 
4111  Assert(MySerializableXact != InvalidSerializableXact);
4112 
4113  /*
4114  * The same hash and LW lock apply to the lock target and the lock itself.
4115  */
4116  targettaghash = PredicateLockTargetTagHashCode(targettag);
4117  partitionLock = PredicateLockHashPartitionLock(targettaghash);
4118  LWLockAcquire(partitionLock, LW_SHARED);
4119  target = (PREDICATELOCKTARGET *)
4120  hash_search_with_hash_value(PredicateLockTargetHash,
4121  targettag, targettaghash,
4122  HASH_FIND, NULL);
4123  if (!target)
4124  {
4125  /* Nothing has this target locked; we're done here. */
4126  LWLockRelease(partitionLock);
4127  return;
4128  }
4129 
4130  /*
4131  * Each lock for an overlapping transaction represents a conflict: a
4132  * rw-dependency in to this transaction.
4133  */
4134  predlock = (PREDICATELOCK *)
4135  SHMQueueNext(&(target->predicateLocks),
4136  &(target->predicateLocks),
4137  offsetof(PREDICATELOCK, targetLink));
4138  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4139  while (predlock)
4140  {
4141  SHM_QUEUE *predlocktargetlink;
4142  PREDICATELOCK *nextpredlock;
4143  SERIALIZABLEXACT *sxact;
4144 
4145  predlocktargetlink = &(predlock->targetLink);
4146  nextpredlock = (PREDICATELOCK *)
4147  SHMQueueNext(&(target->predicateLocks),
4148  predlocktargetlink,
4149  offsetof(PREDICATELOCK, targetLink));
4150 
4151  sxact = predlock->tag.myXact;
4152  if (sxact == MySerializableXact)
4153  {
4154  /*
4155  * If we're getting a write lock on a tuple, we don't need a
4156  * predicate (SIREAD) lock on the same tuple. We can safely remove
4157  * our SIREAD lock, but we'll defer doing so until after the loop
4158  * because that requires upgrading to an exclusive partition lock.
4159  *
4160  * We can't use this optimization within a subtransaction because
4161  * the subtransaction could roll back, and we would be left
4162  * without any lock at the top level.
4163  */
4164  if (!IsSubTransaction()
4165  && GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
4166  {
4167  mypredlock = predlock;
4168  mypredlocktag = predlock->tag;
4169  }
4170  }
4171  else if (!SxactIsDoomed(sxact)
4172  && (!SxactIsCommitted(sxact)
4173  || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4174  sxact->finishedBefore))
4175  && !RWConflictExists(sxact, MySerializableXact))
4176  {
4177  LWLockRelease(SerializableXactHashLock);
4178  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4179 
4180  /*
4181  * Re-check after getting exclusive lock because the other
4182  * transaction may have flagged a conflict.
4183  */
4184  if (!SxactIsDoomed(sxact)
4185  && (!SxactIsCommitted(sxact)
4186  || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4187  sxact->finishedBefore))
4188  && !RWConflictExists(sxact, MySerializableXact))
4189  {
4190  FlagRWConflict(sxact, MySerializableXact);
4191  }
4192 
4193  LWLockRelease(SerializableXactHashLock);
4194  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4195  }
4196 
4197  predlock = nextpredlock;
4198  }
4199  LWLockRelease(SerializableXactHashLock);
4200  LWLockRelease(partitionLock);
4201 
4202  /*
4203  * If we found one of our own SIREAD locks to remove, remove it now.
4204  *
4205  * At this point our transaction already has an ExclusiveRowLock on the
4206  * relation, so we are OK to drop the predicate lock on the tuple, if
4207  * found, without fearing that another write against the tuple will occur
4208  * before the MVCC information makes it to the buffer.
4209  */
4210  if (mypredlock != NULL)
4211  {
4212  uint32 predlockhashcode;
4213  PREDICATELOCK *rmpredlock;
4214 
4215  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
4216  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4217  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4218 
4219  /*
4220  * Remove the predicate lock from shared memory, if it wasn't removed
4221  * while the locks were released. One way that could happen is from
4222  * autovacuum cleaning up an index.
4223  */
4224  predlockhashcode = PredicateLockHashCodeFromTargetHashCode
4225  (&mypredlocktag, targettaghash);
4226  rmpredlock = (PREDICATELOCK *)
4227  hash_search_with_hash_value(PredicateLockHash,
4228  &mypredlocktag,
4229  predlockhashcode,
4230  HASH_FIND, NULL);
4231  if (rmpredlock != NULL)
4232  {
4233  Assert(rmpredlock == mypredlock);
4234 
4235  SHMQueueDelete(&(mypredlock->targetLink));
4236  SHMQueueDelete(&(mypredlock->xactLink));
4237 
4238  rmpredlock = (PREDICATELOCK *)
4239  hash_search_with_hash_value(PredicateLockHash,
4240  &mypredlocktag,
4241  predlockhashcode,
4242  HASH_REMOVE, NULL);
4243  Assert(rmpredlock == mypredlock);
4244 
4245  RemoveTargetIfNoLongerUsed(target, targettaghash);
4246  }
4247 
4248  LWLockRelease(SerializableXactHashLock);
4249  LWLockRelease(partitionLock);
4250  LWLockRelease(SerializablePredicateLockListLock);
4251 
4252  if (rmpredlock != NULL)
4253  {
4254  /*
4255  * Remove entry in local lock table if it exists. It's OK if it
4256  * doesn't exist; that means the lock was transferred to a new
4257  * target by a different backend.
4258  */
4259  hash_search_with_hash_value(LocalPredicateLockHash,
4260  targettag, targettaghash,
4261  HASH_REMOVE, NULL);
4262 
4263  DecrementParentLocks(targettag);
4264  }
4265  }
4266 }
4267 
4268 /*
4269  * CheckForSerializableConflictIn
4270  * We are writing the given tuple. If that indicates a rw-conflict
4271  * in from another serializable transaction, take appropriate action.
4272  *
4273  * Skip checking for any granularity for which a parameter is missing.
4274  *
4275  * A tuple update or delete is in conflict if we have a predicate lock
4276  * against the relation or page in which the tuple exists, or against the
4277  * tuple itself.
4278  */
4279 void
4281  Buffer buffer)
4282 {
4283  PREDICATELOCKTARGETTAG targettag;
4284 
4285  if (!SerializationNeededForWrite(relation))
4286  return;
4287 
4288  /* Check if someone else has already decided that we need to die */
4289  if (SxactIsDoomed(MySerializableXact))
4290  ereport(ERROR,
4291  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4292  errmsg("could not serialize access due to read/write dependencies among transactions"),
4293  errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
4294  errhint("The transaction might succeed if retried.")));
4295 
4296  /*
4297  * We're doing a write which might cause rw-conflicts now or later.
4298  * Memorize that fact.
4299  */
4300  MyXactDidWrite = true;
4301 
4302  /*
4303  * It is important that we check for locks from the finest granularity to
4304  * the coarsest granularity, so that granularity promotion doesn't cause
4305  * us to miss a lock. The new (coarser) lock will be acquired before the
4306  * old (finer) locks are released.
4307  *
4308  * It is not possible to take and hold a lock across the checks for all
4309  * granularities because each target could be in a separate partition.
4310  */
4311  if (tuple != NULL)
4312  {
4313  SET_PREDICATELOCKTARGETTAG_TUPLE(&targettag,
4314  relation->rd_node.dbNode,
4315  relation->rd_id,
4316  ItemPointerGetBlockNumber(&(tuple->t_self)),
4317  ItemPointerGetOffsetNumber(&(tuple->t_self)));
4318  CheckTargetForConflictsIn(&targettag);
4319  }
4320 
4321  if (BufferIsValid(buffer))
4322  {
4323  SET_PREDICATELOCKTARGETTAG_PAGE(&targettag,
4324  relation->rd_node.dbNode,
4325  relation->rd_id,
4326  BufferGetBlockNumber(buffer));
4327  CheckTargetForConflictsIn(&targettag);
4328  }
4329 
4330  SET_PREDICATELOCKTARGETTAG_RELATION(&targettag,
4331  relation->rd_node.dbNode,
4332  relation->rd_id);
4333  CheckTargetForConflictsIn(&targettag);
4334 }
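
The function above checks the tuple, then its page, then the whole relation, so a promotion to a coarser lock that happens between checks cannot be missed. Below is a standalone sketch of that ordering over a toy lock table; TargetTag, held_locks and target_is_locked are hypothetical stand-ins for PREDICATELOCKTARGETTAG and the shared hash tables, and the sketch is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): finest-to-coarsest conflict-in
 * check against a toy table of SIREAD lock targets.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    unsigned    rel;
    unsigned    page;       /* ~0u here means "whole relation" */
    unsigned    offset;     /* ~0u here means "whole page" */
} TargetTag;

/* toy lock table: one page-level SIREAD lock held by some other transaction */
static const TargetTag held_locks[] = {
    {16384, 7, ~0u},
};

static bool
target_is_locked(const TargetTag *tag)
{
    for (size_t i = 0; i < sizeof(held_locks) / sizeof(held_locks[0]); i++)
        if (memcmp(&held_locks[i], tag, sizeof(TargetTag)) == 0)
            return true;
    return false;
}

int
main(void)
{
    /* writing tuple (7,3) of relation 16384: check tuple, then page, then relation */
    TargetTag   tags[] = {
        {16384, 7, 3},      /* tuple */
        {16384, 7, ~0u},    /* page */
        {16384, ~0u, ~0u},  /* relation */
    };
    const char *names[] = {"tuple", "page", "relation"};

    for (int i = 0; i < 3; i++)
        if (target_is_locked(&tags[i]))
            printf("rw-conflict in: SIREAD lock on the %s covers our write\n",
                   names[i]);
    return 0;
}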
4335 
4336 /*
4337  * CheckTableForSerializableConflictIn
4338  * The entire table is going through a DDL-style logical mass delete
4339  * like TRUNCATE or DROP TABLE. If that causes a rw-conflict in from
4340  * another serializable transaction, take appropriate action.
4341  *
4342  * While these operations do not operate entirely within the bounds of
4343  * snapshot isolation, they can occur inside a serializable transaction, and
4344  * will logically occur after any reads which saw rows which were destroyed
4345  * by these operations, so we do what we can to serialize properly under
4346  * SSI.
4347  *
4348  * The relation passed in must be a heap relation. Any predicate lock of any
4349  * granularity on the heap will cause a rw-conflict in to this transaction.
4350  * Predicate locks on indexes do not matter because they only exist to guard
4351  * against conflicting inserts into the index, and this is a mass *delete*.
4352  * When a table is truncated or dropped, the index will also be truncated
4353  * or dropped, and we'll deal with locks on the index when that happens.
4354  *
4355  * Dropping or truncating a table also needs to drop any existing predicate
4356  * locks on heap tuples or pages, because they're about to go away. This
4357  * should be done before altering the predicate locks because the transaction
4358  * could be rolled back because of a conflict, in which case the lock changes
4359  * are not needed. (At the moment, we don't actually bother to drop the
4360  * existing locks on a dropped or truncated table. That might
4361  * lead to some false positives, but it doesn't seem worth the trouble.)
4362  */
4363 void
4364  CheckTableForSerializableConflictIn(Relation relation)
4365  {
4366  HASH_SEQ_STATUS seqstat;
4367  PREDICATELOCKTARGET *target;
4368  Oid dbId;
4369  Oid heapId;
4370  int i;
4371 
4372  /*
4373  * Bail out quickly if there are no serializable transactions running.
4374  * It's safe to check this without taking locks because the caller is
4375  * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
4376  * would matter here can be acquired while that is held.
4377  */
4378  if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
4379  return;
4380 
4381  if (!SerializationNeededForWrite(relation))
4382  return;
4383 
4384  /*
4385  * We're doing a write which might cause rw-conflicts now or later.
4386  * Memorize that fact.
4387  */
4388  MyXactDidWrite = true;
4389 
4390  Assert(relation->rd_index == NULL); /* not an index relation */
4391 
4392  dbId = relation->rd_node.dbNode;
4393  heapId = relation->rd_id;
4394 
4395  LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
4396  for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
4397  LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
4398  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4399 
4400  /* Scan through target list */
4401  hash_seq_init(&seqstat, PredicateLockTargetHash);
4402 
4403  while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
4404  {
4405  PREDICATELOCK *predlock;
4406 
4407  /*
4408  * Check whether this is a target which needs attention.
4409  */
4410  if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
4411  continue; /* wrong relation id */
4412  if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
4413  continue; /* wrong database id */
4414 
4415  /*
4416  * Loop through locks for this target and flag conflicts.
4417  */
4418  predlock = (PREDICATELOCK *)
4419  SHMQueueNext(&(target->predicateLocks),
4420  &(target->predicateLocks),
4421  offsetof(PREDICATELOCK, targetLink));
4422  while (predlock)
4423  {
4424  PREDICATELOCK *nextpredlock;
4425 
4426  nextpredlock = (PREDICATELOCK *)
4427  SHMQueueNext(&(target->predicateLocks),
4428  &(predlock->targetLink),
4429  offsetof(PREDICATELOCK, targetLink));
4430 
4431  if (predlock->tag.myXact != MySerializableXact
4432  && !RWConflictExists(predlock->tag.myXact, MySerializableXact))
4433  {
4434  FlagRWConflict(predlock->tag.myXact, MySerializableXact);
4435  }
4436 
4437  predlock = nextpredlock;
4438  }
4439  }
4440 
4441  /* Release locks in reverse order */
4442  LWLockRelease(SerializableXactHashLock);
4443  for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
4444  LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
4445  LWLockRelease(SerializablePredicateLockListLock);
4446 }
4447 
4448 
4449 /*
4450  * Flag a rw-dependency between two serializable transactions.
4451  *
4452  * The caller is responsible for ensuring that we have a LW lock on
4453  * the transaction hash table.
4454  */
4455 static void
4456  FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
4457  {
4458  Assert(reader != writer);
4459 
4460  /* First, see if this conflict causes failure. */
4461  OnConflict_CheckForSerializationFailure(reader, writer);
4462 
4463  /* Actually do the conflict flagging. */
4464  if (reader == OldCommittedSxact)
4465  writer->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
4466  else if (writer == OldCommittedSxact)
4467  reader->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4468  else
4469  SetRWConflict(reader, writer);
4470 }
4471 
4472 /*----------------------------------------------------------------------------
4473  * We are about to add a RW-edge to the dependency graph - check that we don't
4474  * introduce a dangerous structure by doing so, and abort one of the
4475  * transactions if so.
4476  *
4477  * A serialization failure can only occur if there is a dangerous structure
4478  * in the dependency graph:
4479  *
4480  * Tin ------> Tpivot ------> Tout
4481  * rw rw
4482  *
4483  * Furthermore, Tout must commit first.
4484  *
4485  * One more optimization is that if Tin is declared READ ONLY (or commits
4486  * without writing), we can only have a problem if Tout committed before Tin
4487  * acquired its snapshot.
4488  *----------------------------------------------------------------------------
4489  */
4490 static void
4491  OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
4492  SERIALIZABLEXACT *writer)
4493 {
4494  bool failure;
4495  RWConflict conflict;
4496 
4497  Assert(LWLockHeldByMe(SerializableXactHashLock));
4498 
4499  failure = false;
4500 
4501  /*------------------------------------------------------------------------
4502  * Check for already-committed writer with rw-conflict out flagged
4503  * (conflict-flag on W means that T2 committed before W):
4504  *
4505  * R ------> W ------> T2
4506  * rw rw
4507  *
4508  * That is a dangerous structure, so we must abort. (Since the writer
4509  * has already committed, we must be the reader)
4510  *------------------------------------------------------------------------
4511  */
4512  if (SxactIsCommitted(writer)
4513  && (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
4514  failure = true;
4515 
4516  /*------------------------------------------------------------------------
4517  * Check whether the writer has become a pivot with an out-conflict
4518  * committed transaction (T2), and T2 committed first:
4519  *
4520  * R ------> W ------> T2
4521  * rw rw
4522  *
4523  * Because T2 must've committed first, there is no anomaly if:
4524  * - the reader committed before T2
4525  * - the writer committed before T2
4526  * - the reader is a READ ONLY transaction and the reader was concurrent
4527  * with T2 (= reader acquired its snapshot before T2 committed)
4528  *
4529  * We also handle the case that T2 is prepared but not yet committed
4530  * here. In that case T2 has already checked for conflicts, so if it
4531  * commits first, making the above conflict real, it's too late for it
4532  * to abort.
4533  *------------------------------------------------------------------------
4534  */
4535  if (!failure)
4536  {
4537  if (SxactHasSummaryConflictOut(writer))
4538  {
4539  failure = true;
4540  conflict = NULL;
4541  }
4542  else
4543  conflict = (RWConflict)
4544  SHMQueueNext(&writer->outConflicts,
4545  &writer->outConflicts,
4546  offsetof(RWConflictData, outLink));
4547  while (conflict)
4548  {
4549  SERIALIZABLEXACT *t2 = conflict->sxactIn;
4550 
4551  if (SxactIsPrepared(t2)
4552  && (!SxactIsCommitted(reader)
4553  || t2->prepareSeqNo <= reader->commitSeqNo)
4554  && (!SxactIsCommitted(writer)
4555  || t2->prepareSeqNo <= writer->commitSeqNo)
4556  && (!SxactIsReadOnly(reader)
4557  || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
4558  {
4559  failure = true;
4560  break;
4561  }
4562  conflict = (RWConflict)
4563  SHMQueueNext(&writer->outConflicts,
4564  &conflict->outLink,
4565  offsetof(RWConflictData, outLink));
4566  }
4567  }
4568 
4569  /*------------------------------------------------------------------------
4570  * Check whether the reader has become a pivot with a writer
4571  * that's committed (or prepared):
4572  *
4573  * T0 ------> R ------> W
4574  * rw rw
4575  *
4576  * Because W must've committed first for an anomaly to occur, there is no
4577  * anomaly if:
4578  * - T0 committed before the writer
4579  * - T0 is READ ONLY, and overlaps the writer
4580  *------------------------------------------------------------------------
4581  */
4582  if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
4583  {
4584  if (SxactHasSummaryConflictIn(reader))
4585  {
4586  failure = true;
4587  conflict = NULL;
4588  }
4589  else
4590  conflict = (RWConflict)
4591  SHMQueueNext(&reader->inConflicts,
4592  &reader->inConflicts,
4593  offsetof(RWConflictData, inLink));
4594  while (conflict)
4595  {
4596  SERIALIZABLEXACT *t0 = conflict->sxactOut;
4597 
4598  if (!SxactIsDoomed(t0)
4599  && (!SxactIsCommitted(t0)
4600  || t0->commitSeqNo >= writer->prepareSeqNo)
4601  && (!SxactIsReadOnly(t0)
4602  || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
4603  {
4604  failure = true;
4605  break;
4606  }
4607  conflict = (RWConflict)
4608  SHMQueueNext(&reader->inConflicts,
4609  &conflict->inLink,
4610  offsetof(RWConflictData, inLink));
4611  }
4612  }
4613 
4614  if (failure)
4615  {
4616  /*
4617  * We have to kill a transaction to avoid a possible anomaly from
4618  * occurring. If the writer is us, we can just ereport() to cause a
4619  * transaction abort. Otherwise we flag the writer for termination,
4620  * causing it to abort when it tries to commit. However, if the writer
4621  * is a prepared transaction, already prepared, we can't abort it
4622  * anymore, so we have to kill the reader instead.
4623  */
4624  if (MySerializableXact == writer)
4625  {
4626  LWLockRelease(SerializableXactHashLock);
4627  ereport(ERROR,
4628  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4629  errmsg("could not serialize access due to read/write dependencies among transactions"),
4630  errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
4631  errhint("The transaction might succeed if retried.")));
4632  }
4633  else if (SxactIsPrepared(writer))
4634  {
4635  LWLockRelease(SerializableXactHashLock);
4636 
4637  /* if we're not the writer, we have to be the reader */
4638  Assert(MySerializableXact == reader);
4639  ereport(ERROR,
4640  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4641  errmsg("could not serialize access due to read/write dependencies among transactions"),
4642  errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
4643  errhint("The transaction might succeed if retried.")));
4644  }
4645  writer->flags |= SXACT_FLAG_DOOMED;
4646  }
4647 }
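
All of the checks above look for the same shape: a pivot with a rw-conflict in and a rw-conflict out, where the out-conflict transaction commits first. Below is a standalone sketch of that test on three toy transactions; Xact and commitSeqNo are hypothetical stand-ins for SERIALIZABLEXACT and its commit/prepare sequence numbers, and the sketch is not part of predicate.c.

/*
 * Standalone sketch (not part of predicate.c): is "pivot" the middle of a
 * dangerous structure Tin --rw--> pivot --rw--> Tout with Tout first to commit?
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    const char *name;
    long        commitSeqNo;    /* 0 means "not committed yet" */
} Xact;

static bool
committed_first(const Xact *a, const Xact *b)
{
    /* a committed, and b is either still running or committed later */
    return a->commitSeqNo != 0 &&
        (b->commitSeqNo == 0 || a->commitSeqNo < b->commitSeqNo);
}

static bool
is_dangerous_pivot(const Xact *tin, const Xact *pivot, const Xact *tout)
{
    /* only an anomaly if Tout commits before both other transactions */
    return committed_first(tout, tin) && committed_first(tout, pivot);
}

int
main(void)
{
    Xact    t1 = {"T1", 0};     /* still running */
    Xact    t2 = {"T2", 0};     /* still running */
    Xact    t3 = {"T3", 42};    /* committed */

    printf("%s pivot dangerous: %d\n", t2.name, is_dangerous_pivot(&t1, &t2, &t3)); /* 1 */
    printf("%s pivot dangerous: %d\n", t2.name, is_dangerous_pivot(&t3, &t2, &t1)); /* 0 */
    return 0;
}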
4648 
4649 /*
4650  * PreCommit_CheckForSerializableConflicts
4651  * Check for dangerous structures in a serializable transaction
4652  * at commit.
4653  *
4654  * We're checking for a dangerous structure as each conflict is recorded.
4655  * The only way we could have a problem at commit is if this is the "out"
4656  * side of a pivot, and neither the "in" side nor the pivot has yet
4657  * committed.
4658  *
4659  * If a dangerous structure is found, the pivot (the near conflict) is
4660  * marked for death, because rolling back another transaction might mean
4661  * that we flail without ever making progress. This transaction is
4662  * committing writes, so letting it commit ensures progress. If we
4663  * canceled the far conflict, it might immediately fail again on retry.
4664  */
4665 void
4666  PreCommit_CheckForSerializationFailure(void)
4667  {
4668  RWConflict nearConflict;
4669 
4670  if (MySerializableXact == InvalidSerializableXact)
4671  return;
4672 
4673  Assert(IsolationIsSerializable());
4674 
4675  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4676 
4677  /* Check if someone else has already decided that we need to die */
4678  if (SxactIsDoomed(MySerializableXact))
4679  {
4680  LWLockRelease(SerializableXactHashLock);
4681  ereport(ERROR,
4682  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4683  errmsg("could not serialize access due to read/write dependencies among transactions"),
4684  errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
4685  errhint("The transaction might succeed if retried.")));
4686  }
4687 
4688  nearConflict = (RWConflict)
4689  SHMQueueNext(&MySerializableXact->inConflicts,
4690  &MySerializableXact->inConflicts,
4691  offsetof(RWConflictData, inLink));
4692  while (nearConflict)
4693  {
4694  if (!SxactIsCommitted(nearConflict->sxactOut)
4695  && !SxactIsDoomed(nearConflict->sxactOut))
4696  {
4697  RWConflict farConflict;
4698 
4699  farConflict = (RWConflict)
4700  SHMQueueNext(&nearConflict->sxactOut->inConflicts,
4701  &nearConflict->sxactOut->inConflicts,
4702  offsetof(RWConflictData, inLink));
4703  while (farConflict)
4704  {
4705  if (farConflict->sxactOut == MySerializableXact
4706  || (!SxactIsCommitted(farConflict->sxactOut)
4707  && !SxactIsReadOnly(farConflict->sxactOut)
4708  && !SxactIsDoomed(farConflict->sxactOut)))
4709  {
4710  /*
4711  * Normally, we kill the pivot transaction to make sure we
4712  * make progress if the failing transaction is retried.
4713  * However, we can't kill it if it's already prepared, so
4714  * in that case we commit suicide instead.
4715  */
4716  if (SxactIsPrepared(nearConflict->sxactOut))
4717  {
4718  LWLockRelease(SerializableXactHashLock);
4719  ereport(ERROR,
4720  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4721  errmsg("could not serialize access due to read/write dependencies among transactions"),
4722  errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
4723  errhint("The transaction might succeed if retried.")));
4724  }
4725  nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
4726  break;
4727  }
4728  farConflict = (RWConflict)
4729  SHMQueueNext(&nearConflict->sxactOut->inConflicts,
4730  &farConflict->inLink,
4731  offsetof(RWConflictData, inLink));
4732  }
4733  }
4734 
4735  nearConflict = (RWConflict)
4736  SHMQueueNext(&MySerializableXact->inConflicts,
4737  &nearConflict->inLink,
4738  offsetof(RWConflictData, inLink));
4739  }
4740 
4741  MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
4742  MySerializableXact->flags |= SXACT_FLAG_PREPARED;
4743 
4744  LWLockRelease(SerializableXactHashLock);
4745 }
4746 
4747 /*------------------------------------------------------------------------*/
4748 
4749 /*
4750  * Two-phase commit support
4751  */
4752 
4753 /*
4754  * AtPrepare_PredicateLocks
4755  * Do the preparatory work for a PREPARE: make 2PC state file
4756  * records for all predicate locks currently held.
4757  */
4758 void
4759  AtPrepare_PredicateLocks(void)
4760  {
4761  PREDICATELOCK *predlock;
4762  SERIALIZABLEXACT *sxact;
4763  TwoPhasePredicateRecord record;
4764  TwoPhasePredicateXactRecord *xactRecord;
4765  TwoPhasePredicateLockRecord *lockRecord;
4766 
4767  sxact = MySerializableXact;
4768  xactRecord = &(record.data.xactRecord);
4769  lockRecord = &(record.data.lockRecord);
4770 
4771  if (MySerializableXact == InvalidSerializableXact)
4772  return;
4773 
4774  /* Generate an xact record for our SERIALIZABLEXACT */
4775  record.type = TWOPHASEPREDICATERECORD_XACT;
4776  xactRecord->xmin = MySerializableXact->xmin;
4777  xactRecord->flags = MySerializableXact->flags;
4778 
4779  /*
4780  * Note that we don't include the list of conflicts in our out in the
4781  * statefile, because new conflicts can be added even after the
4782  * transaction prepares. We'll just make a conservative assumption during
4783  * recovery instead.
4784  */
4785 
4786  RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
4787  &record, sizeof(record));
4788 
4789  /*
4790  * Generate a lock record for each lock.
4791  *
4792  * To do this, we need to walk the predicate lock list in our sxact rather
4793  * than using the local predicate lock table because the latter is not
4794  * guaranteed to be accurate.
4795  */
4796  LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
4797 
4798  predlock = (PREDICATELOCK *)
4799  SHMQueueNext(&(sxact->predicateLocks),
4800  &(sxact->predicateLocks),
4801  offsetof(PREDICATELOCK, xactLink));
4802 
4803  while (predlock != NULL)
4804  {
4805  record.type = TWOPHASEPREDICATERECORD_LOCK;
4806  lockRecord->target = predlock->tag.myTarget->tag;
4807 
4808  RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
4809  &record, sizeof(record));
4810 
4811  predlock = (PREDICATELOCK *)
4812  SHMQueueNext(&(sxact->predicateLocks),
4813  &(predlock->xactLink),
4814  offsetof(PREDICATELOCK, xactLink));
4815  }
4816 
4817  LWLockRelease(SerializablePredicateLockListLock);
4818 }
4819 
4820 /*
4821  * PostPrepare_Locks
4822  * Clean up after successful PREPARE. Unlike the non-predicate
4823  * lock manager, we do not need to transfer locks to a dummy
4824  * PGPROC because our SERIALIZABLEXACT will stay around
4825  * anyway. We only need to clean up our local state.
4826  */
4827 void
4828  PostPrepare_PredicateLocks(TransactionId xid)
4829  {
4830  if (MySerializableXact == InvalidSerializableXact)
4831  return;
4832 
4833  Assert(SxactIsPrepared(MySerializableXact));
4834 
4835  MySerializableXact->pid = 0;
4836 
4837  hash_destroy(LocalPredicateLockHash);
4838  LocalPredicateLockHash = NULL;
4839 
4840  MySerializableXact = InvalidSerializableXact;
4841  MyXactDidWrite = false;
4842 }
4843 
4844 /*
4845  * PredicateLockTwoPhaseFinish
4846  * Release a prepared transaction's predicate locks once it
4847  * commits or aborts.
4848  */
4849 void
4850  PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
4851  {
4852  SERIALIZABLEXID *sxid;
4853  SERIALIZABLEXIDTAG sxidtag;
4854 
4855  sxidtag.xid = xid;
4856 
4857  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4858  sxid = (SERIALIZABLEXID *)
4859  hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4860  LWLockRelease(SerializableXactHashLock);
4861 
4862  /* xid will not be found if it wasn't a serializable transaction */
4863  if (sxid == NULL)
4864  return;
4865 
4866  /* Release its locks */
4867  MySerializableXact = sxid->myXact;
4868  MyXactDidWrite = true; /* conservatively assume that we wrote
4869  * something */
4870  ReleasePredicateLocks(isCommit);
4871 }
4872 
4873 /*
4874  * Re-acquire a predicate lock belonging to a transaction that was prepared.
4875  */
4876 void
4877  predicatelock_twophase_recover(TransactionId xid, uint16 info,
4878  void *recdata, uint32 len)
4879 {
4880  TwoPhasePredicateRecord *record;
4881 
4882  Assert(len == sizeof(TwoPhasePredicateRecord));
4883 
4884  record = (TwoPhasePredicateRecord *) recdata;
4885 
4886  Assert((record->type == TWOPHASEPREDICATERECORD_XACT) ||
4887  (record->type == TWOPHASEPREDICATERECORD_LOCK));
4888 
4889  if (record->type == TWOPHASEPREDICATERECORD_XACT)
4890  {
4891  /* Per-transaction record. Set up a SERIALIZABLEXACT. */
4892  TwoPhasePredicateXactRecord *xactRecord;
4893  SERIALIZABLEXACT *sxact;
4894  SERIALIZABLEXID *sxid;
4895  SERIALIZABLEXIDTAG sxidtag;
4896  bool found;
4897 
4898  xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;
4899 
4900  LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4901  sxact = CreatePredXact();
4902  if (!sxact)
4903  ereport(ERROR,
4904  (errcode(ERRCODE_OUT_OF_MEMORY),
4905  errmsg("out of shared memory")));
4906 
4907  /* vxid for a prepared xact is InvalidBackendId/xid; no pid */
4908  sxact->vxid.backendId = InvalidBackendId;
4909  sxact->vxid.localTransactionId = (LocalTransactionId) xid;
4910  sxact->pid = 0;
4911 
4912  /* a prepared xact hasn't committed yet */
4913  sxact->prepareSeqNo = RecoverySerCommitSeqNo;
4914  sxact->commitSeqNo = InvalidSerCommitSeqNo;
4915  sxact->finishedBefore = InvalidTransactionId;
4916 
4917  sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;
4918 
4919  /*
4920  * Don't need to track this; no transactions running at the time the
4921  * recovered xact started are still active, except possibly other
4922  * prepared xacts and we don't care whether those are RO_SAFE or not.
4923  */
4924  SHMQueueInit(&(sxact->possibleUnsafeConflicts));
4925 
4926  SHMQueueInit(&(sxact->predicateLocks));
4927  SHMQueueElemInit(&(sxact->finishedLink));
4928 
4929  sxact->topXid = xid;
4930  sxact->xmin = xactRecord->xmin;
4931  sxact->flags = xactRecord->flags;
4932  Assert(SxactIsPrepared(sxact));
4933  if (!SxactIsReadOnly(sxact))
4934  {
4935  ++(PredXact->WritableSxactCount);
4936  Assert(PredXact->WritableSxactCount <=
4937  (MaxBackends + max_prepared_xacts));
4938  }
4939 
4940  /*
4941  * We don't know whether the transaction had any conflicts or not, so
4942  * we'll conservatively assume that it had both a conflict in and a
4943  * conflict out, and represent that with the summary conflict flags.
4944  */
4945  SHMQueueInit(&(sxact->outConflicts));
4946  SHMQueueInit(&(sxact->inConflicts));
4947  sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
4948  sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4949 
4950  /* Register the transaction's xid */
4951  sxidtag.xid = xid;
4952  sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
4953  &sxidtag,
4954  HASH_ENTER, &found);
4955  Assert(sxid != NULL);
4956  Assert(!found);
4957  sxid->myXact = (SERIALIZABLEXACT *) sxact;
4958 
4959  /*
4960  * Update global xmin. Note that this is a special case compared to
4961  * registering a normal transaction, because the global xmin might go
4962  * backwards. That's OK, because until recovery is over we're not
4963  * going to complete any transactions or create any non-prepared
4964  * transactions, so there's no danger of throwing away.
4965  */
4966  if ((!TransactionIdIsValid(PredXact->SxactGlobalXmin)) ||
4967  (TransactionIdFollows(PredXact->SxactGlobalXmin, sxact->xmin)))
4968  {
4969  PredXact->SxactGlobalXmin = sxact->xmin;
4970  PredXact->SxactGlobalXminCount = 1;
4971  OldSerXidSetActiveSerXmin(sxact->xmin);
4972  }
4973  else if (TransactionIdEquals(sxact->xmin, PredXact->SxactGlobalXmin))
4974  {
4975  Assert(PredXact->SxactGlobalXminCount > 0);
4976  PredXact->SxactGlobalXminCount++;
4977  }
4978 
4979  LWLockRelease(SerializableXactHashLock);
4980  }
4981  else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
4982  {
4983  /* Lock record. Recreate the PREDICATELOCK */
4984  TwoPhasePredicateLockRecord *lockRecord;
4985  SERIALIZABLEXID *sxid;
4986  SERIALIZABLEXACT *sxact;
4987  SERIALIZABLEXIDTAG sxidtag;
4988  uint32 targettaghash;
4989 
4990  lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
4991  targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);
4992 
4993  LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4994  sxidtag.xid = xid;
4995  sxid = (SERIALIZABLEXID *)
4996  hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4997  LWLockRelease(SerializableXactHashLock);
4998 
4999  Assert(sxid != NULL);
5000  sxact = sxid->myXact;
5001  Assert(sxact != InvalidSerializableXact);
5002 
5003  CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
5004  }
5005 }
Definition: predicate.c:579
#define SXACT_FLAG_DEFERRABLE_WAITING
int MaxBackends
Definition: globals.c:136
static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
Definition: predicate.c:4491
#define DEBUG2
Definition: elog.h:24
struct LOCALPREDICATELOCK LOCALPREDICATELOCK
#define RWConflictDataSize
void PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
Definition: predicate.c:4850
static bool success
VirtualTransactionId vxid
static SERIALIZABLEXACT * NextPredXact(SERIALIZABLEXACT *sxact)
Definition: predicate.c:609
bool IsUnderPostmaster
Definition: globals.c:110
#define GET_PREDICATELOCKTARGETTAG_TYPE(locktag)
int errdetail(const char *fmt,...)
Definition: elog.c:873
VariableCache ShmemVariableCache
Definition: varsup.c:34
static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
Definition: predicate.c:2393
#define InvalidTransactionId
Definition: transam.h:31
#define SXACT_FLAG_CONFLICT_OUT
#define GET_PREDICATELOCKTARGETTAG_DB(locktag)
unsigned int uint32
Definition: c.h:325
#define SXACT_FLAG_PREPARED
#define FirstBootstrapObjectId
Definition: transam.h:93
TransactionId xmax
Definition: snapshot.h:69
TransactionId xmin
Definition: snapshot.h:68
uint32 LocalTransactionId
Definition: c.h:476
SerCommitSeqNo lastCommitBeforeSnapshot
TransactionId GetTopTransactionIdIfAny(void)
Definition: xact.c:404
#define SxactIsROSafe(sxact)
Definition: predicate.c:280
TransactionId headXid
Definition: predicate.c:336
#define ereport(elevel, rest)
Definition: elog.h:122
#define SxactHasSummaryConflictOut(sxact)
Definition: predicate.c:272
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId * xip
Definition: snapshot.h:79
Oid rd_id
Definition: rel.h:86
#define InvalidSerCommitSeqNo
static void RestoreScratchTarget(bool lockheld)
Definition: predicate.c:2036
void TransferPredicateLocksToHeapRelation(Relation relation)
Definition: predicate.c:3044
union SERIALIZABLEXACT::@110 SeqNo
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1775
PREDICATELOCKTARGETTAG * locktags
static SERIALIZABLEXACT * FirstPredXact(void)
Definition: predicate.c:594
SerCommitSeqNo commitSeqNo
bool SHMQueueEmpty(const SHM_QUEUE *queue)
Definition: shmqueue.c:180
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:732
static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
Definition: predicate.c:2270
#define RWConflictPoolHeaderDataSize
SerCommitSeqNo HavePartialClearedThrough
#define HASH_BLOBS
Definition: hsearch.h:88
PREDICATELOCKTAG tag
Size mul_size(Size s1, Size s2)
Definition: shmem.c:492
SerCommitSeqNo CanPartialClearThrough
#define PredicateLockTargetTagHashCode(predicatelocktargettag)
Definition: predicate.c:291
#define InvalidBackendId
Definition: backendid.h:23
static int MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
Definition: predicate.c:2168
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
Size add_size(Size s1, Size s2)
Definition: shmem.c:475
Pointer SHMQueueNext(const SHM_QUEUE *queue, const SHM_QUEUE *curElem, Size linkOffset)
Definition: shmqueue.c:145
int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
Definition: slru.c:467
Size keysize
Definition: hsearch.h:72
SerCommitSeqNo earliestOutConflictCommit
static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag, PREDICATELOCKTARGETTAG *parent)
Definition: predicate.c:1947
#define InvalidOid
Definition: postgres_ext.h:36
PREDICATELOCKTARGETTAG tag
bool ShmemAddrIsValid(const void *addr)
Definition: shmem.c:263
void ReleasePredicateLocks(bool isCommit)
Definition: predicate.c:3222
static SerCommitSeqNo OldSerXidGetMinConflictCommitSeqNo(TransactionId xid)
Definition: predicate.c:905
bool XactReadOnly
Definition: xact.c:76
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
RelFileNode rd_node
Definition: rel.h:55
SerCommitSeqNo commitSeqNo
uint64 SerCommitSeqNo
#define SXACT_FLAG_DOOMED
#define RecoverySerCommitSeqNo
#define SxactHasConflictOut(sxact)
Definition: predicate.c:278
static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, bool summarize)
Definition: predicate.c:3697
#define Assert(condition)
Definition: c.h:699
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void AtPrepare_PredicateLocks(void)
Definition: predicate.c:4759
BackendId backendId
Definition: lock.h:65
Snapshot GetSerializableTransactionSnapshot(Snapshot snapshot)
Definition: predicate.c:1590
static bool OldSerXidPagePrecedesLogically(int p, int q)
Definition: predicate.c:773
#define SxactIsDeferrableWaiting(sxact)
Definition: predicate.c:279
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
static void OldSerXidSetActiveSerXmin(TransactionId xid)
Definition: predicate.c:946
static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
Definition: predicate.c:2205
#define SetInvalidVirtualTransactionId(vxid)
Definition: lock.h:77
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:318
struct PREDICATELOCKTARGETTAG PREDICATELOCKTARGETTAG
#define SXACT_FLAG_ROLLED_BACK
SerCommitSeqNo prepareSeqNo
size_t Size
Definition: c.h:433
Snapshot GetSnapshotData(Snapshot snapshot)
Definition: procarray.c:1509
static HTAB * LocalPredicateLockHash
Definition: predicate.c:402
SerCommitSeqNo LastSxactCommitSeqNo
union TwoPhasePredicateRecord::@111 data
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1121
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void CheckTableForSerializableConflictIn(Relation relation)
Definition: predicate.c:4364
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1389
SERIALIZABLEXACT * OldCommittedSxact
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1379
#define HASH_FIXED_SIZE
Definition: hsearch.h:96
static SERIALIZABLEXACT * OldCommittedSxact
Definition: predicate.c:350
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:517
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2497
#define PredicateLockHashPartitionLockByIndex(i)
Definition: predicate.c:250
static OldSerXidControl oldSerXidControl
Definition: predicate.c:342
static bool SerializationNeededForRead(Relation relation, Snapshot snapshot)
Definition: predicate.c:496
bool IsSubTransaction(void)
Definition: xact.c:4495
void SHMQueueElemInit(SHM_QUEUE *queue)
Definition: shmqueue.c:57
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
void RegisterPredicateLockingXid(TransactionId xid)
Definition: predicate.c:1834
int max_predicate_locks_per_relation
Definition: predicate.c:360
uint32 xcnt
Definition: snapshot.h:80
void * palloc(Size size)
Definition: mcxt.c:924
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define IsolationIsSerializable()
Definition: xact.h:51
void SHMQueueInit(SHM_QUEUE *queue)
Definition: shmqueue.c:36
int max_predicate_locks_per_page
Definition: predicate.c:361
static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact)
Definition: predicate.c:694
int i
#define SXACT_FLAG_READ_ONLY
static const PREDICATELOCKTARGETTAG ScratchTargetTag
Definition: predicate.c:394
int GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
Definition: predicate.c:1538
#define TargetTagIsCoveredBy(covered_target, covering_target)
Definition: predicate.c:222
void PredicateLockPageCombine(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3150
void SHMQueueDelete(SHM_QUEUE *queue)
Definition: shmqueue.c:68
static void SummarizeOldestCommittedSxact(void)
Definition: predicate.c:1411
SERIALIZABLEXACT * myXact
#define OldSerXidValue(slotno, xid)
Definition: predicate.c:327
void CheckPointPredicate(void)
Definition: predicate.c:997
static bool MyXactDidWrite
Definition: predicate.c:410
#define SXACT_FLAG_RO_UNSAFE
#define elog
Definition: elog.h:219
struct PredXactListElementData * PredXactListElement
void InitPredicateLocks(void)
Definition: predicate.c:1062
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:317
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define SxactIsROUnsafe(sxact)
Definition: predicate.c:281
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:123
static SHM_QUEUE * FinishedSerializableTransactions
Definition: predicate.c:387
static uint32 ScratchTargetTagHash
Definition: predicate.c:395
static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
Definition: predicate.c:1986
static LWLock * ScratchPartitionLock
Definition: predicate.c:396
TwoPhasePredicateLockRecord lockRecord
bool ProcArrayInstallImportedXmin(TransactionId xmin, VirtualTransactionId *sourcevxid)
Definition: procarray.c:1797
static void SetNewSxactGlobalXmin(void)
Definition: predicate.c:3172
Definition: proc.h:95
int Buffer
Definition: buf.h:23
#define SXACT_FLAG_SUMMARY_CONFLICT_IN
static SERIALIZABLEXACT * CreatePredXact(void)
Definition: predicate.c:562
PredXactListElement element
long val
Definition: informix.c:689
int SimpleLruZeroPage(SlruCtl ctl, int pageno)
Definition: slru.c:263
#define SxactIsCommitted(sxact)
Definition: predicate.c:266
void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3065
#define PredXactListElementDataSize
#define OldSerXidNextPage(page)
Definition: predicate.c:325
#define offsetof(type, field)
Definition: c.h:622
static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
Definition: predicate.c:834
TransactionId tailXid
Definition: predicate.c:337
PREDICATELOCKTARGET * myTarget
HashValueFunc hash
Definition: hsearch.h:74
#define HASH_FUNCTION
Definition: hsearch.h:89
#define InvalidPid
Definition: miscadmin.h:31
SERIALIZABLEXACT * sxactOut
#define NUM_PREDICATELOCK_PARTITIONS
Definition: lwlock.h:121
void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, LWLock *ctllock, const char *subdir, int tranche_id)
Definition: slru.c:165