PostgreSQL Source Code  git master
deadlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "utils/memutils.h"
Include dependency graph for deadlock.c:

Go to the source code of this file.

Data Structures

struct  EDGE
 
struct  WAIT_ORDER
 
struct  DEADLOCK_INFO
 

Functions

static bool DeadLockCheckRecurse (PGPROC *proc)
 
static int TestConfiguration (PGPROC *startProc)
 
static bool FindLockCycle (PGPROC *checkProc, EDGE *softEdges, int *nSoftEdges)
 
static bool FindLockCycleRecurse (PGPROC *checkProc, int depth, EDGE *softEdges, int *nSoftEdges)
 
static bool FindLockCycleRecurseMember (PGPROC *checkProc, PGPROC *checkProcLeader, int depth, EDGE *softEdges, int *nSoftEdges)
 
static bool ExpandConstraints (EDGE *constraints, int nConstraints)
 
static bool TopoSort (LOCK *lock, EDGE *constraints, int nConstraints, PGPROC **ordering)
 
void InitDeadLockChecking (void)
 
DeadLockState DeadLockCheck (PGPROC *proc)
 
PGPROC * GetBlockingAutoVacuumPgproc (void)
 
void DeadLockReport (void)
 
void RememberSimpleDeadLock (PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
 

Variables

static PGPROC ** visitedProcs
 
static int nVisitedProcs
 
static PGPROC ** topoProcs
 
static int * beforeConstraints
 
static int * afterConstraints
 
static WAIT_ORDER * waitOrders
 
static int nWaitOrders
 
static PGPROC ** waitOrderProcs
 
static EDGE * curConstraints
 
static int nCurConstraints
 
static int maxCurConstraints
 
static EDGE * possibleConstraints
 
static int nPossibleConstraints
 
static int maxPossibleConstraints
 
static DEADLOCK_INFO * deadlockDetails
 
static int nDeadlockDetails
 
static PGPROC * blocking_autovacuum_proc = NULL
 

Function Documentation

◆ DeadLockCheck()

DeadLockState DeadLockCheck ( PGPROC * proc)

Definition at line 217 of file deadlock.c.

218 {
219  /* Initialize to "no constraints" */
220  nCurConstraints = 0;
222  nWaitOrders = 0;
223 
224  /* Initialize to not blocked by an autovacuum worker */
226 
227  /* Search for deadlocks and possible fixes */
228  if (DeadLockCheckRecurse(proc))
229  {
230  /*
231  * Call FindLockCycle one more time, to record the correct
232  * deadlockDetails[] for the basic state with no rearrangements.
233  */
234  int nSoftEdges;
235 
236  TRACE_POSTGRESQL_DEADLOCK_FOUND();
237 
238  nWaitOrders = 0;
239  if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
240  elog(FATAL, "deadlock seems to have disappeared");
241 
242  return DS_HARD_DEADLOCK; /* cannot find a non-deadlocked state */
243  }
244 
245  /* Apply any needed rearrangements of wait queues */
246  for (int i = 0; i < nWaitOrders; i++)
247  {
248  LOCK *lock = waitOrders[i].lock;
249  PGPROC **procs = waitOrders[i].procs;
250  int nProcs = waitOrders[i].nProcs;
251  dclist_head *waitQueue = &lock->waitProcs;
252 
253  Assert(nProcs == dclist_count(waitQueue));
254 
255 #ifdef DEBUG_DEADLOCK
256  PrintLockQueue(lock, "DeadLockCheck:");
257 #endif
258 
259  /* Reset the queue and re-add procs in the desired order */
260  dclist_init(waitQueue);
261  for (int j = 0; j < nProcs; j++)
262  dclist_push_tail(waitQueue, &procs[j]->links);
263 
264 #ifdef DEBUG_DEADLOCK
265  PrintLockQueue(lock, "rearranged to:");
266 #endif
267 
268  /* See if any waiters for the lock can be woken up now */
269  ProcLockWakeup(GetLocksMethodTable(lock), lock);
270  }
271 
272  /* Return code tells caller if we had to escape a deadlock or not */
273  if (nWaitOrders > 0)
274  return DS_SOFT_DEADLOCK;
275  else if (blocking_autovacuum_proc != NULL)
277  else
278  return DS_NO_DEADLOCK;
279 }
#define Assert(condition)
Definition: c.h:858
static WAIT_ORDER * waitOrders
Definition: deadlock.c:111
static bool FindLockCycle(PGPROC *checkProc, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:443
static bool DeadLockCheckRecurse(PGPROC *proc)
Definition: deadlock.c:309
static EDGE * possibleConstraints
Definition: deadlock.c:121
static int nWaitOrders
Definition: deadlock.c:112
static int nCurConstraints
Definition: deadlock.c:117
static PGPROC * blocking_autovacuum_proc
Definition: deadlock.c:128
static int nPossibleConstraints
Definition: deadlock.c:122
#define FATAL
Definition: elog.h:41
#define elog(elevel,...)
Definition: elog.h:224
static void dclist_push_tail(dclist_head *head, dlist_node *node)
Definition: ilist.h:709
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
int j
Definition: isn.c:74
int i
Definition: isn.c:73
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition: lock.c:474
@ DS_HARD_DEADLOCK
Definition: lock.h:513
@ DS_BLOCKED_BY_AUTOVACUUM
Definition: lock.h:514
@ DS_NO_DEADLOCK
Definition: lock.h:511
@ DS_SOFT_DEADLOCK
Definition: lock.h:512
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1706
Definition: lock.h:309
dclist_head waitProcs
Definition: lock.h:317
Definition: proc.h:157
PGPROC ** procs
Definition: deadlock.c:59
LOCK * lock
Definition: deadlock.c:58
int nProcs
Definition: deadlock.c:60
static struct link * links
Definition: zic.c:299

References Assert, blocking_autovacuum_proc, dclist_count(), dclist_init(), dclist_push_tail(), DeadLockCheckRecurse(), DS_BLOCKED_BY_AUTOVACUUM, DS_HARD_DEADLOCK, DS_NO_DEADLOCK, DS_SOFT_DEADLOCK, elog, FATAL, FindLockCycle(), GetLocksMethodTable(), i, j, links, WAIT_ORDER::lock, nCurConstraints, nPossibleConstraints, WAIT_ORDER::nProcs, nWaitOrders, possibleConstraints, ProcLockWakeup(), WAIT_ORDER::procs, waitOrders, and LOCK::waitProcs.

Referenced by CheckDeadLock().

◆ DeadLockCheckRecurse()

static bool DeadLockCheckRecurse ( PGPROC * proc)
static

Definition at line 309 of file deadlock.c.

310 {
311  int nEdges;
312  int oldPossibleConstraints;
313  bool savedList;
314  int i;
315 
316  nEdges = TestConfiguration(proc);
317  if (nEdges < 0)
318  return true; /* hard deadlock --- no solution */
319  if (nEdges == 0)
320  return false; /* good configuration found */
322  return true; /* out of room for active constraints? */
323  oldPossibleConstraints = nPossibleConstraints;
325  {
326  /* We can save the edge list in possibleConstraints[] */
327  nPossibleConstraints += nEdges;
328  savedList = true;
329  }
330  else
331  {
332  /* Not room; will need to regenerate the edges on-the-fly */
333  savedList = false;
334  }
335 
336  /*
337  * Try each available soft edge as an addition to the configuration.
338  */
339  for (i = 0; i < nEdges; i++)
340  {
341  if (!savedList && i > 0)
342  {
343  /* Regenerate the list of possible added constraints */
344  if (nEdges != TestConfiguration(proc))
345  elog(FATAL, "inconsistent results during deadlock check");
346  }
348  possibleConstraints[oldPossibleConstraints + i];
349  nCurConstraints++;
350  if (!DeadLockCheckRecurse(proc))
351  return false; /* found a valid solution! */
352  /* give up on that added constraint, try again */
353  nCurConstraints--;
354  }
355  nPossibleConstraints = oldPossibleConstraints;
356  return true; /* no solution found */
357 }
static int TestConfiguration(PGPROC *startProc)
Definition: deadlock.c:375
static int maxPossibleConstraints
Definition: deadlock.c:123
static int maxCurConstraints
Definition: deadlock.c:118
static EDGE * curConstraints
Definition: deadlock.c:116
int MaxBackends
Definition: globals.c:143

References curConstraints, elog, FATAL, i, MaxBackends, maxCurConstraints, maxPossibleConstraints, nCurConstraints, nPossibleConstraints, possibleConstraints, and TestConfiguration().

Referenced by DeadLockCheck().

◆ DeadLockReport()

void DeadLockReport ( void  )

Definition at line 1072 of file deadlock.c.

1073 {
1074  StringInfoData clientbuf; /* errdetail for client */
1075  StringInfoData logbuf; /* errdetail for server log */
1076  StringInfoData locktagbuf;
1077  int i;
1078 
1079  initStringInfo(&clientbuf);
1080  initStringInfo(&logbuf);
1081  initStringInfo(&locktagbuf);
1082 
1083  /* Generate the "waits for" lines sent to the client */
1084  for (i = 0; i < nDeadlockDetails; i++)
1085  {
1086  DEADLOCK_INFO *info = &deadlockDetails[i];
1087  int nextpid;
1088 
1089  /* The last proc waits for the first one... */
1090  if (i < nDeadlockDetails - 1)
1091  nextpid = info[1].pid;
1092  else
1093  nextpid = deadlockDetails[0].pid;
1094 
1095  /* reset locktagbuf to hold next object description */
1096  resetStringInfo(&locktagbuf);
1097 
1098  DescribeLockTag(&locktagbuf, &info->locktag);
1099 
1100  if (i > 0)
1101  appendStringInfoChar(&clientbuf, '\n');
1102 
1103  appendStringInfo(&clientbuf,
1104  _("Process %d waits for %s on %s; blocked by process %d."),
1105  info->pid,
1107  info->lockmode),
1108  locktagbuf.data,
1109  nextpid);
1110  }
1111 
1112  /* Duplicate all the above for the server ... */
1113  appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
1114 
1115  /* ... and add info about query strings */
1116  for (i = 0; i < nDeadlockDetails; i++)
1117  {
1118  DEADLOCK_INFO *info = &deadlockDetails[i];
1119 
1120  appendStringInfoChar(&logbuf, '\n');
1121 
1122  appendStringInfo(&logbuf,
1123  _("Process %d: %s"),
1124  info->pid,
1126  }
1127 
1129 
1130  ereport(ERROR,
1132  errmsg("deadlock detected"),
1133  errdetail_internal("%s", clientbuf.data),
1134  errdetail_log("%s", logbuf.data),
1135  errhint("See server log for query details.")));
1136 }
const char * pgstat_get_backend_current_activity(int pid, bool checkUser)
static int nDeadlockDetails
Definition: deadlock.c:125
static DEADLOCK_INFO * deadlockDetails
Definition: deadlock.c:124
int errdetail_internal(const char *fmt,...)
Definition: elog.c:1230
int errhint(const char *fmt,...)
Definition: elog.c:1317
int errcode(int sqlerrcode)
Definition: elog.c:857
int errmsg(const char *fmt,...)
Definition: elog.c:1070
int errdetail_log(const char *fmt,...)
Definition: elog.c:1251
#define _(x)
Definition: elog.c:90
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1243
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4038
#define ERRCODE_T_R_DEADLOCK_DETECTED
Definition: pgbench.c:77
void pgstat_report_deadlock(void)
void resetStringInfo(StringInfo str)
Definition: stringinfo.c:78
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:97
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:233
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:194
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
LOCKTAG locktag
Definition: deadlock.c:73
LOCKMODE lockmode
Definition: deadlock.c:74
uint8 locktag_lockmethodid
Definition: lock.h:171

References _, appendBinaryStringInfo(), appendStringInfo(), appendStringInfoChar(), StringInfoData::data, deadlockDetails, DescribeLockTag(), ereport, errcode(), ERRCODE_T_R_DEADLOCK_DETECTED, errdetail_internal(), errdetail_log(), errhint(), errmsg(), ERROR, GetLockmodeName(), i, initStringInfo(), StringInfoData::len, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG::locktag_lockmethodid, nDeadlockDetails, pgstat_get_backend_current_activity(), pgstat_report_deadlock(), DEADLOCK_INFO::pid, and resetStringInfo().

Referenced by WaitOnLock().

◆ ExpandConstraints()

static bool ExpandConstraints ( EDGE * constraints,
int  nConstraints 
)
static

Definition at line 787 of file deadlock.c.

789 {
790  int nWaitOrderProcs = 0;
791  int i,
792  j;
793 
794  nWaitOrders = 0;
795 
796  /*
797  * Scan constraint list backwards. This is because the last-added
798  * constraint is the only one that could fail, and so we want to test it
799  * for inconsistency first.
800  */
801  for (i = nConstraints; --i >= 0;)
802  {
803  LOCK *lock = constraints[i].lock;
804 
805  /* Did we already make a list for this lock? */
806  for (j = nWaitOrders; --j >= 0;)
807  {
808  if (waitOrders[j].lock == lock)
809  break;
810  }
811  if (j >= 0)
812  continue;
813  /* No, so allocate a new list */
814  waitOrders[nWaitOrders].lock = lock;
815  waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
817  nWaitOrderProcs += dclist_count(&lock->waitProcs);
818  Assert(nWaitOrderProcs <= MaxBackends);
819 
820  /*
821  * Do the topo sort. TopoSort need not examine constraints after this
822  * one, since they must be for different locks.
823  */
824  if (!TopoSort(lock, constraints, i + 1,
825  waitOrders[nWaitOrders].procs))
826  return false;
827  nWaitOrders++;
828  }
829  return true;
830 }
static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints, PGPROC **ordering)
Definition: deadlock.c:859
static PGPROC ** waitOrderProcs
Definition: deadlock.c:113
LOCK * lock
Definition: deadlock.c:50

References Assert, dclist_count(), i, j, EDGE::lock, WAIT_ORDER::lock, MaxBackends, WAIT_ORDER::nProcs, nWaitOrders, WAIT_ORDER::procs, TopoSort(), waitOrderProcs, waitOrders, and LOCK::waitProcs.

Referenced by TestConfiguration().

◆ FindLockCycle()

static bool FindLockCycle ( PGPROC * checkProc,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 443 of file deadlock.c.

446 {
447  nVisitedProcs = 0;
448  nDeadlockDetails = 0;
449  *nSoftEdges = 0;
450  return FindLockCycleRecurse(checkProc, 0, softEdges, nSoftEdges);
451 }
static int nVisitedProcs
Definition: deadlock.c:103
static bool FindLockCycleRecurse(PGPROC *checkProc, int depth, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:454

References FindLockCycleRecurse(), nDeadlockDetails, and nVisitedProcs.

Referenced by DeadLockCheck(), and TestConfiguration().

◆ FindLockCycleRecurse()

static bool FindLockCycleRecurse ( PGPROC * checkProc,
int  depth,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 454 of file deadlock.c.

458 {
459  int i;
460  dlist_iter iter;
461 
462  /*
463  * If this process is a lock group member, check the leader instead. (Note
464  * that we might be the leader, in which case this is a no-op.)
465  */
466  if (checkProc->lockGroupLeader != NULL)
467  checkProc = checkProc->lockGroupLeader;
468 
469  /*
470  * Have we already seen this proc?
471  */
472  for (i = 0; i < nVisitedProcs; i++)
473  {
474  if (visitedProcs[i] == checkProc)
475  {
476  /* If we return to starting point, we have a deadlock cycle */
477  if (i == 0)
478  {
479  /*
480  * record total length of cycle --- outer levels will now fill
481  * deadlockDetails[]
482  */
483  Assert(depth <= MaxBackends);
484  nDeadlockDetails = depth;
485 
486  return true;
487  }
488 
489  /*
490  * Otherwise, we have a cycle but it does not include the start
491  * point, so say "no deadlock".
492  */
493  return false;
494  }
495  }
496  /* Mark proc as seen */
498  visitedProcs[nVisitedProcs++] = checkProc;
499 
500  /*
501  * If the process is waiting, there is an outgoing waits-for edge to each
502  * process that blocks it.
503  */
504  if (checkProc->links.next != NULL && checkProc->waitLock != NULL &&
505  FindLockCycleRecurseMember(checkProc, checkProc, depth, softEdges,
506  nSoftEdges))
507  return true;
508 
509  /*
510  * If the process is not waiting, there could still be outgoing waits-for
511  * edges if it is part of a lock group, because other members of the lock
512  * group might be waiting even though this process is not. (Given lock
513  * groups {A1, A2} and {B1, B2}, if A1 waits for B1 and B2 waits for A2,
 514  * that is a deadlock even if neither B1 nor A2 is waiting for anything.)
515  */
516  dlist_foreach(iter, &checkProc->lockGroupMembers)
517  {
518  PGPROC *memberProc;
519 
520  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
521 
522  if (memberProc->links.next != NULL && memberProc->waitLock != NULL &&
523  memberProc != checkProc &&
524  FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
525  nSoftEdges))
526  return true;
527  }
528 
529  return false;
530 }
static bool FindLockCycleRecurseMember(PGPROC *checkProc, PGPROC *checkProcLeader, int depth, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:533
static PGPROC ** visitedProcs
Definition: deadlock.c:102
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
dlist_head lockGroupMembers
Definition: proc.h:301
LOCK * waitLock
Definition: proc.h:228
PGPROC * lockGroupLeader
Definition: proc.h:300
dlist_node links
Definition: proc.h:159
dlist_node * cur
Definition: ilist.h:179
dlist_node * next
Definition: ilist.h:140

References Assert, dlist_iter::cur, dlist_container, dlist_foreach, FindLockCycleRecurseMember(), i, PGPROC::links, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, MaxBackends, nDeadlockDetails, dlist_node::next, nVisitedProcs, visitedProcs, and PGPROC::waitLock.

Referenced by FindLockCycle(), and FindLockCycleRecurseMember().

◆ FindLockCycleRecurseMember()

static bool FindLockCycleRecurseMember ( PGPROC * checkProc,
PGPROC * checkProcLeader,
int  depth,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 533 of file deadlock.c.

538 {
539  PGPROC *proc;
540  LOCK *lock = checkProc->waitLock;
541  dlist_iter proclock_iter;
542  LockMethod lockMethodTable;
543  int conflictMask;
544  int i;
545  int numLockModes,
546  lm;
547 
548  /*
549  * The relation extension lock can never participate in actual deadlock
550  * cycle. See Assert in LockAcquireExtended. So, there is no advantage
551  * in checking wait edges from it.
552  */
554  return false;
555 
556  lockMethodTable = GetLocksMethodTable(lock);
557  numLockModes = lockMethodTable->numLockModes;
558  conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
559 
560  /*
561  * Scan for procs that already hold conflicting locks. These are "hard"
562  * edges in the waits-for graph.
563  */
564  dlist_foreach(proclock_iter, &lock->procLocks)
565  {
566  PROCLOCK *proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
567  PGPROC *leader;
568 
569  proc = proclock->tag.myProc;
570  leader = proc->lockGroupLeader == NULL ? proc : proc->lockGroupLeader;
571 
572  /* A proc never blocks itself or any other lock group member */
573  if (leader != checkProcLeader)
574  {
575  for (lm = 1; lm <= numLockModes; lm++)
576  {
577  if ((proclock->holdMask & LOCKBIT_ON(lm)) &&
578  (conflictMask & LOCKBIT_ON(lm)))
579  {
580  /* This proc hard-blocks checkProc */
581  if (FindLockCycleRecurse(proc, depth + 1,
582  softEdges, nSoftEdges))
583  {
584  /* fill deadlockDetails[] */
585  DEADLOCK_INFO *info = &deadlockDetails[depth];
586 
587  info->locktag = lock->tag;
588  info->lockmode = checkProc->waitLockMode;
589  info->pid = checkProc->pid;
590 
591  return true;
592  }
593 
594  /*
595  * No deadlock here, but see if this proc is an autovacuum
596  * that is directly hard-blocking our own proc. If so,
597  * report it so that the caller can send a cancel signal
598  * to it, if appropriate. If there's more than one such
599  * proc, it's indeterminate which one will be reported.
600  *
601  * We don't touch autovacuums that are indirectly blocking
602  * us; it's up to the direct blockee to take action. This
603  * rule simplifies understanding the behavior and ensures
604  * that an autovacuum won't be canceled with less than
605  * deadlock_timeout grace period.
606  *
607  * Note we read statusFlags without any locking. This is
608  * OK only for checking the PROC_IS_AUTOVACUUM flag,
609  * because that flag is set at process start and never
610  * reset. There is logic elsewhere to avoid canceling an
611  * autovacuum that is working to prevent XID wraparound
612  * problems (which needs to read a different statusFlags
613  * bit), but we don't do that here to avoid grabbing
614  * ProcArrayLock.
615  */
616  if (checkProc == MyProc &&
619 
620  /* We're done looking at this proclock */
621  break;
622  }
623  }
624  }
625  }
626 
627  /*
628  * Scan for procs that are ahead of this one in the lock's wait queue.
629  * Those that have conflicting requests soft-block this one. This must be
630  * done after the hard-block search, since if another proc both hard- and
631  * soft-blocks this one, we want to call it a hard edge.
632  *
633  * If there is a proposed re-ordering of the lock's wait order, use that
634  * rather than the current wait order.
635  */
636  for (i = 0; i < nWaitOrders; i++)
637  {
638  if (waitOrders[i].lock == lock)
639  break;
640  }
641 
642  if (i < nWaitOrders)
643  {
644  /* Use the given hypothetical wait queue order */
645  PGPROC **procs = waitOrders[i].procs;
646  int queue_size = waitOrders[i].nProcs;
647 
648  for (i = 0; i < queue_size; i++)
649  {
650  PGPROC *leader;
651 
652  proc = procs[i];
653  leader = proc->lockGroupLeader == NULL ? proc :
654  proc->lockGroupLeader;
655 
656  /*
657  * TopoSort will always return an ordering with group members
658  * adjacent to each other in the wait queue (see comments
659  * therein). So, as soon as we reach a process in the same lock
660  * group as checkProc, we know we've found all the conflicts that
661  * precede any member of the lock group lead by checkProcLeader.
662  */
663  if (leader == checkProcLeader)
664  break;
665 
666  /* Is there a conflict with this guy's request? */
667  if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0)
668  {
669  /* This proc soft-blocks checkProc */
670  if (FindLockCycleRecurse(proc, depth + 1,
671  softEdges, nSoftEdges))
672  {
673  /* fill deadlockDetails[] */
674  DEADLOCK_INFO *info = &deadlockDetails[depth];
675 
676  info->locktag = lock->tag;
677  info->lockmode = checkProc->waitLockMode;
678  info->pid = checkProc->pid;
679 
680  /*
681  * Add this edge to the list of soft edges in the cycle
682  */
683  Assert(*nSoftEdges < MaxBackends);
684  softEdges[*nSoftEdges].waiter = checkProcLeader;
685  softEdges[*nSoftEdges].blocker = leader;
686  softEdges[*nSoftEdges].lock = lock;
687  (*nSoftEdges)++;
688  return true;
689  }
690  }
691  }
692  }
693  else
694  {
695  PGPROC *lastGroupMember = NULL;
696  dlist_iter proc_iter;
697  dclist_head *waitQueue;
698 
699  /* Use the true lock wait queue order */
700  waitQueue = &lock->waitProcs;
701 
702  /*
703  * Find the last member of the lock group that is present in the wait
704  * queue. Anything after this is not a soft lock conflict. If group
705  * locking is not in use, then we know immediately which process we're
706  * looking for, but otherwise we've got to search the wait queue to
707  * find the last process actually present.
708  */
709  if (checkProc->lockGroupLeader == NULL)
710  lastGroupMember = checkProc;
711  else
712  {
713  dclist_foreach(proc_iter, waitQueue)
714  {
715  proc = dlist_container(PGPROC, links, proc_iter.cur);
716 
717  if (proc->lockGroupLeader == checkProcLeader)
718  lastGroupMember = proc;
719  }
720  Assert(lastGroupMember != NULL);
721  }
722 
723  /*
724  * OK, now rescan (or scan) the queue to identify the soft conflicts.
725  */
726  dclist_foreach(proc_iter, waitQueue)
727  {
728  PGPROC *leader;
729 
730  proc = dlist_container(PGPROC, links, proc_iter.cur);
731 
732  leader = proc->lockGroupLeader == NULL ? proc :
733  proc->lockGroupLeader;
734 
735  /* Done when we reach the target proc */
736  if (proc == lastGroupMember)
737  break;
738 
739  /* Is there a conflict with this guy's request? */
740  if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0 &&
741  leader != checkProcLeader)
742  {
743  /* This proc soft-blocks checkProc */
744  if (FindLockCycleRecurse(proc, depth + 1,
745  softEdges, nSoftEdges))
746  {
747  /* fill deadlockDetails[] */
748  DEADLOCK_INFO *info = &deadlockDetails[depth];
749 
750  info->locktag = lock->tag;
751  info->lockmode = checkProc->waitLockMode;
752  info->pid = checkProc->pid;
753 
754  /*
755  * Add this edge to the list of soft edges in the cycle
756  */
757  Assert(*nSoftEdges < MaxBackends);
758  softEdges[*nSoftEdges].waiter = checkProcLeader;
759  softEdges[*nSoftEdges].blocker = leader;
760  softEdges[*nSoftEdges].lock = lock;
761  (*nSoftEdges)++;
762  return true;
763  }
764  }
765  }
766  }
767 
768  /*
769  * No conflict detected here.
770  */
771  return false;
772 }
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
#define PROC_IS_AUTOVACUUM
Definition: proc.h:57
PGPROC * MyProc
Definition: proc.c:66
PGPROC * blocker
Definition: deadlock.c:49
PGPROC * waiter
Definition: deadlock.c:48
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
const LOCKMASK * conflictTab
Definition: lock.h:111
int numLockModes
Definition: lock.h:110
uint8 statusFlags
Definition: proc.h:238
int pid
Definition: proc.h:178
LOCKMODE waitLockMode
Definition: proc.h:230
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370
LOCKMASK holdMask
Definition: lock.h:376
PROCLOCKTAG tag
Definition: lock.h:372

References Assert, EDGE::blocker, blocking_autovacuum_proc, LockMethodData::conflictTab, dlist_iter::cur, dclist_foreach, deadlockDetails, dlist_container, dlist_foreach, FindLockCycleRecurse(), GetLocksMethodTable(), PROCLOCK::holdMask, i, links, EDGE::lock, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG_RELATION_EXTEND, MaxBackends, MyProc, PROCLOCKTAG::myProc, WAIT_ORDER::nProcs, LockMethodData::numLockModes, nWaitOrders, DEADLOCK_INFO::pid, PGPROC::pid, PROC_IS_AUTOVACUUM, LOCK::procLocks, WAIT_ORDER::procs, PGPROC::statusFlags, LOCK::tag, PROCLOCK::tag, EDGE::waiter, PGPROC::waitLock, PGPROC::waitLockMode, waitOrders, and LOCK::waitProcs.

Referenced by FindLockCycleRecurse().

◆ GetBlockingAutoVacuumPgproc()

PGPROC* GetBlockingAutoVacuumPgproc ( void  )

Definition at line 287 of file deadlock.c.

288 {
289  PGPROC *ptr;
290 
293 
294  return ptr;
295 }

References blocking_autovacuum_proc.

Referenced by ProcSleep().

◆ InitDeadLockChecking()

void InitDeadLockChecking ( void  )

Definition at line 143 of file deadlock.c.

144 {
145  MemoryContext oldcxt;
146 
147  /* Make sure allocations are permanent */
149 
150  /*
151  * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
152  * deadlockDetails[].
153  */
154  visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
156 
157  /*
158  * TopoSort needs to consider at most MaxBackends wait-queue entries, and
159  * it needn't run concurrently with FindLockCycle.
160  */
161  topoProcs = visitedProcs; /* re-use this space */
162  beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
163  afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
164 
165  /*
166  * We need to consider rearranging at most MaxBackends/2 wait queues
167  * (since it takes at least two waiters in a queue to create a soft edge),
168  * and the expanded form of the wait queues can't involve more than
169  * MaxBackends total waiters.
170  */
171  waitOrders = (WAIT_ORDER *)
172  palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
173  waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
174 
175  /*
176  * Allow at most MaxBackends distinct constraints in a configuration. (Is
177  * this enough? In practice it seems it should be, but I don't quite see
178  * how to prove it. If we run out, we might fail to find a workable wait
179  * queue rearrangement even though one exists.) NOTE that this number
180  * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
181  * really big might potentially allow a stack-overflow problem.
182  */
185 
186  /*
187  * Allow up to 3*MaxBackends constraints to be saved without having to
188  * re-run TestConfiguration. (This is probably more than enough, but we
189  * can survive if we run low on space by doing excess runs of
190  * TestConfiguration to re-compute constraint lists each time needed.) The
191  * last MaxBackends entries in possibleConstraints[] are reserved as
192  * output workspace for FindLockCycle.
193  */
196  (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
197 
198  MemoryContextSwitchTo(oldcxt);
199 }
static int * beforeConstraints
Definition: deadlock.c:107
static int * afterConstraints
Definition: deadlock.c:108
static PGPROC ** topoProcs
Definition: deadlock.c:106
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * palloc(Size size)
Definition: mcxt.c:1316
MemoryContextSwitchTo(old_ctx)
Definition: deadlock.c:47

References afterConstraints, beforeConstraints, curConstraints, deadlockDetails, MaxBackends, maxCurConstraints, maxPossibleConstraints, MemoryContextSwitchTo(), palloc(), possibleConstraints, TopMemoryContext, topoProcs, visitedProcs, waitOrderProcs, and waitOrders.

Referenced by InitProcess().

◆ RememberSimpleDeadLock()

void RememberSimpleDeadLock ( PGPROC * proc1,
LOCKMODE  lockmode,
LOCK * lock,
PGPROC * proc2 
)

Definition at line 1144 of file deadlock.c.

1148 {
1149  DEADLOCK_INFO *info = &deadlockDetails[0];
1150 
1151  info->locktag = lock->tag;
1152  info->lockmode = lockmode;
1153  info->pid = proc1->pid;
1154  info++;
1155  info->locktag = proc2->waitLock->tag;
1156  info->lockmode = proc2->waitLockMode;
1157  info->pid = proc2->pid;
1158  nDeadlockDetails = 2;
1159 }

References deadlockDetails, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, nDeadlockDetails, DEADLOCK_INFO::pid, PGPROC::pid, LOCK::tag, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by ProcSleep().

◆ TestConfiguration()

static int TestConfiguration ( PGPROC * startProc)
static

Definition at line 375 of file deadlock.c.

376 {
377  int softFound = 0;
379  int nSoftEdges;
380  int i;
381 
382  /*
383  * Make sure we have room for FindLockCycle's output.
384  */
386  return -1;
387 
388  /*
389  * Expand current constraint set into wait orderings. Fail if the
390  * constraint set is not self-consistent.
391  */
393  return -1;
394 
395  /*
396  * Check for cycles involving startProc or any of the procs mentioned in
397  * constraints. We check startProc last because if it has a soft cycle
398  * still to be dealt with, we want to deal with that first.
399  */
400  for (i = 0; i < nCurConstraints; i++)
401  {
402  if (FindLockCycle(curConstraints[i].waiter, softEdges, &nSoftEdges))
403  {
404  if (nSoftEdges == 0)
405  return -1; /* hard deadlock detected */
406  softFound = nSoftEdges;
407  }
408  if (FindLockCycle(curConstraints[i].blocker, softEdges, &nSoftEdges))
409  {
410  if (nSoftEdges == 0)
411  return -1; /* hard deadlock detected */
412  softFound = nSoftEdges;
413  }
414  }
415  if (FindLockCycle(startProc, softEdges, &nSoftEdges))
416  {
417  if (nSoftEdges == 0)
418  return -1; /* hard deadlock detected */
419  softFound = nSoftEdges;
420  }
421  return softFound;
422 }
static bool ExpandConstraints(EDGE *constraints, int nConstraints)
Definition: deadlock.c:787

References curConstraints, ExpandConstraints(), FindLockCycle(), i, MaxBackends, maxPossibleConstraints, nCurConstraints, nPossibleConstraints, and possibleConstraints.

Referenced by DeadLockCheckRecurse().

◆ TopoSort()

static bool TopoSort ( LOCK *  lock,
EDGE *  constraints,
int  nConstraints,
PGPROC **  ordering 
)
static

Definition at line 859 of file deadlock.c.

863 {
864  dclist_head *waitQueue = &lock->waitProcs;
865  int queue_size = dclist_count(waitQueue);
866  PGPROC *proc;
867  int i,
868  j,
869  jj,
870  k,
871  kk,
872  last;
873  dlist_iter proc_iter;
874 
875  /* First, fill topoProcs[] array with the procs in their current order */
876  i = 0;
877  dclist_foreach(proc_iter, waitQueue)
878  {
879  proc = dlist_container(PGPROC, links, proc_iter.cur);
880  topoProcs[i++] = proc;
881  }
882  Assert(i == queue_size);
883 
884  /*
885  * Scan the constraints, and for each proc in the array, generate a count
886  * of the number of constraints that say it must be before something else,
887  * plus a list of the constraints that say it must be after something
888  * else. The count for the j'th proc is stored in beforeConstraints[j],
889  * and the head of its list in afterConstraints[j]. Each constraint
890  * stores its list link in constraints[i].link (note any constraint will
891  * be in just one list). The array index for the before-proc of the i'th
892  * constraint is remembered in constraints[i].pred.
893  *
894  * Note that it's not necessarily the case that every constraint affects
895  * this particular wait queue. Prior to group locking, a process could be
896  * waiting for at most one lock. But a lock group can be waiting for
897  * zero, one, or multiple locks. Since topoProcs[] is an array of the
898  * processes actually waiting, while constraints[] is an array of group
899  * leaders, we've got to scan through topoProcs[] for each constraint,
900  * checking whether both a waiter and a blocker for that group are
901  * present. If so, the constraint is relevant to this wait queue; if not,
902  * it isn't.
903  */
904  MemSet(beforeConstraints, 0, queue_size * sizeof(int));
905  MemSet(afterConstraints, 0, queue_size * sizeof(int));
906  for (i = 0; i < nConstraints; i++)
907  {
908  /*
909  * Find a representative process that is on the lock queue and part of
910  * the waiting lock group. This may or may not be the leader, which
911  * may or may not be waiting at all. If there are any other processes
912  * in the same lock group on the queue, set their number of
913  * beforeConstraints to -1 to indicate that they should be emitted
914  * with their groupmates rather than considered separately.
915  *
916  * In this loop and the similar one just below, it's critical that we
917  * consistently select the same representative member of any one lock
918  * group, so that all the constraints are associated with the same
919  * proc, and the -1's are only associated with not-representative
920  * members. We select the last one in the topoProcs array.
921  */
922  proc = constraints[i].waiter;
923  Assert(proc != NULL);
924  jj = -1;
925  for (j = queue_size; --j >= 0;)
926  {
927  PGPROC *waiter = topoProcs[j];
928 
929  if (waiter == proc || waiter->lockGroupLeader == proc)
930  {
931  Assert(waiter->waitLock == lock);
932  if (jj == -1)
933  jj = j;
934  else
935  {
936  Assert(beforeConstraints[j] <= 0);
937  beforeConstraints[j] = -1;
938  }
939  }
940  }
941 
942  /* If no matching waiter, constraint is not relevant to this lock. */
943  if (jj < 0)
944  continue;
945 
946  /*
947  * Similarly, find a representative process that is on the lock queue
948  * and waiting for the blocking lock group. Again, this could be the
949  * leader but does not need to be.
950  */
951  proc = constraints[i].blocker;
952  Assert(proc != NULL);
953  kk = -1;
954  for (k = queue_size; --k >= 0;)
955  {
956  PGPROC *blocker = topoProcs[k];
957 
958  if (blocker == proc || blocker->lockGroupLeader == proc)
959  {
960  Assert(blocker->waitLock == lock);
961  if (kk == -1)
962  kk = k;
963  else
964  {
965  Assert(beforeConstraints[k] <= 0);
966  beforeConstraints[k] = -1;
967  }
968  }
969  }
970 
971  /* If no matching blocker, constraint is not relevant to this lock. */
972  if (kk < 0)
973  continue;
974 
975  Assert(beforeConstraints[jj] >= 0);
976  beforeConstraints[jj]++; /* waiter must come before */
977  /* add this constraint to list of after-constraints for blocker */
978  constraints[i].pred = jj;
979  constraints[i].link = afterConstraints[kk];
980  afterConstraints[kk] = i + 1;
981  }
982 
983  /*--------------------
984  * Now scan the topoProcs array backwards. At each step, output the
985  * last proc that has no remaining before-constraints plus any other
986  * members of the same lock group; then decrease the beforeConstraints
987  * count of each of the procs it was constrained against.
988  * i = index of ordering[] entry we want to output this time
989  * j = search index for topoProcs[]
990  * k = temp for scanning constraint list for proc j
991  * last = last non-null index in topoProcs (avoid redundant searches)
992  *--------------------
993  */
994  last = queue_size - 1;
995  for (i = queue_size - 1; i >= 0;)
996  {
997  int c;
998  int nmatches = 0;
999 
1000  /* Find next candidate to output */
1001  while (topoProcs[last] == NULL)
1002  last--;
1003  for (j = last; j >= 0; j--)
1004  {
1005  if (topoProcs[j] != NULL && beforeConstraints[j] == 0)
1006  break;
1007  }
1008 
1009  /* If no available candidate, topological sort fails */
1010  if (j < 0)
1011  return false;
1012 
1013  /*
1014  * Output everything in the lock group. There's no point in
1015  * outputting an ordering where members of the same lock group are not
1016  * consecutive on the wait queue: if some other waiter is between two
1017  * requests that belong to the same group, then either it conflicts
1018  * with both of them and is certainly not a solution; or it conflicts
1019  * with at most one of them and is thus isomorphic to an ordering
1020  * where the group members are consecutive.
1021  */
1022  proc = topoProcs[j];
1023  if (proc->lockGroupLeader != NULL)
1024  proc = proc->lockGroupLeader;
1025  Assert(proc != NULL);
1026  for (c = 0; c <= last; ++c)
1027  {
1028  if (topoProcs[c] == proc || (topoProcs[c] != NULL &&
1029  topoProcs[c]->lockGroupLeader == proc))
1030  {
1031  ordering[i - nmatches] = topoProcs[c];
1032  topoProcs[c] = NULL;
1033  ++nmatches;
1034  }
1035  }
1036  Assert(nmatches > 0);
1037  i -= nmatches;
1038 
1039  /* Update beforeConstraints counts of its predecessors */
1040  for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
1041  beforeConstraints[constraints[k - 1].pred]--;
1042  }
1043 
1044  /* Done */
1045  return true;
1046 }
#define MemSet(start, val, len)
Definition: c.h:1020
char * c
int pred
Definition: deadlock.c:51
int link
Definition: deadlock.c:52

References afterConstraints, Assert, beforeConstraints, EDGE::blocker, dlist_iter::cur, dclist_count(), dclist_foreach, dlist_container, i, j, EDGE::link, links, PGPROC::lockGroupLeader, MemSet, EDGE::pred, topoProcs, EDGE::waiter, PGPROC::waitLock, and LOCK::waitProcs.

Referenced by ExpandConstraints().

Variable Documentation

◆ afterConstraints

int* afterConstraints
static

Definition at line 108 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ beforeConstraints

int* beforeConstraints
static

Definition at line 107 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ blocking_autovacuum_proc

PGPROC* blocking_autovacuum_proc = NULL
static

◆ curConstraints

EDGE* curConstraints
static

Definition at line 116 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), InitDeadLockChecking(), and TestConfiguration().

◆ deadlockDetails

DEADLOCK_INFO* deadlockDetails
static

◆ maxCurConstraints

int maxCurConstraints
static

Definition at line 118 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), and InitDeadLockChecking().

◆ maxPossibleConstraints

int maxPossibleConstraints
static

Definition at line 123 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), InitDeadLockChecking(), and TestConfiguration().

◆ nCurConstraints

int nCurConstraints
static

Definition at line 117 of file deadlock.c.

Referenced by DeadLockCheck(), DeadLockCheckRecurse(), and TestConfiguration().

◆ nDeadlockDetails

int nDeadlockDetails
static

◆ nPossibleConstraints

int nPossibleConstraints
static

Definition at line 122 of file deadlock.c.

Referenced by DeadLockCheck(), DeadLockCheckRecurse(), and TestConfiguration().

◆ nVisitedProcs

int nVisitedProcs
static

Definition at line 103 of file deadlock.c.

Referenced by FindLockCycle(), and FindLockCycleRecurse().

◆ nWaitOrders

int nWaitOrders
static

Definition at line 112 of file deadlock.c.

Referenced by DeadLockCheck(), ExpandConstraints(), and FindLockCycleRecurseMember().

◆ possibleConstraints

EDGE* possibleConstraints
static

◆ topoProcs

PGPROC** topoProcs
static

Definition at line 106 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ visitedProcs

PGPROC** visitedProcs
static

Definition at line 102 of file deadlock.c.

Referenced by FindLockCycleRecurse(), and InitDeadLockChecking().

◆ waitOrderProcs

PGPROC** waitOrderProcs
static

Definition at line 113 of file deadlock.c.

Referenced by ExpandConstraints(), and InitDeadLockChecking().

◆ waitOrders

WAIT_ORDER* waitOrders
static