PostgreSQL Source Code  git master
deadlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "utils/memutils.h"
Include dependency graph for deadlock.c:

Go to the source code of this file.

Data Structures

struct  EDGE
 
struct  WAIT_ORDER
 
struct  DEADLOCK_INFO
 

Functions

static bool DeadLockCheckRecurse (PGPROC *proc)
 
static int TestConfiguration (PGPROC *startProc)
 
static bool FindLockCycle (PGPROC *checkProc, EDGE *softEdges, int *nSoftEdges)
 
static bool FindLockCycleRecurse (PGPROC *checkProc, int depth, EDGE *softEdges, int *nSoftEdges)
 
static bool FindLockCycleRecurseMember (PGPROC *checkProc, PGPROC *checkProcLeader, int depth, EDGE *softEdges, int *nSoftEdges)
 
static bool ExpandConstraints (EDGE *constraints, int nConstraints)
 
static bool TopoSort (LOCK *lock, EDGE *constraints, int nConstraints, PGPROC **ordering)
 
void InitDeadLockChecking (void)
 
DeadLockState DeadLockCheck (PGPROC *proc)
 
PGPROC * GetBlockingAutoVacuumPgproc (void)
 
void DeadLockReport (void)
 
void RememberSimpleDeadLock (PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
 

Variables

static PGPROC ** visitedProcs
 
static int nVisitedProcs
 
static PGPROC ** topoProcs
 
static int * beforeConstraints
 
static int * afterConstraints
 
static WAIT_ORDER * waitOrders
 
static int nWaitOrders
 
static PGPROC ** waitOrderProcs
 
static EDGE * curConstraints
 
static int nCurConstraints
 
static int maxCurConstraints
 
static EDGE * possibleConstraints
 
static int nPossibleConstraints
 
static int maxPossibleConstraints
 
static DEADLOCK_INFO * deadlockDetails
 
static int nDeadlockDetails
 
static PGPROC * blocking_autovacuum_proc = NULL
 

Function Documentation

◆ DeadLockCheck()

DeadLockState DeadLockCheck ( PGPROC * proc)

Definition at line 217 of file deadlock.c.

218 {
219  /* Initialize to "no constraints" */
220  nCurConstraints = 0;
221  nPossibleConstraints = 0;
222  nWaitOrders = 0;
223 
224  /* Initialize to not blocked by an autovacuum worker */
225  blocking_autovacuum_proc = NULL;
226 
227  /* Search for deadlocks and possible fixes */
228  if (DeadLockCheckRecurse(proc))
229  {
230  /*
231  * Call FindLockCycle one more time, to record the correct
232  * deadlockDetails[] for the basic state with no rearrangements.
233  */
234  int nSoftEdges;
235 
236  TRACE_POSTGRESQL_DEADLOCK_FOUND();
237 
238  nWaitOrders = 0;
239  if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
240  elog(FATAL, "deadlock seems to have disappeared");
241 
242  return DS_HARD_DEADLOCK; /* cannot find a non-deadlocked state */
243  }
244 
245  /* Apply any needed rearrangements of wait queues */
246  for (int i = 0; i < nWaitOrders; i++)
247  {
248  LOCK *lock = waitOrders[i].lock;
249  PGPROC **procs = waitOrders[i].procs;
250  int nProcs = waitOrders[i].nProcs;
251  dclist_head *waitQueue = &lock->waitProcs;
252 
253  Assert(nProcs == dclist_count(waitQueue));
254 
255 #ifdef DEBUG_DEADLOCK
256  PrintLockQueue(lock, "DeadLockCheck:");
257 #endif
258 
259  /* Reset the queue and re-add procs in the desired order */
260  dclist_init(waitQueue);
261  for (int j = 0; j < nProcs; j++)
262  dclist_push_tail(waitQueue, &procs[j]->links);
263 
264 #ifdef DEBUG_DEADLOCK
265  PrintLockQueue(lock, "rearranged to:");
266 #endif
267 
268  /* See if any waiters for the lock can be woken up now */
269  ProcLockWakeup(GetLocksMethodTable(lock), lock);
270  }
271 
272  /* Return code tells caller if we had to escape a deadlock or not */
273  if (nWaitOrders > 0)
274  return DS_SOFT_DEADLOCK;
275  else if (blocking_autovacuum_proc != NULL)
276  return DS_BLOCKED_BY_AUTOVACUUM;
277  else
278  return DS_NO_DEADLOCK;
279 }
static WAIT_ORDER * waitOrders
Definition: deadlock.c:111
static bool FindLockCycle(PGPROC *checkProc, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:443
static bool DeadLockCheckRecurse(PGPROC *proc)
Definition: deadlock.c:309
static EDGE * possibleConstraints
Definition: deadlock.c:121
static int nWaitOrders
Definition: deadlock.c:112
static int nCurConstraints
Definition: deadlock.c:117
static PGPROC * blocking_autovacuum_proc
Definition: deadlock.c:128
static int nPossibleConstraints
Definition: deadlock.c:122
#define FATAL
Definition: elog.h:41
static void dclist_push_tail(dclist_head *head, dlist_node *node)
Definition: ilist.h:709
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
int j
Definition: isn.c:74
int i
Definition: isn.c:73
Assert(fmt[strlen(fmt) - 1] !='\n')
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition: lock.c:487
@ DS_HARD_DEADLOCK
Definition: lock.h:514
@ DS_BLOCKED_BY_AUTOVACUUM
Definition: lock.h:515
@ DS_NO_DEADLOCK
Definition: lock.h:512
@ DS_SOFT_DEADLOCK
Definition: lock.h:513
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1637
Definition: lock.h:309
dclist_head waitProcs
Definition: lock.h:317
Definition: proc.h:162
PGPROC ** procs
Definition: deadlock.c:59
LOCK * lock
Definition: deadlock.c:58
int nProcs
Definition: deadlock.c:60
static struct link * links
Definition: zic.c:299

References Assert(), blocking_autovacuum_proc, dclist_count(), dclist_init(), dclist_push_tail(), DeadLockCheckRecurse(), DS_BLOCKED_BY_AUTOVACUUM, DS_HARD_DEADLOCK, DS_NO_DEADLOCK, DS_SOFT_DEADLOCK, elog(), FATAL, FindLockCycle(), GetLocksMethodTable(), i, j, links, WAIT_ORDER::lock, nCurConstraints, nPossibleConstraints, WAIT_ORDER::nProcs, nWaitOrders, possibleConstraints, ProcLockWakeup(), WAIT_ORDER::procs, waitOrders, and LOCK::waitProcs.

Referenced by CheckDeadLock().
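
The four possible return values drive the caller's next step. Below is a hedged, simplified sketch of how a deadlock-timeout handler might act on the result; it is illustrative only and not the actual CheckDeadLock() code in proc.c (the helper name and the empty branches are placeholders).

/* Illustrative sketch only, assuming a backend context where MyProc and
 * the lock manager headers are available. */
static void
on_deadlock_timeout(void)               /* hypothetical helper */
{
    DeadLockState state = DeadLockCheck(MyProc);

    if (state == DS_HARD_DEADLOCK)
    {
        /* No wait-queue rearrangement helps: give up the lock wait and
         * let the backend report the cycle via DeadLockReport(). */
    }
    else if (state == DS_BLOCKED_BY_AUTOVACUUM)
    {
        /* The blocker can be fetched with GetBlockingAutoVacuumPgproc()
         * and, if policy allows, canceled. */
    }
    else
    {
        /* DS_SOFT_DEADLOCK: queues were already rearranged inside
         * DeadLockCheck(); DS_NO_DEADLOCK: false alarm.  Either way,
         * simply keep waiting. */
    }
}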

◆ DeadLockCheckRecurse()

static bool DeadLockCheckRecurse ( PGPROC * proc)
static

Definition at line 309 of file deadlock.c.

310 {
311  int nEdges;
312  int oldPossibleConstraints;
313  bool savedList;
314  int i;
315 
316  nEdges = TestConfiguration(proc);
317  if (nEdges < 0)
318  return true; /* hard deadlock --- no solution */
319  if (nEdges == 0)
320  return false; /* good configuration found */
321  if (nCurConstraints >= maxCurConstraints)
322  return true; /* out of room for active constraints? */
323  oldPossibleConstraints = nPossibleConstraints;
324  if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
325  {
326  /* We can save the edge list in possibleConstraints[] */
327  nPossibleConstraints += nEdges;
328  savedList = true;
329  }
330  else
331  {
332  /* Not room; will need to regenerate the edges on-the-fly */
333  savedList = false;
334  }
335 
336  /*
337  * Try each available soft edge as an addition to the configuration.
338  */
339  for (i = 0; i < nEdges; i++)
340  {
341  if (!savedList && i > 0)
342  {
343  /* Regenerate the list of possible added constraints */
344  if (nEdges != TestConfiguration(proc))
345  elog(FATAL, "inconsistent results during deadlock check");
346  }
347  curConstraints[nCurConstraints] =
348  possibleConstraints[oldPossibleConstraints + i];
349  nCurConstraints++;
350  if (!DeadLockCheckRecurse(proc))
351  return false; /* found a valid solution! */
352  /* give up on that added constraint, try again */
353  nCurConstraints--;
354  }
355  nPossibleConstraints = oldPossibleConstraints;
356  return true; /* no solution found */
357 }
static int TestConfiguration(PGPROC *startProc)
Definition: deadlock.c:375
static int maxPossibleConstraints
Definition: deadlock.c:123
static int maxCurConstraints
Definition: deadlock.c:118
static EDGE * curConstraints
Definition: deadlock.c:116
int MaxBackends
Definition: globals.c:140

References curConstraints, elog(), FATAL, i, MaxBackends, maxCurConstraints, maxPossibleConstraints, nCurConstraints, nPossibleConstraints, possibleConstraints, and TestConfiguration().

Referenced by DeadLockCheck().
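
The function is a classic try/recurse/undo backtracking search over soft edges: add a candidate constraint, recurse, and remove it if the recursion fails. The standalone toy below mirrors that shape with invented names and a trivial "problem"; note that the real function's return convention is inverted (it returns true when no deadlock-free configuration can be found).

#include <stdbool.h>
#include <stdio.h>

/* Toy backtracking skeleton; all names and the "problem" are invented. */
#define MAX_DEPTH 4

static int  nChosen = 0;

/* Stand-in for TestConfiguration(): 0 = good configuration, otherwise the
 * number of candidate constraints to try.  Here: succeed after 3 picks. */
static int
test_configuration(void)
{
    return (nChosen == 3) ? 0 : 2;
}

static bool
search(void)
{
    int         nEdges = test_configuration();

    if (nEdges == 0)
        return true;            /* good configuration found */
    if (nChosen >= MAX_DEPTH)
        return false;           /* out of room for constraints */

    for (int i = 0; i < nEdges; i++)
    {
        nChosen++;              /* tentatively add candidate constraint i */
        if (search())
            return true;
        nChosen--;              /* backtrack; try the next candidate */
    }
    return false;
}

int
main(void)
{
    printf("workable configuration %s\n", search() ? "found" : "not found");
    return 0;
}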

◆ DeadLockReport()

void DeadLockReport ( void  )

Definition at line 1073 of file deadlock.c.

1074 {
1075  StringInfoData clientbuf; /* errdetail for client */
1076  StringInfoData logbuf; /* errdetail for server log */
1077  StringInfoData locktagbuf;
1078  int i;
1079 
1080  initStringInfo(&clientbuf);
1081  initStringInfo(&logbuf);
1082  initStringInfo(&locktagbuf);
1083 
1084  /* Generate the "waits for" lines sent to the client */
1085  for (i = 0; i < nDeadlockDetails; i++)
1086  {
1087  DEADLOCK_INFO *info = &deadlockDetails[i];
1088  int nextpid;
1089 
1090  /* The last proc waits for the first one... */
1091  if (i < nDeadlockDetails - 1)
1092  nextpid = info[1].pid;
1093  else
1094  nextpid = deadlockDetails[0].pid;
1095 
1096  /* reset locktagbuf to hold next object description */
1097  resetStringInfo(&locktagbuf);
1098 
1099  DescribeLockTag(&locktagbuf, &info->locktag);
1100 
1101  if (i > 0)
1102  appendStringInfoChar(&clientbuf, '\n');
1103 
1104  appendStringInfo(&clientbuf,
1105  _("Process %d waits for %s on %s; blocked by process %d."),
1106  info->pid,
1107  GetLockmodeName(info->locktag.locktag_lockmethodid,
1108  info->lockmode),
1109  locktagbuf.data,
1110  nextpid);
1111  }
1112 
1113  /* Duplicate all the above for the server ... */
1114  appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
1115 
1116  /* ... and add info about query strings */
1117  for (i = 0; i < nDeadlockDetails; i++)
1118  {
1119  DEADLOCK_INFO *info = &deadlockDetails[i];
1120 
1121  appendStringInfoChar(&logbuf, '\n');
1122 
1123  appendStringInfo(&logbuf,
1124  _("Process %d: %s"),
1125  info->pid,
1126  pgstat_get_backend_current_activity(info->pid, false));
1127  }
1128 
1129  pgstat_report_deadlock();
1130 
1131  ereport(ERROR,
1132  (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
1133  errmsg("deadlock detected"),
1134  errdetail_internal("%s", clientbuf.data),
1135  errdetail_log("%s", logbuf.data),
1136  errhint("See server log for query details.")));
1137 }
const char * pgstat_get_backend_current_activity(int pid, bool checkUser)
static int nDeadlockDetails
Definition: deadlock.c:125
static DEADLOCK_INFO * deadlockDetails
Definition: deadlock.c:124
int errdetail_internal(const char *fmt,...)
Definition: elog.c:1229
int errhint(const char *fmt,...)
Definition: elog.c:1316
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
int errdetail_log(const char *fmt,...)
Definition: elog.c:1250
#define _(x)
Definition: elog.c:91
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1168
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4045
#define ERRCODE_T_R_DEADLOCK_DETECTED
Definition: pgbench.c:77
void pgstat_report_deadlock(void)
void resetStringInfo(StringInfo str)
Definition: stringinfo.c:75
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:227
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:188
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
LOCKTAG locktag
Definition: deadlock.c:73
LOCKMODE lockmode
Definition: deadlock.c:74
uint8 locktag_lockmethodid
Definition: lock.h:171

References _, appendBinaryStringInfo(), appendStringInfo(), appendStringInfoChar(), StringInfoData::data, deadlockDetails, DescribeLockTag(), ereport, errcode(), ERRCODE_T_R_DEADLOCK_DETECTED, errdetail_internal(), errdetail_log(), errhint(), errmsg(), ERROR, GetLockmodeName(), i, initStringInfo(), StringInfoData::len, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG::locktag_lockmethodid, nDeadlockDetails, pgstat_get_backend_current_activity(), pgstat_report_deadlock(), DEADLOCK_INFO::pid, and resetStringInfo().

Referenced by WaitOnLock().
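
The client-visible errdetail is one "waits for" line per cycle member, with the last member reported as blocked by the first; the server-log variant repeats those lines and appends each process's current query. A hypothetical two-backend cycle (all PIDs, XIDs and lock modes invented, purely for illustration) would be reported roughly as:

ERROR:  deadlock detected
DETAIL:  Process 9011 waits for ShareLock on transaction 742; blocked by process 9012.
Process 9012 waits for ShareLock on transaction 741; blocked by process 9011.
HINT:  See server log for query details.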

◆ ExpandConstraints()

static bool ExpandConstraints ( EDGE * constraints,
int  nConstraints 
)
static

Definition at line 788 of file deadlock.c.

790 {
791  int nWaitOrderProcs = 0;
792  int i,
793  j;
794 
795  nWaitOrders = 0;
796 
797  /*
798  * Scan constraint list backwards. This is because the last-added
799  * constraint is the only one that could fail, and so we want to test it
800  * for inconsistency first.
801  */
802  for (i = nConstraints; --i >= 0;)
803  {
804  LOCK *lock = constraints[i].lock;
805 
806  /* Did we already make a list for this lock? */
807  for (j = nWaitOrders; --j >= 0;)
808  {
809  if (waitOrders[j].lock == lock)
810  break;
811  }
812  if (j >= 0)
813  continue;
814  /* No, so allocate a new list */
815  waitOrders[nWaitOrders].lock = lock;
816  waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
817  waitOrders[nWaitOrders].nProcs = dclist_count(&lock->waitProcs);
818  nWaitOrderProcs += dclist_count(&lock->waitProcs);
819  Assert(nWaitOrderProcs <= MaxBackends);
820 
821  /*
822  * Do the topo sort. TopoSort need not examine constraints after this
823  * one, since they must be for different locks.
824  */
825  if (!TopoSort(lock, constraints, i + 1,
826  waitOrders[nWaitOrders].procs))
827  return false;
828  nWaitOrders++;
829  }
830  return true;
831 }
static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints, PGPROC **ordering)
Definition: deadlock.c:860
static PGPROC ** waitOrderProcs
Definition: deadlock.c:113
LOCK * lock
Definition: deadlock.c:50

References Assert(), dclist_count(), i, j, EDGE::lock, WAIT_ORDER::lock, MaxBackends, WAIT_ORDER::nProcs, nWaitOrders, WAIT_ORDER::procs, TopoSort(), waitOrderProcs, waitOrders, and LOCK::waitProcs.

Referenced by TestConfiguration().

◆ FindLockCycle()

static bool FindLockCycle ( PGPROC * checkProc,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 443 of file deadlock.c.

446 {
447  nVisitedProcs = 0;
448  nDeadlockDetails = 0;
449  *nSoftEdges = 0;
450  return FindLockCycleRecurse(checkProc, 0, softEdges, nSoftEdges);
451 }
static int nVisitedProcs
Definition: deadlock.c:103
static bool FindLockCycleRecurse(PGPROC *checkProc, int depth, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:454

References FindLockCycleRecurse(), nDeadlockDetails, and nVisitedProcs.

Referenced by DeadLockCheck(), and TestConfiguration().

◆ FindLockCycleRecurse()

static bool FindLockCycleRecurse ( PGPROC * checkProc,
int  depth,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 454 of file deadlock.c.

458 {
459  int i;
460  dlist_iter iter;
461 
462  /*
463  * If this process is a lock group member, check the leader instead. (Note
464  * that we might be the leader, in which case this is a no-op.)
465  */
466  if (checkProc->lockGroupLeader != NULL)
467  checkProc = checkProc->lockGroupLeader;
468 
469  /*
470  * Have we already seen this proc?
471  */
472  for (i = 0; i < nVisitedProcs; i++)
473  {
474  if (visitedProcs[i] == checkProc)
475  {
476  /* If we return to starting point, we have a deadlock cycle */
477  if (i == 0)
478  {
479  /*
480  * record total length of cycle --- outer levels will now fill
481  * deadlockDetails[]
482  */
483  Assert(depth <= MaxBackends);
484  nDeadlockDetails = depth;
485 
486  return true;
487  }
488 
489  /*
490  * Otherwise, we have a cycle but it does not include the start
491  * point, so say "no deadlock".
492  */
493  return false;
494  }
495  }
496  /* Mark proc as seen */
497  Assert(nVisitedProcs < MaxBackends);
498  visitedProcs[nVisitedProcs++] = checkProc;
499 
500  /*
501  * If the process is waiting, there is an outgoing waits-for edge to each
502  * process that blocks it.
503  */
504  if (checkProc->links.next != NULL && checkProc->waitLock != NULL &&
505  FindLockCycleRecurseMember(checkProc, checkProc, depth, softEdges,
506  nSoftEdges))
507  return true;
508 
509  /*
510  * If the process is not waiting, there could still be outgoing waits-for
511  * edges if it is part of a lock group, because other members of the lock
512  * group might be waiting even though this process is not. (Given lock
513  * groups {A1, A2} and {B1, B2}, if A1 waits for B1 and B2 waits for A2,
514  * that is a deadlock even if neither B1 nor A2 is waiting for anything.)
515  */
516  dlist_foreach(iter, &checkProc->lockGroupMembers)
517  {
518  PGPROC *memberProc;
519 
520  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
521 
522  if (memberProc->links.next != NULL && memberProc->waitLock != NULL &&
523  memberProc != checkProc &&
524  FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
525  nSoftEdges))
526  return true;
527  }
528 
529  return false;
530 }
static bool FindLockCycleRecurseMember(PGPROC *checkProc, PGPROC *checkProcLeader, int depth, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:533
static PGPROC ** visitedProcs
Definition: deadlock.c:102
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
dlist_head lockGroupMembers
Definition: proc.h:296
LOCK * waitLock
Definition: proc.h:223
PGPROC * lockGroupLeader
Definition: proc.h:295
dlist_node links
Definition: proc.h:164
dlist_node * cur
Definition: ilist.h:179
dlist_node * next
Definition: ilist.h:140

References Assert(), dlist_iter::cur, dlist_container, dlist_foreach, FindLockCycleRecurseMember(), i, PGPROC::links, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, MaxBackends, nDeadlockDetails, dlist_node::next, nVisitedProcs, visitedProcs, and PGPROC::waitLock.

Referenced by FindLockCycle(), and FindLockCycleRecurseMember().
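
The deadlock test is a depth-first walk of the waits-for graph; crucially, it reports a deadlock only when the walk returns to the starting process (visitedProcs[0]), not when it merely revisits some other already-seen process. A self-contained toy over a plain adjacency matrix (illustrative only, not PostgreSQL code, and with no lock-group handling):

#include <stdbool.h>
#include <stdio.h>

#define NPROCS 4

/* waits_for[a][b] means process a waits for process b (invented data):
 * 0 waits for 1, 1 waits for 2, 2 waits for 0, forming a cycle through 0. */
static bool waits_for[NPROCS][NPROCS] = {
    [0][1] = true, [1][2] = true, [2][0] = true,
};

static int  visited[NPROCS];
static int  nVisited = 0;

/* Return true only if the walk comes back to visited[0], i.e. the start
 * proc is actually part of the cycle, mirroring FindLockCycleRecurse(). */
static bool
find_cycle(int proc)
{
    for (int i = 0; i < nVisited; i++)
    {
        if (visited[i] == proc)
            return i == 0;      /* cycle back to the start: deadlock */
    }
    visited[nVisited++] = proc; /* mark proc as seen */

    for (int next = 0; next < NPROCS; next++)
    {
        if (waits_for[proc][next] && find_cycle(next))
            return true;
    }
    return false;
}

int
main(void)
{
    nVisited = 0;
    printf("deadlock involving proc 0: %s\n", find_cycle(0) ? "yes" : "no");
    return 0;
}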

◆ FindLockCycleRecurseMember()

static bool FindLockCycleRecurseMember ( PGPROC * checkProc,
PGPROC * checkProcLeader,
int  depth,
EDGE * softEdges,
int *  nSoftEdges 
)
static

Definition at line 533 of file deadlock.c.

538 {
539  PGPROC *proc;
540  LOCK *lock = checkProc->waitLock;
541  dlist_iter proclock_iter;
542  LockMethod lockMethodTable;
543  int conflictMask;
544  int i;
545  int numLockModes,
546  lm;
547 
548  /*
549  * The relation extension or page lock can never participate in actual
550  * deadlock cycle. See Asserts in LockAcquireExtended. So, there is no
551  * advantage in checking wait edges from them.
552  */
553  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
554  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
555  return false;
556 
557  lockMethodTable = GetLocksMethodTable(lock);
558  numLockModes = lockMethodTable->numLockModes;
559  conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
560 
561  /*
562  * Scan for procs that already hold conflicting locks. These are "hard"
563  * edges in the waits-for graph.
564  */
565  dlist_foreach(proclock_iter, &lock->procLocks)
566  {
567  PROCLOCK *proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
568  PGPROC *leader;
569 
570  proc = proclock->tag.myProc;
571  leader = proc->lockGroupLeader == NULL ? proc : proc->lockGroupLeader;
572 
573  /* A proc never blocks itself or any other lock group member */
574  if (leader != checkProcLeader)
575  {
576  for (lm = 1; lm <= numLockModes; lm++)
577  {
578  if ((proclock->holdMask & LOCKBIT_ON(lm)) &&
579  (conflictMask & LOCKBIT_ON(lm)))
580  {
581  /* This proc hard-blocks checkProc */
582  if (FindLockCycleRecurse(proc, depth + 1,
583  softEdges, nSoftEdges))
584  {
585  /* fill deadlockDetails[] */
586  DEADLOCK_INFO *info = &deadlockDetails[depth];
587 
588  info->locktag = lock->tag;
589  info->lockmode = checkProc->waitLockMode;
590  info->pid = checkProc->pid;
591 
592  return true;
593  }
594 
595  /*
596  * No deadlock here, but see if this proc is an autovacuum
597  * that is directly hard-blocking our own proc. If so,
598  * report it so that the caller can send a cancel signal
599  * to it, if appropriate. If there's more than one such
600  * proc, it's indeterminate which one will be reported.
601  *
602  * We don't touch autovacuums that are indirectly blocking
603  * us; it's up to the direct blockee to take action. This
604  * rule simplifies understanding the behavior and ensures
605  * that an autovacuum won't be canceled with less than
606  * deadlock_timeout grace period.
607  *
608  * Note we read statusFlags without any locking. This is
609  * OK only for checking the PROC_IS_AUTOVACUUM flag,
610  * because that flag is set at process start and never
611  * reset. There is logic elsewhere to avoid canceling an
612  * autovacuum that is working to prevent XID wraparound
613  * problems (which needs to read a different statusFlags
614  * bit), but we don't do that here to avoid grabbing
615  * ProcArrayLock.
616  */
617  if (checkProc == MyProc &&
618  proc->statusFlags & PROC_IS_AUTOVACUUM)
619  blocking_autovacuum_proc = proc;
620 
621  /* We're done looking at this proclock */
622  break;
623  }
624  }
625  }
626  }
627 
628  /*
629  * Scan for procs that are ahead of this one in the lock's wait queue.
630  * Those that have conflicting requests soft-block this one. This must be
631  * done after the hard-block search, since if another proc both hard- and
632  * soft-blocks this one, we want to call it a hard edge.
633  *
634  * If there is a proposed re-ordering of the lock's wait order, use that
635  * rather than the current wait order.
636  */
637  for (i = 0; i < nWaitOrders; i++)
638  {
639  if (waitOrders[i].lock == lock)
640  break;
641  }
642 
643  if (i < nWaitOrders)
644  {
645  /* Use the given hypothetical wait queue order */
646  PGPROC **procs = waitOrders[i].procs;
647  int queue_size = waitOrders[i].nProcs;
648 
649  for (i = 0; i < queue_size; i++)
650  {
651  PGPROC *leader;
652 
653  proc = procs[i];
654  leader = proc->lockGroupLeader == NULL ? proc :
655  proc->lockGroupLeader;
656 
657  /*
658  * TopoSort will always return an ordering with group members
659  * adjacent to each other in the wait queue (see comments
660  * therein). So, as soon as we reach a process in the same lock
661  * group as checkProc, we know we've found all the conflicts that
662  * precede any member of the lock group lead by checkProcLeader.
663  */
664  if (leader == checkProcLeader)
665  break;
666 
667  /* Is there a conflict with this guy's request? */
668  if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0)
669  {
670  /* This proc soft-blocks checkProc */
671  if (FindLockCycleRecurse(proc, depth + 1,
672  softEdges, nSoftEdges))
673  {
674  /* fill deadlockDetails[] */
675  DEADLOCK_INFO *info = &deadlockDetails[depth];
676 
677  info->locktag = lock->tag;
678  info->lockmode = checkProc->waitLockMode;
679  info->pid = checkProc->pid;
680 
681  /*
682  * Add this edge to the list of soft edges in the cycle
683  */
684  Assert(*nSoftEdges < MaxBackends);
685  softEdges[*nSoftEdges].waiter = checkProcLeader;
686  softEdges[*nSoftEdges].blocker = leader;
687  softEdges[*nSoftEdges].lock = lock;
688  (*nSoftEdges)++;
689  return true;
690  }
691  }
692  }
693  }
694  else
695  {
696  PGPROC *lastGroupMember = NULL;
697  dlist_iter proc_iter;
698  dclist_head *waitQueue;
699 
700  /* Use the true lock wait queue order */
701  waitQueue = &lock->waitProcs;
702 
703  /*
704  * Find the last member of the lock group that is present in the wait
705  * queue. Anything after this is not a soft lock conflict. If group
706  * locking is not in use, then we know immediately which process we're
707  * looking for, but otherwise we've got to search the wait queue to
708  * find the last process actually present.
709  */
710  if (checkProc->lockGroupLeader == NULL)
711  lastGroupMember = checkProc;
712  else
713  {
714  dclist_foreach(proc_iter, waitQueue)
715  {
716  proc = dlist_container(PGPROC, links, proc_iter.cur);
717 
718  if (proc->lockGroupLeader == checkProcLeader)
719  lastGroupMember = proc;
720  }
721  Assert(lastGroupMember != NULL);
722  }
723 
724  /*
725  * OK, now rescan (or scan) the queue to identify the soft conflicts.
726  */
727  dclist_foreach(proc_iter, waitQueue)
728  {
729  PGPROC *leader;
730 
731  proc = dlist_container(PGPROC, links, proc_iter.cur);
732 
733  leader = proc->lockGroupLeader == NULL ? proc :
734  proc->lockGroupLeader;
735 
736  /* Done when we reach the target proc */
737  if (proc == lastGroupMember)
738  break;
739 
740  /* Is there a conflict with this guy's request? */
741  if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0 &&
742  leader != checkProcLeader)
743  {
744  /* This proc soft-blocks checkProc */
745  if (FindLockCycleRecurse(proc, depth + 1,
746  softEdges, nSoftEdges))
747  {
748  /* fill deadlockDetails[] */
749  DEADLOCK_INFO *info = &deadlockDetails[depth];
750 
751  info->locktag = lock->tag;
752  info->lockmode = checkProc->waitLockMode;
753  info->pid = checkProc->pid;
754 
755  /*
756  * Add this edge to the list of soft edges in the cycle
757  */
758  Assert(*nSoftEdges < MaxBackends);
759  softEdges[*nSoftEdges].waiter = checkProcLeader;
760  softEdges[*nSoftEdges].blocker = leader;
761  softEdges[*nSoftEdges].lock = lock;
762  (*nSoftEdges)++;
763  return true;
764  }
765  }
766  }
767  }
768 
769  /*
770  * No conflict detected here.
771  */
772  return false;
773 }
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
@ LOCKTAG_PAGE
Definition: lock.h:140
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
#define PROC_IS_AUTOVACUUM
Definition: proc.h:56
PGPROC * MyProc
Definition: proc.c:66
PGPROC * blocker
Definition: deadlock.c:49
PGPROC * waiter
Definition: deadlock.c:48
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
const LOCKMASK * conflictTab
Definition: lock.h:111
int numLockModes
Definition: lock.h:110
uint8 statusFlags
Definition: proc.h:233
int pid
Definition: proc.h:186
LOCKMODE waitLockMode
Definition: proc.h:225
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370
LOCKMASK holdMask
Definition: lock.h:376
PROCLOCKTAG tag
Definition: lock.h:372

References Assert(), EDGE::blocker, blocking_autovacuum_proc, LockMethodData::conflictTab, dlist_iter::cur, dclist_foreach, deadlockDetails, dlist_container, dlist_foreach, FindLockCycleRecurse(), GetLocksMethodTable(), PROCLOCK::holdMask, i, links, EDGE::lock, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG_PAGE, LOCKTAG_RELATION_EXTEND, MaxBackends, MyProc, PROCLOCKTAG::myProc, WAIT_ORDER::nProcs, LockMethodData::numLockModes, nWaitOrders, DEADLOCK_INFO::pid, PGPROC::pid, PROC_IS_AUTOVACUUM, LOCK::procLocks, WAIT_ORDER::procs, PGPROC::statusFlags, LOCK::tag, PROCLOCK::tag, EDGE::waiter, PGPROC::waitLock, PGPROC::waitLockMode, waitOrders, and LOCK::waitProcs.

Referenced by FindLockCycleRecurse().
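
The member scan classifies outgoing edges in two passes: first hard edges (processes already holding a lock mode that conflicts with checkProc's request), then soft edges (processes merely ahead of checkProc in the wait queue with a conflicting request); a blocker that qualifies for both is deliberately counted as hard. A small standalone illustration of that distinction with a toy two-mode conflict table (not PostgreSQL code):

#include <stdbool.h>
#include <stdio.h>

/* Toy illustration: a blocker that already HOLDS a conflicting lock gives a
 * hard edge; one that is merely AHEAD in the wait queue with a conflicting
 * request gives a soft edge, removable by reordering the queue. */

typedef enum { MODE_SHARE = 0, MODE_EXCLUSIVE = 1 } ToyMode;

/* conflicts[a][b]: does a request for mode a conflict with mode b? */
static const bool conflicts[2][2] = {
    {false, true},              /* SHARE conflicts only with EXCLUSIVE */
    {true, true},               /* EXCLUSIVE conflicts with both */
};

int
main(void)
{
    ToyMode     my_request = MODE_EXCLUSIVE;
    ToyMode     holder_mode = MODE_SHARE;           /* already granted */
    ToyMode     earlier_waiter_mode = MODE_EXCLUSIVE;   /* queued ahead of us */

    if (conflicts[my_request][holder_mode])
        printf("hard edge: blocker already holds a conflicting lock\n");
    if (conflicts[my_request][earlier_waiter_mode])
        printf("soft edge: earlier waiter requests a conflicting mode\n");
    return 0;
}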

◆ GetBlockingAutoVacuumPgproc()

PGPROC* GetBlockingAutoVacuumPgproc ( void  )

Definition at line 287 of file deadlock.c.

288 {
289  PGPROC *ptr;
290 
291  ptr = blocking_autovacuum_proc;
292  blocking_autovacuum_proc = NULL;
293 
294  return ptr;
295 }

References blocking_autovacuum_proc.

Referenced by ProcSleep().
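
A hedged fragment showing how the caller side can look; this is simplified from what ProcSleep() actually does (in particular, the real code re-checks the worker's statusFlags while holding ProcArrayLock before signalling, and avoids cancelling anti-wraparound work). Assumes a backend context with <signal.h> available.

/* Illustrative fragment only, not the real ProcSleep() logic. */
PGPROC     *autovac = GetBlockingAutoVacuumPgproc();

if (autovac != NULL)
{
    int         pid = autovac->pid;

    /* The real caller verifies, under ProcArrayLock, that the process is
     * still an ordinary autovacuum worker before cancelling it. */
    if (pid != 0)
        (void) kill(pid, SIGINT);   /* request query cancel */
}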

◆ InitDeadLockChecking()

void InitDeadLockChecking ( void  )

Definition at line 143 of file deadlock.c.

144 {
145  MemoryContext oldcxt;
146 
147  /* Make sure allocations are permanent */
148  oldcxt = MemoryContextSwitchTo(TopMemoryContext);
149 
150  /*
151  * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
152  * deadlockDetails[].
153  */
154  visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
155  deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
156 
157  /*
158  * TopoSort needs to consider at most MaxBackends wait-queue entries, and
159  * it needn't run concurrently with FindLockCycle.
160  */
161  topoProcs = visitedProcs; /* re-use this space */
162  beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
163  afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
164 
165  /*
166  * We need to consider rearranging at most MaxBackends/2 wait queues
167  * (since it takes at least two waiters in a queue to create a soft edge),
168  * and the expanded form of the wait queues can't involve more than
169  * MaxBackends total waiters.
170  */
171  waitOrders = (WAIT_ORDER *)
172  palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
173  waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
174 
175  /*
176  * Allow at most MaxBackends distinct constraints in a configuration. (Is
177  * this enough? In practice it seems it should be, but I don't quite see
178  * how to prove it. If we run out, we might fail to find a workable wait
179  * queue rearrangement even though one exists.) NOTE that this number
180  * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
181  * really big might potentially allow a stack-overflow problem.
182  */
183  maxCurConstraints = MaxBackends;
184  curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
185 
186  /*
187  * Allow up to 3*MaxBackends constraints to be saved without having to
188  * re-run TestConfiguration. (This is probably more than enough, but we
189  * can survive if we run low on space by doing excess runs of
190  * TestConfiguration to re-compute constraint lists each time needed.) The
191  * last MaxBackends entries in possibleConstraints[] are reserved as
192  * output workspace for FindLockCycle.
193  */
194  maxPossibleConstraints = MaxBackends * 4;
195  possibleConstraints =
196  (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
197 
198  MemoryContextSwitchTo(oldcxt);
199 }
static int * beforeConstraints
Definition: deadlock.c:107
static int * afterConstraints
Definition: deadlock.c:108
static PGPROC ** topoProcs
Definition: deadlock.c:106
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc(Size size)
Definition: mcxt.c:1226
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138
Definition: deadlock.c:47

References afterConstraints, beforeConstraints, curConstraints, deadlockDetails, MaxBackends, maxCurConstraints, maxPossibleConstraints, MemoryContextSwitchTo(), palloc(), possibleConstraints, TopMemoryContext, topoProcs, visitedProcs, waitOrderProcs, and waitOrders.

Referenced by InitProcess().
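
Putting the sizing comments together, the working storage is a small, fixed function of MaxBackends. For example, with an assumed MaxBackends of 100 (an invented figure, purely for illustration):

/* Illustrative sizing only, assuming MaxBackends == 100:
 *   visitedProcs (reused as topoProcs)    100 PGPROC pointers
 *   deadlockDetails                       100 DEADLOCK_INFO entries
 *   beforeConstraints / afterConstraints  100 ints each
 *   waitOrders                             50 WAIT_ORDER entries (MaxBackends / 2)
 *   waitOrderProcs                        100 PGPROC pointers
 *   curConstraints                        100 EDGE entries
 *   possibleConstraints                   400 EDGE entries (4 * MaxBackends,
 *                                         the last 100 reserved as FindLockCycle
 *                                         output workspace)
 */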

◆ RememberSimpleDeadLock()

void RememberSimpleDeadLock ( PGPROC * proc1,
LOCKMODE  lockmode,
LOCK * lock,
PGPROC * proc2 
)

Definition at line 1145 of file deadlock.c.

1149 {
1150  DEADLOCK_INFO *info = &deadlockDetails[0];
1151 
1152  info->locktag = lock->tag;
1153  info->lockmode = lockmode;
1154  info->pid = proc1->pid;
1155  info++;
1156  info->locktag = proc2->waitLock->tag;
1157  info->lockmode = proc2->waitLockMode;
1158  info->pid = proc2->pid;
1159  nDeadlockDetails = 2;
1160 }

References deadlockDetails, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, nDeadlockDetails, DEADLOCK_INFO::pid, PGPROC::pid, LOCK::tag, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by ProcSleep().
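
This entry point serves the "simple" case in which a deadlock between exactly two processes is noticed while the wait queue is being set up, before the full detector ever runs. A hedged fragment of the calling pattern (the flag and variable names here are illustrative, not the exact ProcSleep() code):

/* Illustrative fragment: proc2 already waits for a lock we hold, and we are
 * about to wait for 'lock' (held by proc2) in mode 'lockmode'. */
if (early_deadlock)             /* hypothetical flag set while queueing */
{
    RememberSimpleDeadLock(MyProc, lockmode, lock, proc2);
    /* The caller then abandons the lock wait; DeadLockReport() later
     * prints the two recorded DEADLOCK_INFO entries. */
}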

◆ TestConfiguration()

static int TestConfiguration ( PGPROC * startProc)
static

Definition at line 375 of file deadlock.c.

376 {
377  int softFound = 0;
378  EDGE *softEdges = possibleConstraints + nPossibleConstraints;
379  int nSoftEdges;
380  int i;
381 
382  /*
383  * Make sure we have room for FindLockCycle's output.
384  */
385  if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
386  return -1;
387 
388  /*
389  * Expand current constraint set into wait orderings. Fail if the
390  * constraint set is not self-consistent.
391  */
392  if (!ExpandConstraints(curConstraints, nCurConstraints))
393  return -1;
394 
395  /*
396  * Check for cycles involving startProc or any of the procs mentioned in
397  * constraints. We check startProc last because if it has a soft cycle
398  * still to be dealt with, we want to deal with that first.
399  */
400  for (i = 0; i < nCurConstraints; i++)
401  {
402  if (FindLockCycle(curConstraints[i].waiter, softEdges, &nSoftEdges))
403  {
404  if (nSoftEdges == 0)
405  return -1; /* hard deadlock detected */
406  softFound = nSoftEdges;
407  }
408  if (FindLockCycle(curConstraints[i].blocker, softEdges, &nSoftEdges))
409  {
410  if (nSoftEdges == 0)
411  return -1; /* hard deadlock detected */
412  softFound = nSoftEdges;
413  }
414  }
415  if (FindLockCycle(startProc, softEdges, &nSoftEdges))
416  {
417  if (nSoftEdges == 0)
418  return -1; /* hard deadlock detected */
419  softFound = nSoftEdges;
420  }
421  return softFound;
422 }
static bool ExpandConstraints(EDGE *constraints, int nConstraints)
Definition: deadlock.c:788

References curConstraints, ExpandConstraints(), FindLockCycle(), i, MaxBackends, maxPossibleConstraints, nCurConstraints, nPossibleConstraints, and possibleConstraints.

Referenced by DeadLockCheckRecurse().

◆ TopoSort()

static bool TopoSort ( LOCK * lock,
EDGE * constraints,
int  nConstraints,
PGPROC **  ordering 
)
static

Definition at line 860 of file deadlock.c.

864 {
865  dclist_head *waitQueue = &lock->waitProcs;
866  int queue_size = dclist_count(waitQueue);
867  PGPROC *proc;
868  int i,
869  j,
870  jj,
871  k,
872  kk,
873  last;
874  dlist_iter proc_iter;
875 
876  /* First, fill topoProcs[] array with the procs in their current order */
877  i = 0;
878  dclist_foreach(proc_iter, waitQueue)
879  {
880  proc = dlist_container(PGPROC, links, proc_iter.cur);
881  topoProcs[i++] = proc;
882  }
883  Assert(i == queue_size);
884 
885  /*
886  * Scan the constraints, and for each proc in the array, generate a count
887  * of the number of constraints that say it must be before something else,
888  * plus a list of the constraints that say it must be after something
889  * else. The count for the j'th proc is stored in beforeConstraints[j],
890  * and the head of its list in afterConstraints[j]. Each constraint
891  * stores its list link in constraints[i].link (note any constraint will
892  * be in just one list). The array index for the before-proc of the i'th
893  * constraint is remembered in constraints[i].pred.
894  *
895  * Note that it's not necessarily the case that every constraint affects
896  * this particular wait queue. Prior to group locking, a process could be
897  * waiting for at most one lock. But a lock group can be waiting for
898  * zero, one, or multiple locks. Since topoProcs[] is an array of the
899  * processes actually waiting, while constraints[] is an array of group
900  * leaders, we've got to scan through topoProcs[] for each constraint,
901  * checking whether both a waiter and a blocker for that group are
902  * present. If so, the constraint is relevant to this wait queue; if not,
903  * it isn't.
904  */
905  MemSet(beforeConstraints, 0, queue_size * sizeof(int));
906  MemSet(afterConstraints, 0, queue_size * sizeof(int));
907  for (i = 0; i < nConstraints; i++)
908  {
909  /*
910  * Find a representative process that is on the lock queue and part of
911  * the waiting lock group. This may or may not be the leader, which
912  * may or may not be waiting at all. If there are any other processes
913  * in the same lock group on the queue, set their number of
914  * beforeConstraints to -1 to indicate that they should be emitted
915  * with their groupmates rather than considered separately.
916  *
917  * In this loop and the similar one just below, it's critical that we
918  * consistently select the same representative member of any one lock
919  * group, so that all the constraints are associated with the same
920  * proc, and the -1's are only associated with not-representative
921  * members. We select the last one in the topoProcs array.
922  */
923  proc = constraints[i].waiter;
924  Assert(proc != NULL);
925  jj = -1;
926  for (j = queue_size; --j >= 0;)
927  {
928  PGPROC *waiter = topoProcs[j];
929 
930  if (waiter == proc || waiter->lockGroupLeader == proc)
931  {
932  Assert(waiter->waitLock == lock);
933  if (jj == -1)
934  jj = j;
935  else
936  {
937  Assert(beforeConstraints[j] <= 0);
938  beforeConstraints[j] = -1;
939  }
940  }
941  }
942 
943  /* If no matching waiter, constraint is not relevant to this lock. */
944  if (jj < 0)
945  continue;
946 
947  /*
948  * Similarly, find a representative process that is on the lock queue
949  * and waiting for the blocking lock group. Again, this could be the
950  * leader but does not need to be.
951  */
952  proc = constraints[i].blocker;
953  Assert(proc != NULL);
954  kk = -1;
955  for (k = queue_size; --k >= 0;)
956  {
957  PGPROC *blocker = topoProcs[k];
958 
959  if (blocker == proc || blocker->lockGroupLeader == proc)
960  {
961  Assert(blocker->waitLock == lock);
962  if (kk == -1)
963  kk = k;
964  else
965  {
966  Assert(beforeConstraints[k] <= 0);
967  beforeConstraints[k] = -1;
968  }
969  }
970  }
971 
972  /* If no matching blocker, constraint is not relevant to this lock. */
973  if (kk < 0)
974  continue;
975 
976  Assert(beforeConstraints[jj] >= 0);
977  beforeConstraints[jj]++; /* waiter must come before */
978  /* add this constraint to list of after-constraints for blocker */
979  constraints[i].pred = jj;
980  constraints[i].link = afterConstraints[kk];
981  afterConstraints[kk] = i + 1;
982  }
983 
984  /*--------------------
985  * Now scan the topoProcs array backwards. At each step, output the
986  * last proc that has no remaining before-constraints plus any other
987  * members of the same lock group; then decrease the beforeConstraints
988  * count of each of the procs it was constrained against.
989  * i = index of ordering[] entry we want to output this time
990  * j = search index for topoProcs[]
991  * k = temp for scanning constraint list for proc j
992  * last = last non-null index in topoProcs (avoid redundant searches)
993  *--------------------
994  */
995  last = queue_size - 1;
996  for (i = queue_size - 1; i >= 0;)
997  {
998  int c;
999  int nmatches = 0;
1000 
1001  /* Find next candidate to output */
1002  while (topoProcs[last] == NULL)
1003  last--;
1004  for (j = last; j >= 0; j--)
1005  {
1006  if (topoProcs[j] != NULL && beforeConstraints[j] == 0)
1007  break;
1008  }
1009 
1010  /* If no available candidate, topological sort fails */
1011  if (j < 0)
1012  return false;
1013 
1014  /*
1015  * Output everything in the lock group. There's no point in
1016  * outputting an ordering where members of the same lock group are not
1017  * consecutive on the wait queue: if some other waiter is between two
1018  * requests that belong to the same group, then either it conflicts
1019  * with both of them and is certainly not a solution; or it conflicts
1020  * with at most one of them and is thus isomorphic to an ordering
1021  * where the group members are consecutive.
1022  */
1023  proc = topoProcs[j];
1024  if (proc->lockGroupLeader != NULL)
1025  proc = proc->lockGroupLeader;
1026  Assert(proc != NULL);
1027  for (c = 0; c <= last; ++c)
1028  {
1029  if (topoProcs[c] == proc || (topoProcs[c] != NULL &&
1030  topoProcs[c]->lockGroupLeader == proc))
1031  {
1032  ordering[i - nmatches] = topoProcs[c];
1033  topoProcs[c] = NULL;
1034  ++nmatches;
1035  }
1036  }
1037  Assert(nmatches > 0);
1038  i -= nmatches;
1039 
1040  /* Update beforeConstraints counts of its predecessors */
1041  for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
1042  beforeConstraints[constraints[k - 1].pred]--;
1043  }
1044 
1045  /* Done */
1046  return true;
1047 }
#define MemSet(start, val, len)
Definition: c.h:1004
char * c
int pred
Definition: deadlock.c:51
int link
Definition: deadlock.c:52

References afterConstraints, Assert(), beforeConstraints, EDGE::blocker, dlist_iter::cur, dclist_count(), dclist_foreach, dlist_container, i, j, EDGE::link, links, PGPROC::lockGroupLeader, MemSet, EDGE::pred, topoProcs, EDGE::waiter, PGPROC::waitLock, and LOCK::waitProcs.

Referenced by ExpandConstraints().
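
The routine is a constraint-respecting topological sort that also tries to preserve the existing queue order. The self-contained toy below does the same job on a queue of plain PIDs with explicit "waiter must be granted before blocker" edges; it is illustrative only and ignores lock groups, the backward scan, and the before-count bookkeeping the real code uses.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 4
#define NCONSTRAINTS 2

typedef struct { int waiter; int blocker; } ToyEdge;

int
main(void)
{
    int         queue[QUEUE_SIZE] = {101, 102, 103, 104};  /* PIDs, front first */
    /* 103 must be reordered ahead of 101, and 104 ahead of 102. */
    ToyEdge     constraints[NCONSTRAINTS] = {{103, 101}, {104, 102}};
    int         ordering[QUEUE_SIZE];
    bool        emitted[QUEUE_SIZE] = {false};

    for (int out = 0; out < QUEUE_SIZE; out++)
    {
        /* Pick the earliest not-yet-emitted proc that is not a blocker in
         * any constraint whose waiter is still pending. */
        int         pick = -1;

        for (int j = 0; j < QUEUE_SIZE && pick < 0; j++)
        {
            if (emitted[j])
                continue;

            bool        blocked = false;

            for (int c = 0; c < NCONSTRAINTS; c++)
            {
                if (constraints[c].blocker != queue[j])
                    continue;
                for (int k = 0; k < QUEUE_SIZE; k++)
                    if (!emitted[k] && queue[k] == constraints[c].waiter)
                        blocked = true;
            }
            if (!blocked)
                pick = j;
        }
        if (pick < 0)
        {
            printf("constraints are inconsistent\n");  /* like TopoSort() returning false */
            return 1;
        }
        emitted[pick] = true;
        ordering[out] = queue[pick];
    }

    for (int i = 0; i < QUEUE_SIZE; i++)
        printf("%d ", ordering[i]);
    printf("\n");               /* prints: 103 101 104 102 */
    return 0;
}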

Variable Documentation

◆ afterConstraints

int* afterConstraints
static

Definition at line 108 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ beforeConstraints

int* beforeConstraints
static

Definition at line 107 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ blocking_autovacuum_proc

PGPROC* blocking_autovacuum_proc = NULL
static

◆ curConstraints

EDGE* curConstraints
static

Definition at line 116 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), InitDeadLockChecking(), and TestConfiguration().

◆ deadlockDetails

DEADLOCK_INFO* deadlockDetails
static

◆ maxCurConstraints

int maxCurConstraints
static

Definition at line 118 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), and InitDeadLockChecking().

◆ maxPossibleConstraints

int maxPossibleConstraints
static

Definition at line 123 of file deadlock.c.

Referenced by DeadLockCheckRecurse(), InitDeadLockChecking(), and TestConfiguration().

◆ nCurConstraints

int nCurConstraints
static

Definition at line 117 of file deadlock.c.

Referenced by DeadLockCheck(), DeadLockCheckRecurse(), and TestConfiguration().

◆ nDeadlockDetails

int nDeadlockDetails
static

◆ nPossibleConstraints

int nPossibleConstraints
static

Definition at line 122 of file deadlock.c.

Referenced by DeadLockCheck(), DeadLockCheckRecurse(), and TestConfiguration().

◆ nVisitedProcs

int nVisitedProcs
static

Definition at line 103 of file deadlock.c.

Referenced by FindLockCycle(), and FindLockCycleRecurse().

◆ nWaitOrders

int nWaitOrders
static

Definition at line 112 of file deadlock.c.

Referenced by DeadLockCheck(), ExpandConstraints(), and FindLockCycleRecurseMember().

◆ possibleConstraints

EDGE* possibleConstraints
static

◆ topoProcs

PGPROC** topoProcs
static

Definition at line 106 of file deadlock.c.

Referenced by InitDeadLockChecking(), and TopoSort().

◆ visitedProcs

PGPROC** visitedProcs
static

Definition at line 102 of file deadlock.c.

Referenced by FindLockCycleRecurse(), and InitDeadLockChecking().

◆ waitOrderProcs

PGPROC** waitOrderProcs
static

Definition at line 113 of file deadlock.c.

Referenced by ExpandConstraints(), and InitDeadLockChecking().

◆ waitOrders

WAIT_ORDER* waitOrders
static