timeout.c
1 /*-------------------------------------------------------------------------
2  *
3  * timeout.c
4  * Routines to multiplex SIGALRM interrupts for multiple timeout reasons.
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/misc/timeout.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <sys/time.h>
18 
19 #include "miscadmin.h"
20 #include "storage/proc.h"
21 #include "utils/timeout.h"
22 #include "utils/timestamp.h"
23 
24 
25 /* Data about any one timeout reason */
26 typedef struct timeout_params
27 {
28  TimeoutId index; /* identifier of timeout reason */
29 
30  /* volatile because these may be changed from the signal handler */
31  volatile bool active; /* true if timeout is in active_timeouts[] */
32  volatile bool indicator; /* true if timeout has occurred */
33 
34  /* callback function for timeout, or NULL if timeout not registered */
35  timeout_handler_proc timeout_handler;
36 
37  TimestampTz start_time; /* time that timeout was last activated */
38  TimestampTz fin_time; /* time it is, or was last, due to fire */
39  int interval_in_ms; /* time between firings, or 0 if just once */
40 } timeout_params;
41 
42 /*
43  * List of possible timeout reasons in the order of enum TimeoutId.
44  */
45 static timeout_params all_timeouts[MAX_TIMEOUTS];
46 static bool all_timeouts_initialized = false;
47 
48 /*
49  * List of active timeouts ordered by their fin_time and priority.
50  * This list is subject to change by the interrupt handler, so it's volatile.
51  */
52 static volatile int num_active_timeouts = 0;
53 static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
54 
55 /*
56  * Flag controlling whether the signal handler is allowed to do anything.
57  * This is useful to avoid race conditions with the handler. Note in
58  * particular that this lets us make changes in the data structures without
59  * tediously disabling and re-enabling the timer signal. Most of the time,
60  * no interrupt would happen anyway during such critical sections, but if
61  * one does, this rule ensures it's safe. Leaving the signal enabled across
62  * multiple operations can greatly reduce the number of kernel calls we make,
63  * too. See comments in schedule_alarm() about that.
64  *
65  * We leave this "false" when we're not expecting interrupts, just in case.
66  */
67 static volatile sig_atomic_t alarm_enabled = false;
68 
69 #define disable_alarm() (alarm_enabled = false)
70 #define enable_alarm() (alarm_enabled = true)
71 
72 /*
73  * State recording if and when we next expect the interrupt to fire.
74  * (signal_due_at is valid only when signal_pending is true.)
75  * Note that the signal handler will unconditionally reset signal_pending to
76  * false, so that can change asynchronously even when alarm_enabled is false.
77  */
78 static volatile sig_atomic_t signal_pending = false;
79 static volatile TimestampTz signal_due_at = 0;
80 
81 
82 /*****************************************************************************
83  * Internal helper functions
84  *
85  * For all of these, it is caller's responsibility to protect them from
86  * interruption by the signal handler. Generally, call disable_alarm()
87  * first to prevent interruption, then update state, and last call
88  * schedule_alarm(), which will re-enable the signal handler if needed.
89  *****************************************************************************/
90 
91 /*
92  * Find the index of a given timeout reason in the active array.
93  * If it's not there, return -1.
94  */
95 static int
96 find_active_timeout(TimeoutId id)
97 {
98  int i;
99 
100  for (i = 0; i < num_active_timeouts; i++)
101  {
102  if (active_timeouts[i]->index == id)
103  return i;
104  }
105 
106  return -1;
107 }
108 
109 /*
110  * Insert specified timeout reason into the list of active timeouts
111  * at the given index.
112  */
113 static void
114 insert_timeout(TimeoutId id, int index)
115 {
116  int i;
117 
118  if (index < 0 || index > num_active_timeouts)
119  elog(FATAL, "timeout index %d out of range 0..%d", index,
120  num_active_timeouts);
121 
122  Assert(!all_timeouts[id].active);
123  all_timeouts[id].active = true;
124 
125  for (i = num_active_timeouts - 1; i >= index; i--)
126  active_timeouts[i + 1] = active_timeouts[i];
127 
128  active_timeouts[index] = &all_timeouts[id];
129 
130  num_active_timeouts++;
131 }
132 
133 /*
134  * Remove the index'th element from the timeout list.
135  */
136 static void
137 remove_timeout_index(int index)
138 {
139  int i;
140 
141  if (index < 0 || index >= num_active_timeouts)
142  elog(FATAL, "timeout index %d out of range 0..%d", index,
143  num_active_timeouts - 1);
144 
145  Assert(active_timeouts[index]->active);
146  active_timeouts[index]->active = false;
147 
148  for (i = index + 1; i < num_active_timeouts; i++)
149  active_timeouts[i - 1] = active_timeouts[i];
150 
151  num_active_timeouts--;
152 }
153 
154 /*
155  * Enable the specified timeout reason
156  */
157 static void
158 enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time,
159  int interval_in_ms)
160 {
161  int i;
162 
163  /* Assert request is sane */
164  Assert(all_timeouts_initialized);
165  Assert(all_timeouts[id].timeout_handler != NULL);
166 
167  /*
168  * If this timeout was already active, momentarily disable it. We
169  * interpret the call as a directive to reschedule the timeout.
170  */
171  if (all_timeouts[id].active)
172  remove_timeout_index(find_active_timeout(id));
173 
174  /*
175  * Find out the index where to insert the new timeout. We sort by
176  * fin_time, and for equal fin_time by priority.
177  */
178  for (i = 0; i < num_active_timeouts; i++)
179  {
180  timeout_params *old_timeout = active_timeouts[i];
181 
182  if (fin_time < old_timeout->fin_time)
183  break;
184  if (fin_time == old_timeout->fin_time && id < old_timeout->index)
185  break;
186  }
187 
188  /*
189  * Mark the timeout active, and insert it into the active list.
190  */
191  all_timeouts[id].indicator = false;
192  all_timeouts[id].start_time = now;
193  all_timeouts[id].fin_time = fin_time;
194  all_timeouts[id].interval_in_ms = interval_in_ms;
195 
196  insert_timeout(id, i);
197 }
198 
199 /*
200  * Schedule alarm for the next active timeout, if any
201  *
202  * We assume the caller has obtained the current time, or a close-enough
203  * approximation. (It's okay if a tick or two has passed since "now", or
204  * if a little more time elapses before we reach the kernel call; that will
205  * cause us to ask for an interrupt a tick or two later than the nearest
206  * timeout, which is no big deal. Passing a "now" value that's in the future
207  * would be bad though.)
208  */
209 static void
210 schedule_alarm(TimestampTz now)
211 {
212  if (num_active_timeouts > 0)
213  {
214  struct itimerval timeval;
215  TimestampTz nearest_timeout;
216  long secs;
217  int usecs;
218 
219  MemSet(&timeval, 0, sizeof(struct itimerval));
220 
221  /*
222  * If we think there's a signal pending, but current time is more than
223  * 10ms past when the signal was due, then assume that the timeout
224  * request got lost somehow; clear signal_pending so that we'll reset
225  * the interrupt request below. (10ms corresponds to the worst-case
226  * timeout granularity on modern systems.) It won't hurt us if the
227  * interrupt does manage to fire between now and when we reach the
228  * setitimer() call.
229  */
230  if (signal_pending && now > signal_due_at + 10 * 1000)
231  signal_pending = false;
232 
233  /*
234  * Get the time remaining till the nearest pending timeout. If it is
235  * negative, assume that we somehow missed an interrupt, and clear
236  * signal_pending. This gives us another chance to recover if the
237  * kernel drops a timeout request for some reason.
238  */
239  nearest_timeout = active_timeouts[0]->fin_time;
240  if (now > nearest_timeout)
241  {
242  signal_pending = false;
243  /* force an interrupt as soon as possible */
244  secs = 0;
245  usecs = 1;
246  }
247  else
248  {
249  TimestampDifference(now, nearest_timeout,
250  &secs, &usecs);
251 
252  /*
253  * It's possible that the difference is less than a microsecond;
254  * ensure we don't cancel, rather than set, the interrupt.
255  */
256  if (secs == 0 && usecs == 0)
257  usecs = 1;
258  }
259 
260  timeval.it_value.tv_sec = secs;
261  timeval.it_value.tv_usec = usecs;
262 
263  /*
264  * We must enable the signal handler before calling setitimer(); if we
265  * did it in the other order, we'd have a race condition wherein the
266  * interrupt could occur before we can set alarm_enabled, so that the
267  * signal handler would fail to do anything.
268  *
269  * Because we didn't bother to disable the timer in disable_alarm(),
270  * it's possible that a previously-set interrupt will fire between
271  * enable_alarm() and setitimer(). This is safe, however. There are
272  * two possible outcomes:
273  *
274  * 1. The signal handler finds nothing to do (because the nearest
275  * timeout event is still in the future). It will re-set the timer
276  * and return. Then we'll overwrite the timer value with a new one.
277  * This will mean that the timer fires a little later than we
278  * intended, but only by the amount of time it takes for the signal
279  * handler to do nothing useful, which shouldn't be much.
280  *
281  * 2. The signal handler executes and removes one or more timeout
282  * events. When it returns, either the queue is now empty or the
283  * frontmost event is later than the one we looked at above. So we'll
284  * overwrite the timer value with one that is too soon (plus or minus
285  * the signal handler's execution time), causing a useless interrupt
286  * to occur. But the handler will then re-set the timer and
287  * everything will still work as expected.
288  *
289  * Since these cases are of very low probability (the window here
290  * being quite narrow), it's not worth adding cycles to the mainline
291  * code to prevent occasional wasted interrupts.
292  */
293  enable_alarm();
294 
295  /*
296  * If there is already an interrupt pending that's at or before the
297  * needed time, we need not do anything more. The signal handler will
298  * do the right thing in the first case, and re-schedule the interrupt
299  * for later in the second case. It might seem that the extra
300  * interrupt is wasted work, but it's not terribly much work, and this
301  * method has very significant advantages in the common use-case where
302  * we repeatedly set a timeout that we don't expect to reach and then
303  * cancel it. Instead of invoking setitimer() every time the timeout
304  * is set or canceled, we perform one interrupt and a re-scheduling
305  * setitimer() call at intervals roughly equal to the timeout delay.
306  * For example, with statement_timeout = 1s and a throughput of
307  * thousands of queries per second, this method requires an interrupt
308  * and setitimer() call roughly once a second, rather than thousands
309  * of setitimer() calls per second.
310  *
311  * Because of the possible passage of time between when we obtained
312  * "now" and when we reach setitimer(), the kernel's opinion of when
313  * to trigger the interrupt is likely to be a bit later than
314  * signal_due_at. That's fine, for the same reasons described above.
315  */
316  if (signal_pending && nearest_timeout >= signal_due_at)
317  return;
318 
319  /*
320  * As with calling enable_alarm(), we must set signal_pending *before*
321  * calling setitimer(); if we did it after, the signal handler could
322  * trigger before we set it, leaving us with a false opinion that a
323  * signal is still coming.
324  *
325  * Other race conditions involved with setting/checking signal_pending
326  * are okay, for the reasons described above. One additional point is
327  * that the signal handler could fire after we set signal_due_at, but
328  * still before the setitimer() call. Then the handler could
329  * overwrite signal_due_at with a value it computes, which will be the
330  * same as or perhaps later than what we just computed. After we
331  * perform setitimer(), the net effect would be that signal_due_at
332  * gives a time later than when the interrupt will really happen;
333  * which is a safe situation.
334  */
335  signal_due_at = nearest_timeout;
336  signal_pending = true;
337 
338  /* Set the alarm timer */
339  if (setitimer(ITIMER_REAL, &timeval, NULL) != 0)
340  {
341  /*
342  * Clearing signal_pending here is a bit pro forma, but not
343  * entirely so, since something in the FATAL exit path could try
344  * to use timeout facilities.
345  */
346  signal_pending = false;
347  elog(FATAL, "could not enable SIGALRM timer: %m");
348  }
349  }
350 }
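
The comment above describes the intended usage pattern from the caller's side: a timeout such as STATEMENT_TIMEOUT is typically armed just before a piece of work and cancelled as soon as the work completes, usually without ever firing. The sketch below is illustrative only and is not part of timeout.c; run_one_statement() and the delay value are hypothetical stand-ins, and real statement_timeout handling lives in this module's callers.

/* Hypothetical caller-side sketch of the arm-then-cancel pattern. */
extern void run_one_statement(void);   /* hypothetical helper doing the work */

static void
run_statement_with_timeout(int statement_timeout_ms)
{
    if (statement_timeout_ms > 0)
        enable_timeout_after(STATEMENT_TIMEOUT, statement_timeout_ms);

    run_one_statement();

    /*
     * In the common case the timeout never fired.  Cancelling it here does
     * not issue another setitimer() call; any already-armed interrupt is
     * simply left to fire later and be handled (or ignored) cheaply.
     */
    disable_timeout(STATEMENT_TIMEOUT, false);
}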
351 
352 
353 /*****************************************************************************
354  * Signal handler
355  *****************************************************************************/
356 
357 /*
358  * Signal handler for SIGALRM
359  *
360  * Process any active timeout reasons and then reschedule the interrupt
361  * as needed.
362  */
363 static void
364 handle_sig_alarm(SIGNAL_ARGS)
365 {
366  int save_errno = errno;
367 
368  /*
369  * Bump the holdoff counter, to make sure nothing we call will process
370  * interrupts directly. No timeout handler should do that, but these
371  * failures are hard to debug, so better be sure.
372  */
373  HOLD_INTERRUPTS();
374 
375  /*
376  * SIGALRM is always cause for waking anything waiting on the process
377  * latch.
378  */
379  SetLatch(MyLatch);
380 
381  /*
382  * Always reset signal_pending, even if !alarm_enabled, since indeed no
383  * signal is now pending.
384  */
385  signal_pending = false;
386 
387  /*
388  * Fire any pending timeouts, but only if we're enabled to do so.
389  */
390  if (alarm_enabled)
391  {
392  /*
393  * Disable alarms, just in case this platform allows signal handlers
394  * to interrupt themselves. schedule_alarm() will re-enable if
395  * appropriate.
396  */
397  disable_alarm();
398 
399  if (num_active_timeouts > 0)
400  {
401  TimestampTz now = GetCurrentTimestamp();
402 
403  /* While the first pending timeout has been reached ... */
404  while (num_active_timeouts > 0 &&
405  now >= active_timeouts[0]->fin_time)
406  {
407  timeout_params *this_timeout = active_timeouts[0];
408 
409  /* Remove it from the active list */
410  remove_timeout_index(0);
411 
412  /* Mark it as fired */
413  this_timeout->indicator = true;
414 
415  /* And call its handler function */
416  this_timeout->timeout_handler();
417 
418  /* If it should fire repeatedly, re-enable it. */
419  if (this_timeout->interval_in_ms > 0)
420  {
421  TimestampTz new_fin_time;
422 
423  /*
424  * To guard against drift, schedule the next instance of
425  * the timeout based on the intended firing time rather
426  * than the actual firing time. But if the timeout was so
427  * late that we missed an entire cycle, fall back to
428  * scheduling based on the actual firing time.
429  */
430  new_fin_time =
431  TimestampTzPlusMilliseconds(this_timeout->fin_time,
432  this_timeout->interval_in_ms);
433  if (new_fin_time < now)
434  new_fin_time =
435  TimestampTzPlusMilliseconds(now,
436  this_timeout->interval_in_ms);
437  enable_timeout(this_timeout->index, now, new_fin_time,
438  this_timeout->interval_in_ms);
439  }
440 
441  /*
442  * The handler might not take negligible time (CheckDeadLock
443  * for instance isn't too cheap), so let's update our idea of
444  * "now" after each one.
445  */
446  now = GetCurrentTimestamp();
447  }
448 
449  /* Done firing timeouts, so reschedule next interrupt if any */
450  schedule_alarm(now);
451  }
452  }
453 
454  RESUME_INTERRUPTS();
455 
456  errno = save_errno;
457 }
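
As a concrete illustration of the drift-guard rescheduling inside the loop above: with interval_in_ms = 1000 and fin_time = 5000 ms, a firing observed at now = 5030 ms is rescheduled for 6000 ms (intended time plus one interval), not 6030 ms, so the period does not drift; only if the firing is observed after 6000 ms, i.e. a whole cycle was missed, does it fall back to now plus the interval. The helper below is a standalone sketch using plain millisecond integers rather than TimestampTz microseconds, and next_fin_time() is a name invented for the example.

/* Illustrative sketch of the rescheduling rule (not part of timeout.c). */
#include <stdint.h>

static int64_t
next_fin_time(int64_t fin_time_ms, int64_t now_ms, int interval_in_ms)
{
    int64_t new_fin_time = fin_time_ms + interval_in_ms;    /* 5000 + 1000 = 6000 */

    if (new_fin_time < now_ms)                  /* whole cycle missed, e.g. now = 6200 */
        new_fin_time = now_ms + interval_in_ms; /* fall back: 6200 + 1000 = 7200 */
    return new_fin_time;
}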
458 
459 
460 /*****************************************************************************
461  * Public API
462  *****************************************************************************/
463 
464 /*
465  * Initialize timeout module.
466  *
467  * This must be called in every process that wants to use timeouts.
468  *
469  * If the process was forked from another one that was also using this
470  * module, be sure to call this before re-enabling signals; else handlers
471  * meant to run in the parent process might get invoked in this one.
472  */
473 void
474 InitializeTimeouts(void)
475 {
476  int i;
477 
478  /* Initialize, or re-initialize, all local state */
479  disable_alarm();
480 
481  num_active_timeouts = 0;
482 
483  for (i = 0; i < MAX_TIMEOUTS; i++)
484  {
485  all_timeouts[i].index = i;
486  all_timeouts[i].active = false;
487  all_timeouts[i].indicator = false;
488  all_timeouts[i].timeout_handler = NULL;
489  all_timeouts[i].start_time = 0;
490  all_timeouts[i].fin_time = 0;
491  all_timeouts[i].interval_in_ms = 0;
492  }
493 
494  all_timeouts_initialized = true;
495 
496  /* Now establish the signal handler */
497  pqsignal(SIGALRM, handle_sig_alarm);
498 }
499 
500 /*
501  * Register a timeout reason
502  *
503  * For predefined timeouts, this just registers the callback function.
504  *
505  * For user-defined timeouts, pass id == USER_TIMEOUT; we then allocate and
506  * return a timeout ID.
507  */
508 TimeoutId
509 RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
510 {
511  Assert(all_timeouts_initialized);
512 
513  /* There's no need to disable the signal handler here. */
514 
515  if (id >= USER_TIMEOUT)
516  {
517  /* Allocate a user-defined timeout reason */
518  for (id = USER_TIMEOUT; id < MAX_TIMEOUTS; id++)
519  if (all_timeouts[id].timeout_handler == NULL)
520  break;
521  if (id >= MAX_TIMEOUTS)
522  ereport(FATAL,
523  (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
524  errmsg("cannot add more timeout reasons")));
525  }
526 
527  Assert(all_timeouts[id].timeout_handler == NULL);
528 
529  all_timeouts[id].timeout_handler = handler;
530 
531  return id;
532 }
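
A minimal usage sketch of the registration API, assuming InitializeTimeouts() has already run in this process. Everything prefixed with my_ is hypothetical. Note that the registered handler runs inside the SIGALRM signal handler, so it should restrict itself to signal-safe work such as setting flags (the process latch is already set by handle_sig_alarm itself).

#include "postgres.h"
#include <signal.h>
#include "utils/timeout.h"

static TimeoutId my_timeout_id;
static volatile sig_atomic_t my_timeout_fired = false;

/* Runs in the SIGALRM handler: keep it minimal and signal-safe. */
static void
my_timeout_handler(void)
{
    my_timeout_fired = true;
}

static void
my_module_init(void)
{
    my_timeout_id = RegisterTimeout(USER_TIMEOUT, my_timeout_handler);
}

static void
my_module_do_work(void)
{
    enable_timeout_after(my_timeout_id, 5000);  /* fire once, 5 seconds from now */

    /* ... perform the work being guarded ... */

    if (get_timeout_indicator(my_timeout_id, true))
        my_timeout_fired = false;               /* the timeout fired; handle it */

    disable_timeout(my_timeout_id, false);      /* cancel it if still pending */
}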
533 
534 /*
535  * Reschedule any pending SIGALRM interrupt.
536  *
537  * This can be used during error recovery in case query cancel resulted in loss
538  * of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
539  * could do anything). But note it's not necessary if any of the public
540  * enable_ or disable_timeout functions are called in the same area, since
541  * those all do schedule_alarm() internally if needed.
542  */
543 void
544 reschedule_timeouts(void)
545 {
546  /* For flexibility, allow this to be called before we're initialized. */
547  if (!all_timeouts_initialized)
548  return;
549 
550  /* Disable timeout interrupts for safety. */
551  disable_alarm();
552 
553  /* Reschedule the interrupt, if any timeouts remain active. */
554  if (num_active_timeouts > 0)
555  schedule_alarm(GetCurrentTimestamp());
556 }
557 
558 /*
559  * Enable the specified timeout to fire after the specified delay.
560  *
561  * Delay is given in milliseconds.
562  */
563 void
564 enable_timeout_after(TimeoutId id, int delay_ms)
565 {
566  TimestampTz now;
567  TimestampTz fin_time;
568 
569  /* Disable timeout interrupts for safety. */
570  disable_alarm();
571 
572  /* Queue the timeout at the appropriate time. */
573  now = GetCurrentTimestamp();
574  fin_time = TimestampTzPlusMilliseconds(now, delay_ms);
575  enable_timeout(id, now, fin_time, 0);
576 
577  /* Set the timer interrupt. */
578  schedule_alarm(now);
579 }
580 
581 /*
582  * Enable the specified timeout to fire periodically, with the specified
583  * delay as the time between firings.
584  *
585  * Delay is given in milliseconds.
586  */
587 void
588 enable_timeout_every(TimeoutId id, TimestampTz fin_time, int delay_ms)
589 {
590  TimestampTz now;
591 
592  /* Disable timeout interrupts for safety. */
593  disable_alarm();
594 
595  /* Queue the timeout at the appropriate time. */
596  now = GetCurrentTimestamp();
597  enable_timeout(id, now, fin_time, delay_ms);
598 
599  /* Set the timer interrupt. */
600  schedule_alarm(now);
601 }
602 
603 /*
604  * Enable the specified timeout to fire at the specified time.
605  *
606  * This is provided to support cases where there's a reason to calculate
607  * the timeout by reference to some point other than "now". If there isn't,
608  * use enable_timeout_after(), to avoid calling GetCurrentTimestamp() twice.
609  */
610 void
611 enable_timeout_at(TimeoutId id, TimestampTz fin_time)
612 {
613  TimestampTz now;
614 
615  /* Disable timeout interrupts for safety. */
616  disable_alarm();
617 
618  /* Queue the timeout at the appropriate time. */
619  now = GetCurrentTimestamp();
620  enable_timeout(id, now, fin_time, 0);
621 
622  /* Set the timer interrupt. */
623  schedule_alarm(now);
624 }
625 
626 /*
627  * Enable multiple timeouts at once.
628  *
629  * This works like calling enable_timeout_after() and/or enable_timeout_at()
630  * multiple times. Use this to reduce the number of GetCurrentTimestamp()
631  * and setitimer() calls needed to establish multiple timeouts.
632  */
633 void
634 enable_timeouts(const EnableTimeoutParams *timeouts, int count)
635 {
636  TimestampTz now;
637  int i;
638 
639  /* Disable timeout interrupts for safety. */
640  disable_alarm();
641 
642  /* Queue the timeout(s) at the appropriate times. */
643  now = GetCurrentTimestamp();
644 
645  for (i = 0; i < count; i++)
646  {
647  TimeoutId id = timeouts[i].id;
648  TimestampTz fin_time;
649 
650  switch (timeouts[i].type)
651  {
652  case TMPARAM_AFTER:
653  fin_time = TimestampTzPlusMilliseconds(now,
654  timeouts[i].delay_ms);
655  enable_timeout(id, now, fin_time, 0);
656  break;
657 
658  case TMPARAM_AT:
659  enable_timeout(id, now, timeouts[i].fin_time, 0);
660  break;
661 
662  case TMPARAM_EVERY:
663  fin_time = TimestampTzPlusMilliseconds(now,
664  timeouts[i].delay_ms);
665  enable_timeout(id, now, fin_time, timeouts[i].delay_ms);
666  break;
667 
668  default:
669  elog(ERROR, "unrecognized timeout type %d",
670  (int) timeouts[i].type);
671  break;
672  }
673  }
674 
675  /* Set the timer interrupt. */
676  schedule_alarm(now);
677 }
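
A sketch of batching two timeouts into one call, so a single GetCurrentTimestamp()/setitimer() round trip covers both. LOCK_TIMEOUT and STATEMENT_TIMEOUT are predefined TimeoutIds from timeout.h; the delay values and the function name are invented for the example.

/* Illustrative sketch (not part of timeout.c). */
static void
arm_query_timeouts(void)
{
    EnableTimeoutParams enables[2];

    enables[0].id = LOCK_TIMEOUT;
    enables[0].type = TMPARAM_AFTER;
    enables[0].delay_ms = 2000;          /* hypothetical 2 s lock timeout */

    enables[1].id = STATEMENT_TIMEOUT;
    enables[1].type = TMPARAM_AFTER;
    enables[1].delay_ms = 30000;         /* hypothetical 30 s statement timeout */

    enable_timeouts(enables, 2);
}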
678 
679 /*
680  * Cancel the specified timeout.
681  *
682  * The timeout's I've-been-fired indicator is reset,
683  * unless keep_indicator is true.
684  *
685  * When a timeout is canceled, any other active timeout remains in force.
686  * It's not an error to disable a timeout that is not enabled.
687  */
688 void
689 disable_timeout(TimeoutId id, bool keep_indicator)
690 {
691  /* Assert request is sane */
692  Assert(all_timeouts_initialized);
693  Assert(all_timeouts[id].timeout_handler != NULL);
694 
695  /* Disable timeout interrupts for safety. */
696  disable_alarm();
697 
698  /* Find the timeout and remove it from the active list. */
699  if (all_timeouts[id].active)
700  remove_timeout_index(find_active_timeout(id));
701 
702  /* Mark it inactive, whether it was active or not. */
703  if (!keep_indicator)
704  all_timeouts[id].indicator = false;
705 
706  /* Reschedule the interrupt, if any timeouts remain active. */
707  if (num_active_timeouts > 0)
708  schedule_alarm(GetCurrentTimestamp());
709 }
710 
711 /*
712  * Cancel multiple timeouts at once.
713  *
714  * The timeouts' I've-been-fired indicators are reset,
715  * unless timeouts[i].keep_indicator is true.
716  *
717  * This works like calling disable_timeout() multiple times.
718  * Use this to reduce the number of GetCurrentTimestamp()
719  * and setitimer() calls needed to cancel multiple timeouts.
720  */
721 void
722 disable_timeouts(const DisableTimeoutParams *timeouts, int count)
723 {
724  int i;
725 
726  Assert(all_timeouts_initialized);
727 
728  /* Disable timeout interrupts for safety. */
729  disable_alarm();
730 
731  /* Cancel the timeout(s). */
732  for (i = 0; i < count; i++)
733  {
734  TimeoutId id = timeouts[i].id;
735 
736  Assert(all_timeouts[id].timeout_handler != NULL);
737 
738  if (all_timeouts[id].active)
739  remove_timeout_index(find_active_timeout(id));
740 
741  if (!timeouts[i].keep_indicator)
742  all_timeouts[id].indicator = false;
743  }
744 
745  /* Reschedule the interrupt, if any timeouts remain active. */
746  if (num_active_timeouts > 0)
747  schedule_alarm(GetCurrentTimestamp());
748 }
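
The matching sketch for batched cancellation of the two timeouts armed in the enable_timeouts() example above; as before, the ids are predefined and the function name is invented.

/* Illustrative sketch (not part of timeout.c). */
static void
disarm_query_timeouts(void)
{
    DisableTimeoutParams disables[2];

    disables[0].id = LOCK_TIMEOUT;
    disables[0].keep_indicator = false;

    disables[1].id = STATEMENT_TIMEOUT;
    disables[1].keep_indicator = false;

    disable_timeouts(disables, 2);
}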
749 
750 /*
751  * Disable the signal handler, remove all timeouts from the active list,
752  * and optionally reset their timeout indicators.
753  */
754 void
755 disable_all_timeouts(bool keep_indicators)
756 {
757  int i;
758 
759  disable_alarm();
760 
761  /*
762  * We used to disable the timer interrupt here, but in common usage
763  * patterns it's cheaper to leave it enabled; that may save us from having
764  * to enable it again shortly. See comments in schedule_alarm().
765  */
766 
767  num_active_timeouts = 0;
768 
769  for (i = 0; i < MAX_TIMEOUTS; i++)
770  {
771  all_timeouts[i].active = false;
772  if (!keep_indicators)
773  all_timeouts[i].indicator = false;
774  }
775 }
776 
777 /*
778  * Return true if the timeout is active (enabled and not yet fired)
779  *
780  * This is, of course, subject to race conditions, as the timeout could fire
781  * immediately after we look.
782  */
783 bool
784 get_timeout_active(TimeoutId id)
785 {
786  return all_timeouts[id].active;
787 }
788 
789 /*
790  * Return the timeout's I've-been-fired indicator
791  *
792  * If reset_indicator is true, reset the indicator when returning true.
793  * To avoid missing timeouts due to race conditions, we are careful not to
794  * reset the indicator when returning false.
795  */
796 bool
797 get_timeout_indicator(TimeoutId id, bool reset_indicator)
798 {
799  if (all_timeouts[id].indicator)
800  {
801  if (reset_indicator)
802  all_timeouts[id].indicator = false;
803  return true;
804  }
805  return false;
806 }
807 
808 /*
809  * Return the time when the timeout was most recently activated
810  *
811  * Note: will return 0 if timeout has never been activated in this process.
812  * However, we do *not* reset the start_time when a timeout occurs, so as
813  * not to create a race condition if SIGALRM fires just as some code is
814  * about to fetch the value.
815  */
816 TimestampTz
817 get_timeout_start_time(TimeoutId id)
818 {
819  return all_timeouts[id].start_time;
820 }
821 
822 /*
823  * Return the time when the timeout is, or most recently was, due to fire
824  *
825  * Note: will return 0 if timeout has never been activated in this process.
826  * However, we do *not* reset the fin_time when a timeout occurs, so as
827  * not to create a race condition if SIGALRM fires just as some code is
828  * about to fetch the value.
829  */
830 TimestampTz
831 get_timeout_finish_time(TimeoutId id)
832 {
833  return all_timeouts[id].fin_time;
834 }