PostgreSQL Source Code  git master
inval.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  * POSTGRES cache invalidation dispatcher code.
5  *
6  * This is subtle stuff, so pay attention:
7  *
8  * When a tuple is updated or deleted, our standard visibility rules
9  * consider that it is *still valid* so long as we are in the same command,
10  * ie, until the next CommandCounterIncrement() or transaction commit.
11  * (See access/heap/heapam_visibility.c, and note that system catalogs are
12  * generally scanned under the most current snapshot available, rather than
13  * the transaction snapshot.) At the command boundary, the old tuple stops
14  * being valid and the new version, if any, becomes valid. Therefore,
15  * we cannot simply flush a tuple from the system caches during heap_update()
16  * or heap_delete(). The tuple is still good at that point; what's more,
17  * even if we did flush it, it might be reloaded into the caches by a later
18  * request in the same command. So the correct behavior is to keep a list
19  * of outdated (updated/deleted) tuples and then do the required cache
20  * flushes at the next command boundary. We must also keep track of
21  * inserted tuples so that we can flush "negative" cache entries that match
22  * the new tuples; again, that mustn't happen until end of command.
23  *
24  * Once we have finished the command, we still need to remember inserted
25  * tuples (including new versions of updated tuples), so that we can flush
26  * them from the caches if we abort the transaction. Similarly, we'd better
27  * be able to flush "negative" cache entries that may have been loaded in
28  * place of deleted tuples, so we still need the deleted ones too.
29  *
30  * If we successfully complete the transaction, we have to broadcast all
31  * these invalidation events to other backends (via the SI message queue)
32  * so that they can flush obsolete entries from their caches. Note we have
33  * to record the transaction commit before sending SI messages, otherwise
34  * the other backends won't see our updated tuples as good.
35  *
36  * When a subtransaction aborts, we can process and discard any events
37  * it has queued. When a subtransaction commits, we just add its events
38  * to the pending lists of the parent transaction.
39  *
40  * In short, we need to remember until xact end every insert or delete
41  * of a tuple that might be in the system caches. Updates are treated as
42  * two events, delete + insert, for simplicity. (If the update doesn't
43  * change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  * We do not need to register EVERY tuple operation in this way, just those
46  * on tuples in relations that have associated catcaches. We do, however,
47  * have to register every operation on every tuple that *could* be in a
48  * catcache, whether or not it currently is in our cache. Also, if the
49  * tuple is in a relation that has multiple catcaches, we need to register
50  * an invalidation message for each such catcache. catcache.c's
51  * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  * catcaches may need invalidation for a given tuple.
53  *
54  * Also, whenever we see an operation on a pg_class, pg_attribute, or
55  * pg_index tuple, we register a relcache flush operation for the relation
56  * described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  * Likewise for pg_constraint tuples for foreign keys on relations.
58  *
59  * We keep the relcache flush requests in lists separate from the catcache
60  * tuple flush requests. This allows us to issue all the pending catcache
61  * flushes before we issue relcache flushes, which saves us from loading
62  * a catcache tuple during relcache load only to flush it again right away.
63  * Also, we avoid queuing multiple relcache flush requests for the same
64  * relation, since a relcache flush is relatively expensive to do.
65  * (XXX is it worth testing likewise for duplicate catcache flush entries?
66  * Probably not.)
67  *
68  * If a relcache flush is issued for a system relation that we preload
69  * from the relcache init file, we must also delete the init file so that
70  * it will be rebuilt during the next backend restart. The actual work of
71  * manipulating the init file is in relcache.c, but we keep track of the
72  * need for it here.
73  *
74  * The request lists proper are kept in CurTransactionContext of their
75  * creating (sub)transaction, since they can be forgotten on abort of that
76  * transaction but must be kept till top-level commit otherwise. For
77  * simplicity we keep the controlling list-of-lists in TopTransactionContext.
78  *
79  * Currently, inval messages are sent without regard for the possibility
80  * that the object described by the catalog tuple might be a session-local
81  * object such as a temporary table. This is because (1) this code has
82  * no practical way to tell the difference, and (2) it is not certain that
83  * other backends don't have catalog cache or even relcache entries for
84  * such tables, anyway; there is nothing that prevents that. It might be
85  * worth trying to avoid sending such inval traffic in the future, if those
86  * problems can be overcome cheaply.
87  *
88  * When wal_level=logical, write invalidations into WAL at each command end to
89  * support the decoding of the in-progress transactions. See
90  * CommandEndInvalidationMessages.
91  *
92  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
93  * Portions Copyright (c) 1994, Regents of the University of California
94  *
95  * IDENTIFICATION
96  * src/backend/utils/cache/inval.c
97  *
98  *-------------------------------------------------------------------------
99  */
100 #include "postgres.h"
101 
102 #include <limits.h>
103 
104 #include "access/htup_details.h"
105 #include "access/xact.h"
106 #include "catalog/catalog.h"
107 #include "catalog/pg_constraint.h"
108 #include "miscadmin.h"
109 #include "port/pg_bitutils.h"
110 #include "storage/sinval.h"
111 #include "storage/smgr.h"
112 #include "utils/catcache.h"
113 #include "utils/guc.h"
114 #include "utils/inval.h"
115 #include "utils/memdebug.h"
116 #include "utils/memutils.h"
117 #include "utils/rel.h"
118 #include "utils/relmapper.h"
119 #include "utils/snapmgr.h"
120 #include "utils/syscache.h"
121 
122 
123 /*
124  * To minimize palloc traffic, we keep pending requests in successively-
125  * larger chunks (a slightly more sophisticated version of an expansible
126  * array). All request types can be stored as SharedInvalidationMessage
127  * records. The ordering of requests within a list is never significant.
128  */
129 typedef struct InvalidationChunk
130 {
131  struct InvalidationChunk *next; /* list link */
132  int nitems; /* # items currently stored in chunk */
133  int maxitems; /* size of allocated array in this chunk */
/* NOTE(review): lines 134-135 of the original (the msgs[] flexible array
 * member and the "} InvalidationChunk;" closer) appear to be missing from
 * this capture -- chunk->msgs is dereferenced below, so confirm against
 * the canonical inval.c. */
136 
138 {
/* NOTE(review): the "typedef struct InvalidationListHeader" opener (line
 * 137) is missing here.  This header groups the two chunk lists that keep
 * catcache messages separate from relcache messages. */
139  InvalidationChunk *cclist; /* list of chunks holding catcache msgs */
140  InvalidationChunk *rclist; /* list of chunks holding relcache msgs */
142 
143 /*----------------
144  * Invalidation info is divided into two lists:
145  * 1) events so far in current command, not yet reflected to caches.
146  * 2) events in previous commands of current transaction; these have
147  * been reflected to local caches, and must be either broadcast to
148  * other backends or rolled back from local cache when we commit
149  * or abort the transaction.
150  * Actually, we need two such lists for each level of nested transaction,
151  * so that we can discard events from an aborted subtransaction. When
152  * a subtransaction commits, we append its lists to the parent's lists.
153  *
154  * The relcache-file-invalidated flag can just be a simple boolean,
155  * since we only act on it at transaction commit; we don't care which
156  * command of the transaction set it.
157  *----------------
158  */
159 
160 typedef struct TransInvalidationInfo
161 {
162  /* Back link to parent transaction's info */
/* NOTE(review): member declarations are missing at several points in this
 * copy (lines 163, 169, 172, 175-178).  From the usage below they are:
 * "struct TransInvalidationInfo *parent", two InvalidationListHeader
 * members CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs, a bool
 * RelcacheInitFileInval, and the closing "} TransInvalidationInfo;".
 * Confirm against the canonical source. */
164 
165  /* Subtransaction nesting depth */
166  int my_level;
167 
168  /* head of current-command event list */
170 
171  /* head of previous-commands event list */
173 
174  /* init file must be invalidated? */
177 
179 
183 
184 /* GUC storage */
/* NOTE(review): the "static int debug_discard_caches" GUC variable (read by
 * AcceptInvalidationMessages below) appears to be missing from this copy. */
186 
187 /*
188  * Dynamically-registered callback functions. Current implementation
189  * assumes there won't be enough of these to justify a dynamically resizable
190  * array; it'd be easy to improve that if needed.
191  *
192  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
193  * syscache are linked into a list pointed to by syscache_callback_links[id].
194  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
195  */
196 
197 #define MAX_SYSCACHE_CALLBACKS 64
198 #define MAX_RELCACHE_CALLBACKS 10
199 
200 static struct SYSCACHECALLBACK
201 {
202  int16 id; /* cache number */
203  int16 link; /* next callback index+1 for same cache */
/* NOTE(review): the function-pointer and arg members, the
 * syscache_callback_list[] array declaration, and the
 * syscache_callback_links[] array are missing from this capture
 * (callers below use ccitem->function(ccitem->arg, ...)). */
207 
209 
210 static int syscache_callback_count = 0;
211 
212 static struct RELCACHECALLBACK
213 {
/* NOTE(review): the RELCACHECALLBACK members (function pointer + arg) and
 * the relcache_callback_list[MAX_RELCACHE_CALLBACKS] declaration are
 * missing here; their shape is implied by the loops that walk the list. */
217 
218 static int relcache_callback_count = 0;
219 
220 /* ----------------------------------------------------------------
221  * Invalidation list support functions
222  *
223  * These three routines encapsulate processing of the "chunked"
224  * representation of what is logically just a list of messages.
225  * ----------------------------------------------------------------
226  */
227 
228 /*
229  * AddInvalidationMessage
230  * Add an invalidation message to a list (of chunks).
231  *
232  * Note that we do not pay any great attention to maintaining the original
233  * ordering of the messages.
234  */
/*
 * Push one SharedInvalidationMessage onto a chunked list, growing the list
 * with successively-doubled chunks to keep palloc traffic low.
 * NOTE(review): the signature line is missing from this capture; per the
 * preceding comment it is
 *   AddInvalidationMessage(InvalidationChunk **listHdr,
 *                          SharedInvalidationMessage *msg)
 * The MemoryContextAlloc calls (lines 246-248 and 260-261) are also
 * missing -- confirm the allocation context against the canonical source.
 */
235 static void
238 {
239  InvalidationChunk *chunk = *listHdr;
240 
241  if (chunk == NULL)
242  {
243  /* First time through; create initial chunk */
244 #define FIRSTCHUNKSIZE 32
245  chunk = (InvalidationChunk *)
249  chunk->nitems = 0;
250  chunk->maxitems = FIRSTCHUNKSIZE;
251  chunk->next = *listHdr;
252  *listHdr = chunk;
253  }
254  else if (chunk->nitems >= chunk->maxitems)
255  {
256  /* Need another chunk; double size of last chunk */
257  int chunksize = 2 * chunk->maxitems;
258 
259  chunk = (InvalidationChunk *)
262  chunksize * sizeof(SharedInvalidationMessage));
263  chunk->nitems = 0;
264  chunk->maxitems = chunksize;
265  chunk->next = *listHdr;
266  *listHdr = chunk;
267  }
268  /* Okay, add message to current chunk */
269  chunk->msgs[chunk->nitems] = *msg;
270  chunk->nitems++;
271 }
272 
273 /*
274  * Append one list of invalidation message chunks to another, resetting
275  * the source chunk-list pointer to NULL.
276  */
/*
 * Splice the src chunk list onto the front of dest in O(length of src):
 * walk to src's tail, hook dest's old head behind it, then make src's head
 * the new dest head and reset src to empty.
 * NOTE(review): the first parameter line ("AppendInvalidationMessageList(
 * InvalidationChunk **destHdr,") is missing from this capture.
 */
277 static void
279  InvalidationChunk **srcHdr)
280 {
281  InvalidationChunk *chunk = *srcHdr;
282 
283  if (chunk == NULL)
284  return; /* nothing to do */
285 
286  while (chunk->next != NULL)
287  chunk = chunk->next;
288 
289  chunk->next = *destHdr;
290 
291  *destHdr = *srcHdr;
292 
293  *srcHdr = NULL;
294 }
295 
296 /*
297  * Process a list of invalidation messages.
298  *
299  * This is a macro that executes the given code fragment for each message in
300  * a message chunk list. The fragment should refer to the message as *msg.
301  */
/* Iterates every message in every chunk; codeFragment sees each one as
 * *msg.  Wrapped in do/while(0) so the macro is statement-safe. */
302 #define ProcessMessageList(listHdr, codeFragment) \
303  do { \
304  InvalidationChunk *_chunk; \
305  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
306  { \
307  int _cindex; \
308  for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
309  { \
310  SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
311  codeFragment; \
312  } \
313  } \
314  } while (0)
315 
316 /*
317  * Process a list of invalidation messages group-wise.
318  *
319  * As above, but the code fragment can handle an array of messages.
320  * The fragment should refer to the messages as msgs[], with n entries.
321  */
/* Chunk-at-a-time variant: codeFragment sees each chunk's messages as the
 * array msgs[] with n entries, letting callers batch work per chunk. */
322 #define ProcessMessageListMulti(listHdr, codeFragment) \
323  do { \
324  InvalidationChunk *_chunk; \
325  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
326  { \
327  SharedInvalidationMessage *msgs = _chunk->msgs; \
328  int n = _chunk->nitems; \
329  codeFragment; \
330  } \
331  } while (0)
332 
333 
334 /* ----------------------------------------------------------------
335  * Invalidation set support functions
336  *
337  * These routines understand about the division of a logical invalidation
338  * list into separate physical lists for catcache and relcache entries.
339  * ----------------------------------------------------------------
340  */
341 
342 /*
343  * Add a catcache inval entry
344  */
/*
 * Build a catcache-tuple inval message and queue it on hdr->cclist.
 * NOTE(review): the name/first-parameter line ("AddCatcacheInvalidation-
 * Message(InvalidationListHeader *hdr,") and the local
 * "SharedInvalidationMessage msg;" declaration (line 349) are missing
 * from this capture.
 */
345 static void
347  int id, uint32 hashValue, Oid dbId)
348 {
350 
351  Assert(id < CHAR_MAX);
352  msg.cc.id = (int8) id;
353  msg.cc.dbId = dbId;
354  msg.cc.hashValue = hashValue;
355 
356  /*
357  * Define padding bytes in SharedInvalidationMessage structs to be
358  * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
359  * multiple processes, will cause spurious valgrind warnings about
360  * undefined memory being used. That's because valgrind remembers the
361  * undefined bytes from the last local process's store, not realizing that
362  * another process has written since, filling the previously uninitialized
363  * bytes
364  */
365  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
366 
367  AddInvalidationMessage(&hdr->cclist, &msg);
368 }
369 
370 /*
371  * Add a whole-catalog inval entry
372  */
/*
 * Build a whole-catalog inval message (flushes every catcache entry from
 * one catalog) and queue it on hdr->cclist.
 * NOTE(review): the name line, the local msg declaration (line 377), and
 * the "msg.cat.id = SHAREDINVALCATALOG_ID;" assignment (line 379) are
 * missing from this capture.
 */
373 static void
375  Oid dbId, Oid catId)
376 {
378 
380  msg.cat.dbId = dbId;
381  msg.cat.catId = catId;
382  /* check AddCatcacheInvalidationMessage() for an explanation */
383  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
384 
385  AddInvalidationMessage(&hdr->cclist, &msg);
386 }
387 
388 /*
389  * Add a relcache inval entry
390  */
/*
 * Queue a relcache inval for (dbId, relId) on hdr->rclist, skipping exact
 * duplicates and anything already covered by an all-relations (InvalidOid)
 * entry.  The bare "return);" below closes a ProcessMessageList() macro
 * invocation whose opening line (402, scanning hdr->rclist) is missing
 * from this capture, as are the name line, the local msg declaration
 * (line 395), and "msg.rc.id = SHAREDINVALRELCACHE_ID;" (line 409).
 */
391 static void
393  Oid dbId, Oid relId)
394 {
396 
397  /*
398  * Don't add a duplicate item. We assume dbId need not be checked because
399  * it will never change. InvalidOid for relId means all relations so we
400  * don't need to add individual ones when it is present.
401  */
403  if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
404  (msg->rc.relId == relId ||
405  msg->rc.relId == InvalidOid))
406  return);
407 
408  /* OK, add the item */
410  msg.rc.dbId = dbId;
411  msg.rc.relId = relId;
412  /* check AddCatcacheInvalidationMessage() for an explanation */
413  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
414 
415  AddInvalidationMessage(&hdr->rclist, &msg);
416 }
417 
418 /*
419  * Add a snapshot inval entry
420  */
/*
 * Queue a snapshot inval for (dbId, relId) on hdr->rclist, skipping exact
 * duplicates.  As above, "return);" closes a ProcessMessageList() macro
 * call whose opening line (429) is missing from this capture, along with
 * the name line, the local msg declaration (line 425), and
 * "msg.sn.id = SHAREDINVALSNAPSHOT_ID;" (line 435).
 */
421 static void
423  Oid dbId, Oid relId)
424 {
426 
427  /* Don't add a duplicate item */
428  /* We assume dbId need not be checked because it will never change */
430  if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
431  msg->sn.relId == relId)
432  return);
433 
434  /* OK, add the item */
436  msg.sn.dbId = dbId;
437  msg.sn.relId = relId;
438  /* check AddCatcacheInvalidationMessage() for an explanation */
439  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
440 
441  AddInvalidationMessage(&hdr->rclist, &msg);
442 }
443 
444 /*
445  * Append one list of invalidation messages to another, resetting
446  * the source list to empty.
447  */
/*
 * Move both sublists (cclist and rclist) of src onto dest, leaving src
 * empty.  NOTE(review): the signature line and the two
 * AppendInvalidationMessageList() calls (lines 449-453) are missing from
 * this capture; only the braces survive.
 */
448 static void
451 {
454 }
455 
456 /*
457  * Execute the given function for all the messages in an invalidation list.
458  * The list is not altered.
459  *
460  * catcache entries are processed first, for reasons mentioned above.
461  */
/*
 * Apply func to every queued message, catcache list first (see the file
 * header for why catcache flushes must precede relcache flushes).
 * NOTE(review): the name/first-parameter line ("ProcessInvalidationMessages(
 * InvalidationListHeader *hdr,") is missing from this capture.
 */
462 static void
464  void (*func) (SharedInvalidationMessage *msg))
465 {
466  ProcessMessageList(hdr->cclist, func(msg));
467  ProcessMessageList(hdr->rclist, func(msg));
468 }
469 
470 /*
471  * As above, but the function is able to process an array of messages
472  * rather than just one at a time.
473  */
/*
 * Array-at-a-time variant of ProcessInvalidationMessages; func receives
 * each chunk's messages as one contiguous array.
 * NOTE(review): the name/first-parameter line is missing from this capture.
 */
474 static void
476  void (*func) (const SharedInvalidationMessage *msgs, int n))
477 {
478  ProcessMessageListMulti(hdr->cclist, func(msgs, n));
479  ProcessMessageListMulti(hdr->rclist, func(msgs, n));
480 }
481 
482 /* ----------------------------------------------------------------
483  * private support functions
484  * ----------------------------------------------------------------
485  */
486 
487 /*
488  * RegisterCatcacheInvalidation
489  *
490  * Register an invalidation event for a catcache tuple entry.
491  */
/*
 * Queue a catcache-tuple inval into the current command's pending list.
 * NOTE(review): the name/first-parameter line and the call target
 * (AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
 * line 497) are missing from this capture.
 */
492 static void
494  uint32 hashValue,
495  Oid dbId)
496 {
498  cacheId, hashValue, dbId);
499 }
500 
501 /*
502  * RegisterCatalogInvalidation
503  *
504  * Register an invalidation event for all catcache entries from a catalog.
505  */
/*
 * Queue a whole-catalog inval into the current command's pending list.
 * NOTE(review): the signature line and the AddCatalogInvalidationMessage
 * call head (line 509) are missing from this capture.
 */
506 static void
508 {
510  dbId, catId);
511 }
512 
513 /*
514  * RegisterRelcacheInvalidation
515  *
516  * As above, but register a relcache invalidation event.
517  */
/*
 * Queue a relcache inval, force the next CommandCounterIncrement() to run
 * CommandEndInvalidationMessages(), and flag init-file removal when the
 * target relation is (or might be) preloaded from the relcache init file.
 * NOTE(review): the signature line and the AddRelcacheInvalidationMessage
 * call head (line 521) are missing from this capture.
 */
518 static void
520 {
522  dbId, relId);
523 
524  /*
525  * Most of the time, relcache invalidation is associated with system
526  * catalog updates, but there are a few cases where it isn't. Quick hack
527  * to ensure that the next CommandCounterIncrement() will think that we
528  * need to do CommandEndInvalidationMessages().
529  */
530  (void) GetCurrentCommandId(true);
531 
532  /*
533  * If the relation being invalidated is one of those cached in a relcache
534  * init file, mark that we need to zap that file at commit. For simplicity
535  * invalidations for a specific database always invalidate the shared file
536  * as well. Also zap when we are invalidating whole relcache.
537  */
538  if (relId == InvalidOid || RelationIdIsInInitFile(relId))
539  transInvalInfo->RelcacheInitFileInval = true;
540 }
541 
542 /*
543  * RegisterSnapshotInvalidation
544  *
545  * Register an invalidation event for MVCC scans against a given catalog.
546  * Only needed for catalogs that don't have catcaches.
547  */
/*
 * Queue a snapshot inval for a catalog without catcaches.
 * NOTE(review): the signature line and the AddSnapshotInvalidationMessage
 * call head (line 551) are missing from this capture.
 */
548 static void
550 {
552  dbId, relId);
553 }
554 
555 /*
556  * LocalExecuteInvalidationMessage
557  *
558  * Process a single invalidation message (which could be of any type).
559  * Only the local caches are flushed; this does not transmit the message
560  * to other backends.
561  */
/*
 * Dispatch one SI message to the appropriate local cache: non-negative ids
 * are catcache numbers; negative ids select catalog, relcache, smgr,
 * relmapper, or snapshot invalidation.  Only this backend's caches are
 * touched; nothing is broadcast.
 * NOTE(review): several lines are missing from this capture: the signature
 * line (563, taking SharedInvalidationMessage *msg), the
 * InvalidateCatalogSnapshot() calls (569, 580), CatalogCacheFlushCatalog()
 * (582), the RelationCacheInvalidate()/RelationCacheInvalidateEntry()
 * branches (594, 596), and the snapshot-branch bodies (630, 632).
 */
562 void
564 {
565  if (msg->id >= 0)
566  {
567  if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
568  {
570 
571  SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
572 
573  CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
574  }
575  }
576  else if (msg->id == SHAREDINVALCATALOG_ID)
577  {
578  if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
579  {
581 
583 
584  /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
585  }
586  }
587  else if (msg->id == SHAREDINVALRELCACHE_ID)
588  {
589  if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
590  {
591  int i;
592 
593  if (msg->rc.relId == InvalidOid)
595  else
597 
598  for (i = 0; i < relcache_callback_count; i++)
599  {
600  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
601 
602  ccitem->function(ccitem->arg, msg->rc.relId);
603  }
604  }
605  }
606  else if (msg->id == SHAREDINVALSMGR_ID)
607  {
608  /*
609  * We could have smgr entries for relations of other databases, so no
610  * short-circuit test is possible here.
611  */
612  RelFileNodeBackend rnode;
613 
614  rnode.node = msg->sm.rnode;
615  rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
616  smgrclosenode(rnode);
617  }
618  else if (msg->id == SHAREDINVALRELMAP_ID)
619  {
620  /* We only care about our own database and shared catalogs */
621  if (msg->rm.dbId == InvalidOid)
622  RelationMapInvalidate(true);
623  else if (msg->rm.dbId == MyDatabaseId)
624  RelationMapInvalidate(false);
625  }
626  else if (msg->id == SHAREDINVALSNAPSHOT_ID)
627  {
628  /* We only care about our own database and shared catalogs */
629  if (msg->sn.dbId == InvalidOid)
631  else if (msg->sn.dbId == MyDatabaseId)
633  }
634  else
635  elog(FATAL, "unrecognized SI message ID: %d", msg->id);
636 }
637 
638 /*
639  * InvalidateSystemCaches
640  *
641  * This blows away all tuples in the system catalog caches and
642  * all the cached relation descriptors and smgr cache entries.
643  * Relation descriptors that have positive refcounts are then rebuilt.
644  *
645  * We call this when we see a shared-inval-queue overflow signal,
646  * since that tells us we've lost some shared-inval messages and hence
647  * don't know what needs to be invalidated.
648  */
/*
 * Nuclear option: reset every catcache and the whole relcache, then run
 * all registered callbacks so extensions drop their derived state too.
 * NOTE(review): the signature line (650) and the InvalidateCatalogSnapshot()
 * / ResetCatalogCaches() calls (lines 654-655) are missing from this
 * capture.
 */
649 void
651 {
652  int i;
653 
656  RelationCacheInvalidate(); /* gets smgr and relmap too */
657 
658  for (i = 0; i < syscache_callback_count; i++)
659  {
660  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
661 
662  ccitem->function(ccitem->arg, ccitem->id, 0);
663  }
664 
665  for (i = 0; i < relcache_callback_count; i++)
666  {
667  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
668 
669  ccitem->function(ccitem->arg, InvalidOid);
670  }
671 }
672 
673 
674 /* ----------------------------------------------------------------
675  * public functions
676  * ----------------------------------------------------------------
677  */
678 
679 /*
680  * AcceptInvalidationMessages
681  * Read and process invalidation messages from the shared invalidation
682  * message queue.
683  *
684  * Note:
685  * This should be called as the first step in processing a transaction.
686  */
/*
 * Drain the shared SI queue and apply each message locally; with
 * debug_discard_caches set, also force full cache resets (recursively up
 * to the configured depth) to smoke out cache-flush hazards.
 * NOTE(review): the signature line (688), the
 * ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
 * InvalidateSystemCaches) call (690-691), and the recursive
 * InvalidateSystemCachesExtended/InvalidateSystemCaches call inside the
 * debug block (725) are missing from this capture.
 */
687 void
689 {
692 
693  /*----------
694  * Test code to force cache flushes anytime a flush could happen.
695  *
696  * This helps detect intermittent faults caused by code that reads a cache
697  * entry and then performs an action that could invalidate the entry, but
698  * rarely actually does so. This can spot issues that would otherwise
699  * only arise with badly timed concurrent DDL, for example.
700  *
701  * The default debug_discard_caches = 0 does no forced cache flushes.
702  *
703  * If used with CLOBBER_FREED_MEMORY,
704  * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
705  * provides a fairly thorough test that the system contains no cache-flush
706  * hazards. However, it also makes the system unbelievably slow --- the
707  * regression tests take about 100 times longer than normal.
708  *
709  * If you're a glutton for punishment, try
710  * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
711  * This slows things by at least a factor of 10000, so I wouldn't suggest
712  * trying to run the entire regression tests that way. It's useful to try
713  * a few simple tests, to make sure that cache reload isn't subject to
714  * internal cache-flush hazards, but after you've done a few thousand
715  * recursive reloads it's unlikely you'll learn more.
716  *----------
717  */
718 #ifdef DISCARD_CACHES_ENABLED
719  {
720  static int recursion_depth = 0;
721 
722  if (recursion_depth < debug_discard_caches)
723  {
724  recursion_depth++;
726  recursion_depth--;
727  }
728  }
729 #endif
730 }
731 
732 /*
733  * PrepareInvalidationState
734  * Initialize inval lists for the current (sub)transaction.
735  */
/*
 * Lazily push a TransInvalidationInfo entry for the current (sub)xact;
 * no-op when one already exists at this nesting level.
 * NOTE(review): the signature line (737), the MemoryContextAllocZero call
 * head (746, allocating in TopTransactionContext), and the
 * "myInfo->my_level = GetCurrentTransactionNestLevel();" assignment (749)
 * are missing from this capture.
 */
736 static void
738 {
739  TransInvalidationInfo *myInfo;
740 
741  if (transInvalInfo != NULL &&
742  transInvalInfo->my_level == GetCurrentTransactionNestLevel())
743  return;
744 
745  myInfo = (TransInvalidationInfo *)
747  sizeof(TransInvalidationInfo));
748  myInfo->parent = transInvalInfo;
750 
751  /*
752  * If there's any previous entry, this one should be for a deeper nesting
753  * level.
754  */
755  Assert(transInvalInfo == NULL ||
756  myInfo->my_level > transInvalInfo->my_level);
757 
758  transInvalInfo = myInfo;
759 }
760 
761 /*
762  * PostPrepare_Inval
763  * Clean up after successful PREPARE.
764  *
765  * Here, we want to act as though the transaction aborted, so that we will
766  * undo any syscache changes it made, thereby bringing us into sync with the
767  * outside world, which doesn't believe the transaction committed yet.
768  *
769  * If the prepared transaction is later aborted, there is nothing more to
770  * do; if it commits, we will receive the consequent inval messages just
771  * like everyone else.
772  */
/*
 * After a successful PREPARE, behave as though the transaction aborted so
 * local caches are rolled back to match the (not-yet-committed) outside
 * view; the eventual COMMIT PREPARED delivers the real inval messages.
 * NOTE(review): the signature line (774, "PostPrepare_Inval(void)") is
 * missing from this capture.
 */
773 void
775 {
776  AtEOXact_Inval(false);
777 }
778 
779 /*
780  * Collect invalidation messages into SharedInvalidMessagesArray array.
781  */
/*
 * ProcessMessageListMulti callback: append each chunk's messages onto the
 * flat SharedInvalidMessagesArray, growing it geometrically via repalloc.
 * NOTE(review): missing from this capture: the signature line (783,
 * taking const SharedInvalidationMessage *msgs and int n), the initial
 * maxSharedInvalidMessagesArray sizing (790-791), the overflow test and
 * doubling loop head (801-806), and the final
 * "numSharedInvalidMessagesArray += n;" (815).
 */
782 static void
784 {
785  /*
786  * Initialise array first time through in each commit
787  */
788  if (SharedInvalidMessagesArray == NULL)
789  {
792 
793  /*
794  * Although this is being palloc'd we don't actually free it directly.
795  * We're so close to EOXact that we now we're going to lose it anyhow.
796  */
797  SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
798  * sizeof(SharedInvalidationMessage));
799  }
800 
802  {
804 
805  SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
807  * sizeof(SharedInvalidationMessage));
808  }
809 
810  /*
811  * Append the next chunk onto the array
812  */
813  memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
814  msgs, n * sizeof(SharedInvalidationMessage));
816 }
817 
818 /*
819  * xactGetCommittedInvalidationMessages() is executed by
820  * RecordTransactionCommit() to add invalidation messages onto the
821  * commit record. This applies only to commit message types, never to
822  * abort records. Must always run before AtEOXact_Inval(), since that
823  * removes the data we need to see.
824  *
825  * Remember that this runs before we have officially committed, so we
826  * must not do anything here to change what might occur *if* we should
827  * fail between here and the actual commit.
828  *
829  * see also xact_redo_commit() and xact_desc_commit()
830  */
/*
 * Flatten this transaction's pending inval messages into one contiguous
 * array (for the WAL commit record) and report whether the relcache init
 * file must be removed.  Runs before the actual commit, so it must not
 * change any state that matters on failure.
 * NOTE(review): missing from this capture: the signature line (832, msgs
 * out-parameter), the MemoryContextSwitchTo(CurTransactionContext) call
 * (863), the two ProcessInvalidationMessagesMulti calls over
 * CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs (865-868), the Assert
 * head (871), the "*msgs = SharedInvalidMessagesArray;" assignment (874),
 * and the "return numSharedInvalidMessagesArray;" (876).
 */
831 int
833  bool *RelcacheInitFileInval)
834 {
835  MemoryContext oldcontext;
836 
837  /* Quick exit if we haven't done anything with invalidation messages. */
838  if (transInvalInfo == NULL)
839  {
840  *RelcacheInitFileInval = false;
841  *msgs = NULL;
842  return 0;
843  }
844 
845  /* Must be at top of stack */
846  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
847 
848  /*
849  * Relcache init file invalidation requires processing both before and
850  * after we send the SI messages. However, we need not do anything unless
851  * we committed.
852  */
853  *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
854 
855  /*
856  * Walk through TransInvalidationInfo to collect all the messages into a
857  * single contiguous array of invalidation messages. It must be contiguous
858  * so we can copy directly into WAL message. Maintain the order that they
859  * would be processed in by AtEOXact_Inval(), to ensure emulated behaviour
860  * in redo is as similar as possible to original. We want the same bugs,
861  * if any, not new ones.
862  */
864 
869  MemoryContextSwitchTo(oldcontext);
870 
872  SharedInvalidMessagesArray == NULL));
874 
877 }
878 
879 /*
880  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
881  * standby_redo() to process invalidation messages. Currently that happens
882  * only at end-of-xact.
883  *
884  * Relcache init file invalidation requires processing both
885  * before and after we send the SI messages. See AtEOXact_Inval()
886  */
/*
 * Redo-side twin of AtEOXact_Inval: during recovery, remove relcache init
 * files (before AND after), then broadcast the logged messages via the SI
 * queue.  DatabasePath is set/unset by hand because SetDatabasePath is
 * once-per-backend and unusable in recovery.
 * NOTE(review): missing from this capture: the signature line (888,
 * SharedInvalidationMessage *msgs first parameter), the
 * RelationCacheInitFilePreInvalidate() call (913), the pfree(DatabasePath)
 * (917), and the RelationCacheInitFilePostInvalidate() call (925).
 */
887 void
889  int nmsgs, bool RelcacheInitFileInval,
890  Oid dbid, Oid tsid)
891 {
892  if (nmsgs <= 0)
893  return;
894 
895  elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
896  (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
897 
898  if (RelcacheInitFileInval)
899  {
900  elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
901  dbid);
902 
903  /*
904  * RelationCacheInitFilePreInvalidate, when the invalidation message
905  * is for a specific database, requires DatabasePath to be set, but we
906  * should not use SetDatabasePath during recovery, since it is
907  * intended to be used only once by normal backends. Hence, a quick
908  * hack: set DatabasePath directly then unset after use.
909  */
910  if (OidIsValid(dbid))
911  DatabasePath = GetDatabasePath(dbid, tsid);
912 
914 
915  if (OidIsValid(dbid))
916  {
918  DatabasePath = NULL;
919  }
920  }
921 
922  SendSharedInvalidMessages(msgs, nmsgs);
923 
924  if (RelcacheInitFileInval)
926 }
927 
928 /*
929  * AtEOXact_Inval
930  * Process queued-up invalidation messages at end of main transaction.
931  *
932  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
933  * to the shared invalidation message queue. Note that these will be read
934  * not only by other backends, but also by our own backend at the next
935  * transaction start (via AcceptInvalidationMessages). This means that
936  * we can skip immediate local processing of anything that's still in
937  * CurrentCmdInvalidMsgs, and just send that list out too.
938  *
939  * If not isCommit, we are aborting, and must locally process the messages
940  * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
941  * since they'll not have seen our changed tuples anyway. We can forget
942  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
943  * the caches yet.
944  *
945  * In any case, reset the various lists to empty. We need not physically
946  * free memory here, since TopTransactionContext is about to be emptied
947  * anyway.
948  *
949  * Note:
950  * This should be called as the last step in processing a transaction.
951  */
952 void
953 AtEOXact_Inval(bool isCommit)
954 {
955  /* Quick exit if no messages */
956  if (transInvalInfo == NULL)
957  return;
958 
959  /* Must be at top of stack */
960  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
961 
962  if (isCommit)
963  {
964  /*
965  * Relcache init file invalidation requires processing both before and
966  * after we send the SI messages. However, we need not do anything
967  * unless we committed.
968  */
969  if (transInvalInfo->RelcacheInitFileInval)
/* NOTE(review): RelationCacheInitFilePreInvalidate() call (line 970) is
 * missing from this capture. */
971 
973  &transInvalInfo->CurrentCmdInvalidMsgs);
/* NOTE(review): lines 972 and 975-976 are missing: the
 * AppendInvalidationMessages head folding CurrentCmdInvalidMsgs into
 * PriorCmdInvalidMsgs, and the ProcessInvalidationMessagesMulti call that
 * sends PriorCmdInvalidMsgs via SendSharedInvalidMessages. */
977 
978  if (transInvalInfo->RelcacheInitFileInval)
/* NOTE(review): RelationCacheInitFilePostInvalidate() call (line 979) is
 * missing from this capture. */
980  }
981  else
982  {
/* NOTE(review): the abort path (lines 983-984), which locally applies
 * PriorCmdInvalidMsgs via ProcessInvalidationMessages(...,
 * LocalExecuteInvalidationMessage), is missing from this capture. */
985  }
986 
987  /* Need not free anything explicitly */
988  transInvalInfo = NULL;
989  SharedInvalidMessagesArray = NULL;
/* NOTE(review): the "numSharedInvalidMessagesArray = 0;" reset (line 990)
 * is missing from this capture. */
991 }
992 
993 /*
994  * AtEOSubXact_Inval
995  * Process queued-up invalidation messages at end of subtransaction.
996  *
997  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
998  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
999  * parent's PriorCmdInvalidMsgs list.
1000  *
1001  * If not isCommit, we are aborting, and must locally process the messages
1002  * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
1003  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
1004  * touched the caches yet.
1005  *
1006  * In any case, pop the transaction stack. We need not physically free memory
1007  * here, since CurTransactionContext is about to be emptied anyway
1008  * (if aborting). Beware of the possibility of aborting the same nesting
1009  * level twice, though.
1010  */
1011 void
1012 AtEOSubXact_Inval(bool isCommit)
1013 {
1014  int my_level;
/* NOTE(review): the "TransInvalidationInfo *myInfo = transInvalInfo;"
 * declaration (line 1015) is missing from this capture. */
1016 
1017  /* Quick exit if no messages. */
1018  if (myInfo == NULL)
1019  return;
1020 
1021  /* Also bail out quickly if messages are not for this level. */
1022  my_level = GetCurrentTransactionNestLevel();
1023  if (myInfo->my_level != my_level)
1024  {
1025  Assert(myInfo->my_level < my_level);
1026  return;
1027  }
1028 
1029  if (isCommit)
1030  {
1031  /* If CurrentCmdInvalidMsgs still has anything, fix it */
/* NOTE(review): the CommandEndInvalidationMessages() call (line 1032) is
 * missing from this capture. */
1033 
1034  /*
1035  * We create invalidation stack entries lazily, so the parent might
1036  * not have one. Instead of creating one, moving all the data over,
1037  * and then freeing our own, we can just adjust the level of our own
1038  * entry.
1039  */
1040  if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1041  {
1042  myInfo->my_level--;
1043  return;
1044  }
1045 
1046  /* Pass up my inval messages to parent */
/* NOTE(review): the AppendInvalidationMessages head targeting
 * &myInfo->parent->PriorCmdInvalidMsgs (line 1047) is missing from this
 * capture. */
1048  &myInfo->PriorCmdInvalidMsgs);
1049 
1050  /* Pending relcache inval becomes parent's problem too */
1051  if (myInfo->RelcacheInitFileInval)
1052  myInfo->parent->RelcacheInitFileInval = true;
1053 
1054  /* Pop the transaction state stack */
1055  transInvalInfo = myInfo->parent;
1056 
1057  /* Need not free anything else explicitly */
1058  pfree(myInfo);
1059  }
1060  else
1061  {
/* NOTE(review): the abort-path ProcessInvalidationMessages call (lines
 * 1062-1063), which locally applies PriorCmdInvalidMsgs, is missing from
 * this capture. */
1064 
1065  /* Pop the transaction state stack */
1066  transInvalInfo = myInfo->parent;
1067 
1068  /* Need not free anything else explicitly */
1069  pfree(myInfo);
1070  }
1071 }
1072 
1073 /*
1074  * CommandEndInvalidationMessages
1075  * Process queued-up invalidation messages at end of one command
1076  * in a transaction.
1077  *
1078  * Here, we send no messages to the shared queue, since we don't know yet if
1079  * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
1080  * list, so as to flush our caches of any entries we have outdated in the
1081  * current command. We then move the current-cmd list over to become part
1082  * of the prior-cmds list.
1083  *
1084  * Note:
1085  * This should be called during CommandCounterIncrement(),
1086  * after we have advanced the command ID.
1087  */
1088 void
1090 {
1091  /*
1092  * You might think this shouldn't be called outside any transaction, but
1093  * bootstrap does it, and also ABORT issued when not in a transaction. So
1094  * just quietly return if no state to work on.
1095  */
1096  if (transInvalInfo == NULL)
1097  return;
1098 
1101 
1102  /* WAL Log per-command invalidation messages for wal_level=logical */
1103  if (XLogLogicalInfoActive())
1105 
1107  &transInvalInfo->CurrentCmdInvalidMsgs);
1108 }
1109 
1110 
1111 /*
1112  * CacheInvalidateHeapTuple
1113  * Register the given tuple for invalidation at end of command
1114  * (ie, current command is creating or outdating this tuple).
1115  * Also, detect whether a relcache invalidation is implied.
1116  *
1117  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1118  * For an update, we are called just once, with tuple being the old tuple
1119  * version and newtuple the new version. This allows avoidance of duplicate
1120  * effort during an update.
1121  */
1122 void
1124  HeapTuple tuple,
1125  HeapTuple newtuple)
1126 {
1127  Oid tupleRelId;
1128  Oid databaseId;
1129  Oid relationId;
1130 
1131  /* Do nothing during bootstrap */
1133  return;
1134 
1135  /*
1136  * We only need to worry about invalidation for tuples that are in system
1137  * catalogs; user-relation tuples are never in catcaches and can't affect
1138  * the relcache either.
1139  */
1140  if (!IsCatalogRelation(relation))
1141  return;
1142 
1143  /*
1144  * IsCatalogRelation() will return true for TOAST tables of system
1145  * catalogs, but we don't care about those, either.
1146  */
1147  if (IsToastRelation(relation))
1148  return;
1149 
1150  /*
1151  * If we're not prepared to queue invalidation messages for this
1152  * subtransaction level, get ready now.
1153  */
1155 
1156  /*
1157  * First let the catcache do its thing
1158  */
1159  tupleRelId = RelationGetRelid(relation);
1160  if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1161  {
1162  databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1163  RegisterSnapshotInvalidation(databaseId, tupleRelId);
1164  }
1165  else
1166  PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1168 
1169  /*
1170  * Now, is this tuple one of the primary definers of a relcache entry? See
1171  * comments in file header for deeper explanation.
1172  *
1173  * Note we ignore newtuple here; we assume an update cannot move a tuple
1174  * from being part of one relcache entry to being part of another.
1175  */
1176  if (tupleRelId == RelationRelationId)
1177  {
1178  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1179 
1180  relationId = classtup->oid;
1181  if (classtup->relisshared)
1182  databaseId = InvalidOid;
1183  else
1184  databaseId = MyDatabaseId;
1185  }
1186  else if (tupleRelId == AttributeRelationId)
1187  {
1188  Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1189 
1190  relationId = atttup->attrelid;
1191 
1192  /*
1193  * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1194  * even if the rel in question is shared (which we can't easily tell).
1195  * This essentially means that only backends in this same database
1196  * will react to the relcache flush request. This is in fact
1197  * appropriate, since only those backends could see our pg_attribute
1198  * change anyway. It looks a bit ugly though. (In practice, shared
1199  * relations can't have schema changes after bootstrap, so we should
1200  * never come here for a shared rel anyway.)
1201  */
1202  databaseId = MyDatabaseId;
1203  }
1204  else if (tupleRelId == IndexRelationId)
1205  {
1206  Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1207 
1208  /*
1209  * When a pg_index row is updated, we should send out a relcache inval
1210  * for the index relation. As above, we don't know the shared status
1211  * of the index, but in practice it doesn't matter since indexes of
1212  * shared catalogs can't have such updates.
1213  */
1214  relationId = indextup->indexrelid;
1215  databaseId = MyDatabaseId;
1216  }
1217  else if (tupleRelId == ConstraintRelationId)
1218  {
1219  Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
1220 
1221  /*
1222  * Foreign keys are part of relcache entries, too, so send out an
1223  * inval for the table that the FK applies to.
1224  */
1225  if (constrtup->contype == CONSTRAINT_FOREIGN &&
1226  OidIsValid(constrtup->conrelid))
1227  {
1228  relationId = constrtup->conrelid;
1229  databaseId = MyDatabaseId;
1230  }
1231  else
1232  return;
1233  }
1234  else
1235  return;
1236 
1237  /*
1238  * Yes. We need to register a relcache invalidation event.
1239  */
1240  RegisterRelcacheInvalidation(databaseId, relationId);
1241 }
1242 
1243 /*
1244  * CacheInvalidateCatalog
1245  * Register invalidation of the whole content of a system catalog.
1246  *
1247  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1248  * changed any tuples as moved them around. Some uses of catcache entries
1249  * expect their TIDs to be correct, so we have to blow away the entries.
1250  *
1251  * Note: we expect caller to verify that the rel actually is a system
1252  * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1253  */
1254 void
1256 {
1257  Oid databaseId;
1258 
1260 
1261  if (IsSharedRelation(catalogId))
1262  databaseId = InvalidOid;
1263  else
1264  databaseId = MyDatabaseId;
1265 
1266  RegisterCatalogInvalidation(databaseId, catalogId);
1267 }
1268 
1269 /*
1270  * CacheInvalidateRelcache
1271  * Register invalidation of the specified relation's relcache entry
1272  * at end of command.
1273  *
1274  * This is used in places that need to force relcache rebuild but aren't
1275  * changing any of the tuples recognized as contributors to the relcache
1276  * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1277  */
1278 void
1280 {
1281  Oid databaseId;
1282  Oid relationId;
1283 
1285 
1286  relationId = RelationGetRelid(relation);
1287  if (relation->rd_rel->relisshared)
1288  databaseId = InvalidOid;
1289  else
1290  databaseId = MyDatabaseId;
1291 
1292  RegisterRelcacheInvalidation(databaseId, relationId);
1293 }
1294 
1295 /*
1296  * CacheInvalidateRelcacheAll
1297  * Register invalidation of the whole relcache at the end of command.
1298  *
1299  * This is used by alter publication as changes in publications may affect
1300  * large number of tables.
1301  */
1302 void
1304 {
1306 
1308 }
1309 
1310 /*
1311  * CacheInvalidateRelcacheByTuple
1312  * As above, but relation is identified by passing its pg_class tuple.
1313  */
1314 void
1316 {
1317  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1318  Oid databaseId;
1319  Oid relationId;
1320 
1322 
1323  relationId = classtup->oid;
1324  if (classtup->relisshared)
1325  databaseId = InvalidOid;
1326  else
1327  databaseId = MyDatabaseId;
1328  RegisterRelcacheInvalidation(databaseId, relationId);
1329 }
1330 
1331 /*
1332  * CacheInvalidateRelcacheByRelid
1333  * As above, but relation is identified by passing its OID.
1334  * This is the least efficient of the three options; use one of
1335  * the above routines if you have a Relation or pg_class tuple.
1336  */
1337 void
1339 {
1340  HeapTuple tup;
1341 
1343 
1344  tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1345  if (!HeapTupleIsValid(tup))
1346  elog(ERROR, "cache lookup failed for relation %u", relid);
1348  ReleaseSysCache(tup);
1349 }
1350 
1351 
1352 /*
1353  * CacheInvalidateSmgr
1354  * Register invalidation of smgr references to a physical relation.
1355  *
1356  * Sending this type of invalidation msg forces other backends to close open
1357  * smgr entries for the rel. This should be done to flush dangling open-file
1358  * references when the physical rel is being dropped or truncated. Because
1359  * these are nontransactional (i.e., not-rollback-able) operations, we just
1360  * send the inval message immediately without any queuing.
1361  *
1362  * Note: in most cases there will have been a relcache flush issued against
1363  * the rel at the logical level. We need a separate smgr-level flush because
1364  * it is possible for backends to have open smgr entries for rels they don't
1365  * have a relcache entry for, e.g. because the only thing they ever did with
1366  * the rel is write out dirty shared buffers.
1367  *
1368  * Note: because these messages are nontransactional, they won't be captured
1369  * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1370  * should happen in low-level smgr.c routines, which are executed while
1371  * replaying WAL as well as when creating it.
1372  *
1373  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1374  * three bytes of the backend ID using what would otherwise be padding space.
1375  * Thus, the maximum possible backend ID is 2^23-1.
1376  */
1377 void
1379 {
1381 
1382  msg.sm.id = SHAREDINVALSMGR_ID;
1383  msg.sm.backend_hi = rnode.backend >> 16;
1384  msg.sm.backend_lo = rnode.backend & 0xffff;
1385  msg.sm.rnode = rnode.node;
1386  /* check AddCatcacheInvalidationMessage() for an explanation */
1387  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1388 
1389  SendSharedInvalidMessages(&msg, 1);
1390 }
1391 
1392 /*
1393  * CacheInvalidateRelmap
1394  * Register invalidation of the relation mapping for a database,
1395  * or for the shared catalogs if databaseId is zero.
1396  *
1397  * Sending this type of invalidation msg forces other backends to re-read
1398  * the indicated relation mapping file. It is also necessary to send a
1399  * relcache inval for the specific relations whose mapping has been altered,
1400  * else the relcache won't get updated with the new filenode data.
1401  *
1402  * Note: because these messages are nontransactional, they won't be captured
1403  * in commit/abort WAL entries. Instead, calls to CacheInvalidateRelmap()
1404  * should happen in low-level relmapper.c routines, which are executed while
1405  * replaying WAL as well as when creating it.
1406  */
1407 void
1409 {
1411 
1412  msg.rm.id = SHAREDINVALRELMAP_ID;
1413  msg.rm.dbId = databaseId;
1414  /* check AddCatcacheInvalidationMessage() for an explanation */
1415  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1416 
1417  SendSharedInvalidMessages(&msg, 1);
1418 }
1419 
1420 
1421 /*
1422  * CacheRegisterSyscacheCallback
1423  * Register the specified function to be called for all future
1424  * invalidation events in the specified cache. The cache ID and the
1425  * hash value of the tuple being invalidated will be passed to the
1426  * function.
1427  *
1428  * NOTE: Hash value zero will be passed if a cache reset request is received.
1429  * In this case the called routines should flush all cached state.
1430  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1431  * worth troubling over, especially since most of the current callees just
1432  * flush all cached state anyway.
1433  */
1434 void
1437  Datum arg)
1438 {
1439  if (cacheid < 0 || cacheid >= SysCacheSize)
1440  elog(FATAL, "invalid cache ID: %d", cacheid);
1442  elog(FATAL, "out of syscache_callback_list slots");
1443 
1444  if (syscache_callback_links[cacheid] == 0)
1445  {
1446  /* first callback for this cache */
1448  }
1449  else
1450  {
1451  /* add to end of chain, so that older callbacks are called first */
1452  int i = syscache_callback_links[cacheid] - 1;
1453 
1454  while (syscache_callback_list[i].link > 0)
1455  i = syscache_callback_list[i].link - 1;
1457  }
1458 
1463 
1465 }
1466 
1467 /*
1468  * CacheRegisterRelcacheCallback
1469  * Register the specified function to be called for all future
1470  * relcache invalidation events. The OID of the relation being
1471  * invalidated will be passed to the function.
1472  *
1473  * NOTE: InvalidOid will be passed if a cache reset request is received.
1474  * In this case the called routines should flush all cached state.
1475  */
1476 void
1478  Datum arg)
1479 {
1481  elog(FATAL, "out of relcache_callback_list slots");
1482 
1485 
1487 }
1488 
1489 /*
1490  * CallSyscacheCallbacks
1491  *
1492  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1493  * this module from knowing which catcache IDs correspond to which catalogs.
1494  */
1495 void
1496 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1497 {
1498  int i;
1499 
1500  if (cacheid < 0 || cacheid >= SysCacheSize)
1501  elog(ERROR, "invalid cache ID: %d", cacheid);
1502 
1503  i = syscache_callback_links[cacheid] - 1;
1504  while (i >= 0)
1505  {
1506  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1507 
1508  Assert(ccitem->id == cacheid);
1509  ccitem->function(ccitem->arg, cacheid, hashvalue);
1510  i = ccitem->link - 1;
1511  }
1512 }
1513 
1514 /*
1515  * LogLogicalInvalidations
1516  *
1517  * Emit WAL for invalidations. This is currently only used for logging
1518  * invalidations at the command end or at commit time if any invalidations
1519  * are pending.
1520  */
1521 void
1523 {
1524  xl_xact_invals xlrec;
1525  SharedInvalidationMessage *invalMessages;
1526  int nmsgs = 0;
1527 
1528  /* Quick exit if we haven't done anything with invalidation messages. */
1529  if (transInvalInfo == NULL)
1530  return;
1531 
1534 
1536  SharedInvalidMessagesArray == NULL));
1537 
1538  invalMessages = SharedInvalidMessagesArray;
1540  SharedInvalidMessagesArray = NULL;
1542 
1543  if (nmsgs > 0)
1544  {
1545  /* prepare record */
1546  memset(&xlrec, 0, MinSizeOfXactInvals);
1547  xlrec.nmsgs = nmsgs;
1548 
1549  /* perform insertion */
1550  XLogBeginInsert();
1551  XLogRegisterData((char *) (&xlrec), MinSizeOfXactInvals);
1552  XLogRegisterData((char *) invalMessages,
1553  nmsgs * sizeof(SharedInvalidationMessage));
1554  XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
1555 
1556  pfree(invalMessages);
1557  }
1558 }
signed short int16
Definition: c.h:428
void CacheInvalidateSmgr(RelFileNodeBackend rnode)
Definition: inval.c:1378
#define ProcessMessageListMulti(listHdr, codeFragment)
Definition: inval.c:322
#define FIRSTCHUNKSIZE
static void AppendInvalidationMessages(InvalidationListHeader *dest, InvalidationListHeader *src)
Definition: inval.c:449
SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]
Definition: inval.c:134
static SharedInvalidationMessage * SharedInvalidMessagesArray
Definition: inval.c:180
bool IsToastRelation(Relation relation)
Definition: catalog.c:146
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:104
SharedInvalSnapshotMsg sn
Definition: sinval.h:121
void(* RelcacheCallbackFunction)(Datum arg, Oid relid)
Definition: inval.h:24
static void AddInvalidationMessage(InvalidationChunk **listHdr, SharedInvalidationMessage *msg)
Definition: inval.c:236
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
#define GETSTRUCT(TUP)
Definition: htup_details.h:654
void AtEOXact_Inval(bool isCommit)
Definition: inval.c:953
MemoryContext TopTransactionContext
Definition: mcxt.c:53
void AcceptInvalidationMessages(void)
Definition: inval.c:688
InvalidationListHeader PriorCmdInvalidMsgs
Definition: inval.c:172
struct InvalidationChunk InvalidationChunk
SharedInvalRelcacheMsg rc
Definition: sinval.h:118
void CommandEndInvalidationMessages(void)
Definition: inval.c:1089
static struct RELCACHECALLBACK relcache_callback_list[MAX_RELCACHE_CALLBACKS]
#define MAX_RELCACHE_CALLBACKS
Definition: inval.c:198
static int relcache_callback_count
Definition: inval.c:218
#define XLOG_XACT_INVALIDATIONS
Definition: xact.h:154
RelcacheCallbackFunction function
Definition: inval.c:214
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
bool RelationIdIsInInitFile(Oid relationId)
Definition: relcache.c:6400
MemoryContext CurTransactionContext
Definition: mcxt.c:54
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:350
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:2009
struct TransInvalidationInfo * parent
Definition: inval.c:163
static int recursion_depth
Definition: elog.c:149
void CacheInvalidateRelmap(Oid databaseId)
Definition: inval.c:1408
static TransInvalidationInfo * transInvalInfo
Definition: inval.c:178
Form_pg_class rd_rel
Definition: rel.h:109
unsigned int Oid
Definition: postgres_ext.h:31
void RelationMapInvalidate(bool shared)
Definition: relmapper.c:403
#define DEBUG4
Definition: elog.h:22
int debug_discard_caches
Definition: inval.c:185
#define OidIsValid(objectId)
Definition: c.h:710
void InvalidateSystemCaches(void)
Definition: inval.c:650
int trace_recovery(int trace_level)
Definition: elog.c:3609
#define ProcessMessageList(listHdr, codeFragment)
Definition: inval.c:302
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1477
int16 link
Definition: inval.c:203
InvalidationListHeader CurrentCmdInvalidMsgs
Definition: inval.c:169
void pfree(void *pointer)
Definition: mcxt.c:1169
#define SysCacheSize
Definition: syscache.h:114
#define ObjectIdGetDatum(X)
Definition: postgres.h:551
#define ERROR
Definition: elog.h:46
struct InvalidationChunk * next
Definition: inval.c:131
#define XLogLogicalInfoActive()
Definition: xlog.h:183
#define FATAL
Definition: elog.h:49
static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
Definition: inval.c:783
void ReceiveSharedInvalidMessages(void(*invalFunction)(SharedInvalidationMessage *msg), void(*resetFunction)(void))
Definition: sinval.c:71
#define SHAREDINVALRELCACHE_ID
Definition: sinval.h:76
struct InvalidationListHeader InvalidationListHeader
SharedInvalRelmapMsg rm
Definition: sinval.h:120
#define SHAREDINVALRELMAP_ID
Definition: sinval.h:96
void(* SyscacheCallbackFunction)(Datum arg, int cacheid, uint32 hashvalue)
Definition: inval.h:23
#define MAX_SYSCACHE_CALLBACKS
Definition: inval.c:197
struct TransInvalidationInfo TransInvalidationInfo
void CacheInvalidateRelcacheByRelid(Oid relid)
Definition: inval.c:1338
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:207
void PostPrepare_Inval(void)
Definition: inval.c:774
unsigned int uint32
Definition: c.h:441
InvalidationChunk * cclist
Definition: inval.c:139
static int numSharedInvalidMessagesArray
Definition: inval.c:181
SharedInvalCatcacheMsg cc
Definition: sinval.h:116
#define MinSizeOfXactInvals
Definition: xact.h:257
void smgrclosenode(RelFileNodeBackend rnode)
Definition: smgr.c:310
SharedInvalCatalogMsg cat
Definition: sinval.h:117
void InvalidateCatalogSnapshot(void)
Definition: snapmgr.c:456
char * GetDatabasePath(Oid dbNode, Oid spcNode)
Definition: relpath.c:110
static void ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr, void(*func)(const SharedInvalidationMessage *msgs, int n))
Definition: inval.c:475
static void PrepareInvalidationState(void)
Definition: inval.c:737
static struct SYSCACHECALLBACK syscache_callback_list[MAX_SYSCACHE_CALLBACKS]
void LogLogicalInvalidations()
Definition: inval.c:1522
signed char int8
Definition: c.h:427
FormData_pg_index * Form_pg_index
Definition: pg_index.h:69
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:1127
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:340
bool RelcacheInitFileInval
Definition: inval.c:175
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:432
static void ProcessInvalidationMessages(InvalidationListHeader *hdr, void(*func)(SharedInvalidationMessage *msg))
Definition: inval.c:463
void CacheInvalidateRelcacheAll(void)
Definition: inval.c:1303
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1435
uintptr_t Datum
Definition: postgres.h:411
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1175
static void AddSnapshotInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:422
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1496
Oid MyDatabaseId
Definition: globals.c:88
static void RegisterCatalogInvalidation(Oid dbId, Oid catId)
Definition: inval.c:507
void RelationCacheInitFilePostInvalidate(void)
Definition: relcache.c:6465
bool IsSharedRelation(Oid relationId)
Definition: catalog.c:244
static void AddCatalogInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid catId)
Definition: inval.c:374
static int syscache_callback_count
Definition: inval.c:210
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:906
#define InvalidOid
Definition: postgres_ext.h:36
static void RegisterRelcacheInvalidation(Oid dbId, Oid relId)
Definition: inval.c:519
#define SHAREDINVALSNAPSHOT_ID
Definition: sinval.h:104
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition: inval.c:888
RelFileNode node
Definition: relfilenode.h:74
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:857
char * DatabasePath
Definition: globals.c:96
FormData_pg_constraint * Form_pg_constraint
void CacheInvalidateCatalog(Oid catalogId)
Definition: inval.c:1255
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
void RelationCacheInitFilePreInvalidate(void)
Definition: relcache.c:6440
static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId)
Definition: inval.c:493
#define Assert(condition)
Definition: c.h:804
bool RelationInvalidatesSnapshotsOnly(Oid relid)
Definition: syscache.c:1484
SharedInvalSmgrMsg sm
Definition: sinval.h:119
void SysCacheInvalidate(int cacheId, uint32 hashValue)
Definition: syscache.c:1460
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:719
BackendId backend
Definition: relfilenode.h:75
static void AddRelcacheInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:392
void RelationCacheInvalidateEntry(Oid relationId)
Definition: relcache.c:2797
void SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
Definition: sinval.c:49
void ResetCatalogCaches(void)
Definition: catcache.c:689
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1182
static int16 syscache_callback_links[SysCacheSize]
Definition: inval.c:208
uint16 backend_lo
Definition: sinval.h:92
void CacheInvalidateRelcache(Relation relation)
Definition: inval.c:1279
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:406
FormData_pg_class * Form_pg_class
Definition: pg_class.h:153
void LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
Definition: inval.c:563
#define SHAREDINVALCATALOG_ID
Definition: sinval.h:67
static int maxSharedInvalidMessagesArray
Definition: inval.c:182
void * palloc(Size size)
Definition: mcxt.c:1062
void AtEOSubXact_Inval(bool isCommit)
Definition: inval.c:1012
int xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs, bool *RelcacheInitFileInval)
Definition: inval.c:832
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
#define elog(elevel,...)
Definition: elog.h:232
void RelationCacheInvalidate(void)
Definition: relcache.c:2840
int i
RelFileNode rnode
Definition: sinval.h:93
void * arg
#define SHAREDINVALSMGR_ID
Definition: sinval.h:85
void CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
Definition: inval.c:1315
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:761
InvalidationChunk * rclist
Definition: inval.c:140
void XLogBeginInsert(void)
Definition: xloginsert.c:135
#define RelationGetRelid(relation)
Definition: rel.h:477
int nmsgs
Definition: xact.h:254
#define offsetof(type, field)
Definition: c.h:727
static void AddCatcacheInvalidationMessage(InvalidationListHeader *hdr, int id, uint32 hashValue, Oid dbId)
Definition: inval.c:346
static void RegisterSnapshotInvalidation(Oid dbId, Oid relId)
Definition: inval.c:549
SyscacheCallbackFunction function
Definition: inval.c:204
static void AppendInvalidationMessageList(InvalidationChunk **destHdr, InvalidationChunk **srcHdr)
Definition: inval.c:278