/*
 * PostgreSQL Source Code (git master snapshot) — inval.c
 * (captured from the doxygen source browser; see file header below)
 */
1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  * POSTGRES cache invalidation dispatcher code.
5  *
6  * This is subtle stuff, so pay attention:
7  *
8  * When a tuple is updated or deleted, our standard time qualification rules
9  * consider that it is *still valid* so long as we are in the same command,
10  * ie, until the next CommandCounterIncrement() or transaction commit.
11  * (See utils/time/tqual.c, and note that system catalogs are generally
12  * scanned under the most current snapshot available, rather than the
13  * transaction snapshot.) At the command boundary, the old tuple stops
14  * being valid and the new version, if any, becomes valid. Therefore,
15  * we cannot simply flush a tuple from the system caches during heap_update()
16  * or heap_delete(). The tuple is still good at that point; what's more,
17  * even if we did flush it, it might be reloaded into the caches by a later
18  * request in the same command. So the correct behavior is to keep a list
19  * of outdated (updated/deleted) tuples and then do the required cache
20  * flushes at the next command boundary. We must also keep track of
21  * inserted tuples so that we can flush "negative" cache entries that match
22  * the new tuples; again, that mustn't happen until end of command.
23  *
24  * Once we have finished the command, we still need to remember inserted
25  * tuples (including new versions of updated tuples), so that we can flush
26  * them from the caches if we abort the transaction. Similarly, we'd better
27  * be able to flush "negative" cache entries that may have been loaded in
28  * place of deleted tuples, so we still need the deleted ones too.
29  *
30  * If we successfully complete the transaction, we have to broadcast all
31  * these invalidation events to other backends (via the SI message queue)
32  * so that they can flush obsolete entries from their caches. Note we have
33  * to record the transaction commit before sending SI messages, otherwise
34  * the other backends won't see our updated tuples as good.
35  *
36  * When a subtransaction aborts, we can process and discard any events
37  * it has queued. When a subtransaction commits, we just add its events
38  * to the pending lists of the parent transaction.
39  *
40  * In short, we need to remember until xact end every insert or delete
41  * of a tuple that might be in the system caches. Updates are treated as
42  * two events, delete + insert, for simplicity. (If the update doesn't
43  * change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  * We do not need to register EVERY tuple operation in this way, just those
46  * on tuples in relations that have associated catcaches. We do, however,
47  * have to register every operation on every tuple that *could* be in a
48  * catcache, whether or not it currently is in our cache. Also, if the
49  * tuple is in a relation that has multiple catcaches, we need to register
50  * an invalidation message for each such catcache. catcache.c's
51  * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  * catcaches may need invalidation for a given tuple.
53  *
54  * Also, whenever we see an operation on a pg_class, pg_attribute, or
55  * pg_index tuple, we register a relcache flush operation for the relation
56  * described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  *
58  * We keep the relcache flush requests in lists separate from the catcache
59  * tuple flush requests. This allows us to issue all the pending catcache
60  * flushes before we issue relcache flushes, which saves us from loading
61  * a catcache tuple during relcache load only to flush it again right away.
62  * Also, we avoid queuing multiple relcache flush requests for the same
63  * relation, since a relcache flush is relatively expensive to do.
64  * (XXX is it worth testing likewise for duplicate catcache flush entries?
65  * Probably not.)
66  *
67  * If a relcache flush is issued for a system relation that we preload
68  * from the relcache init file, we must also delete the init file so that
69  * it will be rebuilt during the next backend restart. The actual work of
70  * manipulating the init file is in relcache.c, but we keep track of the
71  * need for it here.
72  *
73  * The request lists proper are kept in CurTransactionContext of their
74  * creating (sub)transaction, since they can be forgotten on abort of that
75  * transaction but must be kept till top-level commit otherwise. For
76  * simplicity we keep the controlling list-of-lists in TopTransactionContext.
77  *
78  * Currently, inval messages are sent without regard for the possibility
79  * that the object described by the catalog tuple might be a session-local
80  * object such as a temporary table. This is because (1) this code has
81  * no practical way to tell the difference, and (2) it is not certain that
82  * other backends don't have catalog cache or even relcache entries for
83  * such tables, anyway; there is nothing that prevents that. It might be
84  * worth trying to avoid sending such inval traffic in the future, if those
85  * problems can be overcome cheaply.
86  *
87  *
88  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
89  * Portions Copyright (c) 1994, Regents of the University of California
90  *
91  * IDENTIFICATION
92  * src/backend/utils/cache/inval.c
93  *
94  *-------------------------------------------------------------------------
95  */
96 #include "postgres.h"
97 
98 #include <limits.h>
99 
100 #include "access/htup_details.h"
101 #include "access/xact.h"
102 #include "catalog/catalog.h"
103 #include "miscadmin.h"
104 #include "storage/sinval.h"
105 #include "storage/smgr.h"
106 #include "utils/catcache.h"
107 #include "utils/inval.h"
108 #include "utils/memdebug.h"
109 #include "utils/memutils.h"
110 #include "utils/rel.h"
111 #include "utils/relmapper.h"
112 #include "utils/snapmgr.h"
113 #include "utils/syscache.h"
114 
115 
116 /*
117  * To minimize palloc traffic, we keep pending requests in successively-
118  * larger chunks (a slightly more sophisticated version of an expansible
119  * array). All request types can be stored as SharedInvalidationMessage
120  * records. The ordering of requests within a list is never significant.
121  */
122 typedef struct InvalidationChunk
123 {
124  struct InvalidationChunk *next; /* list link */
125  int nitems; /* # items currently stored in chunk */
126  int maxitems; /* size of allocated array in this chunk */
127  SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
129 
131 {
132  InvalidationChunk *cclist; /* list of chunks holding catcache msgs */
133  InvalidationChunk *rclist; /* list of chunks holding relcache msgs */
135 
136 /*----------------
137  * Invalidation info is divided into two lists:
138  * 1) events so far in current command, not yet reflected to caches.
139  * 2) events in previous commands of current transaction; these have
140  * been reflected to local caches, and must be either broadcast to
141  * other backends or rolled back from local cache when we commit
142  * or abort the transaction.
143  * Actually, we need two such lists for each level of nested transaction,
144  * so that we can discard events from an aborted subtransaction. When
145  * a subtransaction commits, we append its lists to the parent's lists.
146  *
147  * The relcache-file-invalidated flag can just be a simple boolean,
148  * since we only act on it at transaction commit; we don't care which
149  * command of the transaction set it.
150  *----------------
151  */
152 
153 typedef struct TransInvalidationInfo
154 {
155  /* Back link to parent transaction's info */
157 
158  /* Subtransaction nesting depth */
159  int my_level;
160 
161  /* head of current-command event list */
163 
164  /* head of previous-commands event list */
166 
167  /* init file must be invalidated? */
170 
172 
176 
177 
178 /*
179  * Dynamically-registered callback functions. Current implementation
180  * assumes there won't be enough of these to justify a dynamically resizable
181  * array; it'd be easy to improve that if needed.
182  *
183  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
184  * syscache are linked into a list pointed to by syscache_callback_links[id].
185  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
186  */
187 
188 #define MAX_SYSCACHE_CALLBACKS 64
189 #define MAX_RELCACHE_CALLBACKS 10
190 
191 static struct SYSCACHECALLBACK
192 {
193  int16 id; /* cache number */
194  int16 link; /* next callback index+1 for same cache */
198 
200 
201 static int syscache_callback_count = 0;
202 
203 static struct RELCACHECALLBACK
204 {
208 
209 static int relcache_callback_count = 0;
210 
211 /* ----------------------------------------------------------------
212  * Invalidation list support functions
213  *
214  * These three routines encapsulate processing of the "chunked"
215  * representation of what is logically just a list of messages.
216  * ----------------------------------------------------------------
217  */
218 
219 /*
220  * AddInvalidationMessage
221  * Add an invalidation message to a list (of chunks).
222  *
223  * Note that we do not pay any great attention to maintaining the original
224  * ordering of the messages.
225  */
226 static void
229 {
230  InvalidationChunk *chunk = *listHdr;
231 
232  if (chunk == NULL)
233  {
234  /* First time through; create initial chunk */
235 #define FIRSTCHUNKSIZE 32
236  chunk = (InvalidationChunk *)
240  chunk->nitems = 0;
241  chunk->maxitems = FIRSTCHUNKSIZE;
242  chunk->next = *listHdr;
243  *listHdr = chunk;
244  }
245  else if (chunk->nitems >= chunk->maxitems)
246  {
247  /* Need another chunk; double size of last chunk */
248  int chunksize = 2 * chunk->maxitems;
249 
250  chunk = (InvalidationChunk *)
253  chunksize * sizeof(SharedInvalidationMessage));
254  chunk->nitems = 0;
255  chunk->maxitems = chunksize;
256  chunk->next = *listHdr;
257  *listHdr = chunk;
258  }
259  /* Okay, add message to current chunk */
260  chunk->msgs[chunk->nitems] = *msg;
261  chunk->nitems++;
262 }
263 
264 /*
265  * Append one list of invalidation message chunks to another, resetting
266  * the source chunk-list pointer to NULL.
267  */
268 static void
270  InvalidationChunk **srcHdr)
271 {
272  InvalidationChunk *chunk = *srcHdr;
273 
274  if (chunk == NULL)
275  return; /* nothing to do */
276 
277  while (chunk->next != NULL)
278  chunk = chunk->next;
279 
280  chunk->next = *destHdr;
281 
282  *destHdr = *srcHdr;
283 
284  *srcHdr = NULL;
285 }
286 
287 /*
288  * Process a list of invalidation messages.
289  *
290  * This is a macro that executes the given code fragment for each message in
291  * a message chunk list. The fragment should refer to the message as *msg.
292  */
293 #define ProcessMessageList(listHdr, codeFragment) \
294  do { \
295  InvalidationChunk *_chunk; \
296  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
297  { \
298  int _cindex; \
299  for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
300  { \
301  SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
302  codeFragment; \
303  } \
304  } \
305  } while (0)
306 
307 /*
308  * Process a list of invalidation messages group-wise.
309  *
310  * As above, but the code fragment can handle an array of messages.
311  * The fragment should refer to the messages as msgs[], with n entries.
312  */
313 #define ProcessMessageListMulti(listHdr, codeFragment) \
314  do { \
315  InvalidationChunk *_chunk; \
316  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
317  { \
318  SharedInvalidationMessage *msgs = _chunk->msgs; \
319  int n = _chunk->nitems; \
320  codeFragment; \
321  } \
322  } while (0)
323 
324 
325 /* ----------------------------------------------------------------
326  * Invalidation set support functions
327  *
328  * These routines understand about the division of a logical invalidation
329  * list into separate physical lists for catcache and relcache entries.
330  * ----------------------------------------------------------------
331  */
332 
333 /*
334  * Add a catcache inval entry
335  */
336 static void
338  int id, uint32 hashValue, Oid dbId)
339 {
341 
342  Assert(id < CHAR_MAX);
343  msg.cc.id = (int8) id;
344  msg.cc.dbId = dbId;
345  msg.cc.hashValue = hashValue;
346 
347  /*
348  * Define padding bytes in SharedInvalidationMessage structs to be
349  * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
350  * multiple processes, will cause spurious valgrind warnings about
351  * undefined memory being used. That's because valgrind remembers the
352  * undefined bytes from the last local process's store, not realizing that
353  * another process has written since, filling the previously uninitialized
354  * bytes
355  */
356  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
357 
358  AddInvalidationMessage(&hdr->cclist, &msg);
359 }
360 
361 /*
362  * Add a whole-catalog inval entry
363  */
364 static void
366  Oid dbId, Oid catId)
367 {
369 
371  msg.cat.dbId = dbId;
372  msg.cat.catId = catId;
373  /* check AddCatcacheInvalidationMessage() for an explanation */
374  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
375 
376  AddInvalidationMessage(&hdr->cclist, &msg);
377 }
378 
379 /*
380  * Add a relcache inval entry
381  */
382 static void
384  Oid dbId, Oid relId)
385 {
387 
388  /*
389  * Don't add a duplicate item. We assume dbId need not be checked because
390  * it will never change. InvalidOid for relId means all relations so we
391  * don't need to add individual ones when it is present.
392  */
394  if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
395  (msg->rc.relId == relId ||
396  msg->rc.relId == InvalidOid))
397  return);
398 
399  /* OK, add the item */
401  msg.rc.dbId = dbId;
402  msg.rc.relId = relId;
403  /* check AddCatcacheInvalidationMessage() for an explanation */
404  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
405 
406  AddInvalidationMessage(&hdr->rclist, &msg);
407 }
408 
409 /*
410  * Add a snapshot inval entry
411  */
412 static void
414  Oid dbId, Oid relId)
415 {
417 
418  /* Don't add a duplicate item */
419  /* We assume dbId need not be checked because it will never change */
421  if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
422  msg->sn.relId == relId)
423  return);
424 
425  /* OK, add the item */
427  msg.sn.dbId = dbId;
428  msg.sn.relId = relId;
429  /* check AddCatcacheInvalidationMessage() for an explanation */
430  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
431 
432  AddInvalidationMessage(&hdr->rclist, &msg);
433 }
434 
435 /*
436  * Append one list of invalidation messages to another, resetting
437  * the source list to empty.
438  */
439 static void
442 {
445 }
446 
447 /*
448  * Execute the given function for all the messages in an invalidation list.
449  * The list is not altered.
450  *
451  * catcache entries are processed first, for reasons mentioned above.
452  */
453 static void
455  void (*func) (SharedInvalidationMessage *msg))
456 {
457  ProcessMessageList(hdr->cclist, func(msg));
458  ProcessMessageList(hdr->rclist, func(msg));
459 }
460 
461 /*
462  * As above, but the function is able to process an array of messages
463  * rather than just one at a time.
464  */
465 static void
467  void (*func) (const SharedInvalidationMessage *msgs, int n))
468 {
469  ProcessMessageListMulti(hdr->cclist, func(msgs, n));
470  ProcessMessageListMulti(hdr->rclist, func(msgs, n));
471 }
472 
473 /* ----------------------------------------------------------------
474  * private support functions
475  * ----------------------------------------------------------------
476  */
477 
478 /*
479  * RegisterCatcacheInvalidation
480  *
481  * Register an invalidation event for a catcache tuple entry.
482  */
483 static void
485  uint32 hashValue,
486  Oid dbId)
487 {
489  cacheId, hashValue, dbId);
490 }
491 
492 /*
493  * RegisterCatalogInvalidation
494  *
495  * Register an invalidation event for all catcache entries from a catalog.
496  */
497 static void
499 {
501  dbId, catId);
502 }
503 
504 /*
505  * RegisterRelcacheInvalidation
506  *
507  * As above, but register a relcache invalidation event.
508  */
509 static void
511 {
513  dbId, relId);
514 
515  /*
516  * Most of the time, relcache invalidation is associated with system
517  * catalog updates, but there are a few cases where it isn't. Quick hack
518  * to ensure that the next CommandCounterIncrement() will think that we
519  * need to do CommandEndInvalidationMessages().
520  */
521  (void) GetCurrentCommandId(true);
522 
523  /*
524  * If the relation being invalidated is one of those cached in the local
525  * relcache init file, mark that we need to zap that file at commit. Same
526  * is true when we are invalidating whole relcache.
527  */
528  if (OidIsValid(dbId) &&
529  (RelationIdIsInInitFile(relId) || relId == InvalidOid))
530  transInvalInfo->RelcacheInitFileInval = true;
531 }
532 
533 /*
534  * RegisterSnapshotInvalidation
535  *
536  * Register an invalidation event for MVCC scans against a given catalog.
537  * Only needed for catalogs that don't have catcaches.
538  */
539 static void
541 {
543  dbId, relId);
544 }
545 
546 /*
547  * LocalExecuteInvalidationMessage
548  *
549  * Process a single invalidation message (which could be of any type).
550  * Only the local caches are flushed; this does not transmit the message
551  * to other backends.
552  */
553 void
555 {
556  if (msg->id >= 0)
557  {
558  if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
559  {
561 
562  SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
563 
564  CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
565  }
566  }
567  else if (msg->id == SHAREDINVALCATALOG_ID)
568  {
569  if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
570  {
572 
574 
575  /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
576  }
577  }
578  else if (msg->id == SHAREDINVALRELCACHE_ID)
579  {
580  if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
581  {
582  int i;
583 
584  if (msg->rc.relId == InvalidOid)
586  else
588 
589  for (i = 0; i < relcache_callback_count; i++)
590  {
591  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
592 
593  ccitem->function(ccitem->arg, msg->rc.relId);
594  }
595  }
596  }
597  else if (msg->id == SHAREDINVALSMGR_ID)
598  {
599  /*
600  * We could have smgr entries for relations of other databases, so no
601  * short-circuit test is possible here.
602  */
603  RelFileNodeBackend rnode;
604 
605  rnode.node = msg->sm.rnode;
606  rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
607  smgrclosenode(rnode);
608  }
609  else if (msg->id == SHAREDINVALRELMAP_ID)
610  {
611  /* We only care about our own database and shared catalogs */
612  if (msg->rm.dbId == InvalidOid)
613  RelationMapInvalidate(true);
614  else if (msg->rm.dbId == MyDatabaseId)
615  RelationMapInvalidate(false);
616  }
617  else if (msg->id == SHAREDINVALSNAPSHOT_ID)
618  {
619  /* We only care about our own database and shared catalogs */
620  if (msg->rm.dbId == InvalidOid)
622  else if (msg->rm.dbId == MyDatabaseId)
624  }
625  else
626  elog(FATAL, "unrecognized SI message ID: %d", msg->id);
627 }
628 
629 /*
630  * InvalidateSystemCaches
631  *
632  * This blows away all tuples in the system catalog caches and
633  * all the cached relation descriptors and smgr cache entries.
634  * Relation descriptors that have positive refcounts are then rebuilt.
635  *
636  * We call this when we see a shared-inval-queue overflow signal,
637  * since that tells us we've lost some shared-inval messages and hence
638  * don't know what needs to be invalidated.
639  */
640 void
642 {
643  int i;
644 
647  RelationCacheInvalidate(); /* gets smgr and relmap too */
648 
649  for (i = 0; i < syscache_callback_count; i++)
650  {
651  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
652 
653  ccitem->function(ccitem->arg, ccitem->id, 0);
654  }
655 
656  for (i = 0; i < relcache_callback_count; i++)
657  {
658  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
659 
660  ccitem->function(ccitem->arg, InvalidOid);
661  }
662 }
663 
664 
665 /* ----------------------------------------------------------------
666  * public functions
667  * ----------------------------------------------------------------
668  */
669 
670 /*
671  * AcceptInvalidationMessages
672  * Read and process invalidation messages from the shared invalidation
673  * message queue.
674  *
675  * Note:
676  * This should be called as the first step in processing a transaction.
677  */
678 void
680 {
683 
684  /*
685  * Test code to force cache flushes anytime a flush could happen.
686  *
687  * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
688  * fairly thorough test that the system contains no cache-flush hazards.
689  * However, it also makes the system unbelievably slow --- the regression
690  * tests take about 100 times longer than normal.
691  *
692  * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
693  * slows things by at least a factor of 10000, so I wouldn't suggest
694  * trying to run the entire regression tests that way. It's useful to try
695  * a few simple tests, to make sure that cache reload isn't subject to
696  * internal cache-flush hazards, but after you've done a few thousand
697  * recursive reloads it's unlikely you'll learn more.
698  */
699 #if defined(CLOBBER_CACHE_ALWAYS)
700  {
701  static bool in_recursion = false;
702 
703  if (!in_recursion)
704  {
705  in_recursion = true;
707  in_recursion = false;
708  }
709  }
710 #elif defined(CLOBBER_CACHE_RECURSIVELY)
712 #endif
713 }
714 
715 /*
716  * PrepareInvalidationState
717  * Initialize inval lists for the current (sub)transaction.
718  */
719 static void
721 {
722  TransInvalidationInfo *myInfo;
723 
724  if (transInvalInfo != NULL &&
725  transInvalInfo->my_level == GetCurrentTransactionNestLevel())
726  return;
727 
728  myInfo = (TransInvalidationInfo *)
730  sizeof(TransInvalidationInfo));
731  myInfo->parent = transInvalInfo;
733 
734  /*
735  * If there's any previous entry, this one should be for a deeper nesting
736  * level.
737  */
738  Assert(transInvalInfo == NULL ||
739  myInfo->my_level > transInvalInfo->my_level);
740 
741  transInvalInfo = myInfo;
742 }
743 
744 /*
745  * PostPrepare_Inval
746  * Clean up after successful PREPARE.
747  *
748  * Here, we want to act as though the transaction aborted, so that we will
749  * undo any syscache changes it made, thereby bringing us into sync with the
750  * outside world, which doesn't believe the transaction committed yet.
751  *
752  * If the prepared transaction is later aborted, there is nothing more to
753  * do; if it commits, we will receive the consequent inval messages just
754  * like everyone else.
755  */
756 void
758 {
759  AtEOXact_Inval(false);
760 }
761 
762 /*
763  * Collect invalidation messages into SharedInvalidMessagesArray array.
764  */
765 static void
767 {
768  /*
769  * Initialise array first time through in each commit
770  */
771  if (SharedInvalidMessagesArray == NULL)
772  {
775 
776  /*
777  * Although this is being palloc'd we don't actually free it directly.
778  * We're so close to EOXact that we now we're going to lose it anyhow.
779  */
780  SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
781  * sizeof(SharedInvalidationMessage));
782  }
783 
785  {
788 
789  SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
791  * sizeof(SharedInvalidationMessage));
792  }
793 
794  /*
795  * Append the next chunk onto the array
796  */
797  memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
798  msgs, n * sizeof(SharedInvalidationMessage));
800 }
801 
802 /*
803  * xactGetCommittedInvalidationMessages() is executed by
804  * RecordTransactionCommit() to add invalidation messages onto the
805  * commit record. This applies only to commit message types, never to
806  * abort records. Must always run before AtEOXact_Inval(), since that
807  * removes the data we need to see.
808  *
809  * Remember that this runs before we have officially committed, so we
810  * must not do anything here to change what might occur *if* we should
811  * fail between here and the actual commit.
812  *
813  * see also xact_redo_commit() and xact_desc_commit()
814  */
815 int
817  bool *RelcacheInitFileInval)
818 {
819  MemoryContext oldcontext;
820 
821  /* Quick exit if we haven't done anything with invalidation messages. */
822  if (transInvalInfo == NULL)
823  {
824  *RelcacheInitFileInval = false;
825  *msgs = NULL;
826  return 0;
827  }
828 
829  /* Must be at top of stack */
830  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
831 
832  /*
833  * Relcache init file invalidation requires processing both before and
834  * after we send the SI messages. However, we need not do anything unless
835  * we committed.
836  */
837  *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
838 
839  /*
840  * Walk through TransInvalidationInfo to collect all the messages into a
841  * single contiguous array of invalidation messages. It must be contiguous
842  * so we can copy directly into WAL message. Maintain the order that they
843  * would be processed in by AtEOXact_Inval(), to ensure emulated behaviour
844  * in redo is as similar as possible to original. We want the same bugs,
845  * if any, not new ones.
846  */
848 
853  MemoryContextSwitchTo(oldcontext);
854 
856  SharedInvalidMessagesArray == NULL));
857 
859 
861 }
862 
863 /*
864  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
865  * standby_redo() to process invalidation messages. Currently that happens
866  * only at end-of-xact.
867  *
868  * Relcache init file invalidation requires processing both
869  * before and after we send the SI messages. See AtEOXact_Inval()
870  */
871 void
873  int nmsgs, bool RelcacheInitFileInval,
874  Oid dbid, Oid tsid)
875 {
876  if (nmsgs <= 0)
877  return;
878 
879  elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
880  (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
881 
882  if (RelcacheInitFileInval)
883  {
884  /*
885  * RelationCacheInitFilePreInvalidate requires DatabasePath to be set,
886  * but we should not use SetDatabasePath during recovery, since it is
887  * intended to be used only once by normal backends. Hence, a quick
888  * hack: set DatabasePath directly then unset after use.
889  */
890  DatabasePath = GetDatabasePath(dbid, tsid);
891  elog(trace_recovery(DEBUG4), "removing relcache init file in \"%s\"",
892  DatabasePath);
895  DatabasePath = NULL;
896  }
897 
898  SendSharedInvalidMessages(msgs, nmsgs);
899 
900  if (RelcacheInitFileInval)
902 }
903 
904 /*
905  * AtEOXact_Inval
906  * Process queued-up invalidation messages at end of main transaction.
907  *
908  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
909  * to the shared invalidation message queue. Note that these will be read
910  * not only by other backends, but also by our own backend at the next
911  * transaction start (via AcceptInvalidationMessages). This means that
912  * we can skip immediate local processing of anything that's still in
913  * CurrentCmdInvalidMsgs, and just send that list out too.
914  *
915  * If not isCommit, we are aborting, and must locally process the messages
916  * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
917  * since they'll not have seen our changed tuples anyway. We can forget
918  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
919  * the caches yet.
920  *
921  * In any case, reset the various lists to empty. We need not physically
922  * free memory here, since TopTransactionContext is about to be emptied
923  * anyway.
924  *
925  * Note:
926  * This should be called as the last step in processing a transaction.
927  */
928 void
929 AtEOXact_Inval(bool isCommit)
930 {
931  /* Quick exit if no messages */
932  if (transInvalInfo == NULL)
933  return;
934 
935  /* Must be at top of stack */
936  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
937 
938  if (isCommit)
939  {
940  /*
941  * Relcache init file invalidation requires processing both before and
942  * after we send the SI messages. However, we need not do anything
943  * unless we committed.
944  */
945  if (transInvalInfo->RelcacheInitFileInval)
947 
949  &transInvalInfo->CurrentCmdInvalidMsgs);
950 
953 
954  if (transInvalInfo->RelcacheInitFileInval)
956  }
957  else
958  {
961  }
962 
963  /* Need not free anything explicitly */
964  transInvalInfo = NULL;
965  SharedInvalidMessagesArray = NULL;
967 }
968 
969 /*
970  * AtEOSubXact_Inval
971  * Process queued-up invalidation messages at end of subtransaction.
972  *
973  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
974  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
975  * parent's PriorCmdInvalidMsgs list.
976  *
977  * If not isCommit, we are aborting, and must locally process the messages
978  * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
979  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
980  * touched the caches yet.
981  *
982  * In any case, pop the transaction stack. We need not physically free memory
983  * here, since CurTransactionContext is about to be emptied anyway
984  * (if aborting). Beware of the possibility of aborting the same nesting
985  * level twice, though.
986  */
987 void
988 AtEOSubXact_Inval(bool isCommit)
989 {
990  int my_level;
992 
993  /* Quick exit if no messages. */
994  if (myInfo == NULL)
995  return;
996 
997  /* Also bail out quickly if messages are not for this level. */
998  my_level = GetCurrentTransactionNestLevel();
999  if (myInfo->my_level != my_level)
1000  {
1001  Assert(myInfo->my_level < my_level);
1002  return;
1003  }
1004 
1005  if (isCommit)
1006  {
1007  /* If CurrentCmdInvalidMsgs still has anything, fix it */
1009 
1010  /*
1011  * We create invalidation stack entries lazily, so the parent might
1012  * not have one. Instead of creating one, moving all the data over,
1013  * and then freeing our own, we can just adjust the level of our own
1014  * entry.
1015  */
1016  if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1017  {
1018  myInfo->my_level--;
1019  return;
1020  }
1021 
1022  /* Pass up my inval messages to parent */
1024  &myInfo->PriorCmdInvalidMsgs);
1025 
1026  /* Pending relcache inval becomes parent's problem too */
1027  if (myInfo->RelcacheInitFileInval)
1028  myInfo->parent->RelcacheInitFileInval = true;
1029 
1030  /* Pop the transaction state stack */
1031  transInvalInfo = myInfo->parent;
1032 
1033  /* Need not free anything else explicitly */
1034  pfree(myInfo);
1035  }
1036  else
1037  {
1040 
1041  /* Pop the transaction state stack */
1042  transInvalInfo = myInfo->parent;
1043 
1044  /* Need not free anything else explicitly */
1045  pfree(myInfo);
1046  }
1047 }
1048 
1049 /*
1050  * CommandEndInvalidationMessages
1051  * Process queued-up invalidation messages at end of one command
1052  * in a transaction.
1053  *
1054  * Here, we send no messages to the shared queue, since we don't know yet if
1055  * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
1056  * list, so as to flush our caches of any entries we have outdated in the
1057  * current command. We then move the current-cmd list over to become part
1058  * of the prior-cmds list.
1059  *
1060  * Note:
1061  * This should be called during CommandCounterIncrement(),
1062  * after we have advanced the command ID.
1063  */
1064 void
1066 {
1067  /*
1068  * You might think this shouldn't be called outside any transaction, but
1069  * bootstrap does it, and also ABORT issued when not in a transaction. So
1070  * just quietly return if no state to work on.
1071  */
1072  if (transInvalInfo == NULL)
1073  return;
1074 
1078  &transInvalInfo->CurrentCmdInvalidMsgs);
1079 }
1080 
1081 
1082 /*
1083  * CacheInvalidateHeapTuple
1084  * Register the given tuple for invalidation at end of command
1085  * (ie, current command is creating or outdating this tuple).
1086  * Also, detect whether a relcache invalidation is implied.
1087  *
1088  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1089  * For an update, we are called just once, with tuple being the old tuple
1090  * version and newtuple the new version. This allows avoidance of duplicate
1091  * effort during an update.
1092  */
1093 void
1095  HeapTuple tuple,
1096  HeapTuple newtuple)
1097 {
1098  Oid tupleRelId;
1099  Oid databaseId;
1100  Oid relationId;
1101 
1102  /* Do nothing during bootstrap */
1104  return;
1105 
1106  /*
1107  * We only need to worry about invalidation for tuples that are in system
1108  * catalogs; user-relation tuples are never in catcaches and can't affect
1109  * the relcache either.
1110  */
1111  if (!IsCatalogRelation(relation))
1112  return;
1113 
1114  /*
1115  * IsCatalogRelation() will return true for TOAST tables of system
1116  * catalogs, but we don't care about those, either.
1117  */
1118  if (IsToastRelation(relation))
1119  return;
1120 
1121  /*
1122  * If we're not prepared to queue invalidation messages for this
1123  * subtransaction level, get ready now.
1124  */
1126 
1127  /*
1128  * First let the catcache do its thing
1129  */
1130  tupleRelId = RelationGetRelid(relation);
1131  if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1132  {
1133  databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1134  RegisterSnapshotInvalidation(databaseId, tupleRelId);
1135  }
1136  else
1137  PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1139 
1140  /*
1141  * Now, is this tuple one of the primary definers of a relcache entry? See
1142  * comments in file header for deeper explanation.
1143  *
1144  * Note we ignore newtuple here; we assume an update cannot move a tuple
1145  * from being part of one relcache entry to being part of another.
1146  */
1147  if (tupleRelId == RelationRelationId)
1148  {
1149  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1150 
1151  relationId = HeapTupleGetOid(tuple);
1152  if (classtup->relisshared)
1153  databaseId = InvalidOid;
1154  else
1155  databaseId = MyDatabaseId;
1156  }
1157  else if (tupleRelId == AttributeRelationId)
1158  {
1159  Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1160 
1161  relationId = atttup->attrelid;
1162 
1163  /*
1164  * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1165  * even if the rel in question is shared (which we can't easily tell).
1166  * This essentially means that only backends in this same database
1167  * will react to the relcache flush request. This is in fact
1168  * appropriate, since only those backends could see our pg_attribute
1169  * change anyway. It looks a bit ugly though. (In practice, shared
1170  * relations can't have schema changes after bootstrap, so we should
1171  * never come here for a shared rel anyway.)
1172  */
1173  databaseId = MyDatabaseId;
1174  }
1175  else if (tupleRelId == IndexRelationId)
1176  {
1177  Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1178 
1179  /*
1180  * When a pg_index row is updated, we should send out a relcache inval
1181  * for the index relation. As above, we don't know the shared status
1182  * of the index, but in practice it doesn't matter since indexes of
1183  * shared catalogs can't have such updates.
1184  */
1185  relationId = indextup->indexrelid;
1186  databaseId = MyDatabaseId;
1187  }
1188  else
1189  return;
1190 
1191  /*
1192  * Yes. We need to register a relcache invalidation event.
1193  */
1194  RegisterRelcacheInvalidation(databaseId, relationId);
1195 }
1196 
1197 /*
1198  * CacheInvalidateCatalog
1199  * Register invalidation of the whole content of a system catalog.
1200  *
1201  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1202  * changed any tuples as moved them around. Some uses of catcache entries
1203  * expect their TIDs to be correct, so we have to blow away the entries.
1204  *
1205  * Note: we expect caller to verify that the rel actually is a system
1206  * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1207  */
1208 void
1210 {
1211  Oid databaseId;
1212 
1214 
1215  if (IsSharedRelation(catalogId))
1216  databaseId = InvalidOid;
1217  else
1218  databaseId = MyDatabaseId;
1219 
1220  RegisterCatalogInvalidation(databaseId, catalogId);
1221 }
1222 
1223 /*
1224  * CacheInvalidateRelcache
1225  * Register invalidation of the specified relation's relcache entry
1226  * at end of command.
1227  *
1228  * This is used in places that need to force relcache rebuild but aren't
1229  * changing any of the tuples recognized as contributors to the relcache
1230  * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1231  */
1232 void
1234 {
1235  Oid databaseId;
1236  Oid relationId;
1237 
1239 
1240  relationId = RelationGetRelid(relation);
1241  if (relation->rd_rel->relisshared)
1242  databaseId = InvalidOid;
1243  else
1244  databaseId = MyDatabaseId;
1245 
1246  RegisterRelcacheInvalidation(databaseId, relationId);
1247 }
1248 
1249 /*
1250  * CacheInvalidateRelcacheAll
1251  * Register invalidation of the whole relcache at the end of command.
1252  *
1253  * This is used by alter publication as changes in publications may affect
1254  * large number of tables.
1255  */
1256 void
1258 {
1260 
1262 }
1263 
1264 /*
1265  * CacheInvalidateRelcacheByTuple
1266  * As above, but relation is identified by passing its pg_class tuple.
1267  */
1268 void
1270 {
1271  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1272  Oid databaseId;
1273  Oid relationId;
1274 
1276 
1277  relationId = HeapTupleGetOid(classTuple);
1278  if (classtup->relisshared)
1279  databaseId = InvalidOid;
1280  else
1281  databaseId = MyDatabaseId;
1282  RegisterRelcacheInvalidation(databaseId, relationId);
1283 }
1284 
1285 /*
1286  * CacheInvalidateRelcacheByRelid
1287  * As above, but relation is identified by passing its OID.
1288  * This is the least efficient of the three options; use one of
1289  * the above routines if you have a Relation or pg_class tuple.
1290  */
1291 void
1293 {
1294  HeapTuple tup;
1295 
1297 
1298  tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1299  if (!HeapTupleIsValid(tup))
1300  elog(ERROR, "cache lookup failed for relation %u", relid);
1302  ReleaseSysCache(tup);
1303 }
1304 
1305 
1306 /*
1307  * CacheInvalidateSmgr
1308  * Register invalidation of smgr references to a physical relation.
1309  *
1310  * Sending this type of invalidation msg forces other backends to close open
1311  * smgr entries for the rel. This should be done to flush dangling open-file
1312  * references when the physical rel is being dropped or truncated. Because
1313  * these are nontransactional (i.e., not-rollback-able) operations, we just
1314  * send the inval message immediately without any queuing.
1315  *
1316  * Note: in most cases there will have been a relcache flush issued against
1317  * the rel at the logical level. We need a separate smgr-level flush because
1318  * it is possible for backends to have open smgr entries for rels they don't
1319  * have a relcache entry for, e.g. because the only thing they ever did with
1320  * the rel is write out dirty shared buffers.
1321  *
1322  * Note: because these messages are nontransactional, they won't be captured
1323  * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1324  * should happen in low-level smgr.c routines, which are executed while
1325  * replaying WAL as well as when creating it.
1326  *
1327  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1328  * three bytes of the backend ID using what would otherwise be padding space.
1329  * Thus, the maximum possible backend ID is 2^23-1.
1330  */
1331 void
1333 {
1335 
1336  msg.sm.id = SHAREDINVALSMGR_ID;
1337  msg.sm.backend_hi = rnode.backend >> 16;
1338  msg.sm.backend_lo = rnode.backend & 0xffff;
1339  msg.sm.rnode = rnode.node;
1340  /* check AddCatcacheInvalidationMessage() for an explanation */
1341  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1342 
1343  SendSharedInvalidMessages(&msg, 1);
1344 }
1345 
1346 /*
1347  * CacheInvalidateRelmap
1348  * Register invalidation of the relation mapping for a database,
1349  * or for the shared catalogs if databaseId is zero.
1350  *
1351  * Sending this type of invalidation msg forces other backends to re-read
1352  * the indicated relation mapping file. It is also necessary to send a
1353  * relcache inval for the specific relations whose mapping has been altered,
1354  * else the relcache won't get updated with the new filenode data.
1355  *
1356  * Note: because these messages are nontransactional, they won't be captured
1357  * in commit/abort WAL entries. Instead, calls to CacheInvalidateRelmap()
1358  * should happen in low-level relmapper.c routines, which are executed while
1359  * replaying WAL as well as when creating it.
1360  */
1361 void
1363 {
1365 
1366  msg.rm.id = SHAREDINVALRELMAP_ID;
1367  msg.rm.dbId = databaseId;
1368  /* check AddCatcacheInvalidationMessage() for an explanation */
1369  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1370 
1371  SendSharedInvalidMessages(&msg, 1);
1372 }
1373 
1374 
1375 /*
1376  * CacheRegisterSyscacheCallback
1377  * Register the specified function to be called for all future
1378  * invalidation events in the specified cache. The cache ID and the
1379  * hash value of the tuple being invalidated will be passed to the
1380  * function.
1381  *
1382  * NOTE: Hash value zero will be passed if a cache reset request is received.
1383  * In this case the called routines should flush all cached state.
1384  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1385  * worth troubling over, especially since most of the current callees just
1386  * flush all cached state anyway.
1387  */
1388 void
1391  Datum arg)
1392 {
1393  if (cacheid < 0 || cacheid >= SysCacheSize)
1394  elog(FATAL, "invalid cache ID: %d", cacheid);
1396  elog(FATAL, "out of syscache_callback_list slots");
1397 
1398  if (syscache_callback_links[cacheid] == 0)
1399  {
1400  /* first callback for this cache */
1402  }
1403  else
1404  {
1405  /* add to end of chain, so that older callbacks are called first */
1406  int i = syscache_callback_links[cacheid] - 1;
1407 
1408  while (syscache_callback_list[i].link > 0)
1409  i = syscache_callback_list[i].link - 1;
1411  }
1412 
1417 
1419 }
1420 
1421 /*
1422  * CacheRegisterRelcacheCallback
1423  * Register the specified function to be called for all future
1424  * relcache invalidation events. The OID of the relation being
1425  * invalidated will be passed to the function.
1426  *
1427  * NOTE: InvalidOid will be passed if a cache reset request is received.
1428  * In this case the called routines should flush all cached state.
1429  */
1430 void
1432  Datum arg)
1433 {
1435  elog(FATAL, "out of relcache_callback_list slots");
1436 
1439 
1441 }
1442 
1443 /*
1444  * CallSyscacheCallbacks
1445  *
1446  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1447  * this module from knowing which catcache IDs correspond to which catalogs.
1448  */
1449 void
1450 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1451 {
1452  int i;
1453 
1454  if (cacheid < 0 || cacheid >= SysCacheSize)
1455  elog(ERROR, "invalid cache ID: %d", cacheid);
1456 
1457  i = syscache_callback_links[cacheid] - 1;
1458  while (i >= 0)
1459  {
1460  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1461 
1462  Assert(ccitem->id == cacheid);
1463  ccitem->function(ccitem->arg, cacheid, hashvalue);
1464  i = ccitem->link - 1;
1465  }
1466 }
signed short int16
Definition: c.h:293
void CacheInvalidateSmgr(RelFileNodeBackend rnode)
Definition: inval.c:1332
#define ProcessMessageListMulti(listHdr, codeFragment)
Definition: inval.c:313
#define FIRSTCHUNKSIZE
static void AppendInvalidationMessages(InvalidationListHeader *dest, InvalidationListHeader *src)
Definition: inval.c:440
SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]
Definition: inval.c:127
static SharedInvalidationMessage * SharedInvalidMessagesArray
Definition: inval.c:173
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:92
SharedInvalSnapshotMsg sn
Definition: sinval.h:121
void(* RelcacheCallbackFunction)(Datum arg, Oid relid)
Definition: inval.h:23
static void AddInvalidationMessage(InvalidationChunk **listHdr, SharedInvalidationMessage *msg)
Definition: inval.c:227
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define GETSTRUCT(TUP)
Definition: htup_details.h:661
void AtEOXact_Inval(bool isCommit)
Definition: inval.c:929
MemoryContext TopTransactionContext
Definition: mcxt.c:48
void AcceptInvalidationMessages(void)
Definition: inval.c:679
InvalidationListHeader PriorCmdInvalidMsgs
Definition: inval.c:165
struct InvalidationChunk InvalidationChunk
SharedInvalRelcacheMsg rc
Definition: sinval.h:118
void CommandEndInvalidationMessages(void)
Definition: inval.c:1065
static struct RELCACHECALLBACK relcache_callback_list[MAX_RELCACHE_CALLBACKS]
#define MAX_RELCACHE_CALLBACKS
Definition: inval.c:189
static int relcache_callback_count
Definition: inval.c:209
RelcacheCallbackFunction function
Definition: inval.c:205
#define IndexRelationId
Definition: pg_index.h:29
#define RelationRelationId
Definition: pg_class.h:29
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
bool RelationIdIsInInitFile(Oid relationId)
Definition: relcache.c:5983
MemoryContext CurTransactionContext
Definition: mcxt.c:49
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:2030
struct TransInvalidationInfo * parent
Definition: inval.c:156
#define AttributeRelationId
Definition: pg_attribute.h:33
void CacheInvalidateRelmap(Oid databaseId)
Definition: inval.c:1362
static TransInvalidationInfo * transInvalInfo
Definition: inval.c:171
Form_pg_class rd_rel
Definition: rel.h:114
unsigned int Oid
Definition: postgres_ext.h:31
void RelationMapInvalidate(bool shared)
Definition: relmapper.c:387
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:586
void InvalidateSystemCaches(void)
Definition: inval.c:641
int trace_recovery(int trace_level)
Definition: elog.c:3758
#define ProcessMessageList(listHdr, codeFragment)
Definition: inval.c:293
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1431
int16 link
Definition: inval.c:194
InvalidationListHeader CurrentCmdInvalidMsgs
Definition: inval.c:162
void pfree(void *pointer)
Definition: mcxt.c:936
#define SysCacheSize
Definition: syscache.h:112
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define ERROR
Definition: elog.h:43
struct InvalidationChunk * next
Definition: inval.c:124
#define FATAL
Definition: elog.h:52
static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
Definition: inval.c:766
void ReceiveSharedInvalidMessages(void(*invalFunction)(SharedInvalidationMessage *msg), void(*resetFunction)(void))
Definition: sinval.c:71
#define SHAREDINVALRELCACHE_ID
Definition: sinval.h:76
struct InvalidationListHeader InvalidationListHeader
SharedInvalRelmapMsg rm
Definition: sinval.h:120
#define SHAREDINVALRELMAP_ID
Definition: sinval.h:96
void(* SyscacheCallbackFunction)(Datum arg, int cacheid, uint32 hashvalue)
Definition: inval.h:22
#define MAX_SYSCACHE_CALLBACKS
Definition: inval.c:188
struct TransInvalidationInfo TransInvalidationInfo
void CacheInvalidateRelcacheByRelid(Oid relid)
Definition: inval.c:1292
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:187
void PostPrepare_Inval(void)
Definition: inval.c:757
unsigned int uint32
Definition: c.h:306
InvalidationChunk * cclist
Definition: inval.c:132
static int numSharedInvalidMessagesArray
Definition: inval.c:174
SharedInvalCatcacheMsg cc
Definition: sinval.h:116
void smgrclosenode(RelFileNodeBackend rnode)
Definition: smgr.c:350
SharedInvalCatalogMsg cat
Definition: sinval.h:117
void InvalidateCatalogSnapshot(void)
Definition: snapmgr.c:510
char * GetDatabasePath(Oid dbNode, Oid spcNode)
Definition: relpath.c:108
static void ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr, void(*func)(const SharedInvalidationMessage *msgs, int n))
Definition: inval.c:466
static void PrepareInvalidationState(void)
Definition: inval.c:720
static struct SYSCACHECALLBACK syscache_callback_list[MAX_SYSCACHE_CALLBACKS]
signed char int8
Definition: c.h:292
FormData_pg_index * Form_pg_index
Definition: pg_index.h:67
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:1112
bool RelcacheInitFileInval
Definition: inval.c:168
static void ProcessInvalidationMessages(InvalidationListHeader *hdr, void(*func)(SharedInvalidationMessage *msg))
Definition: inval.c:454
void CacheInvalidateRelcacheAll(void)
Definition: inval.c:1257
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1389
uintptr_t Datum
Definition: postgres.h:372
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1160
static void AddSnapshotInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:413
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1450
Oid MyDatabaseId
Definition: globals.c:77
static void RegisterCatalogInvalidation(Oid dbId, Oid catId)
Definition: inval.c:498
void RelationCacheInitFilePostInvalidate(void)
Definition: relcache.c:6092
bool IsSharedRelation(Oid relationId)
Definition: catalog.c:220
static void AddCatalogInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid catId)
Definition: inval.c:365
static int syscache_callback_count
Definition: inval.c:201
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:728
#define InvalidOid
Definition: postgres_ext.h:36
static void RegisterRelcacheInvalidation(Oid dbId, Oid relId)
Definition: inval.c:510
#define SHAREDINVALSNAPSHOT_ID
Definition: sinval.h:104
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition: inval.c:872
RelFileNode node
Definition: relfilenode.h:74
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:754
char * DatabasePath
Definition: globals.c:85
void CacheInvalidateCatalog(Oid catalogId)
Definition: inval.c:1209
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
void RelationCacheInitFilePreInvalidate(void)
Definition: relcache.c:6066
static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId)
Definition: inval.c:484
#define Assert(condition)
Definition: c.h:680
bool RelationInvalidatesSnapshotsOnly(Oid relid)
Definition: syscache.c:1464
SharedInvalSmgrMsg sm
Definition: sinval.h:119
void SysCacheInvalidate(int cacheId, uint32 hashValue)
Definition: syscache.c:1440
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:735
BackendId backend
Definition: relfilenode.h:75
static void AddRelcacheInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:383
void RelationCacheInvalidateEntry(Oid relationId)
Definition: relcache.c:2726
void SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
Definition: sinval.c:49
void ResetCatalogCaches(void)
Definition: catcache.c:705
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:949
static int16 syscache_callback_links[SysCacheSize]
Definition: inval.c:199
uint16 backend_lo
Definition: sinval.h:92
void CacheInvalidateRelcache(Relation relation)
Definition: inval.c:1233
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:367
FormData_pg_class * Form_pg_class
Definition: pg_class.h:95
void LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
Definition: inval.c:554
#define SHAREDINVALCATALOG_ID
Definition: sinval.h:67
static int maxSharedInvalidMessagesArray
Definition: inval.c:175
void * palloc(Size size)
Definition: mcxt.c:835
void AtEOSubXact_Inval(bool isCommit)
Definition: inval.c:988
int xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs, bool *RelcacheInitFileInval)
Definition: inval.c:816
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:693
void RelationCacheInvalidate(void)
Definition: relcache.c:2770
int i
RelFileNode rnode
Definition: sinval.h:93
void * arg
#define SHAREDINVALSMGR_ID
Definition: sinval.h:85
void CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
Definition: inval.c:1269
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:680
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:700
InvalidationChunk * rclist
Definition: inval.c:133
#define RelationGetRelid(relation)
Definition: rel.h:425
#define offsetof(type, field)
Definition: c.h:603
static void AddCatcacheInvalidationMessage(InvalidationListHeader *hdr, int id, uint32 hashValue, Oid dbId)
Definition: inval.c:337
static void RegisterSnapshotInvalidation(Oid dbId, Oid relId)
Definition: inval.c:540
SyscacheCallbackFunction function
Definition: inval.c:195
static void AppendInvalidationMessageList(InvalidationChunk **destHdr, InvalidationChunk **srcHdr)
Definition: inval.c:269