inval.c
1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  * POSTGRES cache invalidation dispatcher code.
5  *
6  * This is subtle stuff, so pay attention:
7  *
8  * When a tuple is updated or deleted, our standard visibility rules
9  * consider that it is *still valid* so long as we are in the same command,
10  * ie, until the next CommandCounterIncrement() or transaction commit.
11  * (See access/heap/heapam_visibility.c, and note that system catalogs are
12  * generally scanned under the most current snapshot available, rather than
13  * the transaction snapshot.) At the command boundary, the old tuple stops
14  * being valid and the new version, if any, becomes valid. Therefore,
15  * we cannot simply flush a tuple from the system caches during heap_update()
16  * or heap_delete(). The tuple is still good at that point; what's more,
17  * even if we did flush it, it might be reloaded into the caches by a later
18  * request in the same command. So the correct behavior is to keep a list
19  * of outdated (updated/deleted) tuples and then do the required cache
20  * flushes at the next command boundary. We must also keep track of
21  * inserted tuples so that we can flush "negative" cache entries that match
22  * the new tuples; again, that mustn't happen until end of command.
23  *
24  * Once we have finished the command, we still need to remember inserted
25  * tuples (including new versions of updated tuples), so that we can flush
26  * them from the caches if we abort the transaction. Similarly, we'd better
27  * be able to flush "negative" cache entries that may have been loaded in
28  * place of deleted tuples, so we still need the deleted ones too.
29  *
30  * If we successfully complete the transaction, we have to broadcast all
31  * these invalidation events to other backends (via the SI message queue)
32  * so that they can flush obsolete entries from their caches. Note we have
33  * to record the transaction commit before sending SI messages, otherwise
34  * the other backends won't see our updated tuples as good.
35  *
36  * When a subtransaction aborts, we can process and discard any events
37  * it has queued. When a subtransaction commits, we just add its events
38  * to the pending lists of the parent transaction.
39  *
40  * In short, we need to remember until xact end every insert or delete
41  * of a tuple that might be in the system caches. Updates are treated as
42  * two events, delete + insert, for simplicity. (If the update doesn't
43  * change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  * We do not need to register EVERY tuple operation in this way, just those
46  * on tuples in relations that have associated catcaches. We do, however,
47  * have to register every operation on every tuple that *could* be in a
48  * catcache, whether or not it currently is in our cache. Also, if the
49  * tuple is in a relation that has multiple catcaches, we need to register
50  * an invalidation message for each such catcache. catcache.c's
51  * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  * catcaches may need invalidation for a given tuple.
53  *
54  * Also, whenever we see an operation on a pg_class, pg_attribute, or
55  * pg_index tuple, we register a relcache flush operation for the relation
56  * described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  * Likewise for pg_constraint tuples for foreign keys on relations.
58  *
59  * We keep the relcache flush requests in lists separate from the catcache
60  * tuple flush requests. This allows us to issue all the pending catcache
61  * flushes before we issue relcache flushes, which saves us from loading
62  * a catcache tuple during relcache load only to flush it again right away.
63  * Also, we avoid queuing multiple relcache flush requests for the same
64  * relation, since a relcache flush is relatively expensive to do.
65  * (XXX is it worth testing likewise for duplicate catcache flush entries?
66  * Probably not.)
67  *
68  * If a relcache flush is issued for a system relation that we preload
69  * from the relcache init file, we must also delete the init file so that
70  * it will be rebuilt during the next backend restart. The actual work of
71  * manipulating the init file is in relcache.c, but we keep track of the
72  * need for it here.
73  *
74  * The request lists proper are kept in CurTransactionContext of their
75  * creating (sub)transaction, since they can be forgotten on abort of that
76  * transaction but must be kept till top-level commit otherwise. For
77  * simplicity we keep the controlling list-of-lists in TopTransactionContext.
78  *
79  * Currently, inval messages are sent without regard for the possibility
80  * that the object described by the catalog tuple might be a session-local
81  * object such as a temporary table. This is because (1) this code has
82  * no practical way to tell the difference, and (2) it is not certain that
83  * other backends don't have catalog cache or even relcache entries for
84  * such tables, anyway; there is nothing that prevents that. It might be
85  * worth trying to avoid sending such inval traffic in the future, if those
86  * problems can be overcome cheaply.
87  *
88  *
89  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
90  * Portions Copyright (c) 1994, Regents of the University of California
91  *
92  * IDENTIFICATION
93  * src/backend/utils/cache/inval.c
94  *
95  *-------------------------------------------------------------------------
96  */
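Editor's illustration: the command-boundary discipline described in the header comment is easier to see in miniature. Below is a standalone toy model (not part of inval.c; all names invented): invalidations queue in a current-command list while the outdated tuple is still visible, are applied locally and moved to a prior-commands list at each command boundary, and at transaction end are either broadcast (commit) or replayed locally (abort) to flush whatever the aborted commands loaded into the caches.

/* Editor's sketch, hypothetical and self-contained; compile with: cc toy.c */
#include <stdio.h>

#define MAXMSGS 16

typedef struct { int msgs[MAXMSGS]; int n; } MsgList;

static MsgList currentCmd;   /* events of the current command */
static MsgList priorCmds;    /* events of earlier commands, already applied */

/* heap_update()/heap_delete() time: tuple is still visible, just remember */
static void queue_inval(int msg)
{
    currentCmd.msgs[currentCmd.n++] = msg;
}

/* CommandCounterIncrement() time: flush locally, then move to prior list */
static void command_end(void)
{
    int i;

    for (i = 0; i < currentCmd.n; i++)
        printf("flush local cache entry %d\n", currentCmd.msgs[i]);
    for (i = 0; i < currentCmd.n; i++)
        priorCmds.msgs[priorCmds.n++] = currentCmd.msgs[i];
    currentCmd.n = 0;
}

/* AtEOXact_Inval() time */
static void xact_end(int isCommit)
{
    int i;

    if (isCommit)
    {
        /* send everything; our own caches already reflect priorCmds */
        for (i = 0; i < priorCmds.n; i++)
            printf("broadcast SI message %d\n", priorCmds.msgs[i]);
        for (i = 0; i < currentCmd.n; i++)
            printf("broadcast SI message %d\n", currentCmd.msgs[i]);
    }
    else
    {
        /* abort: flush whatever the aborted commands loaded into our caches */
        for (i = 0; i < priorCmds.n; i++)
            printf("re-flush local cache entry %d\n", priorCmds.msgs[i]);
        /* currentCmd never touched the caches; just forget it */
    }
    currentCmd.n = priorCmds.n = 0;
}

int main(void)
{
    queue_inval(1);
    command_end();          /* entry 1 flushed locally, kept for commit */
    queue_inval(2);
    xact_end(1);            /* broadcasts 1 and 2 */
    return 0;
}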
97 #include "postgres.h"
98 
99 #include <limits.h>
100 
101 #include "access/htup_details.h"
102 #include "access/xact.h"
103 #include "catalog/catalog.h"
104 #include "catalog/pg_constraint.h"
105 #include "miscadmin.h"
106 #include "storage/sinval.h"
107 #include "storage/smgr.h"
108 #include "utils/catcache.h"
109 #include "utils/inval.h"
110 #include "utils/memdebug.h"
111 #include "utils/memutils.h"
112 #include "utils/rel.h"
113 #include "utils/relmapper.h"
114 #include "utils/snapmgr.h"
115 #include "utils/syscache.h"
116 
117 
118 /*
119  * To minimize palloc traffic, we keep pending requests in successively-
120  * larger chunks (a slightly more sophisticated version of an expansible
121  * array). All request types can be stored as SharedInvalidationMessage
122  * records. The ordering of requests within a list is never significant.
123  */
124 typedef struct InvalidationChunk
125 {
126  struct InvalidationChunk *next; /* list link */
127  int nitems; /* # items currently stored in chunk */
128  int maxitems; /* size of allocated array in this chunk */
129  SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
130 } InvalidationChunk;
131 
132 typedef struct InvalidationListHeader
133 {
134  InvalidationChunk *cclist; /* list of chunks holding catcache msgs */
135  InvalidationChunk *rclist; /* list of chunks holding relcache msgs */
136 } InvalidationListHeader;
137 
138 /*----------------
139  * Invalidation info is divided into two lists:
140  * 1) events so far in current command, not yet reflected to caches.
141  * 2) events in previous commands of current transaction; these have
142  * been reflected to local caches, and must be either broadcast to
143  * other backends or rolled back from local cache when we commit
144  * or abort the transaction.
145  * Actually, we need two such lists for each level of nested transaction,
146  * so that we can discard events from an aborted subtransaction. When
147  * a subtransaction commits, we append its lists to the parent's lists.
148  *
149  * The relcache-file-invalidated flag can just be a simple boolean,
150  * since we only act on it at transaction commit; we don't care which
151  * command of the transaction set it.
152  *----------------
153  */
154 
155 typedef struct TransInvalidationInfo
156 {
157  /* Back link to parent transaction's info */
158  struct TransInvalidationInfo *parent;
159 
160  /* Subtransaction nesting depth */
161  int my_level;
162 
163  /* head of current-command event list */
164  InvalidationListHeader CurrentCmdInvalidMsgs;
165 
166  /* head of previous-commands event list */
167  InvalidationListHeader PriorCmdInvalidMsgs;
168 
169  /* init file must be invalidated? */
170  bool RelcacheInitFileInval;
171 } TransInvalidationInfo;
172 
173 static TransInvalidationInfo *transInvalInfo = NULL;
174 
175 static SharedInvalidationMessage *SharedInvalidMessagesArray;
176 static int numSharedInvalidMessagesArray;
177 static int maxSharedInvalidMessagesArray;
178 
179 
180 /*
181  * Dynamically-registered callback functions. Current implementation
182  * assumes there won't be enough of these to justify a dynamically resizable
183  * array; it'd be easy to improve that if needed.
184  *
185  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
186  * syscache are linked into a list pointed to by syscache_callback_links[id].
187  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
188  */
189 
190 #define MAX_SYSCACHE_CALLBACKS 64
191 #define MAX_RELCACHE_CALLBACKS 10
192 
193 static struct SYSCACHECALLBACK
194 {
195  int16 id; /* cache number */
196  int16 link; /* next callback index+1 for same cache */
197  SyscacheCallbackFunction function;
198  Datum arg;
199 } syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
200 
201 static int16 syscache_callback_links[SysCacheSize];
202 
203 static int syscache_callback_count = 0;
204 
205 static struct RELCACHECALLBACK
206 {
207  RelcacheCallbackFunction function;
208  Datum arg;
209 } relcache_callback_list[MAX_RELCACHE_CALLBACKS];
210 
211 static int relcache_callback_count = 0;
212 
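Editor's note: the index-plus-one link encoding described above is compact but easy to misread. Here is a minimal standalone sketch (slot contents invented) of how the chain for one cache id is walked, exactly as CallSyscacheCallbacks() does at the bottom of this file:

#include <stdio.h>

struct cb { int id; int link; };            /* mirrors SYSCACHECALLBACK */

int main(void)
{
    /* pretend three callbacks for cache id 7 were registered into
     * slots 0, 4 and 5; link values are "next slot + 1", 0 = end */
    struct cb list[6] = { {7, 5}, {3, 0}, {3, 0}, {9, 0}, {7, 6}, {7, 0} };
    int links[16] = {0};
    int i;

    links[7] = 1;                           /* chain head: slot 0 + 1 */

    for (i = links[7] - 1; i >= 0; i = list[i].link - 1)
        printf("cache 7: would call callback in slot %d\n", i);
    return 0;
}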
213 /* ----------------------------------------------------------------
214  * Invalidation list support functions
215  *
216  * These three routines encapsulate processing of the "chunked"
217  * representation of what is logically just a list of messages.
218  * ----------------------------------------------------------------
219  */
220 
221 /*
222  * AddInvalidationMessage
223  * Add an invalidation message to a list (of chunks).
224  *
225  * Note that we do not pay any great attention to maintaining the original
226  * ordering of the messages.
227  */
228 static void
229 AddInvalidationMessage(InvalidationChunk **listHdr,
230  SharedInvalidationMessage *msg)
231 {
232  InvalidationChunk *chunk = *listHdr;
233 
234  if (chunk == NULL)
235  {
236  /* First time through; create initial chunk */
237 #define FIRSTCHUNKSIZE 32
238  chunk = (InvalidationChunk *)
239  MemoryContextAlloc(CurTransactionContext,
240  offsetof(InvalidationChunk, msgs) +
241  FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
242  chunk->nitems = 0;
243  chunk->maxitems = FIRSTCHUNKSIZE;
244  chunk->next = *listHdr;
245  *listHdr = chunk;
246  }
247  else if (chunk->nitems >= chunk->maxitems)
248  {
249  /* Need another chunk; double size of last chunk */
250  int chunksize = 2 * chunk->maxitems;
251 
252  chunk = (InvalidationChunk *)
253  MemoryContextAlloc(CurTransactionContext,
254  offsetof(InvalidationChunk, msgs) +
255  chunksize * sizeof(SharedInvalidationMessage));
256  chunk->nitems = 0;
257  chunk->maxitems = chunksize;
258  chunk->next = *listHdr;
259  *listHdr = chunk;
260  }
261  /* Okay, add message to current chunk */
262  chunk->msgs[chunk->nitems] = *msg;
263  chunk->nitems++;
264 }
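Editor's illustration: chunk capacities grow 32, 64, 128, ... and each chunk is a single allocation whose size follows from the flexible array member. A standalone sketch of the arithmetic (assuming, as in current PostgreSQL, that SharedInvalidationMessage is 16 bytes; the stand-in type here is invented):

#include <stdio.h>
#include <stddef.h>

typedef struct { char bytes[16]; } Msg;    /* stand-in; 16 bytes assumed */

typedef struct Chunk
{
    struct Chunk *next;
    int nitems;
    int maxitems;
    Msg msgs[];                            /* flexible array member */
} Chunk;

int main(void)
{
    int maxitems;

    for (maxitems = 32; maxitems <= 256; maxitems *= 2)
        printf("chunk holding %3d msgs = %zu bytes\n", maxitems,
               offsetof(Chunk, msgs) + maxitems * sizeof(Msg));
    return 0;
}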
265 
266 /*
267  * Append one list of invalidation message chunks to another, resetting
268  * the source chunk-list pointer to NULL.
269  */
270 static void
271 AppendInvalidationMessageList(InvalidationChunk **destHdr,
272  InvalidationChunk **srcHdr)
273 {
274  InvalidationChunk *chunk = *srcHdr;
275 
276  if (chunk == NULL)
277  return; /* nothing to do */
278 
279  while (chunk->next != NULL)
280  chunk = chunk->next;
281 
282  chunk->next = *destHdr;
283 
284  *destHdr = *srcHdr;
285 
286  *srcHdr = NULL;
287 }
288 
289 /*
290  * Process a list of invalidation messages.
291  *
292  * This is a macro that executes the given code fragment for each message in
293  * a message chunk list. The fragment should refer to the message as *msg.
294  */
295 #define ProcessMessageList(listHdr, codeFragment) \
296  do { \
297  InvalidationChunk *_chunk; \
298  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
299  { \
300  int _cindex; \
301  for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
302  { \
303  SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
304  codeFragment; \
305  } \
306  } \
307  } while (0)
308 
309 /*
310  * Process a list of invalidation messages group-wise.
311  *
312  * As above, but the code fragment can handle an array of messages.
313  * The fragment should refer to the messages as msgs[], with n entries.
314  */
315 #define ProcessMessageListMulti(listHdr, codeFragment) \
316  do { \
317  InvalidationChunk *_chunk; \
318  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
319  { \
320  SharedInvalidationMessage *msgs = _chunk->msgs; \
321  int n = _chunk->nitems; \
322  codeFragment; \
323  } \
324  } while (0)
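Editor's note: the codeFragment parameter is textually substituted, so a return inside it exits the function that invoked the macro, not just the loops; the duplicate-suppression checks below rely on this. Roughly, the test in AddRelcacheInvalidationMessage() expands to the following (an approximate expansion for illustration, not literal preprocessor output):

/* approximate expansion of ProcessMessageList(hdr->rclist, if (...) return) */
do {
    InvalidationChunk *_chunk;
    for (_chunk = hdr->rclist; _chunk != NULL; _chunk = _chunk->next)
    {
        int _cindex;
        for (_cindex = 0; _cindex < _chunk->nitems; _cindex++)
        {
            SharedInvalidationMessage *msg = &_chunk->msgs[_cindex];
            if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
                (msg->rc.relId == relId || msg->rc.relId == InvalidOid))
                return;    /* leaves AddRelcacheInvalidationMessage() itself */
        }
    }
} while (0);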
325 
326 
327 /* ----------------------------------------------------------------
328  * Invalidation set support functions
329  *
330  * These routines understand about the division of a logical invalidation
331  * list into separate physical lists for catcache and relcache entries.
332  * ----------------------------------------------------------------
333  */
334 
335 /*
336  * Add a catcache inval entry
337  */
338 static void
339 AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
340  int id, uint32 hashValue, Oid dbId)
341 {
342  SharedInvalidationMessage msg;
343 
344  Assert(id < CHAR_MAX);
345  msg.cc.id = (int8) id;
346  msg.cc.dbId = dbId;
347  msg.cc.hashValue = hashValue;
348 
349  /*
350  * Define padding bytes in SharedInvalidationMessage structs to be
351  * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
352  * multiple processes, will cause spurious valgrind warnings about
353  * undefined memory being used. That's because valgrind remembers the
354  * undefined bytes from the last local process's store, not realizing that
355  * another process has written since, filling the previously uninitialized
356  * bytes.
357  */
358  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
359 
360  AddInvalidationMessage(&hdr->cclist, &msg);
361 }
362 
363 /*
364  * Add a whole-catalog inval entry
365  */
366 static void
367 AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
368  Oid dbId, Oid catId)
369 {
370  SharedInvalidationMessage msg;
371 
372  msg.cat.id = SHAREDINVALCATALOG_ID;
373  msg.cat.dbId = dbId;
374  msg.cat.catId = catId;
375  /* check AddCatcacheInvalidationMessage() for an explanation */
376  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
377 
378  AddInvalidationMessage(&hdr->cclist, &msg);
379 }
380 
381 /*
382  * Add a relcache inval entry
383  */
384 static void
385 AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
386  Oid dbId, Oid relId)
387 {
388  SharedInvalidationMessage msg;
389 
390  /*
391  * Don't add a duplicate item. We assume dbId need not be checked because
392  * it will never change. InvalidOid for relId means all relations so we
393  * don't need to add individual ones when it is present.
394  */
395  ProcessMessageList(hdr->rclist,
396  if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
397  (msg->rc.relId == relId ||
398  msg->rc.relId == InvalidOid))
399  return);
400 
401  /* OK, add the item */
402  msg.rc.id = SHAREDINVALRELCACHE_ID;
403  msg.rc.dbId = dbId;
404  msg.rc.relId = relId;
405  /* check AddCatcacheInvalidationMessage() for an explanation */
406  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
407 
408  AddInvalidationMessage(&hdr->rclist, &msg);
409 }
410 
411 /*
412  * Add a snapshot inval entry
413  */
414 static void
415 AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
416  Oid dbId, Oid relId)
417 {
418  SharedInvalidationMessage msg;
419 
420  /* Don't add a duplicate item */
421  /* We assume dbId need not be checked because it will never change */
422  ProcessMessageList(hdr->rclist,
423  if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
424  msg->sn.relId == relId)
425  return);
426 
427  /* OK, add the item */
428  msg.sn.id = SHAREDINVALSNAPSHOT_ID;
429  msg.sn.dbId = dbId;
430  msg.sn.relId = relId;
431  /* check AddCatcacheInvalidationMessage() for an explanation */
432  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
433 
434  AddInvalidationMessage(&hdr->rclist, &msg);
435 }
436 
437 /*
438  * Append one list of invalidation messages to another, resetting
439  * the source list to empty.
440  */
441 static void
442 AppendInvalidationMessages(InvalidationListHeader *dest,
443  InvalidationListHeader *src)
444 {
445  AppendInvalidationMessageList(&dest->cclist, &src->cclist);
446  AppendInvalidationMessageList(&dest->rclist, &src->rclist);
447 }
448 
449 /*
450  * Execute the given function for all the messages in an invalidation list.
451  * The list is not altered.
452  *
453  * catcache entries are processed first, for reasons mentioned above.
454  */
455 static void
456 ProcessInvalidationMessages(InvalidationListHeader *hdr,
457  void (*func) (SharedInvalidationMessage *msg))
458 {
459  ProcessMessageList(hdr->cclist, func(msg));
460  ProcessMessageList(hdr->rclist, func(msg));
461 }
462 
463 /*
464  * As above, but the function is able to process an array of messages
465  * rather than just one at a time.
466  */
467 static void
468 ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
469  void (*func) (const SharedInvalidationMessage *msgs, int n))
470 {
471  ProcessMessageListMulti(hdr->cclist, func(msgs, n));
472  ProcessMessageListMulti(hdr->rclist, func(msgs, n));
473 }
474 
475 /* ----------------------------------------------------------------
476  * private support functions
477  * ----------------------------------------------------------------
478  */
479 
480 /*
481  * RegisterCatcacheInvalidation
482  *
483  * Register an invalidation event for a catcache tuple entry.
484  */
485 static void
486 RegisterCatcacheInvalidation(int cacheId,
487  uint32 hashValue,
488  Oid dbId)
489 {
490  AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
491  cacheId, hashValue, dbId);
492 }
493 
494 /*
495  * RegisterCatalogInvalidation
496  *
497  * Register an invalidation event for all catcache entries from a catalog.
498  */
499 static void
500 RegisterCatalogInvalidation(Oid dbId, Oid catId)
501 {
502  AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
503  dbId, catId);
504 }
505 
506 /*
507  * RegisterRelcacheInvalidation
508  *
509  * As above, but register a relcache invalidation event.
510  */
511 static void
512 RegisterRelcacheInvalidation(Oid dbId, Oid relId)
513 {
514  AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
515  dbId, relId);
516 
517  /*
518  * Most of the time, relcache invalidation is associated with system
519  * catalog updates, but there are a few cases where it isn't. Quick hack
520  * to ensure that the next CommandCounterIncrement() will think that we
521  * need to do CommandEndInvalidationMessages().
522  */
523  (void) GetCurrentCommandId(true);
524 
525  /*
526  * If the relation being invalidated is one of those cached in a relcache
527  * init file, mark that we need to zap that file at commit. For simplicity
528  * invalidations for a specific database always invalidate the shared file
529  * as well. Also zap when we are invalidating whole relcache.
530  */
531  if (relId == InvalidOid || RelationIdIsInInitFile(relId))
532  transInvalInfo->RelcacheInitFileInval = true;
533 }
534 
535 /*
536  * RegisterSnapshotInvalidation
537  *
538  * Register an invalidation event for MVCC scans against a given catalog.
539  * Only needed for catalogs that don't have catcaches.
540  */
541 static void
542 RegisterSnapshotInvalidation(Oid dbId, Oid relId)
543 {
544  AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
545  dbId, relId);
546 }
547 
548 /*
549  * LocalExecuteInvalidationMessage
550  *
551  * Process a single invalidation message (which could be of any type).
552  * Only the local caches are flushed; this does not transmit the message
553  * to other backends.
554  */
555 void
556 LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
557 {
558  if (msg->id >= 0)
559  {
560  if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
561  {
562  InvalidateCatalogSnapshot();
563 
564  SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
565 
566  CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
567  }
568  }
569  else if (msg->id == SHAREDINVALCATALOG_ID)
570  {
571  if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
572  {
573  InvalidateCatalogSnapshot();
574 
575  CatalogCacheFlushCatalog(msg->cat.catId);
576 
577  /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
578  }
579  }
580  else if (msg->id == SHAREDINVALRELCACHE_ID)
581  {
582  if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
583  {
584  int i;
585 
586  if (msg->rc.relId == InvalidOid)
587  RelationCacheInvalidate();
588  else
589  RelationCacheInvalidateEntry(msg->rc.relId);
590 
591  for (i = 0; i < relcache_callback_count; i++)
592  {
593  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
594 
595  ccitem->function(ccitem->arg, msg->rc.relId);
596  }
597  }
598  }
599  else if (msg->id == SHAREDINVALSMGR_ID)
600  {
601  /*
602  * We could have smgr entries for relations of other databases, so no
603  * short-circuit test is possible here.
604  */
605  RelFileNodeBackend rnode;
606 
607  rnode.node = msg->sm.rnode;
608  rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
609  smgrclosenode(rnode);
610  }
611  else if (msg->id == SHAREDINVALRELMAP_ID)
612  {
613  /* We only care about our own database and shared catalogs */
614  if (msg->rm.dbId == InvalidOid)
615  RelationMapInvalidate(true);
616  else if (msg->rm.dbId == MyDatabaseId)
617  RelationMapInvalidate(false);
618  }
619  else if (msg->id == SHAREDINVALSNAPSHOT_ID)
620  {
621  /* We only care about our own database and shared catalogs */
622  if (msg->rm.dbId == InvalidOid)
623  InvalidateCatalogSnapshot();
624  else if (msg->rm.dbId == MyDatabaseId)
625  InvalidateCatalogSnapshot();
626  }
627  else
628  elog(FATAL, "unrecognized SI message ID: %d", msg->id);
629 }
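Editor's sketch: real messages arrive from the SI queue via sinval.c, but for orientation this is how a hand-built relcache message would flow through the dispatcher above, touching only this backend's caches (someRelationOid is hypothetical):

SharedInvalidationMessage msg;
Oid someRelationOid = 12345;     /* hypothetical relation OID */

msg.rc.id = SHAREDINVALRELCACHE_ID;
msg.rc.dbId = MyDatabaseId;      /* InvalidOid would mean "shared/any db"  */
msg.rc.relId = someRelationOid;  /* InvalidOid would mean "all relations"  */

LocalExecuteInvalidationMessage(&msg);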
630 
631 /*
632  * InvalidateSystemCaches
633  *
634  * This blows away all tuples in the system catalog caches and
635  * all the cached relation descriptors and smgr cache entries.
636  * Relation descriptors that have positive refcounts are then rebuilt.
637  *
638  * We call this when we see a shared-inval-queue overflow signal,
639  * since that tells us we've lost some shared-inval messages and hence
640  * don't know what needs to be invalidated.
641  */
642 void
643 InvalidateSystemCaches(void)
644 {
645  int i;
646 
647  InvalidateCatalogSnapshot();
648  ResetCatalogCaches();
649  RelationCacheInvalidate(); /* gets smgr and relmap too */
650 
651  for (i = 0; i < syscache_callback_count; i++)
652  {
653  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
654 
655  ccitem->function(ccitem->arg, ccitem->id, 0);
656  }
657 
658  for (i = 0; i < relcache_callback_count; i++)
659  {
660  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
661 
662  ccitem->function(ccitem->arg, InvalidOid);
663  }
664 }
665 
666 
667 /* ----------------------------------------------------------------
668  * public functions
669  * ----------------------------------------------------------------
670  */
671 
672 /*
673  * AcceptInvalidationMessages
674  * Read and process invalidation messages from the shared invalidation
675  * message queue.
676  *
677  * Note:
678  * This should be called as the first step in processing a transaction.
679  */
680 void
681 AcceptInvalidationMessages(void)
682 {
683  ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
684  InvalidateSystemCaches);
685 
686  /*
687  * Test code to force cache flushes anytime a flush could happen.
688  *
689  * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
690  * fairly thorough test that the system contains no cache-flush hazards.
691  * However, it also makes the system unbelievably slow --- the regression
692  * tests take about 100 times longer than normal.
693  *
694  * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
695  * slows things by at least a factor of 10000, so I wouldn't suggest
696  * trying to run the entire regression tests that way. It's useful to try
697  * a few simple tests, to make sure that cache reload isn't subject to
698  * internal cache-flush hazards, but after you've done a few thousand
699  * recursive reloads it's unlikely you'll learn more.
700  */
701 #if defined(CLOBBER_CACHE_ALWAYS)
702  {
703  static bool in_recursion = false;
704 
705  if (!in_recursion)
706  {
707  in_recursion = true;
708  InvalidateSystemCaches();
709  in_recursion = false;
710  }
711  }
712 #elif defined(CLOBBER_CACHE_RECURSIVELY)
713  {
714  static int recursion_depth = 0;
715 
716  /* Maximum depth is arbitrary depending on your threshold of pain */
717  if (recursion_depth < 3)
718  {
719  recursion_depth++;
720  InvalidateSystemCaches();
721  recursion_depth--;
722  }
723  }
724 #endif
725 }
726 
727 /*
728  * PrepareInvalidationState
729  * Initialize inval lists for the current (sub)transaction.
730  */
731 static void
732 PrepareInvalidationState(void)
733 {
734  TransInvalidationInfo *myInfo;
735 
736  if (transInvalInfo != NULL &&
737  transInvalInfo->my_level == GetCurrentTransactionNestLevel())
738  return;
739 
740  myInfo = (TransInvalidationInfo *)
741  MemoryContextAllocZero(TopTransactionContext,
742  sizeof(TransInvalidationInfo));
743  myInfo->parent = transInvalInfo;
744  myInfo->my_level = GetCurrentTransactionNestLevel();
745 
746  /*
747  * If there's any previous entry, this one should be for a deeper nesting
748  * level.
749  */
750  Assert(transInvalInfo == NULL ||
751  myInfo->my_level > transInvalInfo->my_level);
752 
753  transInvalInfo = myInfo;
754 }
755 
756 /*
757  * PostPrepare_Inval
758  * Clean up after successful PREPARE.
759  *
760  * Here, we want to act as though the transaction aborted, so that we will
761  * undo any syscache changes it made, thereby bringing us into sync with the
762  * outside world, which doesn't believe the transaction committed yet.
763  *
764  * If the prepared transaction is later aborted, there is nothing more to
765  * do; if it commits, we will receive the consequent inval messages just
766  * like everyone else.
767  */
768 void
769 PostPrepare_Inval(void)
770 {
771  AtEOXact_Inval(false);
772 }
773 
774 /*
775  * Collect invalidation messages into SharedInvalidMessagesArray array.
776  */
777 static void
778 MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
779 {
780  /*
781  * Initialise array first time through in each commit
782  */
783  if (SharedInvalidMessagesArray == NULL)
784  {
785  maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
786  numSharedInvalidMessagesArray = 0;
787 
788  /*
789  * Although this is being palloc'd we don't actually free it directly.
790  * We're so close to EOXact that we know we're going to lose it anyhow.
791  */
792  SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
793  * sizeof(SharedInvalidationMessage));
794  }
795 
796  if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
797  {
798  while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
799  maxSharedInvalidMessagesArray *= 2;
800 
801  SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
802  maxSharedInvalidMessagesArray
803  * sizeof(SharedInvalidationMessage));
804  }
805 
806  /*
807  * Append the next chunk onto the array
808  */
809  memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
810  msgs, n * sizeof(SharedInvalidationMessage));
811  numSharedInvalidMessagesArray += n;
812 }
813 
814 /*
815  * xactGetCommittedInvalidationMessages() is executed by
816  * RecordTransactionCommit() to add invalidation messages onto the
817  * commit record. This applies only to commit message types, never to
818  * abort records. Must always run before AtEOXact_Inval(), since that
819  * removes the data we need to see.
820  *
821  * Remember that this runs before we have officially committed, so we
822  * must not do anything here to change what might occur *if* we should
823  * fail between here and the actual commit.
824  *
825  * see also xact_redo_commit() and xact_desc_commit()
826  */
827 int
828 xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
829  bool *RelcacheInitFileInval)
830 {
831  MemoryContext oldcontext;
832 
833  /* Quick exit if we haven't done anything with invalidation messages. */
834  if (transInvalInfo == NULL)
835  {
836  *RelcacheInitFileInval = false;
837  *msgs = NULL;
838  return 0;
839  }
840 
841  /* Must be at top of stack */
842  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
843 
844  /*
845  * Relcache init file invalidation requires processing both before and
846  * after we send the SI messages. However, we need not do anything unless
847  * we committed.
848  */
849  *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
850 
851  /*
852  * Walk through TransInvalidationInfo to collect all the messages into a
853  * single contiguous array of invalidation messages. It must be contiguous
854  * so we can copy directly into WAL message. Maintain the order that they
855  * would be processed in by AtEOXact_Inval(), to ensure emulated behaviour
856  * in redo is as similar as possible to original. We want the same bugs,
857  * if any, not new ones.
858  */
859  oldcontext = MemoryContextSwitchTo(CurTransactionContext);
860 
861  ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
862  MakeSharedInvalidMessagesArray);
863  ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
864  MakeSharedInvalidMessagesArray);
865  MemoryContextSwitchTo(oldcontext);
866 
867  Assert(!(numSharedInvalidMessagesArray > 0 &&
868  SharedInvalidMessagesArray == NULL));
869 
870  *msgs = SharedInvalidMessagesArray;
871 
872  return numSharedInvalidMessagesArray;
873 }
874 
875 /*
876  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
877  * standby_redo() to process invalidation messages. Currently that happens
878  * only at end-of-xact.
879  *
880  * Relcache init file invalidation requires processing both
881  * before and after we send the SI messages. See AtEOXact_Inval()
882  */
883 void
884 ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
885  int nmsgs, bool RelcacheInitFileInval,
886  Oid dbid, Oid tsid)
887 {
888  if (nmsgs <= 0)
889  return;
890 
891  elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
892  (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
893 
894  if (RelcacheInitFileInval)
895  {
896  elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
897  dbid);
898 
899  /*
900  * RelationCacheInitFilePreInvalidate, when the invalidation message
901  * is for a specific database, requires DatabasePath to be set, but we
902  * should not use SetDatabasePath during recovery, since it is
903  * intended to be used only once by normal backends. Hence, a quick
904  * hack: set DatabasePath directly then unset after use.
905  */
906  if (OidIsValid(dbid))
907  DatabasePath = GetDatabasePath(dbid, tsid);
908 
909  RelationCacheInitFilePreInvalidate();
910 
911  if (OidIsValid(dbid))
912  {
913  pfree(DatabasePath);
914  DatabasePath = NULL;
915  }
916  }
917 
918  SendSharedInvalidMessages(msgs, nmsgs);
919 
920  if (RelcacheInitFileInval)
921  RelationCacheInitFilePostInvalidate();
922 }
923 
924 /*
925  * AtEOXact_Inval
926  * Process queued-up invalidation messages at end of main transaction.
927  *
928  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
929  * to the shared invalidation message queue. Note that these will be read
930  * not only by other backends, but also by our own backend at the next
931  * transaction start (via AcceptInvalidationMessages). This means that
932  * we can skip immediate local processing of anything that's still in
933  * CurrentCmdInvalidMsgs, and just send that list out too.
934  *
935  * If not isCommit, we are aborting, and must locally process the messages
936  * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
937  * since they'll not have seen our changed tuples anyway. We can forget
938  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
939  * the caches yet.
940  *
941  * In any case, reset the various lists to empty. We need not physically
942  * free memory here, since TopTransactionContext is about to be emptied
943  * anyway.
944  *
945  * Note:
946  * This should be called as the last step in processing a transaction.
947  */
948 void
949 AtEOXact_Inval(bool isCommit)
950 {
951  /* Quick exit if no messages */
952  if (transInvalInfo == NULL)
953  return;
954 
955  /* Must be at top of stack */
956  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
957 
958  if (isCommit)
959  {
960  /*
961  * Relcache init file invalidation requires processing both before and
962  * after we send the SI messages. However, we need not do anything
963  * unless we committed.
964  */
965  if (transInvalInfo->RelcacheInitFileInval)
966  RelationCacheInitFilePreInvalidate();
967 
968  AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
969  &transInvalInfo->CurrentCmdInvalidMsgs);
970 
971  ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
972  SendSharedInvalidMessages);
973 
974  if (transInvalInfo->RelcacheInitFileInval)
975  RelationCacheInitFilePostInvalidate();
976  }
977  else
978  {
979  ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
980  LocalExecuteInvalidationMessage);
981  }
982 
983  /* Need not free anything explicitly */
984  transInvalInfo = NULL;
985  SharedInvalidMessagesArray = NULL;
986  numSharedInvalidMessagesArray = 0;
987 }
988 
989 /*
990  * AtEOSubXact_Inval
991  * Process queued-up invalidation messages at end of subtransaction.
992  *
993  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
994  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
995  * parent's PriorCmdInvalidMsgs list.
996  *
997  * If not isCommit, we are aborting, and must locally process the messages
998  * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
999  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
1000  * touched the caches yet.
1001  *
1002  * In any case, pop the transaction stack. We need not physically free memory
1003  * here, since CurTransactionContext is about to be emptied anyway
1004  * (if aborting). Beware of the possibility of aborting the same nesting
1005  * level twice, though.
1006  */
1007 void
1008 AtEOSubXact_Inval(bool isCommit)
1009 {
1010  int my_level;
1011  TransInvalidationInfo *myInfo = transInvalInfo;
1012 
1013  /* Quick exit if no messages. */
1014  if (myInfo == NULL)
1015  return;
1016 
1017  /* Also bail out quickly if messages are not for this level. */
1018  my_level = GetCurrentTransactionNestLevel();
1019  if (myInfo->my_level != my_level)
1020  {
1021  Assert(myInfo->my_level < my_level);
1022  return;
1023  }
1024 
1025  if (isCommit)
1026  {
1027  /* If CurrentCmdInvalidMsgs still has anything, fix it */
1028  CommandEndInvalidationMessages();
1029 
1030  /*
1031  * We create invalidation stack entries lazily, so the parent might
1032  * not have one. Instead of creating one, moving all the data over,
1033  * and then freeing our own, we can just adjust the level of our own
1034  * entry.
1035  */
1036  if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1037  {
1038  myInfo->my_level--;
1039  return;
1040  }
1041 
1042  /* Pass up my inval messages to parent */
1043  AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
1044  &myInfo->PriorCmdInvalidMsgs);
1045 
1046  /* Pending relcache inval becomes parent's problem too */
1047  if (myInfo->RelcacheInitFileInval)
1048  myInfo->parent->RelcacheInitFileInval = true;
1049 
1050  /* Pop the transaction state stack */
1051  transInvalInfo = myInfo->parent;
1052 
1053  /* Need not free anything else explicitly */
1054  pfree(myInfo);
1055  }
1056  else
1057  {
1058  ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
1059  LocalExecuteInvalidationMessage);
1060 
1061  /* Pop the transaction state stack */
1062  transInvalInfo = myInfo->parent;
1063 
1064  /* Need not free anything else explicitly */
1065  pfree(myInfo);
1066  }
1067 }
1068 
1069 /*
1070  * CommandEndInvalidationMessages
1071  * Process queued-up invalidation messages at end of one command
1072  * in a transaction.
1073  *
1074  * Here, we send no messages to the shared queue, since we don't know yet if
1075  * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
1076  * list, so as to flush our caches of any entries we have outdated in the
1077  * current command. We then move the current-cmd list over to become part
1078  * of the prior-cmds list.
1079  *
1080  * Note:
1081  * This should be called during CommandCounterIncrement(),
1082  * after we have advanced the command ID.
1083  */
1084 void
1085 CommandEndInvalidationMessages(void)
1086 {
1087  /*
1088  * You might think this shouldn't be called outside any transaction, but
1089  * bootstrap does it, and also ABORT issued when not in a transaction. So
1090  * just quietly return if no state to work on.
1091  */
1092  if (transInvalInfo == NULL)
1093  return;
1094 
1095  ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
1096  LocalExecuteInvalidationMessage);
1097  AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1098  &transInvalInfo->CurrentCmdInvalidMsgs);
1099 }
1100 
1101 
1102 /*
1103  * CacheInvalidateHeapTuple
1104  * Register the given tuple for invalidation at end of command
1105  * (ie, current command is creating or outdating this tuple).
1106  * Also, detect whether a relcache invalidation is implied.
1107  *
1108  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1109  * For an update, we are called just once, with tuple being the old tuple
1110  * version and newtuple the new version. This allows avoidance of duplicate
1111  * effort during an update.
1112  */
1113 void
1114 CacheInvalidateHeapTuple(Relation relation,
1115  HeapTuple tuple,
1116  HeapTuple newtuple)
1117 {
1118  Oid tupleRelId;
1119  Oid databaseId;
1120  Oid relationId;
1121 
1122  /* Do nothing during bootstrap */
1123  if (IsBootstrapProcessingMode())
1124  return;
1125 
1126  /*
1127  * We only need to worry about invalidation for tuples that are in system
1128  * catalogs; user-relation tuples are never in catcaches and can't affect
1129  * the relcache either.
1130  */
1131  if (!IsCatalogRelation(relation))
1132  return;
1133 
1134  /*
1135  * IsCatalogRelation() will return true for TOAST tables of system
1136  * catalogs, but we don't care about those, either.
1137  */
1138  if (IsToastRelation(relation))
1139  return;
1140 
1141  /*
1142  * If we're not prepared to queue invalidation messages for this
1143  * subtransaction level, get ready now.
1144  */
1145  PrepareInvalidationState();
1146 
1147  /*
1148  * First let the catcache do its thing
1149  */
1150  tupleRelId = RelationGetRelid(relation);
1151  if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1152  {
1153  databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1154  RegisterSnapshotInvalidation(databaseId, tupleRelId);
1155  }
1156  else
1157  PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1158  RegisterCatcacheInvalidation);
1159 
1160  /*
1161  * Now, is this tuple one of the primary definers of a relcache entry? See
1162  * comments in file header for deeper explanation.
1163  *
1164  * Note we ignore newtuple here; we assume an update cannot move a tuple
1165  * from being part of one relcache entry to being part of another.
1166  */
1167  if (tupleRelId == RelationRelationId)
1168  {
1169  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1170 
1171  relationId = classtup->oid;
1172  if (classtup->relisshared)
1173  databaseId = InvalidOid;
1174  else
1175  databaseId = MyDatabaseId;
1176  }
1177  else if (tupleRelId == AttributeRelationId)
1178  {
1179  Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1180 
1181  relationId = atttup->attrelid;
1182 
1183  /*
1184  * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1185  * even if the rel in question is shared (which we can't easily tell).
1186  * This essentially means that only backends in this same database
1187  * will react to the relcache flush request. This is in fact
1188  * appropriate, since only those backends could see our pg_attribute
1189  * change anyway. It looks a bit ugly though. (In practice, shared
1190  * relations can't have schema changes after bootstrap, so we should
1191  * never come here for a shared rel anyway.)
1192  */
1193  databaseId = MyDatabaseId;
1194  }
1195  else if (tupleRelId == IndexRelationId)
1196  {
1197  Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1198 
1199  /*
1200  * When a pg_index row is updated, we should send out a relcache inval
1201  * for the index relation. As above, we don't know the shared status
1202  * of the index, but in practice it doesn't matter since indexes of
1203  * shared catalogs can't have such updates.
1204  */
1205  relationId = indextup->indexrelid;
1206  databaseId = MyDatabaseId;
1207  }
1208  else if (tupleRelId == ConstraintRelationId)
1209  {
1210  Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
1211 
1212  /*
1213  * Foreign keys are part of relcache entries, too, so send out an
1214  * inval for the table that the FK applies to.
1215  */
1216  if (constrtup->contype == CONSTRAINT_FOREIGN &&
1217  OidIsValid(constrtup->conrelid))
1218  {
1219  relationId = constrtup->conrelid;
1220  databaseId = MyDatabaseId;
1221  }
1222  else
1223  return;
1224  }
1225  else
1226  return;
1227 
1228  /*
1229  * Yes. We need to register a relcache invalidation event.
1230  */
1231  RegisterRelcacheInvalidation(databaseId, relationId);
1232 }
1233 
1234 /*
1235  * CacheInvalidateCatalog
1236  * Register invalidation of the whole content of a system catalog.
1237  *
1238  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1239  * changed any tuples as moved them around. Some uses of catcache entries
1240  * expect their TIDs to be correct, so we have to blow away the entries.
1241  *
1242  * Note: we expect caller to verify that the rel actually is a system
1243  * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1244  */
1245 void
1246 CacheInvalidateCatalog(Oid catalogId)
1247 {
1248  Oid databaseId;
1249 
1250  PrepareInvalidationState();
1251 
1252  if (IsSharedRelation(catalogId))
1253  databaseId = InvalidOid;
1254  else
1255  databaseId = MyDatabaseId;
1256 
1257  RegisterCatalogInvalidation(databaseId, catalogId);
1258 }
1259 
1260 /*
1261  * CacheInvalidateRelcache
1262  * Register invalidation of the specified relation's relcache entry
1263  * at end of command.
1264  *
1265  * This is used in places that need to force relcache rebuild but aren't
1266  * changing any of the tuples recognized as contributors to the relcache
1267  * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1268  */
1269 void
1270 CacheInvalidateRelcache(Relation relation)
1271 {
1272  Oid databaseId;
1273  Oid relationId;
1274 
1275  PrepareInvalidationState();
1276 
1277  relationId = RelationGetRelid(relation);
1278  if (relation->rd_rel->relisshared)
1279  databaseId = InvalidOid;
1280  else
1281  databaseId = MyDatabaseId;
1282 
1283  RegisterRelcacheInvalidation(databaseId, relationId);
1284 }
1285 
1286 /*
1287  * CacheInvalidateRelcacheAll
1288  * Register invalidation of the whole relcache at the end of command.
1289  *
1290  * This is used by alter publication as changes in publications may affect
1291  * large number of tables.
1292  */
1293 void
1294 CacheInvalidateRelcacheAll(void)
1295 {
1296  PrepareInvalidationState();
1297 
1298  RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
1299 }
1300 
1301 /*
1302  * CacheInvalidateRelcacheByTuple
1303  * As above, but relation is identified by passing its pg_class tuple.
1304  */
1305 void
1306 CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
1307 {
1308  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1309  Oid databaseId;
1310  Oid relationId;
1311 
1312  PrepareInvalidationState();
1313 
1314  relationId = classtup->oid;
1315  if (classtup->relisshared)
1316  databaseId = InvalidOid;
1317  else
1318  databaseId = MyDatabaseId;
1319  RegisterRelcacheInvalidation(databaseId, relationId);
1320 }
1321 
1322 /*
1323  * CacheInvalidateRelcacheByRelid
1324  * As above, but relation is identified by passing its OID.
1325  * This is the least efficient of the three options; use one of
1326  * the above routines if you have a Relation or pg_class tuple.
1327  */
1328 void
1329 CacheInvalidateRelcacheByRelid(Oid relid)
1330 {
1331  HeapTuple tup;
1332 
1333  PrepareInvalidationState();
1334 
1335  tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1336  if (!HeapTupleIsValid(tup))
1337  elog(ERROR, "cache lookup failed for relation %u", relid);
1338  CacheInvalidateRelcacheByTuple(tup);
1339  ReleaseSysCache(tup);
1340 }
1341 
1342 
1343 /*
1344  * CacheInvalidateSmgr
1345  * Register invalidation of smgr references to a physical relation.
1346  *
1347  * Sending this type of invalidation msg forces other backends to close open
1348  * smgr entries for the rel. This should be done to flush dangling open-file
1349  * references when the physical rel is being dropped or truncated. Because
1350  * these are nontransactional (i.e., not-rollback-able) operations, we just
1351  * send the inval message immediately without any queuing.
1352  *
1353  * Note: in most cases there will have been a relcache flush issued against
1354  * the rel at the logical level. We need a separate smgr-level flush because
1355  * it is possible for backends to have open smgr entries for rels they don't
1356  * have a relcache entry for, e.g. because the only thing they ever did with
1357  * the rel is write out dirty shared buffers.
1358  *
1359  * Note: because these messages are nontransactional, they won't be captured
1360  * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1361  * should happen in low-level smgr.c routines, which are executed while
1362  * replaying WAL as well as when creating it.
1363  *
1364  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1365  * three bytes of the backend ID using what would otherwise be padding space.
1366  * Thus, the maximum possible backend ID is 2^23-1.
1367  */
1368 void
1369 CacheInvalidateSmgr(RelFileNodeBackend rnode)
1370 {
1371  SharedInvalidationMessage msg;
1372 
1373  msg.sm.id = SHAREDINVALSMGR_ID;
1374  msg.sm.backend_hi = rnode.backend >> 16;
1375  msg.sm.backend_lo = rnode.backend & 0xffff;
1376  msg.sm.rnode = rnode.node;
1377  /* check AddCatcacheInvalidationMessage() for an explanation */
1378  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1379 
1380  SendSharedInvalidMessages(&msg, 1);
1381 }
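Editor's illustration: the hi/lo split noted above round-trips exactly, including InvalidBackendId (-1), which packs to hi = -1, lo = 0xffff and unpacks back to -1. A standalone check using the same shift/mask expressions as CacheInvalidateSmgr() and LocalExecuteInvalidationMessage() (loop values invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int backend;

    for (backend = -1; backend < 70000; backend += 12345)
    {
        int8_t   hi = backend >> 16;       /* stored in one padding byte  */
        uint16_t lo = backend & 0xffff;    /* stored in two padding bytes */
        int      unpacked = (hi << 16) | (int) lo;

        printf("%6d -> hi=%4d lo=%5u -> %6d\n", backend, hi, lo, unpacked);
        assert(unpacked == backend);
    }
    return 0;
}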
1382 
1383 /*
1384  * CacheInvalidateRelmap
1385  * Register invalidation of the relation mapping for a database,
1386  * or for the shared catalogs if databaseId is zero.
1387  *
1388  * Sending this type of invalidation msg forces other backends to re-read
1389  * the indicated relation mapping file. It is also necessary to send a
1390  * relcache inval for the specific relations whose mapping has been altered,
1391  * else the relcache won't get updated with the new filenode data.
1392  *
1393  * Note: because these messages are nontransactional, they won't be captured
1394  * in commit/abort WAL entries. Instead, calls to CacheInvalidateRelmap()
1395  * should happen in low-level relmapper.c routines, which are executed while
1396  * replaying WAL as well as when creating it.
1397  */
1398 void
1399 CacheInvalidateRelmap(Oid databaseId)
1400 {
1401  SharedInvalidationMessage msg;
1402 
1403  msg.rm.id = SHAREDINVALRELMAP_ID;
1404  msg.rm.dbId = databaseId;
1405  /* check AddCatcacheInvalidationMessage() for an explanation */
1406  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1407 
1408  SendSharedInvalidMessages(&msg, 1);
1409 }
1410 
1411 
1412 /*
1413  * CacheRegisterSyscacheCallback
1414  * Register the specified function to be called for all future
1415  * invalidation events in the specified cache. The cache ID and the
1416  * hash value of the tuple being invalidated will be passed to the
1417  * function.
1418  *
1419  * NOTE: Hash value zero will be passed if a cache reset request is received.
1420  * In this case the called routines should flush all cached state.
1421  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1422  * worth troubling over, especially since most of the current callees just
1423  * flush all cached state anyway.
1424  */
1425 void
1426 CacheRegisterSyscacheCallback(int cacheid,
1427  SyscacheCallbackFunction func,
1428  Datum arg)
1429 {
1430  if (cacheid < 0 || cacheid >= SysCacheSize)
1431  elog(FATAL, "invalid cache ID: %d", cacheid);
1432  if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
1433  elog(FATAL, "out of syscache_callback_list slots");
1434 
1435  if (syscache_callback_links[cacheid] == 0)
1436  {
1437  /* first callback for this cache */
1438  syscache_callback_links[cacheid] = syscache_callback_count + 1;
1439  }
1440  else
1441  {
1442  /* add to end of chain, so that older callbacks are called first */
1443  int i = syscache_callback_links[cacheid] - 1;
1444 
1445  while (syscache_callback_list[i].link > 0)
1446  i = syscache_callback_list[i].link - 1;
1447  syscache_callback_list[i].link = syscache_callback_count + 1;
1448  }
1449 
1450  syscache_callback_list[syscache_callback_count].id = cacheid;
1451  syscache_callback_list[syscache_callback_count].link = 0;
1452  syscache_callback_list[syscache_callback_count].function = func;
1453  syscache_callback_list[syscache_callback_count].arg = arg;
1454 
1455  ++syscache_callback_count;
1456 }
1457 
1458 /*
1459  * CacheRegisterRelcacheCallback
1460  * Register the specified function to be called for all future
1461  * relcache invalidation events. The OID of the relation being
1462  * invalidated will be passed to the function.
1463  *
1464  * NOTE: InvalidOid will be passed if a cache reset request is received.
1465  * In this case the called routines should flush all cached state.
1466  */
1467 void
1468 CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
1469  Datum arg)
1470 {
1471  if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
1472  elog(FATAL, "out of relcache_callback_list slots");
1473 
1474  relcache_callback_list[relcache_callback_count].function = func;
1475  relcache_callback_list[relcache_callback_count].arg = arg;
1476 
1477  ++relcache_callback_count;
1478 }
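Editor's sketch of the consumer side: extension or backend code typically registers one callback of each kind at load time and flushes its private state when called. The signatures below match inval.h; the flush helpers and the choice of PROCOID are hypothetical illustration, and a real extension would also need postgres.h, fmgr.h, utils/inval.h, utils/syscache.h and PG_MODULE_MAGIC.

static void
my_syscache_cb(Datum arg, int cacheid, uint32 hashvalue)
{
    /* hashvalue == 0 means "reset everything" */
    my_private_cache_flush(hashvalue);        /* hypothetical helper */
}

static void
my_relcache_cb(Datum arg, Oid relid)
{
    /* relid == InvalidOid means "all relations" */
    my_private_rel_flush(relid);              /* hypothetical helper */
}

void
_PG_init(void)
{
    CacheRegisterSyscacheCallback(PROCOID, my_syscache_cb, (Datum) 0);
    CacheRegisterRelcacheCallback(my_relcache_cb, (Datum) 0);
}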
1479 
1480 /*
1481  * CallSyscacheCallbacks
1482  *
1483  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1484  * this module from knowing which catcache IDs correspond to which catalogs.
1485  */
1486 void
1487 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1488 {
1489  int i;
1490 
1491  if (cacheid < 0 || cacheid >= SysCacheSize)
1492  elog(ERROR, "invalid cache ID: %d", cacheid);
1493 
1494  i = syscache_callback_links[cacheid] - 1;
1495  while (i >= 0)
1496  {
1497  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1498 
1499  Assert(ccitem->id == cacheid);
1500  ccitem->function(ccitem->arg, cacheid, hashvalue);
1501  i = ccitem->link - 1;
1502  }
1503 }