PostgreSQL Source Code (git master)
inval.c
/*-------------------------------------------------------------------------
 *
 * inval.c
 *    POSTGRES cache invalidation dispatcher code.
 *
 *  This is subtle stuff, so pay attention:
 *
 *  When a tuple is updated or deleted, our standard visibility rules
 *  consider that it is *still valid* so long as we are in the same command,
 *  ie, until the next CommandCounterIncrement() or transaction commit.
 *  (See access/heap/heapam_visibility.c, and note that system catalogs are
 *  generally scanned under the most current snapshot available, rather than
 *  the transaction snapshot.)  At the command boundary, the old tuple stops
 *  being valid and the new version, if any, becomes valid.  Therefore,
 *  we cannot simply flush a tuple from the system caches during heap_update()
 *  or heap_delete().  The tuple is still good at that point; what's more,
 *  even if we did flush it, it might be reloaded into the caches by a later
 *  request in the same command.  So the correct behavior is to keep a list
 *  of outdated (updated/deleted) tuples and then do the required cache
 *  flushes at the next command boundary.  We must also keep track of
 *  inserted tuples so that we can flush "negative" cache entries that match
 *  the new tuples; again, that mustn't happen until end of command.
 *
 *  Once we have finished the command, we still need to remember inserted
 *  tuples (including new versions of updated tuples), so that we can flush
 *  them from the caches if we abort the transaction.  Similarly, we'd better
 *  be able to flush "negative" cache entries that may have been loaded in
 *  place of deleted tuples, so we still need the deleted ones too.
 *
 *  If we successfully complete the transaction, we have to broadcast all
 *  these invalidation events to other backends (via the SI message queue)
 *  so that they can flush obsolete entries from their caches.  Note we have
 *  to record the transaction commit before sending SI messages, otherwise
 *  the other backends won't see our updated tuples as good.
 *
 *  When a subtransaction aborts, we can process and discard any events
 *  it has queued.  When a subtransaction commits, we just add its events
 *  to the pending lists of the parent transaction.
 *
 *  In short, we need to remember until xact end every insert or delete
 *  of a tuple that might be in the system caches.  Updates are treated as
 *  two events, delete + insert, for simplicity.  (If the update doesn't
 *  change the tuple hash value, catcache.c optimizes this into one event.)
 *
 *  We do not need to register EVERY tuple operation in this way, just those
 *  on tuples in relations that have associated catcaches.  We do, however,
 *  have to register every operation on every tuple that *could* be in a
 *  catcache, whether or not it currently is in our cache.  Also, if the
 *  tuple is in a relation that has multiple catcaches, we need to register
 *  an invalidation message for each such catcache.  catcache.c's
 *  PrepareToInvalidateCacheTuple() routine provides the knowledge of which
 *  catcaches may need invalidation for a given tuple.
 *
 *  Also, whenever we see an operation on a pg_class, pg_attribute, or
 *  pg_index tuple, we register a relcache flush operation for the relation
 *  described by that tuple (as specified in CacheInvalidateHeapTuple()).
 *  Likewise for pg_constraint tuples for foreign keys on relations.
 *
 *  We keep the relcache flush requests in lists separate from the catcache
 *  tuple flush requests.  This allows us to issue all the pending catcache
 *  flushes before we issue relcache flushes, which saves us from loading
 *  a catcache tuple during relcache load only to flush it again right away.
 *  Also, we avoid queuing multiple relcache flush requests for the same
 *  relation, since a relcache flush is relatively expensive to do.
 *  (XXX is it worth testing likewise for duplicate catcache flush entries?
 *  Probably not.)
 *
 *  If a relcache flush is issued for a system relation that we preload
 *  from the relcache init file, we must also delete the init file so that
 *  it will be rebuilt during the next backend restart.  The actual work of
 *  manipulating the init file is in relcache.c, but we keep track of the
 *  need for it here.
 *
 *  The request lists proper are kept in CurTransactionContext of their
 *  creating (sub)transaction, since they can be forgotten on abort of that
 *  transaction but must be kept till top-level commit otherwise.  For
 *  simplicity we keep the controlling list-of-lists in TopTransactionContext.
 *
 *  Currently, inval messages are sent without regard for the possibility
 *  that the object described by the catalog tuple might be a session-local
 *  object such as a temporary table.  This is because (1) this code has
 *  no practical way to tell the difference, and (2) it is not certain that
 *  other backends don't have catalog cache or even relcache entries for
 *  such tables, anyway; there is nothing that prevents that.  It might be
 *  worth trying to avoid sending such inval traffic in the future, if those
 *  problems can be overcome cheaply.
 *
 *  When wal_level=logical, write invalidations into WAL at each command end
 *  to support decoding of in-progress transactions.  See
 *  CommandEndInvalidationMessages.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/utils/cache/inval.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <limits.h>

#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/pg_constraint.h"
#include "miscadmin.h"
#include "storage/sinval.h"
#include "storage/smgr.h"
#include "utils/catcache.h"
#include "utils/inval.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"


/*
 * To minimize palloc traffic, we keep pending requests in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  All request types can be stored as SharedInvalidationMessage
 * records.  The ordering of requests within a list is never significant.
 */
typedef struct InvalidationChunk
{
    struct InvalidationChunk *next; /* list link */
    int         nitems;         /* # items currently stored in chunk */
    int         maxitems;       /* size of allocated array in this chunk */
    SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
} InvalidationChunk;            /* VARIABLE LENGTH STRUCTURE */

typedef struct InvalidationListHeader
{
    InvalidationChunk *cclist;  /* list of chunks holding catcache msgs */
    InvalidationChunk *rclist;  /* list of chunks holding relcache msgs */
} InvalidationListHeader;

/*----------------
 * Invalidation info is divided into two lists:
 *  1) events so far in current command, not yet reflected to caches.
 *  2) events in previous commands of current transaction; these have
 *     been reflected to local caches, and must be either broadcast to
 *     other backends or rolled back from local cache when we commit
 *     or abort the transaction.
 * Actually, we need two such lists for each level of nested transaction,
 * so that we can discard events from an aborted subtransaction.  When
 * a subtransaction commits, we append its lists to the parent's lists.
 *
 * The relcache-file-invalidated flag can just be a simple boolean,
 * since we only act on it at transaction commit; we don't care which
 * command of the transaction set it.
 *----------------
 */

typedef struct TransInvalidationInfo
{
    /* Back link to parent transaction's info */
    struct TransInvalidationInfo *parent;

    /* Subtransaction nesting depth */
    int         my_level;

    /* head of current-command event list */
    InvalidationListHeader CurrentCmdInvalidMsgs;

    /* head of previous-commands event list */
    InvalidationListHeader PriorCmdInvalidMsgs;

    /* init file must be invalidated? */
    bool        RelcacheInitFileInval;
} TransInvalidationInfo;

static TransInvalidationInfo *transInvalInfo = NULL;

static SharedInvalidationMessage *SharedInvalidMessagesArray;
static int  numSharedInvalidMessagesArray;
static int  maxSharedInvalidMessagesArray;

/*
 * Dynamically-registered callback functions.  Current implementation
 * assumes there won't be enough of these to justify a dynamically resizable
 * array; it'd be easy to improve that if needed.
 *
 * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
 * syscache are linked into a list pointed to by syscache_callback_links[id].
 * The link values are syscache_callback_list[] index plus 1, or 0 for none.
 */

#define MAX_SYSCACHE_CALLBACKS 64
#define MAX_RELCACHE_CALLBACKS 10

static struct SYSCACHECALLBACK
{
    int16       id;             /* cache number */
    int16       link;           /* next callback index+1 for same cache */
    SyscacheCallbackFunction function;
    Datum       arg;
}           syscache_callback_list[MAX_SYSCACHE_CALLBACKS];

static int16 syscache_callback_links[SysCacheSize];

static int  syscache_callback_count = 0;

static struct RELCACHECALLBACK
{
    RelcacheCallbackFunction function;
    Datum       arg;
}           relcache_callback_list[MAX_RELCACHE_CALLBACKS];

static int  relcache_callback_count = 0;

/* ----------------------------------------------------------------
 *              Invalidation list support functions
 *
 *  These three routines encapsulate processing of the "chunked"
 *  representation of what is logically just a list of messages.
 * ----------------------------------------------------------------
 */

/*
 * AddInvalidationMessage
 *      Add an invalidation message to a list (of chunks).
 *
 * Note that we do not pay any great attention to maintaining the original
 * ordering of the messages.
 */
static void
AddInvalidationMessage(InvalidationChunk **listHdr,
                       SharedInvalidationMessage *msg)
{
    InvalidationChunk *chunk = *listHdr;

    if (chunk == NULL)
    {
        /* First time through; create initial chunk */
#define FIRSTCHUNKSIZE 32
        chunk = (InvalidationChunk *)
            MemoryContextAlloc(CurTransactionContext,
                               offsetof(InvalidationChunk, msgs) +
                               FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
        chunk->nitems = 0;
        chunk->maxitems = FIRSTCHUNKSIZE;
        chunk->next = *listHdr;
        *listHdr = chunk;
    }
    else if (chunk->nitems >= chunk->maxitems)
    {
        /* Need another chunk; double size of last chunk */
        int         chunksize = 2 * chunk->maxitems;

        chunk = (InvalidationChunk *)
            MemoryContextAlloc(CurTransactionContext,
                               offsetof(InvalidationChunk, msgs) +
                               chunksize * sizeof(SharedInvalidationMessage));
        chunk->nitems = 0;
        chunk->maxitems = chunksize;
        chunk->next = *listHdr;
        *listHdr = chunk;
    }
    /* Okay, add message to current chunk */
    chunk->msgs[chunk->nitems] = *msg;
    chunk->nitems++;
}
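
/*
 * Illustrative sketch (not in the original file): with FIRSTCHUNKSIZE = 32
 * and size doubling on each overflow, a list receiving N messages allocates
 * chunks of 32, 64, 128, ... entries, i.e. only O(log N) pallocs in total.
 * A hypothetical helper that walks such a chunk list could look like:
 *
 *  static int
 *  CountInvalidationMessages(InvalidationChunk *chunk)
 *  {
 *      int     count = 0;
 *
 *      for (; chunk != NULL; chunk = chunk->next)
 *          count += chunk->nitems;
 *      return count;
 *  }
 */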

/*
 * Append one list of invalidation message chunks to another, resetting
 * the source chunk-list pointer to NULL.
 */
static void
AppendInvalidationMessageList(InvalidationChunk **destHdr,
                              InvalidationChunk **srcHdr)
{
    InvalidationChunk *chunk = *srcHdr;

    if (chunk == NULL)
        return;                 /* nothing to do */

    while (chunk->next != NULL)
        chunk = chunk->next;

    chunk->next = *destHdr;

    *destHdr = *srcHdr;

    *srcHdr = NULL;
}

/*
 * Process a list of invalidation messages.
 *
 * This is a macro that executes the given code fragment for each message in
 * a message chunk list.  The fragment should refer to the message as *msg.
 */
#define ProcessMessageList(listHdr, codeFragment) \
    do { \
        InvalidationChunk *_chunk; \
        for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
        { \
            int         _cindex; \
            for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
            { \
                SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
                codeFragment; \
            } \
        } \
    } while (0)

/*
 * Process a list of invalidation messages group-wise.
 *
 * As above, but the code fragment can handle an array of messages.
 * The fragment should refer to the messages as msgs[], with n entries.
 */
#define ProcessMessageListMulti(listHdr, codeFragment) \
    do { \
        InvalidationChunk *_chunk; \
        for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
        { \
            SharedInvalidationMessage *msgs = _chunk->msgs; \
            int         n = _chunk->nitems; \
            codeFragment; \
        } \
    } while (0)
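
/*
 * Usage sketch (illustrative, not in the original file): the codeFragment
 * argument is spliced directly into the loop body, so a caller can act on
 * one message or on a whole chunk at a time:
 *
 *  ProcessMessageList(hdr->cclist,
 *                     LocalExecuteInvalidationMessage(msg));
 *  ProcessMessageListMulti(hdr->cclist,
 *                          SendSharedInvalidMessages(msgs, n));
 *
 * This is essentially how ProcessInvalidationMessages() and
 * ProcessInvalidationMessagesMulti() below splice in their func arguments.
 */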

/* ----------------------------------------------------------------
 *              Invalidation set support functions
 *
 *  These routines understand about the division of a logical invalidation
 *  list into separate physical lists for catcache and relcache entries.
 * ----------------------------------------------------------------
 */

/*
 * Add a catcache inval entry
 */
static void
AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
                               int id, uint32 hashValue, Oid dbId)
{
    SharedInvalidationMessage msg;

    Assert(id < CHAR_MAX);
    msg.cc.id = (int8) id;
    msg.cc.dbId = dbId;
    msg.cc.hashValue = hashValue;

    /*
     * Mark the padding bytes in SharedInvalidationMessage structs as
     * defined.  Otherwise the sinvaladt.c ringbuffer, which is accessed by
     * multiple processes, will cause spurious valgrind warnings about
     * undefined memory being used.  That's because valgrind remembers the
     * undefined bytes from the last local process's store, not realizing
     * that another process has written since, filling the previously
     * uninitialized bytes.
     */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

    AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a whole-catalog inval entry
 */
static void
AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
                              Oid dbId, Oid catId)
{
    SharedInvalidationMessage msg;

    msg.cat.id = SHAREDINVALCATALOG_ID;
    msg.cat.dbId = dbId;
    msg.cat.catId = catId;
    /* check AddCatcacheInvalidationMessage() for an explanation */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

    AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a relcache inval entry
 */
static void
AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
                               Oid dbId, Oid relId)
{
    SharedInvalidationMessage msg;

    /*
     * Don't add a duplicate item.  We assume dbId need not be checked
     * because it will never change.  InvalidOid for relId means all
     * relations, so we don't need to add individual ones when it is present.
     */
    ProcessMessageList(hdr->rclist,
                       if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
                           (msg->rc.relId == relId ||
                            msg->rc.relId == InvalidOid))
                           return);

    /* OK, add the item */
    msg.rc.id = SHAREDINVALRELCACHE_ID;
    msg.rc.dbId = dbId;
    msg.rc.relId = relId;
    /* check AddCatcacheInvalidationMessage() for an explanation */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

    AddInvalidationMessage(&hdr->rclist, &msg);
}

/*
 * Add a snapshot inval entry
 */
static void
AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
                               Oid dbId, Oid relId)
{
    SharedInvalidationMessage msg;

    /* Don't add a duplicate item */
    /* We assume dbId need not be checked because it will never change */
    ProcessMessageList(hdr->rclist,
                       if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
                           msg->sn.relId == relId)
                           return);

    /* OK, add the item */
    msg.sn.id = SHAREDINVALSNAPSHOT_ID;
    msg.sn.dbId = dbId;
    msg.sn.relId = relId;
    /* check AddCatcacheInvalidationMessage() for an explanation */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

    AddInvalidationMessage(&hdr->rclist, &msg);
}

/*
 * Append one list of invalidation messages to another, resetting
 * the source list to empty.
 */
static void
AppendInvalidationMessages(InvalidationListHeader *dest,
                           InvalidationListHeader *src)
{
    AppendInvalidationMessageList(&dest->cclist, &src->cclist);
    AppendInvalidationMessageList(&dest->rclist, &src->rclist);
}

/*
 * Execute the given function for all the messages in an invalidation list.
 * The list is not altered.
 *
 * catcache entries are processed first, for reasons mentioned above.
 */
static void
ProcessInvalidationMessages(InvalidationListHeader *hdr,
                            void (*func) (SharedInvalidationMessage *msg))
{
    ProcessMessageList(hdr->cclist, func(msg));
    ProcessMessageList(hdr->rclist, func(msg));
}

/*
 * As above, but the function is able to process an array of messages
 * rather than just one at a time.
 */
static void
ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
                                 void (*func) (const SharedInvalidationMessage *msgs, int n))
{
    ProcessMessageListMulti(hdr->cclist, func(msgs, n));
    ProcessMessageListMulti(hdr->rclist, func(msgs, n));
}

/* ----------------------------------------------------------------
 *                    private support functions
 * ----------------------------------------------------------------
 */

/*
 * RegisterCatcacheInvalidation
 *
 * Register an invalidation event for a catcache tuple entry.
 */
static void
RegisterCatcacheInvalidation(int cacheId,
                             uint32 hashValue,
                             Oid dbId)
{
    AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                   cacheId, hashValue, dbId);
}

/*
 * RegisterCatalogInvalidation
 *
 * Register an invalidation event for all catcache entries from a catalog.
 */
static void
RegisterCatalogInvalidation(Oid dbId, Oid catId)
{
    AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                  dbId, catId);
}

/*
 * RegisterRelcacheInvalidation
 *
 * As above, but register a relcache invalidation event.
 */
static void
RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
    AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                   dbId, relId);

    /*
     * Most of the time, relcache invalidation is associated with system
     * catalog updates, but there are a few cases where it isn't.  Quick hack
     * to ensure that the next CommandCounterIncrement() will think that we
     * need to do CommandEndInvalidationMessages().
     */
    (void) GetCurrentCommandId(true);

    /*
     * If the relation being invalidated is one of those cached in a relcache
     * init file, mark that we need to zap that file at commit.  For
     * simplicity, invalidations for a specific database always invalidate
     * the shared file as well.  Also zap when we are invalidating the whole
     * relcache.
     */
    if (relId == InvalidOid || RelationIdIsInInitFile(relId))
        transInvalInfo->RelcacheInitFileInval = true;
}

/*
 * RegisterSnapshotInvalidation
 *
 * Register an invalidation event for MVCC scans against a given catalog.
 * Only needed for catalogs that don't have catcaches.
 */
static void
RegisterSnapshotInvalidation(Oid dbId, Oid relId)
{
    AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                   dbId, relId);
}

/*
 * LocalExecuteInvalidationMessage
 *
 * Process a single invalidation message (which could be of any type).
 * Only the local caches are flushed; this does not transmit the message
 * to other backends.
 */
void
LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
{
    if (msg->id >= 0)
    {
        if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
        {
            InvalidateCatalogSnapshot();

            SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);

            CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
        }
    }
    else if (msg->id == SHAREDINVALCATALOG_ID)
    {
        if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
        {
            InvalidateCatalogSnapshot();

            CatalogCacheFlushCatalog(msg->cat.catId);

            /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
        }
    }
    else if (msg->id == SHAREDINVALRELCACHE_ID)
    {
        if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
        {
            int         i;

            if (msg->rc.relId == InvalidOid)
                RelationCacheInvalidate();
            else
                RelationCacheInvalidateEntry(msg->rc.relId);

            for (i = 0; i < relcache_callback_count; i++)
            {
                struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

                ccitem->function(ccitem->arg, msg->rc.relId);
            }
        }
    }
    else if (msg->id == SHAREDINVALSMGR_ID)
    {
        /*
         * We could have smgr entries for relations of other databases, so no
         * short-circuit test is possible here.
         */
        RelFileNodeBackend rnode;

        rnode.node = msg->sm.rnode;
        rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
        smgrclosenode(rnode);
    }
    else if (msg->id == SHAREDINVALRELMAP_ID)
    {
        /* We only care about our own database and shared catalogs */
        if (msg->rm.dbId == InvalidOid)
            RelationMapInvalidate(true);
        else if (msg->rm.dbId == MyDatabaseId)
            RelationMapInvalidate(false);
    }
    else if (msg->id == SHAREDINVALSNAPSHOT_ID)
    {
        /* We only care about our own database and shared catalogs */
        if (msg->rm.dbId == InvalidOid)
            InvalidateCatalogSnapshot();
        else if (msg->rm.dbId == MyDatabaseId)
            InvalidateCatalogSnapshot();
    }
    else
        elog(FATAL, "unrecognized SI message ID: %d", msg->id);
}

/*
 * InvalidateSystemCaches
 *
 *  This blows away all tuples in the system catalog caches and
 *  all the cached relation descriptors and smgr cache entries.
 *  Relation descriptors that have positive refcounts are then rebuilt.
 *
 *  We call this when we see a shared-inval-queue overflow signal,
 *  since that tells us we've lost some shared-inval messages and hence
 *  don't know what needs to be invalidated.
 */
void
InvalidateSystemCaches(void)
{
    int         i;

    InvalidateCatalogSnapshot();
    ResetCatalogCaches();
    RelationCacheInvalidate(); /* gets smgr and relmap too */

    for (i = 0; i < syscache_callback_count; i++)
    {
        struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

        ccitem->function(ccitem->arg, ccitem->id, 0);
    }

    for (i = 0; i < relcache_callback_count; i++)
    {
        struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

        ccitem->function(ccitem->arg, InvalidOid);
    }
}


/* ----------------------------------------------------------------
 *                    public functions
 * ----------------------------------------------------------------
 */

/*
 * AcceptInvalidationMessages
 *      Read and process invalidation messages from the shared invalidation
 *      message queue.
 *
 * Note:
 *      This should be called as the first step in processing a transaction.
 */
void
AcceptInvalidationMessages(void)
{
    ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
                                 InvalidateSystemCaches);

    /*
     * Test code to force cache flushes anytime a flush could happen.
     *
     * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
     * fairly thorough test that the system contains no cache-flush hazards.
     * However, it also makes the system unbelievably slow --- the regression
     * tests take about 100 times longer than normal.
     *
     * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
     * slows things by at least a factor of 10000, so I wouldn't suggest
     * trying to run the entire regression tests that way. It's useful to try
     * a few simple tests, to make sure that cache reload isn't subject to
     * internal cache-flush hazards, but after you've done a few thousand
     * recursive reloads it's unlikely you'll learn more.
     */
#if defined(CLOBBER_CACHE_ALWAYS)
    {
        static bool in_recursion = false;

        if (!in_recursion)
        {
            in_recursion = true;
            InvalidateSystemCaches();
            in_recursion = false;
        }
    }
#elif defined(CLOBBER_CACHE_RECURSIVELY)
    {
        static int  recursion_depth = 0;

        /* Maximum depth is arbitrary depending on your threshold of pain */
        if (recursion_depth < 3)
        {
            recursion_depth++;
            InvalidateSystemCaches();
            recursion_depth--;
        }
    }
#endif
}
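
/*
 * Caller-side sketch (illustrative, not in the original file): the usual
 * pattern, as in lmgr.c's LockRelationOid(), is to acquire the lock first
 * and only then accept invalidations, so that any change committed by a
 * previous lock holder is applied before the relation is used:
 *
 *  LockAcquire(&tag, lockmode, false, false);
 *  AcceptInvalidationMessages();
 *  ... now it is safe to use cached state for the locked relation ...
 */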

/*
 * PrepareInvalidationState
 *      Initialize inval lists for the current (sub)transaction.
 */
static void
PrepareInvalidationState(void)
{
    TransInvalidationInfo *myInfo;

    if (transInvalInfo != NULL &&
        transInvalInfo->my_level == GetCurrentTransactionNestLevel())
        return;

    myInfo = (TransInvalidationInfo *)
        MemoryContextAllocZero(TopTransactionContext,
                               sizeof(TransInvalidationInfo));
    myInfo->parent = transInvalInfo;
    myInfo->my_level = GetCurrentTransactionNestLevel();

    /*
     * If there's any previous entry, this one should be for a deeper nesting
     * level.
     */
    Assert(transInvalInfo == NULL ||
           myInfo->my_level > transInvalInfo->my_level);

    transInvalInfo = myInfo;
}

/*
 * PostPrepare_Inval
 *      Clean up after successful PREPARE.
 *
 * Here, we want to act as though the transaction aborted, so that we will
 * undo any syscache changes it made, thereby bringing us into sync with the
 * outside world, which doesn't believe the transaction committed yet.
 *
 * If the prepared transaction is later aborted, there is nothing more to
 * do; if it commits, we will receive the consequent inval messages just
 * like everyone else.
 */
void
PostPrepare_Inval(void)
{
    AtEOXact_Inval(false);
}

/*
 * Collect invalidation messages into SharedInvalidMessagesArray array.
 */
static void
MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
{
    /*
     * Initialise array first time through in each commit
     */
    if (SharedInvalidMessagesArray == NULL)
    {
        maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
        numSharedInvalidMessagesArray = 0;

        /*
         * Although this is being palloc'd we don't actually free it
         * directly.  We're so close to EOXact that we know we're going to
         * lose it anyhow.
         */
        SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
                                            * sizeof(SharedInvalidationMessage));
    }

    if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
    {
        while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
            maxSharedInvalidMessagesArray *= 2;

        SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
                                              maxSharedInvalidMessagesArray
                                              * sizeof(SharedInvalidationMessage));
    }

    /*
     * Append the next chunk onto the array
     */
    memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
           msgs, n * sizeof(SharedInvalidationMessage));
    numSharedInvalidMessagesArray += n;
}

/*
 * xactGetCommittedInvalidationMessages() is executed by
 * RecordTransactionCommit() to add invalidation messages onto the
 * commit record.  This applies only to commit message types, never to
 * abort records.  Must always run before AtEOXact_Inval(), since that
 * removes the data we need to see.
 *
 * Remember that this runs before we have officially committed, so we
 * must not do anything here to change what might occur *if* we should
 * fail between here and the actual commit.
 *
 * see also xact_redo_commit() and xact_desc_commit()
 */
int
xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
                                     bool *RelcacheInitFileInval)
{
    MemoryContext oldcontext;

    /* Quick exit if we haven't done anything with invalidation messages. */
    if (transInvalInfo == NULL)
    {
        *RelcacheInitFileInval = false;
        *msgs = NULL;
        return 0;
    }

    /* Must be at top of stack */
    Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);

    /*
     * Relcache init file invalidation requires processing both before and
     * after we send the SI messages.  However, we need not do anything
     * unless we committed.
     */
    *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;

    /*
     * Walk through TransInvalidationInfo to collect all the messages into a
     * single contiguous array of invalidation messages.  It must be
     * contiguous so we can copy directly into the WAL message.  Maintain the
     * order that they would be processed in by AtEOXact_Inval(), to ensure
     * emulated behaviour in redo is as similar as possible to the original.
     * We want the same bugs, if any, not new ones.
     */
    oldcontext = MemoryContextSwitchTo(CurTransactionContext);

    ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
                                     MakeSharedInvalidMessagesArray);
    ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
                                     MakeSharedInvalidMessagesArray);
    MemoryContextSwitchTo(oldcontext);

    Assert(!(numSharedInvalidMessagesArray > 0 &&
             SharedInvalidMessagesArray == NULL));

    *msgs = SharedInvalidMessagesArray;

    return numSharedInvalidMessagesArray;
}
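
/*
 * Caller-side sketch (illustrative, not in the original file): xact.c's
 * RecordTransactionCommit() collects the flat array roughly like this
 * before building the commit record:
 *
 *  SharedInvalidationMessage *invalMessages = NULL;
 *  bool        RelcacheInitFileInval = false;
 *  int         nmsgs;
 *
 *  nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
 *                                               &RelcacheInitFileInval);
 *
 * The array and flag then ride in the WAL commit record, to be replayed
 * via ProcessCommittedInvalidationMessages() below.
 */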

/*
 * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
 * standby_redo() to process invalidation messages.  Currently that happens
 * only at end-of-xact.
 *
 * Relcache init file invalidation requires processing both
 * before and after we send the SI messages.  See AtEOXact_Inval().
 */
void
ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
                                     int nmsgs, bool RelcacheInitFileInval,
                                     Oid dbid, Oid tsid)
{
    if (nmsgs <= 0)
        return;

    elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
         (RelcacheInitFileInval ? " and relcache file invalidation" : ""));

    if (RelcacheInitFileInval)
    {
        elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
             dbid);

        /*
         * RelationCacheInitFilePreInvalidate, when the invalidation message
         * is for a specific database, requires DatabasePath to be set, but
         * we should not use SetDatabasePath during recovery, since it is
         * intended to be used only once by normal backends.  Hence, a quick
         * hack: set DatabasePath directly then unset after use.
         */
        if (OidIsValid(dbid))
            DatabasePath = GetDatabasePath(dbid, tsid);

        RelationCacheInitFilePreInvalidate();

        if (OidIsValid(dbid))
        {
            pfree(DatabasePath);
            DatabasePath = NULL;
        }
    }

    SendSharedInvalidMessages(msgs, nmsgs);

    if (RelcacheInitFileInval)
        RelationCacheInitFilePostInvalidate();
}

/*
 * AtEOXact_Inval
 *      Process queued-up invalidation messages at end of main transaction.
 *
 * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
 * to the shared invalidation message queue.  Note that these will be read
 * not only by other backends, but also by our own backend at the next
 * transaction start (via AcceptInvalidationMessages).  This means that
 * we can skip immediate local processing of anything that's still in
 * CurrentCmdInvalidMsgs, and just send that list out too.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
 * since they'll not have seen our changed tuples anyway.  We can forget
 * about CurrentCmdInvalidMsgs too, since those changes haven't touched
 * the caches yet.
 *
 * In any case, reset the various lists to empty.  We need not physically
 * free memory here, since TopTransactionContext is about to be emptied
 * anyway.
 *
 * Note:
 *      This should be called as the last step in processing a transaction.
 */
void
AtEOXact_Inval(bool isCommit)
{
    /* Quick exit if no messages */
    if (transInvalInfo == NULL)
        return;

    /* Must be at top of stack */
    Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);

    if (isCommit)
    {
        /*
         * Relcache init file invalidation requires processing both before
         * and after we send the SI messages.  However, we need not do
         * anything unless we committed.
         */
        if (transInvalInfo->RelcacheInitFileInval)
            RelationCacheInitFilePreInvalidate();

        AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                                   &transInvalInfo->CurrentCmdInvalidMsgs);

        ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
                                         SendSharedInvalidMessages);

        if (transInvalInfo->RelcacheInitFileInval)
            RelationCacheInitFilePostInvalidate();
    }
    else
    {
        ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                                    LocalExecuteInvalidationMessage);
    }

    /* Need not free anything explicitly */
    transInvalInfo = NULL;
    SharedInvalidMessagesArray = NULL;
    numSharedInvalidMessagesArray = 0;
}

/*
 * AtEOSubXact_Inval
 *      Process queued-up invalidation messages at end of subtransaction.
 *
 * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
 * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
 * parent's PriorCmdInvalidMsgs list.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
 * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
 * touched the caches yet.
 *
 * In any case, pop the transaction stack.  We need not physically free memory
 * here, since CurTransactionContext is about to be emptied anyway
 * (if aborting).  Beware of the possibility of aborting the same nesting
 * level twice, though.
 */
void
AtEOSubXact_Inval(bool isCommit)
{
    int         my_level;
    TransInvalidationInfo *myInfo = transInvalInfo;

    /* Quick exit if no messages. */
    if (myInfo == NULL)
        return;

    /* Also bail out quickly if messages are not for this level. */
    my_level = GetCurrentTransactionNestLevel();
    if (myInfo->my_level != my_level)
    {
        Assert(myInfo->my_level < my_level);
        return;
    }

    if (isCommit)
    {
        /* If CurrentCmdInvalidMsgs still has anything, fix it */
        CommandEndInvalidationMessages();

        /*
         * We create invalidation stack entries lazily, so the parent might
         * not have one.  Instead of creating one, moving all the data over,
         * and then freeing our own, we can just adjust the level of our own
         * entry.
         */
        if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
        {
            myInfo->my_level--;
            return;
        }

        /* Pass up my inval messages to parent */
        AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
                                   &myInfo->PriorCmdInvalidMsgs);

        /* Pending relcache inval becomes parent's problem too */
        if (myInfo->RelcacheInitFileInval)
            myInfo->parent->RelcacheInitFileInval = true;

        /* Pop the transaction state stack */
        transInvalInfo = myInfo->parent;

        /* Need not free anything else explicitly */
        pfree(myInfo);
    }
    else
    {
        ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
                                    LocalExecuteInvalidationMessage);

        /* Pop the transaction state stack */
        transInvalInfo = myInfo->parent;

        /* Need not free anything else explicitly */
        pfree(myInfo);
    }
}

/*
 * CommandEndInvalidationMessages
 *      Process queued-up invalidation messages at end of one command
 *      in a transaction.
 *
 * Here, we send no messages to the shared queue, since we don't know yet if
 * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
 * list, so as to flush our caches of any entries we have outdated in the
 * current command.  We then move the current-cmd list over to become part
 * of the prior-cmds list.
 *
 * Note:
 *      This should be called during CommandCounterIncrement(),
 *      after we have advanced the command ID.
 */
void
CommandEndInvalidationMessages(void)
{
    /*
     * You might think this shouldn't be called outside any transaction, but
     * bootstrap does it, and also ABORT issued when not in a transaction. So
     * just quietly return if no state to work on.
     */
    if (transInvalInfo == NULL)
        return;

    ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
                                LocalExecuteInvalidationMessage);

    /* WAL Log per-command invalidation messages for wal_level=logical */
    if (XLogLogicalInfoActive())
        LogLogicalInvalidations();

    AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                               &transInvalInfo->CurrentCmdInvalidMsgs);
}


/*
 * CacheInvalidateHeapTuple
 *      Register the given tuple for invalidation at end of command
 *      (ie, current command is creating or outdating this tuple).
 *      Also, detect whether a relcache invalidation is implied.
 *
 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
 * For an update, we are called just once, with tuple being the old tuple
 * version and newtuple the new version.  This allows avoidance of duplicate
 * effort during an update.
 */
void
CacheInvalidateHeapTuple(Relation relation,
                         HeapTuple tuple,
                         HeapTuple newtuple)
{
    Oid         tupleRelId;
    Oid         databaseId;
    Oid         relationId;

    /* Do nothing during bootstrap */
    if (IsBootstrapProcessingMode())
        return;

    /*
     * We only need to worry about invalidation for tuples that are in system
     * catalogs; user-relation tuples are never in catcaches and can't affect
     * the relcache either.
     */
    if (!IsCatalogRelation(relation))
        return;

    /*
     * IsCatalogRelation() will return true for TOAST tables of system
     * catalogs, but we don't care about those, either.
     */
    if (IsToastRelation(relation))
        return;

    /*
     * If we're not prepared to queue invalidation messages for this
     * subtransaction level, get ready now.
     */
    PrepareInvalidationState();

    /*
     * First let the catcache do its thing
     */
    tupleRelId = RelationGetRelid(relation);
    if (RelationInvalidatesSnapshotsOnly(tupleRelId))
    {
        databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
        RegisterSnapshotInvalidation(databaseId, tupleRelId);
    }
    else
        PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
                                      RegisterCatcacheInvalidation);

    /*
     * Now, is this tuple one of the primary definers of a relcache entry? See
     * comments in file header for deeper explanation.
     *
     * Note we ignore newtuple here; we assume an update cannot move a tuple
     * from being part of one relcache entry to being part of another.
     */
    if (tupleRelId == RelationRelationId)
    {
        Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);

        relationId = classtup->oid;
        if (classtup->relisshared)
            databaseId = InvalidOid;
        else
            databaseId = MyDatabaseId;
    }
    else if (tupleRelId == AttributeRelationId)
    {
        Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);

        relationId = atttup->attrelid;

        /*
         * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
         * even if the rel in question is shared (which we can't easily tell).
         * This essentially means that only backends in this same database
         * will react to the relcache flush request.  This is in fact
         * appropriate, since only those backends could see our pg_attribute
         * change anyway.  It looks a bit ugly though.  (In practice, shared
         * relations can't have schema changes after bootstrap, so we should
         * never come here for a shared rel anyway.)
         */
        databaseId = MyDatabaseId;
    }
    else if (tupleRelId == IndexRelationId)
    {
        Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);

        /*
         * When a pg_index row is updated, we should send out a relcache inval
         * for the index relation.  As above, we don't know the shared status
         * of the index, but in practice it doesn't matter since indexes of
         * shared catalogs can't have such updates.
         */
        relationId = indextup->indexrelid;
        databaseId = MyDatabaseId;
    }
    else if (tupleRelId == ConstraintRelationId)
    {
        Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);

        /*
         * Foreign keys are part of relcache entries, too, so send out an
         * inval for the table that the FK applies to.
         */
        if (constrtup->contype == CONSTRAINT_FOREIGN &&
            OidIsValid(constrtup->conrelid))
        {
            relationId = constrtup->conrelid;
            databaseId = MyDatabaseId;
        }
        else
            return;
    }
    else
        return;

    /*
     * Yes.  We need to register a relcache invalidation event.
     */
    RegisterRelcacheInvalidation(databaseId, relationId);
}
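
/*
 * Caller-side sketch (illustrative, not in the original file): heapam.c
 * invokes this once per tuple operation, matching the contract above:
 *
 *  CacheInvalidateHeapTuple(relation, heaptup, NULL);      (heap_insert)
 *  CacheInvalidateHeapTuple(relation, &tp, NULL);          (heap_delete)
 *  CacheInvalidateHeapTuple(relation, &oldtup, heaptup);   (heap_update)
 */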

/*
 * CacheInvalidateCatalog
 *      Register invalidation of the whole content of a system catalog.
 *
 * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
 * changed any tuples as moved them around.  Some uses of catcache entries
 * expect their TIDs to be correct, so we have to blow away the entries.
 *
 * Note: we expect caller to verify that the rel actually is a system
 * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
 */
void
CacheInvalidateCatalog(Oid catalogId)
{
    Oid         databaseId;

    PrepareInvalidationState();

    if (IsSharedRelation(catalogId))
        databaseId = InvalidOid;
    else
        databaseId = MyDatabaseId;

    RegisterCatalogInvalidation(databaseId, catalogId);
}

/*
 * CacheInvalidateRelcache
 *      Register invalidation of the specified relation's relcache entry
 *      at end of command.
 *
 * This is used in places that need to force relcache rebuild but aren't
 * changing any of the tuples recognized as contributors to the relcache
 * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
 */
void
CacheInvalidateRelcache(Relation relation)
{
    Oid         databaseId;
    Oid         relationId;

    PrepareInvalidationState();

    relationId = RelationGetRelid(relation);
    if (relation->rd_rel->relisshared)
        databaseId = InvalidOid;
    else
        databaseId = MyDatabaseId;

    RegisterRelcacheInvalidation(databaseId, relationId);
}

/*
 * CacheInvalidateRelcacheAll
 *      Register invalidation of the whole relcache at the end of command.
 *
 * This is used by ALTER PUBLICATION, since publication changes may affect
 * a large number of tables.
 */
void
CacheInvalidateRelcacheAll(void)
{
    PrepareInvalidationState();

    RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
}

/*
 * CacheInvalidateRelcacheByTuple
 *      As above, but relation is identified by passing its pg_class tuple.
 */
void
CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
{
    Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
    Oid         databaseId;
    Oid         relationId;

    PrepareInvalidationState();

    relationId = classtup->oid;
    if (classtup->relisshared)
        databaseId = InvalidOid;
    else
        databaseId = MyDatabaseId;
    RegisterRelcacheInvalidation(databaseId, relationId);
}

/*
 * CacheInvalidateRelcacheByRelid
 *      As above, but relation is identified by passing its OID.
 *      This is the least efficient of the three options; use one of
 *      the above routines if you have a Relation or pg_class tuple.
 */
void
CacheInvalidateRelcacheByRelid(Oid relid)
{
    HeapTuple   tup;

    PrepareInvalidationState();

    tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for relation %u", relid);
    CacheInvalidateRelcacheByTuple(tup);
    ReleaseSysCache(tup);
}
1350 
1351 /*
1352  * CacheInvalidateSmgr
1353  * Register invalidation of smgr references to a physical relation.
1354  *
1355  * Sending this type of invalidation msg forces other backends to close open
1356  * smgr entries for the rel. This should be done to flush dangling open-file
1357  * references when the physical rel is being dropped or truncated. Because
1358  * these are nontransactional (i.e., not-rollback-able) operations, we just
1359  * send the inval message immediately without any queuing.
1360  *
1361  * Note: in most cases there will have been a relcache flush issued against
1362  * the rel at the logical level. We need a separate smgr-level flush because
1363  * it is possible for backends to have open smgr entries for rels they don't
1364  * have a relcache entry for, e.g. because the only thing they ever did with
1365  * the rel is write out dirty shared buffers.
1366  *
1367  * Note: because these messages are nontransactional, they won't be captured
1368  * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1369  * should happen in low-level smgr.c routines, which are executed while
1370  * replaying WAL as well as when creating it.
1371  *
1372  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1373  * three bytes of the backend ID using what would otherwise be padding space.
1374  * Thus, the maximum possible backend ID is 2^23-1.
1375  */
1376 void
1378 {
1380 
1381  msg.sm.id = SHAREDINVALSMGR_ID;
1382  msg.sm.backend_hi = rnode.backend >> 16;
1383  msg.sm.backend_lo = rnode.backend & 0xffff;
1384  msg.sm.rnode = rnode.node;
1385  /* check AddCatcacheInvalidationMessage() for an explanation */
1386  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1387 
1388  SendSharedInvalidMessages(&msg, 1);
1389 }

/*
 * CacheInvalidateRelmap
 *      Register invalidation of the relation mapping for a database,
 *      or for the shared catalogs if databaseId is zero.
 *
 * Sending this type of invalidation msg forces other backends to re-read
 * the indicated relation mapping file.  It is also necessary to send a
 * relcache inval for the specific relations whose mapping has been altered,
 * else the relcache won't get updated with the new filenode data.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
 * should happen in low-level relmapper.c routines, which are executed while
 * replaying WAL as well as when creating it.
 */
void
CacheInvalidateRelmap(Oid databaseId)
{
    SharedInvalidationMessage msg;

    msg.rm.id = SHAREDINVALRELMAP_ID;
    msg.rm.dbId = databaseId;
    /* check AddCatcacheInvalidationMessage() for an explanation */
    VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

    SendSharedInvalidMessages(&msg, 1);
}
1419 
1420 /*
1421  * CacheRegisterSyscacheCallback
1422  * Register the specified function to be called for all future
1423  * invalidation events in the specified cache. The cache ID and the
1424  * hash value of the tuple being invalidated will be passed to the
1425  * function.
1426  *
1427  * NOTE: Hash value zero will be passed if a cache reset request is received.
1428  * In this case the called routines should flush all cached state.
1429  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1430  * worth troubling over, especially since most of the current callees just
1431  * flush all cached state anyway.
1432  */
1433 void
1436  Datum arg)
1437 {
1438  if (cacheid < 0 || cacheid >= SysCacheSize)
1439  elog(FATAL, "invalid cache ID: %d", cacheid);
1441  elog(FATAL, "out of syscache_callback_list slots");
1442 
1443  if (syscache_callback_links[cacheid] == 0)
1444  {
1445  /* first callback for this cache */
1447  }
1448  else
1449  {
1450  /* add to end of chain, so that older callbacks are called first */
1451  int i = syscache_callback_links[cacheid] - 1;
1452 
1453  while (syscache_callback_list[i].link > 0)
1454  i = syscache_callback_list[i].link - 1;
1456  }
1457 
1462 
1464 }
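
/*
 * Usage sketch (illustrative, not in the original file): a module caching
 * pg_proc-derived state might register, from its initialization function:
 *
 *  static void
 *  my_proc_callback(Datum arg, int cacheid, uint32 hashvalue)
 *  {
 *      ... hashvalue == 0 means a cache reset: drop everything ...
 *  }
 *
 *  CacheRegisterSyscacheCallback(PROCOID, my_proc_callback, (Datum) 0);
 *
 * my_proc_callback is hypothetical; any cache ID from syscache.h works the
 * same way.  Note there is no unregister operation, so a callback should be
 * registered only once per backend.
 */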

/*
 * CacheRegisterRelcacheCallback
 *      Register the specified function to be called for all future
 *      relcache invalidation events.  The OID of the relation being
 *      invalidated will be passed to the function.
 *
 * NOTE: InvalidOid will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 */
void
CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
                              Datum arg)
{
    if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
        elog(FATAL, "out of relcache_callback_list slots");

    relcache_callback_list[relcache_callback_count].function = func;
    relcache_callback_list[relcache_callback_count].arg = arg;

    ++relcache_callback_count;
}
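
/*
 * Usage sketch (illustrative, not in the original file):
 *
 *  static void
 *  my_rel_callback(Datum arg, Oid relid)
 *  {
 *      if (relid == InvalidOid)
 *          ... flush all cached state ...
 *      else
 *          ... flush state that depends on relid ...
 *  }
 *
 *  CacheRegisterRelcacheCallback(my_rel_callback, (Datum) 0);
 *
 * my_rel_callback is hypothetical; plancache.c's PlanCacheRelCallback()
 * follows this same shape.
 */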

/*
 * CallSyscacheCallbacks
 *
 * This is exported so that CatalogCacheFlushCatalog can call it, saving
 * this module from knowing which catcache IDs correspond to which catalogs.
 */
void
CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
{
    int         i;

    if (cacheid < 0 || cacheid >= SysCacheSize)
        elog(ERROR, "invalid cache ID: %d", cacheid);

    i = syscache_callback_links[cacheid] - 1;
    while (i >= 0)
    {
        struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

        Assert(ccitem->id == cacheid);
        ccitem->function(ccitem->arg, cacheid, hashvalue);
        i = ccitem->link - 1;
    }
}

/*
 * LogLogicalInvalidations
 *
 * Emit WAL for invalidations.  This is currently only used for logging
 * invalidations at the command end or at commit time if any invalidations
 * are pending.
 */
void
LogLogicalInvalidations()
{
    xl_xact_invals xlrec;
    SharedInvalidationMessage *invalMessages;
    int         nmsgs = 0;

    /* Quick exit if we haven't done anything with invalidation messages. */
    if (transInvalInfo == NULL)
        return;

    ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
                                     MakeSharedInvalidMessagesArray);

    Assert(!(numSharedInvalidMessagesArray > 0 &&
             SharedInvalidMessagesArray == NULL));

    invalMessages = SharedInvalidMessagesArray;
    nmsgs = numSharedInvalidMessagesArray;
    SharedInvalidMessagesArray = NULL;
    numSharedInvalidMessagesArray = 0;

    if (nmsgs > 0)
    {
        /* prepare record */
        memset(&xlrec, 0, MinSizeOfXactInvals);
        xlrec.nmsgs = nmsgs;

        /* perform insertion */
        XLogBeginInsert();
        XLogRegisterData((char *) (&xlrec), MinSizeOfXactInvals);
        XLogRegisterData((char *) invalMessages,
                         nmsgs * sizeof(SharedInvalidationMessage));
        XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);

        pfree(invalMessages);
    }
}