PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
inval.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  * POSTGRES cache invalidation dispatcher code.
5  *
6  * This is subtle stuff, so pay attention:
7  *
8  * When a tuple is updated or deleted, our standard time qualification rules
9  * consider that it is *still valid* so long as we are in the same command,
10  * ie, until the next CommandCounterIncrement() or transaction commit.
11  * (See utils/time/tqual.c, and note that system catalogs are generally
12  * scanned under the most current snapshot available, rather than the
13  * transaction snapshot.) At the command boundary, the old tuple stops
14  * being valid and the new version, if any, becomes valid. Therefore,
15  * we cannot simply flush a tuple from the system caches during heap_update()
16  * or heap_delete(). The tuple is still good at that point; what's more,
17  * even if we did flush it, it might be reloaded into the caches by a later
18  * request in the same command. So the correct behavior is to keep a list
19  * of outdated (updated/deleted) tuples and then do the required cache
20  * flushes at the next command boundary. We must also keep track of
21  * inserted tuples so that we can flush "negative" cache entries that match
22  * the new tuples; again, that mustn't happen until end of command.
23  *
24  * Once we have finished the command, we still need to remember inserted
25  * tuples (including new versions of updated tuples), so that we can flush
26  * them from the caches if we abort the transaction. Similarly, we'd better
27  * be able to flush "negative" cache entries that may have been loaded in
28  * place of deleted tuples, so we still need the deleted ones too.
29  *
30  * If we successfully complete the transaction, we have to broadcast all
31  * these invalidation events to other backends (via the SI message queue)
32  * so that they can flush obsolete entries from their caches. Note we have
33  * to record the transaction commit before sending SI messages, otherwise
34  * the other backends won't see our updated tuples as good.
35  *
36  * When a subtransaction aborts, we can process and discard any events
37  * it has queued. When a subtransaction commits, we just add its events
38  * to the pending lists of the parent transaction.
39  *
40  * In short, we need to remember until xact end every insert or delete
41  * of a tuple that might be in the system caches. Updates are treated as
42  * two events, delete + insert, for simplicity. (If the update doesn't
43  * change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  * We do not need to register EVERY tuple operation in this way, just those
46  * on tuples in relations that have associated catcaches. We do, however,
47  * have to register every operation on every tuple that *could* be in a
48  * catcache, whether or not it currently is in our cache. Also, if the
49  * tuple is in a relation that has multiple catcaches, we need to register
50  * an invalidation message for each such catcache. catcache.c's
51  * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  * catcaches may need invalidation for a given tuple.
53  *
54  * Also, whenever we see an operation on a pg_class, pg_attribute, or
55  * pg_index tuple, we register a relcache flush operation for the relation
56  * described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  *
58  * We keep the relcache flush requests in lists separate from the catcache
59  * tuple flush requests. This allows us to issue all the pending catcache
60  * flushes before we issue relcache flushes, which saves us from loading
61  * a catcache tuple during relcache load only to flush it again right away.
62  * Also, we avoid queuing multiple relcache flush requests for the same
63  * relation, since a relcache flush is relatively expensive to do.
64  * (XXX is it worth testing likewise for duplicate catcache flush entries?
65  * Probably not.)
66  *
67  * If a relcache flush is issued for a system relation that we preload
68  * from the relcache init file, we must also delete the init file so that
69  * it will be rebuilt during the next backend restart. The actual work of
70  * manipulating the init file is in relcache.c, but we keep track of the
71  * need for it here.
72  *
73  * The request lists proper are kept in CurTransactionContext of their
74  * creating (sub)transaction, since they can be forgotten on abort of that
75  * transaction but must be kept till top-level commit otherwise. For
76  * simplicity we keep the controlling list-of-lists in TopTransactionContext.
77  *
78  * Currently, inval messages are sent without regard for the possibility
79  * that the object described by the catalog tuple might be a session-local
80  * object such as a temporary table. This is because (1) this code has
81  * no practical way to tell the difference, and (2) it is not certain that
82  * other backends don't have catalog cache or even relcache entries for
83  * such tables, anyway; there is nothing that prevents that. It might be
84  * worth trying to avoid sending such inval traffic in the future, if those
85  * problems can be overcome cheaply.
86  *
87  *
88  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
89  * Portions Copyright (c) 1994, Regents of the University of California
90  *
91  * IDENTIFICATION
92  * src/backend/utils/cache/inval.c
93  *
94  *-------------------------------------------------------------------------
95  */
96 #include "postgres.h"
97 
98 #include <limits.h>
99 
100 #include "access/htup_details.h"
101 #include "access/xact.h"
102 #include "catalog/catalog.h"
103 #include "miscadmin.h"
104 #include "storage/sinval.h"
105 #include "storage/smgr.h"
106 #include "utils/catcache.h"
107 #include "utils/inval.h"
108 #include "utils/memdebug.h"
109 #include "utils/memutils.h"
110 #include "utils/rel.h"
111 #include "utils/relmapper.h"
112 #include "utils/snapmgr.h"
113 #include "utils/syscache.h"
114 
115 
116 /*
117  * To minimize palloc traffic, we keep pending requests in successively-
118  * larger chunks (a slightly more sophisticated version of an expansible
119  * array). All request types can be stored as SharedInvalidationMessage
120  * records. The ordering of requests within a list is never significant.
121  */
122 typedef struct InvalidationChunk
123 {
124  struct InvalidationChunk *next; /* list link */
125  int nitems; /* # items currently stored in chunk */
126  int maxitems; /* size of allocated array in this chunk */
127  SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
/* NOTE(review): extraction dropped line 128 here — presumably the closing
 * "} InvalidationChunk;" of this typedef. Verify against upstream. */
129 
/* NOTE(review): line 130 dropped — presumably the header of a second typedef
 * (likely "typedef struct InvalidationListHeader") whose two members follow:
 * one chunk list for catcache messages, one for relcache messages. */
131 {
132  InvalidationChunk *cclist; /* list of chunks holding catcache msgs */
133  InvalidationChunk *rclist; /* list of chunks holding relcache msgs */
/* NOTE(review): line 134 dropped — presumably the closing "} ...;" line. */
135 
136 /*----------------
137  * Invalidation info is divided into two lists:
138  * 1) events so far in current command, not yet reflected to caches.
139  * 2) events in previous commands of current transaction; these have
140  * been reflected to local caches, and must be either broadcast to
141  * other backends or rolled back from local cache when we commit
142  * or abort the transaction.
143  * Actually, we need two such lists for each level of nested transaction,
144  * so that we can discard events from an aborted subtransaction. When
145  * a subtransaction commits, we append its lists to the parent's lists.
146  *
147  * The relcache-file-invalidated flag can just be a simple boolean,
148  * since we only act on it at transaction commit; we don't care which
149  * command of the transaction set it.
150  *----------------
151  */
152 
153 typedef struct TransInvalidationInfo
154 {
155  /* Back link to parent transaction's info */
/* NOTE(review): line 156 dropped — the member declaration for the parent
 * back-link (a pointer to struct TransInvalidationInfo). Verify upstream. */
157 
158  /* Subtransaction nesting depth */
159  int my_level;
160 
161  /* head of current-command event list */
/* NOTE(review): line 162 dropped — the CurrentCmdInvalidMsgs member, per the
 * comment above and the uses later in this file (e.g. AtEOXact_Inval). */
163 
164  /* head of previous-commands event list */
/* NOTE(review): line 165 dropped — the PriorCmdInvalidMsgs member. */
166 
167  /* init file must be invalidated? */
/* NOTE(review): lines 168-169 dropped — the RelcacheInitFileInval flag
 * (a bool, judging by assignments like "...->RelcacheInitFileInval = true"
 * elsewhere in this file) and possibly the struct closer. */
170 
/* NOTE(review): lines 171-175 dropped — presumably the typedef closer and the
 * file-static state referenced below: transInvalInfo,
 * SharedInvalidMessagesArray, and its count/capacity counters. */
172 
176 
177 
178 /*
179  * Dynamically-registered callback functions. Current implementation
180  * assumes there won't be very many of these at once; could improve if needed.
181  */
182 
183 #define MAX_SYSCACHE_CALLBACKS 32
184 #define MAX_RELCACHE_CALLBACKS 10
185 
186 static struct SYSCACHECALLBACK
187 {
188  int16 id; /* cache number */
/* NOTE(review): lines 189-191 dropped — presumably the callback-function
 * pointer and argument members, plus the array declarator
 * "} syscache_callback_list[MAX_SYSCACHE_CALLBACKS];" used later in
 * InvalidateSystemCaches(). Verify against upstream. */
192 
193 static int syscache_callback_count = 0;
194 
195 static struct RELCACHECALLBACK
196 {
/* NOTE(review): lines 197-199 dropped — presumably the relcache callback's
 * function-pointer and arg members plus
 * "} relcache_callback_list[MAX_RELCACHE_CALLBACKS];" (see the iteration in
 * LocalExecuteInvalidationMessage below, which reads ->function and ->arg). */
200 
201 static int relcache_callback_count = 0;
202 
203 /* ----------------------------------------------------------------
204  * Invalidation list support functions
205  *
206  * These three routines encapsulate processing of the "chunked"
207  * representation of what is logically just a list of messages.
208  * ----------------------------------------------------------------
209  */
210 
211 /*
212  * AddInvalidationMessage
213  * Add an invalidation message to a list (of chunks).
214  *
215  * Note that we do not pay any great attention to maintaining the original
216  * ordering of the messages.
217  */
218 static void
/* NOTE(review): lines 219-220 dropped — the function signature, which by the
 * body below takes an InvalidationChunk **listHdr and a message pointer
 * named msg. Verify against upstream. */
221 {
222  InvalidationChunk *chunk = *listHdr;
223 
224  if (chunk == NULL)
225  {
226  /* First time through; create initial chunk */
227 #define FIRSTCHUNKSIZE 32
228  chunk = (InvalidationChunk *)
/* NOTE(review): line 229 dropped — presumably the allocation call (a palloc
 * in CurTransactionContext, per the file-header comment) whose size
 * expression continues on the next two lines. */
230  offsetof(InvalidationChunk, msgs) +
/* NOTE(review): line 231 dropped — presumably
 * "FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));" by symmetry with
 * the doubling branch below. */
232  chunk->nitems = 0;
233  chunk->maxitems = FIRSTCHUNKSIZE;
234  chunk->next = *listHdr;
235  *listHdr = chunk;
236  }
237  else if (chunk->nitems >= chunk->maxitems)
238  {
239  /* Need another chunk; double size of last chunk */
240  int chunksize = 2 * chunk->maxitems;
241 
242  chunk = (InvalidationChunk *)
/* NOTE(review): line 243 dropped — the allocation call, as above. */
244  offsetof(InvalidationChunk, msgs) +
245  chunksize * sizeof(SharedInvalidationMessage));
246  chunk->nitems = 0;
247  chunk->maxitems = chunksize;
248  chunk->next = *listHdr;
249  *listHdr = chunk;
250  }
251  /* Okay, add message to current chunk */
252  chunk->msgs[chunk->nitems] = *msg;
253  chunk->nitems++;
254 }
255 
256 /*
257  * Append one list of invalidation message chunks to another, resetting
258  * the source chunk-list pointer to NULL.
259  */
260 static void
/* NOTE(review): line 261 dropped — the first line of the signature; the
 * second parameter line below shows it also takes a destination header
 * (destHdr), used at the splice below. */
262  InvalidationChunk **srcHdr)
263 {
264  InvalidationChunk *chunk = *srcHdr;
265 
266  if (chunk == NULL)
267  return; /* nothing to do */
268 
269  /* Walk to the tail of the source list ... */
270  while (chunk->next != NULL)
271  chunk = chunk->next;
272 
273  /* ... then splice the old destination list after it, and make the
274  * source list the new head of the destination. */
275  chunk->next = *destHdr;
276 
277  *destHdr = *srcHdr;
278 
279  *srcHdr = NULL;
280 }
278 
279 /*
280  * Process a list of invalidation messages.
281  *
282  * This is a macro that executes the given code fragment for each message in
283  * a message chunk list. The fragment should refer to the message as *msg.
284  */
285 #define ProcessMessageList(listHdr, codeFragment) \
286  do { \
287  InvalidationChunk *_chunk; \
288  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
289  { \
290  int _cindex; \
291  for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
292  { \
293  SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
294  codeFragment; \
295  } \
296  } \
297  } while (0)
298 
299 /*
300  * Process a list of invalidation messages group-wise.
301  *
302  * As above, but the code fragment can handle an array of messages.
303  * The fragment should refer to the messages as msgs[], with n entries.
304  * (One invocation of codeFragment per chunk, not per message.)
305  */
306 #define ProcessMessageListMulti(listHdr, codeFragment) \
307  do { \
308  InvalidationChunk *_chunk; \
309  for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
310  { \
311  SharedInvalidationMessage *msgs = _chunk->msgs; \
312  int n = _chunk->nitems; \
313  codeFragment; \
314  } \
315  } while (0)
315 
316 
317 /* ----------------------------------------------------------------
318  * Invalidation set support functions
319  *
320  * These routines understand about the division of a logical invalidation
321  * list into separate physical lists for catcache and relcache entries.
322  * ----------------------------------------------------------------
323  */
324 
325 /*
326  * Add a catcache inval entry
327  */
328 static void
/* NOTE(review): line 329 dropped — the first line of the signature; the
 * continuation line below and the body show it takes a list header (hdr)
 * plus the catcache id, hash value, and database OID. */
330  int id, uint32 hashValue, Oid dbId)
331 {
/* NOTE(review): line 332 dropped — presumably the local
 * SharedInvalidationMessage variable "msg" filled in below. */
333 
334  Assert(id < CHAR_MAX);
335  msg.cc.id = (int8) id;
336  msg.cc.dbId = dbId;
337  msg.cc.hashValue = hashValue;
338 
339  /*
340  * Define padding bytes in SharedInvalidationMessage structs to be
341  * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
342  * multiple processes, will cause spurious valgrind warnings about
343  * undefined memory being used. That's because valgrind remembers the
344  * undefined bytes from the last local process's store, not realizing that
345  * another process has written since, filling the previously uninitialized
346  * bytes
347  */
348  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
349 
350  AddInvalidationMessage(&hdr->cclist, &msg);
351 }
352 
353 /*
354  * Add a whole-catalog inval entry
355  */
356 static void
/* NOTE(review): line 357 dropped — the first line of the signature (list
 * header parameter "hdr", per the AddInvalidationMessage call below). */
358  Oid dbId, Oid catId)
359 {
/* NOTE(review): line 360 dropped — presumably the local message variable,
 * and line 362 below presumably set msg.cat.id before the fields here. */
361 
363  msg.cat.dbId = dbId;
364  msg.cat.catId = catId;
365  /* check AddCatcacheInvalidationMessage() for an explanation */
366  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
367 
368  AddInvalidationMessage(&hdr->cclist, &msg);
369 }
370 
371 /*
372  * Add a relcache inval entry
373  */
374 static void
/* NOTE(review): line 375 dropped — first line of the signature (hdr). */
376  Oid dbId, Oid relId)
377 {
/* NOTE(review): line 378 dropped — presumably the local message variable. */
379 
380  /*
381  * Don't add a duplicate item.
382  * We assume dbId need not be checked because it will never change.
383  * InvalidOid for relId means all relations so we don't need to add
384  * individual ones when it is present.
385  */
/* NOTE(review): line 386 dropped — presumably the opening of a
 * ProcessMessageList(hdr->rclist, ...) scan; that would explain the
 * otherwise-stray "return)" closing parenthesis four lines below. */
387  if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
388  (msg->rc.relId == relId ||
389  msg->rc.relId == InvalidOid))
390  return);
391 
392  /* OK, add the item */
/* NOTE(review): line 393 dropped — presumably set msg.rc.id. */
394  msg.rc.dbId = dbId;
395  msg.rc.relId = relId;
396  /* check AddCatcacheInvalidationMessage() for an explanation */
397  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
398 
399  AddInvalidationMessage(&hdr->rclist, &msg);
400 }
401 
402 /*
403  * Add a snapshot inval entry
404  */
405 static void
/* NOTE(review): line 406 dropped — first line of the signature (hdr). */
407  Oid dbId, Oid relId)
408 {
/* NOTE(review): line 409 dropped — presumably the local message variable. */
410 
411  /* Don't add a duplicate item */
412  /* We assume dbId need not be checked because it will never change */
/* NOTE(review): line 413 dropped — presumably the opening of a
 * ProcessMessageList(hdr->rclist, ...) scan, matching the stray ")" on the
 * "return)" line below (same shape as AddRelcacheInvalidationMessage). */
414  if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
415  msg->sn.relId == relId)
416  return);
417 
418  /* OK, add the item */
/* NOTE(review): line 419 dropped — presumably set msg.sn.id. */
420  msg.sn.dbId = dbId;
421  msg.sn.relId = relId;
422  /* check AddCatcacheInvalidationMessage() for an explanation */
423  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
424 
425  AddInvalidationMessage(&hdr->rclist, &msg);
426 }
427 
428 /*
429  * Append one list of invalidation messages to another, resetting
430  * the source list to empty.
431  */
432 static void
/* NOTE(review): lines 433-434 dropped — the signature (by analogy with the
 * chunk-level helper above, presumably two list-header pointers), and lines
 * 436-437 dropped inside the body — presumably two
 * AppendInvalidationMessageList() calls, one for cclist and one for rclist.
 * Verify against upstream. */
435 {
438 }
439 
440 /*
441  * Execute the given function for all the messages in an invalidation list.
442  * The list is not altered.
443  *
444  * catcache entries are processed first, for reasons mentioned above.
445  */
446 static void
/* NOTE(review): line 447 dropped — the first line of the signature (the list
 * header "hdr", per the macro calls below). */
448  void (*func) (SharedInvalidationMessage *msg))
449 {
450  ProcessMessageList(hdr->cclist, func(msg));
451  ProcessMessageList(hdr->rclist, func(msg));
452 }
453 
454 /*
455  * As above, but the function is able to process an array of messages
456  * rather than just one at a time.
457  */
458 static void
/* NOTE(review): line 459 dropped — the first line of the signature (hdr). */
460  void (*func) (const SharedInvalidationMessage *msgs, int n))
461 {
462  ProcessMessageListMulti(hdr->cclist, func(msgs, n));
463  ProcessMessageListMulti(hdr->rclist, func(msgs, n));
464 }
465 
466 /* ----------------------------------------------------------------
467  * private support functions
468  * ----------------------------------------------------------------
469  */
470 
471 /*
472  * RegisterCatcacheInvalidation
473  *
474  * Register an invalidation event for a catcache tuple entry.
475  */
476 static void
/* NOTE(review): line 477 dropped — first signature line (cacheId param). */
478  uint32 hashValue,
479  Oid dbId)
480 {
/* NOTE(review): line 481 dropped — presumably the
 * AddCatcacheInvalidationMessage(...) call whose argument list follows. */
482  cacheId, hashValue, dbId);
483 }
484 
485 /*
486  * RegisterCatalogInvalidation
487  *
488  * Register an invalidation event for all catcache entries from a catalog.
489  */
490 static void
/* NOTE(review): line 491 dropped — the signature (dbId, catId per the
 * argument line below), and line 493 dropped — presumably the
 * AddCatalogInvalidationMessage(...) call these arguments belong to. */
492 {
494  dbId, catId);
495 }
496 
497 /*
498  * RegisterRelcacheInvalidation
499  *
500  * As above, but register a relcache invalidation event.
501  */
502 static void
/* NOTE(review): line 503 dropped — the signature (dbId, relId), and line 505
 * dropped — presumably the AddRelcacheInvalidationMessage(...) call. */
504 {
506  dbId, relId);
507 
508  /*
509  * Most of the time, relcache invalidation is associated with system
510  * catalog updates, but there are a few cases where it isn't. Quick hack
511  * to ensure that the next CommandCounterIncrement() will think that we
512  * need to do CommandEndInvalidationMessages().
513  */
514  (void) GetCurrentCommandId(true);
515 
516  /*
517  * If the relation being invalidated is one of those cached in the local
518  * relcache init file, mark that we need to zap that file at commit.
519  * Same is true when we are invalidating whole relcache.
520  */
521  if (OidIsValid(dbId) &&
522  (RelationIdIsInInitFile(relId) || relId == InvalidOid))
523  transInvalInfo->RelcacheInitFileInval = true;
524 }
525 
526 /*
527  * RegisterSnapshotInvalidation
528  *
529  * Register an invalidation event for MVCC scans against a given catalog.
530  * Only needed for catalogs that don't have catcaches.
531  */
532 static void
/* NOTE(review): line 533 dropped — the signature (dbId, relId), and line 535
 * dropped — presumably the AddSnapshotInvalidationMessage(...) call. */
534 {
536  dbId, relId);
537 }
538 
539 /*
540  * LocalExecuteInvalidationMessage
541  *
542  * Process a single invalidation message (which could be of any type).
543  * Only the local caches are flushed; this does not transmit the message
544  * to other backends.
545  */
546 void
/* NOTE(review): line 547 dropped — the signature; the body dispatches on
 * msg->id, so the parameter is a SharedInvalidationMessage pointer "msg". */
548 {
549  if (msg->id >= 0)
550  {
551  if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
552  {
/* NOTE(review): lines 553 and 555 dropped — presumably the catalog-snapshot
 * invalidation and the catcache flush call that precede the syscache
 * callbacks below. Verify against upstream. */
554 
556 
557  CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
558  }
559  }
560  else if (msg->id == SHAREDINVALCATALOG_ID)
561  {
562  if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
563  {
/* NOTE(review): lines 564 and 566 dropped — presumably a snapshot
 * invalidation and the CatalogCacheFlushCatalog(msg->cat.catId) call that
 * the surviving comment below refers to. */
565 
567 
568  /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
569  }
570  }
571  else if (msg->id == SHAREDINVALRELCACHE_ID)
572  {
573  if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
574  {
575  int i;
576 
577  if (msg->rc.relId == InvalidOid)
/* NOTE(review): lines 578 and 580 dropped — the two relcache-invalidation
 * calls (whole-cache vs. single-relation form, by the if/else shape). */
579  else
581 
582  for (i = 0; i < relcache_callback_count; i++)
583  {
584  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
585 
586  (*ccitem->function) (ccitem->arg, msg->rc.relId);
587  }
588  }
589  }
590  else if (msg->id == SHAREDINVALSMGR_ID)
591  {
592  /*
593  * We could have smgr entries for relations of other databases, so no
594  * short-circuit test is possible here.
595  */
596  RelFileNodeBackend rnode;
597 
598  rnode.node = msg->sm.rnode;
599  /* backend id was split into hi/lo halves to fit the message format */
600  rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
601  smgrclosenode(rnode);
602  }
603  else if (msg->id == SHAREDINVALRELMAP_ID)
604  {
605  /* We only care about our own database and shared catalogs */
606  if (msg->rm.dbId == InvalidOid)
607  RelationMapInvalidate(true);
608  else if (msg->rm.dbId == MyDatabaseId)
609  RelationMapInvalidate(false);
610  }
611  else if (msg->id == SHAREDINVALSNAPSHOT_ID)
612  {
613  /* We only care about our own database and shared catalogs */
614  if (msg->rm.dbId == InvalidOid)
/* NOTE(review): lines 614 and 616 dropped — the two snapshot-invalidation
 * calls (shared vs. local catalog form). */
615 
617  else if (msg->rm.dbId == MyDatabaseId)
618  }
619  else
620  elog(FATAL, "unrecognized SI message ID: %d", msg->id);
621 }
621 
622 /*
623  * InvalidateSystemCaches
624  *
625  * This blows away all tuples in the system catalog caches and
626  * all the cached relation descriptors and smgr cache entries.
627  * Relation descriptors that have positive refcounts are then rebuilt.
628  *
629  * We call this when we see a shared-inval-queue overflow signal,
630  * since that tells us we've lost some shared-inval messages and hence
631  * don't know what needs to be invalidated.
632  */
633 void
/* NOTE(review): line 634 dropped — the signature (no parameters visible). */
635 {
636  int i;
637 
/* NOTE(review): lines 638-639 dropped — presumably the snapshot and catcache
 * reset calls that precede the relcache flush below. Verify upstream. */
640  RelationCacheInvalidate(); /* gets smgr and relmap too */
641 
642  /* Let every registered syscache callback know everything changed */
643  for (i = 0; i < syscache_callback_count; i++)
644  {
645  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
646 
647  (*ccitem->function) (ccitem->arg, ccitem->id, 0);
648  }
649 
650  /* Likewise for relcache callbacks, with InvalidOid = "all relations" */
651  for (i = 0; i < relcache_callback_count; i++)
652  {
653  struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
654 
655  (*ccitem->function) (ccitem->arg, InvalidOid);
656  }
657 }
656 
657 
658 /* ----------------------------------------------------------------
659  * public functions
660  * ----------------------------------------------------------------
661  */
662 
663 /*
664  * AcceptInvalidationMessages
665  * Read and process invalidation messages from the shared invalidation
666  * message queue.
667  *
668  * Note:
669  * This should be called as the first step in processing a transaction.
670  */
671 void
/* NOTE(review): line 672 dropped — the signature, and lines 674-675 dropped —
 * presumably the ReceiveSharedInvalidMessages(...) call naming
 * LocalExecuteInvalidationMessage and InvalidateSystemCaches as handlers.
 * Verify against upstream. */
673 {
676 
677  /*
678  * Test code to force cache flushes anytime a flush could happen.
679  *
680  * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
681  * fairly thorough test that the system contains no cache-flush hazards.
682  * However, it also makes the system unbelievably slow --- the regression
683  * tests take about 100 times longer than normal.
684  *
685  * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
686  * slows things by at least a factor of 10000, so I wouldn't suggest
687  * trying to run the entire regression tests that way. It's useful to try
688  * a few simple tests, to make sure that cache reload isn't subject to
689  * internal cache-flush hazards, but after you've done a few thousand
690  * recursive reloads it's unlikely you'll learn more.
691  */
692 #if defined(CLOBBER_CACHE_ALWAYS)
693  {
694  static bool in_recursion = false;
695 
696  if (!in_recursion)
697  {
698  in_recursion = true;
/* NOTE(review): line 699 dropped — presumably the recursion-guarded
 * InvalidateSystemCaches() call; line 704 (below) likewise dropped the
 * unguarded recursive variant. */
700  in_recursion = false;
701  }
702  }
703 #elif defined(CLOBBER_CACHE_RECURSIVELY)
705 #endif
706 }
707 
708 /*
709  * PrepareInvalidationState
710  * Initialize inval lists for the current (sub)transaction.
711  */
712 static void
/* NOTE(review): line 713 dropped — the signature (no parameters visible). */
714 {
715  TransInvalidationInfo *myInfo;
716 
717  /* Already have an entry for this nesting level?  Then nothing to do. */
718  if (transInvalInfo != NULL &&
719  transInvalInfo->my_level == GetCurrentTransactionNestLevel())
720  return;
721 
722  myInfo = (TransInvalidationInfo *)
/* NOTE(review): line 722 dropped — presumably the allocation call (the
 * file-header comment says the list-of-lists lives in
 * TopTransactionContext). */
723  sizeof(TransInvalidationInfo));
724  myInfo->parent = transInvalInfo;
/* NOTE(review): line 725 dropped — presumably sets myInfo->my_level from the
 * current transaction nest level, given the Assert below compares levels. */
726 
727  /*
728  * If there's any previous entry, this one should be for a deeper nesting
729  * level.
730  */
731  Assert(transInvalInfo == NULL ||
732  myInfo->my_level > transInvalInfo->my_level);
733 
734  transInvalInfo = myInfo;
735 }
736 
737 /*
738  * PostPrepare_Inval
739  * Clean up after successful PREPARE.
740  *
741  * Here, we want to act as though the transaction aborted, so that we will
742  * undo any syscache changes it made, thereby bringing us into sync with the
743  * outside world, which doesn't believe the transaction committed yet.
744  *
745  * If the prepared transaction is later aborted, there is nothing more to
746  * do; if it commits, we will receive the consequent inval messages just
747  * like everyone else.
748  */
749 void
/* NOTE(review): line 750 dropped — the signature (no parameters visible). */
751 {
752  AtEOXact_Inval(false);
753 }
754 
755 /*
756  * Collect invalidation messages into SharedInvalidMessagesArray array.
757  * (Used as a ProcessInvalidationMessagesMulti callback; receives msgs/n
758  * per chunk and appends them to one contiguous array for the WAL record.)
759  */
760 static void
/* NOTE(review): line 759 dropped — the signature; per the memcpy below it
 * receives a message array "msgs" and a count "n". */
761 {
762  /*
763  * Initialise array first time through in each commit
764  */
765  if (SharedInvalidMessagesArray == NULL)
766  {
/* NOTE(review): lines 766-767 dropped — presumably initialize the count and
 * capacity counters before the first palloc below. */
768 
769  /*
770  * Although this is being palloc'd we don't actually free it directly.
771  * We're so close to EOXact that we now we're going to lose it anyhow.
772  */
773  SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
774  * sizeof(SharedInvalidationMessage));
775  }
776 
/* NOTE(review): lines 777, 779-780, 783 dropped — presumably the
 * capacity-overflow test and the doubling of maxSharedInvalidMessagesArray
 * feeding the repalloc below. Verify against upstream. */
778  {
781 
782  SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
784  * sizeof(SharedInvalidationMessage));
785  }
786 
787  /*
788  * Append the next chunk onto the array
789  */
790  memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
791  msgs, n * sizeof(SharedInvalidationMessage));
/* NOTE(review): line 792 dropped — presumably advances
 * numSharedInvalidMessagesArray by n. */
793 }
794 
795 /*
796  * xactGetCommittedInvalidationMessages() is executed by
797  * RecordTransactionCommit() to add invalidation messages onto the
798  * commit record. This applies only to commit message types, never to
799  * abort records. Must always run before AtEOXact_Inval(), since that
800  * removes the data we need to see.
801  *
802  * Remember that this runs before we have officially committed, so we
803  * must not do anything here to change what might occur *if* we should
804  * fail between here and the actual commit.
805  *
806  * see also xact_redo_commit() and xact_desc_commit()
807  */
808 int
/* NOTE(review): line 809 dropped — the first signature line; the
 * continuation below shows an output-array parameter "msgs" plus the
 * RelcacheInitFileInval out-flag. */
810  bool *RelcacheInitFileInval)
811 {
812  MemoryContext oldcontext;
813 
814  /* Quick exit if we haven't done anything with invalidation messages. */
815  if (transInvalInfo == NULL)
816  {
817  *RelcacheInitFileInval = false;
818  *msgs = NULL;
819  return 0;
820  }
821 
822  /* Must be at top of stack */
823  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
824 
825  /*
826  * Relcache init file invalidation requires processing both before and
827  * after we send the SI messages. However, we need not do anything unless
828  * we committed.
829  */
830  *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
831 
832  /*
833  * Walk through TransInvalidationInfo to collect all the messages into a
834  * single contiguous array of invalidation messages. It must be contiguous
835  * so we can copy directly into WAL message. Maintain the order that they
836  * would be processed in by AtEOXact_Inval(), to ensure emulated behaviour
837  * in redo is as similar as possible to original. We want the same bugs,
838  * if any, not new ones.
839  */
/* NOTE(review): lines 840, 842-845 dropped — presumably the context switch
 * (paired with the MemoryContextSwitchTo(oldcontext) below) and the two
 * ProcessInvalidationMessagesMulti(..., MakeSharedInvalidMessagesArray)
 * walks over the prior- and current-command lists. Verify upstream. */
841 
846  MemoryContextSwitchTo(oldcontext);
847 
/* NOTE(review): line 848 dropped — presumably an Assert pairing a zero count
 * with a NULL array, given the surviving second half below. */
849  SharedInvalidMessagesArray == NULL));
850 
/* NOTE(review): lines 851 and 853 dropped — presumably store the array into
 * *msgs and return the accumulated message count. */
852 
854 }
855 
856 /*
857  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
858  * standby_redo() to process invalidation messages. Currently that happens
859  * only at end-of-xact.
860  *
861  * Relcache init file invalidation requires processing both
862  * before and after we send the SI messages. See AtEOXact_Inval()
863  */
864 void
/* NOTE(review): line 865 dropped — the first signature line (the msgs array
 * parameter, per the parameters continuing below). */
866  int nmsgs, bool RelcacheInitFileInval,
867  Oid dbid, Oid tsid)
868 {
869  if (nmsgs <= 0)
870  return;
871 
872  elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
873  (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
874 
875  if (RelcacheInitFileInval)
876  {
877  /*
878  * RelationCacheInitFilePreInvalidate requires DatabasePath to be set,
879  * but we should not use SetDatabasePath during recovery, since it is
880  * intended to be used only once by normal backends. Hence, a quick
881  * hack: set DatabasePath directly then unset after use.
882  */
883  DatabasePath = GetDatabasePath(dbid, tsid);
884  elog(trace_recovery(DEBUG4), "removing relcache init file in \"%s\"",
885  DatabasePath);
/* NOTE(review): lines 886-887 dropped — presumably the pre-invalidate call
 * and a pfree(DatabasePath) before the NULL reset below. */
888  DatabasePath = NULL;
889  }
890 
891  SendSharedInvalidMessages(msgs, nmsgs);
892 
893  if (RelcacheInitFileInval)
/* NOTE(review): line 894 dropped — presumably the post-invalidate call that
 * pairs with the pre-invalidate step above. */
895 }
896 
897 /*
898  * AtEOXact_Inval
899  * Process queued-up invalidation messages at end of main transaction.
900  *
901  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
902  * to the shared invalidation message queue. Note that these will be read
903  * not only by other backends, but also by our own backend at the next
904  * transaction start (via AcceptInvalidationMessages). This means that
905  * we can skip immediate local processing of anything that's still in
906  * CurrentCmdInvalidMsgs, and just send that list out too.
907  *
908  * If not isCommit, we are aborting, and must locally process the messages
909  * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
910  * since they'll not have seen our changed tuples anyway. We can forget
911  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
912  * the caches yet.
913  *
914  * In any case, reset the various lists to empty. We need not physically
915  * free memory here, since TopTransactionContext is about to be emptied
916  * anyway.
917  *
918  * Note:
919  * This should be called as the last step in processing a transaction.
920  */
921 void
922 AtEOXact_Inval(bool isCommit)
923 {
924  /* Quick exit if no messages */
925  if (transInvalInfo == NULL)
926  return;
927 
928  /* Must be at top of stack */
929  Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
930 
931  if (isCommit)
932  {
933  /*
934  * Relcache init file invalidation requires processing both before and
935  * after we send the SI messages. However, we need not do anything
936  * unless we committed.
937  */
938  if (transInvalInfo->RelcacheInitFileInval)
/* NOTE(review): line 939 dropped — presumably the init-file pre-invalidate
 * call guarded by this condition. */
940 
/* NOTE(review): line 941 dropped — presumably the first line of an
 * AppendInvalidationMessages(...) merging CurrentCmdInvalidMsgs (argument
 * survives below) into PriorCmdInvalidMsgs. */
942  &transInvalInfo->CurrentCmdInvalidMsgs);
943 
/* NOTE(review): lines 944-945 dropped — presumably the group-wise send of
 * PriorCmdInvalidMsgs via ProcessInvalidationMessagesMulti /
 * SendSharedInvalidMessages. Verify against upstream. */
946 
947  if (transInvalInfo->RelcacheInitFileInval)
/* NOTE(review): line 948 dropped — presumably the init-file post-invalidate
 * call pairing with the pre-invalidate above. */
949  }
950  else
951  {
/* NOTE(review): lines 952-953 dropped — presumably the local processing of
 * PriorCmdInvalidMsgs on abort, as described in the header comment. */
954  }
955 
956  /* Need not free anything explicitly */
957  transInvalInfo = NULL;
958  SharedInvalidMessagesArray = NULL;
/* NOTE(review): line 959 dropped — presumably resets the message-count
 * counter alongside the array pointer. */
960 }
961 
962 /*
963  * AtEOSubXact_Inval
964  * Process queued-up invalidation messages at end of subtransaction.
965  *
966  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
967  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
968  * parent's PriorCmdInvalidMsgs list.
969  *
970  * If not isCommit, we are aborting, and must locally process the messages
971  * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
972  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
973  * touched the caches yet.
974  *
975  * In any case, pop the transaction stack. We need not physically free memory
976  * here, since CurTransactionContext is about to be emptied anyway
977  * (if aborting). Beware of the possibility of aborting the same nesting
978  * level twice, though.
979  */
980 void
981 AtEOSubXact_Inval(bool isCommit)
982 {
983  int my_level;
/* NOTE(review): line 984 dropped — presumably declares/initializes the local
 * "myInfo" used throughout below (from transInvalInfo). */
985 
986  /* Quick exit if no messages. */
987  if (myInfo == NULL)
988  return;
989 
990  /* Also bail out quickly if messages are not for this level. */
991  my_level = GetCurrentTransactionNestLevel();
992  if (myInfo->my_level != my_level)
993  {
994  Assert(myInfo->my_level < my_level);
995  return;
996  }
997 
998  if (isCommit)
999  {
1000  /* If CurrentCmdInvalidMsgs still has anything, fix it */
/* NOTE(review): line 1001 dropped — presumably a
 * CommandEndInvalidationMessages() call implementing the comment above. */
1002 
1003  /*
1004  * We create invalidation stack entries lazily, so the parent might
1005  * not have one. Instead of creating one, moving all the data over,
1006  * and then freeing our own, we can just adjust the level of our own
1007  * entry.
1008  */
1009  if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1010  {
1011  myInfo->my_level--;
1012  return;
1013  }
1014 
1015  /* Pass up my inval messages to parent */
/* NOTE(review): line 1016 dropped — presumably the first line of an
 * AppendInvalidationMessages(...) into the parent's PriorCmdInvalidMsgs
 * (the source argument survives below). */
1017  &myInfo->PriorCmdInvalidMsgs);
1018 
1019  /* Pending relcache inval becomes parent's problem too */
1020  if (myInfo->RelcacheInitFileInval)
1021  myInfo->parent->RelcacheInitFileInval = true;
1022 
1023  /* Pop the transaction state stack */
1024  transInvalInfo = myInfo->parent;
1025 
1026  /* Need not free anything else explicitly */
1027  pfree(myInfo);
1028  }
1029  else
1030  {
/* NOTE(review): lines 1031-1032 dropped — presumably the local processing of
 * PriorCmdInvalidMsgs on subxact abort, per the header comment. */
1033 
1034  /* Pop the transaction state stack */
1035  transInvalInfo = myInfo->parent;
1036 
1037  /* Need not free anything else explicitly */
1038  pfree(myInfo);
1039  }
1040 }
1041 
1042 /*
1043  * CommandEndInvalidationMessages
1044  * Process queued-up invalidation messages at end of one command
1045  * in a transaction.
1046  *
1047  * Here, we send no messages to the shared queue, since we don't know yet if
1048  * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
1049  * list, so as to flush our caches of any entries we have outdated in the
1050  * current command. We then move the current-cmd list over to become part
1051  * of the prior-cmds list.
1052  *
1053  * Note:
1054  * This should be called during CommandCounterIncrement(),
1055  * after we have advanced the command ID.
1056  */
1057 void
/* NOTE(review): line 1058 dropped — the signature (no parameters visible). */
1059 {
1060  /*
1061  * You might think this shouldn't be called outside any transaction, but
1062  * bootstrap does it, and also ABORT issued when not in a transaction. So
1063  * just quietly return if no state to work on.
1064  */
1065  if (transInvalInfo == NULL)
1066  return;
1067 
/* NOTE(review): lines 1068-1070 dropped — presumably the local processing of
 * CurrentCmdInvalidMsgs and the first line of an AppendInvalidationMessages
 * into PriorCmdInvalidMsgs (the source argument survives below). */
1071  &transInvalInfo->CurrentCmdInvalidMsgs);
1072 }
1073 
1074 
1075 /*
1076  * CacheInvalidateHeapTuple
1077  * Register the given tuple for invalidation at end of command
1078  * (ie, current command is creating or outdating this tuple).
1079  * Also, detect whether a relcache invalidation is implied.
1080  *
1081  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1082  * For an update, we are called just once, with tuple being the old tuple
1083  * version and newtuple the new version. This allows avoidance of duplicate
1084  * effort during an update.
1085  */
1086 void
1088  HeapTuple tuple,
1089  HeapTuple newtuple)
1090 {
1091  Oid tupleRelId;
1092  Oid databaseId;
1093  Oid relationId;
1094 
1095  /* Do nothing during bootstrap */
1097  return;
1098 
1099  /*
1100  * We only need to worry about invalidation for tuples that are in system
1101  * catalogs; user-relation tuples are never in catcaches and can't affect
1102  * the relcache either.
1103  */
1104  if (!IsCatalogRelation(relation))
1105  return;
1106 
1107  /*
1108  * IsCatalogRelation() will return true for TOAST tables of system
1109  * catalogs, but we don't care about those, either.
1110  */
1111  if (IsToastRelation(relation))
1112  return;
1113 
1114  /*
1115  * If we're not prepared to queue invalidation messages for this
1116  * subtransaction level, get ready now.
1117  */
1119 
1120  /*
1121  * First let the catcache do its thing
1122  */
1123  tupleRelId = RelationGetRelid(relation);
1124  if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1125  {
1126  databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1127  RegisterSnapshotInvalidation(databaseId, tupleRelId);
1128  }
1129  else
1130  PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1132 
1133  /*
1134  * Now, is this tuple one of the primary definers of a relcache entry?
1135  * See comments in file header for deeper explanation.
1136  *
1137  * Note we ignore newtuple here; we assume an update cannot move a tuple
1138  * from being part of one relcache entry to being part of another.
1139  */
1140  if (tupleRelId == RelationRelationId)
1141  {
1142  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1143 
1144  relationId = HeapTupleGetOid(tuple);
1145  if (classtup->relisshared)
1146  databaseId = InvalidOid;
1147  else
1148  databaseId = MyDatabaseId;
1149  }
1150  else if (tupleRelId == AttributeRelationId)
1151  {
1152  Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1153 
1154  relationId = atttup->attrelid;
1155 
1156  /*
1157  * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1158  * even if the rel in question is shared (which we can't easily tell).
1159  * This essentially means that only backends in this same database
1160  * will react to the relcache flush request. This is in fact
1161  * appropriate, since only those backends could see our pg_attribute
1162  * change anyway. It looks a bit ugly though. (In practice, shared
1163  * relations can't have schema changes after bootstrap, so we should
1164  * never come here for a shared rel anyway.)
1165  */
1166  databaseId = MyDatabaseId;
1167  }
1168  else if (tupleRelId == IndexRelationId)
1169  {
1170  Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1171 
1172  /*
1173  * When a pg_index row is updated, we should send out a relcache inval
1174  * for the index relation. As above, we don't know the shared status
1175  * of the index, but in practice it doesn't matter since indexes of
1176  * shared catalogs can't have such updates.
1177  */
1178  relationId = indextup->indexrelid;
1179  databaseId = MyDatabaseId;
1180  }
1181  else
1182  return;
1183 
1184  /*
1185  * Yes. We need to register a relcache invalidation event.
1186  */
1187  RegisterRelcacheInvalidation(databaseId, relationId);
1188 }
1189 
1190 /*
1191  * CacheInvalidateCatalog
1192  * Register invalidation of the whole content of a system catalog.
1193  *
1194  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1195  * changed any tuples as moved them around. Some uses of catcache entries
1196  * expect their TIDs to be correct, so we have to blow away the entries.
1197  *
1198  * Note: we expect caller to verify that the rel actually is a system
1199  * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1200  */
1201 void
1203 {
1204  Oid databaseId;
1205 
1207 
1208  if (IsSharedRelation(catalogId))
1209  databaseId = InvalidOid;
1210  else
1211  databaseId = MyDatabaseId;
1212 
1213  RegisterCatalogInvalidation(databaseId, catalogId);
1214 }
1215 
1216 /*
1217  * CacheInvalidateRelcache
1218  * Register invalidation of the specified relation's relcache entry
1219  * at end of command.
1220  *
1221  * This is used in places that need to force relcache rebuild but aren't
1222  * changing any of the tuples recognized as contributors to the relcache
1223  * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1224  */
1225 void
1227 {
1228  Oid databaseId;
1229  Oid relationId;
1230 
1232 
1233  relationId = RelationGetRelid(relation);
1234  if (relation->rd_rel->relisshared)
1235  databaseId = InvalidOid;
1236  else
1237  databaseId = MyDatabaseId;
1238 
1239  RegisterRelcacheInvalidation(databaseId, relationId);
1240 }
1241 
1242 /*
1243  * CacheInvalidateRelcacheAll
1244  * Register invalidation of the whole relcache at the end of command.
1245  *
1246  * This is used by alter publication as changes in publications may affect
1247  * large number of tables.
1248  */
1249 void
1251 {
1253 
1255 }
1256 
1257 /*
1258  * CacheInvalidateRelcacheByTuple
1259  * As above, but relation is identified by passing its pg_class tuple.
1260  */
1261 void
1263 {
1264  Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1265  Oid databaseId;
1266  Oid relationId;
1267 
1269 
1270  relationId = HeapTupleGetOid(classTuple);
1271  if (classtup->relisshared)
1272  databaseId = InvalidOid;
1273  else
1274  databaseId = MyDatabaseId;
1275  RegisterRelcacheInvalidation(databaseId, relationId);
1276 }
1277 
1278 /*
1279  * CacheInvalidateRelcacheByRelid
1280  * As above, but relation is identified by passing its OID.
1281  * This is the least efficient of the three options; use one of
1282  * the above routines if you have a Relation or pg_class tuple.
1283  */
1284 void
1286 {
1287  HeapTuple tup;
1288 
1290 
1291  tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1292  if (!HeapTupleIsValid(tup))
1293  elog(ERROR, "cache lookup failed for relation %u", relid);
1295  ReleaseSysCache(tup);
1296 }
1297 
1298 
1299 /*
1300  * CacheInvalidateSmgr
1301  * Register invalidation of smgr references to a physical relation.
1302  *
1303  * Sending this type of invalidation msg forces other backends to close open
1304  * smgr entries for the rel. This should be done to flush dangling open-file
1305  * references when the physical rel is being dropped or truncated. Because
1306  * these are nontransactional (i.e., not-rollback-able) operations, we just
1307  * send the inval message immediately without any queuing.
1308  *
1309  * Note: in most cases there will have been a relcache flush issued against
1310  * the rel at the logical level. We need a separate smgr-level flush because
1311  * it is possible for backends to have open smgr entries for rels they don't
1312  * have a relcache entry for, e.g. because the only thing they ever did with
1313  * the rel is write out dirty shared buffers.
1314  *
1315  * Note: because these messages are nontransactional, they won't be captured
1316  * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1317  * should happen in low-level smgr.c routines, which are executed while
1318  * replaying WAL as well as when creating it.
1319  *
1320  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1321  * three bytes of the backend ID using what would otherwise be padding space.
1322  * Thus, the maximum possible backend ID is 2^23-1.
1323  */
1324 void
1326 {
1328 
1329  msg.sm.id = SHAREDINVALSMGR_ID;
1330  msg.sm.backend_hi = rnode.backend >> 16;
1331  msg.sm.backend_lo = rnode.backend & 0xffff;
1332  msg.sm.rnode = rnode.node;
1333  /* check AddCatcacheInvalidationMessage() for an explanation */
1334  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1335 
1336  SendSharedInvalidMessages(&msg, 1);
1337 }
1338 
1339 /*
1340  * CacheInvalidateRelmap
1341  * Register invalidation of the relation mapping for a database,
1342  * or for the shared catalogs if databaseId is zero.
1343  *
1344  * Sending this type of invalidation msg forces other backends to re-read
1345  * the indicated relation mapping file. It is also necessary to send a
1346  * relcache inval for the specific relations whose mapping has been altered,
1347  * else the relcache won't get updated with the new filenode data.
1348  *
1349  * Note: because these messages are nontransactional, they won't be captured
1350  * in commit/abort WAL entries. Instead, calls to CacheInvalidateRelmap()
1351  * should happen in low-level relmapper.c routines, which are executed while
1352  * replaying WAL as well as when creating it.
1353  */
1354 void
1356 {
1358 
1359  msg.rm.id = SHAREDINVALRELMAP_ID;
1360  msg.rm.dbId = databaseId;
1361  /* check AddCatcacheInvalidationMessage() for an explanation */
1362  VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1363 
1364  SendSharedInvalidMessages(&msg, 1);
1365 }
1366 
1367 
1368 /*
1369  * CacheRegisterSyscacheCallback
1370  * Register the specified function to be called for all future
1371  * invalidation events in the specified cache. The cache ID and the
1372  * hash value of the tuple being invalidated will be passed to the
1373  * function.
1374  *
1375  * NOTE: Hash value zero will be passed if a cache reset request is received.
1376  * In this case the called routines should flush all cached state.
1377  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1378  * worth troubling over, especially since most of the current callees just
1379  * flush all cached state anyway.
1380  */
1381 void
1384  Datum arg)
1385 {
1387  elog(FATAL, "out of syscache_callback_list slots");
1388 
1392 
1394 }
1395 
1396 /*
1397  * CacheRegisterRelcacheCallback
1398  * Register the specified function to be called for all future
1399  * relcache invalidation events. The OID of the relation being
1400  * invalidated will be passed to the function.
1401  *
1402  * NOTE: InvalidOid will be passed if a cache reset request is received.
1403  * In this case the called routines should flush all cached state.
1404  */
1405 void
1407  Datum arg)
1408 {
1410  elog(FATAL, "out of relcache_callback_list slots");
1411 
1414 
1416 }
1417 
1418 /*
1419  * CallSyscacheCallbacks
1420  *
1421  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1422  * this module from knowing which catcache IDs correspond to which catalogs.
1423  */
1424 void
1425 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1426 {
1427  int i;
1428 
1429  for (i = 0; i < syscache_callback_count; i++)
1430  {
1431  struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1432 
1433  if (ccitem->id == cacheid)
1434  (*ccitem->function) (ccitem->arg, cacheid, hashvalue);
1435  }
1436 }
signed short int16
Definition: c.h:255
void CacheInvalidateSmgr(RelFileNodeBackend rnode)
Definition: inval.c:1325
#define ProcessMessageListMulti(listHdr, codeFragment)
Definition: inval.c:305
#define FIRSTCHUNKSIZE
static void AppendInvalidationMessages(InvalidationListHeader *dest, InvalidationListHeader *src)
Definition: inval.c:433
SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]
Definition: inval.c:127
static SharedInvalidationMessage * SharedInvalidMessagesArray
Definition: inval.c:173
bool IsToastRelation(Relation relation)
Definition: catalog.c:135
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:91
SharedInvalSnapshotMsg sn
Definition: sinval.h:121
static void AddInvalidationMessage(InvalidationChunk **listHdr, SharedInvalidationMessage *msg)
Definition: inval.c:219
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1087
#define GETSTRUCT(TUP)
Definition: htup_details.h:656
void AtEOXact_Inval(bool isCommit)
Definition: inval.c:922
MemoryContext TopTransactionContext
Definition: mcxt.c:48
void AcceptInvalidationMessages(void)
Definition: inval.c:672
InvalidationListHeader PriorCmdInvalidMsgs
Definition: inval.c:165
struct InvalidationChunk InvalidationChunk
SharedInvalRelcacheMsg rc
Definition: sinval.h:118
void CommandEndInvalidationMessages(void)
Definition: inval.c:1058
void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
Definition: catcache.c:443
#define MAX_RELCACHE_CALLBACKS
Definition: inval.c:184
static int relcache_callback_count
Definition: inval.c:201
RelcacheCallbackFunction function
Definition: inval.c:197
#define IndexRelationId
Definition: pg_index.h:29
#define RelationRelationId
Definition: pg_class.h:29
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
bool RelationIdIsInInitFile(Oid relationId)
Definition: relcache.c:5948
MemoryContext CurTransactionContext
Definition: mcxt.c:49
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:1846
struct TransInvalidationInfo * parent
Definition: inval.c:156
#define AttributeRelationId
Definition: pg_attribute.h:33
void CacheInvalidateRelmap(Oid databaseId)
Definition: inval.c:1355
static TransInvalidationInfo * transInvalInfo
Definition: inval.c:171
Form_pg_class rd_rel
Definition: rel.h:114
unsigned int Oid
Definition: postgres_ext.h:31
void RelationMapInvalidate(bool shared)
Definition: relmapper.c:387
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:538
void InvalidateSystemCaches(void)
Definition: inval.c:634
#define SearchSysCache1(cacheId, key1)
Definition: syscache.h:152
int trace_recovery(int trace_level)
Definition: elog.c:3753
#define ProcessMessageList(listHdr, codeFragment)
Definition: inval.c:285
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1406
InvalidationListHeader CurrentCmdInvalidMsgs
Definition: inval.c:162
void pfree(void *pointer)
Definition: mcxt.c:950
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define ERROR
Definition: elog.h:43
struct InvalidationChunk * next
Definition: inval.c:124
void(* SyscacheCallbackFunction)(Datum arg, int cacheid, uint32 hashvalue)
Definition: inval.h:22
#define FATAL
Definition: elog.h:52
static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
Definition: inval.c:759
void ReceiveSharedInvalidMessages(void(*invalFunction)(SharedInvalidationMessage *msg), void(*resetFunction)(void))
Definition: sinval.c:71
void(* RelcacheCallbackFunction)(Datum arg, Oid relid)
Definition: inval.h:23
#define SHAREDINVALRELCACHE_ID
Definition: sinval.h:76
struct InvalidationListHeader InvalidationListHeader
SharedInvalRelmapMsg rm
Definition: sinval.h:120
#define SHAREDINVALRELMAP_ID
Definition: sinval.h:96
#define MAX_SYSCACHE_CALLBACKS
Definition: inval.c:183
struct TransInvalidationInfo TransInvalidationInfo
void CacheInvalidateRelcacheByRelid(Oid relid)
Definition: inval.c:1285
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:187
void PostPrepare_Inval(void)
Definition: inval.c:750
unsigned int uint32
Definition: c.h:268
InvalidationChunk * cclist
Definition: inval.c:132
static int numSharedInvalidMessagesArray
Definition: inval.c:174
SharedInvalCatcacheMsg cc
Definition: sinval.h:116
void smgrclosenode(RelFileNodeBackend rnode)
Definition: smgr.c:350
SharedInvalCatalogMsg cat
Definition: sinval.h:117
void InvalidateCatalogSnapshot(void)
Definition: snapmgr.c:506
char * GetDatabasePath(Oid dbNode, Oid spcNode)
Definition: relpath.c:108
static void ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr, void(*func)(const SharedInvalidationMessage *msgs, int n))
Definition: inval.c:459
static void PrepareInvalidationState(void)
Definition: inval.c:713
signed char int8
Definition: c.h:254
FormData_pg_index * Form_pg_index
Definition: pg_index.h:67
bool RelcacheInitFileInval
Definition: inval.c:168
static void ProcessInvalidationMessages(InvalidationListHeader *hdr, void(*func)(SharedInvalidationMessage *msg))
Definition: inval.c:447
void CacheInvalidateRelcacheAll(void)
Definition: inval.c:1250
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1382
uintptr_t Datum
Definition: postgres.h:372
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1116
static void AddSnapshotInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:406
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1425
Oid MyDatabaseId
Definition: globals.c:76
static void RegisterCatalogInvalidation(Oid dbId, Oid catId)
Definition: inval.c:491
void RelationCacheInitFilePostInvalidate(void)
Definition: relcache.c:6057
bool IsSharedRelation(Oid relationId)
Definition: catalog.c:219
static void AddCatalogInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid catId)
Definition: inval.c:357
static int syscache_callback_count
Definition: inval.c:193
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:742
#define InvalidOid
Definition: postgres_ext.h:36
static void RegisterRelcacheInvalidation(Oid dbId, Oid relId)
Definition: inval.c:503
#define SHAREDINVALSNAPSHOT_ID
Definition: sinval.h:104
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition: inval.c:865
RelFileNode node
Definition: relfilenode.h:74
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:761
char * DatabasePath
Definition: globals.c:84
void CacheInvalidateCatalog(Oid catalogId)
Definition: inval.c:1202
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define NULL
Definition: c.h:229
void RelationCacheInitFilePreInvalidate(void)
Definition: relcache.c:6031
static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId)
Definition: inval.c:477
#define Assert(condition)
Definition: c.h:675
bool RelationInvalidatesSnapshotsOnly(Oid relid)
Definition: syscache.c:1353
SharedInvalSmgrMsg sm
Definition: sinval.h:119
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:675
BackendId backend
Definition: relfilenode.h:75
static struct RELCACHECALLBACK relcache_callback_list[MAX_RELCACHE_CALLBACKS]
static void AddRelcacheInvalidationMessage(InvalidationListHeader *hdr, Oid dbId, Oid relId)
Definition: inval.c:375
void RelationCacheInvalidateEntry(Oid relationId)
Definition: relcache.c:2712
void SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
Definition: sinval.c:49
void ResetCatalogCaches(void)
Definition: catcache.c:645
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:963
uint16 backend_lo
Definition: sinval.h:92
void CacheInvalidateRelcache(Relation relation)
Definition: inval.c:1226
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:365
FormData_pg_class * Form_pg_class
Definition: pg_class.h:95
void LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
Definition: inval.c:547
#define SHAREDINVALCATALOG_ID
Definition: sinval.h:67
static int maxSharedInvalidMessagesArray
Definition: inval.c:175
void * palloc(Size size)
Definition: mcxt.c:849
void AtEOSubXact_Inval(bool isCommit)
Definition: inval.c:981
int xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs, bool *RelcacheInitFileInval)
Definition: inval.c:809
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:707
void RelationCacheInvalidate(void)
Definition: relcache.c:2756
int i
RelFileNode rnode
Definition: sinval.h:93
void * arg
#define SHAREDINVALSMGR_ID
Definition: sinval.h:85
void CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
Definition: inval.c:1262
CommandId GetCurrentCommandId(bool used)
Definition: xact.c:687
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
InvalidationChunk * rclist
Definition: inval.c:133
static struct SYSCACHECALLBACK syscache_callback_list[MAX_SYSCACHE_CALLBACKS]
#define RelationGetRelid(relation)
Definition: rel.h:417
#define offsetof(type, field)
Definition: c.h:555
static void AddCatcacheInvalidationMessage(InvalidationListHeader *hdr, int id, uint32 hashValue, Oid dbId)
Definition: inval.c:329
static void RegisterSnapshotInvalidation(Oid dbId, Oid relId)
Definition: inval.c:533
SyscacheCallbackFunction function
Definition: inval.c:189
static void AppendInvalidationMessageList(InvalidationChunk **destHdr, InvalidationChunk **srcHdr)
Definition: inval.c:261