/* PostgreSQL source code (git master) — src/backend/utils/cache/catcache.c, recovered from a doxygen extraction */
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/heaptoast.h"
19 #include "access/relscan.h"
20 #include "access/table.h"
21 #include "access/xact.h"
22 #include "catalog/catalog.h"
23 #include "catalog/pg_collation.h"
24 #include "catalog/pg_type.h"
25 #include "common/hashfn.h"
26 #include "common/pg_prng.h"
27 #include "miscadmin.h"
28 #include "port/pg_bitutils.h"
29 #ifdef CATCACHE_STATS
30 #include "storage/ipc.h" /* for on_proc_exit */
31 #endif
32 #include "storage/lmgr.h"
33 #include "utils/builtins.h"
34 #include "utils/catcache.h"
35 #include "utils/datum.h"
36 #include "utils/fmgroids.h"
37 #include "utils/inval.h"
38 #include "utils/memutils.h"
39 #include "utils/rel.h"
40 #include "utils/resowner.h"
41 #include "utils/syscache.h"
42 
43 
44  /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
45 
46 /*
47  * Given a hash value and the size of the hash table, find the bucket
48  * in which the hash value belongs. Since the hash table must contain
49  * a power-of-2 number of elements, this is a simple bitmask.
50  */
51 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
52 
53 
54 /*
55  * variables, macros and other stuff
56  */
57 
58 #ifdef CACHEDEBUG
59 #define CACHE_elog(...) elog(__VA_ARGS__)
60 #else
61 #define CACHE_elog(...)
62 #endif
63 
64 /* Cache management header --- pointer is NULL until created */
65 static CatCacheHeader *CacheHdr = NULL;
66 
67 static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
68  int nkeys,
69  Datum v1, Datum v2,
70  Datum v3, Datum v4);
71 
73  int nkeys,
74  uint32 hashValue,
75  Index hashIndex,
76  Datum v1, Datum v2,
77  Datum v3, Datum v4);
78 
79 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
80  Datum v1, Datum v2, Datum v3, Datum v4);
81 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
82  HeapTuple tuple);
83 static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
84  const Datum *cachekeys,
85  const Datum *searchkeys);
86 
87 #ifdef CATCACHE_STATS
88 static void CatCachePrintStats(int code, Datum arg);
89 #endif
90 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
91 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
92 static void RehashCatCache(CatCache *cp);
93 static void RehashCatCacheLists(CatCache *cp);
94 static void CatalogCacheInitializeCache(CatCache *cache);
96  HeapTuple ntp, SysScanDesc scandesc,
98  uint32 hashValue, Index hashIndex);
99 
100 static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
102 static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
103  Datum *keys);
104 static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
105  Datum *srckeys, Datum *dstkeys);
106 
107 
108 /*
109  * internal support functions
110  */
111 
112 /* ResourceOwner callbacks to hold catcache references */
113 
114 static void ResOwnerReleaseCatCache(Datum res);
115 static char *ResOwnerPrintCatCache(Datum res);
117 static char *ResOwnerPrintCatCacheList(Datum res);
118 
120 {
121  /* catcache references */
122  .name = "catcache reference",
123  .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
124  .release_priority = RELEASE_PRIO_CATCACHE_REFS,
125  .ReleaseResource = ResOwnerReleaseCatCache,
126  .DebugPrint = ResOwnerPrintCatCache
127 };
128 
130 {
131  /* catcache-list pins */
132  .name = "catcache list reference",
133  .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
134  .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
135  .ReleaseResource = ResOwnerReleaseCatCacheList,
136  .DebugPrint = ResOwnerPrintCatCacheList
137 };
138 
139 /* Convenience wrappers over ResourceOwnerRemember/Forget */
140 static inline void
142 {
144 }
145 static inline void
147 {
149 }
150 static inline void
152 {
154 }
155 static inline void
157 {
159 }
160 
161 
162 /*
163  * Hash and equality functions for system types that are used as cache key
164  * fields. In some cases, we just call the regular SQL-callable functions for
165  * the appropriate data type, but that tends to be a little slow, and the
166  * speed of these functions is performance-critical. Therefore, for data
167  * types that frequently occur as catcache keys, we hard-code the logic here.
168  * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
169  * in certain cases (like int4) we can adopt a faster hash algorithm as well.
170  */
171 
172 static bool
174 {
175  return DatumGetChar(a) == DatumGetChar(b);
176 }
177 
178 static uint32
180 {
181  return murmurhash32((int32) DatumGetChar(datum));
182 }
183 
184 static bool
186 {
187  char *ca = NameStr(*DatumGetName(a));
188  char *cb = NameStr(*DatumGetName(b));
189 
190  return strncmp(ca, cb, NAMEDATALEN) == 0;
191 }
192 
193 static uint32
195 {
196  char *key = NameStr(*DatumGetName(datum));
197 
198  return hash_any((unsigned char *) key, strlen(key));
199 }
200 
201 static bool
203 {
204  return DatumGetInt16(a) == DatumGetInt16(b);
205 }
206 
207 static uint32
209 {
210  return murmurhash32((int32) DatumGetInt16(datum));
211 }
212 
213 static bool
215 {
216  return DatumGetInt32(a) == DatumGetInt32(b);
217 }
218 
219 static uint32
221 {
222  return murmurhash32((int32) DatumGetInt32(datum));
223 }
224 
225 static bool
227 {
228  /*
229  * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
230  * want to take the fast "deterministic" path in texteq().
231  */
232  return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
233 }
234 
235 static uint32
237 {
238  /* analogously here as in texteqfast() */
239  return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
240 }
241 
242 static bool
244 {
246 }
247 
248 static uint32
250 {
252 }
253 
254 /* Lookup support functions for a type. */
255 static void
256 GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
257 {
258  switch (keytype)
259  {
260  case BOOLOID:
261  *hashfunc = charhashfast;
262  *fasteqfunc = chareqfast;
263  *eqfunc = F_BOOLEQ;
264  break;
265  case CHAROID:
266  *hashfunc = charhashfast;
267  *fasteqfunc = chareqfast;
268  *eqfunc = F_CHAREQ;
269  break;
270  case NAMEOID:
271  *hashfunc = namehashfast;
272  *fasteqfunc = nameeqfast;
273  *eqfunc = F_NAMEEQ;
274  break;
275  case INT2OID:
276  *hashfunc = int2hashfast;
277  *fasteqfunc = int2eqfast;
278  *eqfunc = F_INT2EQ;
279  break;
280  case INT4OID:
281  *hashfunc = int4hashfast;
282  *fasteqfunc = int4eqfast;
283  *eqfunc = F_INT4EQ;
284  break;
285  case TEXTOID:
286  *hashfunc = texthashfast;
287  *fasteqfunc = texteqfast;
288  *eqfunc = F_TEXTEQ;
289  break;
290  case OIDOID:
291  case REGPROCOID:
292  case REGPROCEDUREOID:
293  case REGOPEROID:
294  case REGOPERATOROID:
295  case REGCLASSOID:
296  case REGTYPEOID:
297  case REGCOLLATIONOID:
298  case REGCONFIGOID:
299  case REGDICTIONARYOID:
300  case REGROLEOID:
301  case REGNAMESPACEOID:
302  *hashfunc = int4hashfast;
303  *fasteqfunc = int4eqfast;
304  *eqfunc = F_OIDEQ;
305  break;
306  case OIDVECTOROID:
307  *hashfunc = oidvectorhashfast;
308  *fasteqfunc = oidvectoreqfast;
309  *eqfunc = F_OIDVECTOREQ;
310  break;
311  default:
312  elog(FATAL, "type %u not supported as catcache key", keytype);
313  *hashfunc = NULL; /* keep compiler quiet */
314 
315  *eqfunc = InvalidOid;
316  break;
317  }
318 }
319 
320 /*
321  * CatalogCacheComputeHashValue
322  *
323  * Compute the hash value associated with a given set of lookup keys
324  */
325 static uint32
327  Datum v1, Datum v2, Datum v3, Datum v4)
328 {
329  uint32 hashValue = 0;
330  uint32 oneHash;
331  CCHashFN *cc_hashfunc = cache->cc_hashfunc;
332 
333  CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
334  cache->cc_relname, nkeys, cache);
335 
336  switch (nkeys)
337  {
338  case 4:
339  oneHash = (cc_hashfunc[3]) (v4);
340  hashValue ^= pg_rotate_left32(oneHash, 24);
341  /* FALLTHROUGH */
342  case 3:
343  oneHash = (cc_hashfunc[2]) (v3);
344  hashValue ^= pg_rotate_left32(oneHash, 16);
345  /* FALLTHROUGH */
346  case 2:
347  oneHash = (cc_hashfunc[1]) (v2);
348  hashValue ^= pg_rotate_left32(oneHash, 8);
349  /* FALLTHROUGH */
350  case 1:
351  oneHash = (cc_hashfunc[0]) (v1);
352  hashValue ^= oneHash;
353  break;
354  default:
355  elog(FATAL, "wrong number of hash keys: %d", nkeys);
356  break;
357  }
358 
359  return hashValue;
360 }
361 
362 /*
363  * CatalogCacheComputeTupleHashValue
364  *
365  * Compute the hash value associated with a given tuple to be cached
366  */
367 static uint32
369 {
370  Datum v1 = 0,
371  v2 = 0,
372  v3 = 0,
373  v4 = 0;
374  bool isNull = false;
375  int *cc_keyno = cache->cc_keyno;
376  TupleDesc cc_tupdesc = cache->cc_tupdesc;
377 
378  /* Now extract key fields from tuple, insert into scankey */
379  switch (nkeys)
380  {
381  case 4:
382  v4 = fastgetattr(tuple,
383  cc_keyno[3],
384  cc_tupdesc,
385  &isNull);
386  Assert(!isNull);
387  /* FALLTHROUGH */
388  case 3:
389  v3 = fastgetattr(tuple,
390  cc_keyno[2],
391  cc_tupdesc,
392  &isNull);
393  Assert(!isNull);
394  /* FALLTHROUGH */
395  case 2:
396  v2 = fastgetattr(tuple,
397  cc_keyno[1],
398  cc_tupdesc,
399  &isNull);
400  Assert(!isNull);
401  /* FALLTHROUGH */
402  case 1:
403  v1 = fastgetattr(tuple,
404  cc_keyno[0],
405  cc_tupdesc,
406  &isNull);
407  Assert(!isNull);
408  break;
409  default:
410  elog(FATAL, "wrong number of hash keys: %d", nkeys);
411  break;
412  }
413 
414  return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
415 }
416 
417 /*
418  * CatalogCacheCompareTuple
419  *
420  * Compare a tuple to the passed arguments.
421  */
422 static inline bool
423 CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
424  const Datum *cachekeys,
425  const Datum *searchkeys)
426 {
427  const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
428  int i;
429 
430  for (i = 0; i < nkeys; i++)
431  {
432  if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
433  return false;
434  }
435  return true;
436 }
437 
438 
439 #ifdef CATCACHE_STATS
440 
441 static void
442 CatCachePrintStats(int code, Datum arg)
443 {
444  slist_iter iter;
445  long cc_searches = 0;
446  long cc_hits = 0;
447  long cc_neg_hits = 0;
448  long cc_newloads = 0;
449  long cc_invals = 0;
450  long cc_nlists = 0;
451  long cc_lsearches = 0;
452  long cc_lhits = 0;
453 
455  {
456  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
457 
458  if (cache->cc_ntup == 0 && cache->cc_searches == 0)
459  continue; /* don't print unused caches */
460  elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
461  cache->cc_relname,
462  cache->cc_indexoid,
463  cache->cc_ntup,
464  cache->cc_searches,
465  cache->cc_hits,
466  cache->cc_neg_hits,
467  cache->cc_hits + cache->cc_neg_hits,
468  cache->cc_newloads,
469  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
470  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
471  cache->cc_invals,
472  cache->cc_nlist,
473  cache->cc_lsearches,
474  cache->cc_lhits);
475  cc_searches += cache->cc_searches;
476  cc_hits += cache->cc_hits;
477  cc_neg_hits += cache->cc_neg_hits;
478  cc_newloads += cache->cc_newloads;
479  cc_invals += cache->cc_invals;
480  cc_nlists += cache->cc_nlist;
481  cc_lsearches += cache->cc_lsearches;
482  cc_lhits += cache->cc_lhits;
483  }
484  elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
485  CacheHdr->ch_ntup,
486  cc_searches,
487  cc_hits,
488  cc_neg_hits,
489  cc_hits + cc_neg_hits,
490  cc_newloads,
491  cc_searches - cc_hits - cc_neg_hits - cc_newloads,
492  cc_searches - cc_hits - cc_neg_hits,
493  cc_invals,
494  cc_nlists,
495  cc_lsearches,
496  cc_lhits);
497 }
498 #endif /* CATCACHE_STATS */
499 
500 
501 /*
502  * CatCacheRemoveCTup
503  *
504  * Unlink and delete the given cache entry
505  *
506  * NB: if it is a member of a CatCList, the CatCList is deleted too.
507  * Both the cache entry and the list had better have zero refcount.
508  */
509 static void
511 {
512  Assert(ct->refcount == 0);
513  Assert(ct->my_cache == cache);
514 
515  if (ct->c_list)
516  {
517  /*
518  * The cleanest way to handle this is to call CatCacheRemoveCList,
519  * which will recurse back to me, and the recursive call will do the
520  * work. Set the "dead" flag to make sure it does recurse.
521  */
522  ct->dead = true;
523  CatCacheRemoveCList(cache, ct->c_list);
524  return; /* nothing left to do */
525  }
526 
527  /* delink from linked list */
528  dlist_delete(&ct->cache_elem);
529 
530  /*
531  * Free keys when we're dealing with a negative entry, normal entries just
532  * point into tuple, allocated together with the CatCTup.
533  */
534  if (ct->negative)
535  CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
536  cache->cc_keyno, ct->keys);
537 
538  pfree(ct);
539 
540  --cache->cc_ntup;
541  --CacheHdr->ch_ntup;
542 }
543 
544 /*
545  * CatCacheRemoveCList
546  *
547  * Unlink and delete the given cache list entry
548  *
549  * NB: any dead member entries that become unreferenced are deleted too.
550  */
551 static void
553 {
554  int i;
555 
556  Assert(cl->refcount == 0);
557  Assert(cl->my_cache == cache);
558 
559  /* delink from member tuples */
560  for (i = cl->n_members; --i >= 0;)
561  {
562  CatCTup *ct = cl->members[i];
563 
564  Assert(ct->c_list == cl);
565  ct->c_list = NULL;
566  /* if the member is dead and now has no references, remove it */
567  if (
568 #ifndef CATCACHE_FORCE_RELEASE
569  ct->dead &&
570 #endif
571  ct->refcount == 0)
572  CatCacheRemoveCTup(cache, ct);
573  }
574 
575  /* delink from linked list */
576  dlist_delete(&cl->cache_elem);
577 
578  /* free associated column data */
579  CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
580  cache->cc_keyno, cl->keys);
581 
582  pfree(cl);
583 
584  --cache->cc_nlist;
585 }
586 
587 
588 /*
589  * CatCacheInvalidate
590  *
591  * Invalidate entries in the specified cache, given a hash value.
592  *
593  * We delete cache entries that match the hash value, whether positive
594  * or negative. We don't care whether the invalidation is the result
595  * of a tuple insertion or a deletion.
596  *
597  * We used to try to match positive cache entries by TID, but that is
598  * unsafe after a VACUUM FULL on a system catalog: an inval event could
599  * be queued before VACUUM FULL, and then processed afterwards, when the
600  * target tuple that has to be invalidated has a different TID than it
601  * did when the event was created. So now we just compare hash values and
602  * accept the small risk of unnecessary invalidations due to false matches.
603  *
604  * This routine is only quasi-public: it should only be used by inval.c.
605  */
606 void
608 {
609  Index hashIndex;
610  dlist_mutable_iter iter;
611 
612  CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
613 
614  /*
615  * We don't bother to check whether the cache has finished initialization
616  * yet; if not, there will be no entries in it so no problem.
617  */
618 
619  /*
620  * Invalidate *all* CatCLists in this cache; it's too hard to tell which
621  * searches might still be correct, so just zap 'em all.
622  */
623  for (int i = 0; i < cache->cc_nlbuckets; i++)
624  {
625  dlist_head *bucket = &cache->cc_lbucket[i];
626 
627  dlist_foreach_modify(iter, bucket)
628  {
629  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
630 
631  if (cl->refcount > 0)
632  cl->dead = true;
633  else
634  CatCacheRemoveCList(cache, cl);
635  }
636  }
637 
638  /*
639  * inspect the proper hash bucket for tuple matches
640  */
641  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
642  dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
643  {
644  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
645 
646  if (hashValue == ct->hash_value)
647  {
648  if (ct->refcount > 0 ||
649  (ct->c_list && ct->c_list->refcount > 0))
650  {
651  ct->dead = true;
652  /* list, if any, was marked dead above */
653  Assert(ct->c_list == NULL || ct->c_list->dead);
654  }
655  else
656  CatCacheRemoveCTup(cache, ct);
657  CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
658 #ifdef CATCACHE_STATS
659  cache->cc_invals++;
660 #endif
661  /* could be multiple matches, so keep looking! */
662  }
663  }
664 }
665 
666 /* ----------------------------------------------------------------
667  * public functions
668  * ----------------------------------------------------------------
669  */
670 
671 
672 /*
673  * Standard routine for creating cache context if it doesn't exist yet
674  *
675  * There are a lot of places (probably far more than necessary) that check
676  * whether CacheMemoryContext exists yet and want to create it if not.
677  * We centralize knowledge of exactly how to create it here.
678  */
679 void
681 {
682  /*
683  * Purely for paranoia, check that context doesn't exist; caller probably
684  * did so already.
685  */
686  if (!CacheMemoryContext)
688  "CacheMemoryContext",
690 }
691 
692 
693 /*
694  * ResetCatalogCache
695  *
696  * Reset one catalog cache to empty.
697  *
698  * This is not very efficient if the target cache is nearly empty.
699  * However, it shouldn't need to be efficient; we don't invoke it often.
700  */
701 static void
703 {
704  dlist_mutable_iter iter;
705  int i;
706 
707  /* Remove each list in this cache, or at least mark it dead */
708  for (i = 0; i < cache->cc_nlbuckets; i++)
709  {
710  dlist_head *bucket = &cache->cc_lbucket[i];
711 
712  dlist_foreach_modify(iter, bucket)
713  {
714  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
715 
716  if (cl->refcount > 0)
717  cl->dead = true;
718  else
719  CatCacheRemoveCList(cache, cl);
720  }
721  }
722 
723  /* Remove each tuple in this cache, or at least mark it dead */
724  for (i = 0; i < cache->cc_nbuckets; i++)
725  {
726  dlist_head *bucket = &cache->cc_bucket[i];
727 
728  dlist_foreach_modify(iter, bucket)
729  {
730  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
731 
732  if (ct->refcount > 0 ||
733  (ct->c_list && ct->c_list->refcount > 0))
734  {
735  ct->dead = true;
736  /* list, if any, was marked dead above */
737  Assert(ct->c_list == NULL || ct->c_list->dead);
738  }
739  else
740  CatCacheRemoveCTup(cache, ct);
741 #ifdef CATCACHE_STATS
742  cache->cc_invals++;
743 #endif
744  }
745  }
746 }
747 
748 /*
749  * ResetCatalogCaches
750  *
751  * Reset all caches when a shared cache inval event forces it
752  */
753 void
755 {
756  slist_iter iter;
757 
758  CACHE_elog(DEBUG2, "ResetCatalogCaches called");
759 
761  {
762  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
763 
764  ResetCatalogCache(cache);
765  }
766 
767  CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
768 }
769 
770 /*
771  * CatalogCacheFlushCatalog
772  *
773  * Flush all catcache entries that came from the specified system catalog.
774  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
775  * tuples very likely now have different TIDs than before. (At one point
776  * we also tried to force re-execution of CatalogCacheInitializeCache for
777  * the cache(s) on that catalog. This is a bad idea since it leads to all
778  * kinds of trouble if a cache flush occurs while loading cache entries.
779  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
780  * rather than relying on the relcache to keep a tupdesc for us. Of course
781  * this assumes the tupdesc of a cachable system table will not change...)
782  */
783 void
785 {
786  slist_iter iter;
787 
788  CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
789 
791  {
792  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
793 
794  /* Does this cache store tuples of the target catalog? */
795  if (cache->cc_reloid == catId)
796  {
797  /* Yes, so flush all its contents */
798  ResetCatalogCache(cache);
799 
800  /* Tell inval.c to call syscache callbacks for this cache */
801  CallSyscacheCallbacks(cache->id, 0);
802  }
803  }
804 
805  CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
806 }
807 
808 /*
809  * InitCatCache
810  *
811  * This allocates and initializes a cache for a system catalog relation.
812  * Actually, the cache is only partially initialized to avoid opening the
813  * relation. The relation will be opened and the rest of the cache
814  * structure initialized on the first access.
815  */
816 #ifdef CACHEDEBUG
817 #define InitCatCache_DEBUG2 \
818 do { \
819  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
820  cp->cc_reloid, cp->cc_indexoid, cp->id, \
821  cp->cc_nkeys, cp->cc_nbuckets); \
822 } while(0)
823 #else
824 #define InitCatCache_DEBUG2
825 #endif
826 
827 CatCache *
829  Oid reloid,
830  Oid indexoid,
831  int nkeys,
832  const int *key,
833  int nbuckets)
834 {
835  CatCache *cp;
836  MemoryContext oldcxt;
837  int i;
838 
839  /*
840  * nbuckets is the initial number of hash buckets to use in this catcache.
841  * It will be enlarged later if it becomes too full.
842  *
843  * nbuckets must be a power of two. We check this via Assert rather than
844  * a full runtime check because the values will be coming from constant
845  * tables.
846  *
847  * If you're confused by the power-of-two check, see comments in
848  * bitmapset.c for an explanation.
849  */
850  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
851 
852  /*
853  * first switch to the cache context so our allocations do not vanish at
854  * the end of a transaction
855  */
856  if (!CacheMemoryContext)
858 
860 
861  /*
862  * if first time through, initialize the cache group header
863  */
864  if (CacheHdr == NULL)
865  {
868  CacheHdr->ch_ntup = 0;
869 #ifdef CATCACHE_STATS
870  /* set up to dump stats at backend exit */
871  on_proc_exit(CatCachePrintStats, 0);
872 #endif
873  }
874 
875  /*
876  * Allocate a new cache structure, aligning to a cacheline boundary
877  *
878  * Note: we rely on zeroing to initialize all the dlist headers correctly
879  */
882  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
883 
884  /*
885  * Many catcaches never receive any list searches. Therefore, we don't
886  * allocate the cc_lbuckets till we get a list search.
887  */
888  cp->cc_lbucket = NULL;
889 
890  /*
891  * initialize the cache's relation information for the relation
892  * corresponding to this cache, and initialize some of the new cache's
893  * other internal fields. But don't open the relation yet.
894  */
895  cp->id = id;
896  cp->cc_relname = "(not known yet)";
897  cp->cc_reloid = reloid;
898  cp->cc_indexoid = indexoid;
899  cp->cc_relisshared = false; /* temporary */
900  cp->cc_tupdesc = (TupleDesc) NULL;
901  cp->cc_ntup = 0;
902  cp->cc_nlist = 0;
903  cp->cc_nbuckets = nbuckets;
904  cp->cc_nlbuckets = 0;
905  cp->cc_nkeys = nkeys;
906  for (i = 0; i < nkeys; ++i)
907  {
909  cp->cc_keyno[i] = key[i];
910  }
911 
912  /*
913  * new cache is initialized as far as we can go for now. print some
914  * debugging information, if appropriate.
915  */
917 
918  /*
919  * add completed cache to top of group header's list
920  */
922 
923  /*
924  * back to the old context before we return...
925  */
926  MemoryContextSwitchTo(oldcxt);
927 
928  return cp;
929 }
930 
931 /*
932  * Enlarge a catcache, doubling the number of buckets.
933  */
934 static void
936 {
937  dlist_head *newbucket;
938  int newnbuckets;
939  int i;
940 
941  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
942  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
943 
944  /* Allocate a new, larger, hash table. */
945  newnbuckets = cp->cc_nbuckets * 2;
946  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
947 
948  /* Move all entries from old hash table to new. */
949  for (i = 0; i < cp->cc_nbuckets; i++)
950  {
951  dlist_mutable_iter iter;
952 
953  dlist_foreach_modify(iter, &cp->cc_bucket[i])
954  {
955  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
956  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
957 
958  dlist_delete(iter.cur);
959  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
960  }
961  }
962 
963  /* Switch to the new array. */
964  pfree(cp->cc_bucket);
965  cp->cc_nbuckets = newnbuckets;
966  cp->cc_bucket = newbucket;
967 }
968 
969 /*
970  * Enlarge a catcache's list storage, doubling the number of buckets.
971  */
972 static void
974 {
975  dlist_head *newbucket;
976  int newnbuckets;
977  int i;
978 
979  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
980  cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
981 
982  /* Allocate a new, larger, hash table. */
983  newnbuckets = cp->cc_nlbuckets * 2;
984  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
985 
986  /* Move all entries from old hash table to new. */
987  for (i = 0; i < cp->cc_nlbuckets; i++)
988  {
989  dlist_mutable_iter iter;
990 
991  dlist_foreach_modify(iter, &cp->cc_lbucket[i])
992  {
993  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
994  int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
995 
996  dlist_delete(iter.cur);
997  dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
998  }
999  }
1000 
1001  /* Switch to the new array. */
1002  pfree(cp->cc_lbucket);
1003  cp->cc_nlbuckets = newnbuckets;
1004  cp->cc_lbucket = newbucket;
1005 }
1006 
1007 /*
1008  * CatalogCacheInitializeCache
1009  *
1010  * This function does final initialization of a catcache: obtain the tuple
1011  * descriptor and set up the hash and equality function links. We assume
1012  * that the relcache entry can be opened at this point!
1013  */
1014 #ifdef CACHEDEBUG
1015 #define CatalogCacheInitializeCache_DEBUG1 \
1016  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
1017  cache->cc_reloid)
1018 
1019 #define CatalogCacheInitializeCache_DEBUG2 \
1020 do { \
1021  if (cache->cc_keyno[i] > 0) { \
1022  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
1023  i+1, cache->cc_nkeys, cache->cc_keyno[i], \
1024  TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
1025  } else { \
1026  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
1027  i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
1028  } \
1029 } while(0)
1030 #else
1031 #define CatalogCacheInitializeCache_DEBUG1
1032 #define CatalogCacheInitializeCache_DEBUG2
1033 #endif
1034 
1035 static void
1037 {
1038  Relation relation;
1039  MemoryContext oldcxt;
1040  TupleDesc tupdesc;
1041  int i;
1042 
1044 
1045  relation = table_open(cache->cc_reloid, AccessShareLock);
1046 
1047  /*
1048  * switch to the cache context so our allocations do not vanish at the end
1049  * of a transaction
1050  */
1051  Assert(CacheMemoryContext != NULL);
1052 
1054 
1055  /*
1056  * copy the relcache's tuple descriptor to permanent cache storage
1057  */
1058  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
1059 
1060  /*
1061  * save the relation's name and relisshared flag, too (cc_relname is used
1062  * only for debugging purposes)
1063  */
1064  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
1065  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
1066 
1067  /*
1068  * return to the caller's memory context and close the rel
1069  */
1070  MemoryContextSwitchTo(oldcxt);
1071 
1072  table_close(relation, AccessShareLock);
1073 
1074  CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
1075  cache->cc_relname, cache->cc_nkeys);
1076 
1077  /*
1078  * initialize cache's key information
1079  */
1080  for (i = 0; i < cache->cc_nkeys; ++i)
1081  {
1082  Oid keytype;
1083  RegProcedure eqfunc;
1084 
1086 
1087  if (cache->cc_keyno[i] > 0)
1088  {
1089  Form_pg_attribute attr = TupleDescAttr(tupdesc,
1090  cache->cc_keyno[i] - 1);
1091 
1092  keytype = attr->atttypid;
1093  /* cache key columns should always be NOT NULL */
1094  Assert(attr->attnotnull);
1095  }
1096  else
1097  {
1098  if (cache->cc_keyno[i] < 0)
1099  elog(FATAL, "sys attributes are not supported in caches");
1100  keytype = OIDOID;
1101  }
1102 
1103  GetCCHashEqFuncs(keytype,
1104  &cache->cc_hashfunc[i],
1105  &eqfunc,
1106  &cache->cc_fastequal[i]);
1107 
1108  /*
1109  * Do equality-function lookup (we assume this won't need a catalog
1110  * lookup for any supported type)
1111  */
1112  fmgr_info_cxt(eqfunc,
1113  &cache->cc_skey[i].sk_func,
1115 
1116  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1117  cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1118 
1119  /* Fill in sk_strategy as well --- always standard equality */
1121  cache->cc_skey[i].sk_subtype = InvalidOid;
1122  /* If a catcache key requires a collation, it must be C collation */
1123  cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1124 
1125  CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1126  cache->cc_relname, i, cache);
1127  }
1128 
1129  /*
1130  * mark this cache fully initialized
1131  */
1132  cache->cc_tupdesc = tupdesc;
1133 }
1134 
1135 /*
1136  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1137  *
1138  * One reason to call this routine is to ensure that the relcache has
1139  * created entries for all the catalogs and indexes referenced by catcaches.
1140  * Therefore, provide an option to open the index as well as fixing the
1141  * cache itself. An exception is the indexes on pg_am, which we don't use
1142  * (cf. IndexScanOK).
1143  */
1144 void
1145 InitCatCachePhase2(CatCache *cache, bool touch_index)
1146 {
1147  if (cache->cc_tupdesc == NULL)
1149 
1150  if (touch_index &&
1151  cache->id != AMOID &&
1152  cache->id != AMNAME)
1153  {
1154  Relation idesc;
1155 
1156  /*
1157  * We must lock the underlying catalog before opening the index to
1158  * avoid deadlock, since index_open could possibly result in reading
1159  * this same catalog, and if anyone else is exclusive-locking this
1160  * catalog and index they'll be doing it in that order.
1161  */
1163  idesc = index_open(cache->cc_indexoid, AccessShareLock);
1164 
1165  /*
1166  * While we've got the index open, let's check that it's unique (and
1167  * not just deferrable-unique, thank you very much). This is just to
1168  * catch thinkos in definitions of new catcaches, so we don't worry
1169  * about the pg_am indexes not getting tested.
1170  */
1171  Assert(idesc->rd_index->indisunique &&
1172  idesc->rd_index->indimmediate);
1173 
1174  index_close(idesc, AccessShareLock);
1176  }
1177 }
1178 
1179 
1180 /*
1181  * IndexScanOK
1182  *
1183  * This function checks for tuples that will be fetched by
1184  * IndexSupportInitialize() during relcache initialization for
1185  * certain system indexes that support critical syscaches.
1186  * We can't use an indexscan to fetch these, else we'll get into
1187  * infinite recursion. A plain heap scan will work, however.
1188  * Once we have completed relcache initialization (signaled by
1189  * criticalRelcachesBuilt), we don't have to worry anymore.
1190  *
1191  * Similarly, during backend startup we have to be able to use the
1192  * pg_authid, pg_auth_members and pg_database syscaches for
1193  * authentication even if we don't yet have relcache entries for those
1194  * catalogs' indexes.
1195  */
1196 static bool
1197 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1198 {
1199  switch (cache->id)
1200  {
1201  case INDEXRELID:
1202 
1203  /*
1204  * Rather than tracking exactly which indexes have to be loaded
1205  * before we can use indexscans (which changes from time to time),
1206  * just force all pg_index searches to be heap scans until we've
1207  * built the critical relcaches.
1208  */
1210  return false;
1211  break;
1212 
1213  case AMOID:
1214  case AMNAME:
1215 
1216  /*
1217  * Always do heap scans in pg_am, because it's so small there's
1218  * not much point in an indexscan anyway. We *must* do this when
1219  * initially building critical relcache entries, but we might as
1220  * well just always do it.
1221  */
1222  return false;
1223 
1224  case AUTHNAME:
1225  case AUTHOID:
1226  case AUTHMEMMEMROLE:
1227  case DATABASEOID:
1228 
1229  /*
1230  * Protect authentication lookups occurring before relcache has
1231  * collected entries for shared indexes.
1232  */
1234  return false;
1235  break;
1236 
1237  default:
1238  break;
1239  }
1240 
1241  /* Normal case, allow index scan */
1242  return true;
1243 }
1244 
1245 /*
1246  * SearchCatCache
1247  *
1248  * This call searches a system cache for a tuple, opening the relation
1249  * if necessary (on the first access to a particular cache).
1250  *
1251  * The result is NULL if not found, or a pointer to a HeapTuple in
1252  * the cache. The caller must not modify the tuple, and must call
1253  * ReleaseCatCache() when done with it.
1254  *
1255  * The search key values should be expressed as Datums of the key columns'
1256  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1257  * exception, the passed-in key for a NAME column can be just a C string;
1258  * the caller need not go to the trouble of converting it to a fully
1259  * null-padded NAME.
1260  */
1261 HeapTuple
1263  Datum v1,
1264  Datum v2,
1265  Datum v3,
1266  Datum v4)
1267 {
1268  return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1269 }
1270 
1271 
1272 /*
1273  * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1274  * arguments. The compiler can inline the body and unroll loops, making them a
1275  * bit faster than SearchCatCache().
1276  */
1277 
1278 HeapTuple
1280  Datum v1)
1281 {
1282  return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1283 }
1284 
1285 
1286 HeapTuple
1288  Datum v1, Datum v2)
1289 {
1290  return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1291 }
1292 
1293 
1294 HeapTuple
1296  Datum v1, Datum v2, Datum v3)
1297 {
1298  return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1299 }
1300 
1301 
1302 HeapTuple
1304  Datum v1, Datum v2, Datum v3, Datum v4)
1305 {
1306  return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1307 }
1308 
1309 /*
1310  * Work-horse for SearchCatCache/SearchCatCacheN.
1311  */
1312 static inline HeapTuple
1314  int nkeys,
1315  Datum v1,
1316  Datum v2,
1317  Datum v3,
1318  Datum v4)
1319 {
1321  uint32 hashValue;
1322  Index hashIndex;
1323  dlist_iter iter;
1324  dlist_head *bucket;
1325  CatCTup *ct;
1326 
1327  /* Make sure we're in an xact, even if this ends up being a cache hit */
1329 
1330  Assert(cache->cc_nkeys == nkeys);
1331 
1332  /*
1333  * one-time startup overhead for each cache
1334  */
1335  if (unlikely(cache->cc_tupdesc == NULL))
1337 
1338 #ifdef CATCACHE_STATS
1339  cache->cc_searches++;
1340 #endif
1341 
1342  /* Initialize local parameter array */
1343  arguments[0] = v1;
1344  arguments[1] = v2;
1345  arguments[2] = v3;
1346  arguments[3] = v4;
1347 
1348  /*
1349  * find the hash bucket in which to look for the tuple
1350  */
1351  hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1352  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1353 
1354  /*
1355  * scan the hash bucket until we find a match or exhaust our tuples
1356  *
1357  * Note: it's okay to use dlist_foreach here, even though we modify the
1358  * dlist within the loop, because we don't continue the loop afterwards.
1359  */
1360  bucket = &cache->cc_bucket[hashIndex];
1361  dlist_foreach(iter, bucket)
1362  {
1363  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1364 
1365  if (ct->dead)
1366  continue; /* ignore dead entries */
1367 
1368  if (ct->hash_value != hashValue)
1369  continue; /* quickly skip entry if wrong hash val */
1370 
1371  if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1372  continue;
1373 
1374  /*
1375  * We found a match in the cache. Move it to the front of the list
1376  * for its hashbucket, in order to speed subsequent searches. (The
1377  * most frequently accessed elements in any hashbucket will tend to be
1378  * near the front of the hashbucket's list.)
1379  */
1380  dlist_move_head(bucket, &ct->cache_elem);
1381 
1382  /*
1383  * If it's a positive entry, bump its refcount and return it. If it's
1384  * negative, we can report failure to the caller.
1385  */
1386  if (!ct->negative)
1387  {
1389  ct->refcount++;
1391 
1392  CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1393  cache->cc_relname, hashIndex);
1394 
1395 #ifdef CATCACHE_STATS
1396  cache->cc_hits++;
1397 #endif
1398 
1399  return &ct->tuple;
1400  }
1401  else
1402  {
1403  CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1404  cache->cc_relname, hashIndex);
1405 
1406 #ifdef CATCACHE_STATS
1407  cache->cc_neg_hits++;
1408 #endif
1409 
1410  return NULL;
1411  }
1412  }
1413 
1414  return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1415 }
1416 
1417 /*
1418  * Search the actual catalogs, rather than the cache.
1419  *
1420  * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1421  * as small as possible. To avoid that effort being undone by a helpful
1422  * compiler, try to explicitly forbid inlining.
1423  */
1424 static pg_noinline HeapTuple
1426  int nkeys,
1427  uint32 hashValue,
1428  Index hashIndex,
1429  Datum v1,
1430  Datum v2,
1431  Datum v3,
1432  Datum v4)
1433 {
1434  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1435  Relation relation;
1436  SysScanDesc scandesc;
1437  HeapTuple ntp;
1438  CatCTup *ct;
1439  bool stale;
1441 
1442  /* Initialize local parameter array */
1443  arguments[0] = v1;
1444  arguments[1] = v2;
1445  arguments[2] = v3;
1446  arguments[3] = v4;
1447 
1448  /*
1449  * Tuple was not found in cache, so we have to try to retrieve it directly
1450  * from the relation. If found, we will add it to the cache; if not
1451  * found, we will add a negative cache entry instead.
1452  *
1453  * NOTE: it is possible for recursive cache lookups to occur while reading
1454  * the relation --- for example, due to shared-cache-inval messages being
1455  * processed during table_open(). This is OK. It's even possible for one
1456  * of those lookups to find and enter the very same tuple we are trying to
1457  * fetch here. If that happens, we will enter a second copy of the tuple
1458  * into the cache. The first copy will never be referenced again, and
1459  * will eventually age out of the cache, so there's no functional problem.
1460  * This case is rare enough that it's not worth expending extra cycles to
1461  * detect.
1462  *
1463  * Another case, which we *must* handle, is that the tuple could become
1464  * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
1465  * AcceptInvalidationMessages can run during TOAST table access). We do
1466  * not want to return already-stale catcache entries, so we loop around
1467  * and do the table scan again if that happens.
1468  */
1469  relation = table_open(cache->cc_reloid, AccessShareLock);
1470 
1471  do
1472  {
1473  /*
1474  * Ok, need to make a lookup in the relation, copy the scankey and
1475  * fill out any per-call fields. (We must re-do this when retrying,
1476  * because systable_beginscan scribbles on the scankey.)
1477  */
1478  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1479  cur_skey[0].sk_argument = v1;
1480  cur_skey[1].sk_argument = v2;
1481  cur_skey[2].sk_argument = v3;
1482  cur_skey[3].sk_argument = v4;
1483 
1484  scandesc = systable_beginscan(relation,
1485  cache->cc_indexoid,
1486  IndexScanOK(cache, cur_skey),
1487  NULL,
1488  nkeys,
1489  cur_skey);
1490 
1491  ct = NULL;
1492  stale = false;
1493 
1494  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1495  {
1496  ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
1497  hashValue, hashIndex);
1498  /* upon failure, we must start the scan over */
1499  if (ct == NULL)
1500  {
1501  stale = true;
1502  break;
1503  }
1504  /* immediately set the refcount to 1 */
1506  ct->refcount++;
1508  break; /* assume only one match */
1509  }
1510 
1511  systable_endscan(scandesc);
1512  } while (stale);
1513 
1514  table_close(relation, AccessShareLock);
1515 
1516  /*
1517  * If tuple was not found, we need to build a negative cache entry
1518  * containing a fake tuple. The fake tuple has the correct key columns,
1519  * but nulls everywhere else.
1520  *
1521  * In bootstrap mode, we don't build negative entries, because the cache
1522  * invalidation mechanism isn't alive and can't clear them if the tuple
1523  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1524  * cache inval for that.)
1525  */
1526  if (ct == NULL)
1527  {
1529  return NULL;
1530 
1531  ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
1532  hashValue, hashIndex);
1533 
1534  /* Creating a negative cache entry shouldn't fail */
1535  Assert(ct != NULL);
1536 
1537  CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1538  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1539  CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1540  cache->cc_relname, hashIndex);
1541 
1542  /*
1543  * We are not returning the negative entry to the caller, so leave its
1544  * refcount zero.
1545  */
1546 
1547  return NULL;
1548  }
1549 
1550  CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1551  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1552  CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1553  cache->cc_relname, hashIndex);
1554 
1555 #ifdef CATCACHE_STATS
1556  cache->cc_newloads++;
1557 #endif
1558 
1559  return &ct->tuple;
1560 }
1561 
1562 /*
1563  * ReleaseCatCache
1564  *
1565  * Decrement the reference count of a catcache entry (releasing the
1566  * hold grabbed by a successful SearchCatCache).
1567  *
1568  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1569  * will be freed as soon as their refcount goes to zero. In combination
1570  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1571  * to catch references to already-released catcache entries.
1572  */
1573 void
1575 {
1577 }
1578 
1579 static void
1581 {
1582  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1583  offsetof(CatCTup, tuple));
1584 
1585  /* Safety checks to ensure we were handed a cache entry */
1586  Assert(ct->ct_magic == CT_MAGIC);
1587  Assert(ct->refcount > 0);
1588 
1589  ct->refcount--;
1590  if (resowner)
1592 
1593  if (
1594 #ifndef CATCACHE_FORCE_RELEASE
1595  ct->dead &&
1596 #endif
1597  ct->refcount == 0 &&
1598  (ct->c_list == NULL || ct->c_list->refcount == 0))
1599  CatCacheRemoveCTup(ct->my_cache, ct);
1600 }
1601 
1602 
1603 /*
1604  * GetCatCacheHashValue
1605  *
1606  * Compute the hash value for a given set of search keys.
1607  *
1608  * The reason for exposing this as part of the API is that the hash value is
1609  * exposed in cache invalidation operations, so there are places outside the
1610  * catcache code that need to be able to compute the hash values.
1611  */
1612 uint32
1614  Datum v1,
1615  Datum v2,
1616  Datum v3,
1617  Datum v4)
1618 {
1619  /*
1620  * one-time startup overhead for each cache
1621  */
1622  if (cache->cc_tupdesc == NULL)
1624 
1625  /*
1626  * calculate the hash value
1627  */
1628  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1629 }
1630 
1631 
1632 /*
1633  * SearchCatCacheList
1634  *
1635  * Generate a list of all tuples matching a partial key (that is,
1636  * a key specifying just the first K of the cache's N key columns).
1637  *
1638  * It doesn't make any sense to specify all of the cache's key columns
1639  * here: since the key is unique, there could be at most one match, so
1640  * you ought to use SearchCatCache() instead. Hence this function takes
1641  * one fewer Datum argument than SearchCatCache() does.
1642  *
1643  * The caller must not modify the list object or the pointed-to tuples,
1644  * and must call ReleaseCatCacheList() when done with the list.
1645  */
1646 CatCList *
1648  int nkeys,
1649  Datum v1,
1650  Datum v2,
1651  Datum v3)
1652 {
1653  Datum v4 = 0; /* dummy last-column value */
1655  uint32 lHashValue;
1656  Index lHashIndex;
1657  dlist_iter iter;
1658  dlist_head *lbucket;
1659  CatCList *cl;
1660  CatCTup *ct;
1661  List *volatile ctlist;
1662  ListCell *ctlist_item;
1663  int nmembers;
1664  bool ordered;
1665  HeapTuple ntp;
1666  MemoryContext oldcxt;
1667  int i;
1668 
1669  /*
1670  * one-time startup overhead for each cache
1671  */
1672  if (unlikely(cache->cc_tupdesc == NULL))
1674 
1675  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1676 
1677 #ifdef CATCACHE_STATS
1678  cache->cc_lsearches++;
1679 #endif
1680 
1681  /* Initialize local parameter array */
1682  arguments[0] = v1;
1683  arguments[1] = v2;
1684  arguments[2] = v3;
1685  arguments[3] = v4;
1686 
1687  /*
1688  * If we haven't previously done a list search in this cache, create the
1689  * bucket header array; otherwise, consider whether it's time to enlarge
1690  * it.
1691  */
1692  if (cache->cc_lbucket == NULL)
1693  {
1694  /* Arbitrary initial size --- must be a power of 2 */
1695  int nbuckets = 16;
1696 
1697  cache->cc_lbucket = (dlist_head *)
1699  nbuckets * sizeof(dlist_head));
1700  /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1701  cache->cc_nlbuckets = nbuckets;
1702  }
1703  else
1704  {
1705  /*
1706  * If the hash table has become too full, enlarge the buckets array.
1707  * Quite arbitrarily, we enlarge when fill factor > 2.
1708  */
1709  if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1710  RehashCatCacheLists(cache);
1711  }
1712 
1713  /*
1714  * Find the hash bucket in which to look for the CatCList.
1715  */
1716  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1717  lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1718 
1719  /*
1720  * scan the items until we find a match or exhaust our list
1721  *
1722  * Note: it's okay to use dlist_foreach here, even though we modify the
1723  * dlist within the loop, because we don't continue the loop afterwards.
1724  */
1725  lbucket = &cache->cc_lbucket[lHashIndex];
1726  dlist_foreach(iter, lbucket)
1727  {
1728  cl = dlist_container(CatCList, cache_elem, iter.cur);
1729 
1730  if (cl->dead)
1731  continue; /* ignore dead entries */
1732 
1733  if (cl->hash_value != lHashValue)
1734  continue; /* quickly skip entry if wrong hash val */
1735 
1736  /*
1737  * see if the cached list matches our key.
1738  */
1739  if (cl->nkeys != nkeys)
1740  continue;
1741 
1742  if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1743  continue;
1744 
1745  /*
1746  * We found a matching list. Move the list to the front of the list
1747  * for its hashbucket, so as to speed subsequent searches. (We do not
1748  * move the members to the fronts of their hashbucket lists, however,
1749  * since there's no point in that unless they are searched for
1750  * individually.)
1751  */
1752  dlist_move_head(lbucket, &cl->cache_elem);
1753 
1754  /* Bump the list's refcount and return it */
1756  cl->refcount++;
1758 
1759  CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1760  cache->cc_relname);
1761 
1762 #ifdef CATCACHE_STATS
1763  cache->cc_lhits++;
1764 #endif
1765 
1766  return cl;
1767  }
1768 
1769  /*
1770  * List was not found in cache, so we have to build it by reading the
1771  * relation. For each matching tuple found in the relation, use an
1772  * existing cache entry if possible, else build a new one.
1773  *
1774  * We have to bump the member refcounts temporarily to ensure they won't
1775  * get dropped from the cache while loading other members. We use a PG_TRY
1776  * block to ensure we can undo those refcounts if we get an error before
1777  * we finish constructing the CatCList. ctlist must be valid throughout
1778  * the PG_TRY block.
1779  */
1780  ctlist = NIL;
1781 
1782  PG_TRY();
1783  {
1784  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1785  Relation relation;
1786  SysScanDesc scandesc;
1787  bool stale;
1788 
1789  relation = table_open(cache->cc_reloid, AccessShareLock);
1790 
1791  do
1792  {
1793  /*
1794  * Ok, need to make a lookup in the relation, copy the scankey and
1795  * fill out any per-call fields. (We must re-do this when
1796  * retrying, because systable_beginscan scribbles on the scankey.)
1797  */
1798  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1799  cur_skey[0].sk_argument = v1;
1800  cur_skey[1].sk_argument = v2;
1801  cur_skey[2].sk_argument = v3;
1802  cur_skey[3].sk_argument = v4;
1803 
1804  scandesc = systable_beginscan(relation,
1805  cache->cc_indexoid,
1806  IndexScanOK(cache, cur_skey),
1807  NULL,
1808  nkeys,
1809  cur_skey);
1810 
1811  /* The list will be ordered iff we are doing an index scan */
1812  ordered = (scandesc->irel != NULL);
1813 
1814  stale = false;
1815 
1816  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1817  {
1818  uint32 hashValue;
1819  Index hashIndex;
1820  bool found = false;
1821  dlist_head *bucket;
1822 
1823  /*
1824  * See if there's an entry for this tuple already.
1825  */
1826  ct = NULL;
1827  hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1828  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1829 
1830  bucket = &cache->cc_bucket[hashIndex];
1831  dlist_foreach(iter, bucket)
1832  {
1833  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1834 
1835  if (ct->dead || ct->negative)
1836  continue; /* ignore dead and negative entries */
1837 
1838  if (ct->hash_value != hashValue)
1839  continue; /* quickly skip entry if wrong hash val */
1840 
1841  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1842  continue; /* not same tuple */
1843 
1844  /*
1845  * Found a match, but can't use it if it belongs to
1846  * another list already
1847  */
1848  if (ct->c_list)
1849  continue;
1850 
1851  found = true;
1852  break; /* A-OK */
1853  }
1854 
1855  if (!found)
1856  {
1857  /* We didn't find a usable entry, so make a new one */
1858  ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
1859  hashValue, hashIndex);
1860  /* upon failure, we must start the scan over */
1861  if (ct == NULL)
1862  {
1863  /*
1864  * Release refcounts on any items we already had. We
1865  * dare not try to free them if they're now
1866  * unreferenced, since an error while doing that would
1867  * result in the PG_CATCH below doing extra refcount
1868  * decrements. Besides, we'll likely re-adopt those
1869  * items in the next iteration, so it's not worth
1870  * complicating matters to try to get rid of them.
1871  */
1872  foreach(ctlist_item, ctlist)
1873  {
1874  ct = (CatCTup *) lfirst(ctlist_item);
1875  Assert(ct->c_list == NULL);
1876  Assert(ct->refcount > 0);
1877  ct->refcount--;
1878  }
1879  /* Reset ctlist in preparation for new try */
1880  ctlist = NIL;
1881  stale = true;
1882  break;
1883  }
1884  }
1885 
1886  /* Careful here: add entry to ctlist, then bump its refcount */
1887  /* This way leaves state correct if lappend runs out of memory */
1888  ctlist = lappend(ctlist, ct);
1889  ct->refcount++;
1890  }
1891 
1892  systable_endscan(scandesc);
1893  } while (stale);
1894 
1895  table_close(relation, AccessShareLock);
1896 
1897  /* Make sure the resource owner has room to remember this entry. */
1899 
1900  /* Now we can build the CatCList entry. */
1902  nmembers = list_length(ctlist);
1903  cl = (CatCList *)
1904  palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1905 
1906  /* Extract key values */
1907  CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
1908  arguments, cl->keys);
1909  MemoryContextSwitchTo(oldcxt);
1910 
1911  /*
1912  * We are now past the last thing that could trigger an elog before we
1913  * have finished building the CatCList and remembering it in the
1914  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1915  * we'd better do so before we start marking the members as belonging
1916  * to the list.
1917  */
1918  }
1919  PG_CATCH();
1920  {
1921  foreach(ctlist_item, ctlist)
1922  {
1923  ct = (CatCTup *) lfirst(ctlist_item);
1924  Assert(ct->c_list == NULL);
1925  Assert(ct->refcount > 0);
1926  ct->refcount--;
1927  if (
1928 #ifndef CATCACHE_FORCE_RELEASE
1929  ct->dead &&
1930 #endif
1931  ct->refcount == 0 &&
1932  (ct->c_list == NULL || ct->c_list->refcount == 0))
1933  CatCacheRemoveCTup(cache, ct);
1934  }
1935 
1936  PG_RE_THROW();
1937  }
1938  PG_END_TRY();
1939 
1940  cl->cl_magic = CL_MAGIC;
1941  cl->my_cache = cache;
1942  cl->refcount = 0; /* for the moment */
1943  cl->dead = false;
1944  cl->ordered = ordered;
1945  cl->nkeys = nkeys;
1946  cl->hash_value = lHashValue;
1947  cl->n_members = nmembers;
1948 
1949  i = 0;
1950  foreach(ctlist_item, ctlist)
1951  {
1952  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1953  Assert(ct->c_list == NULL);
1954  ct->c_list = cl;
1955  /* release the temporary refcount on the member */
1956  Assert(ct->refcount > 0);
1957  ct->refcount--;
1958  /* mark list dead if any members already dead */
1959  if (ct->dead)
1960  cl->dead = true;
1961  }
1962  Assert(i == nmembers);
1963 
1964  /*
1965  * Add the CatCList to the appropriate bucket, and count it.
1966  */
1967  dlist_push_head(lbucket, &cl->cache_elem);
1968 
1969  cache->cc_nlist++;
1970 
1971  /* Finally, bump the list's refcount and return it */
1972  cl->refcount++;
1974 
1975  CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1976  cache->cc_relname, nmembers);
1977 
1978  return cl;
1979 }
1980 
1981 /*
1982  * ReleaseCatCacheList
1983  *
1984  * Decrement the reference count of a catcache list.
1985  */
1986 void
1988 {
1990 }
1991 
1992 static void
1994 {
1995  /* Safety checks to ensure we were handed a cache entry */
1996  Assert(list->cl_magic == CL_MAGIC);
1997  Assert(list->refcount > 0);
1998  list->refcount--;
1999  if (resowner)
2001 
2002  if (
2003 #ifndef CATCACHE_FORCE_RELEASE
2004  list->dead &&
2005 #endif
2006  list->refcount == 0)
2007  CatCacheRemoveCList(list->my_cache, list);
2008 }
2009 
2010 
2011 /*
2012  * equalTuple
2013  * Are these tuples memcmp()-equal?
2014  */
2015 static bool
2017 {
2018  uint32 alen;
2019  uint32 blen;
2020 
2021  alen = a->t_len;
2022  blen = b->t_len;
2023  return (alen == blen &&
2024  memcmp((char *) a->t_data,
2025  (char *) b->t_data, blen) == 0);
2026 }
2027 
2028 /*
2029  * CatalogCacheCreateEntry
2030  * Create a new CatCTup entry, copying the given HeapTuple and other
2031  * supplied data into it. The new entry initially has refcount 0.
2032  *
2033  * To create a normal cache entry, ntp must be the HeapTuple just fetched
2034  * from scandesc, and "arguments" is not used. To create a negative cache
2035  * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
2036  * keys to use. In either case, hashValue/hashIndex are the hash values
2037  * computed from the cache keys.
2038  *
2039  * Returns NULL if we attempt to detoast the tuple and observe that it
2040  * became stale. (This cannot happen for a negative entry.) Caller must
2041  * retry the tuple lookup in that case.
2042  */
2043 static CatCTup *
2045  Datum *arguments,
2046  uint32 hashValue, Index hashIndex)
2047 {
2048  CatCTup *ct;
2049  HeapTuple dtp;
2050  MemoryContext oldcxt;
2051 
2052  if (ntp)
2053  {
2054  int i;
2055 
2056  /*
2057  * The visibility recheck below essentially never fails during our
2058  * regression tests, and there's no easy way to force it to fail for
2059  * testing purposes. To ensure we have test coverage for the retry
2060  * paths in our callers, make debug builds randomly fail about 0.1% of
2061  * the times through this code path, even when there's no toasted
2062  * fields.
2063  */
2064 #ifdef USE_ASSERT_CHECKING
2066  return NULL;
2067 #endif
2068 
2069  /*
2070  * If there are any out-of-line toasted fields in the tuple, expand
2071  * them in-line. This saves cycles during later use of the catcache
2072  * entry, and also protects us against the possibility of the toast
2073  * tuples being freed before we attempt to fetch them, in case of
2074  * something using a slightly stale catcache entry.
2075  */
2076  if (HeapTupleHasExternal(ntp))
2077  {
2078  bool need_cmp = IsInplaceUpdateOid(cache->cc_reloid);
2079  HeapTuple before = NULL;
2080  bool matches = true;
2081 
2082  if (need_cmp)
2083  before = heap_copytuple(ntp);
2084  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
2085 
2086  /*
2087  * The tuple could become stale while we are doing toast table
2088  * access (since AcceptInvalidationMessages can run then).
2089  * equalTuple() detects staleness from inplace updates, while
2090  * systable_recheck_tuple() detects staleness from normal updates.
2091  *
2092  * While this equalTuple() follows the usual rule of reading with
2093  * a pin and no buffer lock, it warrants suspicion since an
2094  * inplace update could appear at any moment. It's safe because
2095  * the inplace update sends an invalidation that can't reorder
2096  * before the inplace heap change. If the heap change reaches
2097  * this process just after equalTuple() looks, we've not missed
2098  * its inval.
2099  */
2100  if (need_cmp)
2101  {
2102  matches = equalTuple(before, ntp);
2104  }
2105  if (!matches || !systable_recheck_tuple(scandesc, ntp))
2106  {
2107  heap_freetuple(dtp);
2108  return NULL;
2109  }
2110  }
2111  else
2112  dtp = ntp;
2113 
2114  /* Allocate memory for CatCTup and the cached tuple in one go */
2116 
2117  ct = (CatCTup *) palloc(sizeof(CatCTup) +
2118  MAXIMUM_ALIGNOF + dtp->t_len);
2119  ct->tuple.t_len = dtp->t_len;
2120  ct->tuple.t_self = dtp->t_self;
2121  ct->tuple.t_tableOid = dtp->t_tableOid;
2122  ct->tuple.t_data = (HeapTupleHeader)
2123  MAXALIGN(((char *) ct) + sizeof(CatCTup));
2124  /* copy tuple contents */
2125  memcpy((char *) ct->tuple.t_data,
2126  (const char *) dtp->t_data,
2127  dtp->t_len);
2128  MemoryContextSwitchTo(oldcxt);
2129 
2130  if (dtp != ntp)
2131  heap_freetuple(dtp);
2132 
2133  /* extract keys - they'll point into the tuple if not by-value */
2134  for (i = 0; i < cache->cc_nkeys; i++)
2135  {
2136  Datum atp;
2137  bool isnull;
2138 
2139  atp = heap_getattr(&ct->tuple,
2140  cache->cc_keyno[i],
2141  cache->cc_tupdesc,
2142  &isnull);
2143  Assert(!isnull);
2144  ct->keys[i] = atp;
2145  }
2146  }
2147  else
2148  {
2149  /* Set up keys for a negative cache entry */
2151  ct = (CatCTup *) palloc(sizeof(CatCTup));
2152 
2153  /*
2154  * Store keys - they'll point into separately allocated memory if not
2155  * by-value.
2156  */
2157  CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
2158  arguments, ct->keys);
2159  MemoryContextSwitchTo(oldcxt);
2160  }
2161 
2162  /*
2163  * Finish initializing the CatCTup header, and add it to the cache's
2164  * linked list and counts.
2165  */
2166  ct->ct_magic = CT_MAGIC;
2167  ct->my_cache = cache;
2168  ct->c_list = NULL;
2169  ct->refcount = 0; /* for the moment */
2170  ct->dead = false;
2171  ct->negative = (ntp == NULL);
2172  ct->hash_value = hashValue;
2173 
2174  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
2175 
2176  cache->cc_ntup++;
2177  CacheHdr->ch_ntup++;
2178 
2179  /*
2180  * If the hash table has become too full, enlarge the buckets array. Quite
2181  * arbitrarily, we enlarge when fill factor > 2.
2182  */
2183  if (cache->cc_ntup > cache->cc_nbuckets * 2)
2184  RehashCatCache(cache);
2185 
2186  return ct;
2187 }
2188 
2189 /*
2190  * Helper routine that frees keys stored in the keys array.
2191  */
2192 static void
2193 CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2194 {
2195  int i;
2196 
2197  for (i = 0; i < nkeys; i++)
2198  {
2199  int attnum = attnos[i];
2200  Form_pg_attribute att;
2201 
2202  /* system attribute are not supported in caches */
2203  Assert(attnum > 0);
2204 
2205  att = TupleDescAttr(tupdesc, attnum - 1);
2206 
2207  if (!att->attbyval)
2208  pfree(DatumGetPointer(keys[i]));
2209  }
2210 }
2211 
2212 /*
2213  * Helper routine that copies the keys in the srckeys array into the dstkeys
2214  * one, guaranteeing that the datums are fully allocated in the current memory
2215  * context.
2216  */
2217 static void
2218 CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2219  Datum *srckeys, Datum *dstkeys)
2220 {
2221  int i;
2222 
2223  /*
2224  * XXX: memory and lookup performance could possibly be improved by
2225  * storing all keys in one allocation.
2226  */
2227 
2228  for (i = 0; i < nkeys; i++)
2229  {
2230  int attnum = attnos[i];
2231  Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2232  Datum src = srckeys[i];
2233  NameData srcname;
2234 
2235  /*
2236  * Must be careful in case the caller passed a C string where a NAME
2237  * is wanted: convert the given argument to a correctly padded NAME.
2238  * Otherwise the memcpy() done by datumCopy() could fall off the end
2239  * of memory.
2240  */
2241  if (att->atttypid == NAMEOID)
2242  {
2243  namestrcpy(&srcname, DatumGetCString(src));
2244  src = NameGetDatum(&srcname);
2245  }
2246 
2247  dstkeys[i] = datumCopy(src,
2248  att->attbyval,
2249  att->attlen);
2250  }
2251 }
2252 
2253 /*
2254  * PrepareToInvalidateCacheTuple()
2255  *
2256  * This is part of a rather subtle chain of events, so pay attention:
2257  *
2258  * When a tuple is inserted or deleted, it cannot be flushed from the
2259  * catcaches immediately, for reasons explained at the top of cache/inval.c.
2260  * Instead we have to add entry(s) for the tuple to a list of pending tuple
2261  * invalidations that will be done at the end of the command or transaction.
2262  *
2263  * The lists of tuples that need to be flushed are kept by inval.c. This
2264  * routine is a helper routine for inval.c. Given a tuple belonging to
2265  * the specified relation, find all catcaches it could be in, compute the
2266  * correct hash value for each such catcache, and call the specified
2267  * function to record the cache id and hash value in inval.c's lists.
2268  * SysCacheInvalidate will be called later, if appropriate,
2269  * using the recorded information.
2270  *
2271  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2272  * For an update, we are called just once, with tuple being the old tuple
2273  * version and newtuple the new version. We should make two list entries
2274  * if the tuple's hash value changed, but only one if it didn't.
2275  *
2276  * Note that it is irrelevant whether the given tuple is actually loaded
2277  * into the catcache at the moment. Even if it's not there now, it might
2278  * be by the end of the command, or there might be a matching negative entry
2279  * to flush --- or other backends' caches might have such entries --- so
2280  * we have to make list entries to flush it later.
2281  *
2282  * Also note that it's not an error if there are no catcaches for the
2283  * specified relation. inval.c doesn't know exactly which rels have
2284  * catcaches --- it will call this routine for any tuple that's in a
2285  * system relation.
2286  */
2287 void
2289  HeapTuple tuple,
2290  HeapTuple newtuple,
2291  void (*function) (int, uint32, Oid))
2292 {
2293  slist_iter iter;
2294  Oid reloid;
2295 
2296  CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2297 
2298  /*
2299  * sanity checks
2300  */
2301  Assert(RelationIsValid(relation));
2302  Assert(HeapTupleIsValid(tuple));
2303  Assert(PointerIsValid(function));
2304  Assert(CacheHdr != NULL);
2305 
2306  reloid = RelationGetRelid(relation);
2307 
2308  /* ----------------
2309  * for each cache
2310  * if the cache contains tuples from the specified relation
2311  * compute the tuple's hash value(s) in this cache,
2312  * and call the passed function to register the information.
2313  * ----------------
2314  */
2315 
2317  {
2318  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2319  uint32 hashvalue;
2320  Oid dbid;
2321 
2322  if (ccp->cc_reloid != reloid)
2323  continue;
2324 
2325  /* Just in case cache hasn't finished initialization yet... */
2326  if (ccp->cc_tupdesc == NULL)
2328 
2329  hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2330  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2331 
2332  (*function) (ccp->id, hashvalue, dbid);
2333 
2334  if (newtuple)
2335  {
2336  uint32 newhashvalue;
2337 
2338  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2339 
2340  if (newhashvalue != hashvalue)
2341  (*function) (ccp->id, newhashvalue, dbid);
2342  }
2343  }
2344 }
2345 
2346 /* ResourceOwner callbacks */
2347 
2348 static void
2350 {
2352 }
2353 
2354 static char *
2356 {
2358  CatCTup *ct = (CatCTup *) (((char *) tuple) -
2359  offsetof(CatCTup, tuple));
2360 
2361  /* Safety check to ensure we were handed a cache entry */
2362  Assert(ct->ct_magic == CT_MAGIC);
2363 
2364  return psprintf("cache %s (%d), tuple %u/%u has count %d",
2365  ct->my_cache->cc_relname, ct->my_cache->id,
2366  ItemPointerGetBlockNumber(&(tuple->t_self)),
2368  ct->refcount);
2369 }
2370 
2371 static void
2373 {
2375 }
2376 
2377 static char *
2379 {
2381 
2382  return psprintf("cache %s (%d), list %p has count %d",
2383  list->my_cache->cc_relname, list->my_cache->id,
2384  list, list->refcount);
2385 }
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
#define NameStr(name)
Definition: c.h:746
unsigned int uint32
Definition: c.h:506
#define pg_noinline
Definition: c.h:250
#define MAXALIGN(LEN)
Definition: c.h:811
#define PG_UINT32_MAX
Definition: c.h:590
signed int int32
Definition: c.h:494
#define Assert(condition)
Definition: c.h:858
#define PointerIsValid(pointer)
Definition: c.h:763
regproc RegProcedure
Definition: c.h:650
#define unlikely(x)
Definition: c.h:311
unsigned int Index
Definition: c.h:614
bool IsInplaceUpdateOid(Oid relid)
Definition: catalog.c:153
static bool chareqfast(Datum a, Datum b)
Definition: catcache.c:173
HeapTuple SearchCatCache2(CatCache *cache, Datum v1, Datum v2)
Definition: catcache.c:1287
static bool int4eqfast(Datum a, Datum b)
Definition: catcache.c:214
HeapTuple SearchCatCache3(CatCache *cache, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1295
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:1987
static bool equalTuple(HeapTuple a, HeapTuple b)
Definition: catcache.c:2016
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:1036
static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache, int nkeys, uint32 hashValue, Index hashIndex, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1425
static bool int2eqfast(Datum a, Datum b)
Definition: catcache.c:202
static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
Definition: catcache.c:1580
static uint32 int4hashfast(Datum datum)
Definition: catcache.c:220
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:1145
void ResetCatalogCaches(void)
Definition: catcache.c:754
static void ResOwnerReleaseCatCache(Datum res)
Definition: catcache.c:2349
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1613
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:510
static char * ResOwnerPrintCatCache(Datum res)
Definition: catcache.c:2355
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:935
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
Definition: catcache.c:368
HeapTuple SearchCatCache4(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1303
static void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: catcache.c:156
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc, Datum *arguments, uint32 hashValue, Index hashIndex)
Definition: catcache.c:2044
static const ResourceOwnerDesc catcache_resowner_desc
Definition: catcache.c:119
static void ResOwnerReleaseCatCacheList(Datum res)
Definition: catcache.c:2372
static void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: catcache.c:141
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:1031
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *srckeys, Datum *dstkeys)
Definition: catcache.c:2218
static HeapTuple SearchCatCacheInternal(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1313
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:828
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1647
static char * ResOwnerPrintCatCacheList(Datum res)
Definition: catcache.c:2378
static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
Definition: catcache.c:1993
static CatCacheHeader * CacheHdr
Definition: catcache.c:65
static uint32 namehashfast(Datum datum)
Definition: catcache.c:194
void CreateCacheMemoryContext(void)
Definition: catcache.c:680
static void ResetCatalogCache(CatCache *cache)
Definition: catcache.c:702
static const ResourceOwnerDesc catlistref_resowner_desc
Definition: catcache.c:129
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:2288
static void GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
Definition: catcache.c:256
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:326
static bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys, const Datum *cachekeys, const Datum *searchkeys)
Definition: catcache.c:423
void CatCacheInvalidate(CatCache *cache, uint32 hashValue)
Definition: catcache.c:607
static void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: catcache.c:146
static bool nameeqfast(Datum a, Datum b)
Definition: catcache.c:185
static uint32 charhashfast(Datum datum)
Definition: catcache.c:179
static void RehashCatCacheLists(CatCache *cp)
Definition: catcache.c:973
HeapTuple SearchCatCache1(CatCache *cache, Datum v1)
Definition: catcache.c:1279
#define InitCatCache_DEBUG2
Definition: catcache.c:824
static uint32 oidvectorhashfast(Datum datum)
Definition: catcache.c:249
static void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: catcache.c:151
static bool texteqfast(Datum a, Datum b)
Definition: catcache.c:226
#define CACHE_elog(...)
Definition: catcache.c:61
static bool oidvectoreqfast(Datum a, Datum b)
Definition: catcache.c:243
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:784
static uint32 int2hashfast(Datum datum)
Definition: catcache.c:208
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:1032
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
Definition: catcache.c:2193
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:552
#define HASH_INDEX(h, sz)
Definition: catcache.c:51
static bool IndexScanOK(CatCache *cache, ScanKey cur_skey)
Definition: catcache.c:1197
static uint32 texthashfast(Datum datum)
Definition: catcache.c:236
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1574
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1262
#define CT_MAGIC
Definition: catcache.h:91
uint32(* CCHashFN)(Datum datum)
Definition: catcache.h:39
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
bool(* CCFastEqualFN)(Datum a, Datum b)
Definition: catcache.h:42
#define CL_MAGIC
Definition: catcache.h:162
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
#define PG_RE_THROW()
Definition: elog.h:411
#define FATAL
Definition: elog.h:41
#define PG_TRY(...)
Definition: elog.h:370
#define DEBUG2
Definition: elog.h:29
#define PG_END_TRY(...)
Definition: elog.h:395
#define DEBUG1
Definition: elog.h:30
#define PG_CATCH(...)
Definition: elog.h:380
#define elog(elevel,...)
Definition: elog.h:224
#define MCXT_ALLOC_ZERO
Definition: fe_memutils.h:18
Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:812
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1)
Definition: fmgr.c:792
#define DirectFunctionCall2(func, arg1, arg2)
Definition: fmgr.h:644
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:642
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:596
bool systable_recheck_tuple(SysScanDesc sysscan, HeapTuple tup)
Definition: genam.c:562
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:503
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:384
Oid MyDatabaseId
Definition: globals.c:92
static uint32 murmurhash32(uint32 data)
Definition: hashfn.h:92
static Datum hash_any(const unsigned char *k, int keylen)
Definition: hashfn.h:31
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:232
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:267
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:776
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1434
HeapTupleData * HeapTuple
Definition: htup.h:71
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:792
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:671
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:749
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void slist_init(slist_head *head)
Definition: ilist.h:986
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:1006
#define slist_container(type, membername, ptr)
Definition: ilist.h:1106
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:467
#define slist_foreach(iter, lhead)
Definition: ilist.h:1132
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1577
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:309
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int i
Definition: isn.c:73
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:35
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
List * lappend(List *list, void *datum)
Definition: list.c:339
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:227
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:108
#define AccessShareLock
Definition: lockdefs.h:36
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void pfree(void *pointer)
Definition: mcxt.c:1521
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * palloc0(Size size)
Definition: mcxt.c:1347
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1215
void * palloc_aligned(Size size, Size alignto, int flags)
Definition: mcxt.c:1511
MemoryContext CacheMemoryContext
Definition: mcxt.c:152
void * palloc(Size size)
Definition: mcxt.c:1317
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:454
void namestrcpy(Name name, const char *str)
Definition: name.c:233
Datum oidvectoreq(PG_FUNCTION_ARGS)
Definition: oid.c:344
int16 attnum
Definition: pg_attribute.h:74
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
void * arg
static uint32 pg_rotate_left32(uint32 word, int n)
Definition: pg_bitutils.h:404
#define NAMEDATALEN
#define PG_CACHE_LINE_SIZE
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
uint32 pg_prng_uint32(pg_prng_state *state)
Definition: pg_prng.c:227
pg_prng_state pg_global_prng_state
Definition: pg_prng.c:34
static bool DatumGetBool(Datum X)
Definition: postgres.h:90
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
static Name DatumGetName(Datum X)
Definition: postgres.h:360
static char * DatumGetCString(Datum X)
Definition: postgres.h:335
uintptr_t Datum
Definition: postgres.h:64
static Datum NameGetDatum(const NameData *X)
Definition: postgres.h:373
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
static char DatumGetChar(Datum X)
Definition: postgres.h:112
static int16 DatumGetInt16(Datum X)
Definition: postgres.h:162
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:202
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
MemoryContextSwitchTo(old_ctx)
static int before(chr x, chr y)
Definition: regc_locale.c:488
#define RelationGetForm(relation)
Definition: rel.h:499
#define RelationGetRelid(relation)
Definition: rel.h:505
#define RelationGetDescr(relation)
Definition: rel.h:531
#define RelationGetRelationName(relation)
Definition: rel.h:539
#define RelationIsValid(relation)
Definition: rel.h:478
bool criticalRelcachesBuilt
Definition: relcache.c:140
bool criticalSharedRelcachesBuilt
Definition: relcache.c:146
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerForget(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition: resowner.c:554
void ResourceOwnerRemember(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition: resowner.c:514
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442
#define RELEASE_PRIO_CATCACHE_LIST_REFS
Definition: resowner.h:72
@ RESOURCE_RELEASE_AFTER_LOCKS
Definition: resowner.h:56
#define RELEASE_PRIO_CATCACHE_REFS
Definition: resowner.h:71
#define BTEqualStrategyNumber
Definition: stratnum.h:31
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
Definition: pg_list.h:54
Form_pg_index rd_index
Definition: rel.h:192
const char * name
Definition: resowner.h:93
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_subtype
Definition: skey.h:69
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67
Relation irel
Definition: relscan.h:184
const char * cc_relname
Definition: catcache.h:59
CCHashFN cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
dlist_head * cc_bucket
Definition: catcache.h:49
slist_node cc_next
Definition: catcache.h:63
Oid cc_reloid
Definition: catcache.h:60
int cc_nkeys
Definition: catcache.h:54
int cc_keyno[CATCACHE_MAXKEYS]
Definition: catcache.h:53
CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Oid cc_indexoid
Definition: catcache.h:61
int cc_nbuckets
Definition: catcache.h:47
bool cc_relisshared
Definition: catcache.h:62
int cc_ntup
Definition: catcache.h:55
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:64
int cc_nlist
Definition: catcache.h:56
int id
Definition: catcache.h:46
TupleDesc cc_tupdesc
Definition: catcache.h:48
int cc_nlbuckets
Definition: catcache.h:57
dlist_head * cc_lbucket
Definition: catcache.h:58
slist_head ch_caches
Definition: catcache.h:186
dlist_node cache_elem
Definition: catcache.h:166
int refcount
Definition: catcache.h:174
CatCache * my_cache
Definition: catcache.h:179
int cl_magic
Definition: catcache.h:161
bool dead
Definition: catcache.h:175
short nkeys
Definition: catcache.h:177
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:172
bool ordered
Definition: catcache.h:176
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:180
uint32 hash_value
Definition: catcache.h:164
int n_members
Definition: catcache.h:178
int ct_magic
Definition: catcache.h:90
int refcount
Definition: catcache.h:120
bool negative
Definition: catcache.h:122
dlist_node cache_elem
Definition: catcache.h:106
HeapTupleData tuple
Definition: catcache.h:123
CatCache * my_cache
Definition: catcache.h:134
struct catclist * c_list
Definition: catcache.h:132
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:99
bool dead
Definition: catcache.h:121
uint32 hash_value
Definition: catcache.h:93
dlist_node * cur
Definition: ilist.h:179
dlist_node * cur
Definition: ilist.h:200
Definition: c.h:741
slist_node * cur
Definition: ilist.h:259
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:173
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
Datum texteq(PG_FUNCTION_ARGS)
Definition: varlena.c:1619
bool IsTransactionState(void)
Definition: xact.c:385