catcache.c
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/tuptoaster.h"
23 #include "access/valid.h"
24 #include "access/xact.h"
25 #include "catalog/pg_operator.h"
26 #include "catalog/pg_type.h"
27 #include "miscadmin.h"
28 #ifdef CATCACHE_STATS
29 #include "storage/ipc.h" /* for on_proc_exit */
30 #endif
31 #include "storage/lmgr.h"
32 #include "utils/builtins.h"
33 #include "utils/datum.h"
34 #include "utils/fmgroids.h"
35 #include "utils/hashutils.h"
36 #include "utils/inval.h"
37 #include "utils/memutils.h"
38 #include "utils/rel.h"
39 #include "utils/resowner_private.h"
40 #include "utils/syscache.h"
41 #include "utils/tqual.h"
42 
43 
44  /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
45 
46 /*
47  * Given a hash value and the size of the hash table, find the bucket
48  * in which the hash value belongs. Since the hash table must contain
49  * a power-of-2 number of elements, this is a simple bitmask.
50  */
51 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
52 
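/*
 * Worked example (illustrative, not part of the upstream file): with a
 * 256-bucket table the mask keeps only the low-order 8 bits of the hash,
 * so HASH_INDEX(0xDEADBEEF, 256) == 0xEF.
 */
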
53 
54 /*
55  * variables, macros and other stuff
56  */
57 
58 #ifdef CACHEDEBUG
59 #define CACHE1_elog(a,b) elog(a,b)
60 #define CACHE2_elog(a,b,c) elog(a,b,c)
61 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
62 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
63 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
64 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
65 #else
66 #define CACHE1_elog(a,b)
67 #define CACHE2_elog(a,b,c)
68 #define CACHE3_elog(a,b,c,d)
69 #define CACHE4_elog(a,b,c,d,e)
70 #define CACHE5_elog(a,b,c,d,e,f)
71 #define CACHE6_elog(a,b,c,d,e,f,g)
72 #endif
73 
74 /* Cache management header --- pointer is NULL until created */
75 static CatCacheHeader *CacheHdr = NULL;
76 
77 static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
78  int nkeys,
79  Datum v1, Datum v2,
80  Datum v3, Datum v4);
81 
82 static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
83  int nkeys,
84  uint32 hashValue,
85  Index hashIndex,
86  Datum v1, Datum v2,
87  Datum v3, Datum v4);
88 
89 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
90  Datum v1, Datum v2, Datum v3, Datum v4);
91 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
92  HeapTuple tuple);
93 static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
94  const Datum *cachekeys,
95  const Datum *searchkeys);
96 
97 #ifdef CATCACHE_STATS
98 static void CatCachePrintStats(int code, Datum arg);
99 #endif
100 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
101 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
102 static void CatalogCacheInitializeCache(CatCache *cache);
103 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
104  Datum *arguments,
105  uint32 hashValue, Index hashIndex,
106  bool negative);
107 
108 static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
109  Datum *keys);
110 static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
111  Datum *srckeys, Datum *dstkeys);
112 
113 
114 /*
115  * internal support functions
116  */
117 
118 /*
119  * Hash and equality functions for system types that are used as cache key
120  * fields. In some cases, we just call the regular SQL-callable functions for
121  * the appropriate data type, but that tends to be a little slow, and the
122  * speed of these functions is performance-critical. Therefore, for data
123  * types that frequently occur as catcache keys, we hard-code the logic here.
124  * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
125  * in certain cases (like int4) we can adopt a faster hash algorithm as well.
126  */
127 
128 static bool
129 chareqfast(Datum a, Datum b)
130 {
131  return DatumGetChar(a) == DatumGetChar(b);
132 }
133 
134 static uint32
135 charhashfast(Datum datum)
136 {
137  return murmurhash32((int32) DatumGetChar(datum));
138 }
139 
140 static bool
141 nameeqfast(Datum a, Datum b)
142 {
143  char *ca = NameStr(*DatumGetName(a));
144  char *cb = NameStr(*DatumGetName(b));
145 
146  return strncmp(ca, cb, NAMEDATALEN) == 0;
147 }
148 
149 static uint32
150 namehashfast(Datum datum)
151 {
152  char *key = NameStr(*DatumGetName(datum));
153 
154  return hash_any((unsigned char *) key, strlen(key));
155 }
156 
157 static bool
158 int2eqfast(Datum a, Datum b)
159 {
160  return DatumGetInt16(a) == DatumGetInt16(b);
161 }
162 
163 static uint32
164 int2hashfast(Datum datum)
165 {
166  return murmurhash32((int32) DatumGetInt16(datum));
167 }
168 
169 static bool
170 int4eqfast(Datum a, Datum b)
171 {
172  return DatumGetInt32(a) == DatumGetInt32(b);
173 }
174 
175 static uint32
176 int4hashfast(Datum datum)
177 {
178  return murmurhash32((int32) DatumGetInt32(datum));
179 }
180 
181 static bool
182 texteqfast(Datum a, Datum b)
183 {
184  return DatumGetBool(DirectFunctionCall2(texteq, a, b));
185 }
186 
187 static uint32
188 texthashfast(Datum datum)
189 {
190  return DatumGetInt32(DirectFunctionCall1(hashtext, datum));
191 }
192 
193 static bool
194 oidvectoreqfast(Datum a, Datum b)
195 {
196  return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
197 }
198 
199 static uint32
200 oidvectorhashfast(Datum datum)
201 {
202  return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
203 }
204 
205 /* Lookup support functions for a type. */
206 static void
207 GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
208 {
209  switch (keytype)
210  {
211  case BOOLOID:
212  *hashfunc = charhashfast;
213  *fasteqfunc = chareqfast;
214  *eqfunc = F_BOOLEQ;
215  break;
216  case CHAROID:
217  *hashfunc = charhashfast;
218  *fasteqfunc = chareqfast;
219  *eqfunc = F_CHAREQ;
220  break;
221  case NAMEOID:
222  *hashfunc = namehashfast;
223  *fasteqfunc = nameeqfast;
224  *eqfunc = F_NAMEEQ;
225  break;
226  case INT2OID:
227  *hashfunc = int2hashfast;
228  *fasteqfunc = int2eqfast;
229  *eqfunc = F_INT2EQ;
230  break;
231  case INT4OID:
232  *hashfunc = int4hashfast;
233  *fasteqfunc = int4eqfast;
234  *eqfunc = F_INT4EQ;
235  break;
236  case TEXTOID:
237  *hashfunc = texthashfast;
238  *fasteqfunc = texteqfast;
239  *eqfunc = F_TEXTEQ;
240  break;
241  case OIDOID:
242  case REGPROCOID:
243  case REGPROCEDUREOID:
244  case REGOPEROID:
245  case REGOPERATOROID:
246  case REGCLASSOID:
247  case REGTYPEOID:
248  case REGCONFIGOID:
249  case REGDICTIONARYOID:
250  case REGROLEOID:
251  case REGNAMESPACEOID:
252  *hashfunc = int4hashfast;
253  *fasteqfunc = int4eqfast;
254  *eqfunc = F_OIDEQ;
255  break;
256  case OIDVECTOROID:
257  *hashfunc = oidvectorhashfast;
258  *fasteqfunc = oidvectoreqfast;
259  *eqfunc = F_OIDVECTOREQ;
260  break;
261  default:
262  elog(FATAL, "type %u not supported as catcache key", keytype);
263  *hashfunc = NULL; /* keep compiler quiet */
264 
265  *eqfunc = InvalidOid;
266  break;
267  }
268 }
269 
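/*
 * Illustrative sketch, not part of the upstream file: how a caller such as
 * CatalogCacheInitializeCache() below consumes this lookup for an int4 key
 * column. The function name example_int4_key_support is hypothetical.
 */
static void
example_int4_key_support(void)
{
	CCHashFN	hashfunc;
	RegProcedure eqfunc;
	CCFastEqualFN fasteqfunc;

	GetCCHashEqFuncs(INT4OID, &hashfunc, &eqfunc, &fasteqfunc);

	/* for INT4OID the fast paths and the SQL-callable equality resolve to: */
	Assert(hashfunc == int4hashfast);
	Assert(fasteqfunc == int4eqfast);
	Assert(eqfunc == F_INT4EQ);
}
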
270 /*
271  * CatalogCacheComputeHashValue
272  *
273  * Compute the hash value associated with a given set of lookup keys
274  */
275 static uint32
276 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
277  Datum v1, Datum v2, Datum v3, Datum v4)
278 {
279  uint32 hashValue = 0;
280  uint32 oneHash;
281  CCHashFN *cc_hashfunc = cache->cc_hashfunc;
282 
283  CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
284  cache->cc_relname,
285  nkeys,
286  cache);
287 
288  switch (nkeys)
289  {
290  case 4:
291  oneHash = (cc_hashfunc[3]) (v4);
292 
293  hashValue ^= oneHash << 24;
294  hashValue ^= oneHash >> 8;
295  /* FALLTHROUGH */
296  case 3:
297  oneHash = (cc_hashfunc[2]) (v3);
298 
299  hashValue ^= oneHash << 16;
300  hashValue ^= oneHash >> 16;
301  /* FALLTHROUGH */
302  case 2:
303  oneHash = (cc_hashfunc[1]) (v2);
304 
305  hashValue ^= oneHash << 8;
306  hashValue ^= oneHash >> 24;
307  /* FALLTHROUGH */
308  case 1:
309  oneHash = (cc_hashfunc[0]) (v1);
310 
311  hashValue ^= oneHash;
312  break;
313  default:
314  elog(FATAL, "wrong number of hash keys: %d", nkeys);
315  break;
316  }
317 
318  return hashValue;
319 }
320 
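/*
 * Illustrative sketch, not part of the upstream file: for a two-key cache
 * the switch above reduces to the combination below. Since (h2 << 8) and
 * (h2 >> 24) share no bits, the two XORs amount to rotating the second
 * key's hash left by 8 bits before folding in the first key's hash, so the
 * same value appearing in different key columns hashes differently.
 */
static inline uint32
example_combine_two_key_hashes(uint32 h1, uint32 h2)
{
	uint32		hashValue = 0;

	hashValue ^= h2 << 8;
	hashValue ^= h2 >> 24;
	hashValue ^= h1;
	return hashValue;
}
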
321 /*
322  * CatalogCacheComputeTupleHashValue
323  *
324  * Compute the hash value associated with a given tuple to be cached
325  */
326 static uint32
327 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
328 {
329  Datum v1 = 0,
330  v2 = 0,
331  v3 = 0,
332  v4 = 0;
333  bool isNull = false;
334  int *cc_keyno = cache->cc_keyno;
335  TupleDesc cc_tupdesc = cache->cc_tupdesc;
336 
337  /* Now extract key fields from tuple, insert into scankey */
338  switch (nkeys)
339  {
340  case 4:
341  v4 = (cc_keyno[3] == ObjectIdAttributeNumber)
342  ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
343  : fastgetattr(tuple,
344  cc_keyno[3],
345  cc_tupdesc,
346  &isNull);
347  Assert(!isNull);
348  /* FALLTHROUGH */
349  case 3:
350  v3 = (cc_keyno[2] == ObjectIdAttributeNumber)
351  ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
352  : fastgetattr(tuple,
353  cc_keyno[2],
354  cc_tupdesc,
355  &isNull);
356  Assert(!isNull);
357  /* FALLTHROUGH */
358  case 2:
359  v2 = (cc_keyno[1] == ObjectIdAttributeNumber)
360  ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
361  : fastgetattr(tuple,
362  cc_keyno[1],
363  cc_tupdesc,
364  &isNull);
365  Assert(!isNull);
366  /* FALLTHROUGH */
367  case 1:
368  v1 = (cc_keyno[0] == ObjectIdAttributeNumber)
369  ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
370  : fastgetattr(tuple,
371  cc_keyno[0],
372  cc_tupdesc,
373  &isNull);
374  Assert(!isNull);
375  break;
376  default:
377  elog(FATAL, "wrong number of hash keys: %d", nkeys);
378  break;
379  }
380 
381  return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
382 }
383 
384 /*
385  * CatalogCacheCompareTuple
386  *
387  * Compare a tuple to the passed arguments.
388  */
389 static inline bool
390 CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
391  const Datum *cachekeys,
392  const Datum *searchkeys)
393 {
394  const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
395  int i;
396 
397  for (i = 0; i < nkeys; i++)
398  {
399  if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
400  return false;
401  }
402  return true;
403 }
404 
405 
406 #ifdef CATCACHE_STATS
407 
408 static void
409 CatCachePrintStats(int code, Datum arg)
410 {
411  slist_iter iter;
412  long cc_searches = 0;
413  long cc_hits = 0;
414  long cc_neg_hits = 0;
415  long cc_newloads = 0;
416  long cc_invals = 0;
417  long cc_lsearches = 0;
418  long cc_lhits = 0;
419 
420  slist_foreach(iter, &CacheHdr->ch_caches)
421  {
422  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
423 
424  if (cache->cc_ntup == 0 && cache->cc_searches == 0)
425  continue; /* don't print unused caches */
426  elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
427  cache->cc_relname,
428  cache->cc_indexoid,
429  cache->cc_ntup,
430  cache->cc_searches,
431  cache->cc_hits,
432  cache->cc_neg_hits,
433  cache->cc_hits + cache->cc_neg_hits,
434  cache->cc_newloads,
435  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
436  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
437  cache->cc_invals,
438  cache->cc_lsearches,
439  cache->cc_lhits);
440  cc_searches += cache->cc_searches;
441  cc_hits += cache->cc_hits;
442  cc_neg_hits += cache->cc_neg_hits;
443  cc_newloads += cache->cc_newloads;
444  cc_invals += cache->cc_invals;
445  cc_lsearches += cache->cc_lsearches;
446  cc_lhits += cache->cc_lhits;
447  }
448  elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
449  CacheHdr->ch_ntup,
450  cc_searches,
451  cc_hits,
452  cc_neg_hits,
453  cc_hits + cc_neg_hits,
454  cc_newloads,
455  cc_searches - cc_hits - cc_neg_hits - cc_newloads,
456  cc_searches - cc_hits - cc_neg_hits,
457  cc_invals,
458  cc_lsearches,
459  cc_lhits);
460 }
461 #endif /* CATCACHE_STATS */
462 
463 
464 /*
465  * CatCacheRemoveCTup
466  *
467  * Unlink and delete the given cache entry
468  *
469  * NB: if it is a member of a CatCList, the CatCList is deleted too.
470  * Both the cache entry and the list had better have zero refcount.
471  */
472 static void
473 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
474 {
475  Assert(ct->refcount == 0);
476  Assert(ct->my_cache == cache);
477 
478  if (ct->c_list)
479  {
480  /*
481  * The cleanest way to handle this is to call CatCacheRemoveCList,
482  * which will recurse back to me, and the recursive call will do the
483  * work. Set the "dead" flag to make sure it does recurse.
484  */
485  ct->dead = true;
486  CatCacheRemoveCList(cache, ct->c_list);
487  return; /* nothing left to do */
488  }
489 
490  /* delink from linked list */
491  dlist_delete(&ct->cache_elem);
492 
493  /*
494  * Free keys when we're dealing with a negative entry, normal entries just
495  * point into tuple, allocated together with the CatCTup.
496  */
497  if (ct->negative)
498  CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
499  cache->cc_keyno, ct->keys);
500 
501  pfree(ct);
502 
503  --cache->cc_ntup;
504  --CacheHdr->ch_ntup;
505 }
506 
507 /*
508  * CatCacheRemoveCList
509  *
510  * Unlink and delete the given cache list entry
511  *
512  * NB: any dead member entries that become unreferenced are deleted too.
513  */
514 static void
515 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
516 {
517  int i;
518 
519  Assert(cl->refcount == 0);
520  Assert(cl->my_cache == cache);
521 
522  /* delink from member tuples */
523  for (i = cl->n_members; --i >= 0;)
524  {
525  CatCTup *ct = cl->members[i];
526 
527  Assert(ct->c_list == cl);
528  ct->c_list = NULL;
529  /* if the member is dead and now has no references, remove it */
530  if (
531 #ifndef CATCACHE_FORCE_RELEASE
532  ct->dead &&
533 #endif
534  ct->refcount == 0)
535  CatCacheRemoveCTup(cache, ct);
536  }
537 
538  /* delink from linked list */
539  dlist_delete(&cl->cache_elem);
540 
541  /* free associated column data */
542  CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
543  cache->cc_keyno, cl->keys);
544 
545  pfree(cl);
546 }
547 
548 
549 /*
550  * CatCacheInvalidate
551  *
552  * Invalidate entries in the specified cache, given a hash value.
553  *
554  * We delete cache entries that match the hash value, whether positive
555  * or negative. We don't care whether the invalidation is the result
556  * of a tuple insertion or a deletion.
557  *
558  * We used to try to match positive cache entries by TID, but that is
559  * unsafe after a VACUUM FULL on a system catalog: an inval event could
560  * be queued before VACUUM FULL, and then processed afterwards, when the
561  * target tuple that has to be invalidated has a different TID than it
562  * did when the event was created. So now we just compare hash values and
563  * accept the small risk of unnecessary invalidations due to false matches.
564  *
565  * This routine is only quasi-public: it should only be used by inval.c.
566  */
567 void
568 CatCacheInvalidate(CatCache *cache, uint32 hashValue)
569 {
570  Index hashIndex;
571  dlist_mutable_iter iter;
572 
573  CACHE1_elog(DEBUG2, "CatCacheInvalidate: called");
574 
575  /*
576  * We don't bother to check whether the cache has finished initialization
577  * yet; if not, there will be no entries in it so no problem.
578  */
579 
580  /*
581  * Invalidate *all* CatCLists in this cache; it's too hard to tell which
582  * searches might still be correct, so just zap 'em all.
583  */
584  dlist_foreach_modify(iter, &cache->cc_lists)
585  {
586  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
587 
588  if (cl->refcount > 0)
589  cl->dead = true;
590  else
591  CatCacheRemoveCList(cache, cl);
592  }
593 
594  /*
595  * inspect the proper hash bucket for tuple matches
596  */
597  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
598  dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
599  {
600  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
601 
602  if (hashValue == ct->hash_value)
603  {
604  if (ct->refcount > 0 ||
605  (ct->c_list && ct->c_list->refcount > 0))
606  {
607  ct->dead = true;
608  /* list, if any, was marked dead above */
609  Assert(ct->c_list == NULL || ct->c_list->dead);
610  }
611  else
612  CatCacheRemoveCTup(cache, ct);
613  CACHE1_elog(DEBUG2, "CatCacheInvalidate: invalidated");
614 #ifdef CATCACHE_STATS
615  cache->cc_invals++;
616 #endif
617  /* could be multiple matches, so keep looking! */
618  }
619  }
620 }
621 
622 /* ----------------------------------------------------------------
623  * public functions
624  * ----------------------------------------------------------------
625  */
626 
627 
628 /*
629  * Standard routine for creating cache context if it doesn't exist yet
630  *
631  * There are a lot of places (probably far more than necessary) that check
632  * whether CacheMemoryContext exists yet and want to create it if not.
633  * We centralize knowledge of exactly how to create it here.
634  */
635 void
636 CreateCacheMemoryContext(void)
637 {
638  /*
639  * Purely for paranoia, check that context doesn't exist; caller probably
640  * did so already.
641  */
642  if (!CacheMemoryContext)
643  CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
644  "CacheMemoryContext",
645  ALLOCSET_DEFAULT_SIZES);
646 }
647 
648 
649 /*
650  * ResetCatalogCache
651  *
652  * Reset one catalog cache to empty.
653  *
654  * This is not very efficient if the target cache is nearly empty.
655  * However, it shouldn't need to be efficient; we don't invoke it often.
656  */
657 static void
658 ResetCatalogCache(CatCache *cache)
659 {
660  dlist_mutable_iter iter;
661  int i;
662 
663  /* Remove each list in this cache, or at least mark it dead */
664  dlist_foreach_modify(iter, &cache->cc_lists)
665  {
666  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
667 
668  if (cl->refcount > 0)
669  cl->dead = true;
670  else
671  CatCacheRemoveCList(cache, cl);
672  }
673 
674  /* Remove each tuple in this cache, or at least mark it dead */
675  for (i = 0; i < cache->cc_nbuckets; i++)
676  {
677  dlist_head *bucket = &cache->cc_bucket[i];
678 
679  dlist_foreach_modify(iter, bucket)
680  {
681  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
682 
683  if (ct->refcount > 0 ||
684  (ct->c_list && ct->c_list->refcount > 0))
685  {
686  ct->dead = true;
687  /* list, if any, was marked dead above */
688  Assert(ct->c_list == NULL || ct->c_list->dead);
689  }
690  else
691  CatCacheRemoveCTup(cache, ct);
692 #ifdef CATCACHE_STATS
693  cache->cc_invals++;
694 #endif
695  }
696  }
697 }
698 
699 /*
700  * ResetCatalogCaches
701  *
702  * Reset all caches when a shared cache inval event forces it
703  */
704 void
705 ResetCatalogCaches(void)
706 {
707  slist_iter iter;
708 
709  CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
710 
711  slist_foreach(iter, &CacheHdr->ch_caches)
712  {
713  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
714 
715  ResetCatalogCache(cache);
716  }
717 
718  CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
719 }
720 
721 /*
722  * CatalogCacheFlushCatalog
723  *
724  * Flush all catcache entries that came from the specified system catalog.
725  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
726  * tuples very likely now have different TIDs than before. (At one point
727  * we also tried to force re-execution of CatalogCacheInitializeCache for
728  * the cache(s) on that catalog. This is a bad idea since it leads to all
729  * kinds of trouble if a cache flush occurs while loading cache entries.
730  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
731  * rather than relying on the relcache to keep a tupdesc for us. Of course
732  * this assumes the tupdesc of a cachable system table will not change...)
733  */
734 void
735 CatalogCacheFlushCatalog(Oid catId)
736 {
737  slist_iter iter;
738 
739  CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
740 
741  slist_foreach(iter, &CacheHdr->ch_caches)
742  {
743  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
744 
745  /* Does this cache store tuples of the target catalog? */
746  if (cache->cc_reloid == catId)
747  {
748  /* Yes, so flush all its contents */
749  ResetCatalogCache(cache);
750 
751  /* Tell inval.c to call syscache callbacks for this cache */
752  CallSyscacheCallbacks(cache->id, 0);
753  }
754  }
755 
756  CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
757 }
758 
759 /*
760  * InitCatCache
761  *
762  * This allocates and initializes a cache for a system catalog relation.
763  * Actually, the cache is only partially initialized to avoid opening the
764  * relation. The relation will be opened and the rest of the cache
765  * structure initialized on the first access.
766  */
767 #ifdef CACHEDEBUG
768 #define InitCatCache_DEBUG2 \
769 do { \
770  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
771  cp->cc_reloid, cp->cc_indexoid, cp->id, \
772  cp->cc_nkeys, cp->cc_nbuckets); \
773 } while(0)
774 #else
775 #define InitCatCache_DEBUG2
776 #endif
777 
778 CatCache *
779 InitCatCache(int id,
780  Oid reloid,
781  Oid indexoid,
782  int nkeys,
783  const int *key,
784  int nbuckets)
785 {
786  CatCache *cp;
787  MemoryContext oldcxt;
788  size_t sz;
789  int i;
790 
791  /*
792  * nbuckets is the initial number of hash buckets to use in this catcache.
793  * It will be enlarged later if it becomes too full.
794  *
795  * nbuckets must be a power of two. We check this via Assert rather than
796  * a full runtime check because the values will be coming from constant
797  * tables.
798  *
799  * If you're confused by the power-of-two check, see comments in
800  * bitmapset.c for an explanation.
801  */
802  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
803 
804  /*
805  * first switch to the cache context so our allocations do not vanish at
806  * the end of a transaction
807  */
808  if (!CacheMemoryContext)
809  CreateCacheMemoryContext();
810 
811  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
812 
813  /*
814  * if first time through, initialize the cache group header
815  */
816  if (CacheHdr == NULL)
817  {
818  CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
819  slist_init(&CacheHdr->ch_caches);
820  CacheHdr->ch_ntup = 0;
821 #ifdef CATCACHE_STATS
822  /* set up to dump stats at backend exit */
823  on_proc_exit(CatCachePrintStats, 0);
824 #endif
825  }
826 
827  /*
828  * Allocate a new cache structure, aligning to a cacheline boundary
829  *
830  * Note: we rely on zeroing to initialize all the dlist headers correctly
831  */
832  sz = sizeof(CatCache) + PG_CACHE_LINE_SIZE;
833  cp = (CatCache *) CACHELINEALIGN(palloc0(sz));
834  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
835 
836  /*
837  * initialize the cache's relation information for the relation
838  * corresponding to this cache, and initialize some of the new cache's
839  * other internal fields. But don't open the relation yet.
840  */
841  cp->id = id;
842  cp->cc_relname = "(not known yet)";
843  cp->cc_reloid = reloid;
844  cp->cc_indexoid = indexoid;
845  cp->cc_relisshared = false; /* temporary */
846  cp->cc_tupdesc = (TupleDesc) NULL;
847  cp->cc_ntup = 0;
848  cp->cc_nbuckets = nbuckets;
849  cp->cc_nkeys = nkeys;
850  for (i = 0; i < nkeys; ++i)
851  cp->cc_keyno[i] = key[i];
852 
853  /*
854  * new cache is initialized as far as we can go for now. print some
855  * debugging information, if appropriate.
856  */
857  InitCatCache_DEBUG2;
858 
859  /*
860  * add completed cache to top of group header's list
861  */
862  slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
863 
864  /*
865  * back to the old context before we return...
866  */
867  MemoryContextSwitchTo(oldcxt);
868 
869  return cp;
870 }
871 
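/*
 * Minimal usage sketch, not part of the upstream file: modeled on the way
 * syscache.c builds its caches. RELOID comes from utils/syscache.h;
 * RelationRelationId and ClassOidIndexId are assumed to come from the
 * pg_class catalog headers, which this file does not include itself.
 */
static CatCache *example_class_cache = NULL;

static void
example_init_class_cache(void)
{
	static const int key[1] = {ObjectIdAttributeNumber};

	example_class_cache = InitCatCache(RELOID,	/* syscache id */
									   RelationRelationId,	/* pg_class */
									   ClassOidIndexId,		/* its OID index */
									   1,		/* nkeys */
									   key,
									   128);	/* nbuckets; must be 2^n */
}
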
872 /*
873  * Enlarge a catcache, doubling the number of buckets.
874  */
875 static void
876 RehashCatCache(CatCache *cp)
877 {
878  dlist_head *newbucket;
879  int newnbuckets;
880  int i;
881 
882  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
883  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
884 
885  /* Allocate a new, larger, hash table. */
886  newnbuckets = cp->cc_nbuckets * 2;
887  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
888 
889  /* Move all entries from old hash table to new. */
890  for (i = 0; i < cp->cc_nbuckets; i++)
891  {
892  dlist_mutable_iter iter;
893 
894  dlist_foreach_modify(iter, &cp->cc_bucket[i])
895  {
896  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
897  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
898 
899  dlist_delete(iter.cur);
900  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
901  }
902  }
903 
904  /* Switch to the new array. */
905  pfree(cp->cc_bucket);
906  cp->cc_nbuckets = newnbuckets;
907  cp->cc_bucket = newbucket;
908 }
909 
910 /*
911  * CatalogCacheInitializeCache
912  *
913  * This function does final initialization of a catcache: obtain the tuple
914  * descriptor and set up the hash and equality function links. We assume
915  * that the relcache entry can be opened at this point!
916  */
917 #ifdef CACHEDEBUG
918 #define CatalogCacheInitializeCache_DEBUG1 \
919  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
920  cache->cc_reloid)
921 
922 #define CatalogCacheInitializeCache_DEBUG2 \
923 do { \
924  if (cache->cc_keyno[i] > 0) { \
925  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
926  i+1, cache->cc_nkeys, cache->cc_keyno[i], \
927  TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
928  } else { \
929  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
930  i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
931  } \
932 } while(0)
933 #else
934 #define CatalogCacheInitializeCache_DEBUG1
935 #define CatalogCacheInitializeCache_DEBUG2
936 #endif
937 
938 static void
939 CatalogCacheInitializeCache(CatCache *cache)
940 {
941  Relation relation;
942  MemoryContext oldcxt;
943  TupleDesc tupdesc;
944  int i;
945 
946  CatalogCacheInitializeCache_DEBUG1;
947 
948  relation = heap_open(cache->cc_reloid, AccessShareLock);
949 
950  /*
951  * switch to the cache context so our allocations do not vanish at the end
952  * of a transaction
953  */
954  Assert(CacheMemoryContext != NULL);
955 
956  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
957 
958  /*
959  * copy the relcache's tuple descriptor to permanent cache storage
960  */
961  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
962 
963  /*
964  * save the relation's name and relisshared flag, too (cc_relname is used
965  * only for debugging purposes)
966  */
967  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
968  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
969 
970  /*
971  * return to the caller's memory context and close the rel
972  */
973  MemoryContextSwitchTo(oldcxt);
974 
975  heap_close(relation, AccessShareLock);
976 
977  CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
978  cache->cc_relname, cache->cc_nkeys);
979 
980  /*
981  * initialize cache's key information
982  */
983  for (i = 0; i < cache->cc_nkeys; ++i)
984  {
985  Oid keytype;
986  RegProcedure eqfunc;
987 
988  CatalogCacheInitializeCache_DEBUG2;
989 
990  if (cache->cc_keyno[i] > 0)
991  {
992  Form_pg_attribute attr = TupleDescAttr(tupdesc,
993  cache->cc_keyno[i] - 1);
994 
995  keytype = attr->atttypid;
996  /* cache key columns should always be NOT NULL */
997  Assert(attr->attnotnull);
998  }
999  else
1000  {
1001  if (cache->cc_keyno[i] != ObjectIdAttributeNumber)
1002  elog(FATAL, "only sys attr supported in caches is OID");
1003  keytype = OIDOID;
1004  }
1005 
1006  GetCCHashEqFuncs(keytype,
1007  &cache->cc_hashfunc[i],
1008  &eqfunc,
1009  &cache->cc_fastequal[i]);
1010 
1011  /*
1012  * Do equality-function lookup (we assume this won't need a catalog
1013  * lookup for any supported type)
1014  */
1015  fmgr_info_cxt(eqfunc,
1016  &cache->cc_skey[i].sk_func,
1017  CacheMemoryContext);
1018 
1019  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1020  cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1021 
1022  /* Fill in sk_strategy as well --- always standard equality */
1023  cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
1024  cache->cc_skey[i].sk_subtype = InvalidOid;
1025  /* Currently, there are no catcaches on collation-aware data types */
1026  cache->cc_skey[i].sk_collation = InvalidOid;
1027 
1028  CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1029  cache->cc_relname,
1030  i,
1031  cache);
1032  }
1033 
1034  /*
1035  * mark this cache fully initialized
1036  */
1037  cache->cc_tupdesc = tupdesc;
1038 }
1039 
1040 /*
1041  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1042  *
1043  * One reason to call this routine is to ensure that the relcache has
1044  * created entries for all the catalogs and indexes referenced by catcaches.
1045  * Therefore, provide an option to open the index as well as fixing the
1046  * cache itself. An exception is the indexes on pg_am, which we don't use
1047  * (cf. IndexScanOK).
1048  */
1049 void
1050 InitCatCachePhase2(CatCache *cache, bool touch_index)
1051 {
1052  if (cache->cc_tupdesc == NULL)
1053  CatalogCacheInitializeCache(cache);
1054 
1055  if (touch_index &&
1056  cache->id != AMOID &&
1057  cache->id != AMNAME)
1058  {
1059  Relation idesc;
1060 
1061  /*
1062  * We must lock the underlying catalog before opening the index to
1063  * avoid deadlock, since index_open could possibly result in reading
1064  * this same catalog, and if anyone else is exclusive-locking this
1065  * catalog and index they'll be doing it in that order.
1066  */
1067  LockRelationOid(cache->cc_reloid, AccessShareLock);
1068  idesc = index_open(cache->cc_indexoid, AccessShareLock);
1069 
1070  /*
1071  * While we've got the index open, let's check that it's unique (and
1072  * not just deferrable-unique, thank you very much). This is just to
1073  * catch thinkos in definitions of new catcaches, so we don't worry
1074  * about the pg_am indexes not getting tested.
1075  */
1076  Assert(idesc->rd_index->indisunique &&
1077  idesc->rd_index->indimmediate);
1078 
1079  index_close(idesc, AccessShareLock);
1080  UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1081  }
1082 }
1083 
1084 
1085 /*
1086  * IndexScanOK
1087  *
1088  * This function checks for tuples that will be fetched by
1089  * IndexSupportInitialize() during relcache initialization for
1090  * certain system indexes that support critical syscaches.
1091  * We can't use an indexscan to fetch these, else we'll get into
1092  * infinite recursion. A plain heap scan will work, however.
1093  * Once we have completed relcache initialization (signaled by
1094  * criticalRelcachesBuilt), we don't have to worry anymore.
1095  *
1096  * Similarly, during backend startup we have to be able to use the
1097  * pg_authid and pg_auth_members syscaches for authentication even if
1098  * we don't yet have relcache entries for those catalogs' indexes.
1099  */
1100 static bool
1101 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1102 {
1103  switch (cache->id)
1104  {
1105  case INDEXRELID:
1106 
1107  /*
1108  * Rather than tracking exactly which indexes have to be loaded
1109  * before we can use indexscans (which changes from time to time),
1110  * just force all pg_index searches to be heap scans until we've
1111  * built the critical relcaches.
1112  */
1113  if (!criticalRelcachesBuilt)
1114  return false;
1115  break;
1116 
1117  case AMOID:
1118  case AMNAME:
1119 
1120  /*
1121  * Always do heap scans in pg_am, because it's so small there's
1122  * not much point in an indexscan anyway. We *must* do this when
1123  * initially building critical relcache entries, but we might as
1124  * well just always do it.
1125  */
1126  return false;
1127 
1128  case AUTHNAME:
1129  case AUTHOID:
1130  case AUTHMEMMEMROLE:
1131 
1132  /*
1133  * Protect authentication lookups occurring before relcache has
1134  * collected entries for shared indexes.
1135  */
1136  if (!criticalSharedRelcachesBuilt)
1137  return false;
1138  break;
1139 
1140  default:
1141  break;
1142  }
1143 
1144  /* Normal case, allow index scan */
1145  return true;
1146 }
1147 
1148 /*
1149  * SearchCatCacheInternal
1150  *
1151  * This call searches a system cache for a tuple, opening the relation
1152  * if necessary (on the first access to a particular cache).
1153  *
1154  * The result is NULL if not found, or a pointer to a HeapTuple in
1155  * the cache. The caller must not modify the tuple, and must call
1156  * ReleaseCatCache() when done with it.
1157  *
1158  * The search key values should be expressed as Datums of the key columns'
1159  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1160  * exception, the passed-in key for a NAME column can be just a C string;
1161  * the caller need not go to the trouble of converting it to a fully
1162  * null-padded NAME.
1163  */
1164 HeapTuple
1165 SearchCatCache(CatCache *cache,
1166  Datum v1,
1167  Datum v2,
1168  Datum v3,
1169  Datum v4)
1170 {
1171  return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1172 }
1173 
1174 
1175 /*
1176  * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1177  * arguments. The compiler can inline the body and unroll loops, making them a
1178  * bit faster than SearchCatCache().
1179  */
1180 
1181 HeapTuple
1182 SearchCatCache1(CatCache *cache,
1183  Datum v1)
1184 {
1185  return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1186 }
1187 
1188 
1189 HeapTuple
1190 SearchCatCache2(CatCache *cache,
1191  Datum v1, Datum v2)
1192 {
1193  return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1194 }
1195 
1196 
1197 HeapTuple
1198 SearchCatCache3(CatCache *cache,
1199  Datum v1, Datum v2, Datum v3)
1200 {
1201  return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1202 }
1203 
1204 
1205 HeapTuple
1206 SearchCatCache4(CatCache *cache,
1207  Datum v1, Datum v2, Datum v3, Datum v4)
1208 {
1209  return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1210 }
1211 
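/*
 * Minimal usage sketch, not part of the upstream file; real callers normally
 * go through the syscache.c wrappers such as SearchSysCache1(). Probe a
 * one-key cache, use the tuple read-only, then release the reference that a
 * successful search acquired.
 */
static bool
example_probe_cache(CatCache *cache, Oid key)
{
	HeapTuple	tup;

	tup = SearchCatCache1(cache, ObjectIdGetDatum(key));
	if (!HeapTupleIsValid(tup))
		return false;			/* no match (possibly a negative-entry hit) */

	/* ... inspect, but never modify, the cached tuple here ... */

	ReleaseCatCache(tup);
	return true;
}
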
1212 /*
1213  * Work-horse for SearchCatCache/SearchCatCacheN.
1214  */
1215 static inline HeapTuple
1216 SearchCatCacheInternal(CatCache *cache,
1217  int nkeys,
1218  Datum v1,
1219  Datum v2,
1220  Datum v3,
1221  Datum v4)
1222 {
1223  Datum arguments[CATCACHE_MAXKEYS];
1224  uint32 hashValue;
1225  Index hashIndex;
1226  dlist_iter iter;
1227  dlist_head *bucket;
1228  CatCTup *ct;
1229 
1230  /* Make sure we're in an xact, even if this ends up being a cache hit */
1231  Assert(IsTransactionState());
1232 
1233  Assert(cache->cc_nkeys == nkeys);
1234 
1235  /*
1236  * one-time startup overhead for each cache
1237  */
1238  if (unlikely(cache->cc_tupdesc == NULL))
1239  CatalogCacheInitializeCache(cache);
1240 
1241 #ifdef CATCACHE_STATS
1242  cache->cc_searches++;
1243 #endif
1244 
1245  /* Initialize local parameter array */
1246  arguments[0] = v1;
1247  arguments[1] = v2;
1248  arguments[2] = v3;
1249  arguments[3] = v4;
1250 
1251  /*
1252  * find the hash bucket in which to look for the tuple
1253  */
1254  hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1255  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1256 
1257  /*
1258  * scan the hash bucket until we find a match or exhaust our tuples
1259  *
1260  * Note: it's okay to use dlist_foreach here, even though we modify the
1261  * dlist within the loop, because we don't continue the loop afterwards.
1262  */
1263  bucket = &cache->cc_bucket[hashIndex];
1264  dlist_foreach(iter, bucket)
1265  {
1266  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1267 
1268  if (ct->dead)
1269  continue; /* ignore dead entries */
1270 
1271  if (ct->hash_value != hashValue)
1272  continue; /* quickly skip entry if wrong hash val */
1273 
1274  if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1275  continue;
1276 
1277  /*
1278  * We found a match in the cache. Move it to the front of the list
1279  * for its hashbucket, in order to speed subsequent searches. (The
1280  * most frequently accessed elements in any hashbucket will tend to be
1281  * near the front of the hashbucket's list.)
1282  */
1283  dlist_move_head(bucket, &ct->cache_elem);
1284 
1285  /*
1286  * If it's a positive entry, bump its refcount and return it. If it's
1287  * negative, we can report failure to the caller.
1288  */
1289  if (!ct->negative)
1290  {
1291  ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1292  ct->refcount++;
1293  ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1294 
1295  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1296  cache->cc_relname, hashIndex);
1297 
1298 #ifdef CATCACHE_STATS
1299  cache->cc_hits++;
1300 #endif
1301 
1302  return &ct->tuple;
1303  }
1304  else
1305  {
1306  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1307  cache->cc_relname, hashIndex);
1308 
1309 #ifdef CATCACHE_STATS
1310  cache->cc_neg_hits++;
1311 #endif
1312 
1313  return NULL;
1314  }
1315  }
1316 
1317  return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1318 }
1319 
1320 /*
1321  * Search the actual catalogs, rather than the cache.
1322  *
1323  * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1324  * as small as possible. To avoid that effort being undone by a helpful
1325  * compiler, try to explicitly forbid inlining.
1326  */
1327 static pg_noinline HeapTuple
1328 SearchCatCacheMiss(CatCache *cache,
1329  int nkeys,
1330  uint32 hashValue,
1331  Index hashIndex,
1332  Datum v1,
1333  Datum v2,
1334  Datum v3,
1335  Datum v4)
1336 {
1337  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1338  Relation relation;
1339  SysScanDesc scandesc;
1340  HeapTuple ntp;
1341  CatCTup *ct;
1342  Datum arguments[CATCACHE_MAXKEYS];
1343 
1344  /* Initialize local parameter array */
1345  arguments[0] = v1;
1346  arguments[1] = v2;
1347  arguments[2] = v3;
1348  arguments[3] = v4;
1349 
1350  /*
1351  * Ok, need to make a lookup in the relation, copy the scankey and fill
1352  * out any per-call fields.
1353  */
1354  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1355  cur_skey[0].sk_argument = v1;
1356  cur_skey[1].sk_argument = v2;
1357  cur_skey[2].sk_argument = v3;
1358  cur_skey[3].sk_argument = v4;
1359 
1360  /*
1361  * Tuple was not found in cache, so we have to try to retrieve it directly
1362  * from the relation. If found, we will add it to the cache; if not
1363  * found, we will add a negative cache entry instead.
1364  *
1365  * NOTE: it is possible for recursive cache lookups to occur while reading
1366  * the relation --- for example, due to shared-cache-inval messages being
1367  * processed during heap_open(). This is OK. It's even possible for one
1368  * of those lookups to find and enter the very same tuple we are trying to
1369  * fetch here. If that happens, we will enter a second copy of the tuple
1370  * into the cache. The first copy will never be referenced again, and
1371  * will eventually age out of the cache, so there's no functional problem.
1372  * This case is rare enough that it's not worth expending extra cycles to
1373  * detect.
1374  */
1375  relation = heap_open(cache->cc_reloid, AccessShareLock);
1376 
1377  scandesc = systable_beginscan(relation,
1378  cache->cc_indexoid,
1379  IndexScanOK(cache, cur_skey),
1380  NULL,
1381  nkeys,
1382  cur_skey);
1383 
1384  ct = NULL;
1385 
1386  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1387  {
1388  ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1389  hashValue, hashIndex,
1390  false);
1391  /* immediately set the refcount to 1 */
1392  ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1393  ct->refcount++;
1394  ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1395  break; /* assume only one match */
1396  }
1397 
1398  systable_endscan(scandesc);
1399 
1400  heap_close(relation, AccessShareLock);
1401 
1402  /*
1403  * If tuple was not found, we need to build a negative cache entry
1404  * containing a fake tuple. The fake tuple has the correct key columns,
1405  * but nulls everywhere else.
1406  *
1407  * In bootstrap mode, we don't build negative entries, because the cache
1408  * invalidation mechanism isn't alive and can't clear them if the tuple
1409  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1410  * cache inval for that.)
1411  */
1412  if (ct == NULL)
1413  {
1414  if (IsBootstrapProcessingMode())
1415  return NULL;
1416 
1417  ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1418  hashValue, hashIndex,
1419  true);
1420 
1421  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1422  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1423  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1424  cache->cc_relname, hashIndex);
1425 
1426  /*
1427  * We are not returning the negative entry to the caller, so leave its
1428  * refcount zero.
1429  */
1430 
1431  return NULL;
1432  }
1433 
1434  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1435  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1436  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1437  cache->cc_relname, hashIndex);
1438 
1439 #ifdef CATCACHE_STATS
1440  cache->cc_newloads++;
1441 #endif
1442 
1443  return &ct->tuple;
1444 }
1445 
1446 /*
1447  * ReleaseCatCache
1448  *
1449  * Decrement the reference count of a catcache entry (releasing the
1450  * hold grabbed by a successful SearchCatCache).
1451  *
1452  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1453  * will be freed as soon as their refcount goes to zero. In combination
1454  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1455  * to catch references to already-released catcache entries.
1456  */
1457 void
1458 ReleaseCatCache(HeapTuple tuple)
1459 {
1460  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1461  offsetof(CatCTup, tuple));
1462 
1463  /* Safety checks to ensure we were handed a cache entry */
1464  Assert(ct->ct_magic == CT_MAGIC);
1465  Assert(ct->refcount > 0);
1466 
1467  ct->refcount--;
1468  ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1469 
1470  if (
1471 #ifndef CATCACHE_FORCE_RELEASE
1472  ct->dead &&
1473 #endif
1474  ct->refcount == 0 &&
1475  (ct->c_list == NULL || ct->c_list->refcount == 0))
1476  CatCacheRemoveCTup(ct->my_cache, ct);
1477 }
1478 
1479 
1480 /*
1481  * GetCatCacheHashValue
1482  *
1483  * Compute the hash value for a given set of search keys.
1484  *
1485  * The reason for exposing this as part of the API is that the hash value is
1486  * exposed in cache invalidation operations, so there are places outside the
1487  * catcache code that need to be able to compute the hash values.
1488  */
1489 uint32
1490 GetCatCacheHashValue(CatCache *cache,
1491  Datum v1,
1492  Datum v2,
1493  Datum v3,
1494  Datum v4)
1495 {
1496  /*
1497  * one-time startup overhead for each cache
1498  */
1499  if (cache->cc_tupdesc == NULL)
1500  CatalogCacheInitializeCache(cache);
1501 
1502  /*
1503  * calculate the hash value
1504  */
1505  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1506 }
1507 
1508 
1509 /*
1510  * SearchCatCacheList
1511  *
1512  * Generate a list of all tuples matching a partial key (that is,
1513  * a key specifying just the first K of the cache's N key columns).
1514  *
1515  * The caller must not modify the list object or the pointed-to tuples,
1516  * and must call ReleaseCatCacheList() when done with the list.
1517  */
1518 CatCList *
1519 SearchCatCacheList(CatCache *cache,
1520  int nkeys,
1521  Datum v1,
1522  Datum v2,
1523  Datum v3,
1524  Datum v4)
1525 {
1526  Datum arguments[CATCACHE_MAXKEYS];
1527  uint32 lHashValue;
1528  dlist_iter iter;
1529  CatCList *cl;
1530  CatCTup *ct;
1531  List *volatile ctlist;
1532  ListCell *ctlist_item;
1533  int nmembers;
1534  bool ordered;
1535  HeapTuple ntp;
1536  MemoryContext oldcxt;
1537  int i;
1538 
1539  /*
1540  * one-time startup overhead for each cache
1541  */
1542  if (cache->cc_tupdesc == NULL)
1543  CatalogCacheInitializeCache(cache);
1544 
1545  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1546 
1547 #ifdef CATCACHE_STATS
1548  cache->cc_lsearches++;
1549 #endif
1550 
1551  /* Initialize local parameter array */
1552  arguments[0] = v1;
1553  arguments[1] = v2;
1554  arguments[2] = v3;
1555  arguments[3] = v4;
1556 
1557  /*
1558  * compute a hash value of the given keys for faster search. We don't
1559  * presently divide the CatCList items into buckets, but this still lets
1560  * us skip non-matching items quickly most of the time.
1561  */
1562  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1563 
1564  /*
1565  * scan the items until we find a match or exhaust our list
1566  *
1567  * Note: it's okay to use dlist_foreach here, even though we modify the
1568  * dlist within the loop, because we don't continue the loop afterwards.
1569  */
1570  dlist_foreach(iter, &cache->cc_lists)
1571  {
1572  cl = dlist_container(CatCList, cache_elem, iter.cur);
1573 
1574  if (cl->dead)
1575  continue; /* ignore dead entries */
1576 
1577  if (cl->hash_value != lHashValue)
1578  continue; /* quickly skip entry if wrong hash val */
1579 
1580  /*
1581  * see if the cached list matches our key.
1582  */
1583  if (cl->nkeys != nkeys)
1584  continue;
1585 
1586  if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1587  continue;
1588 
1589  /*
1590  * We found a matching list. Move the list to the front of the
1591  * cache's list-of-lists, to speed subsequent searches. (We do not
1592  * move the members to the fronts of their hashbucket lists, however,
1593  * since there's no point in that unless they are searched for
1594  * individually.)
1595  */
1596  dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1597 
1598  /* Bump the list's refcount and return it */
1599  ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1600  cl->refcount++;
1601  ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1602 
1603  CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1604  cache->cc_relname);
1605 
1606 #ifdef CATCACHE_STATS
1607  cache->cc_lhits++;
1608 #endif
1609 
1610  return cl;
1611  }
1612 
1613  /*
1614  * List was not found in cache, so we have to build it by reading the
1615  * relation. For each matching tuple found in the relation, use an
1616  * existing cache entry if possible, else build a new one.
1617  *
1618  * We have to bump the member refcounts temporarily to ensure they won't
1619  * get dropped from the cache while loading other members. We use a PG_TRY
1620  * block to ensure we can undo those refcounts if we get an error before
1621  * we finish constructing the CatCList.
1622  */
1623  ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1624 
1625  ctlist = NIL;
1626 
1627  PG_TRY();
1628  {
1629  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1630  Relation relation;
1631  SysScanDesc scandesc;
1632 
1633  /*
1634  * Ok, need to make a lookup in the relation, copy the scankey and
1635  * fill out any per-call fields.
1636  */
1637  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1638  cur_skey[0].sk_argument = v1;
1639  cur_skey[1].sk_argument = v2;
1640  cur_skey[2].sk_argument = v3;
1641  cur_skey[3].sk_argument = v4;
1642 
1643  relation = heap_open(cache->cc_reloid, AccessShareLock);
1644 
1645  scandesc = systable_beginscan(relation,
1646  cache->cc_indexoid,
1647  IndexScanOK(cache, cur_skey),
1648  NULL,
1649  nkeys,
1650  cur_skey);
1651 
1652  /* The list will be ordered iff we are doing an index scan */
1653  ordered = (scandesc->irel != NULL);
1654 
1655  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1656  {
1657  uint32 hashValue;
1658  Index hashIndex;
1659  bool found = false;
1660  dlist_head *bucket;
1661 
1662  /*
1663  * See if there's an entry for this tuple already.
1664  */
1665  ct = NULL;
1666  hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1667  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1668 
1669  bucket = &cache->cc_bucket[hashIndex];
1670  dlist_foreach(iter, bucket)
1671  {
1672  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1673 
1674  if (ct->dead || ct->negative)
1675  continue; /* ignore dead and negative entries */
1676 
1677  if (ct->hash_value != hashValue)
1678  continue; /* quickly skip entry if wrong hash val */
1679 
1680  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1681  continue; /* not same tuple */
1682 
1683  /*
1684  * Found a match, but can't use it if it belongs to another
1685  * list already
1686  */
1687  if (ct->c_list)
1688  continue;
1689 
1690  found = true;
1691  break; /* A-OK */
1692  }
1693 
1694  if (!found)
1695  {
1696  /* We didn't find a usable entry, so make a new one */
1697  ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1698  hashValue, hashIndex,
1699  false);
1700  }
1701 
1702  /* Careful here: add entry to ctlist, then bump its refcount */
1703  /* This way leaves state correct if lappend runs out of memory */
1704  ctlist = lappend(ctlist, ct);
1705  ct->refcount++;
1706  }
1707 
1708  systable_endscan(scandesc);
1709 
1710  heap_close(relation, AccessShareLock);
1711 
1712  /* Now we can build the CatCList entry. */
1713  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1714  nmembers = list_length(ctlist);
1715  cl = (CatCList *)
1716  palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1717 
1718  /* Extract key values */
1719  CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
1720  arguments, cl->keys);
1721  MemoryContextSwitchTo(oldcxt);
1722 
1723  /*
1724  * We are now past the last thing that could trigger an elog before we
1725  * have finished building the CatCList and remembering it in the
1726  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1727  * we'd better do so before we start marking the members as belonging
1728  * to the list.
1729  */
1730 
1731  }
1732  PG_CATCH();
1733  {
1734  foreach(ctlist_item, ctlist)
1735  {
1736  ct = (CatCTup *) lfirst(ctlist_item);
1737  Assert(ct->c_list == NULL);
1738  Assert(ct->refcount > 0);
1739  ct->refcount--;
1740  if (
1741 #ifndef CATCACHE_FORCE_RELEASE
1742  ct->dead &&
1743 #endif
1744  ct->refcount == 0 &&
1745  (ct->c_list == NULL || ct->c_list->refcount == 0))
1746  CatCacheRemoveCTup(cache, ct);
1747  }
1748 
1749  PG_RE_THROW();
1750  }
1751  PG_END_TRY();
1752 
1753  cl->cl_magic = CL_MAGIC;
1754  cl->my_cache = cache;
1755  cl->refcount = 0; /* for the moment */
1756  cl->dead = false;
1757  cl->ordered = ordered;
1758  cl->nkeys = nkeys;
1759  cl->hash_value = lHashValue;
1760  cl->n_members = nmembers;
1761 
1762  i = 0;
1763  foreach(ctlist_item, ctlist)
1764  {
1765  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1766  Assert(ct->c_list == NULL);
1767  ct->c_list = cl;
1768  /* release the temporary refcount on the member */
1769  Assert(ct->refcount > 0);
1770  ct->refcount--;
1771  /* mark list dead if any members already dead */
1772  if (ct->dead)
1773  cl->dead = true;
1774  }
1775  Assert(i == nmembers);
1776 
1777  dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1778 
1779  /* Finally, bump the list's refcount and return it */
1780  cl->refcount++;
1781  ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1782 
1783  CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1784  cache->cc_relname, nmembers);
1785 
1786  return cl;
1787 }
1788 
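/*
 * Minimal usage sketch, not part of the upstream file; real callers use the
 * SearchSysCacheList wrappers. Fetch every tuple matching only the first key
 * column, walk the members read-only, then release the list reference.
 */
static void
example_scan_partial_key(CatCache *cache, Datum v1)
{
	CatCList   *list;
	int			i;

	list = SearchCatCacheList(cache, 1, v1, 0, 0, 0);

	for (i = 0; i < list->n_members; i++)
	{
		HeapTuple	tup = &list->members[i]->tuple;

		/* ... read-only use of tup; the list owns it ... */
		(void) tup;
	}

	ReleaseCatCacheList(list);
}
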
1789 /*
1790  * ReleaseCatCacheList
1791  *
1792  * Decrement the reference count of a catcache list.
1793  */
1794 void
1795 ReleaseCatCacheList(CatCList *list)
1796 {
1797  /* Safety checks to ensure we were handed a cache entry */
1798  Assert(list->cl_magic == CL_MAGIC);
1799  Assert(list->refcount > 0);
1800  list->refcount--;
1801  ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1802 
1803  if (
1804 #ifndef CATCACHE_FORCE_RELEASE
1805  list->dead &&
1806 #endif
1807  list->refcount == 0)
1808  CatCacheRemoveCList(list->my_cache, list);
1809 }
1810 
1811 
1812 /*
1813  * CatalogCacheCreateEntry
1814  * Create a new CatCTup entry, copying the given HeapTuple and other
1815  * supplied data into it. The new entry initially has refcount 0.
1816  */
1817 static CatCTup *
1818 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
1819  uint32 hashValue, Index hashIndex,
1820  bool negative)
1821 {
1822  CatCTup *ct;
1823  HeapTuple dtp;
1824  MemoryContext oldcxt;
1825 
1826  /* negative entries have no tuple associated */
1827  if (ntp)
1828  {
1829  int i;
1830 
1831  Assert(!negative);
1832 
1833  /*
1834  * If there are any out-of-line toasted fields in the tuple, expand
1835  * them in-line. This saves cycles during later use of the catcache
1836  * entry, and also protects us against the possibility of the toast
1837  * tuples being freed before we attempt to fetch them, in case of
1838  * something using a slightly stale catcache entry.
1839  */
1840  if (HeapTupleHasExternal(ntp))
1841  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1842  else
1843  dtp = ntp;
1844 
1845  /* Allocate memory for CatCTup and the cached tuple in one go */
1846  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1847 
1848  ct = (CatCTup *) palloc(sizeof(CatCTup) +
1849  MAXIMUM_ALIGNOF + dtp->t_len);
1850  ct->tuple.t_len = dtp->t_len;
1851  ct->tuple.t_self = dtp->t_self;
1852  ct->tuple.t_tableOid = dtp->t_tableOid;
1853  ct->tuple.t_data = (HeapTupleHeader)
1854  MAXALIGN(((char *) ct) + sizeof(CatCTup));
1855  /* copy tuple contents */
1856  memcpy((char *) ct->tuple.t_data,
1857  (const char *) dtp->t_data,
1858  dtp->t_len);
1859  MemoryContextSwitchTo(oldcxt);
1860 
1861  if (dtp != ntp)
1862  heap_freetuple(dtp);
1863 
1864  /* extract keys - they'll point into the tuple if not by-value */
1865  for (i = 0; i < cache->cc_nkeys; i++)
1866  {
1867  Datum atp;
1868  bool isnull;
1869 
1870  atp = heap_getattr(&ct->tuple,
1871  cache->cc_keyno[i],
1872  cache->cc_tupdesc,
1873  &isnull);
1874  Assert(!isnull);
1875  ct->keys[i] = atp;
1876  }
1877  }
1878  else
1879  {
1880  Assert(negative);
1881  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1882  ct = (CatCTup *) palloc(sizeof(CatCTup));
1883 
1884  /*
1885  * Store keys - they'll point into separately allocated memory if not
1886  * by-value.
1887  */
1888  CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
1889  arguments, ct->keys);
1890  MemoryContextSwitchTo(oldcxt);
1891  }
1892 
1893  /*
1894  * Finish initializing the CatCTup header, and add it to the cache's
1895  * linked list and counts.
1896  */
1897  ct->ct_magic = CT_MAGIC;
1898  ct->my_cache = cache;
1899  ct->c_list = NULL;
1900  ct->refcount = 0; /* for the moment */
1901  ct->dead = false;
1902  ct->negative = negative;
1903  ct->hash_value = hashValue;
1904 
1905  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1906 
1907  cache->cc_ntup++;
1908  CacheHdr->ch_ntup++;
1909 
1910  /*
1911  * If the hash table has become too full, enlarge the buckets array. Quite
1912  * arbitrarily, we enlarge when fill factor > 2.
1913  */
1914  if (cache->cc_ntup > cache->cc_nbuckets * 2)
1915  RehashCatCache(cache);
1916 
1917  return ct;
1918 }
1919 
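/*
 * Worked example of the fill-factor check above (illustrative, not part of
 * the upstream file): a cache created with 64 buckets is rehashed to 128
 * buckets once it holds more than 128 tuples, so the average bucket chain
 * is kept at roughly two entries or fewer.
 */
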
1920 /*
1921  * Helper routine that frees keys stored in the keys array.
1922  */
1923 static void
1924 CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
1925 {
1926  int i;
1927 
1928  for (i = 0; i < nkeys; i++)
1929  {
1930  int attnum = attnos[i];
1931  Form_pg_attribute att;
1932 
1933  /* only valid system attribute is the oid, which is by value */
1934  if (attnum == ObjectIdAttributeNumber)
1935  continue;
1936  Assert(attnum > 0);
1937 
1938  att = TupleDescAttr(tupdesc, attnum - 1);
1939 
1940  if (!att->attbyval)
1941  pfree(DatumGetPointer(keys[i]));
1942  }
1943 }
1944 
1945 /*
1946  * Helper routine that copies the keys in the srckeys array into the dstkeys
1947  * one, guaranteeing that the datums are fully allocated in the current memory
1948  * context.
1949  */
1950 static void
1951 CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
1952  Datum *srckeys, Datum *dstkeys)
1953 {
1954  int i;
1955 
1956  /*
1957  * XXX: memory and lookup performance could possibly be improved by
1958  * storing all keys in one allocation.
1959  */
1960 
1961  for (i = 0; i < nkeys; i++)
1962  {
1963  int attnum = attnos[i];
1964 
1965  if (attnum == ObjectIdAttributeNumber)
1966  {
1967  dstkeys[i] = srckeys[i];
1968  }
1969  else
1970  {
1971  Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1972  Datum src = srckeys[i];
1973  NameData srcname;
1974 
1975  /*
1976  * Must be careful in case the caller passed a C string where a
1977  * NAME is wanted: convert the given argument to a correctly
1978  * padded NAME. Otherwise the memcpy() done by datumCopy() could
1979  * fall off the end of memory.
1980  */
1981  if (att->atttypid == NAMEOID)
1982  {
1983  namestrcpy(&srcname, DatumGetCString(src));
1984  src = NameGetDatum(&srcname);
1985  }
1986 
1987  dstkeys[i] = datumCopy(src,
1988  att->attbyval,
1989  att->attlen);
1990  }
1991  }
1992 
1993 }
1994 
1995 /*
1996  * PrepareToInvalidateCacheTuple()
1997  *
1998  * This is part of a rather subtle chain of events, so pay attention:
1999  *
2000  * When a tuple is inserted or deleted, it cannot be flushed from the
2001  * catcaches immediately, for reasons explained at the top of cache/inval.c.
2002  * Instead we have to add entry(s) for the tuple to a list of pending tuple
2003  * invalidations that will be done at the end of the command or transaction.
2004  *
2005  * The lists of tuples that need to be flushed are kept by inval.c. This
2006  * routine is a helper routine for inval.c. Given a tuple belonging to
2007  * the specified relation, find all catcaches it could be in, compute the
2008  * correct hash value for each such catcache, and call the specified
2009  * function to record the cache id and hash value in inval.c's lists.
2010  * SysCacheInvalidate will be called later, if appropriate,
2011  * using the recorded information.
2012  *
2013  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2014  * For an update, we are called just once, with tuple being the old tuple
2015  * version and newtuple the new version. We should make two list entries
2016  * if the tuple's hash value changed, but only one if it didn't.
2017  *
2018  * Note that it is irrelevant whether the given tuple is actually loaded
2019  * into the catcache at the moment. Even if it's not there now, it might
2020  * be by the end of the command, or there might be a matching negative entry
2021  * to flush --- or other backends' caches might have such entries --- so
2022  * we have to make list entries to flush it later.
2023  *
2024  * Also note that it's not an error if there are no catcaches for the
2025  * specified relation. inval.c doesn't know exactly which rels have
2026  * catcaches --- it will call this routine for any tuple that's in a
2027  * system relation.
2028  */
2029 void
2030 PrepareToInvalidateCacheTuple(Relation relation,
2031  HeapTuple tuple,
2032  HeapTuple newtuple,
2033  void (*function) (int, uint32, Oid))
2034 {
2035  slist_iter iter;
2036  Oid reloid;
2037 
2038  CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2039 
2040  /*
2041  * sanity checks
2042  */
2043  Assert(RelationIsValid(relation));
2044  Assert(HeapTupleIsValid(tuple));
2045  Assert(PointerIsValid(function));
2046  Assert(CacheHdr != NULL);
2047 
2048  reloid = RelationGetRelid(relation);
2049 
2050  /* ----------------
2051  * for each cache
2052  * if the cache contains tuples from the specified relation
2053  * compute the tuple's hash value(s) in this cache,
2054  * and call the passed function to register the information.
2055  * ----------------
2056  */
2057 
2058  slist_foreach(iter, &CacheHdr->ch_caches)
2059  {
2060  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2061  uint32 hashvalue;
2062  Oid dbid;
2063 
2064  if (ccp->cc_reloid != reloid)
2065  continue;
2066 
2067  /* Just in case cache hasn't finished initialization yet... */
2068  if (ccp->cc_tupdesc == NULL)
2069  CatalogCacheInitializeCache(ccp);
2070 
2071  hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2072  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2073 
2074  (*function) (ccp->id, hashvalue, dbid);
2075 
2076  if (newtuple)
2077  {
2078  uint32 newhashvalue;
2079 
2080  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2081 
2082  if (newhashvalue != hashvalue)
2083  (*function) (ccp->id, newhashvalue, dbid);
2084  }
2085  }
2086 }
2087 
2088 
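(Aside, not part of catcache.c: to make the callback contract described in the header comment of PrepareToInvalidateCacheTuple concrete, here is a minimal standalone sketch of a function matching the void (*function) (int, uint32, Oid) parameter. It simply appends (cache id, hash value, database id) triples to a pending list, the kind of record this routine asks its caller to keep. PendingInval and record_catcache_inval are invented names; inval.c's real machinery is different.)

#include <stdint.h>
#include <stdio.h>

typedef unsigned int Oid;              /* as in postgres_ext.h */
typedef uint32_t uint32;

/* Hypothetical pending-invalidation record, for illustration only. */
typedef struct PendingInval
{
    int     cacheid;
    uint32  hashvalue;
    Oid     dbid;                      /* 0 means "shared catalog" */
} PendingInval;

static PendingInval pending[64];
static int          npending = 0;

/* Matches the void (*function) (int, uint32, Oid) callback signature. */
static void
record_catcache_inval(int cacheid, uint32 hashvalue, Oid dbid)
{
    if (npending < 64)
        pending[npending++] = (PendingInval) {cacheid, hashvalue, dbid};
}

int
main(void)
{
    /* An "update" whose key changed would produce two calls, as the header
     * comment above describes; an insert or delete produces one. */
    record_catcache_inval(41, 0xdeadbeef, 12345);   /* old tuple's hash */
    record_catcache_inval(41, 0xfeedface, 12345);   /* new tuple's hash */

    for (int i = 0; i < npending; i++)
        printf("cache %d hash %lu db %lu\n",
               pending[i].cacheid,
               (unsigned long) pending[i].hashvalue,
               (unsigned long) pending[i].dbid);
    return 0;
}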
2089 /*
2090  * Subroutines for warning about reference leaks. These are exported so
2091  * that resowner.c can call them.
2092  */
2093 void
2094 PrintCatCacheLeakWarning(HeapTuple tuple)
2095 {
2096  CatCTup *ct = (CatCTup *) (((char *) tuple) -
2097  offsetof(CatCTup, tuple));
2098 
2099  /* Safety check to ensure we were handed a cache entry */
2100  Assert(ct->ct_magic == CT_MAGIC);
2101 
2102  elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
2103  ct->my_cache->cc_relname, ct->my_cache->id,
2104  ItemPointerGetBlockNumber(&(tuple->t_self)),
2105  ItemPointerGetOffsetNumber(&(tuple->t_self)),
2106  ct->refcount);
2107 }
2108 
2109 void
2110 PrintCatCacheListLeakWarning(CatCList *list)
2111 {
2112  elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
2113  list->my_cache->cc_relname, list->my_cache->id,
2114  list, list->refcount);
2115 }
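(Aside, not part of catcache.c: a rough sketch of the calling pattern the comment above alludes to, in which a resource-owner cleanup path warns about and then drops still-held catcache references. The function and parameter names are invented and this is not resowner.c's actual code; the only real interfaces used are PrintCatCacheLeakWarning() and ReleaseCatCache(), whose definitions appear above. It compiles only inside the backend.)

#include "postgres.h"
#include "utils/catcache.h"

static void
warn_and_release_catcache_refs(HeapTuple *leaked, int nleaked, bool isCommit)
{
	int		i;

	for (i = 0; i < nleaked; i++)
	{
		/* Illustrative: a dangling reference is only surprising on commit;
		 * during abort cleanup may legitimately still be pending. */
		if (isCommit)
			PrintCatCacheLeakWarning(leaked[i]);
		ReleaseCatCache(leaked[i]);
	}
}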