PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
catcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/tuptoaster.h"
23 #include "access/valid.h"
24 #include "access/xact.h"
25 #include "catalog/pg_operator.h"
26 #include "catalog/pg_type.h"
27 #include "miscadmin.h"
28 #ifdef CATCACHE_STATS
29 #include "storage/ipc.h" /* for on_proc_exit */
30 #endif
31 #include "storage/lmgr.h"
32 #include "utils/builtins.h"
33 #include "utils/fmgroids.h"
34 #include "utils/inval.h"
35 #include "utils/memutils.h"
36 #include "utils/rel.h"
37 #include "utils/resowner_private.h"
38 #include "utils/syscache.h"
39 #include "utils/tqual.h"
40 
41 
/* #define CACHEDEBUG */	/* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))


/*
 * variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE1_elog(a,b)				elog(a,b)
#define CACHE2_elog(a,b,c)				elog(a,b,c)
#define CACHE3_elog(a,b,c,d)			elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)			elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)		elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)		elog(a,b,c,d,e,f,g)
#else
#define CACHE1_elog(a,b)
#define CACHE2_elog(a,b,c)
#define CACHE3_elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)
#endif
71 
72 /* Cache management header --- pointer is NULL until created */
74 
75 
76 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
77  ScanKey cur_skey);
79  HeapTuple tuple);
80 
81 #ifdef CATCACHE_STATS
82 static void CatCachePrintStats(int code, Datum arg);
83 #endif
84 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
85 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
86 static void CatalogCacheInitializeCache(CatCache *cache);
88  uint32 hashValue, Index hashIndex,
89  bool negative);
90 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
91 
92 
93 /*
94  * internal support functions
95  */
96 
97 /*
98  * Look up the hash and equality functions for system types that are used
99  * as cache key fields.
100  *
101  * XXX this should be replaced by catalog lookups,
102  * but that seems to pose considerable risk of circularity...
103  */
104 static void
105 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
106 {
107  switch (keytype)
108  {
109  case BOOLOID:
110  *hashfunc = hashchar;
111 
112  *eqfunc = F_BOOLEQ;
113  break;
114  case CHAROID:
115  *hashfunc = hashchar;
116 
117  *eqfunc = F_CHAREQ;
118  break;
119  case NAMEOID:
120  *hashfunc = hashname;
121 
122  *eqfunc = F_NAMEEQ;
123  break;
124  case INT2OID:
125  *hashfunc = hashint2;
126 
127  *eqfunc = F_INT2EQ;
128  break;
129  case INT4OID:
130  *hashfunc = hashint4;
131 
132  *eqfunc = F_INT4EQ;
133  break;
134  case TEXTOID:
135  *hashfunc = hashtext;
136 
137  *eqfunc = F_TEXTEQ;
138  break;
139  case OIDOID:
140  case REGPROCOID:
141  case REGPROCEDUREOID:
142  case REGOPEROID:
143  case REGOPERATOROID:
144  case REGCLASSOID:
145  case REGTYPEOID:
146  case REGCONFIGOID:
147  case REGDICTIONARYOID:
148  case REGROLEOID:
149  case REGNAMESPACEOID:
150  *hashfunc = hashoid;
151 
152  *eqfunc = F_OIDEQ;
153  break;
154  case OIDVECTOROID:
155  *hashfunc = hashoidvector;
156 
157  *eqfunc = F_OIDVECTOREQ;
158  break;
159  default:
160  elog(FATAL, "type %u not supported as catcache key", keytype);
161  *hashfunc = NULL; /* keep compiler quiet */
162 
163  *eqfunc = InvalidOid;
164  break;
165  }
166 }
167 
168 /*
169  * CatalogCacheComputeHashValue
170  *
171  * Compute the hash value associated with a given set of lookup keys
172  */
173 static uint32
174 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
175 {
176  uint32 hashValue = 0;
177  uint32 oneHash;
178 
179  CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
180  cache->cc_relname,
181  nkeys,
182  cache);
183 
184  switch (nkeys)
185  {
186  case 4:
187  oneHash =
189  cur_skey[3].sk_argument));
190  hashValue ^= oneHash << 24;
191  hashValue ^= oneHash >> 8;
192  /* FALLTHROUGH */
193  case 3:
194  oneHash =
196  cur_skey[2].sk_argument));
197  hashValue ^= oneHash << 16;
198  hashValue ^= oneHash >> 16;
199  /* FALLTHROUGH */
200  case 2:
201  oneHash =
203  cur_skey[1].sk_argument));
204  hashValue ^= oneHash << 8;
205  hashValue ^= oneHash >> 24;
206  /* FALLTHROUGH */
207  case 1:
208  oneHash =
210  cur_skey[0].sk_argument));
211  hashValue ^= oneHash;
212  break;
213  default:
214  elog(FATAL, "wrong number of hash keys: %d", nkeys);
215  break;
216  }
217 
218  return hashValue;
219 }
220 
221 /*
222  * CatalogCacheComputeTupleHashValue
223  *
224  * Compute the hash value associated with a given tuple to be cached
225  */
226 static uint32
228 {
229  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
230  bool isNull = false;
231 
232  /* Copy pre-initialized overhead data for scankey */
233  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
234 
235  /* Now extract key fields from tuple, insert into scankey */
236  switch (cache->cc_nkeys)
237  {
238  case 4:
239  cur_skey[3].sk_argument =
240  (cache->cc_key[3] == ObjectIdAttributeNumber)
242  : fastgetattr(tuple,
243  cache->cc_key[3],
244  cache->cc_tupdesc,
245  &isNull);
246  Assert(!isNull);
247  /* FALLTHROUGH */
248  case 3:
249  cur_skey[2].sk_argument =
250  (cache->cc_key[2] == ObjectIdAttributeNumber)
252  : fastgetattr(tuple,
253  cache->cc_key[2],
254  cache->cc_tupdesc,
255  &isNull);
256  Assert(!isNull);
257  /* FALLTHROUGH */
258  case 2:
259  cur_skey[1].sk_argument =
260  (cache->cc_key[1] == ObjectIdAttributeNumber)
262  : fastgetattr(tuple,
263  cache->cc_key[1],
264  cache->cc_tupdesc,
265  &isNull);
266  Assert(!isNull);
267  /* FALLTHROUGH */
268  case 1:
269  cur_skey[0].sk_argument =
270  (cache->cc_key[0] == ObjectIdAttributeNumber)
272  : fastgetattr(tuple,
273  cache->cc_key[0],
274  cache->cc_tupdesc,
275  &isNull);
276  Assert(!isNull);
277  break;
278  default:
279  elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
280  break;
281  }
282 
283  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
284 }
285 
286 
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * on_proc_exit callback: dump per-cache and total catcache statistics
 * at backend exit.  Only built when CATCACHE_STATS is defined.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
343 
344 
345 /*
346  * CatCacheRemoveCTup
347  *
348  * Unlink and delete the given cache entry
349  *
350  * NB: if it is a member of a CatCList, the CatCList is deleted too.
351  * Both the cache entry and the list had better have zero refcount.
352  */
353 static void
355 {
356  Assert(ct->refcount == 0);
357  Assert(ct->my_cache == cache);
358 
359  if (ct->c_list)
360  {
361  /*
362  * The cleanest way to handle this is to call CatCacheRemoveCList,
363  * which will recurse back to me, and the recursive call will do the
364  * work. Set the "dead" flag to make sure it does recurse.
365  */
366  ct->dead = true;
367  CatCacheRemoveCList(cache, ct->c_list);
368  return; /* nothing left to do */
369  }
370 
371  /* delink from linked list */
372  dlist_delete(&ct->cache_elem);
373 
374  /* free associated tuple data */
375  if (ct->tuple.t_data != NULL)
376  pfree(ct->tuple.t_data);
377  pfree(ct);
378 
379  --cache->cc_ntup;
380  --CacheHdr->ch_ntup;
381 }
382 
383 /*
384  * CatCacheRemoveCList
385  *
386  * Unlink and delete the given cache list entry
387  *
388  * NB: any dead member entries that become unreferenced are deleted too.
389  */
390 static void
392 {
393  int i;
394 
395  Assert(cl->refcount == 0);
396  Assert(cl->my_cache == cache);
397 
398  /* delink from member tuples */
399  for (i = cl->n_members; --i >= 0;)
400  {
401  CatCTup *ct = cl->members[i];
402 
403  Assert(ct->c_list == cl);
404  ct->c_list = NULL;
405  /* if the member is dead and now has no references, remove it */
406  if (
407 #ifndef CATCACHE_FORCE_RELEASE
408  ct->dead &&
409 #endif
410  ct->refcount == 0)
411  CatCacheRemoveCTup(cache, ct);
412  }
413 
414  /* delink from linked list */
415  dlist_delete(&cl->cache_elem);
416 
417  /* free associated tuple data */
418  if (cl->tuple.t_data != NULL)
419  pfree(cl->tuple.t_data);
420  pfree(cl);
421 }
422 
423 
424 /*
425  * CatalogCacheIdInvalidate
426  *
427  * Invalidate entries in the specified cache, given a hash value.
428  *
429  * We delete cache entries that match the hash value, whether positive
430  * or negative. We don't care whether the invalidation is the result
431  * of a tuple insertion or a deletion.
432  *
433  * We used to try to match positive cache entries by TID, but that is
434  * unsafe after a VACUUM FULL on a system catalog: an inval event could
435  * be queued before VACUUM FULL, and then processed afterwards, when the
436  * target tuple that has to be invalidated has a different TID than it
437  * did when the event was created. So now we just compare hash values and
438  * accept the small risk of unnecessary invalidations due to false matches.
439  *
440  * This routine is only quasi-public: it should only be used by inval.c.
441  */
442 void
443 CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
444 {
445  slist_iter cache_iter;
446 
447  CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
448 
449  /*
450  * inspect caches to find the proper cache
451  */
452  slist_foreach(cache_iter, &CacheHdr->ch_caches)
453  {
454  CatCache *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
455  Index hashIndex;
456  dlist_mutable_iter iter;
457 
458  if (cacheId != ccp->id)
459  continue;
460 
461  /*
462  * We don't bother to check whether the cache has finished
463  * initialization yet; if not, there will be no entries in it so no
464  * problem.
465  */
466 
467  /*
468  * Invalidate *all* CatCLists in this cache; it's too hard to tell
469  * which searches might still be correct, so just zap 'em all.
470  */
471  dlist_foreach_modify(iter, &ccp->cc_lists)
472  {
473  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
474 
475  if (cl->refcount > 0)
476  cl->dead = true;
477  else
478  CatCacheRemoveCList(ccp, cl);
479  }
480 
481  /*
482  * inspect the proper hash bucket for tuple matches
483  */
484  hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
485  dlist_foreach_modify(iter, &ccp->cc_bucket[hashIndex])
486  {
487  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
488 
489  if (hashValue == ct->hash_value)
490  {
491  if (ct->refcount > 0 ||
492  (ct->c_list && ct->c_list->refcount > 0))
493  {
494  ct->dead = true;
495  /* list, if any, was marked dead above */
496  Assert(ct->c_list == NULL || ct->c_list->dead);
497  }
498  else
499  CatCacheRemoveCTup(ccp, ct);
500  CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
501 #ifdef CATCACHE_STATS
502  ccp->cc_invals++;
503 #endif
504  /* could be multiple matches, so keep looking! */
505  }
506  }
507  break; /* need only search this one cache */
508  }
509 }
510 
511 /* ----------------------------------------------------------------
512  * public functions
513  * ----------------------------------------------------------------
514  */
515 
516 
517 /*
518  * Standard routine for creating cache context if it doesn't exist yet
519  *
520  * There are a lot of places (probably far more than necessary) that check
521  * whether CacheMemoryContext exists yet and want to create it if not.
522  * We centralize knowledge of exactly how to create it here.
523  */
524 void
526 {
527  /*
528  * Purely for paranoia, check that context doesn't exist; caller probably
529  * did so already.
530  */
531  if (!CacheMemoryContext)
533  "CacheMemoryContext",
535 }
536 
537 
538 /*
539  * AtEOXact_CatCache
540  *
541  * Clean up catcaches at end of main transaction (either commit or abort)
542  *
543  * As of PostgreSQL 8.1, catcache pins should get released by the
544  * ResourceOwner mechanism. This routine is just a debugging
545  * cross-check that no pins remain.
546  */
547 void
548 AtEOXact_CatCache(bool isCommit)
549 {
550 #ifdef USE_ASSERT_CHECKING
551  slist_iter cache_iter;
552 
553  slist_foreach(cache_iter, &CacheHdr->ch_caches)
554  {
555  CatCache *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
556  dlist_iter iter;
557  int i;
558 
559  /* Check CatCLists */
560  dlist_foreach(iter, &ccp->cc_lists)
561  {
562  CatCList *cl;
563 
564  cl = dlist_container(CatCList, cache_elem, iter.cur);
565  Assert(cl->cl_magic == CL_MAGIC);
566  Assert(cl->refcount == 0);
567  Assert(!cl->dead);
568  }
569 
570  /* Check individual tuples */
571  for (i = 0; i < ccp->cc_nbuckets; i++)
572  {
573  dlist_head *bucket = &ccp->cc_bucket[i];
574 
575  dlist_foreach(iter, bucket)
576  {
577  CatCTup *ct;
578 
579  ct = dlist_container(CatCTup, cache_elem, iter.cur);
580  Assert(ct->ct_magic == CT_MAGIC);
581  Assert(ct->refcount == 0);
582  Assert(!ct->dead);
583  }
584  }
585  }
586 #endif
587 }
588 
589 /*
590  * ResetCatalogCache
591  *
592  * Reset one catalog cache to empty.
593  *
594  * This is not very efficient if the target cache is nearly empty.
595  * However, it shouldn't need to be efficient; we don't invoke it often.
596  */
597 static void
599 {
600  dlist_mutable_iter iter;
601  int i;
602 
603  /* Remove each list in this cache, or at least mark it dead */
604  dlist_foreach_modify(iter, &cache->cc_lists)
605  {
606  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
607 
608  if (cl->refcount > 0)
609  cl->dead = true;
610  else
611  CatCacheRemoveCList(cache, cl);
612  }
613 
614  /* Remove each tuple in this cache, or at least mark it dead */
615  for (i = 0; i < cache->cc_nbuckets; i++)
616  {
617  dlist_head *bucket = &cache->cc_bucket[i];
618 
619  dlist_foreach_modify(iter, bucket)
620  {
621  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
622 
623  if (ct->refcount > 0 ||
624  (ct->c_list && ct->c_list->refcount > 0))
625  {
626  ct->dead = true;
627  /* list, if any, was marked dead above */
628  Assert(ct->c_list == NULL || ct->c_list->dead);
629  }
630  else
631  CatCacheRemoveCTup(cache, ct);
632 #ifdef CATCACHE_STATS
633  cache->cc_invals++;
634 #endif
635  }
636  }
637 }
638 
639 /*
640  * ResetCatalogCaches
641  *
642  * Reset all caches when a shared cache inval event forces it
643  */
644 void
646 {
647  slist_iter iter;
648 
649  CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
650 
651  slist_foreach(iter, &CacheHdr->ch_caches)
652  {
653  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
654 
655  ResetCatalogCache(cache);
656  }
657 
658  CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
659 }
660 
661 /*
662  * CatalogCacheFlushCatalog
663  *
664  * Flush all catcache entries that came from the specified system catalog.
665  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
666  * tuples very likely now have different TIDs than before. (At one point
667  * we also tried to force re-execution of CatalogCacheInitializeCache for
668  * the cache(s) on that catalog. This is a bad idea since it leads to all
669  * kinds of trouble if a cache flush occurs while loading cache entries.
670  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
671  * rather than relying on the relcache to keep a tupdesc for us. Of course
672  * this assumes the tupdesc of a cachable system table will not change...)
673  */
674 void
676 {
677  slist_iter iter;
678 
679  CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
680 
681  slist_foreach(iter, &CacheHdr->ch_caches)
682  {
683  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
684 
685  /* Does this cache store tuples of the target catalog? */
686  if (cache->cc_reloid == catId)
687  {
688  /* Yes, so flush all its contents */
689  ResetCatalogCache(cache);
690 
691  /* Tell inval.c to call syscache callbacks for this cache */
692  CallSyscacheCallbacks(cache->id, 0);
693  }
694  }
695 
696  CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
697 }
698 
699 /*
700  * InitCatCache
701  *
702  * This allocates and initializes a cache for a system catalog relation.
703  * Actually, the cache is only partially initialized to avoid opening the
704  * relation. The relation will be opened and the rest of the cache
705  * structure initialized on the first access.
706  */
707 #ifdef CACHEDEBUG
708 #define InitCatCache_DEBUG2 \
709 do { \
710  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
711  cp->cc_reloid, cp->cc_indexoid, cp->id, \
712  cp->cc_nkeys, cp->cc_nbuckets); \
713 } while(0)
714 #else
715 #define InitCatCache_DEBUG2
716 #endif
717 
718 CatCache *
720  Oid reloid,
721  Oid indexoid,
722  int nkeys,
723  const int *key,
724  int nbuckets)
725 {
726  CatCache *cp;
727  MemoryContext oldcxt;
728  int i;
729 
730  /*
731  * nbuckets is the initial number of hash buckets to use in this catcache.
732  * It will be enlarged later if it becomes too full.
733  *
734  * nbuckets must be a power of two. We check this via Assert rather than
735  * a full runtime check because the values will be coming from constant
736  * tables.
737  *
738  * If you're confused by the power-of-two check, see comments in
739  * bitmapset.c for an explanation.
740  */
741  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
742 
743  /*
744  * first switch to the cache context so our allocations do not vanish at
745  * the end of a transaction
746  */
747  if (!CacheMemoryContext)
749 
751 
752  /*
753  * if first time through, initialize the cache group header
754  */
755  if (CacheHdr == NULL)
756  {
757  CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
758  slist_init(&CacheHdr->ch_caches);
759  CacheHdr->ch_ntup = 0;
760 #ifdef CATCACHE_STATS
761  /* set up to dump stats at backend exit */
762  on_proc_exit(CatCachePrintStats, 0);
763 #endif
764  }
765 
766  /*
767  * allocate a new cache structure
768  *
769  * Note: we rely on zeroing to initialize all the dlist headers correctly
770  */
771  cp = (CatCache *) palloc0(sizeof(CatCache));
772  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
773 
774  /*
775  * initialize the cache's relation information for the relation
776  * corresponding to this cache, and initialize some of the new cache's
777  * other internal fields. But don't open the relation yet.
778  */
779  cp->id = id;
780  cp->cc_relname = "(not known yet)";
781  cp->cc_reloid = reloid;
782  cp->cc_indexoid = indexoid;
783  cp->cc_relisshared = false; /* temporary */
784  cp->cc_tupdesc = (TupleDesc) NULL;
785  cp->cc_ntup = 0;
786  cp->cc_nbuckets = nbuckets;
787  cp->cc_nkeys = nkeys;
788  for (i = 0; i < nkeys; ++i)
789  cp->cc_key[i] = key[i];
790 
791  /*
792  * new cache is initialized as far as we can go for now. print some
793  * debugging information, if appropriate.
794  */
796 
797  /*
798  * add completed cache to top of group header's list
799  */
800  slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
801 
802  /*
803  * back to the old context before we return...
804  */
805  MemoryContextSwitchTo(oldcxt);
806 
807  return cp;
808 }
809 
810 /*
811  * Enlarge a catcache, doubling the number of buckets.
812  */
813 static void
815 {
816  dlist_head *newbucket;
817  int newnbuckets;
818  int i;
819 
820  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
821  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
822 
823  /* Allocate a new, larger, hash table. */
824  newnbuckets = cp->cc_nbuckets * 2;
825  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
826 
827  /* Move all entries from old hash table to new. */
828  for (i = 0; i < cp->cc_nbuckets; i++)
829  {
830  dlist_mutable_iter iter;
831 
832  dlist_foreach_modify(iter, &cp->cc_bucket[i])
833  {
834  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
835  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
836 
837  dlist_delete(iter.cur);
838  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
839  }
840  }
841 
842  /* Switch to the new array. */
843  pfree(cp->cc_bucket);
844  cp->cc_nbuckets = newnbuckets;
845  cp->cc_bucket = newbucket;
846 }
847 
848 /*
849  * CatalogCacheInitializeCache
850  *
851  * This function does final initialization of a catcache: obtain the tuple
852  * descriptor and set up the hash and equality function links. We assume
853  * that the relcache entry can be opened at this point!
854  */
855 #ifdef CACHEDEBUG
856 #define CatalogCacheInitializeCache_DEBUG1 \
857  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
858  cache->cc_reloid)
859 
860 #define CatalogCacheInitializeCache_DEBUG2 \
861 do { \
862  if (cache->cc_key[i] > 0) { \
863  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
864  i+1, cache->cc_nkeys, cache->cc_key[i], \
865  tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
866  } else { \
867  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
868  i+1, cache->cc_nkeys, cache->cc_key[i]); \
869  } \
870 } while(0)
871 #else
872 #define CatalogCacheInitializeCache_DEBUG1
873 #define CatalogCacheInitializeCache_DEBUG2
874 #endif
875 
876 static void
878 {
879  Relation relation;
880  MemoryContext oldcxt;
881  TupleDesc tupdesc;
882  int i;
883 
885 
886  relation = heap_open(cache->cc_reloid, AccessShareLock);
887 
888  /*
889  * switch to the cache context so our allocations do not vanish at the end
890  * of a transaction
891  */
893 
895 
896  /*
897  * copy the relcache's tuple descriptor to permanent cache storage
898  */
899  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
900 
901  /*
902  * save the relation's name and relisshared flag, too (cc_relname is used
903  * only for debugging purposes)
904  */
905  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
906  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
907 
908  /*
909  * return to the caller's memory context and close the rel
910  */
911  MemoryContextSwitchTo(oldcxt);
912 
913  heap_close(relation, AccessShareLock);
914 
915  CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
916  cache->cc_relname, cache->cc_nkeys);
917 
918  /*
919  * initialize cache's key information
920  */
921  for (i = 0; i < cache->cc_nkeys; ++i)
922  {
923  Oid keytype;
924  RegProcedure eqfunc;
925 
927 
928  if (cache->cc_key[i] > 0)
929  {
930  Form_pg_attribute attr = tupdesc->attrs[cache->cc_key[i] - 1];
931 
932  keytype = attr->atttypid;
933  /* cache key columns should always be NOT NULL */
934  Assert(attr->attnotnull);
935  }
936  else
937  {
938  if (cache->cc_key[i] != ObjectIdAttributeNumber)
939  elog(FATAL, "only sys attr supported in caches is OID");
940  keytype = OIDOID;
941  }
942 
943  GetCCHashEqFuncs(keytype,
944  &cache->cc_hashfunc[i],
945  &eqfunc);
946 
947  cache->cc_isname[i] = (keytype == NAMEOID);
948 
949  /*
950  * Do equality-function lookup (we assume this won't need a catalog
951  * lookup for any supported type)
952  */
953  fmgr_info_cxt(eqfunc,
954  &cache->cc_skey[i].sk_func,
956 
957  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
958  cache->cc_skey[i].sk_attno = cache->cc_key[i];
959 
960  /* Fill in sk_strategy as well --- always standard equality */
962  cache->cc_skey[i].sk_subtype = InvalidOid;
963  /* Currently, there are no catcaches on collation-aware data types */
964  cache->cc_skey[i].sk_collation = InvalidOid;
965 
966  CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
967  cache->cc_relname,
968  i,
969  cache);
970  }
971 
972  /*
973  * mark this cache fully initialized
974  */
975  cache->cc_tupdesc = tupdesc;
976 }
977 
978 /*
979  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
980  *
981  * One reason to call this routine is to ensure that the relcache has
982  * created entries for all the catalogs and indexes referenced by catcaches.
983  * Therefore, provide an option to open the index as well as fixing the
984  * cache itself. An exception is the indexes on pg_am, which we don't use
985  * (cf. IndexScanOK).
986  */
987 void
988 InitCatCachePhase2(CatCache *cache, bool touch_index)
989 {
990  if (cache->cc_tupdesc == NULL)
992 
993  if (touch_index &&
994  cache->id != AMOID &&
995  cache->id != AMNAME)
996  {
997  Relation idesc;
998 
999  /*
1000  * We must lock the underlying catalog before opening the index to
1001  * avoid deadlock, since index_open could possibly result in reading
1002  * this same catalog, and if anyone else is exclusive-locking this
1003  * catalog and index they'll be doing it in that order.
1004  */
1006  idesc = index_open(cache->cc_indexoid, AccessShareLock);
1007 
1008  /*
1009  * While we've got the index open, let's check that it's unique (and
1010  * not just deferrable-unique, thank you very much). This is just to
1011  * catch thinkos in definitions of new catcaches, so we don't worry
1012  * about the pg_am indexes not getting tested.
1013  */
1014  Assert(idesc->rd_index->indisunique &&
1015  idesc->rd_index->indimmediate);
1016 
1017  index_close(idesc, AccessShareLock);
1019  }
1020 }
1021 
1022 
1023 /*
1024  * IndexScanOK
1025  *
1026  * This function checks for tuples that will be fetched by
1027  * IndexSupportInitialize() during relcache initialization for
1028  * certain system indexes that support critical syscaches.
1029  * We can't use an indexscan to fetch these, else we'll get into
1030  * infinite recursion. A plain heap scan will work, however.
1031  * Once we have completed relcache initialization (signaled by
1032  * criticalRelcachesBuilt), we don't have to worry anymore.
1033  *
1034  * Similarly, during backend startup we have to be able to use the
1035  * pg_authid and pg_auth_members syscaches for authentication even if
1036  * we don't yet have relcache entries for those catalogs' indexes.
1037  */
1038 static bool
1039 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1040 {
1041  switch (cache->id)
1042  {
1043  case INDEXRELID:
1044 
1045  /*
1046  * Rather than tracking exactly which indexes have to be loaded
1047  * before we can use indexscans (which changes from time to time),
1048  * just force all pg_index searches to be heap scans until we've
1049  * built the critical relcaches.
1050  */
1052  return false;
1053  break;
1054 
1055  case AMOID:
1056  case AMNAME:
1057 
1058  /*
1059  * Always do heap scans in pg_am, because it's so small there's
1060  * not much point in an indexscan anyway. We *must* do this when
1061  * initially building critical relcache entries, but we might as
1062  * well just always do it.
1063  */
1064  return false;
1065 
1066  case AUTHNAME:
1067  case AUTHOID:
1068  case AUTHMEMMEMROLE:
1069 
1070  /*
1071  * Protect authentication lookups occurring before relcache has
1072  * collected entries for shared indexes.
1073  */
1075  return false;
1076  break;
1077 
1078  default:
1079  break;
1080  }
1081 
1082  /* Normal case, allow index scan */
1083  return true;
1084 }
1085 
1086 /*
1087  * SearchCatCache
1088  *
1089  * This call searches a system cache for a tuple, opening the relation
1090  * if necessary (on the first access to a particular cache).
1091  *
1092  * The result is NULL if not found, or a pointer to a HeapTuple in
1093  * the cache. The caller must not modify the tuple, and must call
1094  * ReleaseCatCache() when done with it.
1095  *
1096  * The search key values should be expressed as Datums of the key columns'
1097  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1098  * exception, the passed-in key for a NAME column can be just a C string;
1099  * the caller need not go to the trouble of converting it to a fully
1100  * null-padded NAME.
1101  */
1102 HeapTuple
1104  Datum v1,
1105  Datum v2,
1106  Datum v3,
1107  Datum v4)
1108 {
1109  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1110  uint32 hashValue;
1111  Index hashIndex;
1112  dlist_iter iter;
1113  dlist_head *bucket;
1114  CatCTup *ct;
1115  Relation relation;
1116  SysScanDesc scandesc;
1117  HeapTuple ntp;
1118 
1119  /* Make sure we're in an xact, even if this ends up being a cache hit */
1121 
1122  /*
1123  * one-time startup overhead for each cache
1124  */
1125  if (cache->cc_tupdesc == NULL)
1127 
1128 #ifdef CATCACHE_STATS
1129  cache->cc_searches++;
1130 #endif
1131 
1132  /*
1133  * initialize the search key information
1134  */
1135  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1136  cur_skey[0].sk_argument = v1;
1137  cur_skey[1].sk_argument = v2;
1138  cur_skey[2].sk_argument = v3;
1139  cur_skey[3].sk_argument = v4;
1140 
1141  /*
1142  * find the hash bucket in which to look for the tuple
1143  */
1144  hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1145  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1146 
1147  /*
1148  * scan the hash bucket until we find a match or exhaust our tuples
1149  *
1150  * Note: it's okay to use dlist_foreach here, even though we modify the
1151  * dlist within the loop, because we don't continue the loop afterwards.
1152  */
1153  bucket = &cache->cc_bucket[hashIndex];
1154  dlist_foreach(iter, bucket)
1155  {
1156  bool res;
1157 
1158  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1159 
1160  if (ct->dead)
1161  continue; /* ignore dead entries */
1162 
1163  if (ct->hash_value != hashValue)
1164  continue; /* quickly skip entry if wrong hash val */
1165 
1166  /*
1167  * see if the cached tuple matches our key.
1168  */
1169  HeapKeyTest(&ct->tuple,
1170  cache->cc_tupdesc,
1171  cache->cc_nkeys,
1172  cur_skey,
1173  res);
1174  if (!res)
1175  continue;
1176 
1177  /*
1178  * We found a match in the cache. Move it to the front of the list
1179  * for its hashbucket, in order to speed subsequent searches. (The
1180  * most frequently accessed elements in any hashbucket will tend to be
1181  * near the front of the hashbucket's list.)
1182  */
1183  dlist_move_head(bucket, &ct->cache_elem);
1184 
1185  /*
1186  * If it's a positive entry, bump its refcount and return it. If it's
1187  * negative, we can report failure to the caller.
1188  */
1189  if (!ct->negative)
1190  {
1192  ct->refcount++;
1194 
1195  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1196  cache->cc_relname, hashIndex);
1197 
1198 #ifdef CATCACHE_STATS
1199  cache->cc_hits++;
1200 #endif
1201 
1202  return &ct->tuple;
1203  }
1204  else
1205  {
1206  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1207  cache->cc_relname, hashIndex);
1208 
1209 #ifdef CATCACHE_STATS
1210  cache->cc_neg_hits++;
1211 #endif
1212 
1213  return NULL;
1214  }
1215  }
1216 
1217  /*
1218  * Tuple was not found in cache, so we have to try to retrieve it directly
1219  * from the relation. If found, we will add it to the cache; if not
1220  * found, we will add a negative cache entry instead.
1221  *
1222  * NOTE: it is possible for recursive cache lookups to occur while reading
1223  * the relation --- for example, due to shared-cache-inval messages being
1224  * processed during heap_open(). This is OK. It's even possible for one
1225  * of those lookups to find and enter the very same tuple we are trying to
1226  * fetch here. If that happens, we will enter a second copy of the tuple
1227  * into the cache. The first copy will never be referenced again, and
1228  * will eventually age out of the cache, so there's no functional problem.
1229  * This case is rare enough that it's not worth expending extra cycles to
1230  * detect.
1231  */
1232  relation = heap_open(cache->cc_reloid, AccessShareLock);
1233 
1234  scandesc = systable_beginscan(relation,
1235  cache->cc_indexoid,
1236  IndexScanOK(cache, cur_skey),
1237  NULL,
1238  cache->cc_nkeys,
1239  cur_skey);
1240 
1241  ct = NULL;
1242 
1243  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1244  {
1245  ct = CatalogCacheCreateEntry(cache, ntp,
1246  hashValue, hashIndex,
1247  false);
1248  /* immediately set the refcount to 1 */
1250  ct->refcount++;
1252  break; /* assume only one match */
1253  }
1254 
1255  systable_endscan(scandesc);
1256 
1257  heap_close(relation, AccessShareLock);
1258 
1259  /*
1260  * If tuple was not found, we need to build a negative cache entry
1261  * containing a fake tuple. The fake tuple has the correct key columns,
1262  * but nulls everywhere else.
1263  *
1264  * In bootstrap mode, we don't build negative entries, because the cache
1265  * invalidation mechanism isn't alive and can't clear them if the tuple
1266  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1267  * cache inval for that.)
1268  */
1269  if (ct == NULL)
1270  {
1272  return NULL;
1273 
1274  ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1275  ct = CatalogCacheCreateEntry(cache, ntp,
1276  hashValue, hashIndex,
1277  true);
1278  heap_freetuple(ntp);
1279 
1280  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1281  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1282  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1283  cache->cc_relname, hashIndex);
1284 
1285  /*
1286  * We are not returning the negative entry to the caller, so leave its
1287  * refcount zero.
1288  */
1289 
1290  return NULL;
1291  }
1292 
1293  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1294  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1295  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1296  cache->cc_relname, hashIndex);
1297 
1298 #ifdef CATCACHE_STATS
1299  cache->cc_newloads++;
1300 #endif
1301 
1302  return &ct->tuple;
1303 }
1304 
1305 /*
1306  * ReleaseCatCache
1307  *
1308  * Decrement the reference count of a catcache entry (releasing the
1309  * hold grabbed by a successful SearchCatCache).
1310  *
1311  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1312  * will be freed as soon as their refcount goes to zero. In combination
1313  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1314  * to catch references to already-released catcache entries.
1315  */
1316 void
1318 {
1319  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1320  offsetof(CatCTup, tuple));
1321 
1322  /* Safety checks to ensure we were handed a cache entry */
1323  Assert(ct->ct_magic == CT_MAGIC);
1324  Assert(ct->refcount > 0);
1325 
1326  ct->refcount--;
1328 
1329  if (
1330 #ifndef CATCACHE_FORCE_RELEASE
1331  ct->dead &&
1332 #endif
1333  ct->refcount == 0 &&
1334  (ct->c_list == NULL || ct->c_list->refcount == 0))
1335  CatCacheRemoveCTup(ct->my_cache, ct);
1336 }
1337 
1338 
1339 /*
1340  * GetCatCacheHashValue
1341  *
1342  * Compute the hash value for a given set of search keys.
1343  *
1344  * The reason for exposing this as part of the API is that the hash value is
1345  * exposed in cache invalidation operations, so there are places outside the
1346  * catcache code that need to be able to compute the hash values.
1347  */
1348 uint32
1350  Datum v1,
1351  Datum v2,
1352  Datum v3,
1353  Datum v4)
1354 {
1355  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1356 
1357  /*
1358  * one-time startup overhead for each cache
1359  */
1360  if (cache->cc_tupdesc == NULL)
1362 
1363  /*
1364  * initialize the search key information
1365  */
1366  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1367  cur_skey[0].sk_argument = v1;
1368  cur_skey[1].sk_argument = v2;
1369  cur_skey[2].sk_argument = v3;
1370  cur_skey[3].sk_argument = v4;
1371 
1372  /*
1373  * calculate the hash value
1374  */
1375  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1376 }
1377 
1378 
1379 /*
1380  * SearchCatCacheList
1381  *
1382  * Generate a list of all tuples matching a partial key (that is,
1383  * a key specifying just the first K of the cache's N key columns).
1384  *
1385  * The caller must not modify the list object or the pointed-to tuples,
1386  * and must call ReleaseCatCacheList() when done with the list.
1387  */
1388 CatCList *
1390  int nkeys,
1391  Datum v1,
1392  Datum v2,
1393  Datum v3,
1394  Datum v4)
1395 {
1396  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1397  uint32 lHashValue;
1398  dlist_iter iter;
1399  CatCList *cl;
1400  CatCTup *ct;
1401  List *volatile ctlist;
1402  ListCell *ctlist_item;
1403  int nmembers;
1404  bool ordered;
1405  HeapTuple ntp;
1406  MemoryContext oldcxt;
1407  int i;
1408 
1409  /*
1410  * one-time startup overhead for each cache
1411  */
1412  if (cache->cc_tupdesc == NULL)
1414 
1415  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1416 
1417 #ifdef CATCACHE_STATS
1418  cache->cc_lsearches++;
1419 #endif
1420 
1421  /*
1422  * initialize the search key information
1423  */
1424  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1425  cur_skey[0].sk_argument = v1;
1426  cur_skey[1].sk_argument = v2;
1427  cur_skey[2].sk_argument = v3;
1428  cur_skey[3].sk_argument = v4;
1429 
1430  /*
1431  * compute a hash value of the given keys for faster search. We don't
1432  * presently divide the CatCList items into buckets, but this still lets
1433  * us skip non-matching items quickly most of the time.
1434  */
1435  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1436 
1437  /*
1438  * scan the items until we find a match or exhaust our list
1439  *
1440  * Note: it's okay to use dlist_foreach here, even though we modify the
1441  * dlist within the loop, because we don't continue the loop afterwards.
1442  */
1443  dlist_foreach(iter, &cache->cc_lists)
1444  {
1445  bool res;
1446 
1447  cl = dlist_container(CatCList, cache_elem, iter.cur);
1448 
1449  if (cl->dead)
1450  continue; /* ignore dead entries */
1451 
1452  if (cl->hash_value != lHashValue)
1453  continue; /* quickly skip entry if wrong hash val */
1454 
1455  /*
1456  * see if the cached list matches our key.
1457  */
1458  if (cl->nkeys != nkeys)
1459  continue;
1460  HeapKeyTest(&cl->tuple,
1461  cache->cc_tupdesc,
1462  nkeys,
1463  cur_skey,
1464  res);
1465  if (!res)
1466  continue;
1467 
1468  /*
1469  * We found a matching list. Move the list to the front of the
1470  * cache's list-of-lists, to speed subsequent searches. (We do not
1471  * move the members to the fronts of their hashbucket lists, however,
1472  * since there's no point in that unless they are searched for
1473  * individually.)
1474  */
1475  dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1476 
1477  /* Bump the list's refcount and return it */
1479  cl->refcount++;
1481 
1482  CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1483  cache->cc_relname);
1484 
1485 #ifdef CATCACHE_STATS
1486  cache->cc_lhits++;
1487 #endif
1488 
1489  return cl;
1490  }
1491 
1492  /*
1493  * List was not found in cache, so we have to build it by reading the
1494  * relation. For each matching tuple found in the relation, use an
1495  * existing cache entry if possible, else build a new one.
1496  *
1497  * We have to bump the member refcounts temporarily to ensure they won't
1498  * get dropped from the cache while loading other members. We use a PG_TRY
1499  * block to ensure we can undo those refcounts if we get an error before
1500  * we finish constructing the CatCList.
1501  */
1503 
1504  ctlist = NIL;
1505 
1506  PG_TRY();
1507  {
1508  Relation relation;
1509  SysScanDesc scandesc;
1510 
1511  relation = heap_open(cache->cc_reloid, AccessShareLock);
1512 
1513  scandesc = systable_beginscan(relation,
1514  cache->cc_indexoid,
1515  IndexScanOK(cache, cur_skey),
1516  NULL,
1517  nkeys,
1518  cur_skey);
1519 
1520  /* The list will be ordered iff we are doing an index scan */
1521  ordered = (scandesc->irel != NULL);
1522 
1523  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1524  {
1525  uint32 hashValue;
1526  Index hashIndex;
1527  bool found = false;
1528  dlist_head *bucket;
1529 
1530  /*
1531  * See if there's an entry for this tuple already.
1532  */
1533  ct = NULL;
1534  hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1535  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1536 
1537  bucket = &cache->cc_bucket[hashIndex];
1538  dlist_foreach(iter, bucket)
1539  {
1540  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1541 
1542  if (ct->dead || ct->negative)
1543  continue; /* ignore dead and negative entries */
1544 
1545  if (ct->hash_value != hashValue)
1546  continue; /* quickly skip entry if wrong hash val */
1547 
1548  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1549  continue; /* not same tuple */
1550 
1551  /*
1552  * Found a match, but can't use it if it belongs to another
1553  * list already
1554  */
1555  if (ct->c_list)
1556  continue;
1557 
1558  found = true;
1559  break; /* A-OK */
1560  }
1561 
1562  if (!found)
1563  {
1564  /* We didn't find a usable entry, so make a new one */
1565  ct = CatalogCacheCreateEntry(cache, ntp,
1566  hashValue, hashIndex,
1567  false);
1568  }
1569 
1570  /* Careful here: add entry to ctlist, then bump its refcount */
1571  /* This way leaves state correct if lappend runs out of memory */
1572  ctlist = lappend(ctlist, ct);
1573  ct->refcount++;
1574  }
1575 
1576  systable_endscan(scandesc);
1577 
1578  heap_close(relation, AccessShareLock);
1579 
1580  /*
1581  * Now we can build the CatCList entry. First we need a dummy tuple
1582  * containing the key values...
1583  */
1584  ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1586  nmembers = list_length(ctlist);
1587  cl = (CatCList *)
1588  palloc(offsetof(CatCList, members) +nmembers * sizeof(CatCTup *));
1589  heap_copytuple_with_tuple(ntp, &cl->tuple);
1590  MemoryContextSwitchTo(oldcxt);
1591  heap_freetuple(ntp);
1592 
1593  /*
1594  * We are now past the last thing that could trigger an elog before we
1595  * have finished building the CatCList and remembering it in the
1596  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1597  * we'd better do so before we start marking the members as belonging
1598  * to the list.
1599  */
1600 
1601  }
1602  PG_CATCH();
1603  {
1604  foreach(ctlist_item, ctlist)
1605  {
1606  ct = (CatCTup *) lfirst(ctlist_item);
1607  Assert(ct->c_list == NULL);
1608  Assert(ct->refcount > 0);
1609  ct->refcount--;
1610  if (
1611 #ifndef CATCACHE_FORCE_RELEASE
1612  ct->dead &&
1613 #endif
1614  ct->refcount == 0 &&
1615  (ct->c_list == NULL || ct->c_list->refcount == 0))
1616  CatCacheRemoveCTup(cache, ct);
1617  }
1618 
1619  PG_RE_THROW();
1620  }
1621  PG_END_TRY();
1622 
1623  cl->cl_magic = CL_MAGIC;
1624  cl->my_cache = cache;
1625  cl->refcount = 0; /* for the moment */
1626  cl->dead = false;
1627  cl->ordered = ordered;
1628  cl->nkeys = nkeys;
1629  cl->hash_value = lHashValue;
1630  cl->n_members = nmembers;
1631 
1632  i = 0;
1633  foreach(ctlist_item, ctlist)
1634  {
1635  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1636  Assert(ct->c_list == NULL);
1637  ct->c_list = cl;
1638  /* release the temporary refcount on the member */
1639  Assert(ct->refcount > 0);
1640  ct->refcount--;
1641  /* mark list dead if any members already dead */
1642  if (ct->dead)
1643  cl->dead = true;
1644  }
1645  Assert(i == nmembers);
1646 
1647  dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1648 
1649  /* Finally, bump the list's refcount and return it */
1650  cl->refcount++;
1652 
1653  CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1654  cache->cc_relname, nmembers);
1655 
1656  return cl;
1657 }
1658 
1659 /*
1660  * ReleaseCatCacheList
1661  *
1662  * Decrement the reference count of a catcache list.
1663  */
1664 void
1666 {
1667  /* Safety checks to ensure we were handed a cache entry */
1668  Assert(list->cl_magic == CL_MAGIC);
1669  Assert(list->refcount > 0);
1670  list->refcount--;
1672 
1673  if (
1674 #ifndef CATCACHE_FORCE_RELEASE
1675  list->dead &&
1676 #endif
1677  list->refcount == 0)
1678  CatCacheRemoveCList(list->my_cache, list);
1679 }
1680 
1681 
1682 /*
1683  * CatalogCacheCreateEntry
1684  * Create a new CatCTup entry, copying the given HeapTuple and other
1685  * supplied data into it. The new entry initially has refcount 0.
1686  */
1687 static CatCTup *
1689  uint32 hashValue, Index hashIndex, bool negative)
1690 {
1691  CatCTup *ct;
1692  HeapTuple dtp;
1693  MemoryContext oldcxt;
1694 
1695  /*
1696  * If there are any out-of-line toasted fields in the tuple, expand them
1697  * in-line. This saves cycles during later use of the catcache entry, and
1698  * also protects us against the possibility of the toast tuples being
1699  * freed before we attempt to fetch them, in case of something using a
1700  * slightly stale catcache entry.
1701  */
1702  if (HeapTupleHasExternal(ntp))
1703  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1704  else
1705  dtp = ntp;
1706 
1707  /*
1708  * Allocate CatCTup header in cache memory, and copy the tuple there too.
1709  */
1711  ct = (CatCTup *) palloc(sizeof(CatCTup));
1712  heap_copytuple_with_tuple(dtp, &ct->tuple);
1713  MemoryContextSwitchTo(oldcxt);
1714 
1715  if (dtp != ntp)
1716  heap_freetuple(dtp);
1717 
1718  /*
1719  * Finish initializing the CatCTup header, and add it to the cache's
1720  * linked list and counts.
1721  */
1722  ct->ct_magic = CT_MAGIC;
1723  ct->my_cache = cache;
1724  ct->c_list = NULL;
1725  ct->refcount = 0; /* for the moment */
1726  ct->dead = false;
1727  ct->negative = negative;
1728  ct->hash_value = hashValue;
1729 
1730  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1731 
1732  cache->cc_ntup++;
1733  CacheHdr->ch_ntup++;
1734 
1735  /*
1736  * If the hash table has become too full, enlarge the buckets array. Quite
1737  * arbitrarily, we enlarge when fill factor > 2.
1738  */
1739  if (cache->cc_ntup > cache->cc_nbuckets * 2)
1740  RehashCatCache(cache);
1741 
1742  return ct;
1743 }
1744 
1745 /*
1746  * build_dummy_tuple
1747  * Generate a palloc'd HeapTuple that contains the specified key
1748  * columns, and NULLs for other columns.
1749  *
1750  * This is used to store the keys for negative cache entries and CatCList
1751  * entries, which don't have real tuples associated with them.
1752  */
1753 static HeapTuple
1754 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1755 {
1756  HeapTuple ntp;
1757  TupleDesc tupDesc = cache->cc_tupdesc;
1758  Datum *values;
1759  bool *nulls;
1760  Oid tupOid = InvalidOid;
1761  NameData tempNames[4];
1762  int i;
1763 
1764  values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1765  nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1766 
1767  memset(values, 0, tupDesc->natts * sizeof(Datum));
1768  memset(nulls, true, tupDesc->natts * sizeof(bool));
1769 
1770  for (i = 0; i < nkeys; i++)
1771  {
1772  int attindex = cache->cc_key[i];
1773  Datum keyval = skeys[i].sk_argument;
1774 
1775  if (attindex > 0)
1776  {
1777  /*
1778  * Here we must be careful in case the caller passed a C string
1779  * where a NAME is wanted: convert the given argument to a
1780  * correctly padded NAME. Otherwise the memcpy() done in
1781  * heap_form_tuple could fall off the end of memory.
1782  */
1783  if (cache->cc_isname[i])
1784  {
1785  Name newval = &tempNames[i];
1786 
1787  namestrcpy(newval, DatumGetCString(keyval));
1788  keyval = NameGetDatum(newval);
1789  }
1790  values[attindex - 1] = keyval;
1791  nulls[attindex - 1] = false;
1792  }
1793  else
1794  {
1795  Assert(attindex == ObjectIdAttributeNumber);
1796  tupOid = DatumGetObjectId(keyval);
1797  }
1798  }
1799 
1800  ntp = heap_form_tuple(tupDesc, values, nulls);
1801  if (tupOid != InvalidOid)
1802  HeapTupleSetOid(ntp, tupOid);
1803 
1804  pfree(values);
1805  pfree(nulls);
1806 
1807  return ntp;
1808 }
1809 
1810 
1811 /*
1812  * PrepareToInvalidateCacheTuple()
1813  *
1814  * This is part of a rather subtle chain of events, so pay attention:
1815  *
1816  * When a tuple is inserted or deleted, it cannot be flushed from the
1817  * catcaches immediately, for reasons explained at the top of cache/inval.c.
1818  * Instead we have to add entry(s) for the tuple to a list of pending tuple
1819  * invalidations that will be done at the end of the command or transaction.
1820  *
1821  * The lists of tuples that need to be flushed are kept by inval.c. This
1822  * routine is a helper routine for inval.c. Given a tuple belonging to
1823  * the specified relation, find all catcaches it could be in, compute the
1824  * correct hash value for each such catcache, and call the specified
1825  * function to record the cache id and hash value in inval.c's lists.
1826  * CatalogCacheIdInvalidate will be called later, if appropriate,
1827  * using the recorded information.
1828  *
1829  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1830  * For an update, we are called just once, with tuple being the old tuple
1831  * version and newtuple the new version. We should make two list entries
1832  * if the tuple's hash value changed, but only one if it didn't.
1833  *
1834  * Note that it is irrelevant whether the given tuple is actually loaded
1835  * into the catcache at the moment. Even if it's not there now, it might
1836  * be by the end of the command, or there might be a matching negative entry
1837  * to flush --- or other backends' caches might have such entries --- so
1838  * we have to make list entries to flush it later.
1839  *
1840  * Also note that it's not an error if there are no catcaches for the
1841  * specified relation. inval.c doesn't know exactly which rels have
1842  * catcaches --- it will call this routine for any tuple that's in a
1843  * system relation.
1844  */
1845 void
1847  HeapTuple tuple,
1848  HeapTuple newtuple,
1849  void (*function) (int, uint32, Oid))
1850 {
1851  slist_iter iter;
1852  Oid reloid;
1853 
1854  CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1855 
1856  /*
1857  * sanity checks
1858  */
1859  Assert(RelationIsValid(relation));
1860  Assert(HeapTupleIsValid(tuple));
1861  Assert(PointerIsValid(function));
1862  Assert(CacheHdr != NULL);
1863 
1864  reloid = RelationGetRelid(relation);
1865 
1866  /* ----------------
1867  * for each cache
1868  * if the cache contains tuples from the specified relation
1869  * compute the tuple's hash value(s) in this cache,
1870  * and call the passed function to register the information.
1871  * ----------------
1872  */
1873 
1874  slist_foreach(iter, &CacheHdr->ch_caches)
1875  {
1876  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
1877  uint32 hashvalue;
1878  Oid dbid;
1879 
1880  if (ccp->cc_reloid != reloid)
1881  continue;
1882 
1883  /* Just in case cache hasn't finished initialization yet... */
1884  if (ccp->cc_tupdesc == NULL)
1886 
1887  hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
1888  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1889 
1890  (*function) (ccp->id, hashvalue, dbid);
1891 
1892  if (newtuple)
1893  {
1894  uint32 newhashvalue;
1895 
1896  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1897 
1898  if (newhashvalue != hashvalue)
1899  (*function) (ccp->id, newhashvalue, dbid);
1900  }
1901  }
1902 }
1903 
1904 
1905 /*
1906  * Subroutines for warning about reference leaks. These are exported so
1907  * that resowner.c can call them.
1908  */
1909 void
1911 {
1912  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1913  offsetof(CatCTup, tuple));
1914 
1915  /* Safety check to ensure we were handed a cache entry */
1916  Assert(ct->ct_magic == CT_MAGIC);
1917 
1918  elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1919  ct->my_cache->cc_relname, ct->my_cache->id,
1920  ItemPointerGetBlockNumber(&(tuple->t_self)),
1922  ct->refcount);
1923 }
1924 
1925 void
1927 {
1928  elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1929  list->my_cache->cc_relname, list->my_cache->id,
1930  list, list->refcount);
1931 }
#define DatumGetUInt32(X)
Definition: postgres.h:494
#define NIL
Definition: pg_list.h:69
Oid sk_subtype
Definition: skey.h:69
Relation irel
Definition: relscan.h:148
#define REGCLASSOID
Definition: pg_type.h:565
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1084
void PrintCatCacheListLeakWarning(CatCList *list)
Definition: catcache.c:1926
#define NameGetDatum(X)
Definition: postgres.h:603
int n_members
Definition: catcache.h:154
void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:949
uint32 hash_value
Definition: catcache.h:152
Datum hashoid(PG_FUNCTION_ARGS)
Definition: hashfunc.c:82
#define DEBUG1
Definition: elog.h:25
#define NAMEOID
Definition: pg_type.h:300
dlist_node * cur
Definition: ilist.h:180
static void ResetCatalogCache(CatCache *cache)
Definition: catcache.c:598
uint32 hash_value
Definition: catcache.h:115
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:493
Datum hashname(PG_FUNCTION_ARGS)
Definition: hashfunc.c:144
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:872
Definition: syscache.h:36
CatCache * my_cache
Definition: catcache.h:82
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
#define CACHE3_elog(a, b, c, d)
Definition: catcache.c:66
static CatCacheHeader * CacheHdr
Definition: catcache.c:73
#define RelationGetDescr(relation)
Definition: rel.h:425
#define HASH_INDEX(h, sz)
Definition: catcache.c:49
#define REGROLEOID
Definition: pg_type.h:573
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:182
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, uint32 hashValue, Index hashIndex, bool negative)
Definition: catcache.c:1688
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:292
void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
Definition: catcache.c:443
#define OIDOID
Definition: pg_type.h:328
#define TEXTOID
Definition: pg_type.h:324
Datum(* PGFunction)(FunctionCallInfo fcinfo)
Definition: fmgr.h:40
slist_node * cur
Definition: ilist.h:226
#define CT_MAGIC
Definition: catcache.h:81
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
ResourceOwner CurrentResourceOwner
Definition: resowner.c:138
#define DatumGetObjectId(X)
Definition: postgres.h:508
#define RelationGetForm(relation)
Definition: rel.h:407
char * pstrdup(const char *in)
Definition: mcxt.c:1165
regproc RegProcedure
Definition: c.h:392
Form_pg_attribute * attrs
Definition: tupdesc.h:74
void AtEOXact_CatCache(bool isCommit)
Definition: catcache.c:548
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
dlist_head * cc_bucket
Definition: catcache.h:55
#define AccessShareLock
Definition: lockdefs.h:36
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:574
#define INT4OID
Definition: pg_type.h:316
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:1846
slist_node cc_next
Definition: catcache.h:40
void ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner)
Definition: resowner.c:973
int id
Definition: catcache.h:39
void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:958
int cl_magic
Definition: catcache.h:122
bool dead
Definition: catcache.h:149
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:692
bool criticalSharedRelcachesBuilt
Definition: relcache.c:134
#define heap_close(r, l)
Definition: heapam.h:97
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:555
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1374
unsigned int Oid
Definition: postgres_ext.h:31
#define REGTYPEOID
Definition: pg_type.h:569
dlist_head cc_lists
Definition: catcache.h:54
int namestrcpy(Name name, const char *str)
Definition: name.c:217
#define REGOPEROID
Definition: pg_type.h:557
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:877
bool cc_isname[CATCACHE_MAXKEYS]
Definition: catcache.h:53
bool cc_relisshared
Definition: catcache.h:44
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:322
int natts
Definition: tupdesc.h:73
short nkeys
Definition: catcache.h:151
struct catclist * c_list
Definition: catcache.h:98
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:1665
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:155
Datum hashchar(PG_FUNCTION_ARGS)
Definition: hashfunc.c:44
Oid cc_indexoid
Definition: catcache.h:43
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
Form_pg_index rd_index
Definition: rel.h:155
#define OIDVECTOROID
Definition: pg_type.h:344
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:410
void pfree(void *pointer)
Definition: mcxt.c:992
static void slist_init(slist_head *head)
Definition: ilist.h:554
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
Definition: catcache.c:174
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:814
#define ObjectIdGetDatum(X)
Definition: postgres.h:515
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
#define DatumGetCString(X)
Definition: postgres.h:574
Oid cc_reloid
Definition: catcache.h:42
int cc_key[CATCACHE_MAXKEYS]
Definition: catcache.h:49
int cc_nkeys
Definition: catcache.h:48
#define RelationIsValid(relation)
Definition: rel.h:386
#define FATAL
Definition: elog.h:52
StrategyNumber sk_strategy
Definition: skey.h:68
ItemPointerData t_self
Definition: htup.h:65
TupleDesc cc_tupdesc
Definition: catcache.h:45
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:152
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:145
int cc_ntup
Definition: catcache.h:46
Definition: c.h:489
#define DEBUG2
Definition: elog.h:24
#define INT2OID
Definition: pg_type.h:308
FmgrInfo sk_func
Definition: skey.h:71
#define REGDICTIONARYOID
Definition: pg_type.h:615
#define RelationGetRelationName(relation)
Definition: rel.h:433
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:184
unsigned int uint32
Definition: c.h:265
Datum hashint4(PG_FUNCTION_ARGS)
Definition: hashfunc.c:56
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:873
struct tupleDesc * TupleDesc
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:167
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:169
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:719
#define CACHE4_elog(a, b, c, d, e)
Definition: catcache.c:67
MemoryContext TopMemoryContext
Definition: mcxt.c:43
List * lappend(List *list, void *datum)
Definition: list.c:128
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:391
static void GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
Definition: catcache.c:105
#define WARNING
Definition: elog.h:40
dlist_node cache_elem
Definition: catcache.h:147
#define CL_MAGIC
Definition: catcache.h:123
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:354
#define slist_container(type, membername, ptr)
Definition: ilist.h:674
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:440
void * palloc0(Size size)
Definition: mcxt.c:920
static bool IndexScanOK(CatCache *cache, ScanKey cur_skey)
Definition: catcache.c:1039
uintptr_t Datum
Definition: postgres.h:374
void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
Definition: heaptuple.c:634
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1424
Oid MyDatabaseId
Definition: globals.c:76
Relation heap_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1287
CatCache * my_cache
Definition: catcache.h:124
void PrintCatCacheLeakWarning(HeapTuple tuple)
Definition: catcache.c:1910
dlist_node * cur
Definition: ilist.h:161
unsigned int Index
Definition: c.h:362
#define CHAROID
Definition: pg_type.h:296
#define CACHE1_elog(a, b)
Definition: catcache.c:64
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:784
#define InvalidOid
Definition: postgres_ext.h:36
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1317
slist_head ch_caches
Definition: catcache.h:161
void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:993
#define PG_CATCH()
Definition: elog.h:293
#define InitCatCache_DEBUG2
Definition: catcache.c:715
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:671
#define lfirst(lc)
Definition: pg_list.h:106
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:675
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
Definition: catcache.c:1754
int cc_nbuckets
Definition: catcache.h:47
void CreateCacheMemoryContext(void)
Definition: catcache.c:525
HeapTupleData tuple
Definition: catcache.h:153
static int list_length(const List *l)
Definition: pg_list.h:89
#define newval
int refcount
Definition: catcache.h:112
bool IsTransactionState(void)
Definition: xact.c:349
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:76
const char * cc_relname
Definition: catcache.h:41
#define PG_RE_THROW()
Definition: elog.h:314
#define BOOLOID
Definition: pg_type.h:288
void ResetCatalogCaches(void)
Definition: catcache.c:645
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
Definition: catcache.c:227
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:176
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:385
dlist_node cache_elem
Definition: catcache.h:89
#define REGCONFIGOID
Definition: pg_type.h:612
static Datum values[MAXATTR]
Definition: bootstrap.c:162
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:365
Datum hashint2(PG_FUNCTION_ARGS)
Definition: hashfunc.c:50
bool negative
Definition: catcache.h:114
#define slist_foreach(iter, lhead)
Definition: ilist.h:700
bool ordered
Definition: catcache.h:150
tuple list
Definition: sort-test.py:11
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1103
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void * palloc(Size size)
Definition: mcxt.c:891
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1389
Oid sk_collation
Definition: skey.h:70
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:136
int i
void * arg
HeapTupleData tuple
Definition: catcache.h:116
#define CACHE2_elog(a, b, c)
Definition: catcache.c:65
#define elog
Definition: elog.h:219
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:988
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:66
bool criticalRelcachesBuilt
Definition: relcache.c:128
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
#define HeapKeyTest(tuple, tupdesc, nkeys, keys, result)
Definition: valid.h:22
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1349
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:105
#define REGPROCEDUREOID
Definition: pg_type.h:553
int refcount
Definition: catcache.h:148
#define PG_TRY()
Definition: elog.h:284
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Definition: pg_list.h:45
#define PointerIsValid(pointer)
Definition: c.h:522
#define REGNAMESPACEOID
Definition: pg_type.h:577
PGFunction cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
Datum sk_argument
Definition: skey.h:72
#define RelationGetRelid(relation)
Definition: rel.h:413
void ResourceOwnerEnlargeCatCacheRefs(ResourceOwner owner)
Definition: resowner.c:938
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:151
#define PG_END_TRY()
Definition: elog.h:300
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:551
AttrNumber sk_attno
Definition: skey.h:67
#define REGOPERATOROID
Definition: pg_type.h:561
#define REGPROCOID
Definition: pg_type.h:320
int ct_magic
Definition: catcache.h:80
bool dead
Definition: catcache.h:113
MemoryContext CacheMemoryContext
Definition: mcxt.c:46
void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:984