PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
catcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/tuptoaster.h"
23 #include "access/valid.h"
24 #include "access/xact.h"
25 #include "catalog/pg_operator.h"
26 #include "catalog/pg_type.h"
27 #include "miscadmin.h"
28 #ifdef CATCACHE_STATS
29 #include "storage/ipc.h" /* for on_proc_exit */
30 #endif
31 #include "storage/lmgr.h"
32 #include "utils/builtins.h"
33 #include "utils/fmgroids.h"
34 #include "utils/inval.h"
35 #include "utils/memutils.h"
36 #include "utils/rel.h"
37 #include "utils/resowner_private.h"
38 #include "utils/syscache.h"
39 #include "utils/tqual.h"
40 
41 
42  /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
43 
44 /*
45  * Given a hash value and the size of the hash table, find the bucket
46  * in which the hash value belongs. Since the hash table must contain
47  * a power-of-2 number of elements, this is a simple bitmask.
48  */
49 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
50 
51 
52 /*
53  * variables, macros and other stuff
54  */
55 
56 #ifdef CACHEDEBUG
57 #define CACHE1_elog(a,b) elog(a,b)
58 #define CACHE2_elog(a,b,c) elog(a,b,c)
59 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
60 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
61 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
62 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
63 #else
64 #define CACHE1_elog(a,b)
65 #define CACHE2_elog(a,b,c)
66 #define CACHE3_elog(a,b,c,d)
67 #define CACHE4_elog(a,b,c,d,e)
68 #define CACHE5_elog(a,b,c,d,e,f)
69 #define CACHE6_elog(a,b,c,d,e,f,g)
70 #endif
71 
72 /* Cache management header --- pointer is NULL until created */
73 static CatCacheHeader *CacheHdr = NULL;
74 
75 
76 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
77  ScanKey cur_skey);
79  HeapTuple tuple);
80 
81 #ifdef CATCACHE_STATS
82 static void CatCachePrintStats(int code, Datum arg);
83 #endif
84 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
85 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
86 static void CatalogCacheInitializeCache(CatCache *cache);
88  uint32 hashValue, Index hashIndex,
89  bool negative);
90 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
91 
92 
93 /*
94  * internal support functions
95  */
96 
97 /*
98  * Look up the hash and equality functions for system types that are used
99  * as cache key fields.
100  *
101  * XXX this should be replaced by catalog lookups,
102  * but that seems to pose considerable risk of circularity...
103  */
104 static void
105 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
106 {
107  switch (keytype)
108  {
109  case BOOLOID:
110  *hashfunc = hashchar;
111 
112  *eqfunc = F_BOOLEQ;
113  break;
114  case CHAROID:
115  *hashfunc = hashchar;
116 
117  *eqfunc = F_CHAREQ;
118  break;
119  case NAMEOID:
120  *hashfunc = hashname;
121 
122  *eqfunc = F_NAMEEQ;
123  break;
124  case INT2OID:
125  *hashfunc = hashint2;
126 
127  *eqfunc = F_INT2EQ;
128  break;
129  case INT4OID:
130  *hashfunc = hashint4;
131 
132  *eqfunc = F_INT4EQ;
133  break;
134  case TEXTOID:
135  *hashfunc = hashtext;
136 
137  *eqfunc = F_TEXTEQ;
138  break;
139  case OIDOID:
140  case REGPROCOID:
141  case REGPROCEDUREOID:
142  case REGOPEROID:
143  case REGOPERATOROID:
144  case REGCLASSOID:
145  case REGTYPEOID:
146  case REGCONFIGOID:
147  case REGDICTIONARYOID:
148  case REGROLEOID:
149  case REGNAMESPACEOID:
150  *hashfunc = hashoid;
151 
152  *eqfunc = F_OIDEQ;
153  break;
154  case OIDVECTOROID:
155  *hashfunc = hashoidvector;
156 
157  *eqfunc = F_OIDVECTOREQ;
158  break;
159  default:
160  elog(FATAL, "type %u not supported as catcache key", keytype);
161  *hashfunc = NULL; /* keep compiler quiet */
162 
163  *eqfunc = InvalidOid;
164  break;
165  }
166 }
167 
168 /*
169  * CatalogCacheComputeHashValue
170  *
171  * Compute the hash value associated with a given set of lookup keys
172  */
173 static uint32
174 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
175 {
176  uint32 hashValue = 0;
177  uint32 oneHash;
178 
179  CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
180  cache->cc_relname,
181  nkeys,
182  cache);
183 
184  switch (nkeys)
185  {
186  case 4:
187  oneHash =
189  cur_skey[3].sk_argument));
190  hashValue ^= oneHash << 24;
191  hashValue ^= oneHash >> 8;
192  /* FALLTHROUGH */
193  case 3:
194  oneHash =
196  cur_skey[2].sk_argument));
197  hashValue ^= oneHash << 16;
198  hashValue ^= oneHash >> 16;
199  /* FALLTHROUGH */
200  case 2:
201  oneHash =
203  cur_skey[1].sk_argument));
204  hashValue ^= oneHash << 8;
205  hashValue ^= oneHash >> 24;
206  /* FALLTHROUGH */
207  case 1:
208  oneHash =
210  cur_skey[0].sk_argument));
211  hashValue ^= oneHash;
212  break;
213  default:
214  elog(FATAL, "wrong number of hash keys: %d", nkeys);
215  break;
216  }
217 
218  return hashValue;
219 }
220 
221 /*
222  * CatalogCacheComputeTupleHashValue
223  *
224  * Compute the hash value associated with a given tuple to be cached
225  */
226 static uint32
228 {
229  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
230  bool isNull = false;
231 
232  /* Copy pre-initialized overhead data for scankey */
233  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
234 
235  /* Now extract key fields from tuple, insert into scankey */
236  switch (cache->cc_nkeys)
237  {
238  case 4:
239  cur_skey[3].sk_argument =
240  (cache->cc_key[3] == ObjectIdAttributeNumber)
242  : fastgetattr(tuple,
243  cache->cc_key[3],
244  cache->cc_tupdesc,
245  &isNull);
246  Assert(!isNull);
247  /* FALLTHROUGH */
248  case 3:
249  cur_skey[2].sk_argument =
250  (cache->cc_key[2] == ObjectIdAttributeNumber)
252  : fastgetattr(tuple,
253  cache->cc_key[2],
254  cache->cc_tupdesc,
255  &isNull);
256  Assert(!isNull);
257  /* FALLTHROUGH */
258  case 2:
259  cur_skey[1].sk_argument =
260  (cache->cc_key[1] == ObjectIdAttributeNumber)
262  : fastgetattr(tuple,
263  cache->cc_key[1],
264  cache->cc_tupdesc,
265  &isNull);
266  Assert(!isNull);
267  /* FALLTHROUGH */
268  case 1:
269  cur_skey[0].sk_argument =
270  (cache->cc_key[0] == ObjectIdAttributeNumber)
272  : fastgetattr(tuple,
273  cache->cc_key[0],
274  cache->cc_tupdesc,
275  &isNull);
276  Assert(!isNull);
277  break;
278  default:
279  elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
280  break;
281  }
282 
283  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
284 }
285 
286 
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * Dump usage statistics for every catcache, plus overall totals, at
 * DEBUG2.  Registered as an on_proc_exit callback when CATCACHE_STATS
 * is defined; the code/arg parameters follow the on_proc_exit signature
 * and are unused.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
343 
344 
345 /*
346  * CatCacheRemoveCTup
347  *
348  * Unlink and delete the given cache entry
349  *
350  * NB: if it is a member of a CatCList, the CatCList is deleted too.
351  * Both the cache entry and the list had better have zero refcount.
352  */
353 static void
355 {
356  Assert(ct->refcount == 0);
357  Assert(ct->my_cache == cache);
358 
359  if (ct->c_list)
360  {
361  /*
362  * The cleanest way to handle this is to call CatCacheRemoveCList,
363  * which will recurse back to me, and the recursive call will do the
364  * work. Set the "dead" flag to make sure it does recurse.
365  */
366  ct->dead = true;
367  CatCacheRemoveCList(cache, ct->c_list);
368  return; /* nothing left to do */
369  }
370 
371  /* delink from linked list */
372  dlist_delete(&ct->cache_elem);
373 
374  /* free associated tuple data */
375  if (ct->tuple.t_data != NULL)
376  pfree(ct->tuple.t_data);
377  pfree(ct);
378 
379  --cache->cc_ntup;
380  --CacheHdr->ch_ntup;
381 }
382 
383 /*
384  * CatCacheRemoveCList
385  *
386  * Unlink and delete the given cache list entry
387  *
388  * NB: any dead member entries that become unreferenced are deleted too.
389  */
390 static void
392 {
393  int i;
394 
395  Assert(cl->refcount == 0);
396  Assert(cl->my_cache == cache);
397 
398  /* delink from member tuples */
399  for (i = cl->n_members; --i >= 0;)
400  {
401  CatCTup *ct = cl->members[i];
402 
403  Assert(ct->c_list == cl);
404  ct->c_list = NULL;
405  /* if the member is dead and now has no references, remove it */
406  if (
407 #ifndef CATCACHE_FORCE_RELEASE
408  ct->dead &&
409 #endif
410  ct->refcount == 0)
411  CatCacheRemoveCTup(cache, ct);
412  }
413 
414  /* delink from linked list */
415  dlist_delete(&cl->cache_elem);
416 
417  /* free associated tuple data */
418  if (cl->tuple.t_data != NULL)
419  pfree(cl->tuple.t_data);
420  pfree(cl);
421 }
422 
423 
424 /*
425  * CatCacheInvalidate
426  *
427  * Invalidate entries in the specified cache, given a hash value.
428  *
429  * We delete cache entries that match the hash value, whether positive
430  * or negative. We don't care whether the invalidation is the result
431  * of a tuple insertion or a deletion.
432  *
433  * We used to try to match positive cache entries by TID, but that is
434  * unsafe after a VACUUM FULL on a system catalog: an inval event could
435  * be queued before VACUUM FULL, and then processed afterwards, when the
436  * target tuple that has to be invalidated has a different TID than it
437  * did when the event was created. So now we just compare hash values and
438  * accept the small risk of unnecessary invalidations due to false matches.
439  *
440  * This routine is only quasi-public: it should only be used by inval.c.
441  */
442 void
444 {
445  Index hashIndex;
446  dlist_mutable_iter iter;
447 
448  CACHE1_elog(DEBUG2, "CatCacheInvalidate: called");
449 
450  /*
451  * We don't bother to check whether the cache has finished initialization
452  * yet; if not, there will be no entries in it so no problem.
453  */
454 
455  /*
456  * Invalidate *all* CatCLists in this cache; it's too hard to tell which
457  * searches might still be correct, so just zap 'em all.
458  */
459  dlist_foreach_modify(iter, &cache->cc_lists)
460  {
461  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
462 
463  if (cl->refcount > 0)
464  cl->dead = true;
465  else
466  CatCacheRemoveCList(cache, cl);
467  }
468 
469  /*
470  * inspect the proper hash bucket for tuple matches
471  */
472  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
473  dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
474  {
475  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
476 
477  if (hashValue == ct->hash_value)
478  {
479  if (ct->refcount > 0 ||
480  (ct->c_list && ct->c_list->refcount > 0))
481  {
482  ct->dead = true;
483  /* list, if any, was marked dead above */
484  Assert(ct->c_list == NULL || ct->c_list->dead);
485  }
486  else
487  CatCacheRemoveCTup(cache, ct);
488  CACHE1_elog(DEBUG2, "CatCacheInvalidate: invalidated");
489 #ifdef CATCACHE_STATS
490  cache->cc_invals++;
491 #endif
492  /* could be multiple matches, so keep looking! */
493  }
494  }
495 }
496 
497 /* ----------------------------------------------------------------
498  * public functions
499  * ----------------------------------------------------------------
500  */
501 
502 
503 /*
504  * Standard routine for creating cache context if it doesn't exist yet
505  *
506  * There are a lot of places (probably far more than necessary) that check
507  * whether CacheMemoryContext exists yet and want to create it if not.
508  * We centralize knowledge of exactly how to create it here.
509  */
510 void
512 {
513  /*
514  * Purely for paranoia, check that context doesn't exist; caller probably
515  * did so already.
516  */
517  if (!CacheMemoryContext)
519  "CacheMemoryContext",
521 }
522 
523 
524 /*
525  * ResetCatalogCache
526  *
527  * Reset one catalog cache to empty.
528  *
529  * This is not very efficient if the target cache is nearly empty.
530  * However, it shouldn't need to be efficient; we don't invoke it often.
531  */
532 static void
534 {
535  dlist_mutable_iter iter;
536  int i;
537 
538  /* Remove each list in this cache, or at least mark it dead */
539  dlist_foreach_modify(iter, &cache->cc_lists)
540  {
541  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
542 
543  if (cl->refcount > 0)
544  cl->dead = true;
545  else
546  CatCacheRemoveCList(cache, cl);
547  }
548 
549  /* Remove each tuple in this cache, or at least mark it dead */
550  for (i = 0; i < cache->cc_nbuckets; i++)
551  {
552  dlist_head *bucket = &cache->cc_bucket[i];
553 
554  dlist_foreach_modify(iter, bucket)
555  {
556  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
557 
558  if (ct->refcount > 0 ||
559  (ct->c_list && ct->c_list->refcount > 0))
560  {
561  ct->dead = true;
562  /* list, if any, was marked dead above */
563  Assert(ct->c_list == NULL || ct->c_list->dead);
564  }
565  else
566  CatCacheRemoveCTup(cache, ct);
567 #ifdef CATCACHE_STATS
568  cache->cc_invals++;
569 #endif
570  }
571  }
572 }
573 
574 /*
575  * ResetCatalogCaches
576  *
577  * Reset all caches when a shared cache inval event forces it
578  */
579 void
581 {
582  slist_iter iter;
583 
584  CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
585 
586  slist_foreach(iter, &CacheHdr->ch_caches)
587  {
588  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
589 
590  ResetCatalogCache(cache);
591  }
592 
593  CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
594 }
595 
596 /*
597  * CatalogCacheFlushCatalog
598  *
599  * Flush all catcache entries that came from the specified system catalog.
600  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
601  * tuples very likely now have different TIDs than before. (At one point
602  * we also tried to force re-execution of CatalogCacheInitializeCache for
603  * the cache(s) on that catalog. This is a bad idea since it leads to all
604  * kinds of trouble if a cache flush occurs while loading cache entries.
605  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
606  * rather than relying on the relcache to keep a tupdesc for us. Of course
607  * this assumes the tupdesc of a cachable system table will not change...)
608  */
609 void
611 {
612  slist_iter iter;
613 
614  CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
615 
616  slist_foreach(iter, &CacheHdr->ch_caches)
617  {
618  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
619 
620  /* Does this cache store tuples of the target catalog? */
621  if (cache->cc_reloid == catId)
622  {
623  /* Yes, so flush all its contents */
624  ResetCatalogCache(cache);
625 
626  /* Tell inval.c to call syscache callbacks for this cache */
627  CallSyscacheCallbacks(cache->id, 0);
628  }
629  }
630 
631  CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
632 }
633 
634 /*
635  * InitCatCache
636  *
637  * This allocates and initializes a cache for a system catalog relation.
638  * Actually, the cache is only partially initialized to avoid opening the
639  * relation. The relation will be opened and the rest of the cache
640  * structure initialized on the first access.
641  */
642 #ifdef CACHEDEBUG
643 #define InitCatCache_DEBUG2 \
644 do { \
645  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
646  cp->cc_reloid, cp->cc_indexoid, cp->id, \
647  cp->cc_nkeys, cp->cc_nbuckets); \
648 } while(0)
649 #else
650 #define InitCatCache_DEBUG2
651 #endif
652 
653 CatCache *
655  Oid reloid,
656  Oid indexoid,
657  int nkeys,
658  const int *key,
659  int nbuckets)
660 {
661  CatCache *cp;
662  MemoryContext oldcxt;
663  int i;
664 
665  /*
666  * nbuckets is the initial number of hash buckets to use in this catcache.
667  * It will be enlarged later if it becomes too full.
668  *
669  * nbuckets must be a power of two. We check this via Assert rather than
670  * a full runtime check because the values will be coming from constant
671  * tables.
672  *
673  * If you're confused by the power-of-two check, see comments in
674  * bitmapset.c for an explanation.
675  */
676  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
677 
678  /*
679  * first switch to the cache context so our allocations do not vanish at
680  * the end of a transaction
681  */
682  if (!CacheMemoryContext)
684 
686 
687  /*
688  * if first time through, initialize the cache group header
689  */
690  if (CacheHdr == NULL)
691  {
692  CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
693  slist_init(&CacheHdr->ch_caches);
694  CacheHdr->ch_ntup = 0;
695 #ifdef CATCACHE_STATS
696  /* set up to dump stats at backend exit */
697  on_proc_exit(CatCachePrintStats, 0);
698 #endif
699  }
700 
701  /*
702  * allocate a new cache structure
703  *
704  * Note: we rely on zeroing to initialize all the dlist headers correctly
705  */
706  cp = (CatCache *) palloc0(sizeof(CatCache));
707  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
708 
709  /*
710  * initialize the cache's relation information for the relation
711  * corresponding to this cache, and initialize some of the new cache's
712  * other internal fields. But don't open the relation yet.
713  */
714  cp->id = id;
715  cp->cc_relname = "(not known yet)";
716  cp->cc_reloid = reloid;
717  cp->cc_indexoid = indexoid;
718  cp->cc_relisshared = false; /* temporary */
719  cp->cc_tupdesc = (TupleDesc) NULL;
720  cp->cc_ntup = 0;
721  cp->cc_nbuckets = nbuckets;
722  cp->cc_nkeys = nkeys;
723  for (i = 0; i < nkeys; ++i)
724  cp->cc_key[i] = key[i];
725 
726  /*
727  * new cache is initialized as far as we can go for now. print some
728  * debugging information, if appropriate.
729  */
731 
732  /*
733  * add completed cache to top of group header's list
734  */
735  slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
736 
737  /*
738  * back to the old context before we return...
739  */
740  MemoryContextSwitchTo(oldcxt);
741 
742  return cp;
743 }
744 
745 /*
746  * Enlarge a catcache, doubling the number of buckets.
747  */
748 static void
750 {
751  dlist_head *newbucket;
752  int newnbuckets;
753  int i;
754 
755  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
756  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
757 
758  /* Allocate a new, larger, hash table. */
759  newnbuckets = cp->cc_nbuckets * 2;
760  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
761 
762  /* Move all entries from old hash table to new. */
763  for (i = 0; i < cp->cc_nbuckets; i++)
764  {
765  dlist_mutable_iter iter;
766 
767  dlist_foreach_modify(iter, &cp->cc_bucket[i])
768  {
769  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
770  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
771 
772  dlist_delete(iter.cur);
773  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
774  }
775  }
776 
777  /* Switch to the new array. */
778  pfree(cp->cc_bucket);
779  cp->cc_nbuckets = newnbuckets;
780  cp->cc_bucket = newbucket;
781 }
782 
783 /*
784  * CatalogCacheInitializeCache
785  *
786  * This function does final initialization of a catcache: obtain the tuple
787  * descriptor and set up the hash and equality function links. We assume
788  * that the relcache entry can be opened at this point!
789  */
790 #ifdef CACHEDEBUG
791 #define CatalogCacheInitializeCache_DEBUG1 \
792  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
793  cache->cc_reloid)
794 
795 #define CatalogCacheInitializeCache_DEBUG2 \
796 do { \
797  if (cache->cc_key[i] > 0) { \
798  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
799  i+1, cache->cc_nkeys, cache->cc_key[i], \
800  TupleDescAttr(tupdesc, cache->cc_key[i] - 1)->atttypid); \
801  } else { \
802  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
803  i+1, cache->cc_nkeys, cache->cc_key[i]); \
804  } \
805 } while(0)
806 #else
807 #define CatalogCacheInitializeCache_DEBUG1
808 #define CatalogCacheInitializeCache_DEBUG2
809 #endif
810 
811 static void
813 {
814  Relation relation;
815  MemoryContext oldcxt;
816  TupleDesc tupdesc;
817  int i;
818 
820 
821  relation = heap_open(cache->cc_reloid, AccessShareLock);
822 
823  /*
824  * switch to the cache context so our allocations do not vanish at the end
825  * of a transaction
826  */
827  Assert(CacheMemoryContext != NULL);
828 
830 
831  /*
832  * copy the relcache's tuple descriptor to permanent cache storage
833  */
834  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
835 
836  /*
837  * save the relation's name and relisshared flag, too (cc_relname is used
838  * only for debugging purposes)
839  */
840  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
841  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
842 
843  /*
844  * return to the caller's memory context and close the rel
845  */
846  MemoryContextSwitchTo(oldcxt);
847 
848  heap_close(relation, AccessShareLock);
849 
850  CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
851  cache->cc_relname, cache->cc_nkeys);
852 
853  /*
854  * initialize cache's key information
855  */
856  for (i = 0; i < cache->cc_nkeys; ++i)
857  {
858  Oid keytype;
859  RegProcedure eqfunc;
860 
862 
863  if (cache->cc_key[i] > 0)
864  {
865  Form_pg_attribute attr = TupleDescAttr(tupdesc,
866  cache->cc_key[i] - 1);
867 
868  keytype = attr->atttypid;
869  /* cache key columns should always be NOT NULL */
870  Assert(attr->attnotnull);
871  }
872  else
873  {
874  if (cache->cc_key[i] != ObjectIdAttributeNumber)
875  elog(FATAL, "only sys attr supported in caches is OID");
876  keytype = OIDOID;
877  }
878 
879  GetCCHashEqFuncs(keytype,
880  &cache->cc_hashfunc[i],
881  &eqfunc);
882 
883  cache->cc_isname[i] = (keytype == NAMEOID);
884 
885  /*
886  * Do equality-function lookup (we assume this won't need a catalog
887  * lookup for any supported type)
888  */
889  fmgr_info_cxt(eqfunc,
890  &cache->cc_skey[i].sk_func,
892 
893  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
894  cache->cc_skey[i].sk_attno = cache->cc_key[i];
895 
896  /* Fill in sk_strategy as well --- always standard equality */
898  cache->cc_skey[i].sk_subtype = InvalidOid;
899  /* Currently, there are no catcaches on collation-aware data types */
900  cache->cc_skey[i].sk_collation = InvalidOid;
901 
902  CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
903  cache->cc_relname,
904  i,
905  cache);
906  }
907 
908  /*
909  * mark this cache fully initialized
910  */
911  cache->cc_tupdesc = tupdesc;
912 }
913 
914 /*
915  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
916  *
917  * One reason to call this routine is to ensure that the relcache has
918  * created entries for all the catalogs and indexes referenced by catcaches.
919  * Therefore, provide an option to open the index as well as fixing the
920  * cache itself. An exception is the indexes on pg_am, which we don't use
921  * (cf. IndexScanOK).
922  */
923 void
924 InitCatCachePhase2(CatCache *cache, bool touch_index)
925 {
926  if (cache->cc_tupdesc == NULL)
928 
929  if (touch_index &&
930  cache->id != AMOID &&
931  cache->id != AMNAME)
932  {
933  Relation idesc;
934 
935  /*
936  * We must lock the underlying catalog before opening the index to
937  * avoid deadlock, since index_open could possibly result in reading
938  * this same catalog, and if anyone else is exclusive-locking this
939  * catalog and index they'll be doing it in that order.
940  */
942  idesc = index_open(cache->cc_indexoid, AccessShareLock);
943 
944  /*
945  * While we've got the index open, let's check that it's unique (and
946  * not just deferrable-unique, thank you very much). This is just to
947  * catch thinkos in definitions of new catcaches, so we don't worry
948  * about the pg_am indexes not getting tested.
949  */
950  Assert(idesc->rd_index->indisunique &&
951  idesc->rd_index->indimmediate);
952 
955  }
956 }
957 
958 
959 /*
960  * IndexScanOK
961  *
962  * This function checks for tuples that will be fetched by
963  * IndexSupportInitialize() during relcache initialization for
964  * certain system indexes that support critical syscaches.
965  * We can't use an indexscan to fetch these, else we'll get into
966  * infinite recursion. A plain heap scan will work, however.
967  * Once we have completed relcache initialization (signaled by
968  * criticalRelcachesBuilt), we don't have to worry anymore.
969  *
970  * Similarly, during backend startup we have to be able to use the
971  * pg_authid and pg_auth_members syscaches for authentication even if
972  * we don't yet have relcache entries for those catalogs' indexes.
973  */
974 static bool
975 IndexScanOK(CatCache *cache, ScanKey cur_skey)
976 {
977  switch (cache->id)
978  {
979  case INDEXRELID:
980 
981  /*
982  * Rather than tracking exactly which indexes have to be loaded
983  * before we can use indexscans (which changes from time to time),
984  * just force all pg_index searches to be heap scans until we've
985  * built the critical relcaches.
986  */
988  return false;
989  break;
990 
991  case AMOID:
992  case AMNAME:
993 
994  /*
995  * Always do heap scans in pg_am, because it's so small there's
996  * not much point in an indexscan anyway. We *must* do this when
997  * initially building critical relcache entries, but we might as
998  * well just always do it.
999  */
1000  return false;
1001 
1002  case AUTHNAME:
1003  case AUTHOID:
1004  case AUTHMEMMEMROLE:
1005 
1006  /*
1007  * Protect authentication lookups occurring before relcache has
1008  * collected entries for shared indexes.
1009  */
1011  return false;
1012  break;
1013 
1014  default:
1015  break;
1016  }
1017 
1018  /* Normal case, allow index scan */
1019  return true;
1020 }
1021 
1022 /*
1023  * SearchCatCache
1024  *
1025  * This call searches a system cache for a tuple, opening the relation
1026  * if necessary (on the first access to a particular cache).
1027  *
1028  * The result is NULL if not found, or a pointer to a HeapTuple in
1029  * the cache. The caller must not modify the tuple, and must call
1030  * ReleaseCatCache() when done with it.
1031  *
1032  * The search key values should be expressed as Datums of the key columns'
1033  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1034  * exception, the passed-in key for a NAME column can be just a C string;
1035  * the caller need not go to the trouble of converting it to a fully
1036  * null-padded NAME.
1037  */
1038 HeapTuple
1040  Datum v1,
1041  Datum v2,
1042  Datum v3,
1043  Datum v4)
1044 {
1045  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1046  uint32 hashValue;
1047  Index hashIndex;
1048  dlist_iter iter;
1049  dlist_head *bucket;
1050  CatCTup *ct;
1051  Relation relation;
1052  SysScanDesc scandesc;
1053  HeapTuple ntp;
1054 
1055  /* Make sure we're in an xact, even if this ends up being a cache hit */
1057 
1058  /*
1059  * one-time startup overhead for each cache
1060  */
1061  if (cache->cc_tupdesc == NULL)
1063 
1064 #ifdef CATCACHE_STATS
1065  cache->cc_searches++;
1066 #endif
1067 
1068  /*
1069  * initialize the search key information
1070  */
1071  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1072  cur_skey[0].sk_argument = v1;
1073  cur_skey[1].sk_argument = v2;
1074  cur_skey[2].sk_argument = v3;
1075  cur_skey[3].sk_argument = v4;
1076 
1077  /*
1078  * find the hash bucket in which to look for the tuple
1079  */
1080  hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1081  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1082 
1083  /*
1084  * scan the hash bucket until we find a match or exhaust our tuples
1085  *
1086  * Note: it's okay to use dlist_foreach here, even though we modify the
1087  * dlist within the loop, because we don't continue the loop afterwards.
1088  */
1089  bucket = &cache->cc_bucket[hashIndex];
1090  dlist_foreach(iter, bucket)
1091  {
1092  bool res;
1093 
1094  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1095 
1096  if (ct->dead)
1097  continue; /* ignore dead entries */
1098 
1099  if (ct->hash_value != hashValue)
1100  continue; /* quickly skip entry if wrong hash val */
1101 
1102  /*
1103  * see if the cached tuple matches our key.
1104  */
1105  HeapKeyTest(&ct->tuple,
1106  cache->cc_tupdesc,
1107  cache->cc_nkeys,
1108  cur_skey,
1109  res);
1110  if (!res)
1111  continue;
1112 
1113  /*
1114  * We found a match in the cache. Move it to the front of the list
1115  * for its hashbucket, in order to speed subsequent searches. (The
1116  * most frequently accessed elements in any hashbucket will tend to be
1117  * near the front of the hashbucket's list.)
1118  */
1119  dlist_move_head(bucket, &ct->cache_elem);
1120 
1121  /*
1122  * If it's a positive entry, bump its refcount and return it. If it's
1123  * negative, we can report failure to the caller.
1124  */
1125  if (!ct->negative)
1126  {
1128  ct->refcount++;
1130 
1131  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1132  cache->cc_relname, hashIndex);
1133 
1134 #ifdef CATCACHE_STATS
1135  cache->cc_hits++;
1136 #endif
1137 
1138  return &ct->tuple;
1139  }
1140  else
1141  {
1142  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1143  cache->cc_relname, hashIndex);
1144 
1145 #ifdef CATCACHE_STATS
1146  cache->cc_neg_hits++;
1147 #endif
1148 
1149  return NULL;
1150  }
1151  }
1152 
1153  /*
1154  * Tuple was not found in cache, so we have to try to retrieve it directly
1155  * from the relation. If found, we will add it to the cache; if not
1156  * found, we will add a negative cache entry instead.
1157  *
1158  * NOTE: it is possible for recursive cache lookups to occur while reading
1159  * the relation --- for example, due to shared-cache-inval messages being
1160  * processed during heap_open(). This is OK. It's even possible for one
1161  * of those lookups to find and enter the very same tuple we are trying to
1162  * fetch here. If that happens, we will enter a second copy of the tuple
1163  * into the cache. The first copy will never be referenced again, and
1164  * will eventually age out of the cache, so there's no functional problem.
1165  * This case is rare enough that it's not worth expending extra cycles to
1166  * detect.
1167  */
1168  relation = heap_open(cache->cc_reloid, AccessShareLock);
1169 
1170  scandesc = systable_beginscan(relation,
1171  cache->cc_indexoid,
1172  IndexScanOK(cache, cur_skey),
1173  NULL,
1174  cache->cc_nkeys,
1175  cur_skey);
1176 
1177  ct = NULL;
1178 
1179  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1180  {
1181  ct = CatalogCacheCreateEntry(cache, ntp,
1182  hashValue, hashIndex,
1183  false);
1184  /* immediately set the refcount to 1 */
1186  ct->refcount++;
1188  break; /* assume only one match */
1189  }
1190 
1191  systable_endscan(scandesc);
1192 
1193  heap_close(relation, AccessShareLock);
1194 
1195  /*
1196  * If tuple was not found, we need to build a negative cache entry
1197  * containing a fake tuple. The fake tuple has the correct key columns,
1198  * but nulls everywhere else.
1199  *
1200  * In bootstrap mode, we don't build negative entries, because the cache
1201  * invalidation mechanism isn't alive and can't clear them if the tuple
1202  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1203  * cache inval for that.)
1204  */
1205  if (ct == NULL)
1206  {
1208  return NULL;
1209 
1210  ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1211  ct = CatalogCacheCreateEntry(cache, ntp,
1212  hashValue, hashIndex,
1213  true);
1214  heap_freetuple(ntp);
1215 
1216  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1217  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1218  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1219  cache->cc_relname, hashIndex);
1220 
1221  /*
1222  * We are not returning the negative entry to the caller, so leave its
1223  * refcount zero.
1224  */
1225 
1226  return NULL;
1227  }
1228 
1229  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1230  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1231  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1232  cache->cc_relname, hashIndex);
1233 
1234 #ifdef CATCACHE_STATS
1235  cache->cc_newloads++;
1236 #endif
1237 
1238  return &ct->tuple;
1239 }
1240 
1241 /*
1242  * ReleaseCatCache
1243  *
1244  * Decrement the reference count of a catcache entry (releasing the
1245  * hold grabbed by a successful SearchCatCache).
1246  *
1247  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1248  * will be freed as soon as their refcount goes to zero. In combination
1249  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1250  * to catch references to already-released catcache entries.
1251  */
1252 void
1254 {
1255  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1256  offsetof(CatCTup, tuple));
1257 
1258  /* Safety checks to ensure we were handed a cache entry */
1259  Assert(ct->ct_magic == CT_MAGIC);
1260  Assert(ct->refcount > 0);
1261 
1262  ct->refcount--;
1264 
1265  if (
1266 #ifndef CATCACHE_FORCE_RELEASE
1267  ct->dead &&
1268 #endif
1269  ct->refcount == 0 &&
1270  (ct->c_list == NULL || ct->c_list->refcount == 0))
1271  CatCacheRemoveCTup(ct->my_cache, ct);
1272 }
1273 
1274 
1275 /*
1276  * GetCatCacheHashValue
1277  *
1278  * Compute the hash value for a given set of search keys.
1279  *
1280  * The reason for exposing this as part of the API is that the hash value is
1281  * exposed in cache invalidation operations, so there are places outside the
1282  * catcache code that need to be able to compute the hash values.
1283  */
1284 uint32
1286  Datum v1,
1287  Datum v2,
1288  Datum v3,
1289  Datum v4)
1290 {
1291  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1292 
1293  /*
1294  * one-time startup overhead for each cache
1295  */
1296  if (cache->cc_tupdesc == NULL)
1298 
1299  /*
1300  * initialize the search key information
1301  */
1302  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1303  cur_skey[0].sk_argument = v1;
1304  cur_skey[1].sk_argument = v2;
1305  cur_skey[2].sk_argument = v3;
1306  cur_skey[3].sk_argument = v4;
1307 
1308  /*
1309  * calculate the hash value
1310  */
1311  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1312 }
1313 
1314 
1315 /*
1316  * SearchCatCacheList
1317  *
1318  * Generate a list of all tuples matching a partial key (that is,
1319  * a key specifying just the first K of the cache's N key columns).
1320  *
1321  * The caller must not modify the list object or the pointed-to tuples,
1322  * and must call ReleaseCatCacheList() when done with the list.
1323  */
1324 CatCList *
1326  int nkeys,
1327  Datum v1,
1328  Datum v2,
1329  Datum v3,
1330  Datum v4)
1331 {
1332  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1333  uint32 lHashValue;
1334  dlist_iter iter;
1335  CatCList *cl;
1336  CatCTup *ct;
1337  List *volatile ctlist;
1338  ListCell *ctlist_item;
1339  int nmembers;
1340  bool ordered;
1341  HeapTuple ntp;
1342  MemoryContext oldcxt;
1343  int i;
1344 
1345  /*
1346  * one-time startup overhead for each cache
1347  */
1348  if (cache->cc_tupdesc == NULL)
1350 
1351  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1352 
1353 #ifdef CATCACHE_STATS
1354  cache->cc_lsearches++;
1355 #endif
1356 
1357  /*
1358  * initialize the search key information
1359  */
1360  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1361  cur_skey[0].sk_argument = v1;
1362  cur_skey[1].sk_argument = v2;
1363  cur_skey[2].sk_argument = v3;
1364  cur_skey[3].sk_argument = v4;
1365 
1366  /*
1367  * compute a hash value of the given keys for faster search. We don't
1368  * presently divide the CatCList items into buckets, but this still lets
1369  * us skip non-matching items quickly most of the time.
1370  */
1371  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1372 
1373  /*
1374  * scan the items until we find a match or exhaust our list
1375  *
1376  * Note: it's okay to use dlist_foreach here, even though we modify the
1377  * dlist within the loop, because we don't continue the loop afterwards.
1378  */
1379  dlist_foreach(iter, &cache->cc_lists)
1380  {
1381  bool res;
1382 
1383  cl = dlist_container(CatCList, cache_elem, iter.cur);
1384 
1385  if (cl->dead)
1386  continue; /* ignore dead entries */
1387 
1388  if (cl->hash_value != lHashValue)
1389  continue; /* quickly skip entry if wrong hash val */
1390 
1391  /*
1392  * see if the cached list matches our key.
1393  */
1394  if (cl->nkeys != nkeys)
1395  continue;
1396  HeapKeyTest(&cl->tuple,
1397  cache->cc_tupdesc,
1398  nkeys,
1399  cur_skey,
1400  res);
1401  if (!res)
1402  continue;
1403 
1404  /*
1405  * We found a matching list. Move the list to the front of the
1406  * cache's list-of-lists, to speed subsequent searches. (We do not
1407  * move the members to the fronts of their hashbucket lists, however,
1408  * since there's no point in that unless they are searched for
1409  * individually.)
1410  */
1411  dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1412 
1413  /* Bump the list's refcount and return it */
1415  cl->refcount++;
1417 
1418  CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1419  cache->cc_relname);
1420 
1421 #ifdef CATCACHE_STATS
1422  cache->cc_lhits++;
1423 #endif
1424 
1425  return cl;
1426  }
1427 
1428  /*
1429  * List was not found in cache, so we have to build it by reading the
1430  * relation. For each matching tuple found in the relation, use an
1431  * existing cache entry if possible, else build a new one.
1432  *
1433  * We have to bump the member refcounts temporarily to ensure they won't
1434  * get dropped from the cache while loading other members. We use a PG_TRY
1435  * block to ensure we can undo those refcounts if we get an error before
1436  * we finish constructing the CatCList.
1437  */
1439 
1440  ctlist = NIL;
1441 
1442  PG_TRY();
1443  {
1444  Relation relation;
1445  SysScanDesc scandesc;
1446 
1447  relation = heap_open(cache->cc_reloid, AccessShareLock);
1448 
1449  scandesc = systable_beginscan(relation,
1450  cache->cc_indexoid,
1451  IndexScanOK(cache, cur_skey),
1452  NULL,
1453  nkeys,
1454  cur_skey);
1455 
1456  /* The list will be ordered iff we are doing an index scan */
1457  ordered = (scandesc->irel != NULL);
1458 
1459  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1460  {
1461  uint32 hashValue;
1462  Index hashIndex;
1463  bool found = false;
1464  dlist_head *bucket;
1465 
1466  /*
1467  * See if there's an entry for this tuple already.
1468  */
1469  ct = NULL;
1470  hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1471  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1472 
1473  bucket = &cache->cc_bucket[hashIndex];
1474  dlist_foreach(iter, bucket)
1475  {
1476  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1477 
1478  if (ct->dead || ct->negative)
1479  continue; /* ignore dead and negative entries */
1480 
1481  if (ct->hash_value != hashValue)
1482  continue; /* quickly skip entry if wrong hash val */
1483 
1484  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1485  continue; /* not same tuple */
1486 
1487  /*
1488  * Found a match, but can't use it if it belongs to another
1489  * list already
1490  */
1491  if (ct->c_list)
1492  continue;
1493 
1494  found = true;
1495  break; /* A-OK */
1496  }
1497 
1498  if (!found)
1499  {
1500  /* We didn't find a usable entry, so make a new one */
1501  ct = CatalogCacheCreateEntry(cache, ntp,
1502  hashValue, hashIndex,
1503  false);
1504  }
1505 
1506  /* Careful here: add entry to ctlist, then bump its refcount */
1507  /* This way leaves state correct if lappend runs out of memory */
1508  ctlist = lappend(ctlist, ct);
1509  ct->refcount++;
1510  }
1511 
1512  systable_endscan(scandesc);
1513 
1514  heap_close(relation, AccessShareLock);
1515 
1516  /*
1517  * Now we can build the CatCList entry. First we need a dummy tuple
1518  * containing the key values...
1519  */
1520  ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1522  nmembers = list_length(ctlist);
1523  cl = (CatCList *)
1524  palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1525  heap_copytuple_with_tuple(ntp, &cl->tuple);
1526  MemoryContextSwitchTo(oldcxt);
1527  heap_freetuple(ntp);
1528 
1529  /*
1530  * We are now past the last thing that could trigger an elog before we
1531  * have finished building the CatCList and remembering it in the
1532  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1533  * we'd better do so before we start marking the members as belonging
1534  * to the list.
1535  */
1536 
1537  }
1538  PG_CATCH();
1539  {
1540  foreach(ctlist_item, ctlist)
1541  {
1542  ct = (CatCTup *) lfirst(ctlist_item);
1543  Assert(ct->c_list == NULL);
1544  Assert(ct->refcount > 0);
1545  ct->refcount--;
1546  if (
1547 #ifndef CATCACHE_FORCE_RELEASE
1548  ct->dead &&
1549 #endif
1550  ct->refcount == 0 &&
1551  (ct->c_list == NULL || ct->c_list->refcount == 0))
1552  CatCacheRemoveCTup(cache, ct);
1553  }
1554 
1555  PG_RE_THROW();
1556  }
1557  PG_END_TRY();
1558 
1559  cl->cl_magic = CL_MAGIC;
1560  cl->my_cache = cache;
1561  cl->refcount = 0; /* for the moment */
1562  cl->dead = false;
1563  cl->ordered = ordered;
1564  cl->nkeys = nkeys;
1565  cl->hash_value = lHashValue;
1566  cl->n_members = nmembers;
1567 
1568  i = 0;
1569  foreach(ctlist_item, ctlist)
1570  {
1571  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1572  Assert(ct->c_list == NULL);
1573  ct->c_list = cl;
1574  /* release the temporary refcount on the member */
1575  Assert(ct->refcount > 0);
1576  ct->refcount--;
1577  /* mark list dead if any members already dead */
1578  if (ct->dead)
1579  cl->dead = true;
1580  }
1581  Assert(i == nmembers);
1582 
1583  dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1584 
1585  /* Finally, bump the list's refcount and return it */
1586  cl->refcount++;
1588 
1589  CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1590  cache->cc_relname, nmembers);
1591 
1592  return cl;
1593 }
1594 
1595 /*
1596  * ReleaseCatCacheList
1597  *
1598  * Decrement the reference count of a catcache list.
1599  */
1600 void
1602 {
1603  /* Safety checks to ensure we were handed a cache entry */
1604  Assert(list->cl_magic == CL_MAGIC);
1605  Assert(list->refcount > 0);
1606  list->refcount--;
1608 
1609  if (
1610 #ifndef CATCACHE_FORCE_RELEASE
1611  list->dead &&
1612 #endif
1613  list->refcount == 0)
1614  CatCacheRemoveCList(list->my_cache, list);
1615 }
1616 
1617 
1618 /*
1619  * CatalogCacheCreateEntry
1620  * Create a new CatCTup entry, copying the given HeapTuple and other
1621  * supplied data into it. The new entry initially has refcount 0.
1622  */
1623 static CatCTup *
1625  uint32 hashValue, Index hashIndex, bool negative)
1626 {
1627  CatCTup *ct;
1628  HeapTuple dtp;
1629  MemoryContext oldcxt;
1630 
1631  /*
1632  * If there are any out-of-line toasted fields in the tuple, expand them
1633  * in-line. This saves cycles during later use of the catcache entry, and
1634  * also protects us against the possibility of the toast tuples being
1635  * freed before we attempt to fetch them, in case of something using a
1636  * slightly stale catcache entry.
1637  */
1638  if (HeapTupleHasExternal(ntp))
1639  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1640  else
1641  dtp = ntp;
1642 
1643  /*
1644  * Allocate CatCTup header in cache memory, and copy the tuple there too.
1645  */
1647  ct = (CatCTup *) palloc(sizeof(CatCTup));
1648  heap_copytuple_with_tuple(dtp, &ct->tuple);
1649  MemoryContextSwitchTo(oldcxt);
1650 
1651  if (dtp != ntp)
1652  heap_freetuple(dtp);
1653 
1654  /*
1655  * Finish initializing the CatCTup header, and add it to the cache's
1656  * linked list and counts.
1657  */
1658  ct->ct_magic = CT_MAGIC;
1659  ct->my_cache = cache;
1660  ct->c_list = NULL;
1661  ct->refcount = 0; /* for the moment */
1662  ct->dead = false;
1663  ct->negative = negative;
1664  ct->hash_value = hashValue;
1665 
1666  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1667 
1668  cache->cc_ntup++;
1669  CacheHdr->ch_ntup++;
1670 
1671  /*
1672  * If the hash table has become too full, enlarge the buckets array. Quite
1673  * arbitrarily, we enlarge when fill factor > 2.
1674  */
1675  if (cache->cc_ntup > cache->cc_nbuckets * 2)
1676  RehashCatCache(cache);
1677 
1678  return ct;
1679 }
1680 
1681 /*
1682  * build_dummy_tuple
1683  * Generate a palloc'd HeapTuple that contains the specified key
1684  * columns, and NULLs for other columns.
1685  *
1686  * This is used to store the keys for negative cache entries and CatCList
1687  * entries, which don't have real tuples associated with them.
1688  */
1689 static HeapTuple
1690 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1691 {
1692  HeapTuple ntp;
1693  TupleDesc tupDesc = cache->cc_tupdesc;
1694  Datum *values;
1695  bool *nulls;
1696  Oid tupOid = InvalidOid;
1697  NameData tempNames[4];
1698  int i;
1699 
1700  values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1701  nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1702 
1703  memset(values, 0, tupDesc->natts * sizeof(Datum));
1704  memset(nulls, true, tupDesc->natts * sizeof(bool));
1705 
1706  for (i = 0; i < nkeys; i++)
1707  {
1708  int attindex = cache->cc_key[i];
1709  Datum keyval = skeys[i].sk_argument;
1710 
1711  if (attindex > 0)
1712  {
1713  /*
1714  * Here we must be careful in case the caller passed a C string
1715  * where a NAME is wanted: convert the given argument to a
1716  * correctly padded NAME. Otherwise the memcpy() done in
1717  * heap_form_tuple could fall off the end of memory.
1718  */
1719  if (cache->cc_isname[i])
1720  {
1721  Name newval = &tempNames[i];
1722 
1723  namestrcpy(newval, DatumGetCString(keyval));
1724  keyval = NameGetDatum(newval);
1725  }
1726  values[attindex - 1] = keyval;
1727  nulls[attindex - 1] = false;
1728  }
1729  else
1730  {
1731  Assert(attindex == ObjectIdAttributeNumber);
1732  tupOid = DatumGetObjectId(keyval);
1733  }
1734  }
1735 
1736  ntp = heap_form_tuple(tupDesc, values, nulls);
1737  if (tupOid != InvalidOid)
1738  HeapTupleSetOid(ntp, tupOid);
1739 
1740  pfree(values);
1741  pfree(nulls);
1742 
1743  return ntp;
1744 }
1745 
1746 
1747 /*
1748  * PrepareToInvalidateCacheTuple()
1749  *
1750  * This is part of a rather subtle chain of events, so pay attention:
1751  *
1752  * When a tuple is inserted or deleted, it cannot be flushed from the
1753  * catcaches immediately, for reasons explained at the top of cache/inval.c.
1754  * Instead we have to add entry(s) for the tuple to a list of pending tuple
1755  * invalidations that will be done at the end of the command or transaction.
1756  *
1757  * The lists of tuples that need to be flushed are kept by inval.c. This
1758  * routine is a helper routine for inval.c. Given a tuple belonging to
1759  * the specified relation, find all catcaches it could be in, compute the
1760  * correct hash value for each such catcache, and call the specified
1761  * function to record the cache id and hash value in inval.c's lists.
1762  * SysCacheInvalidate will be called later, if appropriate,
1763  * using the recorded information.
1764  *
1765  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1766  * For an update, we are called just once, with tuple being the old tuple
1767  * version and newtuple the new version. We should make two list entries
1768  * if the tuple's hash value changed, but only one if it didn't.
1769  *
1770  * Note that it is irrelevant whether the given tuple is actually loaded
1771  * into the catcache at the moment. Even if it's not there now, it might
1772  * be by the end of the command, or there might be a matching negative entry
1773  * to flush --- or other backends' caches might have such entries --- so
1774  * we have to make list entries to flush it later.
1775  *
1776  * Also note that it's not an error if there are no catcaches for the
1777  * specified relation. inval.c doesn't know exactly which rels have
1778  * catcaches --- it will call this routine for any tuple that's in a
1779  * system relation.
1780  */
1781 void
1783  HeapTuple tuple,
1784  HeapTuple newtuple,
1785  void (*function) (int, uint32, Oid))
1786 {
1787  slist_iter iter;
1788  Oid reloid;
1789 
1790  CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1791 
1792  /*
1793  * sanity checks
1794  */
1795  Assert(RelationIsValid(relation));
1796  Assert(HeapTupleIsValid(tuple));
1797  Assert(PointerIsValid(function));
1798  Assert(CacheHdr != NULL);
1799 
1800  reloid = RelationGetRelid(relation);
1801 
1802  /* ----------------
1803  * for each cache
1804  * if the cache contains tuples from the specified relation
1805  * compute the tuple's hash value(s) in this cache,
1806  * and call the passed function to register the information.
1807  * ----------------
1808  */
1809 
1810  slist_foreach(iter, &CacheHdr->ch_caches)
1811  {
1812  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
1813  uint32 hashvalue;
1814  Oid dbid;
1815 
1816  if (ccp->cc_reloid != reloid)
1817  continue;
1818 
1819  /* Just in case cache hasn't finished initialization yet... */
1820  if (ccp->cc_tupdesc == NULL)
1822 
1823  hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
1824  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1825 
1826  (*function) (ccp->id, hashvalue, dbid);
1827 
1828  if (newtuple)
1829  {
1830  uint32 newhashvalue;
1831 
1832  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1833 
1834  if (newhashvalue != hashvalue)
1835  (*function) (ccp->id, newhashvalue, dbid);
1836  }
1837  }
1838 }
1839 
1840 
1841 /*
1842  * Subroutines for warning about reference leaks. These are exported so
1843  * that resowner.c can call them.
1844  */
1845 void
1847 {
1848  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1849  offsetof(CatCTup, tuple));
1850 
1851  /* Safety check to ensure we were handed a cache entry */
1852  Assert(ct->ct_magic == CT_MAGIC);
1853 
1854  elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1855  ct->my_cache->cc_relname, ct->my_cache->id,
1856  ItemPointerGetBlockNumber(&(tuple->t_self)),
1858  ct->refcount);
1859 }
1860 
1861 void
1863 {
1864  elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1865  list->my_cache->cc_relname, list->my_cache->id,
1866  list, list->refcount);
1867 }
#define DatumGetUInt32(X)
Definition: postgres.h:492
#define NIL
Definition: pg_list.h:69
Oid sk_subtype
Definition: skey.h:69
Relation irel
Definition: relscan.h:156
#define REGCLASSOID
Definition: pg_type.h:577
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1085
void PrintCatCacheListLeakWarning(CatCList *list)
Definition: catcache.c:1862
#define NameGetDatum(X)
Definition: postgres.h:601
int n_members
Definition: catcache.h:154
void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:949
uint32 hash_value
Definition: catcache.h:152
Datum hashoid(PG_FUNCTION_ARGS)
Definition: hashfunc.c:113
#define DEBUG1
Definition: elog.h:25
#define NAMEOID
Definition: pg_type.h:300
dlist_node * cur
Definition: ilist.h:180
static void ResetCatalogCache(CatCache *cache)
Definition: catcache.c:533
uint32 hash_value
Definition: catcache.h:115
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:499
Datum hashname(PG_FUNCTION_ARGS)
Definition: hashfunc.c:225
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:807
Definition: syscache.h:36
CatCache * my_cache
Definition: catcache.h:82
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
void CatCacheInvalidate(CatCache *cache, uint32 hashValue)
Definition: catcache.c:443
#define CACHE3_elog(a, b, c, d)
Definition: catcache.c:66
static CatCacheHeader * CacheHdr
Definition: catcache.c:73
#define RelationGetDescr(relation)
Definition: rel.h:428
#define HASH_INDEX(h, sz)
Definition: catcache.c:49
#define REGROLEOID
Definition: pg_type.h:585
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:182
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, uint32 hashValue, Index hashIndex, bool negative)
Definition: catcache.c:1624
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:292
#define OIDOID
Definition: pg_type.h:328
#define TEXTOID
Definition: pg_type.h:324
Datum(* PGFunction)(FunctionCallInfo fcinfo)
Definition: fmgr.h:40
slist_node * cur
Definition: ilist.h:226
#define CT_MAGIC
Definition: catcache.h:81
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:84
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
ResourceOwner CurrentResourceOwner
Definition: resowner.c:138
#define DatumGetObjectId(X)
Definition: postgres.h:506
#define RelationGetForm(relation)
Definition: rel.h:410
char * pstrdup(const char *in)
Definition: mcxt.c:1076
regproc RegProcedure
Definition: c.h:389
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
dlist_head * cc_bucket
Definition: catcache.h:55
#define AccessShareLock
Definition: lockdefs.h:36
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:574
#define INT4OID
Definition: pg_type.h:316
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:1782
slist_node cc_next
Definition: catcache.h:40
void ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner)
Definition: resowner.c:973
int id
Definition: catcache.h:39
void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:958
int cl_magic
Definition: catcache.h:122
bool dead
Definition: catcache.h:149
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:695
bool criticalSharedRelcachesBuilt
Definition: relcache.c:135
#define heap_close(r, l)
Definition: heapam.h:97
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:585
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
unsigned int Oid
Definition: postgres_ext.h:31
#define REGTYPEOID
Definition: pg_type.h:581
dlist_head cc_lists
Definition: catcache.h:54
int namestrcpy(Name name, const char *str)
Definition: name.c:216
#define REGOPEROID
Definition: pg_type.h:569
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:812
bool cc_isname[CATCACHE_MAXKEYS]
Definition: catcache.h:53
bool cc_relisshared
Definition: catcache.h:44
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:328
int natts
Definition: tupdesc.h:73
short nkeys
Definition: catcache.h:151
struct catclist * c_list
Definition: catcache.h:98
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:1601
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:155
Datum hashchar(PG_FUNCTION_ARGS)
Definition: hashfunc.c:44
Oid cc_indexoid
Definition: catcache.h:43
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
Form_pg_index rd_index
Definition: rel.h:159
#define OIDVECTOROID
Definition: pg_type.h:344
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:416
void pfree(void *pointer)
Definition: mcxt.c:949
static void slist_init(slist_head *head)
Definition: ilist.h:554
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
Definition: catcache.c:174
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:749
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
#define DatumGetCString(X)
Definition: postgres.h:572
Oid cc_reloid
Definition: catcache.h:42
int cc_key[CATCACHE_MAXKEYS]
Definition: catcache.h:49
int cc_nkeys
Definition: catcache.h:48
#define RelationIsValid(relation)
Definition: rel.h:389
#define FATAL
Definition: elog.h:52
StrategyNumber sk_strategy
Definition: skey.h:68
ItemPointerData t_self
Definition: htup.h:65
TupleDesc cc_tupdesc
Definition: catcache.h:45
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:242
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:165
int cc_ntup
Definition: catcache.h:46
Definition: c.h:487
#define DEBUG2
Definition: elog.h:24
#define INT2OID
Definition: pg_type.h:308
FmgrInfo sk_func
Definition: skey.h:71
#define REGDICTIONARYOID
Definition: pg_type.h:627
#define RelationGetRelationName(relation)
Definition: rel.h:436
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:187
unsigned int uint32
Definition: c.h:258
Datum hashint4(PG_FUNCTION_ARGS)
Definition: hashfunc.c:68
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:808
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:131
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:654
#define CACHE4_elog(a, b, c, d, e)
Definition: catcache.c:67
MemoryContext TopMemoryContext
Definition: mcxt.c:43
List * lappend(List *list, void *datum)
Definition: list.c:128
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:391
static void GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
Definition: catcache.c:105
#define WARNING
Definition: elog.h:40
dlist_node cache_elem
Definition: catcache.h:147
#define CL_MAGIC
Definition: catcache.h:123
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:354
#define slist_container(type, membername, ptr)
Definition: ilist.h:674
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:322
void * palloc0(Size size)
Definition: mcxt.c:877
static bool IndexScanOK(CatCache *cache, ScanKey cur_skey)
Definition: catcache.c:975
uintptr_t Datum
Definition: postgres.h:372
void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
Definition: heaptuple.c:637
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1450
Oid MyDatabaseId
Definition: globals.c:77
Relation heap_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1290
CatCache * my_cache
Definition: catcache.h:124
void PrintCatCacheLeakWarning(HeapTuple tuple)
Definition: catcache.c:1846
dlist_node * cur
Definition: ilist.h:161
unsigned int Index
Definition: c.h:359
#define CHAROID
Definition: pg_type.h:296
#define CACHE1_elog(a, b)
Definition: catcache.c:64
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:741
#define InvalidOid
Definition: postgres_ext.h:36
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1253
struct tupleDesc * TupleDesc
slist_head ch_caches
Definition: catcache.h:161
void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:993
#define PG_CATCH()
Definition: elog.h:293
#define InitCatCache_DEBUG2
Definition: catcache.c:650
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define Assert(condition)
Definition: c.h:664
#define lfirst(lc)
Definition: pg_list.h:106
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:610
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
Definition: catcache.c:1690
int cc_nbuckets
Definition: catcache.h:47
void CreateCacheMemoryContext(void)
Definition: catcache.c:511
HeapTupleData tuple
Definition: catcache.h:153
static int list_length(const List *l)
Definition: pg_list.h:89
#define newval
int refcount
Definition: catcache.h:112
bool IsTransactionState(void)
Definition: xact.c:351
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
const char * cc_relname
Definition: catcache.h:41
#define PG_RE_THROW()
Definition: elog.h:314
#define BOOLOID
Definition: pg_type.h:288
void ResetCatalogCaches(void)
Definition: catcache.c:580
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
Definition: catcache.c:227
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:176
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:385
dlist_node cache_elem
Definition: catcache.h:89
#define REGCONFIGOID
Definition: pg_type.h:624
static Datum values[MAXATTR]
Definition: bootstrap.c:164
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:368
Datum hashint2(PG_FUNCTION_ARGS)
Definition: hashfunc.c:56
bool negative
Definition: catcache.h:114
#define slist_foreach(iter, lhead)
Definition: ilist.h:700
bool ordered
Definition: catcache.h:150
tuple list
Definition: sort-test.py:11
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1039
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void * palloc(Size size)
Definition: mcxt.c:848
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1325
Oid sk_collation
Definition: skey.h:70
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:207
int i
void * arg
HeapTupleData tuple
Definition: catcache.h:116
#define CACHE2_elog(a, b, c)
Definition: catcache.c:65
#define elog
Definition: elog.h:219
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:924
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
bool criticalRelcachesBuilt
Definition: relcache.c:129
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
#define HeapKeyTest(tuple, tupdesc, nkeys, keys, result)
Definition: valid.h:22
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1285
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:105
#define REGPROCEDUREOID
Definition: pg_type.h:565
int refcount
Definition: catcache.h:148
#define PG_TRY()
Definition: elog.h:284
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Definition: pg_list.h:45
#define PointerIsValid(pointer)
Definition: c.h:520
#define REGNAMESPACEOID
Definition: pg_type.h:589
PGFunction cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
Datum sk_argument
Definition: skey.h:72
#define RelationGetRelid(relation)
Definition: rel.h:416
void ResourceOwnerEnlargeCatCacheRefs(ResourceOwner owner)
Definition: resowner.c:938
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:151
#define PG_END_TRY()
Definition: elog.h:300
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:549
AttrNumber sk_attno
Definition: skey.h:67
#define REGOPERATOROID
Definition: pg_type.h:573
#define REGPROCOID
Definition: pg_type.h:320
int ct_magic
Definition: catcache.h:80
bool dead
Definition: catcache.h:113
MemoryContext CacheMemoryContext
Definition: mcxt.c:46
void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:984