PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
catcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/tuptoaster.h"
23 #include "access/valid.h"
24 #include "access/xact.h"
25 #include "catalog/pg_operator.h"
26 #include "catalog/pg_type.h"
27 #include "miscadmin.h"
28 #ifdef CATCACHE_STATS
29 #include "storage/ipc.h" /* for on_proc_exit */
30 #endif
31 #include "storage/lmgr.h"
32 #include "utils/builtins.h"
33 #include "utils/fmgroids.h"
34 #include "utils/inval.h"
35 #include "utils/memutils.h"
36 #include "utils/rel.h"
37 #include "utils/resowner_private.h"
38 #include "utils/syscache.h"
39 #include "utils/tqual.h"
40 
41 
42  /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
43 
44 /*
45  * Given a hash value and the size of the hash table, find the bucket
46  * in which the hash value belongs. Since the hash table must contain
47  * a power-of-2 number of elements, this is a simple bitmask.
48  */
49 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
50 
51 
52 /*
53  * variables, macros and other stuff
54  */
55 
56 #ifdef CACHEDEBUG
57 #define CACHE1_elog(a,b) elog(a,b)
58 #define CACHE2_elog(a,b,c) elog(a,b,c)
59 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
60 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
61 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
62 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
63 #else
64 #define CACHE1_elog(a,b)
65 #define CACHE2_elog(a,b,c)
66 #define CACHE3_elog(a,b,c,d)
67 #define CACHE4_elog(a,b,c,d,e)
68 #define CACHE5_elog(a,b,c,d,e,f)
69 #define CACHE6_elog(a,b,c,d,e,f,g)
70 #endif
71 
72 /* Cache management header --- pointer is NULL until created */
74 
75 
76 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
77  ScanKey cur_skey);
79  HeapTuple tuple);
80 
81 #ifdef CATCACHE_STATS
82 static void CatCachePrintStats(int code, Datum arg);
83 #endif
84 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
85 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
86 static void CatalogCacheInitializeCache(CatCache *cache);
88  uint32 hashValue, Index hashIndex,
89  bool negative);
90 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
91 
92 
93 /*
94  * internal support functions
95  */
96 
97 /*
98  * Look up the hash and equality functions for system types that are used
99  * as cache key fields.
100  *
101  * XXX this should be replaced by catalog lookups,
102  * but that seems to pose considerable risk of circularity...
103  */
104 static void
105 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
106 {
107  switch (keytype)
108  {
109  case BOOLOID:
110  *hashfunc = hashchar;
111 
112  *eqfunc = F_BOOLEQ;
113  break;
114  case CHAROID:
115  *hashfunc = hashchar;
116 
117  *eqfunc = F_CHAREQ;
118  break;
119  case NAMEOID:
120  *hashfunc = hashname;
121 
122  *eqfunc = F_NAMEEQ;
123  break;
124  case INT2OID:
125  *hashfunc = hashint2;
126 
127  *eqfunc = F_INT2EQ;
128  break;
129  case INT4OID:
130  *hashfunc = hashint4;
131 
132  *eqfunc = F_INT4EQ;
133  break;
134  case TEXTOID:
135  *hashfunc = hashtext;
136 
137  *eqfunc = F_TEXTEQ;
138  break;
139  case OIDOID:
140  case REGPROCOID:
141  case REGPROCEDUREOID:
142  case REGOPEROID:
143  case REGOPERATOROID:
144  case REGCLASSOID:
145  case REGTYPEOID:
146  case REGCONFIGOID:
147  case REGDICTIONARYOID:
148  case REGROLEOID:
149  case REGNAMESPACEOID:
150  *hashfunc = hashoid;
151 
152  *eqfunc = F_OIDEQ;
153  break;
154  case OIDVECTOROID:
155  *hashfunc = hashoidvector;
156 
157  *eqfunc = F_OIDVECTOREQ;
158  break;
159  default:
160  elog(FATAL, "type %u not supported as catcache key", keytype);
161  *hashfunc = NULL; /* keep compiler quiet */
162 
163  *eqfunc = InvalidOid;
164  break;
165  }
166 }
167 
168 /*
169  * CatalogCacheComputeHashValue
170  *
171  * Compute the hash value associated with a given set of lookup keys
172  */
173 static uint32
174 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
175 {
176  uint32 hashValue = 0;
177  uint32 oneHash;
 /*
  * Combine the per-key hashes into one 32-bit value.  Each key's hash is
  * rotated to a distinct bit position before XOR'ing, so equal datums in
  * different key columns do not cancel each other out.  The cases fall
  * through deliberately from nkeys down to 1.
  *
  * NOTE(review): the lines invoking the per-column hash function on each
  * sk_argument (source lines 188/195/202/209, presumably
  * DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[n], ...))) were
  * lost in extraction — confirm against upstream catcache.c.
  */
178 
179  CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
180  cache->cc_relname,
181  nkeys,
182  cache);
183 
184  switch (nkeys)
185  {
186  case 4:
187  oneHash =
189  cur_skey[3].sk_argument));
190  hashValue ^= oneHash << 24;
191  hashValue ^= oneHash >> 8;
192  /* FALLTHROUGH */
193  case 3:
194  oneHash =
196  cur_skey[2].sk_argument));
197  hashValue ^= oneHash << 16;
198  hashValue ^= oneHash >> 16;
199  /* FALLTHROUGH */
200  case 2:
201  oneHash =
203  cur_skey[1].sk_argument));
204  hashValue ^= oneHash << 8;
205  hashValue ^= oneHash >> 24;
206  /* FALLTHROUGH */
207  case 1:
208  oneHash =
210  cur_skey[0].sk_argument));
211  hashValue ^= oneHash;
212  break;
213  default:
214  elog(FATAL, "wrong number of hash keys: %d", nkeys);
215  break;
216  }
217 
218  return hashValue;
219 }
220 
221 /*
222  * CatalogCacheComputeTupleHashValue
223  *
224  * Compute the hash value associated with a given tuple to be cached
225  */
226 static uint32
 /*
  * NOTE(review): the signature line (source line 227) was lost in
  * extraction; per the usage below it takes (CatCache *cache,
  * HeapTuple tuple) — confirm against upstream catcache.c.  Likewise the
  * true-branch of each conditional (source lines 241/251/261/271,
  * presumably "? ObjectIdGetDatum(HeapTupleGetOid(tuple))") is missing.
  */
228 {
229  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
230  bool isNull = false;
231 
232  /* Copy pre-initialized overhead data for scankey */
233  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
234 
 /*
  * Extract each key attribute from the tuple (OID system attribute or a
  * regular column via fastgetattr) into the scankey, then hash it with
  * CatalogCacheComputeHashValue.  Cases fall through from cc_nkeys down
  * to 1.  Key columns are declared NOT NULL, hence the Asserts.
  */
235  /* Now extract key fields from tuple, insert into scankey */
236  switch (cache->cc_nkeys)
237  {
238  case 4:
239  cur_skey[3].sk_argument =
240  (cache->cc_key[3] == ObjectIdAttributeNumber)
242  : fastgetattr(tuple,
243  cache->cc_key[3],
244  cache->cc_tupdesc,
245  &isNull);
246  Assert(!isNull);
247  /* FALLTHROUGH */
248  case 3:
249  cur_skey[2].sk_argument =
250  (cache->cc_key[2] == ObjectIdAttributeNumber)
252  : fastgetattr(tuple,
253  cache->cc_key[2],
254  cache->cc_tupdesc,
255  &isNull);
256  Assert(!isNull);
257  /* FALLTHROUGH */
258  case 2:
259  cur_skey[1].sk_argument =
260  (cache->cc_key[1] == ObjectIdAttributeNumber)
262  : fastgetattr(tuple,
263  cache->cc_key[1],
264  cache->cc_tupdesc,
265  &isNull);
266  Assert(!isNull);
267  /* FALLTHROUGH */
268  case 1:
269  cur_skey[0].sk_argument =
270  (cache->cc_key[0] == ObjectIdAttributeNumber)
272  : fastgetattr(tuple,
273  cache->cc_key[0],
274  cache->cc_tupdesc,
275  &isNull);
276  Assert(!isNull);
277  break;
278  default:
279  elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
280  break;
281  }
282 
283  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
284 }
285 
286 
287 #ifdef CATCACHE_STATS
288 
289 static void
290 CatCachePrintStats(int code, Datum arg)
291 {
292  slist_iter iter;
293  long cc_searches = 0;
294  long cc_hits = 0;
295  long cc_neg_hits = 0;
296  long cc_newloads = 0;
297  long cc_invals = 0;
298  long cc_lsearches = 0;
299  long cc_lhits = 0;
300 
301  slist_foreach(iter, &CacheHdr->ch_caches)
302  {
303  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
304 
305  if (cache->cc_ntup == 0 && cache->cc_searches == 0)
306  continue; /* don't print unused caches */
307  elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
308  cache->cc_relname,
309  cache->cc_indexoid,
310  cache->cc_ntup,
311  cache->cc_searches,
312  cache->cc_hits,
313  cache->cc_neg_hits,
314  cache->cc_hits + cache->cc_neg_hits,
315  cache->cc_newloads,
316  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
317  cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
318  cache->cc_invals,
319  cache->cc_lsearches,
320  cache->cc_lhits);
321  cc_searches += cache->cc_searches;
322  cc_hits += cache->cc_hits;
323  cc_neg_hits += cache->cc_neg_hits;
324  cc_newloads += cache->cc_newloads;
325  cc_invals += cache->cc_invals;
326  cc_lsearches += cache->cc_lsearches;
327  cc_lhits += cache->cc_lhits;
328  }
329  elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
330  CacheHdr->ch_ntup,
331  cc_searches,
332  cc_hits,
333  cc_neg_hits,
334  cc_hits + cc_neg_hits,
335  cc_newloads,
336  cc_searches - cc_hits - cc_neg_hits - cc_newloads,
337  cc_searches - cc_hits - cc_neg_hits,
338  cc_invals,
339  cc_lsearches,
340  cc_lhits);
341 }
342 #endif /* CATCACHE_STATS */
343 
344 
345 /*
346  * CatCacheRemoveCTup
347  *
348  * Unlink and delete the given cache entry
349  *
350  * NB: if it is a member of a CatCList, the CatCList is deleted too.
351  * Both the cache entry and the list had better have zero refcount.
352  */
353 static void
 /*
  * NOTE(review): signature line (source line 354) lost in extraction;
  * takes (CatCache *cache, CatCTup *ct) per the prototype at the top of
  * the file — confirm against upstream catcache.c.
  */
355 {
356  Assert(ct->refcount == 0);
357  Assert(ct->my_cache == cache);
358 
359  if (ct->c_list)
360  {
361  /*
362  * The cleanest way to handle this is to call CatCacheRemoveCList,
363  * which will recurse back to me, and the recursive call will do the
364  * work. Set the "dead" flag to make sure it does recurse.
365  */
366  ct->dead = true;
367  CatCacheRemoveCList(cache, ct->c_list);
368  return; /* nothing left to do */
369  }
370 
371  /* delink from linked list */
372  dlist_delete(&ct->cache_elem);
373 
374  /* free associated tuple data */
375  if (ct->tuple.t_data != NULL)
376  pfree(ct->tuple.t_data);
377  pfree(ct);
378 
 /* keep the per-cache and global live-entry counters in sync */
379  --cache->cc_ntup;
380  --CacheHdr->ch_ntup;
381 }
382 
383 /*
384  * CatCacheRemoveCList
385  *
386  * Unlink and delete the given cache list entry
387  *
388  * NB: any dead member entries that become unreferenced are deleted too.
389  */
390 static void
 /*
  * NOTE(review): signature line (source line 391) lost in extraction;
  * takes (CatCache *cache, CatCList *cl) per the prototype at the top of
  * the file — confirm against upstream catcache.c.
  */
392 {
393  int i;
394 
395  Assert(cl->refcount == 0);
396  Assert(cl->my_cache == cache);
397 
398  /* delink from member tuples */
399  for (i = cl->n_members; --i >= 0;)
400  {
401  CatCTup *ct = cl->members[i];
402 
403  Assert(ct->c_list == cl);
404  ct->c_list = NULL;
405  /* if the member is dead and now has no references, remove it */
 /* (with CATCACHE_FORCE_RELEASE, unreferenced members go even if live) */
406  if (
407 #ifndef CATCACHE_FORCE_RELEASE
408  ct->dead &&
409 #endif
410  ct->refcount == 0)
411  CatCacheRemoveCTup(cache, ct);
412  }
413 
414  /* delink from linked list */
415  dlist_delete(&cl->cache_elem);
416 
417  /* free associated tuple data */
418  if (cl->tuple.t_data != NULL)
419  pfree(cl->tuple.t_data);
420  pfree(cl);
421 }
422 
423 
424 /*
425  * CatCacheInvalidate
426  *
427  * Invalidate entries in the specified cache, given a hash value.
428  *
429  * We delete cache entries that match the hash value, whether positive
430  * or negative. We don't care whether the invalidation is the result
431  * of a tuple insertion or a deletion.
432  *
433  * We used to try to match positive cache entries by TID, but that is
434  * unsafe after a VACUUM FULL on a system catalog: an inval event could
435  * be queued before VACUUM FULL, and then processed afterwards, when the
436  * target tuple that has to be invalidated has a different TID than it
437  * did when the event was created. So now we just compare hash values and
438  * accept the small risk of unnecessary invalidations due to false matches.
439  *
440  * This routine is only quasi-public: it should only be used by inval.c.
441  */
442 void
 /*
  * NOTE(review): signature line (source line 443) lost in extraction;
  * takes (CatCache *cache, uint32 hashValue) per the callers in inval.c —
  * confirm against upstream catcache.c.
  */
444 {
445  Index hashIndex;
446  dlist_mutable_iter iter;
447 
448  CACHE1_elog(DEBUG2, "CatCacheInvalidate: called");
449 
450  /*
451  * We don't bother to check whether the cache has finished initialization
452  * yet; if not, there will be no entries in it so no problem.
453  */
454 
455  /*
456  * Invalidate *all* CatCLists in this cache; it's too hard to tell which
457  * searches might still be correct, so just zap 'em all.
458  */
459  dlist_foreach_modify(iter, &cache->cc_lists)
460  {
461  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
462 
 /* pinned lists are only flagged dead; freed when their pin is dropped */
463  if (cl->refcount > 0)
464  cl->dead = true;
465  else
466  CatCacheRemoveCList(cache, cl);
467  }
468 
469  /*
470  * inspect the proper hash bucket for tuple matches
471  */
472  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
473  dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
474  {
475  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
476 
477  if (hashValue == ct->hash_value)
478  {
 /* same rule as for lists: referenced entries die lazily */
479  if (ct->refcount > 0 ||
480  (ct->c_list && ct->c_list->refcount > 0))
481  {
482  ct->dead = true;
483  /* list, if any, was marked dead above */
484  Assert(ct->c_list == NULL || ct->c_list->dead);
485  }
486  else
487  CatCacheRemoveCTup(cache, ct);
488  CACHE1_elog(DEBUG2, "CatCacheInvalidate: invalidated");
489 #ifdef CATCACHE_STATS
490  cache->cc_invals++;
491 #endif
492  /* could be multiple matches, so keep looking! */
493  }
494  }
495 }
496 
497 /* ----------------------------------------------------------------
498  * public functions
499  * ----------------------------------------------------------------
500  */
501 
502 
503 /*
504  * Standard routine for creating cache context if it doesn't exist yet
505  *
506  * There are a lot of places (probably far more than necessary) that check
507  * whether CacheMemoryContext exists yet and want to create it if not.
508  * We centralize knowledge of exactly how to create it here.
509  */
510 void
 /*
  * NOTE(review): signature line (source line 511) lost in extraction;
  * presumably "CreateCacheMemoryContext(void)".  The body of the if
  * (source lines 518/520) is also truncated — presumably an
  * AllocSetContextCreate(TopMemoryContext, "CacheMemoryContext", ...)
  * call whose result is assigned to CacheMemoryContext.  Confirm against
  * upstream catcache.c.
  */
512 {
513  /*
514  * Purely for paranoia, check that context doesn't exist; caller probably
515  * did so already.
516  */
517  if (!CacheMemoryContext)
519  "CacheMemoryContext",
521 }
522 
523 
524 /*
525  * AtEOXact_CatCache
526  *
527  * Clean up catcaches at end of main transaction (either commit or abort)
528  *
529  * As of PostgreSQL 8.1, catcache pins should get released by the
530  * ResourceOwner mechanism. This routine is just a debugging
531  * cross-check that no pins remain.
532  */
533 void
534 AtEOXact_CatCache(bool isCommit)
535 {
536 #ifdef USE_ASSERT_CHECKING
537  slist_iter cache_iter;
538 
539  slist_foreach(cache_iter, &CacheHdr->ch_caches)
540  {
541  CatCache *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
542  dlist_iter iter;
543  int i;
544 
545  /* Check CatCLists */
546  dlist_foreach(iter, &ccp->cc_lists)
547  {
548  CatCList *cl;
549 
550  cl = dlist_container(CatCList, cache_elem, iter.cur);
551  Assert(cl->cl_magic == CL_MAGIC);
552  Assert(cl->refcount == 0);
553  Assert(!cl->dead);
554  }
555 
556  /* Check individual tuples */
557  for (i = 0; i < ccp->cc_nbuckets; i++)
558  {
559  dlist_head *bucket = &ccp->cc_bucket[i];
560 
561  dlist_foreach(iter, bucket)
562  {
563  CatCTup *ct;
564 
565  ct = dlist_container(CatCTup, cache_elem, iter.cur);
566  Assert(ct->ct_magic == CT_MAGIC);
567  Assert(ct->refcount == 0);
568  Assert(!ct->dead);
569  }
570  }
571  }
572 #endif
573 }
574 
575 /*
576  * ResetCatalogCache
577  *
578  * Reset one catalog cache to empty.
579  *
580  * This is not very efficient if the target cache is nearly empty.
581  * However, it shouldn't need to be efficient; we don't invoke it often.
582  */
583 static void
 /*
  * NOTE(review): signature line (source line 584) lost in extraction;
  * takes (CatCache *cache) per the caller ResetCatalogCaches below —
  * confirm against upstream catcache.c.
  */
585 {
586  dlist_mutable_iter iter;
587  int i;
588 
589  /* Remove each list in this cache, or at least mark it dead */
590  dlist_foreach_modify(iter, &cache->cc_lists)
591  {
592  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
593 
594  if (cl->refcount > 0)
595  cl->dead = true;
596  else
597  CatCacheRemoveCList(cache, cl);
598  }
599 
600  /* Remove each tuple in this cache, or at least mark it dead */
601  for (i = 0; i < cache->cc_nbuckets; i++)
602  {
603  dlist_head *bucket = &cache->cc_bucket[i];
604 
605  dlist_foreach_modify(iter, bucket)
606  {
607  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
608 
609  if (ct->refcount > 0 ||
610  (ct->c_list && ct->c_list->refcount > 0))
611  {
612  ct->dead = true;
613  /* list, if any, was marked dead above */
614  Assert(ct->c_list == NULL || ct->c_list->dead);
615  }
616  else
617  CatCacheRemoveCTup(cache, ct);
618 #ifdef CATCACHE_STATS
 /* each dropped/dead-marked entry counts as an invalidation */
619  cache->cc_invals++;
620 #endif
621  }
622  }
623 }
624 
625 /*
626  * ResetCatalogCaches
627  *
628  * Reset all caches when a shared cache inval event forces it
629  */
630 void
 /*
  * NOTE(review): signature line (source line 631) lost in extraction;
  * presumably "ResetCatalogCaches(void)" — confirm against upstream.
  * Empties every registered catcache via ResetCatalogCache.
  */
632 {
633  slist_iter iter;
634 
635  CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
636 
637  slist_foreach(iter, &CacheHdr->ch_caches)
638  {
639  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
640 
641  ResetCatalogCache(cache);
642  }
643 
644  CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
645 }
646 
647 /*
648  * CatalogCacheFlushCatalog
649  *
650  * Flush all catcache entries that came from the specified system catalog.
651  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
652  * tuples very likely now have different TIDs than before. (At one point
653  * we also tried to force re-execution of CatalogCacheInitializeCache for
654  * the cache(s) on that catalog. This is a bad idea since it leads to all
655  * kinds of trouble if a cache flush occurs while loading cache entries.
656  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
657  * rather than relying on the relcache to keep a tupdesc for us. Of course
658  * this assumes the tupdesc of a cachable system table will not change...)
659  */
660 void
 /*
  * NOTE(review): signature line (source line 661) lost in extraction;
  * takes an Oid catId (the catalog's relation OID, used below) —
  * confirm against upstream catcache.c.
  */
662 {
663  slist_iter iter;
664 
665  CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
666 
667  slist_foreach(iter, &CacheHdr->ch_caches)
668  {
669  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
670 
671  /* Does this cache store tuples of the target catalog? */
672  if (cache->cc_reloid == catId)
673  {
674  /* Yes, so flush all its contents */
675  ResetCatalogCache(cache);
676 
677  /* Tell inval.c to call syscache callbacks for this cache */
678  CallSyscacheCallbacks(cache->id, 0);
679  }
680  }
681 
682  CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
683 }
684 
685 /*
686  * InitCatCache
687  *
688  * This allocates and initializes a cache for a system catalog relation.
689  * Actually, the cache is only partially initialized to avoid opening the
690  * relation. The relation will be opened and the rest of the cache
691  * structure initialized on the first access.
692  */
693 #ifdef CACHEDEBUG
694 #define InitCatCache_DEBUG2 \
695 do { \
696  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
697  cp->cc_reloid, cp->cc_indexoid, cp->id, \
698  cp->cc_nkeys, cp->cc_nbuckets); \
699 } while(0)
700 #else
701 #define InitCatCache_DEBUG2
702 #endif
703 
704 CatCache *
 /*
  * NOTE(review): the first parameter line (source line 705, presumably
  * "InitCatCache(int id,") was lost in extraction — confirm against
  * upstream catcache.c.  Parameters: id = syscache identifier,
  * reloid/indexoid = catalog and its unique index, nkeys/key[] = key
  * attribute numbers, nbuckets = initial (power-of-two) bucket count.
  */
706  Oid reloid,
707  Oid indexoid,
708  int nkeys,
709  const int *key,
710  int nbuckets)
711 {
712  CatCache *cp;
713  MemoryContext oldcxt;
714  int i;
715 
716  /*
717  * nbuckets is the initial number of hash buckets to use in this catcache.
718  * It will be enlarged later if it becomes too full.
719  *
720  * nbuckets must be a power of two. We check this via Assert rather than
721  * a full runtime check because the values will be coming from constant
722  * tables.
723  *
724  * If you're confused by the power-of-two check, see comments in
725  * bitmapset.c for an explanation.
726  */
727  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
728 
729  /*
730  * first switch to the cache context so our allocations do not vanish at
731  * the end of a transaction
732  */
 /*
  * NOTE(review): source lines 734 and 736 lost in extraction — presumably
  * "CreateCacheMemoryContext();" and
  * "oldcxt = MemoryContextSwitchTo(CacheMemoryContext);" (oldcxt is
  * restored below).  Confirm against upstream.
  */
733  if (!CacheMemoryContext)
735 
737 
738  /*
739  * if first time through, initialize the cache group header
740  */
741  if (CacheHdr == NULL)
742  {
743  CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
744  slist_init(&CacheHdr->ch_caches);
745  CacheHdr->ch_ntup = 0;
746 #ifdef CATCACHE_STATS
747  /* set up to dump stats at backend exit */
748  on_proc_exit(CatCachePrintStats, 0);
749 #endif
750  }
751 
752  /*
753  * allocate a new cache structure
754  *
755  * Note: we rely on zeroing to initialize all the dlist headers correctly
756  */
757  cp = (CatCache *) palloc0(sizeof(CatCache));
758  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
759 
760  /*
761  * initialize the cache's relation information for the relation
762  * corresponding to this cache, and initialize some of the new cache's
763  * other internal fields. But don't open the relation yet.
764  */
765  cp->id = id;
766  cp->cc_relname = "(not known yet)";
767  cp->cc_reloid = reloid;
768  cp->cc_indexoid = indexoid;
769  cp->cc_relisshared = false; /* temporary */
770  cp->cc_tupdesc = (TupleDesc) NULL;
771  cp->cc_ntup = 0;
772  cp->cc_nbuckets = nbuckets;
773  cp->cc_nkeys = nkeys;
774  for (i = 0; i < nkeys; ++i)
775  cp->cc_key[i] = key[i];
776 
777  /*
778  * new cache is initialized as far as we can go for now. print some
779  * debugging information, if appropriate.
780  */
 /* NOTE(review): source line 781 (presumably "InitCatCache_DEBUG2;") lost
  * in extraction. */
782 
783  /*
784  * add completed cache to top of group header's list
785  */
786  slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
787 
788  /*
789  * back to the old context before we return...
790  */
791  MemoryContextSwitchTo(oldcxt);
792 
793  return cp;
794 }
795 
796 /*
797  * Enlarge a catcache, doubling the number of buckets.
798  */
799 static void
 /*
  * NOTE(review): signature line (source line 800) lost in extraction;
  * takes (CatCache *cp) per the body below — confirm against upstream.
  */
801 {
802  dlist_head *newbucket;
803  int newnbuckets;
804  int i;
805 
806  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
807  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
808 
809  /* Allocate a new, larger, hash table. */
 /* doubling keeps the bucket count a power of two, as HASH_INDEX needs */
810  newnbuckets = cp->cc_nbuckets * 2;
811  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
812 
813  /* Move all entries from old hash table to new. */
814  for (i = 0; i < cp->cc_nbuckets; i++)
815  {
816  dlist_mutable_iter iter;
817 
818  dlist_foreach_modify(iter, &cp->cc_bucket[i])
819  {
820  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
 /* re-bucket using the entry's stored hash; no re-hashing of keys needed */
821  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
822 
823  dlist_delete(iter.cur);
824  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
825  }
826  }
827 
828  /* Switch to the new array. */
829  pfree(cp->cc_bucket);
830  cp->cc_nbuckets = newnbuckets;
831  cp->cc_bucket = newbucket;
832 }
833 
834 /*
835  * CatalogCacheInitializeCache
836  *
837  * This function does final initialization of a catcache: obtain the tuple
838  * descriptor and set up the hash and equality function links. We assume
839  * that the relcache entry can be opened at this point!
840  */
841 #ifdef CACHEDEBUG
842 #define CatalogCacheInitializeCache_DEBUG1 \
843  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
844  cache->cc_reloid)
845 
846 #define CatalogCacheInitializeCache_DEBUG2 \
847 do { \
848  if (cache->cc_key[i] > 0) { \
849  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
850  i+1, cache->cc_nkeys, cache->cc_key[i], \
851  tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
852  } else { \
853  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
854  i+1, cache->cc_nkeys, cache->cc_key[i]); \
855  } \
856 } while(0)
857 #else
858 #define CatalogCacheInitializeCache_DEBUG1
859 #define CatalogCacheInitializeCache_DEBUG2
860 #endif
861 
862 static void
 /*
  * NOTE(review): signature line (source line 863) lost in extraction;
  * takes (CatCache *cache) per the prototype above — confirm against
  * upstream catcache.c.  Several hyperlinked statement lines are also
  * missing below (870, 878, 880, 912, 941, 947); each gap is flagged
  * where it occurs.
  */
864 {
865  Relation relation;
866  MemoryContext oldcxt;
867  TupleDesc tupdesc;
868  int i;
869 
 /* NOTE(review): line 870 lost — presumably
  * "CatalogCacheInitializeCache_DEBUG1;". */
871 
872  relation = heap_open(cache->cc_reloid, AccessShareLock);
873 
874  /*
875  * switch to the cache context so our allocations do not vanish at the end
876  * of a transaction
877  */
 /* NOTE(review): lines 878/880 lost — presumably an Assert on
  * CacheMemoryContext and
  * "oldcxt = MemoryContextSwitchTo(CacheMemoryContext);". */
879 
881 
882  /*
883  * copy the relcache's tuple descriptor to permanent cache storage
884  */
885  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
886 
887  /*
888  * save the relation's name and relisshared flag, too (cc_relname is used
889  * only for debugging purposes)
890  */
891  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
892  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
893 
894  /*
895  * return to the caller's memory context and close the rel
896  */
897  MemoryContextSwitchTo(oldcxt);
898 
899  heap_close(relation, AccessShareLock);
900 
901  CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
902  cache->cc_relname, cache->cc_nkeys);
903 
904  /*
905  * initialize cache's key information
906  */
907  for (i = 0; i < cache->cc_nkeys; ++i)
908  {
909  Oid keytype;
910  RegProcedure eqfunc;
911 
 /* NOTE(review): line 912 lost — presumably
  * "CatalogCacheInitializeCache_DEBUG2;". */
913 
914  if (cache->cc_key[i] > 0)
915  {
916  Form_pg_attribute attr = tupdesc->attrs[cache->cc_key[i] - 1];
917 
918  keytype = attr->atttypid;
919  /* cache key columns should always be NOT NULL */
920  Assert(attr->attnotnull);
921  }
922  else
923  {
 /* only the OID system attribute is allowed as a negative key number */
924  if (cache->cc_key[i] != ObjectIdAttributeNumber)
925  elog(FATAL, "only sys attr supported in caches is OID");
926  keytype = OIDOID;
927  }
928 
929  GetCCHashEqFuncs(keytype,
930  &cache->cc_hashfunc[i],
931  &eqfunc);
932 
933  cache->cc_isname[i] = (keytype == NAMEOID);
934 
935  /*
936  * Do equality-function lookup (we assume this won't need a catalog
937  * lookup for any supported type)
938  */
 /* NOTE(review): line 941 lost — presumably the trailing
  * "CacheMemoryContext);" argument of this fmgr_info_cxt call. */
939  fmgr_info_cxt(eqfunc,
940  &cache->cc_skey[i].sk_func,
942 
943  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
944  cache->cc_skey[i].sk_attno = cache->cc_key[i];
945 
946  /* Fill in sk_strategy as well --- always standard equality */
 /* NOTE(review): line 947 lost — presumably
  * "cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;". */
948  cache->cc_skey[i].sk_subtype = InvalidOid;
949  /* Currently, there are no catcaches on collation-aware data types */
950  cache->cc_skey[i].sk_collation = InvalidOid;
951 
952  CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
953  cache->cc_relname,
954  i,
955  cache);
956  }
957 
958  /*
959  * mark this cache fully initialized
960  */
961  cache->cc_tupdesc = tupdesc;
962 }
963 
964 /*
965  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
966  *
967  * One reason to call this routine is to ensure that the relcache has
968  * created entries for all the catalogs and indexes referenced by catcaches.
969  * Therefore, provide an option to open the index as well as fixing the
970  * cache itself. An exception is the indexes on pg_am, which we don't use
971  * (cf. IndexScanOK).
972  */
973 void
974 InitCatCachePhase2(CatCache *cache, bool touch_index)
975 {
 /* NOTE(review): line 977 lost in extraction — presumably
  * "CatalogCacheInitializeCache(cache);" (finish cache init if needed). */
976  if (cache->cc_tupdesc == NULL)
978 
979  if (touch_index &&
980  cache->id != AMOID &&
981  cache->id != AMNAME)
982  {
983  Relation idesc;
984 
985  /*
986  * We must lock the underlying catalog before opening the index to
987  * avoid deadlock, since index_open could possibly result in reading
988  * this same catalog, and if anyone else is exclusive-locking this
989  * catalog and index they'll be doing it in that order.
990  */
 /* NOTE(review): line 991 lost — presumably
  * "LockRelationOid(cache->cc_reloid, AccessShareLock);". */
992  idesc = index_open(cache->cc_indexoid, AccessShareLock);
993 
994  /*
995  * While we've got the index open, let's check that it's unique (and
996  * not just deferrable-unique, thank you very much). This is just to
997  * catch thinkos in definitions of new catcaches, so we don't worry
998  * about the pg_am indexes not getting tested.
999  */
1000  Assert(idesc->rd_index->indisunique &&
1001  idesc->rd_index->indimmediate);
1002 
 /* NOTE(review): line 1004 lost — presumably
  * "UnlockRelationOid(cache->cc_reloid, AccessShareLock);". */
1003  index_close(idesc, AccessShareLock);
1005  }
1006 }
1007 
1008 
1009 /*
1010  * IndexScanOK
1011  *
1012  * This function checks for tuples that will be fetched by
1013  * IndexSupportInitialize() during relcache initialization for
1014  * certain system indexes that support critical syscaches.
1015  * We can't use an indexscan to fetch these, else we'll get into
1016  * infinite recursion. A plain heap scan will work, however.
1017  * Once we have completed relcache initialization (signaled by
1018  * criticalRelcachesBuilt), we don't have to worry anymore.
1019  *
1020  * Similarly, during backend startup we have to be able to use the
1021  * pg_authid and pg_auth_members syscaches for authentication even if
1022  * we don't yet have relcache entries for those catalogs' indexes.
1023  */
1024 static bool
1025 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1026 {
 /*
  * NOTE(review): the guard conditions before the first and third
  * "return false" (source lines 1037 and 1060 — presumably
  * "if (!criticalRelcachesBuilt)" and
  * "if (!criticalSharedRelcachesBuilt)") were lost in extraction;
  * confirm against upstream catcache.c.
  */
1027  switch (cache->id)
1028  {
1029  case INDEXRELID:
1030 
1031  /*
1032  * Rather than tracking exactly which indexes have to be loaded
1033  * before we can use indexscans (which changes from time to time),
1034  * just force all pg_index searches to be heap scans until we've
1035  * built the critical relcaches.
1036  */
1038  return false;
1039  break;
1040 
1041  case AMOID:
1042  case AMNAME:
1043 
1044  /*
1045  * Always do heap scans in pg_am, because it's so small there's
1046  * not much point in an indexscan anyway. We *must* do this when
1047  * initially building critical relcache entries, but we might as
1048  * well just always do it.
1049  */
1050  return false;
1051 
1052  case AUTHNAME:
1053  case AUTHOID:
1054  case AUTHMEMMEMROLE:
1055 
1056  /*
1057  * Protect authentication lookups occurring before relcache has
1058  * collected entries for shared indexes.
1059  */
1061  return false;
1062  break;
1063 
1064  default:
1065  break;
1066  }
1067 
1068  /* Normal case, allow index scan */
1069  return true;
1070 }
1071 
1072 /*
1073  * SearchCatCache
1074  *
1075  * This call searches a system cache for a tuple, opening the relation
1076  * if necessary (on the first access to a particular cache).
1077  *
1078  * The result is NULL if not found, or a pointer to a HeapTuple in
1079  * the cache. The caller must not modify the tuple, and must call
1080  * ReleaseCatCache() when done with it.
1081  *
1082  * The search key values should be expressed as Datums of the key columns'
1083  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1084  * exception, the passed-in key for a NAME column can be just a C string;
1085  * the caller need not go to the trouble of converting it to a fully
1086  * null-padded NAME.
1087  */
1088 HeapTuple
1090  Datum v1,
1091  Datum v2,
1092  Datum v3,
1093  Datum v4)
1094 {
1095  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1096  uint32 hashValue;
1097  Index hashIndex;
1098  dlist_iter iter;
1099  dlist_head *bucket;
1100  CatCTup *ct;
1101  Relation relation;
1102  SysScanDesc scandesc;
1103  HeapTuple ntp;
1104 
1105  /* Make sure we're in an xact, even if this ends up being a cache hit */
1107 
1108  /*
1109  * one-time startup overhead for each cache
1110  */
1111  if (cache->cc_tupdesc == NULL)
1113 
1114 #ifdef CATCACHE_STATS
1115  cache->cc_searches++;
1116 #endif
1117 
1118  /*
1119  * initialize the search key information
1120  */
1121  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1122  cur_skey[0].sk_argument = v1;
1123  cur_skey[1].sk_argument = v2;
1124  cur_skey[2].sk_argument = v3;
1125  cur_skey[3].sk_argument = v4;
1126 
1127  /*
1128  * find the hash bucket in which to look for the tuple
1129  */
1130  hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1131  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1132 
1133  /*
1134  * scan the hash bucket until we find a match or exhaust our tuples
1135  *
1136  * Note: it's okay to use dlist_foreach here, even though we modify the
1137  * dlist within the loop, because we don't continue the loop afterwards.
1138  */
1139  bucket = &cache->cc_bucket[hashIndex];
1140  dlist_foreach(iter, bucket)
1141  {
1142  bool res;
1143 
1144  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1145 
1146  if (ct->dead)
1147  continue; /* ignore dead entries */
1148 
1149  if (ct->hash_value != hashValue)
1150  continue; /* quickly skip entry if wrong hash val */
1151 
1152  /*
1153  * see if the cached tuple matches our key.
1154  */
1155  HeapKeyTest(&ct->tuple,
1156  cache->cc_tupdesc,
1157  cache->cc_nkeys,
1158  cur_skey,
1159  res);
1160  if (!res)
1161  continue;
1162 
1163  /*
1164  * We found a match in the cache. Move it to the front of the list
1165  * for its hashbucket, in order to speed subsequent searches. (The
1166  * most frequently accessed elements in any hashbucket will tend to be
1167  * near the front of the hashbucket's list.)
1168  */
1169  dlist_move_head(bucket, &ct->cache_elem);
1170 
1171  /*
1172  * If it's a positive entry, bump its refcount and return it. If it's
1173  * negative, we can report failure to the caller.
1174  */
1175  if (!ct->negative)
1176  {
1178  ct->refcount++;
1180 
1181  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1182  cache->cc_relname, hashIndex);
1183 
1184 #ifdef CATCACHE_STATS
1185  cache->cc_hits++;
1186 #endif
1187 
1188  return &ct->tuple;
1189  }
1190  else
1191  {
1192  CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1193  cache->cc_relname, hashIndex);
1194 
1195 #ifdef CATCACHE_STATS
1196  cache->cc_neg_hits++;
1197 #endif
1198 
1199  return NULL;
1200  }
1201  }
1202 
1203  /*
1204  * Tuple was not found in cache, so we have to try to retrieve it directly
1205  * from the relation. If found, we will add it to the cache; if not
1206  * found, we will add a negative cache entry instead.
1207  *
1208  * NOTE: it is possible for recursive cache lookups to occur while reading
1209  * the relation --- for example, due to shared-cache-inval messages being
1210  * processed during heap_open(). This is OK. It's even possible for one
1211  * of those lookups to find and enter the very same tuple we are trying to
1212  * fetch here. If that happens, we will enter a second copy of the tuple
1213  * into the cache. The first copy will never be referenced again, and
1214  * will eventually age out of the cache, so there's no functional problem.
1215  * This case is rare enough that it's not worth expending extra cycles to
1216  * detect.
1217  */
1218  relation = heap_open(cache->cc_reloid, AccessShareLock);
1219 
1220  scandesc = systable_beginscan(relation,
1221  cache->cc_indexoid,
1222  IndexScanOK(cache, cur_skey),
1223  NULL,
1224  cache->cc_nkeys,
1225  cur_skey);
1226 
1227  ct = NULL;
1228 
1229  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1230  {
1231  ct = CatalogCacheCreateEntry(cache, ntp,
1232  hashValue, hashIndex,
1233  false);
1234  /* immediately set the refcount to 1 */
1236  ct->refcount++;
1238  break; /* assume only one match */
1239  }
1240 
1241  systable_endscan(scandesc);
1242 
1243  heap_close(relation, AccessShareLock);
1244 
1245  /*
1246  * If tuple was not found, we need to build a negative cache entry
1247  * containing a fake tuple. The fake tuple has the correct key columns,
1248  * but nulls everywhere else.
1249  *
1250  * In bootstrap mode, we don't build negative entries, because the cache
1251  * invalidation mechanism isn't alive and can't clear them if the tuple
1252  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1253  * cache inval for that.)
1254  */
1255  if (ct == NULL)
1256  {
1258  return NULL;
1259 
1260  ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1261  ct = CatalogCacheCreateEntry(cache, ntp,
1262  hashValue, hashIndex,
1263  true);
1264  heap_freetuple(ntp);
1265 
1266  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1267  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1268  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1269  cache->cc_relname, hashIndex);
1270 
1271  /*
1272  * We are not returning the negative entry to the caller, so leave its
1273  * refcount zero.
1274  */
1275 
1276  return NULL;
1277  }
1278 
1279  CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1280  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1281  CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1282  cache->cc_relname, hashIndex);
1283 
1284 #ifdef CATCACHE_STATS
1285  cache->cc_newloads++;
1286 #endif
1287 
1288  return &ct->tuple;
1289 }
1290 
1291 /*
1292  * ReleaseCatCache
1293  *
1294  * Decrement the reference count of a catcache entry (releasing the
1295  * hold grabbed by a successful SearchCatCache).
1296  *
1297  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1298  * will be freed as soon as their refcount goes to zero. In combination
1299  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1300  * to catch references to already-released catcache entries.
1301  */
1302 void
1304 {
1305  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1306  offsetof(CatCTup, tuple));
1307 
1308  /* Safety checks to ensure we were handed a cache entry */
1309  Assert(ct->ct_magic == CT_MAGIC);
1310  Assert(ct->refcount > 0);
1311 
1312  ct->refcount--;
1314 
1315  if (
1316 #ifndef CATCACHE_FORCE_RELEASE
1317  ct->dead &&
1318 #endif
1319  ct->refcount == 0 &&
1320  (ct->c_list == NULL || ct->c_list->refcount == 0))
1321  CatCacheRemoveCTup(ct->my_cache, ct);
1322 }
1323 
1324 
1325 /*
1326  * GetCatCacheHashValue
1327  *
1328  * Compute the hash value for a given set of search keys.
1329  *
1330  * The reason for exposing this as part of the API is that the hash value is
1331  * exposed in cache invalidation operations, so there are places outside the
1332  * catcache code that need to be able to compute the hash values.
1333  */
1334 uint32
1336  Datum v1,
1337  Datum v2,
1338  Datum v3,
1339  Datum v4)
1340 {
1341  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1342 
1343  /*
1344  * one-time startup overhead for each cache
1345  */
1346  if (cache->cc_tupdesc == NULL)
1348 
1349  /*
1350  * initialize the search key information
1351  */
1352  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1353  cur_skey[0].sk_argument = v1;
1354  cur_skey[1].sk_argument = v2;
1355  cur_skey[2].sk_argument = v3;
1356  cur_skey[3].sk_argument = v4;
1357 
1358  /*
1359  * calculate the hash value
1360  */
1361  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1362 }
1363 
1364 
1365 /*
1366  * SearchCatCacheList
1367  *
1368  * Generate a list of all tuples matching a partial key (that is,
1369  * a key specifying just the first K of the cache's N key columns).
1370  *
1371  * The caller must not modify the list object or the pointed-to tuples,
1372  * and must call ReleaseCatCacheList() when done with the list.
1373  */
1374 CatCList *
1376  int nkeys,
1377  Datum v1,
1378  Datum v2,
1379  Datum v3,
1380  Datum v4)
1381 {
1382  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1383  uint32 lHashValue;
1384  dlist_iter iter;
1385  CatCList *cl;
1386  CatCTup *ct;
1387  List *volatile ctlist;
1388  ListCell *ctlist_item;
1389  int nmembers;
1390  bool ordered;
1391  HeapTuple ntp;
1392  MemoryContext oldcxt;
1393  int i;
1394 
1395  /*
1396  * one-time startup overhead for each cache
1397  */
1398  if (cache->cc_tupdesc == NULL)
1400 
1401  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1402 
1403 #ifdef CATCACHE_STATS
1404  cache->cc_lsearches++;
1405 #endif
1406 
1407  /*
1408  * initialize the search key information
1409  */
1410  memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1411  cur_skey[0].sk_argument = v1;
1412  cur_skey[1].sk_argument = v2;
1413  cur_skey[2].sk_argument = v3;
1414  cur_skey[3].sk_argument = v4;
1415 
1416  /*
1417  * compute a hash value of the given keys for faster search. We don't
1418  * presently divide the CatCList items into buckets, but this still lets
1419  * us skip non-matching items quickly most of the time.
1420  */
1421  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1422 
1423  /*
1424  * scan the items until we find a match or exhaust our list
1425  *
1426  * Note: it's okay to use dlist_foreach here, even though we modify the
1427  * dlist within the loop, because we don't continue the loop afterwards.
1428  */
1429  dlist_foreach(iter, &cache->cc_lists)
1430  {
1431  bool res;
1432 
1433  cl = dlist_container(CatCList, cache_elem, iter.cur);
1434 
1435  if (cl->dead)
1436  continue; /* ignore dead entries */
1437 
1438  if (cl->hash_value != lHashValue)
1439  continue; /* quickly skip entry if wrong hash val */
1440 
1441  /*
1442  * see if the cached list matches our key.
1443  */
1444  if (cl->nkeys != nkeys)
1445  continue;
1446  HeapKeyTest(&cl->tuple,
1447  cache->cc_tupdesc,
1448  nkeys,
1449  cur_skey,
1450  res);
1451  if (!res)
1452  continue;
1453 
1454  /*
1455  * We found a matching list. Move the list to the front of the
1456  * cache's list-of-lists, to speed subsequent searches. (We do not
1457  * move the members to the fronts of their hashbucket lists, however,
1458  * since there's no point in that unless they are searched for
1459  * individually.)
1460  */
1461  dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1462 
1463  /* Bump the list's refcount and return it */
1465  cl->refcount++;
1467 
1468  CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1469  cache->cc_relname);
1470 
1471 #ifdef CATCACHE_STATS
1472  cache->cc_lhits++;
1473 #endif
1474 
1475  return cl;
1476  }
1477 
1478  /*
1479  * List was not found in cache, so we have to build it by reading the
1480  * relation. For each matching tuple found in the relation, use an
1481  * existing cache entry if possible, else build a new one.
1482  *
1483  * We have to bump the member refcounts temporarily to ensure they won't
1484  * get dropped from the cache while loading other members. We use a PG_TRY
1485  * block to ensure we can undo those refcounts if we get an error before
1486  * we finish constructing the CatCList.
1487  */
1489 
1490  ctlist = NIL;
1491 
1492  PG_TRY();
1493  {
1494  Relation relation;
1495  SysScanDesc scandesc;
1496 
1497  relation = heap_open(cache->cc_reloid, AccessShareLock);
1498 
1499  scandesc = systable_beginscan(relation,
1500  cache->cc_indexoid,
1501  IndexScanOK(cache, cur_skey),
1502  NULL,
1503  nkeys,
1504  cur_skey);
1505 
1506  /* The list will be ordered iff we are doing an index scan */
1507  ordered = (scandesc->irel != NULL);
1508 
1509  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1510  {
1511  uint32 hashValue;
1512  Index hashIndex;
1513  bool found = false;
1514  dlist_head *bucket;
1515 
1516  /*
1517  * See if there's an entry for this tuple already.
1518  */
1519  ct = NULL;
1520  hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1521  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1522 
1523  bucket = &cache->cc_bucket[hashIndex];
1524  dlist_foreach(iter, bucket)
1525  {
1526  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1527 
1528  if (ct->dead || ct->negative)
1529  continue; /* ignore dead and negative entries */
1530 
1531  if (ct->hash_value != hashValue)
1532  continue; /* quickly skip entry if wrong hash val */
1533 
1534  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1535  continue; /* not same tuple */
1536 
1537  /*
1538  * Found a match, but can't use it if it belongs to another
1539  * list already
1540  */
1541  if (ct->c_list)
1542  continue;
1543 
1544  found = true;
1545  break; /* A-OK */
1546  }
1547 
1548  if (!found)
1549  {
1550  /* We didn't find a usable entry, so make a new one */
1551  ct = CatalogCacheCreateEntry(cache, ntp,
1552  hashValue, hashIndex,
1553  false);
1554  }
1555 
1556  /* Careful here: add entry to ctlist, then bump its refcount */
1557  /* This way leaves state correct if lappend runs out of memory */
1558  ctlist = lappend(ctlist, ct);
1559  ct->refcount++;
1560  }
1561 
1562  systable_endscan(scandesc);
1563 
1564  heap_close(relation, AccessShareLock);
1565 
1566  /*
1567  * Now we can build the CatCList entry. First we need a dummy tuple
1568  * containing the key values...
1569  */
1570  ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1572  nmembers = list_length(ctlist);
1573  cl = (CatCList *)
1574  palloc(offsetof(CatCList, members) +nmembers * sizeof(CatCTup *));
1575  heap_copytuple_with_tuple(ntp, &cl->tuple);
1576  MemoryContextSwitchTo(oldcxt);
1577  heap_freetuple(ntp);
1578 
1579  /*
1580  * We are now past the last thing that could trigger an elog before we
1581  * have finished building the CatCList and remembering it in the
1582  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1583  * we'd better do so before we start marking the members as belonging
1584  * to the list.
1585  */
1586 
1587  }
1588  PG_CATCH();
1589  {
1590  foreach(ctlist_item, ctlist)
1591  {
1592  ct = (CatCTup *) lfirst(ctlist_item);
1593  Assert(ct->c_list == NULL);
1594  Assert(ct->refcount > 0);
1595  ct->refcount--;
1596  if (
1597 #ifndef CATCACHE_FORCE_RELEASE
1598  ct->dead &&
1599 #endif
1600  ct->refcount == 0 &&
1601  (ct->c_list == NULL || ct->c_list->refcount == 0))
1602  CatCacheRemoveCTup(cache, ct);
1603  }
1604 
1605  PG_RE_THROW();
1606  }
1607  PG_END_TRY();
1608 
1609  cl->cl_magic = CL_MAGIC;
1610  cl->my_cache = cache;
1611  cl->refcount = 0; /* for the moment */
1612  cl->dead = false;
1613  cl->ordered = ordered;
1614  cl->nkeys = nkeys;
1615  cl->hash_value = lHashValue;
1616  cl->n_members = nmembers;
1617 
1618  i = 0;
1619  foreach(ctlist_item, ctlist)
1620  {
1621  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1622  Assert(ct->c_list == NULL);
1623  ct->c_list = cl;
1624  /* release the temporary refcount on the member */
1625  Assert(ct->refcount > 0);
1626  ct->refcount--;
1627  /* mark list dead if any members already dead */
1628  if (ct->dead)
1629  cl->dead = true;
1630  }
1631  Assert(i == nmembers);
1632 
1633  dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1634 
1635  /* Finally, bump the list's refcount and return it */
1636  cl->refcount++;
1638 
1639  CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1640  cache->cc_relname, nmembers);
1641 
1642  return cl;
1643 }
1644 
1645 /*
1646  * ReleaseCatCacheList
1647  *
1648  * Decrement the reference count of a catcache list.
1649  */
1650 void
1652 {
1653  /* Safety checks to ensure we were handed a cache entry */
1654  Assert(list->cl_magic == CL_MAGIC);
1655  Assert(list->refcount > 0);
1656  list->refcount--;
1658 
1659  if (
1660 #ifndef CATCACHE_FORCE_RELEASE
1661  list->dead &&
1662 #endif
1663  list->refcount == 0)
1664  CatCacheRemoveCList(list->my_cache, list);
1665 }
1666 
1667 
1668 /*
1669  * CatalogCacheCreateEntry
1670  * Create a new CatCTup entry, copying the given HeapTuple and other
1671  * supplied data into it. The new entry initially has refcount 0.
1672  */
1673 static CatCTup *
1675  uint32 hashValue, Index hashIndex, bool negative)
1676 {
1677  CatCTup *ct;
1678  HeapTuple dtp;
1679  MemoryContext oldcxt;
1680 
1681  /*
1682  * If there are any out-of-line toasted fields in the tuple, expand them
1683  * in-line. This saves cycles during later use of the catcache entry, and
1684  * also protects us against the possibility of the toast tuples being
1685  * freed before we attempt to fetch them, in case of something using a
1686  * slightly stale catcache entry.
1687  */
1688  if (HeapTupleHasExternal(ntp))
1689  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1690  else
1691  dtp = ntp;
1692 
1693  /*
1694  * Allocate CatCTup header in cache memory, and copy the tuple there too.
1695  */
1697  ct = (CatCTup *) palloc(sizeof(CatCTup));
1698  heap_copytuple_with_tuple(dtp, &ct->tuple);
1699  MemoryContextSwitchTo(oldcxt);
1700 
1701  if (dtp != ntp)
1702  heap_freetuple(dtp);
1703 
1704  /*
1705  * Finish initializing the CatCTup header, and add it to the cache's
1706  * linked list and counts.
1707  */
1708  ct->ct_magic = CT_MAGIC;
1709  ct->my_cache = cache;
1710  ct->c_list = NULL;
1711  ct->refcount = 0; /* for the moment */
1712  ct->dead = false;
1713  ct->negative = negative;
1714  ct->hash_value = hashValue;
1715 
1716  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1717 
1718  cache->cc_ntup++;
1719  CacheHdr->ch_ntup++;
1720 
1721  /*
1722  * If the hash table has become too full, enlarge the buckets array. Quite
1723  * arbitrarily, we enlarge when fill factor > 2.
1724  */
1725  if (cache->cc_ntup > cache->cc_nbuckets * 2)
1726  RehashCatCache(cache);
1727 
1728  return ct;
1729 }
1730 
1731 /*
1732  * build_dummy_tuple
1733  * Generate a palloc'd HeapTuple that contains the specified key
1734  * columns, and NULLs for other columns.
1735  *
1736  * This is used to store the keys for negative cache entries and CatCList
1737  * entries, which don't have real tuples associated with them.
1738  */
1739 static HeapTuple
1740 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1741 {
1742  HeapTuple ntp;
1743  TupleDesc tupDesc = cache->cc_tupdesc;
1744  Datum *values;
1745  bool *nulls;
1746  Oid tupOid = InvalidOid;
1747  NameData tempNames[4];
1748  int i;
1749 
1750  values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1751  nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1752 
1753  memset(values, 0, tupDesc->natts * sizeof(Datum));
1754  memset(nulls, true, tupDesc->natts * sizeof(bool));
1755 
1756  for (i = 0; i < nkeys; i++)
1757  {
1758  int attindex = cache->cc_key[i];
1759  Datum keyval = skeys[i].sk_argument;
1760 
1761  if (attindex > 0)
1762  {
1763  /*
1764  * Here we must be careful in case the caller passed a C string
1765  * where a NAME is wanted: convert the given argument to a
1766  * correctly padded NAME. Otherwise the memcpy() done in
1767  * heap_form_tuple could fall off the end of memory.
1768  */
1769  if (cache->cc_isname[i])
1770  {
1771  Name newval = &tempNames[i];
1772 
1773  namestrcpy(newval, DatumGetCString(keyval));
1774  keyval = NameGetDatum(newval);
1775  }
1776  values[attindex - 1] = keyval;
1777  nulls[attindex - 1] = false;
1778  }
1779  else
1780  {
1781  Assert(attindex == ObjectIdAttributeNumber);
1782  tupOid = DatumGetObjectId(keyval);
1783  }
1784  }
1785 
1786  ntp = heap_form_tuple(tupDesc, values, nulls);
1787  if (tupOid != InvalidOid)
1788  HeapTupleSetOid(ntp, tupOid);
1789 
1790  pfree(values);
1791  pfree(nulls);
1792 
1793  return ntp;
1794 }
1795 
1796 
1797 /*
1798  * PrepareToInvalidateCacheTuple()
1799  *
1800  * This is part of a rather subtle chain of events, so pay attention:
1801  *
1802  * When a tuple is inserted or deleted, it cannot be flushed from the
1803  * catcaches immediately, for reasons explained at the top of cache/inval.c.
1804  * Instead we have to add entry(s) for the tuple to a list of pending tuple
1805  * invalidations that will be done at the end of the command or transaction.
1806  *
1807  * The lists of tuples that need to be flushed are kept by inval.c. This
1808  * routine is a helper routine for inval.c. Given a tuple belonging to
1809  * the specified relation, find all catcaches it could be in, compute the
1810  * correct hash value for each such catcache, and call the specified
1811  * function to record the cache id and hash value in inval.c's lists.
1812  * SysCacheInvalidate will be called later, if appropriate,
1813  * using the recorded information.
1814  *
1815  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1816  * For an update, we are called just once, with tuple being the old tuple
1817  * version and newtuple the new version. We should make two list entries
1818  * if the tuple's hash value changed, but only one if it didn't.
1819  *
1820  * Note that it is irrelevant whether the given tuple is actually loaded
1821  * into the catcache at the moment. Even if it's not there now, it might
1822  * be by the end of the command, or there might be a matching negative entry
1823  * to flush --- or other backends' caches might have such entries --- so
1824  * we have to make list entries to flush it later.
1825  *
1826  * Also note that it's not an error if there are no catcaches for the
1827  * specified relation. inval.c doesn't know exactly which rels have
1828  * catcaches --- it will call this routine for any tuple that's in a
1829  * system relation.
1830  */
1831 void
1833  HeapTuple tuple,
1834  HeapTuple newtuple,
1835  void (*function) (int, uint32, Oid))
1836 {
1837  slist_iter iter;
1838  Oid reloid;
1839 
1840  CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1841 
1842  /*
1843  * sanity checks
1844  */
1845  Assert(RelationIsValid(relation));
1846  Assert(HeapTupleIsValid(tuple));
1847  Assert(PointerIsValid(function));
1848  Assert(CacheHdr != NULL);
1849 
1850  reloid = RelationGetRelid(relation);
1851 
1852  /* ----------------
1853  * for each cache
1854  * if the cache contains tuples from the specified relation
1855  * compute the tuple's hash value(s) in this cache,
1856  * and call the passed function to register the information.
1857  * ----------------
1858  */
1859 
1860  slist_foreach(iter, &CacheHdr->ch_caches)
1861  {
1862  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
1863  uint32 hashvalue;
1864  Oid dbid;
1865 
1866  if (ccp->cc_reloid != reloid)
1867  continue;
1868 
1869  /* Just in case cache hasn't finished initialization yet... */
1870  if (ccp->cc_tupdesc == NULL)
1872 
1873  hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
1874  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1875 
1876  (*function) (ccp->id, hashvalue, dbid);
1877 
1878  if (newtuple)
1879  {
1880  uint32 newhashvalue;
1881 
1882  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1883 
1884  if (newhashvalue != hashvalue)
1885  (*function) (ccp->id, newhashvalue, dbid);
1886  }
1887  }
1888 }
1889 
1890 
1891 /*
1892  * Subroutines for warning about reference leaks. These are exported so
1893  * that resowner.c can call them.
1894  */
1895 void
1897 {
1898  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1899  offsetof(CatCTup, tuple));
1900 
1901  /* Safety check to ensure we were handed a cache entry */
1902  Assert(ct->ct_magic == CT_MAGIC);
1903 
1904  elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1905  ct->my_cache->cc_relname, ct->my_cache->id,
1906  ItemPointerGetBlockNumber(&(tuple->t_self)),
1908  ct->refcount);
1909 }
1910 
1911 void
1913 {
1914  elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1915  list->my_cache->cc_relname, list->my_cache->id,
1916  list, list->refcount);
1917 }
#define DatumGetUInt32(X)
Definition: postgres.h:492
#define NIL
Definition: pg_list.h:69
Oid sk_subtype
Definition: skey.h:69
Relation irel
Definition: relscan.h:155
#define REGCLASSOID
Definition: pg_type.h:577
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1084
void PrintCatCacheListLeakWarning(CatCList *list)
Definition: catcache.c:1912
#define NameGetDatum(X)
Definition: postgres.h:601
int n_members
Definition: catcache.h:154
void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:949
uint32 hash_value
Definition: catcache.h:152
Datum hashoid(PG_FUNCTION_ARGS)
Definition: hashfunc.c:82
#define DEBUG1
Definition: elog.h:25
#define NAMEOID
Definition: pg_type.h:300
dlist_node * cur
Definition: ilist.h:180
static void ResetCatalogCache(CatCache *cache)
Definition: catcache.c:584
uint32 hash_value
Definition: catcache.h:115
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:499
Datum hashname(PG_FUNCTION_ARGS)
Definition: hashfunc.c:144
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:858
Definition: syscache.h:36
CatCache * my_cache
Definition: catcache.h:82
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:524
void CatCacheInvalidate(CatCache *cache, uint32 hashValue)
Definition: catcache.c:443
#define CACHE3_elog(a, b, c, d)
Definition: catcache.c:66
static CatCacheHeader * CacheHdr
Definition: catcache.c:73
#define RelationGetDescr(relation)
Definition: rel.h:429
#define HASH_INDEX(h, sz)
Definition: catcache.c:49
#define REGROLEOID
Definition: pg_type.h:585
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:300
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:182
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, uint32 hashValue, Index hashIndex, bool negative)
Definition: catcache.c:1674
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:292
#define OIDOID
Definition: pg_type.h:328
#define TEXTOID
Definition: pg_type.h:324
Datum(* PGFunction)(FunctionCallInfo fcinfo)
Definition: fmgr.h:40
slist_node * cur
Definition: ilist.h:226
#define CT_MAGIC
Definition: catcache.h:81
#define dlist_foreach(iter, lhead)
Definition: ilist.h:507
ResourceOwner CurrentResourceOwner
Definition: resowner.c:138
#define DatumGetObjectId(X)
Definition: postgres.h:506
#define RelationGetForm(relation)
Definition: rel.h:411
char * pstrdup(const char *in)
Definition: mcxt.c:1077
regproc RegProcedure
Definition: c.h:395
Form_pg_attribute * attrs
Definition: tupdesc.h:74
void AtEOXact_CatCache(bool isCommit)
Definition: catcache.c:534
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
dlist_head * cc_bucket
Definition: catcache.h:55
#define AccessShareLock
Definition: lockdefs.h:36
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:574
#define INT4OID
Definition: pg_type.h:316
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:1832
slist_node cc_next
Definition: catcache.h:40
void ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner)
Definition: resowner.c:973
int id
Definition: catcache.h:39
void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:958
int cl_magic
Definition: catcache.h:122
bool dead
Definition: catcache.h:149
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:692
bool criticalSharedRelcachesBuilt
Definition: relcache.c:135
#define heap_close(r, l)
Definition: heapam.h:97
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:584
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1372
unsigned int Oid
Definition: postgres_ext.h:31
#define REGTYPEOID
Definition: pg_type.h:581
dlist_head cc_lists
Definition: catcache.h:54
int namestrcpy(Name name, const char *str)
Definition: name.c:217
#define REGOPEROID
Definition: pg_type.h:569
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:863
bool cc_isname[CATCACHE_MAXKEYS]
Definition: catcache.h:53
bool cc_relisshared
Definition: catcache.h:44
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:328
int natts
Definition: tupdesc.h:73
short nkeys
Definition: catcache.h:151
struct catclist * c_list
Definition: catcache.h:98
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:1651
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:155
Datum hashchar(PG_FUNCTION_ARGS)
Definition: hashfunc.c:44
Oid cc_indexoid
Definition: catcache.h:43
#define dlist_container(type, membername, ptr)
Definition: ilist.h:477
Form_pg_index rd_index
Definition: rel.h:159
#define OIDVECTOROID
Definition: pg_type.h:344
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:416
void pfree(void *pointer)
Definition: mcxt.c:950
static void slist_init(slist_head *head)
Definition: ilist.h:554
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
Definition: catcache.c:174
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:800
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
#define DatumGetCString(X)
Definition: postgres.h:572
Oid cc_reloid
Definition: catcache.h:42
int cc_key[CATCACHE_MAXKEYS]
Definition: catcache.h:49
int cc_nkeys
Definition: catcache.h:48
#define RelationIsValid(relation)
Definition: rel.h:390
#define FATAL
Definition: elog.h:52
StrategyNumber sk_strategy
Definition: skey.h:68
ItemPointerData t_self
Definition: htup.h:65
TupleDesc cc_tupdesc
Definition: catcache.h:45
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:152
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:165
int cc_ntup
Definition: catcache.h:46
Definition: c.h:493
#define DEBUG2
Definition: elog.h:24
#define INT2OID
Definition: pg_type.h:308
FmgrInfo sk_func
Definition: skey.h:71
#define REGDICTIONARYOID
Definition: pg_type.h:627
#define RelationGetRelationName(relation)
Definition: rel.h:437
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:187
unsigned int uint32
Definition: c.h:268
Datum hashint4(PG_FUNCTION_ARGS)
Definition: hashfunc.c:56
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:859
struct tupleDesc * TupleDesc
static void dlist_delete(dlist_node *node)
Definition: ilist.h:358
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:168
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:705
#define CACHE4_elog(a, b, c, d, e)
Definition: catcache.c:67
MemoryContext TopMemoryContext
Definition: mcxt.c:43
List * lappend(List *list, void *datum)
Definition: list.c:128
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:391
static void GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
Definition: catcache.c:105
#define WARNING
Definition: elog.h:40
dlist_node cache_elem
Definition: catcache.h:147
#define CL_MAGIC
Definition: catcache.h:123
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:354
#define slist_container(type, membername, ptr)
Definition: ilist.h:674
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:322
void * palloc0(Size size)
Definition: mcxt.c:878
static bool IndexScanOK(CatCache *cache, ScanKey cur_skey)
Definition: catcache.c:1025
uintptr_t Datum
Definition: postgres.h:372
void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
Definition: heaptuple.c:634
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1450
Oid MyDatabaseId
Definition: globals.c:76
Relation heap_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1284
CatCache * my_cache
Definition: catcache.h:124
void PrintCatCacheLeakWarning(HeapTuple tuple)
Definition: catcache.c:1896
dlist_node * cur
Definition: ilist.h:161
unsigned int Index
Definition: c.h:365
#define CHAROID
Definition: pg_type.h:296
#define CACHE1_elog(a, b)
Definition: catcache.c:64
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:742
#define InvalidOid
Definition: postgres_ext.h:36
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1303
slist_head ch_caches
Definition: catcache.h:161
void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:993
#define PG_CATCH()
Definition: elog.h:293
#define InitCatCache_DEBUG2
Definition: catcache.c:701
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:675
#define lfirst(lc)
Definition: pg_list.h:106
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:661
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
Definition: catcache.c:1740
int cc_nbuckets
Definition: catcache.h:47
void CreateCacheMemoryContext(void)
Definition: catcache.c:511
HeapTupleData tuple
Definition: catcache.h:153
static int list_length(const List *l)
Definition: pg_list.h:89
#define newval
int refcount
Definition: catcache.h:112
bool IsTransactionState(void)
Definition: xact.c:350
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:94
const char * cc_relname
Definition: catcache.h:41
#define PG_RE_THROW()
Definition: elog.h:314
#define BOOLOID
Definition: pg_type.h:288
void ResetCatalogCaches(void)
Definition: catcache.c:631
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
Definition: catcache.c:227
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:176
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:385
dlist_node cache_elem
Definition: catcache.h:89
#define REGCONFIGOID
Definition: pg_type.h:624
static Datum values[MAXATTR]
Definition: bootstrap.c:163
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:365
Datum hashint2(PG_FUNCTION_ARGS)
Definition: hashfunc.c:50
bool negative
Definition: catcache.h:114
#define slist_foreach(iter, lhead)
Definition: ilist.h:700
bool ordered
Definition: catcache.h:150
tuple list
Definition: sort-test.py:11
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1089
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void * palloc(Size size)
Definition: mcxt.c:849
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1375
Oid sk_collation
Definition: skey.h:70
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:136
int i
void * arg
HeapTupleData tuple
Definition: catcache.h:116
#define CACHE2_elog(a, b, c)
Definition: catcache.c:65
#define elog
Definition: elog.h:219
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:974
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:75
bool criticalRelcachesBuilt
Definition: relcache.c:129
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
#define HeapKeyTest(tuple, tupdesc, nkeys, keys, result)
Definition: valid.h:22
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1335
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:105
#define REGPROCEDUREOID
Definition: pg_type.h:565
int refcount
Definition: catcache.h:148
#define PG_TRY()
Definition: elog.h:284
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Definition: pg_list.h:45
#define PointerIsValid(pointer)
Definition: c.h:526
#define REGNAMESPACEOID
Definition: pg_type.h:589
PGFunction cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
Datum sk_argument
Definition: skey.h:72
#define RelationGetRelid(relation)
Definition: rel.h:417
void ResourceOwnerEnlargeCatCacheRefs(ResourceOwner owner)
Definition: resowner.c:938
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:151
#define PG_END_TRY()
Definition: elog.h:300
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:555
AttrNumber sk_attno
Definition: skey.h:67
#define REGOPERATOROID
Definition: pg_type.h:573
#define REGPROCOID
Definition: pg_type.h:320
int ct_magic
Definition: catcache.h:80
bool dead
Definition: catcache.h:113
MemoryContext CacheMemoryContext
Definition: mcxt.c:46
void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:984