PostgreSQL Source Code  git master
catcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  * System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include "access/genam.h"
18 #include "access/heaptoast.h"
19 #include "access/relscan.h"
20 #include "access/sysattr.h"
21 #include "access/table.h"
22 #include "access/xact.h"
23 #include "catalog/pg_collation.h"
24 #include "catalog/pg_operator.h"
25 #include "catalog/pg_type.h"
26 #include "common/hashfn.h"
27 #include "miscadmin.h"
28 #include "port/pg_bitutils.h"
29 #ifdef CATCACHE_STATS
30 #include "storage/ipc.h" /* for on_proc_exit */
31 #endif
32 #include "storage/lmgr.h"
33 #include "utils/builtins.h"
34 #include "utils/datum.h"
35 #include "utils/fmgroids.h"
36 #include "utils/inval.h"
37 #include "utils/memutils.h"
38 #include "utils/rel.h"
39 #include "utils/resowner_private.h"
40 #include "utils/syscache.h"
41 
42 
 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))


/*
 * variables, macros and other stuff
 */

/*
 * CACHE_elog() forwards to elog() only when CACHEDEBUG is defined, so the
 * tracing calls sprinkled through this file cost nothing in normal builds.
 */
#ifdef CACHEDEBUG
#define CACHE_elog(...) elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
65 
66 static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
67  int nkeys,
68  Datum v1, Datum v2,
69  Datum v3, Datum v4);
70 
72  int nkeys,
73  uint32 hashValue,
74  Index hashIndex,
75  Datum v1, Datum v2,
76  Datum v3, Datum v4);
77 
78 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
79  Datum v1, Datum v2, Datum v3, Datum v4);
80 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
81  HeapTuple tuple);
82 static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
83  const Datum *cachekeys,
84  const Datum *searchkeys);
85 
86 #ifdef CATCACHE_STATS
87 static void CatCachePrintStats(int code, Datum arg);
88 #endif
89 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
90 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
91 static void CatalogCacheInitializeCache(CatCache *cache);
94  uint32 hashValue, Index hashIndex,
95  bool negative);
96 
97 static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
98  Datum *keys);
99 static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
100  Datum *srckeys, Datum *dstkeys);
101 
102 
103 /*
104  * internal support functions
105  */
106 
107 /*
108  * Hash and equality functions for system types that are used as cache key
109  * fields. In some cases, we just call the regular SQL-callable functions for
110  * the appropriate data type, but that tends to be a little slow, and the
111  * speed of these functions is performance-critical. Therefore, for data
112  * types that frequently occur as catcache keys, we hard-code the logic here.
113  * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
114  * in certain cases (like int4) we can adopt a faster hash algorithm as well.
115  */
116 
117 static bool
119 {
120  return DatumGetChar(a) == DatumGetChar(b);
121 }
122 
123 static uint32
125 {
126  return murmurhash32((int32) DatumGetChar(datum));
127 }
128 
129 static bool
131 {
132  char *ca = NameStr(*DatumGetName(a));
133  char *cb = NameStr(*DatumGetName(b));
134 
135  return strncmp(ca, cb, NAMEDATALEN) == 0;
136 }
137 
138 static uint32
140 {
141  char *key = NameStr(*DatumGetName(datum));
142 
143  return hash_any((unsigned char *) key, strlen(key));
144 }
145 
146 static bool
148 {
149  return DatumGetInt16(a) == DatumGetInt16(b);
150 }
151 
152 static uint32
154 {
155  return murmurhash32((int32) DatumGetInt16(datum));
156 }
157 
158 static bool
160 {
161  return DatumGetInt32(a) == DatumGetInt32(b);
162 }
163 
164 static uint32
166 {
167  return murmurhash32((int32) DatumGetInt32(datum));
168 }
169 
170 static bool
172 {
173  /*
174  * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
175  * want to take the fast "deterministic" path in texteq().
176  */
177  return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
178 }
179 
180 static uint32
182 {
183  /* analogously here as in texteqfast() */
184  return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
185 }
186 
187 static bool
189 {
191 }
192 
193 static uint32
195 {
197 }
198 
199 /* Lookup support functions for a type. */
200 static void
201 GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
202 {
203  switch (keytype)
204  {
205  case BOOLOID:
206  *hashfunc = charhashfast;
207  *fasteqfunc = chareqfast;
208  *eqfunc = F_BOOLEQ;
209  break;
210  case CHAROID:
211  *hashfunc = charhashfast;
212  *fasteqfunc = chareqfast;
213  *eqfunc = F_CHAREQ;
214  break;
215  case NAMEOID:
216  *hashfunc = namehashfast;
217  *fasteqfunc = nameeqfast;
218  *eqfunc = F_NAMEEQ;
219  break;
220  case INT2OID:
221  *hashfunc = int2hashfast;
222  *fasteqfunc = int2eqfast;
223  *eqfunc = F_INT2EQ;
224  break;
225  case INT4OID:
226  *hashfunc = int4hashfast;
227  *fasteqfunc = int4eqfast;
228  *eqfunc = F_INT4EQ;
229  break;
230  case TEXTOID:
231  *hashfunc = texthashfast;
232  *fasteqfunc = texteqfast;
233  *eqfunc = F_TEXTEQ;
234  break;
235  case OIDOID:
236  case REGPROCOID:
237  case REGPROCEDUREOID:
238  case REGOPEROID:
239  case REGOPERATOROID:
240  case REGCLASSOID:
241  case REGTYPEOID:
242  case REGCOLLATIONOID:
243  case REGCONFIGOID:
244  case REGDICTIONARYOID:
245  case REGROLEOID:
246  case REGNAMESPACEOID:
247  *hashfunc = int4hashfast;
248  *fasteqfunc = int4eqfast;
249  *eqfunc = F_OIDEQ;
250  break;
251  case OIDVECTOROID:
252  *hashfunc = oidvectorhashfast;
253  *fasteqfunc = oidvectoreqfast;
254  *eqfunc = F_OIDVECTOREQ;
255  break;
256  default:
257  elog(FATAL, "type %u not supported as catcache key", keytype);
258  *hashfunc = NULL; /* keep compiler quiet */
259 
260  *eqfunc = InvalidOid;
261  break;
262  }
263 }
264 
265 /*
266  * CatalogCacheComputeHashValue
267  *
268  * Compute the hash value associated with a given set of lookup keys
269  */
270 static uint32
272  Datum v1, Datum v2, Datum v3, Datum v4)
273 {
274  uint32 hashValue = 0;
275  uint32 oneHash;
276  CCHashFN *cc_hashfunc = cache->cc_hashfunc;
277 
278  CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
279  cache->cc_relname, nkeys, cache);
280 
281  switch (nkeys)
282  {
283  case 4:
284  oneHash = (cc_hashfunc[3]) (v4);
285  hashValue ^= pg_rotate_left32(oneHash, 24);
286  /* FALLTHROUGH */
287  case 3:
288  oneHash = (cc_hashfunc[2]) (v3);
289  hashValue ^= pg_rotate_left32(oneHash, 16);
290  /* FALLTHROUGH */
291  case 2:
292  oneHash = (cc_hashfunc[1]) (v2);
293  hashValue ^= pg_rotate_left32(oneHash, 8);
294  /* FALLTHROUGH */
295  case 1:
296  oneHash = (cc_hashfunc[0]) (v1);
297  hashValue ^= oneHash;
298  break;
299  default:
300  elog(FATAL, "wrong number of hash keys: %d", nkeys);
301  break;
302  }
303 
304  return hashValue;
305 }
306 
307 /*
308  * CatalogCacheComputeTupleHashValue
309  *
310  * Compute the hash value associated with a given tuple to be cached
311  */
312 static uint32
314 {
315  Datum v1 = 0,
316  v2 = 0,
317  v3 = 0,
318  v4 = 0;
319  bool isNull = false;
320  int *cc_keyno = cache->cc_keyno;
321  TupleDesc cc_tupdesc = cache->cc_tupdesc;
322 
323  /* Now extract key fields from tuple, insert into scankey */
324  switch (nkeys)
325  {
326  case 4:
327  v4 = fastgetattr(tuple,
328  cc_keyno[3],
329  cc_tupdesc,
330  &isNull);
331  Assert(!isNull);
332  /* FALLTHROUGH */
333  case 3:
334  v3 = fastgetattr(tuple,
335  cc_keyno[2],
336  cc_tupdesc,
337  &isNull);
338  Assert(!isNull);
339  /* FALLTHROUGH */
340  case 2:
341  v2 = fastgetattr(tuple,
342  cc_keyno[1],
343  cc_tupdesc,
344  &isNull);
345  Assert(!isNull);
346  /* FALLTHROUGH */
347  case 1:
348  v1 = fastgetattr(tuple,
349  cc_keyno[0],
350  cc_tupdesc,
351  &isNull);
352  Assert(!isNull);
353  break;
354  default:
355  elog(FATAL, "wrong number of hash keys: %d", nkeys);
356  break;
357  }
358 
359  return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
360 }
361 
362 /*
363  * CatalogCacheCompareTuple
364  *
365  * Compare a tuple to the passed arguments.
366  */
367 static inline bool
368 CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
369  const Datum *cachekeys,
370  const Datum *searchkeys)
371 {
372  const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
373  int i;
374 
375  for (i = 0; i < nkeys; i++)
376  {
377  if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
378  return false;
379  }
380  return true;
381 }
382 
383 
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * on_proc_exit hook: log per-cache and aggregate catcache statistics at
 * backend exit.  Only compiled when CATCACHE_STATS is defined.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
440 
441 
442 /*
443  * CatCacheRemoveCTup
444  *
445  * Unlink and delete the given cache entry
446  *
447  * NB: if it is a member of a CatCList, the CatCList is deleted too.
448  * Both the cache entry and the list had better have zero refcount.
449  */
450 static void
452 {
453  Assert(ct->refcount == 0);
454  Assert(ct->my_cache == cache);
455 
456  if (ct->c_list)
457  {
458  /*
459  * The cleanest way to handle this is to call CatCacheRemoveCList,
460  * which will recurse back to me, and the recursive call will do the
461  * work. Set the "dead" flag to make sure it does recurse.
462  */
463  ct->dead = true;
464  CatCacheRemoveCList(cache, ct->c_list);
465  return; /* nothing left to do */
466  }
467 
468  /* delink from linked list */
469  dlist_delete(&ct->cache_elem);
470 
471  /*
472  * Free keys when we're dealing with a negative entry, normal entries just
473  * point into tuple, allocated together with the CatCTup.
474  */
475  if (ct->negative)
476  CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
477  cache->cc_keyno, ct->keys);
478 
479  pfree(ct);
480 
481  --cache->cc_ntup;
482  --CacheHdr->ch_ntup;
483 }
484 
485 /*
486  * CatCacheRemoveCList
487  *
488  * Unlink and delete the given cache list entry
489  *
490  * NB: any dead member entries that become unreferenced are deleted too.
491  */
492 static void
494 {
495  int i;
496 
497  Assert(cl->refcount == 0);
498  Assert(cl->my_cache == cache);
499 
500  /* delink from member tuples */
501  for (i = cl->n_members; --i >= 0;)
502  {
503  CatCTup *ct = cl->members[i];
504 
505  Assert(ct->c_list == cl);
506  ct->c_list = NULL;
507  /* if the member is dead and now has no references, remove it */
508  if (
509 #ifndef CATCACHE_FORCE_RELEASE
510  ct->dead &&
511 #endif
512  ct->refcount == 0)
513  CatCacheRemoveCTup(cache, ct);
514  }
515 
516  /* delink from linked list */
517  dlist_delete(&cl->cache_elem);
518 
519  /* free associated column data */
520  CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
521  cache->cc_keyno, cl->keys);
522 
523  pfree(cl);
524 }
525 
526 
527 /*
528  * CatCacheInvalidate
529  *
530  * Invalidate entries in the specified cache, given a hash value.
531  *
532  * We delete cache entries that match the hash value, whether positive
533  * or negative. We don't care whether the invalidation is the result
534  * of a tuple insertion or a deletion.
535  *
536  * We used to try to match positive cache entries by TID, but that is
537  * unsafe after a VACUUM FULL on a system catalog: an inval event could
538  * be queued before VACUUM FULL, and then processed afterwards, when the
539  * target tuple that has to be invalidated has a different TID than it
540  * did when the event was created. So now we just compare hash values and
541  * accept the small risk of unnecessary invalidations due to false matches.
542  *
543  * This routine is only quasi-public: it should only be used by inval.c.
544  */
545 void
547 {
548  Index hashIndex;
549  dlist_mutable_iter iter;
550 
551  CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
552 
553  /*
554  * We don't bother to check whether the cache has finished initialization
555  * yet; if not, there will be no entries in it so no problem.
556  */
557 
558  /*
559  * Invalidate *all* CatCLists in this cache; it's too hard to tell which
560  * searches might still be correct, so just zap 'em all.
561  */
562  dlist_foreach_modify(iter, &cache->cc_lists)
563  {
564  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
565 
566  if (cl->refcount > 0)
567  cl->dead = true;
568  else
569  CatCacheRemoveCList(cache, cl);
570  }
571 
572  /*
573  * inspect the proper hash bucket for tuple matches
574  */
575  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
576  dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
577  {
578  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
579 
580  if (hashValue == ct->hash_value)
581  {
582  if (ct->refcount > 0 ||
583  (ct->c_list && ct->c_list->refcount > 0))
584  {
585  ct->dead = true;
586  /* list, if any, was marked dead above */
587  Assert(ct->c_list == NULL || ct->c_list->dead);
588  }
589  else
590  CatCacheRemoveCTup(cache, ct);
591  CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
592 #ifdef CATCACHE_STATS
593  cache->cc_invals++;
594 #endif
595  /* could be multiple matches, so keep looking! */
596  }
597  }
598 }
599 
600 /* ----------------------------------------------------------------
601  * public functions
602  * ----------------------------------------------------------------
603  */
604 
605 
606 /*
607  * Standard routine for creating cache context if it doesn't exist yet
608  *
609  * There are a lot of places (probably far more than necessary) that check
610  * whether CacheMemoryContext exists yet and want to create it if not.
611  * We centralize knowledge of exactly how to create it here.
612  */
613 void
615 {
616  /*
617  * Purely for paranoia, check that context doesn't exist; caller probably
618  * did so already.
619  */
620  if (!CacheMemoryContext)
622  "CacheMemoryContext",
624 }
625 
626 
627 /*
628  * ResetCatalogCache
629  *
630  * Reset one catalog cache to empty.
631  *
632  * This is not very efficient if the target cache is nearly empty.
633  * However, it shouldn't need to be efficient; we don't invoke it often.
634  */
635 static void
637 {
638  dlist_mutable_iter iter;
639  int i;
640 
641  /* Remove each list in this cache, or at least mark it dead */
642  dlist_foreach_modify(iter, &cache->cc_lists)
643  {
644  CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
645 
646  if (cl->refcount > 0)
647  cl->dead = true;
648  else
649  CatCacheRemoveCList(cache, cl);
650  }
651 
652  /* Remove each tuple in this cache, or at least mark it dead */
653  for (i = 0; i < cache->cc_nbuckets; i++)
654  {
655  dlist_head *bucket = &cache->cc_bucket[i];
656 
657  dlist_foreach_modify(iter, bucket)
658  {
659  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
660 
661  if (ct->refcount > 0 ||
662  (ct->c_list && ct->c_list->refcount > 0))
663  {
664  ct->dead = true;
665  /* list, if any, was marked dead above */
666  Assert(ct->c_list == NULL || ct->c_list->dead);
667  }
668  else
669  CatCacheRemoveCTup(cache, ct);
670 #ifdef CATCACHE_STATS
671  cache->cc_invals++;
672 #endif
673  }
674  }
675 }
676 
677 /*
678  * ResetCatalogCaches
679  *
680  * Reset all caches when a shared cache inval event forces it
681  */
682 void
684 {
685  slist_iter iter;
686 
687  CACHE_elog(DEBUG2, "ResetCatalogCaches called");
688 
690  {
691  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
692 
693  ResetCatalogCache(cache);
694  }
695 
696  CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
697 }
698 
699 /*
700  * CatalogCacheFlushCatalog
701  *
702  * Flush all catcache entries that came from the specified system catalog.
703  * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
704  * tuples very likely now have different TIDs than before. (At one point
705  * we also tried to force re-execution of CatalogCacheInitializeCache for
706  * the cache(s) on that catalog. This is a bad idea since it leads to all
707  * kinds of trouble if a cache flush occurs while loading cache entries.
708  * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
709  * rather than relying on the relcache to keep a tupdesc for us. Of course
710  * this assumes the tupdesc of a cachable system table will not change...)
711  */
712 void
714 {
715  slist_iter iter;
716 
717  CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
718 
720  {
721  CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
722 
723  /* Does this cache store tuples of the target catalog? */
724  if (cache->cc_reloid == catId)
725  {
726  /* Yes, so flush all its contents */
727  ResetCatalogCache(cache);
728 
729  /* Tell inval.c to call syscache callbacks for this cache */
730  CallSyscacheCallbacks(cache->id, 0);
731  }
732  }
733 
734  CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
735 }
736 
737 /*
738  * InitCatCache
739  *
740  * This allocates and initializes a cache for a system catalog relation.
741  * Actually, the cache is only partially initialized to avoid opening the
742  * relation. The relation will be opened and the rest of the cache
743  * structure initialized on the first access.
744  */
745 #ifdef CACHEDEBUG
746 #define InitCatCache_DEBUG2 \
747 do { \
748  elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
749  cp->cc_reloid, cp->cc_indexoid, cp->id, \
750  cp->cc_nkeys, cp->cc_nbuckets); \
751 } while(0)
752 #else
753 #define InitCatCache_DEBUG2
754 #endif
755 
756 CatCache *
758  Oid reloid,
759  Oid indexoid,
760  int nkeys,
761  const int *key,
762  int nbuckets)
763 {
764  CatCache *cp;
765  MemoryContext oldcxt;
766  int i;
767 
768  /*
769  * nbuckets is the initial number of hash buckets to use in this catcache.
770  * It will be enlarged later if it becomes too full.
771  *
772  * nbuckets must be a power of two. We check this via Assert rather than
773  * a full runtime check because the values will be coming from constant
774  * tables.
775  *
776  * If you're confused by the power-of-two check, see comments in
777  * bitmapset.c for an explanation.
778  */
779  Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
780 
781  /*
782  * first switch to the cache context so our allocations do not vanish at
783  * the end of a transaction
784  */
785  if (!CacheMemoryContext)
787 
789 
790  /*
791  * if first time through, initialize the cache group header
792  */
793  if (CacheHdr == NULL)
794  {
797  CacheHdr->ch_ntup = 0;
798 #ifdef CATCACHE_STATS
799  /* set up to dump stats at backend exit */
800  on_proc_exit(CatCachePrintStats, 0);
801 #endif
802  }
803 
804  /*
805  * Allocate a new cache structure, aligning to a cacheline boundary
806  *
807  * Note: we rely on zeroing to initialize all the dlist headers correctly
808  */
811  cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
812 
813  /*
814  * initialize the cache's relation information for the relation
815  * corresponding to this cache, and initialize some of the new cache's
816  * other internal fields. But don't open the relation yet.
817  */
818  cp->id = id;
819  cp->cc_relname = "(not known yet)";
820  cp->cc_reloid = reloid;
821  cp->cc_indexoid = indexoid;
822  cp->cc_relisshared = false; /* temporary */
823  cp->cc_tupdesc = (TupleDesc) NULL;
824  cp->cc_ntup = 0;
825  cp->cc_nbuckets = nbuckets;
826  cp->cc_nkeys = nkeys;
827  for (i = 0; i < nkeys; ++i)
828  {
830  cp->cc_keyno[i] = key[i];
831  }
832 
833  /*
834  * new cache is initialized as far as we can go for now. print some
835  * debugging information, if appropriate.
836  */
838 
839  /*
840  * add completed cache to top of group header's list
841  */
843 
844  /*
845  * back to the old context before we return...
846  */
847  MemoryContextSwitchTo(oldcxt);
848 
849  return cp;
850 }
851 
852 /*
853  * Enlarge a catcache, doubling the number of buckets.
854  */
855 static void
857 {
858  dlist_head *newbucket;
859  int newnbuckets;
860  int i;
861 
862  elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
863  cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
864 
865  /* Allocate a new, larger, hash table. */
866  newnbuckets = cp->cc_nbuckets * 2;
867  newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
868 
869  /* Move all entries from old hash table to new. */
870  for (i = 0; i < cp->cc_nbuckets; i++)
871  {
872  dlist_mutable_iter iter;
873 
874  dlist_foreach_modify(iter, &cp->cc_bucket[i])
875  {
876  CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
877  int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
878 
879  dlist_delete(iter.cur);
880  dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
881  }
882  }
883 
884  /* Switch to the new array. */
885  pfree(cp->cc_bucket);
886  cp->cc_nbuckets = newnbuckets;
887  cp->cc_bucket = newbucket;
888 }
889 
890 /*
891  * CatalogCacheInitializeCache
892  *
893  * This function does final initialization of a catcache: obtain the tuple
894  * descriptor and set up the hash and equality function links. We assume
895  * that the relcache entry can be opened at this point!
896  */
897 #ifdef CACHEDEBUG
898 #define CatalogCacheInitializeCache_DEBUG1 \
899  elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
900  cache->cc_reloid)
901 
902 #define CatalogCacheInitializeCache_DEBUG2 \
903 do { \
904  if (cache->cc_keyno[i] > 0) { \
905  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
906  i+1, cache->cc_nkeys, cache->cc_keyno[i], \
907  TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
908  } else { \
909  elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
910  i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
911  } \
912 } while(0)
913 #else
914 #define CatalogCacheInitializeCache_DEBUG1
915 #define CatalogCacheInitializeCache_DEBUG2
916 #endif
917 
918 static void
920 {
921  Relation relation;
922  MemoryContext oldcxt;
923  TupleDesc tupdesc;
924  int i;
925 
927 
928  relation = table_open(cache->cc_reloid, AccessShareLock);
929 
930  /*
931  * switch to the cache context so our allocations do not vanish at the end
932  * of a transaction
933  */
934  Assert(CacheMemoryContext != NULL);
935 
937 
938  /*
939  * copy the relcache's tuple descriptor to permanent cache storage
940  */
941  tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
942 
943  /*
944  * save the relation's name and relisshared flag, too (cc_relname is used
945  * only for debugging purposes)
946  */
947  cache->cc_relname = pstrdup(RelationGetRelationName(relation));
948  cache->cc_relisshared = RelationGetForm(relation)->relisshared;
949 
950  /*
951  * return to the caller's memory context and close the rel
952  */
953  MemoryContextSwitchTo(oldcxt);
954 
955  table_close(relation, AccessShareLock);
956 
957  CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
958  cache->cc_relname, cache->cc_nkeys);
959 
960  /*
961  * initialize cache's key information
962  */
963  for (i = 0; i < cache->cc_nkeys; ++i)
964  {
965  Oid keytype;
966  RegProcedure eqfunc;
967 
969 
970  if (cache->cc_keyno[i] > 0)
971  {
972  Form_pg_attribute attr = TupleDescAttr(tupdesc,
973  cache->cc_keyno[i] - 1);
974 
975  keytype = attr->atttypid;
976  /* cache key columns should always be NOT NULL */
977  Assert(attr->attnotnull);
978  }
979  else
980  {
981  if (cache->cc_keyno[i] < 0)
982  elog(FATAL, "sys attributes are not supported in caches");
983  keytype = OIDOID;
984  }
985 
986  GetCCHashEqFuncs(keytype,
987  &cache->cc_hashfunc[i],
988  &eqfunc,
989  &cache->cc_fastequal[i]);
990 
991  /*
992  * Do equality-function lookup (we assume this won't need a catalog
993  * lookup for any supported type)
994  */
995  fmgr_info_cxt(eqfunc,
996  &cache->cc_skey[i].sk_func,
998 
999  /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1000  cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1001 
1002  /* Fill in sk_strategy as well --- always standard equality */
1004  cache->cc_skey[i].sk_subtype = InvalidOid;
1005  /* If a catcache key requires a collation, it must be C collation */
1006  cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1007 
1008  CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1009  cache->cc_relname, i, cache);
1010  }
1011 
1012  /*
1013  * mark this cache fully initialized
1014  */
1015  cache->cc_tupdesc = tupdesc;
1016 }
1017 
1018 /*
1019  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1020  *
1021  * One reason to call this routine is to ensure that the relcache has
1022  * created entries for all the catalogs and indexes referenced by catcaches.
1023  * Therefore, provide an option to open the index as well as fixing the
1024  * cache itself. An exception is the indexes on pg_am, which we don't use
1025  * (cf. IndexScanOK).
1026  */
1027 void
1028 InitCatCachePhase2(CatCache *cache, bool touch_index)
1029 {
1030  if (cache->cc_tupdesc == NULL)
1032 
1033  if (touch_index &&
1034  cache->id != AMOID &&
1035  cache->id != AMNAME)
1036  {
1037  Relation idesc;
1038 
1039  /*
1040  * We must lock the underlying catalog before opening the index to
1041  * avoid deadlock, since index_open could possibly result in reading
1042  * this same catalog, and if anyone else is exclusive-locking this
1043  * catalog and index they'll be doing it in that order.
1044  */
1046  idesc = index_open(cache->cc_indexoid, AccessShareLock);
1047 
1048  /*
1049  * While we've got the index open, let's check that it's unique (and
1050  * not just deferrable-unique, thank you very much). This is just to
1051  * catch thinkos in definitions of new catcaches, so we don't worry
1052  * about the pg_am indexes not getting tested.
1053  */
1054  Assert(idesc->rd_index->indisunique &&
1055  idesc->rd_index->indimmediate);
1056 
1057  index_close(idesc, AccessShareLock);
1059  }
1060 }
1061 
1062 
1063 /*
1064  * IndexScanOK
1065  *
1066  * This function checks for tuples that will be fetched by
1067  * IndexSupportInitialize() during relcache initialization for
1068  * certain system indexes that support critical syscaches.
1069  * We can't use an indexscan to fetch these, else we'll get into
1070  * infinite recursion. A plain heap scan will work, however.
1071  * Once we have completed relcache initialization (signaled by
1072  * criticalRelcachesBuilt), we don't have to worry anymore.
1073  *
1074  * Similarly, during backend startup we have to be able to use the
1075  * pg_authid, pg_auth_members and pg_database syscaches for
1076  * authentication even if we don't yet have relcache entries for those
1077  * catalogs' indexes.
1078  */
1079 static bool
1080 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1081 {
1082  switch (cache->id)
1083  {
1084  case INDEXRELID:
1085 
1086  /*
1087  * Rather than tracking exactly which indexes have to be loaded
1088  * before we can use indexscans (which changes from time to time),
1089  * just force all pg_index searches to be heap scans until we've
1090  * built the critical relcaches.
1091  */
1093  return false;
1094  break;
1095 
1096  case AMOID:
1097  case AMNAME:
1098 
1099  /*
1100  * Always do heap scans in pg_am, because it's so small there's
1101  * not much point in an indexscan anyway. We *must* do this when
1102  * initially building critical relcache entries, but we might as
1103  * well just always do it.
1104  */
1105  return false;
1106 
1107  case AUTHNAME:
1108  case AUTHOID:
1109  case AUTHMEMMEMROLE:
1110  case DATABASEOID:
1111 
1112  /*
1113  * Protect authentication lookups occurring before relcache has
1114  * collected entries for shared indexes.
1115  */
1117  return false;
1118  break;
1119 
1120  default:
1121  break;
1122  }
1123 
1124  /* Normal case, allow index scan */
1125  return true;
1126 }
1127 
1128 /*
1129  * SearchCatCache
1130  *
1131  * This call searches a system cache for a tuple, opening the relation
1132  * if necessary (on the first access to a particular cache).
1133  *
1134  * The result is NULL if not found, or a pointer to a HeapTuple in
1135  * the cache. The caller must not modify the tuple, and must call
1136  * ReleaseCatCache() when done with it.
1137  *
1138  * The search key values should be expressed as Datums of the key columns'
1139  * datatype(s). (Pass zeroes for any unused parameters.) As a special
1140  * exception, the passed-in key for a NAME column can be just a C string;
1141  * the caller need not go to the trouble of converting it to a fully
1142  * null-padded NAME.
1143  */
1144 HeapTuple
1146  Datum v1,
1147  Datum v2,
1148  Datum v3,
1149  Datum v4)
1150 {
1151  return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1152 }
1153 
1154 
1155 /*
1156  * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1157  * arguments. The compiler can inline the body and unroll loops, making them a
1158  * bit faster than SearchCatCache().
1159  */
1160 
1161 HeapTuple
1163  Datum v1)
1164 {
1165  return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1166 }
1167 
1168 
1169 HeapTuple
1171  Datum v1, Datum v2)
1172 {
1173  return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1174 }
1175 
1176 
1177 HeapTuple
1179  Datum v1, Datum v2, Datum v3)
1180 {
1181  return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1182 }
1183 
1184 
1185 HeapTuple
1187  Datum v1, Datum v2, Datum v3, Datum v4)
1188 {
1189  return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1190 }
1191 
1192 /*
1193  * Work-horse for SearchCatCache/SearchCatCacheN.
1194  */
1195 static inline HeapTuple
1197  int nkeys,
1198  Datum v1,
1199  Datum v2,
1200  Datum v3,
1201  Datum v4)
1202 {
1204  uint32 hashValue;
1205  Index hashIndex;
1206  dlist_iter iter;
1207  dlist_head *bucket;
1208  CatCTup *ct;
1209 
1210  /* Make sure we're in an xact, even if this ends up being a cache hit */
1212 
1213  Assert(cache->cc_nkeys == nkeys);
1214 
1215  /*
1216  * one-time startup overhead for each cache
1217  */
1218  if (unlikely(cache->cc_tupdesc == NULL))
1220 
1221 #ifdef CATCACHE_STATS
1222  cache->cc_searches++;
1223 #endif
1224 
1225  /* Initialize local parameter array */
1226  arguments[0] = v1;
1227  arguments[1] = v2;
1228  arguments[2] = v3;
1229  arguments[3] = v4;
1230 
1231  /*
1232  * find the hash bucket in which to look for the tuple
1233  */
1234  hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1235  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1236 
1237  /*
1238  * scan the hash bucket until we find a match or exhaust our tuples
1239  *
1240  * Note: it's okay to use dlist_foreach here, even though we modify the
1241  * dlist within the loop, because we don't continue the loop afterwards.
1242  */
1243  bucket = &cache->cc_bucket[hashIndex];
1244  dlist_foreach(iter, bucket)
1245  {
1246  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1247 
1248  if (ct->dead)
1249  continue; /* ignore dead entries */
1250 
1251  if (ct->hash_value != hashValue)
1252  continue; /* quickly skip entry if wrong hash val */
1253 
1254  if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1255  continue;
1256 
1257  /*
1258  * We found a match in the cache. Move it to the front of the list
1259  * for its hashbucket, in order to speed subsequent searches. (The
1260  * most frequently accessed elements in any hashbucket will tend to be
1261  * near the front of the hashbucket's list.)
1262  */
1263  dlist_move_head(bucket, &ct->cache_elem);
1264 
1265  /*
1266  * If it's a positive entry, bump its refcount and return it. If it's
1267  * negative, we can report failure to the caller.
1268  */
1269  if (!ct->negative)
1270  {
1272  ct->refcount++;
1274 
1275  CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1276  cache->cc_relname, hashIndex);
1277 
1278 #ifdef CATCACHE_STATS
1279  cache->cc_hits++;
1280 #endif
1281 
1282  return &ct->tuple;
1283  }
1284  else
1285  {
1286  CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1287  cache->cc_relname, hashIndex);
1288 
1289 #ifdef CATCACHE_STATS
1290  cache->cc_neg_hits++;
1291 #endif
1292 
1293  return NULL;
1294  }
1295  }
1296 
1297  return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1298 }
1299 
1300 /*
1301  * Search the actual catalogs, rather than the cache.
1302  *
1303  * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1304  * as small as possible. To avoid that effort being undone by a helpful
1305  * compiler, try to explicitly forbid inlining.
1306  */
1307 static pg_noinline HeapTuple
1309  int nkeys,
1310  uint32 hashValue,
1311  Index hashIndex,
1312  Datum v1,
1313  Datum v2,
1314  Datum v3,
1315  Datum v4)
1316 {
1317  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1318  Relation relation;
1319  SysScanDesc scandesc;
1320  HeapTuple ntp;
1321  CatCTup *ct;
1323 
1324  /* Initialize local parameter array */
1325  arguments[0] = v1;
1326  arguments[1] = v2;
1327  arguments[2] = v3;
1328  arguments[3] = v4;
1329 
1330  /*
1331  * Ok, need to make a lookup in the relation, copy the scankey and fill
1332  * out any per-call fields.
1333  */
1334  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1335  cur_skey[0].sk_argument = v1;
1336  cur_skey[1].sk_argument = v2;
1337  cur_skey[2].sk_argument = v3;
1338  cur_skey[3].sk_argument = v4;
1339 
1340  /*
1341  * Tuple was not found in cache, so we have to try to retrieve it directly
1342  * from the relation. If found, we will add it to the cache; if not
1343  * found, we will add a negative cache entry instead.
1344  *
1345  * NOTE: it is possible for recursive cache lookups to occur while reading
1346  * the relation --- for example, due to shared-cache-inval messages being
1347  * processed during table_open(). This is OK. It's even possible for one
1348  * of those lookups to find and enter the very same tuple we are trying to
1349  * fetch here. If that happens, we will enter a second copy of the tuple
1350  * into the cache. The first copy will never be referenced again, and
1351  * will eventually age out of the cache, so there's no functional problem.
1352  * This case is rare enough that it's not worth expending extra cycles to
1353  * detect.
1354  */
1355  relation = table_open(cache->cc_reloid, AccessShareLock);
1356 
1357  scandesc = systable_beginscan(relation,
1358  cache->cc_indexoid,
1359  IndexScanOK(cache, cur_skey),
1360  NULL,
1361  nkeys,
1362  cur_skey);
1363 
1364  ct = NULL;
1365 
1366  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1367  {
1368  ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1369  hashValue, hashIndex,
1370  false);
1371  /* immediately set the refcount to 1 */
1373  ct->refcount++;
1375  break; /* assume only one match */
1376  }
1377 
1378  systable_endscan(scandesc);
1379 
1380  table_close(relation, AccessShareLock);
1381 
1382  /*
1383  * If tuple was not found, we need to build a negative cache entry
1384  * containing a fake tuple. The fake tuple has the correct key columns,
1385  * but nulls everywhere else.
1386  *
1387  * In bootstrap mode, we don't build negative entries, because the cache
1388  * invalidation mechanism isn't alive and can't clear them if the tuple
1389  * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1390  * cache inval for that.)
1391  */
1392  if (ct == NULL)
1393  {
1395  return NULL;
1396 
1397  ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1398  hashValue, hashIndex,
1399  true);
1400 
1401  CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1402  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1403  CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1404  cache->cc_relname, hashIndex);
1405 
1406  /*
1407  * We are not returning the negative entry to the caller, so leave its
1408  * refcount zero.
1409  */
1410 
1411  return NULL;
1412  }
1413 
1414  CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1415  cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1416  CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1417  cache->cc_relname, hashIndex);
1418 
1419 #ifdef CATCACHE_STATS
1420  cache->cc_newloads++;
1421 #endif
1422 
1423  return &ct->tuple;
1424 }
1425 
1426 /*
1427  * ReleaseCatCache
1428  *
1429  * Decrement the reference count of a catcache entry (releasing the
1430  * hold grabbed by a successful SearchCatCache).
1431  *
1432  * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1433  * will be freed as soon as their refcount goes to zero. In combination
1434  * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1435  * to catch references to already-released catcache entries.
1436  */
1437 void
1439 {
1440  CatCTup *ct = (CatCTup *) (((char *) tuple) -
1441  offsetof(CatCTup, tuple));
1442 
1443  /* Safety checks to ensure we were handed a cache entry */
1444  Assert(ct->ct_magic == CT_MAGIC);
1445  Assert(ct->refcount > 0);
1446 
1447  ct->refcount--;
1449 
1450  if (
1451 #ifndef CATCACHE_FORCE_RELEASE
1452  ct->dead &&
1453 #endif
1454  ct->refcount == 0 &&
1455  (ct->c_list == NULL || ct->c_list->refcount == 0))
1456  CatCacheRemoveCTup(ct->my_cache, ct);
1457 }
1458 
1459 
1460 /*
1461  * GetCatCacheHashValue
1462  *
1463  * Compute the hash value for a given set of search keys.
1464  *
1465  * The reason for exposing this as part of the API is that the hash value is
1466  * exposed in cache invalidation operations, so there are places outside the
1467  * catcache code that need to be able to compute the hash values.
1468  */
1469 uint32
1471  Datum v1,
1472  Datum v2,
1473  Datum v3,
1474  Datum v4)
1475 {
1476  /*
1477  * one-time startup overhead for each cache
1478  */
1479  if (cache->cc_tupdesc == NULL)
1481 
1482  /*
1483  * calculate the hash value
1484  */
1485  return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1486 }
1487 
1488 
1489 /*
1490  * SearchCatCacheList
1491  *
1492  * Generate a list of all tuples matching a partial key (that is,
1493  * a key specifying just the first K of the cache's N key columns).
1494  *
1495  * It doesn't make any sense to specify all of the cache's key columns
1496  * here: since the key is unique, there could be at most one match, so
1497  * you ought to use SearchCatCache() instead. Hence this function takes
1498  * one fewer Datum argument than SearchCatCache() does.
1499  *
1500  * The caller must not modify the list object or the pointed-to tuples,
1501  * and must call ReleaseCatCacheList() when done with the list.
1502  */
1503 CatCList *
1505  int nkeys,
1506  Datum v1,
1507  Datum v2,
1508  Datum v3)
1509 {
1510  Datum v4 = 0; /* dummy last-column value */
1512  uint32 lHashValue;
1513  dlist_iter iter;
1514  CatCList *cl;
1515  CatCTup *ct;
1516  List *volatile ctlist;
1517  ListCell *ctlist_item;
1518  int nmembers;
1519  bool ordered;
1520  HeapTuple ntp;
1521  MemoryContext oldcxt;
1522  int i;
1523 
1524  /*
1525  * one-time startup overhead for each cache
1526  */
1527  if (cache->cc_tupdesc == NULL)
1529 
1530  Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1531 
1532 #ifdef CATCACHE_STATS
1533  cache->cc_lsearches++;
1534 #endif
1535 
1536  /* Initialize local parameter array */
1537  arguments[0] = v1;
1538  arguments[1] = v2;
1539  arguments[2] = v3;
1540  arguments[3] = v4;
1541 
1542  /*
1543  * compute a hash value of the given keys for faster search. We don't
1544  * presently divide the CatCList items into buckets, but this still lets
1545  * us skip non-matching items quickly most of the time.
1546  */
1547  lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1548 
1549  /*
1550  * scan the items until we find a match or exhaust our list
1551  *
1552  * Note: it's okay to use dlist_foreach here, even though we modify the
1553  * dlist within the loop, because we don't continue the loop afterwards.
1554  */
1555  dlist_foreach(iter, &cache->cc_lists)
1556  {
1557  cl = dlist_container(CatCList, cache_elem, iter.cur);
1558 
1559  if (cl->dead)
1560  continue; /* ignore dead entries */
1561 
1562  if (cl->hash_value != lHashValue)
1563  continue; /* quickly skip entry if wrong hash val */
1564 
1565  /*
1566  * see if the cached list matches our key.
1567  */
1568  if (cl->nkeys != nkeys)
1569  continue;
1570 
1571  if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1572  continue;
1573 
1574  /*
1575  * We found a matching list. Move the list to the front of the
1576  * cache's list-of-lists, to speed subsequent searches. (We do not
1577  * move the members to the fronts of their hashbucket lists, however,
1578  * since there's no point in that unless they are searched for
1579  * individually.)
1580  */
1581  dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1582 
1583  /* Bump the list's refcount and return it */
1585  cl->refcount++;
1587 
1588  CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1589  cache->cc_relname);
1590 
1591 #ifdef CATCACHE_STATS
1592  cache->cc_lhits++;
1593 #endif
1594 
1595  return cl;
1596  }
1597 
1598  /*
1599  * List was not found in cache, so we have to build it by reading the
1600  * relation. For each matching tuple found in the relation, use an
1601  * existing cache entry if possible, else build a new one.
1602  *
1603  * We have to bump the member refcounts temporarily to ensure they won't
1604  * get dropped from the cache while loading other members. We use a PG_TRY
1605  * block to ensure we can undo those refcounts if we get an error before
1606  * we finish constructing the CatCList.
1607  */
1609 
1610  ctlist = NIL;
1611 
1612  PG_TRY();
1613  {
1614  ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1615  Relation relation;
1616  SysScanDesc scandesc;
1617 
1618  /*
1619  * Ok, need to make a lookup in the relation, copy the scankey and
1620  * fill out any per-call fields.
1621  */
1622  memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1623  cur_skey[0].sk_argument = v1;
1624  cur_skey[1].sk_argument = v2;
1625  cur_skey[2].sk_argument = v3;
1626  cur_skey[3].sk_argument = v4;
1627 
1628  relation = table_open(cache->cc_reloid, AccessShareLock);
1629 
1630  scandesc = systable_beginscan(relation,
1631  cache->cc_indexoid,
1632  IndexScanOK(cache, cur_skey),
1633  NULL,
1634  nkeys,
1635  cur_skey);
1636 
1637  /* The list will be ordered iff we are doing an index scan */
1638  ordered = (scandesc->irel != NULL);
1639 
1640  while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1641  {
1642  uint32 hashValue;
1643  Index hashIndex;
1644  bool found = false;
1645  dlist_head *bucket;
1646 
1647  /*
1648  * See if there's an entry for this tuple already.
1649  */
1650  ct = NULL;
1651  hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1652  hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1653 
1654  bucket = &cache->cc_bucket[hashIndex];
1655  dlist_foreach(iter, bucket)
1656  {
1657  ct = dlist_container(CatCTup, cache_elem, iter.cur);
1658 
1659  if (ct->dead || ct->negative)
1660  continue; /* ignore dead and negative entries */
1661 
1662  if (ct->hash_value != hashValue)
1663  continue; /* quickly skip entry if wrong hash val */
1664 
1665  if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1666  continue; /* not same tuple */
1667 
1668  /*
1669  * Found a match, but can't use it if it belongs to another
1670  * list already
1671  */
1672  if (ct->c_list)
1673  continue;
1674 
1675  found = true;
1676  break; /* A-OK */
1677  }
1678 
1679  if (!found)
1680  {
1681  /* We didn't find a usable entry, so make a new one */
1682  ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1683  hashValue, hashIndex,
1684  false);
1685  }
1686 
1687  /* Careful here: add entry to ctlist, then bump its refcount */
1688  /* This way leaves state correct if lappend runs out of memory */
1689  ctlist = lappend(ctlist, ct);
1690  ct->refcount++;
1691  }
1692 
1693  systable_endscan(scandesc);
1694 
1695  table_close(relation, AccessShareLock);
1696 
1697  /* Now we can build the CatCList entry. */
1699  nmembers = list_length(ctlist);
1700  cl = (CatCList *)
1701  palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1702 
1703  /* Extract key values */
1704  CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
1705  arguments, cl->keys);
1706  MemoryContextSwitchTo(oldcxt);
1707 
1708  /*
1709  * We are now past the last thing that could trigger an elog before we
1710  * have finished building the CatCList and remembering it in the
1711  * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1712  * we'd better do so before we start marking the members as belonging
1713  * to the list.
1714  */
1715  }
1716  PG_CATCH();
1717  {
1718  foreach(ctlist_item, ctlist)
1719  {
1720  ct = (CatCTup *) lfirst(ctlist_item);
1721  Assert(ct->c_list == NULL);
1722  Assert(ct->refcount > 0);
1723  ct->refcount--;
1724  if (
1725 #ifndef CATCACHE_FORCE_RELEASE
1726  ct->dead &&
1727 #endif
1728  ct->refcount == 0 &&
1729  (ct->c_list == NULL || ct->c_list->refcount == 0))
1730  CatCacheRemoveCTup(cache, ct);
1731  }
1732 
1733  PG_RE_THROW();
1734  }
1735  PG_END_TRY();
1736 
1737  cl->cl_magic = CL_MAGIC;
1738  cl->my_cache = cache;
1739  cl->refcount = 0; /* for the moment */
1740  cl->dead = false;
1741  cl->ordered = ordered;
1742  cl->nkeys = nkeys;
1743  cl->hash_value = lHashValue;
1744  cl->n_members = nmembers;
1745 
1746  i = 0;
1747  foreach(ctlist_item, ctlist)
1748  {
1749  cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1750  Assert(ct->c_list == NULL);
1751  ct->c_list = cl;
1752  /* release the temporary refcount on the member */
1753  Assert(ct->refcount > 0);
1754  ct->refcount--;
1755  /* mark list dead if any members already dead */
1756  if (ct->dead)
1757  cl->dead = true;
1758  }
1759  Assert(i == nmembers);
1760 
1761  dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1762 
1763  /* Finally, bump the list's refcount and return it */
1764  cl->refcount++;
1766 
1767  CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1768  cache->cc_relname, nmembers);
1769 
1770  return cl;
1771 }
1772 
1773 /*
1774  * ReleaseCatCacheList
1775  *
1776  * Decrement the reference count of a catcache list.
1777  */
1778 void
1780 {
1781  /* Safety checks to ensure we were handed a cache entry */
1782  Assert(list->cl_magic == CL_MAGIC);
1783  Assert(list->refcount > 0);
1784  list->refcount--;
1786 
1787  if (
1788 #ifndef CATCACHE_FORCE_RELEASE
1789  list->dead &&
1790 #endif
1791  list->refcount == 0)
1792  CatCacheRemoveCList(list->my_cache, list);
1793 }
1794 
1795 
1796 /*
1797  * CatalogCacheCreateEntry
1798  * Create a new CatCTup entry, copying the given HeapTuple and other
1799  * supplied data into it. The new entry initially has refcount 0.
1800  */
1801 static CatCTup *
1803  uint32 hashValue, Index hashIndex,
1804  bool negative)
1805 {
1806  CatCTup *ct;
1807  HeapTuple dtp;
1808  MemoryContext oldcxt;
1809 
1810  /* negative entries have no tuple associated */
1811  if (ntp)
1812  {
1813  int i;
1814 
1815  Assert(!negative);
1816 
1817  /*
1818  * If there are any out-of-line toasted fields in the tuple, expand
1819  * them in-line. This saves cycles during later use of the catcache
1820  * entry, and also protects us against the possibility of the toast
1821  * tuples being freed before we attempt to fetch them, in case of
1822  * something using a slightly stale catcache entry.
1823  */
1824  if (HeapTupleHasExternal(ntp))
1825  dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1826  else
1827  dtp = ntp;
1828 
1829  /* Allocate memory for CatCTup and the cached tuple in one go */
1831 
1832  ct = (CatCTup *) palloc(sizeof(CatCTup) +
1833  MAXIMUM_ALIGNOF + dtp->t_len);
1834  ct->tuple.t_len = dtp->t_len;
1835  ct->tuple.t_self = dtp->t_self;
1836  ct->tuple.t_tableOid = dtp->t_tableOid;
1837  ct->tuple.t_data = (HeapTupleHeader)
1838  MAXALIGN(((char *) ct) + sizeof(CatCTup));
1839  /* copy tuple contents */
1840  memcpy((char *) ct->tuple.t_data,
1841  (const char *) dtp->t_data,
1842  dtp->t_len);
1843  MemoryContextSwitchTo(oldcxt);
1844 
1845  if (dtp != ntp)
1846  heap_freetuple(dtp);
1847 
1848  /* extract keys - they'll point into the tuple if not by-value */
1849  for (i = 0; i < cache->cc_nkeys; i++)
1850  {
1851  Datum atp;
1852  bool isnull;
1853 
1854  atp = heap_getattr(&ct->tuple,
1855  cache->cc_keyno[i],
1856  cache->cc_tupdesc,
1857  &isnull);
1858  Assert(!isnull);
1859  ct->keys[i] = atp;
1860  }
1861  }
1862  else
1863  {
1864  Assert(negative);
1866  ct = (CatCTup *) palloc(sizeof(CatCTup));
1867 
1868  /*
1869  * Store keys - they'll point into separately allocated memory if not
1870  * by-value.
1871  */
1872  CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
1873  arguments, ct->keys);
1874  MemoryContextSwitchTo(oldcxt);
1875  }
1876 
1877  /*
1878  * Finish initializing the CatCTup header, and add it to the cache's
1879  * linked list and counts.
1880  */
1881  ct->ct_magic = CT_MAGIC;
1882  ct->my_cache = cache;
1883  ct->c_list = NULL;
1884  ct->refcount = 0; /* for the moment */
1885  ct->dead = false;
1886  ct->negative = negative;
1887  ct->hash_value = hashValue;
1888 
1889  dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1890 
1891  cache->cc_ntup++;
1892  CacheHdr->ch_ntup++;
1893 
1894  /*
1895  * If the hash table has become too full, enlarge the buckets array. Quite
1896  * arbitrarily, we enlarge when fill factor > 2.
1897  */
1898  if (cache->cc_ntup > cache->cc_nbuckets * 2)
1899  RehashCatCache(cache);
1900 
1901  return ct;
1902 }
1903 
1904 /*
1905  * Helper routine that frees keys stored in the keys array.
1906  */
1907 static void
1908 CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
1909 {
1910  int i;
1911 
1912  for (i = 0; i < nkeys; i++)
1913  {
1914  int attnum = attnos[i];
1915  Form_pg_attribute att;
1916 
1917  /* system attribute are not supported in caches */
1918  Assert(attnum > 0);
1919 
1920  att = TupleDescAttr(tupdesc, attnum - 1);
1921 
1922  if (!att->attbyval)
1923  pfree(DatumGetPointer(keys[i]));
1924  }
1925 }
1926 
1927 /*
1928  * Helper routine that copies the keys in the srckeys array into the dstkeys
1929  * one, guaranteeing that the datums are fully allocated in the current memory
1930  * context.
1931  */
1932 static void
1933 CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
1934  Datum *srckeys, Datum *dstkeys)
1935 {
1936  int i;
1937 
1938  /*
1939  * XXX: memory and lookup performance could possibly be improved by
1940  * storing all keys in one allocation.
1941  */
1942 
1943  for (i = 0; i < nkeys; i++)
1944  {
1945  int attnum = attnos[i];
1946  Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1947  Datum src = srckeys[i];
1948  NameData srcname;
1949 
1950  /*
1951  * Must be careful in case the caller passed a C string where a NAME
1952  * is wanted: convert the given argument to a correctly padded NAME.
1953  * Otherwise the memcpy() done by datumCopy() could fall off the end
1954  * of memory.
1955  */
1956  if (att->atttypid == NAMEOID)
1957  {
1958  namestrcpy(&srcname, DatumGetCString(src));
1959  src = NameGetDatum(&srcname);
1960  }
1961 
1962  dstkeys[i] = datumCopy(src,
1963  att->attbyval,
1964  att->attlen);
1965  }
1966 }
1967 
1968 /*
1969  * PrepareToInvalidateCacheTuple()
1970  *
1971  * This is part of a rather subtle chain of events, so pay attention:
1972  *
1973  * When a tuple is inserted or deleted, it cannot be flushed from the
1974  * catcaches immediately, for reasons explained at the top of cache/inval.c.
1975  * Instead we have to add entry(s) for the tuple to a list of pending tuple
1976  * invalidations that will be done at the end of the command or transaction.
1977  *
1978  * The lists of tuples that need to be flushed are kept by inval.c. This
1979  * routine is a helper routine for inval.c. Given a tuple belonging to
1980  * the specified relation, find all catcaches it could be in, compute the
1981  * correct hash value for each such catcache, and call the specified
1982  * function to record the cache id and hash value in inval.c's lists.
1983  * SysCacheInvalidate will be called later, if appropriate,
1984  * using the recorded information.
1985  *
1986  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1987  * For an update, we are called just once, with tuple being the old tuple
1988  * version and newtuple the new version. We should make two list entries
1989  * if the tuple's hash value changed, but only one if it didn't.
1990  *
1991  * Note that it is irrelevant whether the given tuple is actually loaded
1992  * into the catcache at the moment. Even if it's not there now, it might
1993  * be by the end of the command, or there might be a matching negative entry
1994  * to flush --- or other backends' caches might have such entries --- so
1995  * we have to make list entries to flush it later.
1996  *
1997  * Also note that it's not an error if there are no catcaches for the
1998  * specified relation. inval.c doesn't know exactly which rels have
1999  * catcaches --- it will call this routine for any tuple that's in a
2000  * system relation.
2001  */
2002 void
2004  HeapTuple tuple,
2005  HeapTuple newtuple,
2006  void (*function) (int, uint32, Oid))
2007 {
2008  slist_iter iter;
2009  Oid reloid;
2010 
2011  CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2012 
2013  /*
2014  * sanity checks
2015  */
2016  Assert(RelationIsValid(relation));
2017  Assert(HeapTupleIsValid(tuple));
2018  Assert(PointerIsValid(function));
2019  Assert(CacheHdr != NULL);
2020 
2021  reloid = RelationGetRelid(relation);
2022 
2023  /* ----------------
2024  * for each cache
2025  * if the cache contains tuples from the specified relation
2026  * compute the tuple's hash value(s) in this cache,
2027  * and call the passed function to register the information.
2028  * ----------------
2029  */
2030 
2032  {
2033  CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2034  uint32 hashvalue;
2035  Oid dbid;
2036 
2037  if (ccp->cc_reloid != reloid)
2038  continue;
2039 
2040  /* Just in case cache hasn't finished initialization yet... */
2041  if (ccp->cc_tupdesc == NULL)
2043 
2044  hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2045  dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2046 
2047  (*function) (ccp->id, hashvalue, dbid);
2048 
2049  if (newtuple)
2050  {
2051  uint32 newhashvalue;
2052 
2053  newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2054 
2055  if (newhashvalue != hashvalue)
2056  (*function) (ccp->id, newhashvalue, dbid);
2057  }
2058  }
2059 }
2060 
2061 
2062 /*
2063  * Subroutines for warning about reference leaks. These are exported so
2064  * that resowner.c can call them.
2065  */
2066 void
2068 {
2069  CatCTup *ct = (CatCTup *) (((char *) tuple) -
2070  offsetof(CatCTup, tuple));
2071 
2072  /* Safety check to ensure we were handed a cache entry */
2073  Assert(ct->ct_magic == CT_MAGIC);
2074 
2075  elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
2076  ct->my_cache->cc_relname, ct->my_cache->id,
2077  ItemPointerGetBlockNumber(&(tuple->t_self)),
2079  ct->refcount);
2080 }
2081 
2082 void
2084 {
2085  elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
2086  list->my_cache->cc_relname, list->my_cache->id,
2087  list, list->refcount);
2088 }
#define AttributeNumberIsValid(attributeNumber)
Definition: attnum.h:34
#define NameStr(name)
Definition: c.h:735
unsigned int uint32
Definition: c.h:495
#define pg_noinline
Definition: c.h:239
#define MAXALIGN(LEN)
Definition: c.h:800
signed int int32
Definition: c.h:483
#define PointerIsValid(pointer)
Definition: c.h:752
regproc RegProcedure
Definition: c.h:639
#define unlikely(x)
Definition: c.h:300
unsigned int Index
Definition: c.h:603
static bool chareqfast(Datum a, Datum b)
Definition: catcache.c:118
HeapTuple SearchCatCache2(CatCache *cache, Datum v1, Datum v2)
Definition: catcache.c:1170
static bool int4eqfast(Datum a, Datum b)
Definition: catcache.c:159
HeapTuple SearchCatCache3(CatCache *cache, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1178
void ReleaseCatCacheList(CatCList *list)
Definition: catcache.c:1779
static void CatalogCacheInitializeCache(CatCache *cache)
Definition: catcache.c:919
void PrintCatCacheLeakWarning(HeapTuple tuple)
Definition: catcache.c:2067
static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache, int nkeys, uint32 hashValue, Index hashIndex, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1308
static bool int2eqfast(Datum a, Datum b)
Definition: catcache.c:147
static uint32 int4hashfast(Datum datum)
Definition: catcache.c:165
void PrintCatCacheListLeakWarning(CatCList *list)
Definition: catcache.c:2083
void InitCatCachePhase2(CatCache *cache, bool touch_index)
Definition: catcache.c:1028
void ResetCatalogCaches(void)
Definition: catcache.c:683
uint32 GetCatCacheHashValue(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1470
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Definition: catcache.c:451
static void RehashCatCache(CatCache *cp)
Definition: catcache.c:856
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
Definition: catcache.c:313
HeapTuple SearchCatCache4(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1186
static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments, uint32 hashValue, Index hashIndex, bool negative)
Definition: catcache.c:1802
#define CatalogCacheInitializeCache_DEBUG1
Definition: catcache.c:914
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *srckeys, Datum *dstkeys)
Definition: catcache.c:1933
static HeapTuple SearchCatCacheInternal(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1196
CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets)
Definition: catcache.c:757
CatCList * SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3)
Definition: catcache.c:1504
static CatCacheHeader * CacheHdr
Definition: catcache.c:64
static uint32 namehashfast(Datum datum)
Definition: catcache.c:139
void CreateCacheMemoryContext(void)
Definition: catcache.c:614
static void ResetCatalogCache(CatCache *cache)
Definition: catcache.c:636
void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, void(*function)(int, uint32, Oid))
Definition: catcache.c:2003
static void GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
Definition: catcache.c:201
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:271
static bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys, const Datum *cachekeys, const Datum *searchkeys)
Definition: catcache.c:368
void CatCacheInvalidate(CatCache *cache, uint32 hashValue)
Definition: catcache.c:546
static bool nameeqfast(Datum a, Datum b)
Definition: catcache.c:130
static uint32 charhashfast(Datum datum)
Definition: catcache.c:124
HeapTuple SearchCatCache1(CatCache *cache, Datum v1)
Definition: catcache.c:1162
#define InitCatCache_DEBUG2
Definition: catcache.c:753
static uint32 oidvectorhashfast(Datum datum)
Definition: catcache.c:194
static bool texteqfast(Datum a, Datum b)
Definition: catcache.c:171
#define CACHE_elog(...)
Definition: catcache.c:60
static bool oidvectoreqfast(Datum a, Datum b)
Definition: catcache.c:188
void CatalogCacheFlushCatalog(Oid catId)
Definition: catcache.c:713
static uint32 int2hashfast(Datum datum)
Definition: catcache.c:153
#define CatalogCacheInitializeCache_DEBUG2
Definition: catcache.c:915
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
Definition: catcache.c:1908
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl)
Definition: catcache.c:493
#define HASH_INDEX(h, sz)
Definition: catcache.c:50
static bool IndexScanOK(CatCache *cache, ScanKey cur_skey)
Definition: catcache.c:1080
static uint32 texthashfast(Datum datum)
Definition: catcache.c:181
void ReleaseCatCache(HeapTuple tuple)
Definition: catcache.c:1438
HeapTuple SearchCatCache(CatCache *cache, Datum v1, Datum v2, Datum v3, Datum v4)
Definition: catcache.c:1145
#define CT_MAGIC
Definition: catcache.h:89
uint32(* CCHashFN)(Datum datum)
Definition: catcache.h:39
#define CATCACHE_MAXKEYS
Definition: catcache.h:35
bool(* CCFastEqualFN)(Datum a, Datum b)
Definition: catcache.h:42
#define CL_MAGIC
Definition: catcache.h:160
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
#define PG_RE_THROW()
Definition: elog.h:411
#define FATAL
Definition: elog.h:41
#define PG_TRY(...)
Definition: elog.h:370
#define WARNING
Definition: elog.h:36
#define DEBUG2
Definition: elog.h:29
#define PG_END_TRY(...)
Definition: elog.h:395
#define DEBUG1
Definition: elog.h:30
#define PG_CATCH(...)
Definition: elog.h:380
#define MCXT_ALLOC_ZERO
Definition: fe_memutils.h:18
Datum DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:795
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
Datum DirectFunctionCall1Coll(PGFunction func, Oid collation, Datum arg1)
Definition: fmgr.c:775
#define DirectFunctionCall2(func, arg1, arg2)
Definition: fmgr.h:644
#define DirectFunctionCall1(func, arg1)
Definition: fmgr.h:642
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:599
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:506
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:387
Oid MyDatabaseId
Definition: globals.c:89
static uint32 murmurhash32(uint32 data)
Definition: hashfn.h:92
static Datum hash_any(const unsigned char *k, int keylen)
Definition: hashfn.h:31
Datum hashoidvector(PG_FUNCTION_ARGS)
Definition: hashfunc.c:234
Datum hashtext(PG_FUNCTION_ARGS)
Definition: hashfunc.c:269
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1426
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static Datum heap_getattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:792
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:671
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:749
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void slist_init(slist_head *head)
Definition: ilist.h:986
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:347
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:1006
#define slist_container(type, membername, ptr)
Definition: ilist.h:1106
static void dlist_move_head(dlist_head *head, dlist_node *node)
Definition: ilist.h:467
#define slist_foreach(iter, lhead)
Definition: ilist.h:1132
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:158
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:132
void CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
Definition: inval.c:1580
void on_proc_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:305
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int i
Definition: isn.c:73
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:35
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
Assert(fmt[strlen(fmt) - 1] !='\n')
List * lappend(List *list, void *datum)
Definition: list.c:338
void UnlockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:228
void LockRelationOid(Oid relid, LOCKMODE lockmode)
Definition: lmgr.c:109
#define AccessShareLock
Definition: lockdefs.h:36
char * pstrdup(const char *in)
Definition: mcxt.c:1644
void pfree(void *pointer)
Definition: mcxt.c:1456
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc0(Size size)
Definition: mcxt.c:1257
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1064
void * palloc_aligned(Size size, Size alignto, int flags)
Definition: mcxt.c:1446
MemoryContext CacheMemoryContext
Definition: mcxt.c:144
void * palloc(Size size)
Definition: mcxt.c:1226
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:153
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:414
void namestrcpy(Name name, const char *str)
Definition: name.c:233
Datum oidvectoreq(PG_FUNCTION_ARGS)
Definition: oid.c:347
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138
int16 attnum
Definition: pg_attribute.h:74
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
void * arg
static uint32 pg_rotate_left32(uint32 word, int n)
Definition: pg_bitutils.h:326
#define NAMEDATALEN
#define PG_CACHE_LINE_SIZE
#define lfirst(lc)
Definition: pg_list.h:172
static int list_length(const List *l)
Definition: pg_list.h:152
#define NIL
Definition: pg_list.h:68
static bool DatumGetBool(Datum X)
Definition: postgres.h:90
static Name DatumGetName(Datum X)
Definition: postgres.h:360
static char * DatumGetCString(Datum X)
Definition: postgres.h:335
uintptr_t Datum
Definition: postgres.h:64
static Datum NameGetDatum(const NameData *X)
Definition: postgres.h:373
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
static char DatumGetChar(Datum X)
Definition: postgres.h:112
static int16 DatumGetInt16(Datum X)
Definition: postgres.h:162
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:202
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
#define RelationGetForm(relation)
Definition: rel.h:498
#define RelationGetRelid(relation)
Definition: rel.h:504
#define RelationGetDescr(relation)
Definition: rel.h:530
#define RelationGetRelationName(relation)
Definition: rel.h:538
#define RelationIsValid(relation)
Definition: rel.h:477
bool criticalRelcachesBuilt
Definition: relcache.c:140
bool criticalSharedRelcachesBuilt
Definition: relcache.c:146
void ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:1110
ResourceOwner CurrentResourceOwner
Definition: resowner.c:147
void ResourceOwnerEnlargeCatCacheRefs(ResourceOwner owner)
Definition: resowner.c:1099
void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:1145
void ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner)
Definition: resowner.c:1134
void ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
Definition: resowner.c:1119
void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
Definition: resowner.c:1154
#define BTEqualStrategyNumber
Definition: stratnum.h:31
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
Definition: pg_list.h:54
Form_pg_index rd_index
Definition: rel.h:191
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_subtype
Definition: skey.h:69
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67
Relation irel
Definition: relscan.h:184
const char * cc_relname
Definition: catcache.h:57
CCHashFN cc_hashfunc[CATCACHE_MAXKEYS]
Definition: catcache.h:50
dlist_head * cc_bucket
Definition: catcache.h:49
slist_node cc_next
Definition: catcache.h:61
Oid cc_reloid
Definition: catcache.h:58
dlist_head cc_lists
Definition: catcache.h:54
int cc_nkeys
Definition: catcache.h:56
int cc_keyno[CATCACHE_MAXKEYS]
Definition: catcache.h:53
CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]
Definition: catcache.h:51
Oid cc_indexoid
Definition: catcache.h:59
int cc_nbuckets
Definition: catcache.h:47
bool cc_relisshared
Definition: catcache.h:60
int cc_ntup
Definition: catcache.h:55
ScanKeyData cc_skey[CATCACHE_MAXKEYS]
Definition: catcache.h:62
int id
Definition: catcache.h:46
TupleDesc cc_tupdesc
Definition: catcache.h:48
slist_head ch_caches
Definition: catcache.h:184
dlist_node cache_elem
Definition: catcache.h:164
int refcount
Definition: catcache.h:172
CatCache * my_cache
Definition: catcache.h:177
int cl_magic
Definition: catcache.h:159
bool dead
Definition: catcache.h:173
short nkeys
Definition: catcache.h:175
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:170
bool ordered
Definition: catcache.h:174
CatCTup * members[FLEXIBLE_ARRAY_MEMBER]
Definition: catcache.h:178
uint32 hash_value
Definition: catcache.h:162
int n_members
Definition: catcache.h:176
int ct_magic
Definition: catcache.h:88
int refcount
Definition: catcache.h:118
bool negative
Definition: catcache.h:120
dlist_node cache_elem
Definition: catcache.h:104
HeapTupleData tuple
Definition: catcache.h:121
CatCache * my_cache
Definition: catcache.h:132
struct catclist * c_list
Definition: catcache.h:130
Datum keys[CATCACHE_MAXKEYS]
Definition: catcache.h:97
bool dead
Definition: catcache.h:119
uint32 hash_value
Definition: catcache.h:91
dlist_node * cur
Definition: ilist.h:179
dlist_node * cur
Definition: ilist.h:200
Definition: c.h:730
slist_node * cur
Definition: ilist.h:259
@ AMNAME
Definition: syscache.h:35
@ AMOID
Definition: syscache.h:36
@ AUTHOID
Definition: syscache.h:45
@ INDEXRELID
Definition: syscache.h:66
@ AUTHNAME
Definition: syscache.h:44
@ AUTHMEMMEMROLE
Definition: syscache.h:42
@ DATABASEOID
Definition: syscache.h:55
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:151
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
Datum texteq(PG_FUNCTION_ARGS)
Definition: varlena.c:1616
bool IsTransactionState(void)
Definition: xact.c:378