PostgreSQL source code (doxygen-rendered copy, git master branch)
File: src/backend/utils/cache/typcache.c
Note: this rendering embeds original line numbers and may omit some source lines.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Core data extracted from the pg_type row is updated
27  * when we detect updates to pg_type. Information dependent on opclasses is
28  * cleared if we detect updates to pg_opclass. We also support clearing the
29  * tuple descriptor and operator/function parts of a rowtype's cache entry,
30  * since those may need to change as a consequence of ALTER TABLE. Domain
31  * constraint changes are also tracked properly.
32  *
33  *
34  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
35  * Portions Copyright (c) 1994, Regents of the University of California
36  *
37  * IDENTIFICATION
38  * src/backend/utils/cache/typcache.c
39  *
40  *-------------------------------------------------------------------------
41  */
42 #include "postgres.h"
43 
44 #include <limits.h>
45 
46 #include "access/hash.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/relation.h"
51 #include "access/session.h"
52 #include "access/table.h"
53 #include "catalog/indexing.h"
54 #include "catalog/pg_am.h"
55 #include "catalog/pg_constraint.h"
56 #include "catalog/pg_enum.h"
57 #include "catalog/pg_operator.h"
58 #include "catalog/pg_range.h"
59 #include "catalog/pg_type.h"
60 #include "commands/defrem.h"
61 #include "executor/executor.h"
62 #include "lib/dshash.h"
63 #include "optimizer/optimizer.h"
64 #include "storage/lwlock.h"
65 #include "utils/builtins.h"
66 #include "utils/catcache.h"
67 #include "utils/fmgroids.h"
68 #include "utils/inval.h"
69 #include "utils/lsyscache.h"
70 #include "utils/memutils.h"
71 #include "utils/rel.h"
72 #include "utils/snapmgr.h"
73 #include "utils/syscache.h"
74 #include "utils/typcache.h"
75 
76 
77 /* The main type cache hashtable searched by lookup_type_cache */
78 static HTAB *TypeCacheHash = NULL;
79 
80 /* List of type cache entries for domain types */
82 
83 /* Private flag bits in the TypeCacheEntry.flags field */
84 #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
85 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
86 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
87 #define TCFLAGS_CHECKED_EQ_OPR 0x000008
88 #define TCFLAGS_CHECKED_LT_OPR 0x000010
89 #define TCFLAGS_CHECKED_GT_OPR 0x000020
90 #define TCFLAGS_CHECKED_CMP_PROC 0x000040
91 #define TCFLAGS_CHECKED_HASH_PROC 0x000080
92 #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
93 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
94 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
95 #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
96 #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
97 #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
98 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
99 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
100 #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
101 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x020000
102 #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x040000
103 
104 /* The flags associated with equality/comparison/hashing are all but these: */
105 #define TCFLAGS_OPERATOR_FLAGS \
106  (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
107  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
108  TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
109 
110 /*
111  * Data stored about a domain type's constraints. Note that we do not create
112  * this struct for the common case of a constraint-less domain; we just set
113  * domainData to NULL to indicate that.
114  *
115  * Within a DomainConstraintCache, we store expression plan trees, but the
116  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
117  * When needed, expression evaluation nodes are built by flat-copying the
118  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
119  * Such a node tree is not part of the DomainConstraintCache, but is
120  * considered to belong to a DomainConstraintRef.
121  */
123 {
124  List *constraints; /* list of DomainConstraintState nodes */
125  MemoryContext dccContext; /* memory context holding all associated data */
126  long dccRefCount; /* number of references to this struct */
127 };
128 
129 /* Private information to support comparisons of enum values */
/*
 * One (enum value OID, sort position) pair.  Arrays of these are built by
 * load_enum_cache_data() and searched by find_enumitem() / enum_oid_cmp()
 * (see the prototypes below); presumably sort_order mirrors
 * pg_enum.enumsortorder -- confirm against load_enum_cache_data's body,
 * which is not visible in this chunk.
 */
130 typedef struct
131 {
132  Oid enum_oid; /* OID of one enum value */
133  float4 sort_order; /* its sort position */
134 } EnumItem;
135 
136 typedef struct TypeCacheEnumData
137 {
138  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
139  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
140  int num_values; /* total number of values in enum */
143 
144 /*
145  * We use a separate table for storing the definitions of non-anonymous
146  * record types. Once defined, a record type will be remembered for the
147  * life of the backend. Subsequent uses of the "same" record type (where
148  * sameness means equalTupleDescs) will refer to the existing table entry.
149  *
150  * Stored record types are remembered in a linear array of TupleDescs,
151  * which can be indexed quickly with the assigned typmod. There is also
152  * a hash table to speed searches for matching TupleDescs.
153  */
154 
155 typedef struct RecordCacheEntry
156 {
159 
160 /*
161  * To deal with non-anonymous record types that are exchanged by backends
162  * involved in a parallel query, we also need a shared version of the above.
163  */
165 {
166  /* A hash table for finding a matching TupleDesc. */
168  /* A hash table for finding a TupleDesc by typmod. */
170  /* A source of new record typmod numbers. */
172 };
173 
174 /*
175  * When using shared tuple descriptors as hash table keys we need a way to be
176  * able to search for an equal shared TupleDesc using a backend-local
177  * TupleDesc. So we use this type which can hold either, and hash and compare
178  * functions that know how to handle both.
179  */
180 typedef struct SharedRecordTableKey
181 {
182  union
183  {
186  } u;
187  bool shared;
189 
190 /*
191  * The shared version of RecordCacheEntry. This lets us look up a typmod
192  * using a TupleDesc which may be in local or shared memory.
193  */
195 {
198 
199 /*
200  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
201  * up a TupleDesc in shared memory using a typmod.
202  */
204 {
208 
209 /*
210  * A comparator function for SharedRecordTableKey.
211  */
212 static int
213 shared_record_table_compare(const void *a, const void *b, size_t size,
214  void *arg)
215 {
216  dsa_area *area = (dsa_area *) arg;
219  TupleDesc t1;
220  TupleDesc t2;
221 
222  if (k1->shared)
223  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
224  else
225  t1 = k1->u.local_tupdesc;
226 
227  if (k2->shared)
228  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
229  else
230  t2 = k2->u.local_tupdesc;
231 
232  return equalTupleDescs(t1, t2) ? 0 : 1;
233 }
234 
235 /*
236  * A hash function for SharedRecordTableKey.
237  */
238 static uint32
239 shared_record_table_hash(const void *a, size_t size, void *arg)
240 {
241  dsa_area *area = (dsa_area *) arg;
243  TupleDesc t;
244 
245  if (k->shared)
246  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
247  else
248  t = k->u.local_tupdesc;
249 
250  return hashTupleDesc(t);
251 }
252 
253 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
255  sizeof(SharedRecordTableKey), /* unused */
256  sizeof(SharedRecordTableEntry),
260 };
261 
262 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
264  sizeof(uint32),
265  sizeof(SharedTypmodTableEntry),
269 };
270 
271 /* hashtable for recognizing registered record types */
272 static HTAB *RecordCacheHash = NULL;
273 
274 /* arrays of info about registered record types, indexed by assigned typmod */
276 static uint64 *RecordIdentifierArray = NULL;
277 static int32 RecordCacheArrayLen = 0; /* allocated length of above arrays */
278 static int32 NextRecordTypmod = 0; /* number of entries used */
279 
280 /*
281  * Process-wide counter for generating unique tupledesc identifiers.
282  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
283  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
284  */
286 
287 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
288 static void load_rangetype_info(TypeCacheEntry *typentry);
289 static void load_domaintype_info(TypeCacheEntry *typentry);
290 static int dcs_cmp(const void *a, const void *b);
291 static void decr_dcc_refcount(DomainConstraintCache *dcc);
292 static void dccref_deletion_callback(void *arg);
294 static bool array_element_has_equality(TypeCacheEntry *typentry);
295 static bool array_element_has_compare(TypeCacheEntry *typentry);
296 static bool array_element_has_hashing(TypeCacheEntry *typentry);
298 static void cache_array_element_properties(TypeCacheEntry *typentry);
299 static bool record_fields_have_equality(TypeCacheEntry *typentry);
300 static bool record_fields_have_compare(TypeCacheEntry *typentry);
301 static void cache_record_field_properties(TypeCacheEntry *typentry);
302 static bool range_element_has_hashing(TypeCacheEntry *typentry);
304 static void cache_range_element_properties(TypeCacheEntry *typentry);
305 static void TypeCacheRelCallback(Datum arg, Oid relid);
306 static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
307 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
308 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
309 static void load_enum_cache_data(TypeCacheEntry *tcache);
310 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
311 static int enum_oid_cmp(const void *left, const void *right);
313  Datum datum);
315 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
316  uint32 typmod);
317 
318 
319 /*
320  * lookup_type_cache
321  *
322  * Fetch the type cache entry for the specified datatype, and make sure that
323  * all the fields requested by bits in 'flags' are valid.
324  *
325  * The result is never NULL --- we will ereport() if the passed type OID is
326  * invalid. Note however that we may fail to find one or more of the
327  * values requested by 'flags'; the caller needs to check whether the fields
328  * are InvalidOid or not.
329  */
331 lookup_type_cache(Oid type_id, int flags)
332 {
333  TypeCacheEntry *typentry;
334  bool found;
335 
336  if (TypeCacheHash == NULL)
337  {
338  /* First time through: initialize the hash table */
339  HASHCTL ctl;
340 
341  MemSet(&ctl, 0, sizeof(ctl));
342  ctl.keysize = sizeof(Oid);
343  ctl.entrysize = sizeof(TypeCacheEntry);
344  TypeCacheHash = hash_create("Type information cache", 64,
345  &ctl, HASH_ELEM | HASH_BLOBS);
346 
347  /* Also set up callbacks for SI invalidations */
352 
353  /* Also make sure CacheMemoryContext exists */
354  if (!CacheMemoryContext)
356  }
357 
358  /* Try to look up an existing entry */
359  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
360  (void *) &type_id,
361  HASH_FIND, NULL);
362  if (typentry == NULL)
363  {
364  /*
365  * If we didn't find one, we want to make one. But first look up the
366  * pg_type row, just to make sure we don't make a cache entry for an
367  * invalid type OID. If the type OID is not valid, present a
368  * user-facing error, since some code paths such as domain_in() allow
369  * this function to be reached with a user-supplied OID.
370  */
371  HeapTuple tp;
372  Form_pg_type typtup;
373 
374  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
375  if (!HeapTupleIsValid(tp))
376  ereport(ERROR,
377  (errcode(ERRCODE_UNDEFINED_OBJECT),
378  errmsg("type with OID %u does not exist", type_id)));
379  typtup = (Form_pg_type) GETSTRUCT(tp);
380  if (!typtup->typisdefined)
381  ereport(ERROR,
382  (errcode(ERRCODE_UNDEFINED_OBJECT),
383  errmsg("type \"%s\" is only a shell",
384  NameStr(typtup->typname))));
385 
386  /* Now make the typcache entry */
387  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
388  (void *) &type_id,
389  HASH_ENTER, &found);
390  Assert(!found); /* it wasn't there a moment ago */
391 
392  MemSet(typentry, 0, sizeof(TypeCacheEntry));
393 
394  /* These fields can never change, by definition */
395  typentry->type_id = type_id;
397  ObjectIdGetDatum(type_id));
398 
399  /* Keep this part in sync with the code below */
400  typentry->typlen = typtup->typlen;
401  typentry->typbyval = typtup->typbyval;
402  typentry->typalign = typtup->typalign;
403  typentry->typstorage = typtup->typstorage;
404  typentry->typtype = typtup->typtype;
405  typentry->typrelid = typtup->typrelid;
406  typentry->typelem = typtup->typelem;
407  typentry->typcollation = typtup->typcollation;
408  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
409 
410  /* If it's a domain, immediately thread it into the domain cache list */
411  if (typentry->typtype == TYPTYPE_DOMAIN)
412  {
413  typentry->nextDomain = firstDomainTypeEntry;
414  firstDomainTypeEntry = typentry;
415  }
416 
417  ReleaseSysCache(tp);
418  }
419  else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
420  {
421  /*
422  * We have an entry, but its pg_type row got changed, so reload the
423  * data obtained directly from pg_type.
424  */
425  HeapTuple tp;
426  Form_pg_type typtup;
427 
428  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
429  if (!HeapTupleIsValid(tp))
430  ereport(ERROR,
431  (errcode(ERRCODE_UNDEFINED_OBJECT),
432  errmsg("type with OID %u does not exist", type_id)));
433  typtup = (Form_pg_type) GETSTRUCT(tp);
434  if (!typtup->typisdefined)
435  ereport(ERROR,
436  (errcode(ERRCODE_UNDEFINED_OBJECT),
437  errmsg("type \"%s\" is only a shell",
438  NameStr(typtup->typname))));
439 
440  /*
441  * Keep this part in sync with the code above. Many of these fields
442  * shouldn't ever change, particularly typtype, but copy 'em anyway.
443  */
444  typentry->typlen = typtup->typlen;
445  typentry->typbyval = typtup->typbyval;
446  typentry->typalign = typtup->typalign;
447  typentry->typstorage = typtup->typstorage;
448  typentry->typtype = typtup->typtype;
449  typentry->typrelid = typtup->typrelid;
450  typentry->typelem = typtup->typelem;
451  typentry->typcollation = typtup->typcollation;
452  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
453 
454  ReleaseSysCache(tp);
455  }
456 
457  /*
458  * Look up opclasses if we haven't already and any dependent info is
459  * requested.
460  */
465  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
466  {
467  Oid opclass;
468 
469  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
470  if (OidIsValid(opclass))
471  {
472  typentry->btree_opf = get_opclass_family(opclass);
473  typentry->btree_opintype = get_opclass_input_type(opclass);
474  }
475  else
476  {
477  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
478  }
479 
480  /*
481  * Reset information derived from btree opclass. Note in particular
482  * that we'll redetermine the eq_opr even if we previously found one;
483  * this matters in case a btree opclass has been added to a type that
484  * previously had only a hash opclass.
485  */
486  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
491  }
492 
493  /*
494  * If we need to look up equality operator, and there's no btree opclass,
495  * force lookup of hash opclass.
496  */
497  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
498  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
499  typentry->btree_opf == InvalidOid)
500  flags |= TYPECACHE_HASH_OPFAMILY;
501 
506  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
507  {
508  Oid opclass;
509 
510  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
511  if (OidIsValid(opclass))
512  {
513  typentry->hash_opf = get_opclass_family(opclass);
514  typentry->hash_opintype = get_opclass_input_type(opclass);
515  }
516  else
517  {
518  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
519  }
520 
521  /*
522  * Reset information derived from hash opclass. We do *not* reset the
523  * eq_opr; if we already found one from the btree opclass, that
524  * decision is still good.
525  */
526  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
528  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
529  }
530 
531  /*
532  * Look for requested operators and functions, if we haven't already.
533  */
534  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
535  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
536  {
537  Oid eq_opr = InvalidOid;
538 
539  if (typentry->btree_opf != InvalidOid)
540  eq_opr = get_opfamily_member(typentry->btree_opf,
541  typentry->btree_opintype,
542  typentry->btree_opintype,
544  if (eq_opr == InvalidOid &&
545  typentry->hash_opf != InvalidOid)
546  eq_opr = get_opfamily_member(typentry->hash_opf,
547  typentry->hash_opintype,
548  typentry->hash_opintype,
550 
551  /*
552  * If the proposed equality operator is array_eq or record_eq, check
553  * to see if the element type or column types support equality. If
554  * not, array_eq or record_eq would fail at runtime, so we don't want
555  * to report that the type has equality. (We can omit similar
556  * checking for ranges because ranges can't be created in the first
557  * place unless their subtypes support equality.)
558  */
559  if (eq_opr == ARRAY_EQ_OP &&
560  !array_element_has_equality(typentry))
561  eq_opr = InvalidOid;
562  else if (eq_opr == RECORD_EQ_OP &&
563  !record_fields_have_equality(typentry))
564  eq_opr = InvalidOid;
565 
566  /* Force update of eq_opr_finfo only if we're changing state */
567  if (typentry->eq_opr != eq_opr)
568  typentry->eq_opr_finfo.fn_oid = InvalidOid;
569 
570  typentry->eq_opr = eq_opr;
571 
572  /*
573  * Reset info about hash functions whenever we pick up new info about
574  * equality operator. This is so we can ensure that the hash
575  * functions match the operator.
576  */
577  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
579  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
580  }
581  if ((flags & TYPECACHE_LT_OPR) &&
582  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
583  {
584  Oid lt_opr = InvalidOid;
585 
586  if (typentry->btree_opf != InvalidOid)
587  lt_opr = get_opfamily_member(typentry->btree_opf,
588  typentry->btree_opintype,
589  typentry->btree_opintype,
591 
592  /*
593  * As above, make sure array_cmp or record_cmp will succeed; but again
594  * we need no special check for ranges.
595  */
596  if (lt_opr == ARRAY_LT_OP &&
597  !array_element_has_compare(typentry))
598  lt_opr = InvalidOid;
599  else if (lt_opr == RECORD_LT_OP &&
600  !record_fields_have_compare(typentry))
601  lt_opr = InvalidOid;
602 
603  typentry->lt_opr = lt_opr;
604  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
605  }
606  if ((flags & TYPECACHE_GT_OPR) &&
607  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
608  {
609  Oid gt_opr = InvalidOid;
610 
611  if (typentry->btree_opf != InvalidOid)
612  gt_opr = get_opfamily_member(typentry->btree_opf,
613  typentry->btree_opintype,
614  typentry->btree_opintype,
616 
617  /*
618  * As above, make sure array_cmp or record_cmp will succeed; but again
619  * we need no special check for ranges.
620  */
621  if (gt_opr == ARRAY_GT_OP &&
622  !array_element_has_compare(typentry))
623  gt_opr = InvalidOid;
624  else if (gt_opr == RECORD_GT_OP &&
625  !record_fields_have_compare(typentry))
626  gt_opr = InvalidOid;
627 
628  typentry->gt_opr = gt_opr;
629  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
630  }
631  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
632  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
633  {
634  Oid cmp_proc = InvalidOid;
635 
636  if (typentry->btree_opf != InvalidOid)
637  cmp_proc = get_opfamily_proc(typentry->btree_opf,
638  typentry->btree_opintype,
639  typentry->btree_opintype,
640  BTORDER_PROC);
641 
642  /*
643  * As above, make sure array_cmp or record_cmp will succeed; but again
644  * we need no special check for ranges.
645  */
646  if (cmp_proc == F_BTARRAYCMP &&
647  !array_element_has_compare(typentry))
648  cmp_proc = InvalidOid;
649  else if (cmp_proc == F_BTRECORDCMP &&
650  !record_fields_have_compare(typentry))
651  cmp_proc = InvalidOid;
652 
653  /* Force update of cmp_proc_finfo only if we're changing state */
654  if (typentry->cmp_proc != cmp_proc)
655  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
656 
657  typentry->cmp_proc = cmp_proc;
658  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
659  }
661  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
662  {
663  Oid hash_proc = InvalidOid;
664 
665  /*
666  * We insist that the eq_opr, if one has been determined, match the
667  * hash opclass; else report there is no hash function.
668  */
669  if (typentry->hash_opf != InvalidOid &&
670  (!OidIsValid(typentry->eq_opr) ||
671  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
672  typentry->hash_opintype,
673  typentry->hash_opintype,
675  hash_proc = get_opfamily_proc(typentry->hash_opf,
676  typentry->hash_opintype,
677  typentry->hash_opintype,
679 
680  /*
681  * As above, make sure hash_array will succeed. We don't currently
682  * support hashing for composite types, but when we do, we'll need
683  * more logic here to check that case too.
684  */
685  if (hash_proc == F_HASH_ARRAY &&
686  !array_element_has_hashing(typentry))
687  hash_proc = InvalidOid;
688 
689  /*
690  * Likewise for hash_range.
691  */
692  if (hash_proc == F_HASH_RANGE &&
693  !range_element_has_hashing(typentry))
694  hash_proc = InvalidOid;
695 
696  /* Force update of hash_proc_finfo only if we're changing state */
697  if (typentry->hash_proc != hash_proc)
698  typentry->hash_proc_finfo.fn_oid = InvalidOid;
699 
700  typentry->hash_proc = hash_proc;
701  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
702  }
703  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
706  {
707  Oid hash_extended_proc = InvalidOid;
708 
709  /*
710  * We insist that the eq_opr, if one has been determined, match the
711  * hash opclass; else report there is no hash function.
712  */
713  if (typentry->hash_opf != InvalidOid &&
714  (!OidIsValid(typentry->eq_opr) ||
715  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
716  typentry->hash_opintype,
717  typentry->hash_opintype,
719  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
720  typentry->hash_opintype,
721  typentry->hash_opintype,
723 
724  /*
725  * As above, make sure hash_array_extended will succeed. We don't
726  * currently support hashing for composite types, but when we do,
727  * we'll need more logic here to check that case too.
728  */
729  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
731  hash_extended_proc = InvalidOid;
732 
733  /*
734  * Likewise for hash_range_extended.
735  */
736  if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
738  hash_extended_proc = InvalidOid;
739 
740  /* Force update of proc finfo only if we're changing state */
741  if (typentry->hash_extended_proc != hash_extended_proc)
743 
744  typentry->hash_extended_proc = hash_extended_proc;
746  }
747 
748  /*
749  * Set up fmgr lookup info as requested
750  *
751  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
752  * which is not quite right (they're really in the hash table's private
753  * memory context) but this will do for our purposes.
754  *
755  * Note: the code above avoids invalidating the finfo structs unless the
756  * referenced operator/function OID actually changes. This is to prevent
757  * unnecessary leakage of any subsidiary data attached to an finfo, since
758  * that would cause session-lifespan memory leaks.
759  */
760  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
761  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
762  typentry->eq_opr != InvalidOid)
763  {
764  Oid eq_opr_func;
765 
766  eq_opr_func = get_opcode(typentry->eq_opr);
767  if (eq_opr_func != InvalidOid)
768  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
770  }
771  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
772  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
773  typentry->cmp_proc != InvalidOid)
774  {
775  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
777  }
778  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
779  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
780  typentry->hash_proc != InvalidOid)
781  {
782  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
784  }
785  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
787  typentry->hash_extended_proc != InvalidOid)
788  {
790  &typentry->hash_extended_proc_finfo,
792  }
793 
794  /*
795  * If it's a composite type (row type), get tupdesc if requested
796  */
797  if ((flags & TYPECACHE_TUPDESC) &&
798  typentry->tupDesc == NULL &&
799  typentry->typtype == TYPTYPE_COMPOSITE)
800  {
801  load_typcache_tupdesc(typentry);
802  }
803 
804  /*
805  * If requested, get information about a range type
806  *
807  * This includes making sure that the basic info about the range element
808  * type is up-to-date.
809  */
810  if ((flags & TYPECACHE_RANGE_INFO) &&
811  typentry->typtype == TYPTYPE_RANGE)
812  {
813  if (typentry->rngelemtype == NULL)
814  load_rangetype_info(typentry);
815  else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
816  (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
817  }
818 
819  /*
820  * If requested, get information about a domain type
821  */
822  if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
823  typentry->domainBaseType == InvalidOid &&
824  typentry->typtype == TYPTYPE_DOMAIN)
825  {
826  typentry->domainBaseTypmod = -1;
827  typentry->domainBaseType =
828  getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
829  }
830  if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
831  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
832  typentry->typtype == TYPTYPE_DOMAIN)
833  {
834  load_domaintype_info(typentry);
835  }
836 
837  return typentry;
838 }
839 
840 /*
841  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
842  */
843 static void
845 {
846  Relation rel;
847 
848  if (!OidIsValid(typentry->typrelid)) /* should not happen */
849  elog(ERROR, "invalid typrelid for composite type %u",
850  typentry->type_id);
851  rel = relation_open(typentry->typrelid, AccessShareLock);
852  Assert(rel->rd_rel->reltype == typentry->type_id);
853 
854  /*
855  * Link to the tupdesc and increment its refcount (we assert it's a
856  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
857  * because the reference mustn't be entered in the current resource owner;
858  * it can outlive the current query.
859  */
860  typentry->tupDesc = RelationGetDescr(rel);
861 
862  Assert(typentry->tupDesc->tdrefcount > 0);
863  typentry->tupDesc->tdrefcount++;
864 
865  /*
866  * In future, we could take some pains to not change tupDesc_identifier if
867  * the tupdesc didn't really change; but for now it's not worth it.
868  */
870 
872 }
873 
874 /*
875  * load_rangetype_info --- helper routine to set up range type information
876  */
877 static void
879 {
880  Form_pg_range pg_range;
881  HeapTuple tup;
882  Oid subtypeOid;
883  Oid opclassOid;
884  Oid canonicalOid;
885  Oid subdiffOid;
886  Oid opfamilyOid;
887  Oid opcintype;
888  Oid cmpFnOid;
889 
890  /* get information from pg_range */
892  /* should not fail, since we already checked typtype ... */
893  if (!HeapTupleIsValid(tup))
894  elog(ERROR, "cache lookup failed for range type %u",
895  typentry->type_id);
896  pg_range = (Form_pg_range) GETSTRUCT(tup);
897 
898  subtypeOid = pg_range->rngsubtype;
899  typentry->rng_collation = pg_range->rngcollation;
900  opclassOid = pg_range->rngsubopc;
901  canonicalOid = pg_range->rngcanonical;
902  subdiffOid = pg_range->rngsubdiff;
903 
904  ReleaseSysCache(tup);
905 
906  /* get opclass properties and look up the comparison function */
907  opfamilyOid = get_opclass_family(opclassOid);
908  opcintype = get_opclass_input_type(opclassOid);
909 
910  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
911  BTORDER_PROC);
912  if (!RegProcedureIsValid(cmpFnOid))
913  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
914  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
915 
916  /* set up cached fmgrinfo structs */
917  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
919  if (OidIsValid(canonicalOid))
920  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
922  if (OidIsValid(subdiffOid))
923  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
925 
926  /* Lastly, set up link to the element type --- this marks data valid */
927  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
928 }
929 
930 
931 /*
932  * load_domaintype_info --- helper routine to set up domain constraint info
933  *
934  * Note: we assume we're called in a relatively short-lived context, so it's
935  * okay to leak data into the current context while scanning pg_constraint.
936  * We build the new DomainConstraintCache data in a context underneath
937  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
938  * complete.
939  */
940 static void
942 {
943  Oid typeOid = typentry->type_id;
945  bool notNull = false;
946  DomainConstraintState **ccons;
947  int cconslen;
948  Relation conRel;
949  MemoryContext oldcxt;
950 
951  /*
952  * If we're here, any existing constraint info is stale, so release it.
953  * For safety, be sure to null the link before trying to delete the data.
954  */
955  if (typentry->domainData)
956  {
957  dcc = typentry->domainData;
958  typentry->domainData = NULL;
959  decr_dcc_refcount(dcc);
960  }
961 
962  /*
963  * We try to optimize the common case of no domain constraints, so don't
964  * create the dcc object and context until we find a constraint. Likewise
965  * for the temp sorting array.
966  */
967  dcc = NULL;
968  ccons = NULL;
969  cconslen = 0;
970 
971  /*
972  * Scan pg_constraint for relevant constraints. We want to find
973  * constraints for not just this domain, but any ancestor domains, so the
974  * outer loop crawls up the domain stack.
975  */
976  conRel = table_open(ConstraintRelationId, AccessShareLock);
977 
978  for (;;)
979  {
980  HeapTuple tup;
981  HeapTuple conTup;
982  Form_pg_type typTup;
983  int nccons = 0;
984  ScanKeyData key[1];
985  SysScanDesc scan;
986 
 /* Fetch the pg_type row for the current level of the domain stack */
987  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
988  if (!HeapTupleIsValid(tup))
989  elog(ERROR, "cache lookup failed for type %u", typeOid);
990  typTup = (Form_pg_type) GETSTRUCT(tup);
991 
992  if (typTup->typtype != TYPTYPE_DOMAIN)
993  {
994  /* Not a domain, so done */
995  ReleaseSysCache(tup);
996  break;
997  }
998 
999  /* Test for NOT NULL Constraint */
1000  if (typTup->typnotnull)
1001  notNull = true;
1002 
1003  /* Look for CHECK Constraints on this domain */
1004  ScanKeyInit(&key[0],
1005  Anum_pg_constraint_contypid,
1006  BTEqualStrategyNumber, F_OIDEQ,
1007  ObjectIdGetDatum(typeOid));
1008 
1009  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1010  NULL, 1, key);
1011 
1012  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1013  {
1015  Datum val;
1016  bool isNull;
1017  char *constring;
1018  Expr *check_expr;
1020 
1021  /* Ignore non-CHECK constraints (presently, shouldn't be any) */
1022  if (c->contype != CONSTRAINT_CHECK)
1023  continue;
1024 
1025  /* Not expecting conbin to be NULL, but we'll test for it anyway */
1026  val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1027  conRel->rd_att, &isNull);
1028  if (isNull)
1029  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1030  NameStr(typTup->typname), NameStr(c->conname));
1031 
1032  /* Convert conbin to C string in caller context */
1033  constring = TextDatumGetCString(val);
1034 
1035  /* Create the DomainConstraintCache object and context if needed */
1036  if (dcc == NULL)
1037  {
1038  MemoryContext cxt;
1039 
1041  "Domain constraints",
1043  dcc = (DomainConstraintCache *)
1045  dcc->constraints = NIL;
1046  dcc->dccContext = cxt;
 /* refcount starts at zero; the typcache's own reference is counted at the end */
1047  dcc->dccRefCount = 0;
1048  }
1049 
1050  /* Create node trees in DomainConstraintCache's context */
1051  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1052 
1053  check_expr = (Expr *) stringToNode(constring);
1054 
1055  /*
1056  * Plan the expression, since ExecInitExpr will expect that.
1057  *
1058  * Note: caching the result of expression_planner() is not very
1059  * good practice. Ideally we'd use a CachedExpression here so
1060  * that we would react promptly to, eg, changes in inlined
1061  * functions. However, because we don't support mutable domain
1062  * CHECK constraints, it's not really clear that it's worth the
1063  * extra overhead to do that.
1064  */
1065  check_expr = expression_planner(check_expr);
1066 
1069  r->name = pstrdup(NameStr(c->conname));
1070  r->check_expr = check_expr;
 /* exprstate is built per-ref later, by prep_domain_constraints() */
1071  r->check_exprstate = NULL;
1072 
1073  MemoryContextSwitchTo(oldcxt);
1074 
1075  /* Accumulate constraints in an array, for sorting below */
1076  if (ccons == NULL)
1077  {
1078  cconslen = 8;
1079  ccons = (DomainConstraintState **)
1080  palloc(cconslen * sizeof(DomainConstraintState *));
1081  }
1082  else if (nccons >= cconslen)
1083  {
1084  cconslen *= 2;
1085  ccons = (DomainConstraintState **)
1086  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1087  }
1088  ccons[nccons++] = r;
1089  }
1090 
1091  systable_endscan(scan);
1092 
1093  if (nccons > 0)
1094  {
1095  /*
1096  * Sort the items for this domain, so that CHECKs are applied in a
1097  * deterministic order.
1098  */
1099  if (nccons > 1)
1100  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1101 
1102  /*
1103  * Now attach them to the overall list. Use lcons() here because
1104  * constraints of parent domains should be applied earlier.
1105  */
1106  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
 /* consume the sorted array back-to-front so the list ends up in name order */
1107  while (nccons > 0)
1108  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1109  MemoryContextSwitchTo(oldcxt);
1110  }
1111 
1112  /* loop to next domain in stack */
1113  typeOid = typTup->typbasetype;
1114  ReleaseSysCache(tup);
1115  }
1116 
1117  table_close(conRel, AccessShareLock);
1118 
1119  /*
1120  * Only need to add one NOT NULL check regardless of how many domains in
1121  * the stack request it.
1122  */
1123  if (notNull)
1124  {
1126 
1127  /* Create the DomainConstraintCache object and context if needed */
1128  if (dcc == NULL)
1129  {
1130  MemoryContext cxt;
1131 
1133  "Domain constraints",
1135  dcc = (DomainConstraintCache *)
1137  dcc->constraints = NIL;
1138  dcc->dccContext = cxt;
1139  dcc->dccRefCount = 0;
1140  }
1141 
1142  /* Create node trees in DomainConstraintCache's context */
1143  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1144 
1146 
1148  r->name = pstrdup("NOT NULL");
 /* NOT NULL is checked specially; it carries no expression tree */
1149  r->check_expr = NULL;
1150  r->check_exprstate = NULL;
1151 
1152  /* lcons to apply the nullness check FIRST */
1153  dcc->constraints = lcons(r, dcc->constraints);
1154 
1155  MemoryContextSwitchTo(oldcxt);
1156  }
1157 
1158  /*
1159  * If we made a constraint object, move it into CacheMemoryContext and
1160  * attach it to the typcache entry.
1161  */
1162  if (dcc)
1163  {
1165  typentry->domainData = dcc;
1166  dcc->dccRefCount++; /* count the typcache's reference */
1167  }
1168 
1169  /* Either way, the typcache entry's domain data is now valid. */
1171 }
1172 
1173 /*
1174  * qsort comparator to sort DomainConstraintState pointers by name
1175  */
1176 static int
1177 dcs_cmp(const void *a, const void *b)
1178 {
1179  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1180  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1181 
1182  return strcmp((*ca)->name, (*cb)->name);
1183 }
1184 
1185 /*
1186  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1187  * and free it if no references remain
1188  */
1189 static void
1191 {
 /* Refcount counts the typcache's link plus every live DomainConstraintRef */
1192  Assert(dcc->dccRefCount > 0);
 /* Dropping the last reference frees the whole constraint cache */
1193  if (--(dcc->dccRefCount) <= 0)
1195 }
1196 
1197 /*
1198  * Context reset/delete callback for a DomainConstraintRef
1199  */
1200 static void
1202 {
1204  DomainConstraintCache *dcc = ref->dcc;
1205 
1206  /* Paranoia --- be sure link is nulled before trying to release */
1207  if (dcc)
1208  {
 /* Detach the ref from the cache, then drop its refcount */
1209  ref->constraints = NIL;
1210  ref->dcc = NULL;
1211  decr_dcc_refcount(dcc);
1212  }
1213 }
1214 
1215 /*
1216  * prep_domain_constraints --- prepare domain constraints for execution
1217  *
1218  * The expression trees stored in the DomainConstraintCache's list are
1219  * converted to executable expression state trees stored in execctx.
1220  */
1221 static List *
1223 {
1224  List *result = NIL;
1225  MemoryContext oldcxt;
1226  ListCell *lc;
1227 
 /* Build the new list and exprstates in the caller-supplied context */
1228  oldcxt = MemoryContextSwitchTo(execctx);
1229 
1230  foreach(lc, constraints)
1231  {
1233  DomainConstraintState *newr;
1234 
 /* Shallow-copy the cached node; name and check_expr remain shared with the cache */
1236  newr->constrainttype = r->constrainttype;
1237  newr->name = r->name;
1238  newr->check_expr = r->check_expr;
 /* The exprstate is the per-ref, executable part */
1239  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1240 
1241  result = lappend(result, newr);
1242  }
1243 
1244  MemoryContextSwitchTo(oldcxt);
1245 
1246  return result;
1247 }
1248 
1249 /*
1250  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1251  *
1252  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1253  * lives. The ref will be cleaned up when that context is reset/deleted.
1254  *
1255  * Caller must also tell us whether it wants check_exprstate fields to be
1256  * computed in the DomainConstraintState nodes attached to this ref.
1257  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1258  */
1259 void
1261  MemoryContext refctx, bool need_exprstate)
1262 {
1263  /* Look up the typcache entry --- we assume it survives indefinitely */
1265  ref->need_exprstate = need_exprstate;
1266  /* For safety, establish the callback before acquiring a refcount */
1267  ref->refctx = refctx;
1268  ref->dcc = NULL;
1270  ref->callback.arg = (void *) ref;
1272  /* Acquire refcount if there are constraints, and set up exported list */
1273  if (ref->tcache->domainData)
1274  {
1275  ref->dcc = ref->tcache->domainData;
1276  ref->dcc->dccRefCount++;
 /* Either build executable exprstates in refctx, or share the cached list */
1277  if (ref->need_exprstate)
1279  ref->refctx);
1280  else
1281  ref->constraints = ref->dcc->constraints;
1282  }
1283  else
1284  ref->constraints = NIL;
1285 }
1286 
1287 /*
1288  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1289  *
1290  * If the domain's constraint set changed, ref->constraints is updated to
1291  * point at a new list of cached constraints.
1292  *
1293  * In the normal case where nothing happened to the domain, this is cheap
1294  * enough that it's reasonable (and expected) to check before *each* use
1295  * of the constraint info.
1296  */
1297 void
1299 {
1300  TypeCacheEntry *typentry = ref->tcache;
1301 
1302  /* Make sure typcache entry's data is up to date */
1303  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1304  typentry->typtype == TYPTYPE_DOMAIN)
1305  load_domaintype_info(typentry);
1306 
1307  /* Transfer to ref object if there's new info, adjusting refcounts */
1308  if (ref->dcc != typentry->domainData)
1309  {
1310  /* Paranoia --- be sure link is nulled before trying to release */
1311  DomainConstraintCache *dcc = ref->dcc;
1312 
1313  if (dcc)
1314  {
1315  /*
1316  * Note: we just leak the previous list of executable domain
1317  * constraints. Alternatively, we could keep those in a child
1318  * context of ref->refctx and free that context at this point.
1319  * However, in practice this code path will be taken so seldom
1320  * that the extra bookkeeping for a child context doesn't seem
1321  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1322  */
1323  ref->constraints = NIL;
1324  ref->dcc = NULL;
1325  decr_dcc_refcount(dcc);
1326  }
 /* Now adopt the (possibly new) cache, mirroring InitDomainConstraintRef */
1327  dcc = typentry->domainData;
1328  if (dcc)
1329  {
1330  ref->dcc = dcc;
1331  dcc->dccRefCount++;
1332  if (ref->need_exprstate)
1334  ref->refctx);
1335  else
1336  ref->constraints = dcc->constraints;
1337  }
1338  }
1339 }
1340 
1341 /*
1342  * DomainHasConstraints --- utility routine to check if a domain has constraints
1343  *
1344  * This is defined to return false, not fail, if type is not a domain.
1345  */
1346 bool
1348 {
1349  TypeCacheEntry *typentry;
1350 
1351  /*
1352  * Note: a side effect is to cause the typcache's domain data to become
1353  * valid. This is fine since we'll likely need it soon if there is any.
1354  */
1355  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1356 
 /* Non-domain types never get a domainData object, so this returns false */
1357  return (typentry->domainData != NULL);
1358 }
1359 
1360 
1361 /*
1362  * array_element_has_equality and friends are helper routines to check
1363  * whether we should believe that array_eq and related functions will work
1364  * on the given array type or composite type.
1365  *
1366  * The logic above may call these repeatedly on the same type entry, so we
1367  * make use of the typentry->flags field to cache the results once known.
1368  * Also, we assume that we'll probably want all these facts about the type
1369  * if we want any, so we cache them all using only one lookup of the
1370  * component datatype(s).
1371  */
1372 
1373 static bool
1375 {
 /* Lazily compute and cache the element properties, then test the flag */
1376  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1378  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1379 }
1380 
1381 static bool
1383 {
 /* Lazily compute and cache the element properties, then test the flag */
1384  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1386  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1387 }
1388 
1389 static bool
1391 {
 /* Lazily compute and cache the element properties, then test the flag */
1392  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1394  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1395 }
1396 
1397 static bool
1399 {
 /* Lazily compute and cache the element properties, then test the flag */
1400  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1402  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1403 }
1404 
1405 static void
1407 {
 /* Look through domains to the array's element type, if this is an array */
1408  Oid elem_type = get_base_element_type(typentry->type_id);
1409 
1410  if (OidIsValid(elem_type))
1411  {
1412  TypeCacheEntry *elementry;
1413 
 /* One lookup gathers all four element facts at once */
1414  elementry = lookup_type_cache(elem_type,
 /* Translate the element's operator/proc availability into our flag bits */
1419  if (OidIsValid(elementry->eq_opr))
1420  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1421  if (OidIsValid(elementry->cmp_proc))
1422  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1423  if (OidIsValid(elementry->hash_proc))
1424  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1425  if (OidIsValid(elementry->hash_extended_proc))
1427  }
1429 }
1430 
1431 /*
1432  * Likewise, some helper functions for composite types.
1433  */
1434 
1435 static bool
1437 {
 /* Lazily compute and cache the field properties, then test the flag */
1438  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1440  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1441 }
1442 
1443 static bool
1445 {
 /* Lazily compute and cache the field properties, then test the flag */
1446  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1448  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1449 }
1450 
1451 static void
1453 {
1454  /*
1455  * For type RECORD, we can't really tell what will work, since we don't
1456  * have access here to the specific anonymous type. Just assume that
1457  * everything will (we may get a failure at runtime ...)
1458  */
1459  if (typentry->type_id == RECORDOID)
1460  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1462  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1463  {
1464  TupleDesc tupdesc;
1465  int newflags;
1466  int i;
1467 
1468  /* Fetch composite type's tupdesc if we don't have it already */
1469  if (typentry->tupDesc == NULL)
1470  load_typcache_tupdesc(typentry);
1471  tupdesc = typentry->tupDesc;
1472 
1473  /* Must bump the refcount while we do additional catalog lookups */
1474  IncrTupleDescRefCount(tupdesc);
1475 
1476  /* Have each property if all non-dropped fields have the property */
1477  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1479  for (i = 0; i < tupdesc->natts; i++)
1480  {
1481  TypeCacheEntry *fieldentry;
1482  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1483 
1484  if (attr->attisdropped)
1485  continue;
1486 
1487  fieldentry = lookup_type_cache(attr->atttypid,
 /* Clear a bit as soon as any field lacks the corresponding support */
1490  if (!OidIsValid(fieldentry->eq_opr))
1491  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1492  if (!OidIsValid(fieldentry->cmp_proc))
1493  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1494 
1495  /* We can drop out of the loop once we disprove all bits */
1496  if (newflags == 0)
1497  break;
1498  }
1499  typentry->flags |= newflags;
1500 
1501  DecrTupleDescRefCount(tupdesc);
1502  }
1503  else if (typentry->typtype == TYPTYPE_DOMAIN)
1504  {
1505  /* If it's domain over composite, copy base type's properties */
1506  TypeCacheEntry *baseentry;
1507 
1508  /* load up basetype info if we didn't already */
1509  if (typentry->domainBaseType == InvalidOid)
1510  {
1511  typentry->domainBaseTypmod = -1;
1512  typentry->domainBaseType =
1513  getBaseTypeAndTypmod(typentry->type_id,
1514  &typentry->domainBaseTypmod);
1515  }
1516  baseentry = lookup_type_cache(typentry->domainBaseType,
1519  if (baseentry->typtype == TYPTYPE_COMPOSITE)
1520  {
1522  typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1524  }
1525  }
1527 }
1528 
1529 /*
1530  * Likewise, some helper functions for range types.
1531  *
1532  * We can borrow the flag bits for array element properties to use for range
1533  * element properties, since those flag bits otherwise have no use in a
1534  * range type's typcache entry.
1535  */
1536 
1537 static bool
1539 {
 /* Lazily compute range-subtype properties (borrowing the ELEM flag bits) */
1540  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1542  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1543 }
1544 
1545 static bool
1547 {
 /* Lazily compute range-subtype properties (borrowing the ELEM flag bits) */
1548  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1550  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1551 }
1552 
1553 static void
1555 {
1556  /* load up subtype link if we didn't already */
1557  if (typentry->rngelemtype == NULL &&
1558  typentry->typtype == TYPTYPE_RANGE)
1559  load_rangetype_info(typentry);
1560 
1561  if (typentry->rngelemtype != NULL)
1562  {
1563  TypeCacheEntry *elementry;
1564 
1565  /* might need to calculate subtype's hash function properties */
1566  elementry = lookup_type_cache(typentry->rngelemtype->type_id,
 /* Record hash support of the subtype in our (borrowed) ELEM flag bits */
1569  if (OidIsValid(elementry->hash_proc))
1570  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1571  if (OidIsValid(elementry->hash_extended_proc))
1573  }
1575 }
1576 
1577 /*
1578  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1579  * to store 'typmod'.
1580  */
1581 static void
1583 {
 /* First use: create both arrays with an initial capacity of 64 slots */
1584  if (RecordCacheArray == NULL)
1585  {
1586  RecordCacheArray = (TupleDesc *)
1588  RecordIdentifierArray = (uint64 *)
1589  MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
1590  RecordCacheArrayLen = 64;
1591  }
1592 
 /* Grow by doubling until 'typmod' fits; zero-fill the new tail slots */
1593  if (typmod >= RecordCacheArrayLen)
1594  {
1595  int32 newlen = RecordCacheArrayLen * 2;
1596 
1597  while (typmod >= newlen)
1598  newlen *= 2;
1599 
1600  RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
1601  newlen * sizeof(TupleDesc));
1602  memset(RecordCacheArray + RecordCacheArrayLen, 0,
1603  (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
1605  newlen * sizeof(uint64));
1607  (newlen - RecordCacheArrayLen) * sizeof(uint64));
1608  RecordCacheArrayLen = newlen;
1609  }
1610 }
1611 
1612 /*
1613  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1614  *
1615  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1616  * hasn't had its refcount bumped.
1617  */
1618 static TupleDesc
1619 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1620 {
1621  if (type_id != RECORDOID)
1622  {
1623  /*
1624  * It's a named composite type, so use the regular typcache.
1625  */
1626  TypeCacheEntry *typentry;
1627 
1628  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
 /* A NULL tupDesc means the type isn't composite */
1629  if (typentry->tupDesc == NULL && !noError)
1630  ereport(ERROR,
1631  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1632  errmsg("type %s is not composite",
1633  format_type_be(type_id))));
1634  return typentry->tupDesc;
1635  }
1636  else
1637  {
1638  /*
1639  * It's a transient record type, so look in our record-type table.
1640  */
1641  if (typmod >= 0)
1642  {
1643  /* It is already in our local cache? */
1644  if (typmod < RecordCacheArrayLen &&
1645  RecordCacheArray[typmod] != NULL)
1646  return RecordCacheArray[typmod];
1647 
1648  /* Are we attached to a shared record typmod registry? */
1650  {
1651  SharedTypmodTableEntry *entry;
1652 
1653  /* Try to find it in the shared typmod index. */
1655  &typmod, false);
1656  if (entry != NULL)
1657  {
1658  TupleDesc tupdesc;
1659 
1660  tupdesc = (TupleDesc)
1662  entry->shared_tupdesc);
1663  Assert(typmod == tupdesc->tdtypmod);
1664 
1665  /* We may need to extend the local RecordCacheArray. */
1667 
1668  /*
1669  * Our local array can now point directly to the TupleDesc
1670  * in shared memory, which is non-reference-counted.
1671  */
1672  RecordCacheArray[typmod] = tupdesc;
1673  Assert(tupdesc->tdrefcount == -1);
1674 
1675  /*
1676  * We don't share tupdesc identifiers across processes, so
1677  * assign one locally.
1678  */
1680 
1682  entry);
1683 
1684  return RecordCacheArray[typmod];
1685  }
1686  }
1687  }
1688 
 /* Unregistered (or negative) typmod: fail or return NULL per noError */
1689  if (!noError)
1690  ereport(ERROR,
1691  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1692  errmsg("record type has not been registered")));
1693  return NULL;
1694  }
1695 }
1696 
1697 /*
1698  * lookup_rowtype_tupdesc
1699  *
1700  * Given a typeid/typmod that should describe a known composite type,
1701  * return the tuple descriptor for the type. Will ereport on failure.
1702  * (Use ereport because this is reachable with user-specified OIDs,
1703  * for example from record_in().)
1704  *
1705  * Note: on success, we increment the refcount of the returned TupleDesc,
1706  * and log the reference in CurrentResourceOwner. Caller should call
1707  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1708  */
1709 TupleDesc
1711 {
1712  TupleDesc tupDesc;
1713 
 /* noError = false, so the internal lookup ereports rather than return NULL */
1714  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
 /* Bump refcount and register it with CurrentResourceOwner */
1715  PinTupleDesc(tupDesc);
1716  return tupDesc;
1717 }
1718 
1719 /*
1720  * lookup_rowtype_tupdesc_noerror
1721  *
1722  * As above, but if the type is not a known composite type and noError
1723  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1724  * type_id is passed, you'll get an ereport anyway.)
1725  */
1726 TupleDesc
1727 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1728 {
1729  TupleDesc tupDesc;
1730 
1731  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1732  if (tupDesc != NULL)
1733  PinTupleDesc(tupDesc);
1734  return tupDesc;
1735 }
1736 
1737 /*
1738  * lookup_rowtype_tupdesc_copy
1739  *
1740  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1741  * copied into the CurrentMemoryContext and is not reference-counted.
1742  */
1743 TupleDesc
1745 {
1746  TupleDesc tmp;
1747 
 /* noError = false, so a failed lookup ereports instead of returning NULL */
1748  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
 /* Return a private, non-refcounted copy in CurrentMemoryContext */
1749  return CreateTupleDescCopyConstr(tmp);
1750 }
1751 
1752 /*
1753  * lookup_rowtype_tupdesc_domain
1754  *
1755  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1756  * a domain over a named composite type; so this is effectively equivalent to
1757  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1758  * except for being a tad faster.
1759  *
1760  * Note: the reason we don't fold the look-through-domain behavior into plain
1761  * lookup_rowtype_tupdesc() is that we want callers to know they might be
1762  * dealing with a domain. Otherwise they might construct a tuple that should
1763  * be of the domain type, but not apply domain constraints.
1764  */
1765 TupleDesc
1766 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1767 {
1768  TupleDesc tupDesc;
1769 
1770  if (type_id != RECORDOID)
1771  {
1772  /*
1773  * Check for domain or named composite type. We might as well load
1774  * whichever data is needed.
1775  */
1776  TypeCacheEntry *typentry;
1777 
1778  typentry = lookup_type_cache(type_id,
 /* For a domain, recurse on the cached base type and typmod */
1781  if (typentry->typtype == TYPTYPE_DOMAIN)
1783  typentry->domainBaseTypmod,
1784  noError);
1785  if (typentry->tupDesc == NULL && !noError)
1786  ereport(ERROR,
1787  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1788  errmsg("type %s is not composite",
1789  format_type_be(type_id))));
1790  tupDesc = typentry->tupDesc;
1791  }
1792  else
1793  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
 /* Pin before returning, exactly as lookup_rowtype_tupdesc_noerror does */
1794  if (tupDesc != NULL)
1795  PinTupleDesc(tupDesc);
1796  return tupDesc;
1797 }
1798 
1799 /*
1800  * Hash function for the hash table of RecordCacheEntry.
1801  */
1802 static uint32
1803 record_type_typmod_hash(const void *data, size_t size)
1804 {
1805  RecordCacheEntry *entry = (RecordCacheEntry *) data;
1806 
1807  return hashTupleDesc(entry->tupdesc);
1808 }
1809 
1810 /*
1811  * Match function for the hash table of RecordCacheEntry.
1812  */
1813 static int
1814 record_type_typmod_compare(const void *a, const void *b, size_t size)
1815 {
1816  RecordCacheEntry *left = (RecordCacheEntry *) a;
1817  RecordCacheEntry *right = (RecordCacheEntry *) b;
1818 
1819  return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1820 }
1821 
1822 /*
1823  * assign_record_type_typmod
1824  *
1825  * Given a tuple descriptor for a RECORD type, find or create a cache entry
1826  * for the type, and set the tupdesc's tdtypmod field to a value that will
1827  * identify this cache entry to lookup_rowtype_tupdesc.
1828  */
1829 void
1831 {
1832  RecordCacheEntry *recentry;
1833  TupleDesc entDesc;
1834  bool found;
1835  MemoryContext oldcxt;
1836 
1837  Assert(tupDesc->tdtypeid == RECORDOID);
1838 
1839  if (RecordCacheHash == NULL)
1840  {
1841  /* First time through: initialize the hash table */
1842  HASHCTL ctl;
1843 
1844  MemSet(&ctl, 0, sizeof(ctl));
1845  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
1846  ctl.entrysize = sizeof(RecordCacheEntry);
1849  RecordCacheHash = hash_create("Record information cache", 64,
1850  &ctl,
1852 
1853  /* Also make sure CacheMemoryContext exists */
1854  if (!CacheMemoryContext)
1856  }
1857 
1858  /* Find or create a hashtable entry for this tuple descriptor */
1859  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
1860  (void *) &tupDesc,
1861  HASH_ENTER, &found);
 /* Fast path: an equal tupdesc is already registered, reuse its typmod */
1862  if (found && recentry->tupdesc != NULL)
1863  {
1864  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
1865  return;
1866  }
1867 
1868  /* Not present, so need to manufacture an entry */
 /* Null the tupdesc link first so a partly-built entry is recognizable */
1869  recentry->tupdesc = NULL;
1871 
1872  /* Look in the SharedRecordTypmodRegistry, if attached */
1873  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
1874  if (entDesc == NULL)
1875  {
1876  /* Reference-counted local cache only. */
1877  entDesc = CreateTupleDescCopy(tupDesc);
1878  entDesc->tdrefcount = 1;
1879  entDesc->tdtypmod = NextRecordTypmod++;
1880  }
1882  RecordCacheArray[entDesc->tdtypmod] = entDesc;
1883  recentry->tupdesc = entDesc;
1884 
1885  /* Assign a unique tupdesc identifier, too. */
1887 
1888  /* Update the caller's tuple descriptor. */
1889  tupDesc->tdtypmod = entDesc->tdtypmod;
1890 
1891  MemoryContextSwitchTo(oldcxt);
1892 }
1893 
1894 /*
1895  * assign_record_type_identifier
1896  *
1897  * Get an identifier, which will be unique over the lifespan of this backend
1898  * process, for the current tuple descriptor of the specified composite type.
1899  * For named composite types, the value is guaranteed to change if the type's
1900  * definition does. For registered RECORD types, the value will not change
1901  * once assigned, since the registered type won't either. If an anonymous
1902  * RECORD type is specified, we return a new identifier on each call.
1903  */
1904 uint64
1906 {
1907  if (type_id != RECORDOID)
1908  {
1909  /*
1910  * It's a named composite type, so use the regular typcache.
1911  */
1912  TypeCacheEntry *typentry;
1913 
1914  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1915  if (typentry->tupDesc == NULL)
1916  ereport(ERROR,
1917  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1918  errmsg("type %s is not composite",
1919  format_type_be(type_id))));
1920  Assert(typentry->tupDesc_identifier != 0);
1921  return typentry->tupDesc_identifier;
1922  }
1923  else
1924  {
1925  /*
1926  * It's a transient record type, so look in our record-type table.
1927  */
 /* Registered record typmods have identifiers assigned at registration */
1928  if (typmod >= 0 && typmod < RecordCacheArrayLen &&
1929  RecordCacheArray[typmod] != NULL)
1930  {
1931  Assert(RecordIdentifierArray[typmod] != 0);
1932  return RecordIdentifierArray[typmod];
1933  }
1934 
1935  /* For anonymous or unrecognized record type, generate a new ID */
1936  return ++tupledesc_id_counter;
1937  }
1938 }
1939 
1940 /*
1941  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
1942  * This exists only to avoid exposing private innards of
1943  * SharedRecordTypmodRegistry in a header.
1944  */
1945 size_t
1947 {
 /* Callers reserve exactly one registry struct's worth of shmem */
1948  return sizeof(SharedRecordTypmodRegistry);
1949 }
1950 
1951 /*
1952  * Initialize 'registry' in a pre-existing shared memory region, which must be
1953  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
1954  * bytes.
1955  *
1956  * 'area' will be used to allocate shared memory space as required for the
1957  * typemod registration. The current process, expected to be a leader process
1958  * in a parallel query, will be attached automatically and its current record
1959  * types will be loaded into *registry. While attached, all calls to
1960  * assign_record_type_typmod will use the shared registry. Worker backends
1961  * will need to attach explicitly.
1962  *
1963  * Note that this function takes 'area' and 'segment' as arguments rather than
1964  * accessing them via CurrentSession, because they aren't installed there
1965  * until after this function runs.
1966  */
1967 void
1969  dsm_segment *segment,
1970  dsa_area *area)
1971 {
1972  MemoryContext old_context;
1973  dshash_table *record_table;
1974  dshash_table *typmod_table;
1975  int32 typmod;
1976 
1978 
1979  /* We can't already be attached to a shared registry. */
1983 
 /* dshash control structures must outlive the current query */
1984  old_context = MemoryContextSwitchTo(TopMemoryContext);
1985 
1986  /* Create the hash table of tuple descriptors indexed by themselves. */
1987  record_table = dshash_create(area, &srtr_record_table_params, area);
1988 
1989  /* Create the hash table of tuple descriptors indexed by typmod. */
1990  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
1991 
1992  MemoryContextSwitchTo(old_context);
1993 
1994  /* Initialize the SharedRecordTypmodRegistry. */
1995  registry->record_table_handle = dshash_get_hash_table_handle(record_table)
1996  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table)
1998 
1999  /*
2000  * Copy all entries from this backend's private registry into the shared
2001  * registry.
2002  */
2003  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2004  {
2005  SharedTypmodTableEntry *typmod_table_entry;
2006  SharedRecordTableEntry *record_table_entry;
2007  SharedRecordTableKey record_table_key;
2008  dsa_pointer shared_dp;
2009  TupleDesc tupdesc;
2010  bool found;
2011 
2012  tupdesc = RecordCacheArray[typmod];
 /* Typmods can have gaps if registration failed partway; skip empty slots */
2013  if (tupdesc == NULL)
2014  continue;
2015 
2016  /* Copy the TupleDesc into shared memory. */
2017  shared_dp = share_tupledesc(area, tupdesc, typmod);
2018 
2019  /* Insert into the typmod table. */
2020  typmod_table_entry = dshash_find_or_insert(typmod_table,
2021  &tupdesc->tdtypmod,
2022  &found);
2023  if (found)
2024  elog(ERROR, "cannot create duplicate shared record typmod");
2025  typmod_table_entry->typmod = tupdesc->tdtypmod;
2026  typmod_table_entry->shared_tupdesc = shared_dp;
2027  dshash_release_lock(typmod_table, typmod_table_entry);
2028 
2029  /* Insert into the record table. */
2030  record_table_key.shared = false;
2031  record_table_key.u.local_tupdesc = tupdesc;
2032  record_table_entry = dshash_find_or_insert(record_table,
2033  &record_table_key,
2034  &found);
2035  if (!found)
2036  {
 /* Stored key must point at the shared copy, not our local tupdesc */
2037  record_table_entry->key.shared = true;
2038  record_table_entry->key.u.shared_tupdesc = shared_dp;
2039  }
2040  dshash_release_lock(record_table, record_table_entry);
2041  }
2042 
2043  /*
2044  * Set up the global state that will tell assign_record_type_typmod and
2045  * lookup_rowtype_tupdesc_internal about the shared registry.
2046  */
2047  CurrentSession->shared_record_table = record_table;
2048  CurrentSession->shared_typmod_table = typmod_table;
2050 
2051  /*
2052  * We install a detach hook in the leader, but only to handle cleanup on
2053  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2054  * the memory, the leader process will use a shared registry until it
2055  * exits.
2056  */
2058 }
2059 
2060 /*
2061  * Attach to 'registry', which must have been initialized already by another
2062  * backend. Future calls to assign_record_type_typmod and
2063  * lookup_rowtype_tupdesc_internal will use the shared registry until the
2064  * current session is detached.
2065  */
2066 void
2068 {
2069  MemoryContext old_context;
2070  dshash_table *record_table;
2071  dshash_table *typmod_table;
2072 
2074 
2075  /* We can't already be attached to a shared registry. */
2076  Assert(CurrentSession != NULL);
2077  Assert(CurrentSession->segment != NULL);
2078  Assert(CurrentSession->area != NULL);
2082 
2083  /*
2084  * We can't already have typmods in our local cache, because they'd clash
2085  * with those imported by SharedRecordTypmodRegistryInit. This should be
2086  * a freshly started parallel worker. If we ever support worker
2087  * recycling, a worker would need to zap its local cache in between
2088  * servicing different queries, in order to be able to call this and
2089  * synchronize typmods with a new leader; but that's problematic because
2090  * we can't be very sure that record-typmod-related state hasn't escaped
2091  * to anywhere else in the process.
2092  */
2093  Assert(NextRecordTypmod == 0);
2094 
 /* dshash control structures must outlive the current query */
2095  old_context = MemoryContextSwitchTo(TopMemoryContext);
2096 
2097  /* Attach to the two hash tables. */
2098  record_table = dshash_attach(CurrentSession->area,
2099  &srtr_record_table_params,
2100  registry->record_table_handle,
2101  CurrentSession->area);
2102  typmod_table = dshash_attach(CurrentSession->area,
2103  &srtr_typmod_table_params,
2104  registry->typmod_table_handle,
2105  NULL);
2106 
2107  MemoryContextSwitchTo(old_context);
2108 
2109  /*
2110  * Set up detach hook to run at worker exit. Currently this is the same
2111  * as the leader's detach hook, but in future they might need to be
2112  * different.
2113  */
2116  PointerGetDatum(registry));
2117 
2118  /*
2119  * Set up the session state that will tell assign_record_type_typmod and
2120  * lookup_rowtype_tupdesc_internal about the shared registry.
2121  */
2123  CurrentSession->shared_record_table = record_table;
2124  CurrentSession->shared_typmod_table = typmod_table;
2125 }
2126 
2127 /*
2128  * TypeCacheRelCallback
2129  * Relcache inval callback function
2130  *
2131  * Delete the cached tuple descriptor (if any) for the given rel's composite
2132  * type, or for all composite types if relid == InvalidOid. Also reset
2133  * whatever info we have cached about the composite type's comparability.
2134  *
2135  * This is called when a relcache invalidation event occurs for the given
2136  * relid. We must scan the whole typcache hash since we don't know the
2137  * type OID corresponding to the relid. We could do a direct search if this
2138  * were a syscache-flush callback on pg_type, but then we would need all
2139  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
2140  * invals against the rel's pg_type OID. The extra SI signaling could very
2141  * well cost more than we'd save, since in most usages there are not very
2142  * many entries in a backend's typcache. The risk of bugs-of-omission seems
2143  * high, too.
2144  *
2145  * Another possibility, with only localized impact, is to maintain a second
2146  * hashtable that indexes composite-type typcache entries by their typrelid.
2147  * But it's still not clear it's worth the trouble.
2148  */
2149 static void
2151 {
2153  TypeCacheEntry *typentry;
2154 
2155  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2156  hash_seq_init(&status, TypeCacheHash);
2157  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2158  {
2159  if (typentry->typtype == TYPTYPE_COMPOSITE)
2160  {
2161  /* Skip if no match, unless we're zapping all composite types */
2162  if (relid != typentry->typrelid && relid != InvalidOid)
2163  continue;
2164 
2165  /* Delete tupdesc if we have it */
2166  if (typentry->tupDesc != NULL)
2167  {
2168  /*
2169  * Release our refcount, and free the tupdesc if none remain.
2170  * (Can't use DecrTupleDescRefCount because this reference is
2171  * not logged in current resource owner.)
2172  */
2173  Assert(typentry->tupDesc->tdrefcount > 0);
2174  if (--typentry->tupDesc->tdrefcount == 0)
2175  FreeTupleDesc(typentry->tupDesc);
2176  typentry->tupDesc = NULL;
2177 
2178  /*
2179  * Also clear tupDesc_identifier, so that anything watching
2180  * that will realize that the tupdesc has possibly changed.
2181  * (Alternatively, we could specify that to detect possible
2182  * tupdesc change, one must check for tupDesc != NULL as well
2183  * as tupDesc_identifier being the same as what was previously
2184  * seen. That seems error-prone.)
2185  */
2186  typentry->tupDesc_identifier = 0;
2187  }
2188 
2189  /* Reset equality/comparison/hashing validity information */
2190  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2191  }
2192  else if (typentry->typtype == TYPTYPE_DOMAIN)
2193  {
2194  /*
2195  * If it's domain over composite, reset flags. (We don't bother
2196  * trying to determine whether the specific base type needs a
2197  * reset.) Note that if we haven't determined whether the base
2198  * type is composite, we don't need to reset anything.
2199  */
2200  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2201  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2202  }
2203  }
2204 }
2205 
2206 /*
2207  * TypeCacheTypCallback
2208  * Syscache inval callback function
2209  *
2210  * This is called when a syscache invalidation event occurs for any
2211  * pg_type row. If we have information cached about that type, mark
2212  * it as needing to be reloaded.
2213  */
2214 static void
2215 TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2216 {
2218  TypeCacheEntry *typentry;
2219 
2220  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2221  hash_seq_init(&status, TypeCacheHash);
2222  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2223  {
2224  /* Is this the targeted type row (or it's a total cache flush)? */
2225  if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
2226  {
2227  /*
2228  * Mark the data obtained directly from pg_type as invalid. Also,
2229  * if it's a domain, typnotnull might've changed, so we'll need to
2230  * recalculate its constraints.
2231  */
2232  typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2234  }
2235  }
2236 }
2237 
2238 /*
2239  * TypeCacheOpcCallback
2240  * Syscache inval callback function
2241  *
2242  * This is called when a syscache invalidation event occurs for any pg_opclass
2243  * row. In principle we could probably just invalidate data dependent on the
2244  * particular opclass, but since updates on pg_opclass are rare in production
2245  * it doesn't seem worth a lot of complication: we just mark all cached data
2246  * invalid.
2247  *
2248  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2249  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2250  * is not allowed to be used to add/drop the primary operators and functions
2251  * of an opclass, only cross-type members of a family; and the latter sorts
2252  * of members are not going to get cached here.
2253  */
2254 static void
2255 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2256 {
2258  TypeCacheEntry *typentry;
2259 
2260  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2261  hash_seq_init(&status, TypeCacheHash);
2262  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2263  {
2264  /* Reset equality/comparison/hashing validity information */
2265  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2266  }
2267 }
2268 
2269 /*
2270  * TypeCacheConstrCallback
2271  * Syscache inval callback function
2272  *
2273  * This is called when a syscache invalidation event occurs for any
2274  * pg_constraint row. We flush information about domain constraints
2275  * when this happens.
2276  *
2277  * It's slightly annoying that we can't tell whether the inval event was for
2278  * a domain constraint record or not; there's usually more update traffic
2279  * for table constraints than domain constraints, so we'll do a lot of
2280  * useless flushes. Still, this is better than the old no-caching-at-all
2281  * approach to domain constraints.
2282  */
2283 static void
2284 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2285 {
2286  TypeCacheEntry *typentry;
2287 
2288  /*
2289  * Because this is called very frequently, and typically very few of the
2290  * typcache entries are for domains, we don't use hash_seq_search here.
2291  * Instead we thread all the domain-type entries together so that we can
2292  * visit them cheaply.
2293  */
2294  for (typentry = firstDomainTypeEntry;
2295  typentry != NULL;
2296  typentry = typentry->nextDomain)
2297  {
2298  /* Reset domain constraint validity information */
2300  }
2301 }
2302 
2303 
2304 /*
2305  * Check if given OID is part of the subset that's sortable by comparisons
2306  */
2307 static inline bool
2309 {
2310  Oid offset;
2311 
2312  if (arg < enumdata->bitmap_base)
2313  return false;
2314  offset = arg - enumdata->bitmap_base;
2315  if (offset > (Oid) INT_MAX)
2316  return false;
2317  return bms_is_member((int) offset, enumdata->sorted_values);
2318 }
2319 
2320 
2321 /*
2322  * compare_values_of_enum
2323  * Compare two members of an enum type.
2324  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2325  *
2326  * Note: currently, the enumData cache is refreshed only if we are asked
2327  * to compare an enum value that is not already in the cache. This is okay
2328  * because there is no support for re-ordering existing values, so comparisons
2329  * of previously cached values will return the right answer even if other
2330  * values have been added since we last loaded the cache.
2331  *
2332  * Note: the enum logic has a special-case rule about even-numbered versus
2333  * odd-numbered OIDs, but we take no account of that rule here; this
2334  * routine shouldn't even get called when that rule applies.
2335  */
2336 int
2338 {
2339  TypeCacheEnumData *enumdata;
2340  EnumItem *item1;
2341  EnumItem *item2;
2342 
2343  /*
2344  * Equal OIDs are certainly equal --- this case was probably handled by
2345  * our caller, but we may as well check.
2346  */
2347  if (arg1 == arg2)
2348  return 0;
2349 
2350  /* Load up the cache if first time through */
2351  if (tcache->enumData == NULL)
2352  load_enum_cache_data(tcache);
2353  enumdata = tcache->enumData;
2354 
2355  /*
2356  * If both OIDs are known-sorted, we can just compare them directly.
2357  */
2358  if (enum_known_sorted(enumdata, arg1) &&
2359  enum_known_sorted(enumdata, arg2))
2360  {
2361  if (arg1 < arg2)
2362  return -1;
2363  else
2364  return 1;
2365  }
2366 
2367  /*
2368  * Slow path: we have to identify their actual sort-order positions.
2369  */
2370  item1 = find_enumitem(enumdata, arg1);
2371  item2 = find_enumitem(enumdata, arg2);
2372 
2373  if (item1 == NULL || item2 == NULL)
2374  {
2375  /*
2376  * We couldn't find one or both values. That means the enum has
2377  * changed under us, so re-initialize the cache and try again. We
2378  * don't bother retrying the known-sorted case in this path.
2379  */
2380  load_enum_cache_data(tcache);
2381  enumdata = tcache->enumData;
2382 
2383  item1 = find_enumitem(enumdata, arg1);
2384  item2 = find_enumitem(enumdata, arg2);
2385 
2386  /*
2387  * If we still can't find the values, complain: we must have corrupt
2388  * data.
2389  */
2390  if (item1 == NULL)
2391  elog(ERROR, "enum value %u not found in cache for enum %s",
2392  arg1, format_type_be(tcache->type_id));
2393  if (item2 == NULL)
2394  elog(ERROR, "enum value %u not found in cache for enum %s",
2395  arg2, format_type_be(tcache->type_id));
2396  }
2397 
2398  if (item1->sort_order < item2->sort_order)
2399  return -1;
2400  else if (item1->sort_order > item2->sort_order)
2401  return 1;
2402  else
2403  return 0;
2404 }
2405 
2406 /*
2407  * Load (or re-load) the enumData member of the typcache entry.
2408  */
2409 static void
2411 {
2412  TypeCacheEnumData *enumdata;
2413  Relation enum_rel;
2414  SysScanDesc enum_scan;
2415  HeapTuple enum_tuple;
2416  ScanKeyData skey;
2417  EnumItem *items;
2418  int numitems;
2419  int maxitems;
2420  Oid bitmap_base;
2421  Bitmapset *bitmap;
2422  MemoryContext oldcxt;
2423  int bm_size,
2424  start_pos;
2425 
2426  /* Check that this is actually an enum */
2427  if (tcache->typtype != TYPTYPE_ENUM)
2428  ereport(ERROR,
2429  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2430  errmsg("%s is not an enum",
2431  format_type_be(tcache->type_id))));
2432 
2433  /*
2434  * Read all the information for members of the enum type. We collect the
2435  * info in working memory in the caller's context, and then transfer it to
2436  * permanent memory in CacheMemoryContext. This minimizes the risk of
2437  * leaking memory from CacheMemoryContext in the event of an error partway
2438  * through.
2439  */
2440  maxitems = 64;
2441  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2442  numitems = 0;
2443 
2444  /* Scan pg_enum for the members of the target enum type. */
2445  ScanKeyInit(&skey,
2446  Anum_pg_enum_enumtypid,
2447  BTEqualStrategyNumber, F_OIDEQ,
2448  ObjectIdGetDatum(tcache->type_id));
2449 
2450  enum_rel = table_open(EnumRelationId, AccessShareLock);
2451  enum_scan = systable_beginscan(enum_rel,
2453  true, NULL,
2454  1, &skey);
2455 
2456  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2457  {
2458  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2459 
2460  if (numitems >= maxitems)
2461  {
2462  maxitems *= 2;
2463  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2464  }
2465  items[numitems].enum_oid = en->oid;
2466  items[numitems].sort_order = en->enumsortorder;
2467  numitems++;
2468  }
2469 
2470  systable_endscan(enum_scan);
2471  table_close(enum_rel, AccessShareLock);
2472 
2473  /* Sort the items into OID order */
2474  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2475 
2476  /*
2477  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2478  * known to be in order and can thus be compared with just OID comparison.
2479  *
2480  * The point of this is that the enum's initial OIDs were certainly in
2481  * order, so there is some subset that can be compared via OID comparison;
2482  * and we'd rather not do binary searches unnecessarily.
2483  *
2484  * This is somewhat heuristic, and might identify a subset of OIDs that
2485  * isn't exactly what the type started with. That's okay as long as the
2486  * subset is correctly sorted.
2487  */
2488  bitmap_base = InvalidOid;
2489  bitmap = NULL;
2490  bm_size = 1; /* only save sets of at least 2 OIDs */
2491 
2492  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2493  {
2494  /*
2495  * Identify longest sorted subsequence starting at start_pos
2496  */
2497  Bitmapset *this_bitmap = bms_make_singleton(0);
2498  int this_bm_size = 1;
2499  Oid start_oid = items[start_pos].enum_oid;
2500  float4 prev_order = items[start_pos].sort_order;
2501  int i;
2502 
2503  for (i = start_pos + 1; i < numitems; i++)
2504  {
2505  Oid offset;
2506 
2507  offset = items[i].enum_oid - start_oid;
2508  /* quit if bitmap would be too large; cutoff is arbitrary */
2509  if (offset >= 8192)
2510  break;
2511  /* include the item if it's in-order */
2512  if (items[i].sort_order > prev_order)
2513  {
2514  prev_order = items[i].sort_order;
2515  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2516  this_bm_size++;
2517  }
2518  }
2519 
2520  /* Remember it if larger than previous best */
2521  if (this_bm_size > bm_size)
2522  {
2523  bms_free(bitmap);
2524  bitmap_base = start_oid;
2525  bitmap = this_bitmap;
2526  bm_size = this_bm_size;
2527  }
2528  else
2529  bms_free(this_bitmap);
2530 
2531  /*
2532  * Done if it's not possible to find a longer sequence in the rest of
2533  * the list. In typical cases this will happen on the first
2534  * iteration, which is why we create the bitmaps on the fly instead of
2535  * doing a second pass over the list.
2536  */
2537  if (bm_size >= (numitems - start_pos - 1))
2538  break;
2539  }
2540 
2541  /* OK, copy the data into CacheMemoryContext */
2543  enumdata = (TypeCacheEnumData *)
2544  palloc(offsetof(TypeCacheEnumData, enum_values) +
2545  numitems * sizeof(EnumItem));
2546  enumdata->bitmap_base = bitmap_base;
2547  enumdata->sorted_values = bms_copy(bitmap);
2548  enumdata->num_values = numitems;
2549  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2550  MemoryContextSwitchTo(oldcxt);
2551 
2552  pfree(items);
2553  bms_free(bitmap);
2554 
2555  /* And link the finished cache struct into the typcache */
2556  if (tcache->enumData != NULL)
2557  pfree(tcache->enumData);
2558  tcache->enumData = enumdata;
2559 }
2560 
2561 /*
2562  * Locate the EnumItem with the given OID, if present
2563  */
2564 static EnumItem *
2566 {
2567  EnumItem srch;
2568 
2569  /* On some versions of Solaris, bsearch of zero items dumps core */
2570  if (enumdata->num_values <= 0)
2571  return NULL;
2572 
2573  srch.enum_oid = arg;
2574  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2575  sizeof(EnumItem), enum_oid_cmp);
2576 }
2577 
2578 /*
2579  * qsort comparison function for OID-ordered EnumItems
2580  */
2581 static int
2582 enum_oid_cmp(const void *left, const void *right)
2583 {
2584  const EnumItem *l = (const EnumItem *) left;
2585  const EnumItem *r = (const EnumItem *) right;
2586 
2587  if (l->enum_oid < r->enum_oid)
2588  return -1;
2589  else if (l->enum_oid > r->enum_oid)
2590  return 1;
2591  else
2592  return 0;
2593 }
2594 
2595 /*
2596  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2597  * to the given value and return a dsa_pointer.
2598  */
2599 static dsa_pointer
2600 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2601 {
2602  dsa_pointer shared_dp;
2603  TupleDesc shared;
2604 
2605  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2606  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2607  TupleDescCopy(shared, tupdesc);
2608  shared->tdtypmod = typmod;
2609 
2610  return shared_dp;
2611 }
2612 
2613 /*
2614  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2615  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2616  * Tuple descriptors returned by this function are not reference counted, and
2617  * will exist at least as long as the current backend remained attached to the
2618  * current session.
2619  */
2620 static TupleDesc
2622 {
2623  TupleDesc result;
2625  SharedRecordTableEntry *record_table_entry;
2626  SharedTypmodTableEntry *typmod_table_entry;
2627  dsa_pointer shared_dp;
2628  bool found;
2629  uint32 typmod;
2630 
2631  /* If not even attached, nothing to do. */
2633  return NULL;
2634 
2635  /* Try to find a matching tuple descriptor in the record table. */
2636  key.shared = false;
2637  key.u.local_tupdesc = tupdesc;
2638  record_table_entry = (SharedRecordTableEntry *)
2640  if (record_table_entry)
2641  {
2642  Assert(record_table_entry->key.shared);
2644  record_table_entry);
2645  result = (TupleDesc)
2647  record_table_entry->key.u.shared_tupdesc);
2648  Assert(result->tdrefcount == -1);
2649 
2650  return result;
2651  }
2652 
2653  /* Allocate a new typmod number. This will be wasted if we error out. */
2654  typmod = (int)
2656  1);
2657 
2658  /* Copy the TupleDesc into shared memory. */
2659  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2660 
2661  /*
2662  * Create an entry in the typmod table so that others will understand this
2663  * typmod number.
2664  */
2665  PG_TRY();
2666  {
2667  typmod_table_entry = (SharedTypmodTableEntry *)
2669  &typmod, &found);
2670  if (found)
2671  elog(ERROR, "cannot create duplicate shared record typmod");
2672  }
2673  PG_CATCH();
2674  {
2675  dsa_free(CurrentSession->area, shared_dp);
2676  PG_RE_THROW();
2677  }
2678  PG_END_TRY();
2679  typmod_table_entry->typmod = typmod;
2680  typmod_table_entry->shared_tupdesc = shared_dp;
2682  typmod_table_entry);
2683 
2684  /*
2685  * Finally create an entry in the record table so others with matching
2686  * tuple descriptors can reuse the typmod.
2687  */
2688  record_table_entry = (SharedRecordTableEntry *)
2690  &found);
2691  if (found)
2692  {
2693  /*
2694  * Someone concurrently inserted a matching tuple descriptor since the
2695  * first time we checked. Use that one instead.
2696  */
2698  record_table_entry);
2699 
2700  /* Might as well free up the space used by the one we created. */
2702  &typmod);
2703  Assert(found);
2704  dsa_free(CurrentSession->area, shared_dp);
2705 
2706  /* Return the one we found. */
2707  Assert(record_table_entry->key.shared);
2708  result = (TupleDesc)
2710  record_table_entry->key.shared);
2711  Assert(result->tdrefcount == -1);
2712 
2713  return result;
2714  }
2715 
2716  /* Store it and return it. */
2717  record_table_entry->key.shared = true;
2718  record_table_entry->key.u.shared_tupdesc = shared_dp;
2720  record_table_entry);
2721  result = (TupleDesc)
2722  dsa_get_address(CurrentSession->area, shared_dp);
2723  Assert(result->tdrefcount == -1);
2724 
2725  return result;
2726 }
2727 
2728 /*
2729  * On-DSM-detach hook to forget about the current shared record typmod
2730  * infrastructure. This is currently used by both leader and workers.
2731  */
2732 static void
2734 {
2735  /* Be cautious here: maybe we didn't finish initializing. */
2736  if (CurrentSession->shared_record_table != NULL)
2737  {
2740  }
2741  if (CurrentSession->shared_typmod_table != NULL)
2742  {
2745  }
2747 }
MemoryContextCallback callback
Definition: typcache.h:165
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2337
struct TypeCacheEnumData TypeCacheEnumData
MemoryContextCallbackFunction func
Definition: palloc.h:49
struct TypeCacheEnumData * enumData
Definition: typcache.h:123
#define NIL
Definition: pg_list.h:65
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1390
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:844
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:375
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2426
FormData_pg_range * Form_pg_range
Definition: pg_range.h:55
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:99
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:110
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:212
#define AllocSetContextCreate
Definition: memutils.h:170
#define BTORDER_PROC
Definition: nbtree.h:575
DomainConstraintCache * dcc
Definition: typcache.h:164
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:167
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:141
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:98
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2057
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:569
#define GETSTRUCT(TUP)
Definition: htup_details.h:655
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:74
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:263
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:712
Oid hash_opintype
Definition: typcache.h:59
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:87
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1298
#define HASH_ELEM
Definition: hsearch.h:85
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:81
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1710
uint32 type_id_hash
Definition: typcache.h:36
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:145
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:102
#define RelationGetDescr(relation)
Definition: rel.h:482
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:95
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1201
MemoryContext dccContext
Definition: typcache.c:125
DomainConstraintType constrainttype
Definition: execnodes.h:903
dsa_pointer dshash_table_handle
Definition: dshash.h:24
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:92
DomainConstraintCache * domainData
Definition: typcache.h:114
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2621
#define PointerGetDatum(X)
Definition: postgres.h:556
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:355
struct RecordCacheEntry RecordCacheEntry
struct TypeCacheEntry TypeCacheEntry
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
char * pstrdup(const char *in)
Definition: mcxt.c:1187
Oid typcollation
Definition: typcache.h:46
Session * CurrentSession
Definition: session.c:48
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:135
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:202
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:254
dshash_table * shared_record_table
Definition: session.h:32
Expr * expression_planner(Expr *expr)
Definition: planner.c:6174
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:137
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define AccessShareLock
Definition: lockdefs.h:36
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1554
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:213
Size entrysize
Definition: hsearch.h:72
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:239
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:284
#define TYPECACHE_EQ_OPR
Definition: typcache.h:130
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:97
int errcode(int sqlerrcode)
Definition: elog.c:610
void * stringToNode(const char *str)
Definition: read.c:89
#define HASHEXTENDED_PROC
Definition: hash.h:354
#define MemSet(start, val, len)
Definition: c.h:950
char * format_type_be(Oid type_oid)
Definition: format_type.c:339
static uint64 tupledesc_id_counter
Definition: typcache.c:285
uint32 hashTupleDesc(TupleDesc desc)
Definition: tupdesc.c:574
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1177
static HTAB * RecordCacheHash
Definition: typcache.c:272
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:201
SharedRecordTableKey key
Definition: typcache.c:196
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:919
Form_pg_class rd_rel
Definition: rel.h:109
unsigned int Oid
Definition: postgres_ext.h:31
#define EnumTypIdLabelIndexId
Definition: indexing.h:172
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1091
int16 typlen
Definition: typcache.h:39
#define TupleDescSize(src)
Definition: tupdesc.h:102
#define OidIsValid(objectId)
Definition: c.h:652
bool typbyval
Definition: typcache.h:40
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:561
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:148
#define ConstraintTypidIndexId
Definition: indexing.h:141
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2067
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:357
uint64 dsa_pointer
Definition: dsa.h:62
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:581
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1766
signed int int32
Definition: c.h:363
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:1830
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:1968
static TupleDesc * RecordCacheArray
Definition: typcache.c:275
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1476
Oid domainBaseType
Definition: typcache.h:107
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1347
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:101
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:932
FmgrInfo cmp_proc_finfo
Definition: typcache.h:75
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1452
struct TypeCacheEntry * nextDomain
Definition: typcache.h:126
Definition: dynahash.c:218
dsa_pointer shared_tupdesc
Definition: typcache.c:206
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:476
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:93
pg_atomic_uint32 next_typmod
Definition: typcache.c:171
Bitmapset * sorted_values
Definition: typcache.c:139
void pfree(void *pointer)
Definition: mcxt.c:1057
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:89
#define ObjectIdGetDatum(X)
Definition: postgres.h:507
#define ERROR
Definition: elog.h:43
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:84
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:100
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2308
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:48
int32 tdtypmod
Definition: tupdesc.h:83
#define HTEqualStrategyNumber
Definition: stratnum.h:41
dsa_area * area
Definition: session.h:28
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:186
char * c
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:362
char typstorage
Definition: typcache.h:42
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2150
#define RegProcedureIsValid(p)
Definition: c.h:654
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1382
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:302
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:164
int32 domainBaseTypmod
Definition: typcache.h:108
ExprState * check_exprstate
Definition: execnodes.h:906
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:193
Oid hash_extended_proc
Definition: typcache.h:65
unsigned int uint32
Definition: c.h:375
FmgrInfo hash_proc_finfo
Definition: typcache.h:76
#define TYPECACHE_GT_OPR
Definition: typcache.h:132
MemoryContext CurrentMemoryContext
Definition: mcxt.c:38
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1727
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:150
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:136
#define HASHSTANDARD_PROC
Definition: hash.h:353
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:139
dsa_pointer shared_tupdesc
Definition: typcache.c:185
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2565
union SharedRecordTableKey::@35 u
#define IsParallelWorker()
Definition: parallel.h:61
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:99
MemoryContext TopMemoryContext
Definition: mcxt.c:44
FmgrInfo rng_canonical_finfo
Definition: typcache.h:100
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:141
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:263
MemoryContext refctx
Definition: typcache.h:159
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:97
List * lappend(List *list, void *datum)
Definition: list.c:321
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:142
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1406
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2215
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:1116
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1538
float float4
Definition: c.h:498
#define HASH_BLOBS
Definition: hsearch.h:86
#define TextDatumGetCString(d)
Definition: builtins.h:87
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:77
static int32 RecordCacheArrayLen
Definition: typcache.c:277
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:2733
struct SharedTypmodTableEntry SharedTypmodTableEntry
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1434
static int32 NextRecordTypmod
Definition: typcache.c:278
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
Oid enum_oid
Definition: typcache.c:132
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:144
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:326
uintptr_t Datum
Definition: postgres.h:367
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1164
Oid btree_opintype
Definition: typcache.h:57
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:196
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1582
Size keysize
Definition: hsearch.h:71
struct SharedRecordTableKey SharedRecordTableKey
TupleDesc rd_att
Definition: rel.h:110
HashCompareFunc match
Definition: hsearch.h:74
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:840
FmgrInfo eq_opr_finfo
Definition: typcache.h:74
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:331
#define InvalidOid
Definition: postgres_ext.h:36
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:878
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1202
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:168
Oid fn_oid
Definition: fmgr.h:59
#define ereport(elevel,...)
Definition: elog.h:144
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:1803
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:1946
dshash_table * shared_typmod_table
Definition: session.h:33
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:328
#define TYPECACHE_CMP_PROC
Definition: typcache.h:133
List * lcons(void *datum, List *list)
Definition: list.c:453
#define PG_CATCH()
Definition: elog.h:305
char typtype
Definition: typcache.h:43
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define makeNode(_type_)
Definition: nodes.h:576
FormData_pg_constraint * Form_pg_constraint
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:590
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:502
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:206
#define Assert(condition)
Definition: c.h:746
#define lfirst(lc)
Definition: pg_list.h:169
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1222
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:143
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:393
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:794
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:94
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2410
#define HASH_COMPARE
Definition: hsearch.h:88
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1260
TypeCacheEntry * tcache
Definition: typcache.h:160
void CreateCacheMemoryContext(void)
Definition: catcache.c:620
FormData_pg_type * Form_pg_type
Definition: pg_type.h:255
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:85
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2600
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:101
Oid rng_collation
Definition: typcache.h:98
uint64 tupDesc_identifier
Definition: typcache.h:89
#define PG_RE_THROW()
Definition: elog.h:336
dshash_table_handle record_table_handle
Definition: typcache.c:167
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1401
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:736
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1444
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1129
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1070
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1391
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:96
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1374
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:313
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:116
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:105
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:941
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2709
Oid tdtypeid
Definition: tupdesc.h:82
float4 sort_order
Definition: typcache.c:133
Definition: dsa.c:354
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:233
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:820
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:91
char typalign
Definition: typcache.h:41
void * palloc(Size size)
Definition: mcxt.c:950
int errmsg(const char *fmt,...)
Definition: elog.c:821
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2582
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:797
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:1814
#define elog(elevel,...)
Definition: elog.h:214
int i
#define TYPECACHE_LT_OPR
Definition: typcache.h:131
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:88
#define NameStr(name)
Definition: c.h:623
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:430
bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:411
TupleDesc local_tupdesc
Definition: typcache.c:184
void * arg
TupleDesc tupdesc
Definition: typcache.c:157
int tdrefcount
Definition: tupdesc.h:84
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:286
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:123
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:136
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:385
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:86
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1546
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:140
#define qsort(a, b, c, d)
Definition: port.h:497
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1436
dshash_table_handle typmod_table_handle
Definition: typcache.c:169
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:90
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
#define PG_TRY()
Definition: elog.h:295
#define BTLessStrategyNumber
Definition: stratnum.h:29
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:39
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1190
struct SharedRecordTableEntry SharedRecordTableEntry
Definition: pg_list.h:50
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1398
TupleDesc tupDesc
Definition: typcache.h:88
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:1905
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2284
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1619
static HTAB * TypeCacheHash
Definition: typcache.c:78
long val
Definition: informix.c:664
#define TYPECACHE_HASH_PROC
Definition: typcache.h:134
#define TYPECACHE_TUPDESC
Definition: typcache.h:138
#define PG_END_TRY()
Definition: elog.h:320
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:669
dsm_segment * segment
Definition: session.h:27
static uint64 * RecordIdentifierArray
Definition: typcache.c:276
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2255
HashValueFunc hash
Definition: hsearch.h:73
#define HASH_FUNCTION
Definition: hsearch.h:87
#define dsa_allocate(area, size)
Definition: dsa.h:84
MemoryContext CacheMemoryContext
Definition: mcxt.c:47
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1744
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1151