PostgreSQL Source Code  git master
typcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Core data extracted from the pg_type row is updated
27  * when we detect updates to pg_type. Information dependent on opclasses is
28  * cleared if we detect updates to pg_opclass. We also support clearing the
29  * tuple descriptor and operator/function parts of a rowtype's cache entry,
30  * since those may need to change as a consequence of ALTER TABLE. Domain
31  * constraint changes are also tracked properly.
32  *
33  *
34  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
35  * Portions Copyright (c) 1994, Regents of the University of California
36  *
37  * IDENTIFICATION
38  * src/backend/utils/cache/typcache.c
39  *
40  *-------------------------------------------------------------------------
41  */
42 #include "postgres.h"
43 
44 #include <limits.h>
45 
46 #include "access/hash.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/relation.h"
51 #include "access/session.h"
52 #include "access/table.h"
53 #include "catalog/pg_am.h"
54 #include "catalog/pg_constraint.h"
55 #include "catalog/pg_enum.h"
56 #include "catalog/pg_operator.h"
57 #include "catalog/pg_range.h"
58 #include "catalog/pg_type.h"
59 #include "commands/defrem.h"
60 #include "common/int.h"
61 #include "executor/executor.h"
62 #include "lib/dshash.h"
63 #include "optimizer/optimizer.h"
64 #include "port/pg_bitutils.h"
65 #include "storage/lwlock.h"
66 #include "utils/builtins.h"
67 #include "utils/catcache.h"
68 #include "utils/fmgroids.h"
69 #include "utils/inval.h"
70 #include "utils/lsyscache.h"
71 #include "utils/memutils.h"
72 #include "utils/rel.h"
73 #include "utils/syscache.h"
74 #include "utils/typcache.h"
75 
76 
77 /* The main type cache hashtable searched by lookup_type_cache */
78 static HTAB *TypeCacheHash = NULL;
79 
80 /* List of type cache entries for domain types */
82 
83 /* Private flag bits in the TypeCacheEntry.flags field */
84 #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
85 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
86 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
87 #define TCFLAGS_CHECKED_EQ_OPR 0x000008
88 #define TCFLAGS_CHECKED_LT_OPR 0x000010
89 #define TCFLAGS_CHECKED_GT_OPR 0x000020
90 #define TCFLAGS_CHECKED_CMP_PROC 0x000040
91 #define TCFLAGS_CHECKED_HASH_PROC 0x000080
92 #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
93 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
94 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
95 #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
96 #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
97 #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
98 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
99 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
100 #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
101 #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
102 #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
103 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
104 #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
105 
106 /* The flags associated with equality/comparison/hashing are all but these: */
107 #define TCFLAGS_OPERATOR_FLAGS \
108  (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
109  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
110  TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
111 
112 /*
113  * Data stored about a domain type's constraints. Note that we do not create
114  * this struct for the common case of a constraint-less domain; we just set
115  * domainData to NULL to indicate that.
116  *
117  * Within a DomainConstraintCache, we store expression plan trees, but the
118  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
119  * When needed, expression evaluation nodes are built by flat-copying the
120  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
121  * Such a node tree is not part of the DomainConstraintCache, but is
122  * considered to belong to a DomainConstraintRef.
123  */
125 {
126  List *constraints; /* list of DomainConstraintState nodes */
127  MemoryContext dccContext; /* memory context holding all associated data */
128  long dccRefCount; /* number of references to this struct */
129 };
130 
131 /* Private information to support comparisons of enum values */
132 typedef struct
133 {
134  Oid enum_oid; /* OID of one enum value */
135  float4 sort_order; /* its sort position */
136 } EnumItem;
137 
138 typedef struct TypeCacheEnumData
139 {
140  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
141  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
142  int num_values; /* total number of values in enum */
145 
146 /*
147  * We use a separate table for storing the definitions of non-anonymous
148  * record types. Once defined, a record type will be remembered for the
149  * life of the backend. Subsequent uses of the "same" record type (where
150  * sameness means equalRowTypes) will refer to the existing table entry.
151  *
152  * Stored record types are remembered in a linear array of TupleDescs,
153  * which can be indexed quickly with the assigned typmod. There is also
154  * a hash table to speed searches for matching TupleDescs.
155  */
156 
157 typedef struct RecordCacheEntry
158 {
161 
162 /*
163  * To deal with non-anonymous record types that are exchanged by backends
164  * involved in a parallel query, we also need a shared version of the above.
165  */
167 {
168  /* A hash table for finding a matching TupleDesc. */
170  /* A hash table for finding a TupleDesc by typmod. */
172  /* A source of new record typmod numbers. */
174 };
175 
176 /*
177  * When using shared tuple descriptors as hash table keys we need a way to be
178  * able to search for an equal shared TupleDesc using a backend-local
179  * TupleDesc. So we use this type which can hold either, and hash and compare
180  * functions that know how to handle both.
181  */
182 typedef struct SharedRecordTableKey
183 {
184  union
185  {
188  } u;
189  bool shared;
191 
192 /*
193  * The shared version of RecordCacheEntry. This lets us look up a typmod
194  * using a TupleDesc which may be in local or shared memory.
195  */
197 {
200 
201 /*
202  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
203  * up a TupleDesc in shared memory using a typmod.
204  */
206 {
210 
211 /*
212  * A comparator function for SharedRecordTableKey.
213  */
214 static int
215 shared_record_table_compare(const void *a, const void *b, size_t size,
216  void *arg)
217 {
218  dsa_area *area = (dsa_area *) arg;
221  TupleDesc t1;
222  TupleDesc t2;
223 
224  if (k1->shared)
225  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
226  else
227  t1 = k1->u.local_tupdesc;
228 
229  if (k2->shared)
230  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
231  else
232  t2 = k2->u.local_tupdesc;
233 
234  return equalRowTypes(t1, t2) ? 0 : 1;
235 }
236 
237 /*
238  * A hash function for SharedRecordTableKey.
239  */
240 static uint32
241 shared_record_table_hash(const void *a, size_t size, void *arg)
242 {
243  dsa_area *area = (dsa_area *) arg;
245  TupleDesc t;
246 
247  if (k->shared)
248  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
249  else
250  t = k->u.local_tupdesc;
251 
252  return hashRowType(t);
253 }
254 
255 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
257  sizeof(SharedRecordTableKey), /* unused */
258  sizeof(SharedRecordTableEntry),
263 };
264 
265 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
267  sizeof(uint32),
268  sizeof(SharedTypmodTableEntry),
273 };
274 
275 /* hashtable for recognizing registered record types */
276 static HTAB *RecordCacheHash = NULL;
277 
278 typedef struct RecordCacheArrayEntry
279 {
280  uint64 id;
283 
284 /* array of info about registered record types, indexed by assigned typmod */
286 static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
287 static int32 NextRecordTypmod = 0; /* number of entries used */
288 
289 /*
290  * Process-wide counter for generating unique tupledesc identifiers.
291  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
292  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
293  */
295 
296 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
297 static void load_rangetype_info(TypeCacheEntry *typentry);
298 static void load_multirangetype_info(TypeCacheEntry *typentry);
299 static void load_domaintype_info(TypeCacheEntry *typentry);
300 static int dcs_cmp(const void *a, const void *b);
301 static void decr_dcc_refcount(DomainConstraintCache *dcc);
302 static void dccref_deletion_callback(void *arg);
303 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
304 static bool array_element_has_equality(TypeCacheEntry *typentry);
305 static bool array_element_has_compare(TypeCacheEntry *typentry);
306 static bool array_element_has_hashing(TypeCacheEntry *typentry);
308 static void cache_array_element_properties(TypeCacheEntry *typentry);
309 static bool record_fields_have_equality(TypeCacheEntry *typentry);
310 static bool record_fields_have_compare(TypeCacheEntry *typentry);
311 static bool record_fields_have_hashing(TypeCacheEntry *typentry);
313 static void cache_record_field_properties(TypeCacheEntry *typentry);
314 static bool range_element_has_hashing(TypeCacheEntry *typentry);
316 static void cache_range_element_properties(TypeCacheEntry *typentry);
317 static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
320 static void TypeCacheRelCallback(Datum arg, Oid relid);
321 static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
322 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
323 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
324 static void load_enum_cache_data(TypeCacheEntry *tcache);
325 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
326 static int enum_oid_cmp(const void *left, const void *right);
328  Datum datum);
330 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
331  uint32 typmod);
332 
333 
334 /*
335  * Hash function compatible with one-arg system cache hash function.
336  */
337 static uint32
338 type_cache_syshash(const void *key, Size keysize)
339 {
340  Assert(keysize == sizeof(Oid));
341  return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
342 }
343 
344 /*
345  * lookup_type_cache
346  *
347  * Fetch the type cache entry for the specified datatype, and make sure that
348  * all the fields requested by bits in 'flags' are valid.
349  *
350  * The result is never NULL --- we will ereport() if the passed type OID is
351  * invalid. Note however that we may fail to find one or more of the
352  * values requested by 'flags'; the caller needs to check whether the fields
353  * are InvalidOid or not.
354  */
356 lookup_type_cache(Oid type_id, int flags)
357 {
358  TypeCacheEntry *typentry;
359  bool found;
360 
361  if (TypeCacheHash == NULL)
362  {
363  /* First time through: initialize the hash table */
364  HASHCTL ctl;
365 
366  ctl.keysize = sizeof(Oid);
367  ctl.entrysize = sizeof(TypeCacheEntry);
368 
369  /*
370  * TypeCacheEntry takes hash value from the system cache. For
371  * TypeCacheHash we use the same hash in order to speedup search by
372  * hash value. This is used by hash_seq_init_with_hash_value().
373  */
374  ctl.hash = type_cache_syshash;
375 
376  TypeCacheHash = hash_create("Type information cache", 64,
378 
379  /* Also set up callbacks for SI invalidations */
384 
385  /* Also make sure CacheMemoryContext exists */
386  if (!CacheMemoryContext)
388  }
389 
390  /* Try to look up an existing entry */
392  &type_id,
393  HASH_FIND, NULL);
394  if (typentry == NULL)
395  {
396  /*
397  * If we didn't find one, we want to make one. But first look up the
398  * pg_type row, just to make sure we don't make a cache entry for an
399  * invalid type OID. If the type OID is not valid, present a
400  * user-facing error, since some code paths such as domain_in() allow
401  * this function to be reached with a user-supplied OID.
402  */
403  HeapTuple tp;
404  Form_pg_type typtup;
405 
406  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
407  if (!HeapTupleIsValid(tp))
408  ereport(ERROR,
409  (errcode(ERRCODE_UNDEFINED_OBJECT),
410  errmsg("type with OID %u does not exist", type_id)));
411  typtup = (Form_pg_type) GETSTRUCT(tp);
412  if (!typtup->typisdefined)
413  ereport(ERROR,
414  (errcode(ERRCODE_UNDEFINED_OBJECT),
415  errmsg("type \"%s\" is only a shell",
416  NameStr(typtup->typname))));
417 
418  /* Now make the typcache entry */
420  &type_id,
421  HASH_ENTER, &found);
422  Assert(!found); /* it wasn't there a moment ago */
423 
424  MemSet(typentry, 0, sizeof(TypeCacheEntry));
425 
426  /* These fields can never change, by definition */
427  typentry->type_id = type_id;
428  typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
429 
430  /* Keep this part in sync with the code below */
431  typentry->typlen = typtup->typlen;
432  typentry->typbyval = typtup->typbyval;
433  typentry->typalign = typtup->typalign;
434  typentry->typstorage = typtup->typstorage;
435  typentry->typtype = typtup->typtype;
436  typentry->typrelid = typtup->typrelid;
437  typentry->typsubscript = typtup->typsubscript;
438  typentry->typelem = typtup->typelem;
439  typentry->typcollation = typtup->typcollation;
440  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
441 
442  /* If it's a domain, immediately thread it into the domain cache list */
443  if (typentry->typtype == TYPTYPE_DOMAIN)
444  {
445  typentry->nextDomain = firstDomainTypeEntry;
446  firstDomainTypeEntry = typentry;
447  }
448 
449  ReleaseSysCache(tp);
450  }
451  else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
452  {
453  /*
454  * We have an entry, but its pg_type row got changed, so reload the
455  * data obtained directly from pg_type.
456  */
457  HeapTuple tp;
458  Form_pg_type typtup;
459 
460  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
461  if (!HeapTupleIsValid(tp))
462  ereport(ERROR,
463  (errcode(ERRCODE_UNDEFINED_OBJECT),
464  errmsg("type with OID %u does not exist", type_id)));
465  typtup = (Form_pg_type) GETSTRUCT(tp);
466  if (!typtup->typisdefined)
467  ereport(ERROR,
468  (errcode(ERRCODE_UNDEFINED_OBJECT),
469  errmsg("type \"%s\" is only a shell",
470  NameStr(typtup->typname))));
471 
472  /*
473  * Keep this part in sync with the code above. Many of these fields
474  * shouldn't ever change, particularly typtype, but copy 'em anyway.
475  */
476  typentry->typlen = typtup->typlen;
477  typentry->typbyval = typtup->typbyval;
478  typentry->typalign = typtup->typalign;
479  typentry->typstorage = typtup->typstorage;
480  typentry->typtype = typtup->typtype;
481  typentry->typrelid = typtup->typrelid;
482  typentry->typsubscript = typtup->typsubscript;
483  typentry->typelem = typtup->typelem;
484  typentry->typcollation = typtup->typcollation;
485  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
486 
487  ReleaseSysCache(tp);
488  }
489 
490  /*
491  * Look up opclasses if we haven't already and any dependent info is
492  * requested.
493  */
498  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
499  {
500  Oid opclass;
501 
502  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
503  if (OidIsValid(opclass))
504  {
505  typentry->btree_opf = get_opclass_family(opclass);
506  typentry->btree_opintype = get_opclass_input_type(opclass);
507  }
508  else
509  {
510  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
511  }
512 
513  /*
514  * Reset information derived from btree opclass. Note in particular
515  * that we'll redetermine the eq_opr even if we previously found one;
516  * this matters in case a btree opclass has been added to a type that
517  * previously had only a hash opclass.
518  */
519  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
524  }
525 
526  /*
527  * If we need to look up equality operator, and there's no btree opclass,
528  * force lookup of hash opclass.
529  */
530  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
531  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
532  typentry->btree_opf == InvalidOid)
533  flags |= TYPECACHE_HASH_OPFAMILY;
534 
539  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
540  {
541  Oid opclass;
542 
543  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
544  if (OidIsValid(opclass))
545  {
546  typentry->hash_opf = get_opclass_family(opclass);
547  typentry->hash_opintype = get_opclass_input_type(opclass);
548  }
549  else
550  {
551  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
552  }
553 
554  /*
555  * Reset information derived from hash opclass. We do *not* reset the
556  * eq_opr; if we already found one from the btree opclass, that
557  * decision is still good.
558  */
559  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
561  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
562  }
563 
564  /*
565  * Look for requested operators and functions, if we haven't already.
566  */
567  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
568  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
569  {
570  Oid eq_opr = InvalidOid;
571 
572  if (typentry->btree_opf != InvalidOid)
573  eq_opr = get_opfamily_member(typentry->btree_opf,
574  typentry->btree_opintype,
575  typentry->btree_opintype,
577  if (eq_opr == InvalidOid &&
578  typentry->hash_opf != InvalidOid)
579  eq_opr = get_opfamily_member(typentry->hash_opf,
580  typentry->hash_opintype,
581  typentry->hash_opintype,
583 
584  /*
585  * If the proposed equality operator is array_eq or record_eq, check
586  * to see if the element type or column types support equality. If
587  * not, array_eq or record_eq would fail at runtime, so we don't want
588  * to report that the type has equality. (We can omit similar
589  * checking for ranges and multiranges because ranges can't be created
590  * in the first place unless their subtypes support equality.)
591  */
592  if (eq_opr == ARRAY_EQ_OP &&
593  !array_element_has_equality(typentry))
594  eq_opr = InvalidOid;
595  else if (eq_opr == RECORD_EQ_OP &&
596  !record_fields_have_equality(typentry))
597  eq_opr = InvalidOid;
598 
599  /* Force update of eq_opr_finfo only if we're changing state */
600  if (typentry->eq_opr != eq_opr)
601  typentry->eq_opr_finfo.fn_oid = InvalidOid;
602 
603  typentry->eq_opr = eq_opr;
604 
605  /*
606  * Reset info about hash functions whenever we pick up new info about
607  * equality operator. This is so we can ensure that the hash
608  * functions match the operator.
609  */
610  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
612  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
613  }
614  if ((flags & TYPECACHE_LT_OPR) &&
615  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
616  {
617  Oid lt_opr = InvalidOid;
618 
619  if (typentry->btree_opf != InvalidOid)
620  lt_opr = get_opfamily_member(typentry->btree_opf,
621  typentry->btree_opintype,
622  typentry->btree_opintype,
624 
625  /*
626  * As above, make sure array_cmp or record_cmp will succeed; but again
627  * we need no special check for ranges or multiranges.
628  */
629  if (lt_opr == ARRAY_LT_OP &&
630  !array_element_has_compare(typentry))
631  lt_opr = InvalidOid;
632  else if (lt_opr == RECORD_LT_OP &&
633  !record_fields_have_compare(typentry))
634  lt_opr = InvalidOid;
635 
636  typentry->lt_opr = lt_opr;
637  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
638  }
639  if ((flags & TYPECACHE_GT_OPR) &&
640  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
641  {
642  Oid gt_opr = InvalidOid;
643 
644  if (typentry->btree_opf != InvalidOid)
645  gt_opr = get_opfamily_member(typentry->btree_opf,
646  typentry->btree_opintype,
647  typentry->btree_opintype,
649 
650  /*
651  * As above, make sure array_cmp or record_cmp will succeed; but again
652  * we need no special check for ranges or multiranges.
653  */
654  if (gt_opr == ARRAY_GT_OP &&
655  !array_element_has_compare(typentry))
656  gt_opr = InvalidOid;
657  else if (gt_opr == RECORD_GT_OP &&
658  !record_fields_have_compare(typentry))
659  gt_opr = InvalidOid;
660 
661  typentry->gt_opr = gt_opr;
662  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
663  }
664  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
665  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
666  {
667  Oid cmp_proc = InvalidOid;
668 
669  if (typentry->btree_opf != InvalidOid)
670  cmp_proc = get_opfamily_proc(typentry->btree_opf,
671  typentry->btree_opintype,
672  typentry->btree_opintype,
673  BTORDER_PROC);
674 
675  /*
676  * As above, make sure array_cmp or record_cmp will succeed; but again
677  * we need no special check for ranges or multiranges.
678  */
679  if (cmp_proc == F_BTARRAYCMP &&
680  !array_element_has_compare(typentry))
681  cmp_proc = InvalidOid;
682  else if (cmp_proc == F_BTRECORDCMP &&
683  !record_fields_have_compare(typentry))
684  cmp_proc = InvalidOid;
685 
686  /* Force update of cmp_proc_finfo only if we're changing state */
687  if (typentry->cmp_proc != cmp_proc)
688  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
689 
690  typentry->cmp_proc = cmp_proc;
691  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
692  }
694  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
695  {
696  Oid hash_proc = InvalidOid;
697 
698  /*
699  * We insist that the eq_opr, if one has been determined, match the
700  * hash opclass; else report there is no hash function.
701  */
702  if (typentry->hash_opf != InvalidOid &&
703  (!OidIsValid(typentry->eq_opr) ||
704  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
705  typentry->hash_opintype,
706  typentry->hash_opintype,
708  hash_proc = get_opfamily_proc(typentry->hash_opf,
709  typentry->hash_opintype,
710  typentry->hash_opintype,
712 
713  /*
714  * As above, make sure hash_array, hash_record, or hash_range will
715  * succeed.
716  */
717  if (hash_proc == F_HASH_ARRAY &&
718  !array_element_has_hashing(typentry))
719  hash_proc = InvalidOid;
720  else if (hash_proc == F_HASH_RECORD &&
721  !record_fields_have_hashing(typentry))
722  hash_proc = InvalidOid;
723  else if (hash_proc == F_HASH_RANGE &&
724  !range_element_has_hashing(typentry))
725  hash_proc = InvalidOid;
726 
727  /*
728  * Likewise for hash_multirange.
729  */
730  if (hash_proc == F_HASH_MULTIRANGE &&
732  hash_proc = InvalidOid;
733 
734  /* Force update of hash_proc_finfo only if we're changing state */
735  if (typentry->hash_proc != hash_proc)
736  typentry->hash_proc_finfo.fn_oid = InvalidOid;
737 
738  typentry->hash_proc = hash_proc;
739  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
740  }
741  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
744  {
745  Oid hash_extended_proc = InvalidOid;
746 
747  /*
748  * We insist that the eq_opr, if one has been determined, match the
749  * hash opclass; else report there is no hash function.
750  */
751  if (typentry->hash_opf != InvalidOid &&
752  (!OidIsValid(typentry->eq_opr) ||
753  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
754  typentry->hash_opintype,
755  typentry->hash_opintype,
757  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
758  typentry->hash_opintype,
759  typentry->hash_opintype,
761 
762  /*
763  * As above, make sure hash_array_extended, hash_record_extended, or
764  * hash_range_extended will succeed.
765  */
766  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
768  hash_extended_proc = InvalidOid;
769  else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
771  hash_extended_proc = InvalidOid;
772  else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
774  hash_extended_proc = InvalidOid;
775 
776  /*
777  * Likewise for hash_multirange_extended.
778  */
779  if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
781  hash_extended_proc = InvalidOid;
782 
783  /* Force update of proc finfo only if we're changing state */
784  if (typentry->hash_extended_proc != hash_extended_proc)
786 
787  typentry->hash_extended_proc = hash_extended_proc;
789  }
790 
791  /*
792  * Set up fmgr lookup info as requested
793  *
794  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
795  * which is not quite right (they're really in the hash table's private
796  * memory context) but this will do for our purposes.
797  *
798  * Note: the code above avoids invalidating the finfo structs unless the
799  * referenced operator/function OID actually changes. This is to prevent
800  * unnecessary leakage of any subsidiary data attached to an finfo, since
801  * that would cause session-lifespan memory leaks.
802  */
803  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
804  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
805  typentry->eq_opr != InvalidOid)
806  {
807  Oid eq_opr_func;
808 
809  eq_opr_func = get_opcode(typentry->eq_opr);
810  if (eq_opr_func != InvalidOid)
811  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
813  }
814  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
815  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
816  typentry->cmp_proc != InvalidOid)
817  {
818  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
820  }
821  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
822  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
823  typentry->hash_proc != InvalidOid)
824  {
825  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
827  }
828  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
830  typentry->hash_extended_proc != InvalidOid)
831  {
833  &typentry->hash_extended_proc_finfo,
835  }
836 
837  /*
838  * If it's a composite type (row type), get tupdesc if requested
839  */
840  if ((flags & TYPECACHE_TUPDESC) &&
841  typentry->tupDesc == NULL &&
842  typentry->typtype == TYPTYPE_COMPOSITE)
843  {
844  load_typcache_tupdesc(typentry);
845  }
846 
847  /*
848  * If requested, get information about a range type
849  *
850  * This includes making sure that the basic info about the range element
851  * type is up-to-date.
852  */
853  if ((flags & TYPECACHE_RANGE_INFO) &&
854  typentry->typtype == TYPTYPE_RANGE)
855  {
856  if (typentry->rngelemtype == NULL)
857  load_rangetype_info(typentry);
858  else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
859  (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
860  }
861 
862  /*
863  * If requested, get information about a multirange type
864  */
865  if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
866  typentry->rngtype == NULL &&
867  typentry->typtype == TYPTYPE_MULTIRANGE)
868  {
869  load_multirangetype_info(typentry);
870  }
871 
872  /*
873  * If requested, get information about a domain type
874  */
875  if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
876  typentry->domainBaseType == InvalidOid &&
877  typentry->typtype == TYPTYPE_DOMAIN)
878  {
879  typentry->domainBaseTypmod = -1;
880  typentry->domainBaseType =
881  getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
882  }
883  if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
884  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
885  typentry->typtype == TYPTYPE_DOMAIN)
886  {
887  load_domaintype_info(typentry);
888  }
889 
890  return typentry;
891 }
892 
893 /*
894  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
895  */
896 static void
898 {
899  Relation rel;
900 
901  if (!OidIsValid(typentry->typrelid)) /* should not happen */
902  elog(ERROR, "invalid typrelid for composite type %u",
903  typentry->type_id);
904  rel = relation_open(typentry->typrelid, AccessShareLock);
905  Assert(rel->rd_rel->reltype == typentry->type_id);
906 
907  /*
908  * Link to the tupdesc and increment its refcount (we assert it's a
909  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
910  * because the reference mustn't be entered in the current resource owner;
911  * it can outlive the current query.
912  */
913  typentry->tupDesc = RelationGetDescr(rel);
914 
915  Assert(typentry->tupDesc->tdrefcount > 0);
916  typentry->tupDesc->tdrefcount++;
917 
918  /*
919  * In future, we could take some pains to not change tupDesc_identifier if
920  * the tupdesc didn't really change; but for now it's not worth it.
921  */
923 
925 }
926 
927 /*
928  * load_rangetype_info --- helper routine to set up range type information
929  */
930 static void
932 {
933  Form_pg_range pg_range;
934  HeapTuple tup;
935  Oid subtypeOid;
936  Oid opclassOid;
937  Oid canonicalOid;
938  Oid subdiffOid;
939  Oid opfamilyOid;
940  Oid opcintype;
941  Oid cmpFnOid;
942 
943  /* get information from pg_range */
944  tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
945  /* should not fail, since we already checked typtype ... */
946  if (!HeapTupleIsValid(tup))
947  elog(ERROR, "cache lookup failed for range type %u",
948  typentry->type_id);
949  pg_range = (Form_pg_range) GETSTRUCT(tup);
950 
951  subtypeOid = pg_range->rngsubtype;
952  typentry->rng_collation = pg_range->rngcollation;
953  opclassOid = pg_range->rngsubopc;
954  canonicalOid = pg_range->rngcanonical;
955  subdiffOid = pg_range->rngsubdiff;
956 
957  ReleaseSysCache(tup);
958 
959  /* get opclass properties and look up the comparison function */
960  opfamilyOid = get_opclass_family(opclassOid);
961  opcintype = get_opclass_input_type(opclassOid);
962  typentry->rng_opfamily = opfamilyOid;
963 
964  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
965  BTORDER_PROC);
966  if (!RegProcedureIsValid(cmpFnOid))
967  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
968  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
969 
970  /* set up cached fmgrinfo structs */
971  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
973  if (OidIsValid(canonicalOid))
974  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
976  if (OidIsValid(subdiffOid))
977  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
979 
980  /* Lastly, set up link to the element type --- this marks data valid */
981  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
982 }
983 
984 /*
985  * load_multirangetype_info --- helper routine to set up multirange type
986  * information
987  */
988 static void
990 {
991  Oid rangetypeOid;
992 
993  rangetypeOid = get_multirange_range(typentry->type_id);
994  if (!OidIsValid(rangetypeOid))
995  elog(ERROR, "cache lookup failed for multirange type %u",
996  typentry->type_id);
997 
998  typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
999 }
1000 
1001 /*
1002  * load_domaintype_info --- helper routine to set up domain constraint info
1003  *
1004  * Note: we assume we're called in a relatively short-lived context, so it's
1005  * okay to leak data into the current context while scanning pg_constraint.
1006  * We build the new DomainConstraintCache data in a context underneath
1007  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1008  * complete.
1009  */
1010 static void
1012 {
1013  Oid typeOid = typentry->type_id;
1014  DomainConstraintCache *dcc;
1015  bool notNull = false;
1016  DomainConstraintState **ccons;
1017  int cconslen;
1018  Relation conRel;
1019  MemoryContext oldcxt;
1020 
1021  /*
1022  * If we're here, any existing constraint info is stale, so release it.
1023  * For safety, be sure to null the link before trying to delete the data.
1024  */
1025  if (typentry->domainData)
1026  {
1027  dcc = typentry->domainData;
1028  typentry->domainData = NULL;
1029  decr_dcc_refcount(dcc);
1030  }
1031 
1032  /*
1033  * We try to optimize the common case of no domain constraints, so don't
1034  * create the dcc object and context until we find a constraint. Likewise
1035  * for the temp sorting array.
1036  */
1037  dcc = NULL;
1038  ccons = NULL;
1039  cconslen = 0;
1040 
1041  /*
1042  * Scan pg_constraint for relevant constraints. We want to find
1043  * constraints for not just this domain, but any ancestor domains, so the
1044  * outer loop crawls up the domain stack.
1045  */
1046  conRel = table_open(ConstraintRelationId, AccessShareLock);
1047 
1048  for (;;)
1049  {
1050  HeapTuple tup;
1051  HeapTuple conTup;
1052  Form_pg_type typTup;
1053  int nccons = 0;
1054  ScanKeyData key[1];
1055  SysScanDesc scan;
1056 
1057  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1058  if (!HeapTupleIsValid(tup))
1059  elog(ERROR, "cache lookup failed for type %u", typeOid);
1060  typTup = (Form_pg_type) GETSTRUCT(tup);
1061 
1062  if (typTup->typtype != TYPTYPE_DOMAIN)
1063  {
1064  /* Not a domain, so done */
1065  ReleaseSysCache(tup);
1066  break;
1067  }
1068 
1069  /* Test for NOT NULL Constraint */
1070  if (typTup->typnotnull)
1071  notNull = true;
1072 
1073  /* Look for CHECK Constraints on this domain */
1074  ScanKeyInit(&key[0],
1075  Anum_pg_constraint_contypid,
1076  BTEqualStrategyNumber, F_OIDEQ,
1077  ObjectIdGetDatum(typeOid));
1078 
1079  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1080  NULL, 1, key);
1081 
1082  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1083  {
1085  Datum val;
1086  bool isNull;
1087  char *constring;
1088  Expr *check_expr;
1090 
1091  /* Ignore non-CHECK constraints */
1092  if (c->contype != CONSTRAINT_CHECK)
1093  continue;
1094 
1095  /* Not expecting conbin to be NULL, but we'll test for it anyway */
1096  val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1097  conRel->rd_att, &isNull);
1098  if (isNull)
1099  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1100  NameStr(typTup->typname), NameStr(c->conname));
1101 
1102  /* Convert conbin to C string in caller context */
1103  constring = TextDatumGetCString(val);
1104 
1105  /* Create the DomainConstraintCache object and context if needed */
1106  if (dcc == NULL)
1107  {
1108  MemoryContext cxt;
1109 
1111  "Domain constraints",
1113  dcc = (DomainConstraintCache *)
1115  dcc->constraints = NIL;
1116  dcc->dccContext = cxt;
1117  dcc->dccRefCount = 0;
1118  }
1119 
1120  /* Create node trees in DomainConstraintCache's context */
1121  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1122 
1123  check_expr = (Expr *) stringToNode(constring);
1124 
1125  /*
1126  * Plan the expression, since ExecInitExpr will expect that.
1127  *
1128  * Note: caching the result of expression_planner() is not very
1129  * good practice. Ideally we'd use a CachedExpression here so
1130  * that we would react promptly to, eg, changes in inlined
1131  * functions. However, because we don't support mutable domain
1132  * CHECK constraints, it's not really clear that it's worth the
1133  * extra overhead to do that.
1134  */
1135  check_expr = expression_planner(check_expr);
1136 
1139  r->name = pstrdup(NameStr(c->conname));
1140  r->check_expr = check_expr;
1141  r->check_exprstate = NULL;
1142 
1143  MemoryContextSwitchTo(oldcxt);
1144 
1145  /* Accumulate constraints in an array, for sorting below */
1146  if (ccons == NULL)
1147  {
1148  cconslen = 8;
1149  ccons = (DomainConstraintState **)
1150  palloc(cconslen * sizeof(DomainConstraintState *));
1151  }
1152  else if (nccons >= cconslen)
1153  {
1154  cconslen *= 2;
1155  ccons = (DomainConstraintState **)
1156  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1157  }
1158  ccons[nccons++] = r;
1159  }
1160 
1161  systable_endscan(scan);
1162 
1163  if (nccons > 0)
1164  {
1165  /*
1166  * Sort the items for this domain, so that CHECKs are applied in a
1167  * deterministic order.
1168  */
1169  if (nccons > 1)
1170  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1171 
1172  /*
1173  * Now attach them to the overall list. Use lcons() here because
1174  * constraints of parent domains should be applied earlier.
1175  */
1176  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1177  while (nccons > 0)
1178  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1179  MemoryContextSwitchTo(oldcxt);
1180  }
1181 
1182  /* loop to next domain in stack */
1183  typeOid = typTup->typbasetype;
1184  ReleaseSysCache(tup);
1185  }
1186 
1187  table_close(conRel, AccessShareLock);
1188 
1189  /*
1190  * Only need to add one NOT NULL check regardless of how many domains in
1191  * the stack request it.
1192  */
1193  if (notNull)
1194  {
1196 
1197  /* Create the DomainConstraintCache object and context if needed */
1198  if (dcc == NULL)
1199  {
1200  MemoryContext cxt;
1201 
1203  "Domain constraints",
1205  dcc = (DomainConstraintCache *)
1207  dcc->constraints = NIL;
1208  dcc->dccContext = cxt;
1209  dcc->dccRefCount = 0;
1210  }
1211 
1212  /* Create node trees in DomainConstraintCache's context */
1213  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1214 
1216 
1218  r->name = pstrdup("NOT NULL");
1219  r->check_expr = NULL;
1220  r->check_exprstate = NULL;
1221 
1222  /* lcons to apply the nullness check FIRST */
1223  dcc->constraints = lcons(r, dcc->constraints);
1224 
1225  MemoryContextSwitchTo(oldcxt);
1226  }
1227 
1228  /*
1229  * If we made a constraint object, move it into CacheMemoryContext and
1230  * attach it to the typcache entry.
1231  */
1232  if (dcc)
1233  {
1235  typentry->domainData = dcc;
1236  dcc->dccRefCount++; /* count the typcache's reference */
1237  }
1238 
1239  /* Either way, the typcache entry's domain data is now valid. */
1241 }
1242 
1243 /*
1244  * qsort comparator to sort DomainConstraintState pointers by name
1245  */
1246 static int
1247 dcs_cmp(const void *a, const void *b)
1248 {
1249  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1250  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1251 
1252  return strcmp((*ca)->name, (*cb)->name);
1253 }
1254 
1255 /*
1256  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1257  * and free it if no references remain
1258  */
1259 static void
1261 {
1262  Assert(dcc->dccRefCount > 0);
1263  if (--(dcc->dccRefCount) <= 0)
1265 }
1266 
1267 /*
1268  * Context reset/delete callback for a DomainConstraintRef
1269  */
1270 static void
1272 {
1274  DomainConstraintCache *dcc = ref->dcc;
1275 
1276  /* Paranoia --- be sure link is nulled before trying to release */
1277  if (dcc)
1278  {
1279  ref->constraints = NIL;
1280  ref->dcc = NULL;
1281  decr_dcc_refcount(dcc);
1282  }
1283 }
1284 
1285 /*
1286  * prep_domain_constraints --- prepare domain constraints for execution
1287  *
1288  * The expression trees stored in the DomainConstraintCache's list are
1289  * converted to executable expression state trees stored in execctx.
1290  */
1291 static List *
1293 {
1294  List *result = NIL;
1295  MemoryContext oldcxt;
1296  ListCell *lc;
1297 
1298  oldcxt = MemoryContextSwitchTo(execctx);
1299 
1300  foreach(lc, constraints)
1301  {
1303  DomainConstraintState *newr;
1304 
1306  newr->constrainttype = r->constrainttype;
1307  newr->name = r->name;
1308  newr->check_expr = r->check_expr;
1309  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1310 
1311  result = lappend(result, newr);
1312  }
1313 
1314  MemoryContextSwitchTo(oldcxt);
1315 
1316  return result;
1317 }
1318 
1319 /*
1320  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1321  *
1322  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1323  * lives. The ref will be cleaned up when that context is reset/deleted.
1324  *
1325  * Caller must also tell us whether it wants check_exprstate fields to be
1326  * computed in the DomainConstraintState nodes attached to this ref.
1327  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1328  */
1329 void
1331  MemoryContext refctx, bool need_exprstate)
1332 {
1333  /* Look up the typcache entry --- we assume it survives indefinitely */
1335  ref->need_exprstate = need_exprstate;
1336  /* For safety, establish the callback before acquiring a refcount */
1337  ref->refctx = refctx;
1338  ref->dcc = NULL;
1340  ref->callback.arg = (void *) ref;
1342  /* Acquire refcount if there are constraints, and set up exported list */
1343  if (ref->tcache->domainData)
1344  {
1345  ref->dcc = ref->tcache->domainData;
1346  ref->dcc->dccRefCount++;
1347  if (ref->need_exprstate)
1349  ref->refctx);
1350  else
1351  ref->constraints = ref->dcc->constraints;
1352  }
1353  else
1354  ref->constraints = NIL;
1355 }
1356 
1357 /*
1358  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1359  *
1360  * If the domain's constraint set changed, ref->constraints is updated to
1361  * point at a new list of cached constraints.
1362  *
1363  * In the normal case where nothing happened to the domain, this is cheap
1364  * enough that it's reasonable (and expected) to check before *each* use
1365  * of the constraint info.
1366  */
1367 void
1369 {
1370  TypeCacheEntry *typentry = ref->tcache;
1371 
1372  /* Make sure typcache entry's data is up to date */
1373  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1374  typentry->typtype == TYPTYPE_DOMAIN)
1375  load_domaintype_info(typentry);
1376 
1377  /* Transfer to ref object if there's new info, adjusting refcounts */
1378  if (ref->dcc != typentry->domainData)
1379  {
1380  /* Paranoia --- be sure link is nulled before trying to release */
1381  DomainConstraintCache *dcc = ref->dcc;
1382 
1383  if (dcc)
1384  {
1385  /*
1386  * Note: we just leak the previous list of executable domain
1387  * constraints. Alternatively, we could keep those in a child
1388  * context of ref->refctx and free that context at this point.
1389  * However, in practice this code path will be taken so seldom
1390  * that the extra bookkeeping for a child context doesn't seem
1391  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1392  */
1393  ref->constraints = NIL;
1394  ref->dcc = NULL;
1395  decr_dcc_refcount(dcc);
1396  }
1397  dcc = typentry->domainData;
1398  if (dcc)
1399  {
1400  ref->dcc = dcc;
1401  dcc->dccRefCount++;
1402  if (ref->need_exprstate)
1404  ref->refctx);
1405  else
1406  ref->constraints = dcc->constraints;
1407  }
1408  }
1409 }
1410 
1411 /*
1412  * DomainHasConstraints --- utility routine to check if a domain has constraints
1413  *
1414  * This is defined to return false, not fail, if type is not a domain.
1415  */
1416 bool
1418 {
1419  TypeCacheEntry *typentry;
1420 
1421  /*
1422  * Note: a side effect is to cause the typcache's domain data to become
1423  * valid. This is fine since we'll likely need it soon if there is any.
1424  */
1425  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1426 
1427  return (typentry->domainData != NULL);
1428 }
1429 
1430 
1431 /*
1432  * array_element_has_equality and friends are helper routines to check
1433  * whether we should believe that array_eq and related functions will work
1434  * on the given array type or composite type.
1435  *
1436  * The logic above may call these repeatedly on the same type entry, so we
1437  * make use of the typentry->flags field to cache the results once known.
1438  * Also, we assume that we'll probably want all these facts about the type
1439  * if we want any, so we cache them all using only one lookup of the
1440  * component datatype(s).
1441  */
1442 
1443 static bool
1445 {
1446  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1448  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1449 }
1450 
1451 static bool
1453 {
1454  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1456  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1457 }
1458 
1459 static bool
1461 {
1462  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1464  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1465 }
1466 
1467 static bool
1469 {
1470  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1472  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1473 }
1474 
1475 static void
1477 {
1478  Oid elem_type = get_base_element_type(typentry->type_id);
1479 
1480  if (OidIsValid(elem_type))
1481  {
1482  TypeCacheEntry *elementry;
1483 
1484  elementry = lookup_type_cache(elem_type,
1489  if (OidIsValid(elementry->eq_opr))
1490  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1491  if (OidIsValid(elementry->cmp_proc))
1492  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1493  if (OidIsValid(elementry->hash_proc))
1494  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1495  if (OidIsValid(elementry->hash_extended_proc))
1497  }
1499 }
1500 
1501 /*
1502  * Likewise, some helper functions for composite types.
1503  */
1504 
1505 static bool
1507 {
1508  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1510  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1511 }
1512 
1513 static bool
1515 {
1516  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1518  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1519 }
1520 
1521 static bool
1523 {
1524  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1526  return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1527 }
1528 
1529 static bool
1531 {
1532  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1534  return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1535 }
1536 
1537 static void
1539 {
1540  /*
1541  * For type RECORD, we can't really tell what will work, since we don't
1542  * have access here to the specific anonymous type. Just assume that
1543  * equality and comparison will (we may get a failure at runtime). We
1544  * could also claim that hashing works, but then if code that has the
1545  * option between a comparison-based (sort-based) and a hash-based plan
1546  * chooses hashing, stuff could fail that would otherwise work if it chose
1547  * a comparison-based plan. In practice more types support comparison
1548  * than hashing.
1549  */
1550  if (typentry->type_id == RECORDOID)
1551  {
1552  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1554  }
1555  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1556  {
1557  TupleDesc tupdesc;
1558  int newflags;
1559  int i;
1560 
1561  /* Fetch composite type's tupdesc if we don't have it already */
1562  if (typentry->tupDesc == NULL)
1563  load_typcache_tupdesc(typentry);
1564  tupdesc = typentry->tupDesc;
1565 
1566  /* Must bump the refcount while we do additional catalog lookups */
1567  IncrTupleDescRefCount(tupdesc);
1568 
1569  /* Have each property if all non-dropped fields have the property */
1570  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1574  for (i = 0; i < tupdesc->natts; i++)
1575  {
1576  TypeCacheEntry *fieldentry;
1577  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1578 
1579  if (attr->attisdropped)
1580  continue;
1581 
1582  fieldentry = lookup_type_cache(attr->atttypid,
1587  if (!OidIsValid(fieldentry->eq_opr))
1588  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1589  if (!OidIsValid(fieldentry->cmp_proc))
1590  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1591  if (!OidIsValid(fieldentry->hash_proc))
1592  newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1593  if (!OidIsValid(fieldentry->hash_extended_proc))
1595 
1596  /* We can drop out of the loop once we disprove all bits */
1597  if (newflags == 0)
1598  break;
1599  }
1600  typentry->flags |= newflags;
1601 
1602  DecrTupleDescRefCount(tupdesc);
1603  }
1604  else if (typentry->typtype == TYPTYPE_DOMAIN)
1605  {
1606  /* If it's domain over composite, copy base type's properties */
1607  TypeCacheEntry *baseentry;
1608 
1609  /* load up basetype info if we didn't already */
1610  if (typentry->domainBaseType == InvalidOid)
1611  {
1612  typentry->domainBaseTypmod = -1;
1613  typentry->domainBaseType =
1614  getBaseTypeAndTypmod(typentry->type_id,
1615  &typentry->domainBaseTypmod);
1616  }
1617  baseentry = lookup_type_cache(typentry->domainBaseType,
1622  if (baseentry->typtype == TYPTYPE_COMPOSITE)
1623  {
1625  typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1629  }
1630  }
1632 }
1633 
1634 /*
1635  * Likewise, some helper functions for range and multirange types.
1636  *
1637  * We can borrow the flag bits for array element properties to use for range
1638  * element properties, since those flag bits otherwise have no use in a
1639  * range or multirange type's typcache entry.
1640  */
1641 
1642 static bool
1644 {
1645  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1647  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1648 }
1649 
1650 static bool
1652 {
1653  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1655  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1656 }
1657 
1658 static void
1660 {
1661  /* load up subtype link if we didn't already */
1662  if (typentry->rngelemtype == NULL &&
1663  typentry->typtype == TYPTYPE_RANGE)
1664  load_rangetype_info(typentry);
1665 
1666  if (typentry->rngelemtype != NULL)
1667  {
1668  TypeCacheEntry *elementry;
1669 
1670  /* might need to calculate subtype's hash function properties */
1671  elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1674  if (OidIsValid(elementry->hash_proc))
1675  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1676  if (OidIsValid(elementry->hash_extended_proc))
1678  }
1680 }
1681 
1682 static bool
1684 {
1685  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1687  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1688 }
1689 
1690 static bool
1692 {
1693  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1695  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1696 }
1697 
1698 static void
1700 {
1701  /* load up range link if we didn't already */
1702  if (typentry->rngtype == NULL &&
1703  typentry->typtype == TYPTYPE_MULTIRANGE)
1704  load_multirangetype_info(typentry);
1705 
1706  if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1707  {
1708  TypeCacheEntry *elementry;
1709 
1710  /* might need to calculate subtype's hash function properties */
1711  elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1714  if (OidIsValid(elementry->hash_proc))
1715  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1716  if (OidIsValid(elementry->hash_extended_proc))
1718  }
1720 }
1721 
1722 /*
1723  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1724  * to store 'typmod'.
1725  */
1726 static void
1728 {
1729  if (RecordCacheArray == NULL)
1730  {
1733  64 * sizeof(RecordCacheArrayEntry));
1734  RecordCacheArrayLen = 64;
1735  }
1736 
1737  if (typmod >= RecordCacheArrayLen)
1738  {
1739  int32 newlen = pg_nextpower2_32(typmod + 1);
1740 
1744  newlen);
1745  RecordCacheArrayLen = newlen;
1746  }
1747 }
1748 
1749 /*
1750  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1751  *
1752  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1753  * hasn't had its refcount bumped.
1754  */
1755 static TupleDesc
1756 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1757 {
1758  if (type_id != RECORDOID)
1759  {
1760  /*
1761  * It's a named composite type, so use the regular typcache.
1762  */
1763  TypeCacheEntry *typentry;
1764 
1765  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1766  if (typentry->tupDesc == NULL && !noError)
1767  ereport(ERROR,
1768  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1769  errmsg("type %s is not composite",
1770  format_type_be(type_id))));
1771  return typentry->tupDesc;
1772  }
1773  else
1774  {
1775  /*
1776  * It's a transient record type, so look in our record-type table.
1777  */
1778  if (typmod >= 0)
1779  {
1780  /* It is already in our local cache? */
1781  if (typmod < RecordCacheArrayLen &&
1782  RecordCacheArray[typmod].tupdesc != NULL)
1783  return RecordCacheArray[typmod].tupdesc;
1784 
1785  /* Are we attached to a shared record typmod registry? */
1787  {
1788  SharedTypmodTableEntry *entry;
1789 
1790  /* Try to find it in the shared typmod index. */
1792  &typmod, false);
1793  if (entry != NULL)
1794  {
1795  TupleDesc tupdesc;
1796 
1797  tupdesc = (TupleDesc)
1799  entry->shared_tupdesc);
1800  Assert(typmod == tupdesc->tdtypmod);
1801 
1802  /* We may need to extend the local RecordCacheArray. */
1804 
1805  /*
1806  * Our local array can now point directly to the TupleDesc
1807  * in shared memory, which is non-reference-counted.
1808  */
1809  RecordCacheArray[typmod].tupdesc = tupdesc;
1810  Assert(tupdesc->tdrefcount == -1);
1811 
1812  /*
1813  * We don't share tupdesc identifiers across processes, so
1814  * assign one locally.
1815  */
1817 
1819  entry);
1820 
1821  return RecordCacheArray[typmod].tupdesc;
1822  }
1823  }
1824  }
1825 
1826  if (!noError)
1827  ereport(ERROR,
1828  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1829  errmsg("record type has not been registered")));
1830  return NULL;
1831  }
1832 }
1833 
1834 /*
1835  * lookup_rowtype_tupdesc
1836  *
1837  * Given a typeid/typmod that should describe a known composite type,
1838  * return the tuple descriptor for the type. Will ereport on failure.
1839  * (Use ereport because this is reachable with user-specified OIDs,
1840  * for example from record_in().)
1841  *
1842  * Note: on success, we increment the refcount of the returned TupleDesc,
1843  * and log the reference in CurrentResourceOwner. Caller must call
1844  * ReleaseTupleDesc when done using the tupdesc. (There are some
1845  * cases in which the returned tupdesc is not refcounted, in which
1846  * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1847  * the tupdesc is guaranteed to live till process exit.)
1848  */
1849 TupleDesc
1851 {
1852  TupleDesc tupDesc;
1853 
1854  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1855  PinTupleDesc(tupDesc);
1856  return tupDesc;
1857 }
1858 
1859 /*
1860  * lookup_rowtype_tupdesc_noerror
1861  *
1862  * As above, but if the type is not a known composite type and noError
1863  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1864  * type_id is passed, you'll get an ereport anyway.)
1865  */
1866 TupleDesc
1867 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1868 {
1869  TupleDesc tupDesc;
1870 
1871  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1872  if (tupDesc != NULL)
1873  PinTupleDesc(tupDesc);
1874  return tupDesc;
1875 }
1876 
1877 /*
1878  * lookup_rowtype_tupdesc_copy
1879  *
1880  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1881  * copied into the CurrentMemoryContext and is not reference-counted.
1882  */
1883 TupleDesc
1885 {
1886  TupleDesc tmp;
1887 
1888  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1889  return CreateTupleDescCopyConstr(tmp);
1890 }
1891 
1892 /*
1893  * lookup_rowtype_tupdesc_domain
1894  *
1895  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1896  * a domain over a named composite type; so this is effectively equivalent to
1897  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1898  * except for being a tad faster.
1899  *
1900  * Note: the reason we don't fold the look-through-domain behavior into plain
1901  * lookup_rowtype_tupdesc() is that we want callers to know they might be
1902  * dealing with a domain. Otherwise they might construct a tuple that should
1903  * be of the domain type, but not apply domain constraints.
1904  */
1905 TupleDesc
1906 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1907 {
1908  TupleDesc tupDesc;
1909 
1910  if (type_id != RECORDOID)
1911  {
1912  /*
1913  * Check for domain or named composite type. We might as well load
1914  * whichever data is needed.
1915  */
1916  TypeCacheEntry *typentry;
1917 
1918  typentry = lookup_type_cache(type_id,
1921  if (typentry->typtype == TYPTYPE_DOMAIN)
1923  typentry->domainBaseTypmod,
1924  noError);
1925  if (typentry->tupDesc == NULL && !noError)
1926  ereport(ERROR,
1927  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1928  errmsg("type %s is not composite",
1929  format_type_be(type_id))));
1930  tupDesc = typentry->tupDesc;
1931  }
1932  else
1933  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1934  if (tupDesc != NULL)
1935  PinTupleDesc(tupDesc);
1936  return tupDesc;
1937 }
1938 
1939 /*
1940  * Hash function for the hash table of RecordCacheEntry.
1941  */
1942 static uint32
1943 record_type_typmod_hash(const void *data, size_t size)
1944 {
1945  RecordCacheEntry *entry = (RecordCacheEntry *) data;
1946 
1947  return hashRowType(entry->tupdesc);
1948 }
1949 
1950 /*
1951  * Match function for the hash table of RecordCacheEntry.
1952  */
1953 static int
1954 record_type_typmod_compare(const void *a, const void *b, size_t size)
1955 {
1956  RecordCacheEntry *left = (RecordCacheEntry *) a;
1957  RecordCacheEntry *right = (RecordCacheEntry *) b;
1958 
1959  return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
1960 }
1961 
1962 /*
1963  * assign_record_type_typmod
1964  *
1965  * Given a tuple descriptor for a RECORD type, find or create a cache entry
1966  * for the type, and set the tupdesc's tdtypmod field to a value that will
1967  * identify this cache entry to lookup_rowtype_tupdesc.
1968  */
1969 void
1971 {
1972  RecordCacheEntry *recentry;
1973  TupleDesc entDesc;
1974  bool found;
1975  MemoryContext oldcxt;
1976 
1977  Assert(tupDesc->tdtypeid == RECORDOID);
1978 
1979  if (RecordCacheHash == NULL)
1980  {
1981  /* First time through: initialize the hash table */
1982  HASHCTL ctl;
1983 
1984  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
1985  ctl.entrysize = sizeof(RecordCacheEntry);
1988  RecordCacheHash = hash_create("Record information cache", 64,
1989  &ctl,
1991 
1992  /* Also make sure CacheMemoryContext exists */
1993  if (!CacheMemoryContext)
1995  }
1996 
1997  /*
1998  * Find a hashtable entry for this tuple descriptor. We don't use
1999  * HASH_ENTER yet, because if it's missing, we need to make sure that all
2000  * the allocations succeed before we create the new entry.
2001  */
2003  &tupDesc,
2004  HASH_FIND, &found);
2005  if (found && recentry->tupdesc != NULL)
2006  {
2007  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2008  return;
2009  }
2010 
2011  /* Not present, so need to manufacture an entry */
2013 
2014  /* Look in the SharedRecordTypmodRegistry, if attached */
2015  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2016  if (entDesc == NULL)
2017  {
2018  /*
2019  * Make sure we have room before we CreateTupleDescCopy() or advance
2020  * NextRecordTypmod.
2021  */
2023 
2024  /* Reference-counted local cache only. */
2025  entDesc = CreateTupleDescCopy(tupDesc);
2026  entDesc->tdrefcount = 1;
2027  entDesc->tdtypmod = NextRecordTypmod++;
2028  }
2029  else
2030  {
2032  }
2033 
2034  RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2035 
2036  /* Assign a unique tupdesc identifier, too. */
2038 
2039  /* Fully initialized; create the hash table entry */
2041  &tupDesc,
2042  HASH_ENTER, NULL);
2043  recentry->tupdesc = entDesc;
2044 
2045  /* Update the caller's tuple descriptor. */
2046  tupDesc->tdtypmod = entDesc->tdtypmod;
2047 
2048  MemoryContextSwitchTo(oldcxt);
2049 }
2050 
2051 /*
2052  * assign_record_type_identifier
2053  *
2054  * Get an identifier, which will be unique over the lifespan of this backend
2055  * process, for the current tuple descriptor of the specified composite type.
2056  * For named composite types, the value is guaranteed to change if the type's
2057  * definition does. For registered RECORD types, the value will not change
2058  * once assigned, since the registered type won't either. If an anonymous
2059  * RECORD type is specified, we return a new identifier on each call.
2060  */
2061 uint64
2063 {
2064  if (type_id != RECORDOID)
2065  {
2066  /*
2067  * It's a named composite type, so use the regular typcache.
2068  */
2069  TypeCacheEntry *typentry;
2070 
2071  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2072  if (typentry->tupDesc == NULL)
2073  ereport(ERROR,
2074  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2075  errmsg("type %s is not composite",
2076  format_type_be(type_id))));
2077  Assert(typentry->tupDesc_identifier != 0);
2078  return typentry->tupDesc_identifier;
2079  }
2080  else
2081  {
2082  /*
2083  * It's a transient record type, so look in our record-type table.
2084  */
2085  if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2086  RecordCacheArray[typmod].tupdesc != NULL)
2087  {
2088  Assert(RecordCacheArray[typmod].id != 0);
2089  return RecordCacheArray[typmod].id;
2090  }
2091 
2092  /* For anonymous or unrecognized record type, generate a new ID */
2093  return ++tupledesc_id_counter;
2094  }
2095 }
2096 
2097 /*
2098  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2099  * This exists only to avoid exposing private innards of
2100  * SharedRecordTypmodRegistry in a header.
2101  */
2102 size_t
2104 {
2105  return sizeof(SharedRecordTypmodRegistry);
2106 }
2107 
2108 /*
2109  * Initialize 'registry' in a pre-existing shared memory region, which must be
2110  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2111  * bytes.
2112  *
2113  * 'area' will be used to allocate shared memory space as required for the
2114  * typemod registration. The current process, expected to be a leader process
2115  * in a parallel query, will be attached automatically and its current record
2116  * types will be loaded into *registry. While attached, all calls to
2117  * assign_record_type_typmod will use the shared registry. Worker backends
2118  * will need to attach explicitly.
2119  *
2120  * Note that this function takes 'area' and 'segment' as arguments rather than
2121  * accessing them via CurrentSession, because they aren't installed there
2122  * until after this function runs.
2123  */
2124 void
2126  dsm_segment *segment,
2127  dsa_area *area)
2128 {
2129  MemoryContext old_context;
2130  dshash_table *record_table;
2131  dshash_table *typmod_table;
2132  int32 typmod;
2133 
2135 
2136  /* We can't already be attached to a shared registry. */
2140 
2141  old_context = MemoryContextSwitchTo(TopMemoryContext);
2142 
2143  /* Create the hash table of tuple descriptors indexed by themselves. */
2144  record_table = dshash_create(area, &srtr_record_table_params, area);
2145 
2146  /* Create the hash table of tuple descriptors indexed by typmod. */
2147  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2148 
2149  MemoryContextSwitchTo(old_context);
2150 
2151  /* Initialize the SharedRecordTypmodRegistry. */
2152  registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2153  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2155 
2156  /*
2157  * Copy all entries from this backend's private registry into the shared
2158  * registry.
2159  */
2160  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2161  {
2162  SharedTypmodTableEntry *typmod_table_entry;
2163  SharedRecordTableEntry *record_table_entry;
2164  SharedRecordTableKey record_table_key;
2165  dsa_pointer shared_dp;
2166  TupleDesc tupdesc;
2167  bool found;
2168 
2169  tupdesc = RecordCacheArray[typmod].tupdesc;
2170  if (tupdesc == NULL)
2171  continue;
2172 
2173  /* Copy the TupleDesc into shared memory. */
2174  shared_dp = share_tupledesc(area, tupdesc, typmod);
2175 
2176  /* Insert into the typmod table. */
2177  typmod_table_entry = dshash_find_or_insert(typmod_table,
2178  &tupdesc->tdtypmod,
2179  &found);
2180  if (found)
2181  elog(ERROR, "cannot create duplicate shared record typmod");
2182  typmod_table_entry->typmod = tupdesc->tdtypmod;
2183  typmod_table_entry->shared_tupdesc = shared_dp;
2184  dshash_release_lock(typmod_table, typmod_table_entry);
2185 
2186  /* Insert into the record table. */
2187  record_table_key.shared = false;
2188  record_table_key.u.local_tupdesc = tupdesc;
2189  record_table_entry = dshash_find_or_insert(record_table,
2190  &record_table_key,
2191  &found);
2192  if (!found)
2193  {
2194  record_table_entry->key.shared = true;
2195  record_table_entry->key.u.shared_tupdesc = shared_dp;
2196  }
2197  dshash_release_lock(record_table, record_table_entry);
2198  }
2199 
2200  /*
2201  * Set up the global state that will tell assign_record_type_typmod and
2202  * lookup_rowtype_tupdesc_internal about the shared registry.
2203  */
2204  CurrentSession->shared_record_table = record_table;
2205  CurrentSession->shared_typmod_table = typmod_table;
2207 
2208  /*
2209  * We install a detach hook in the leader, but only to handle cleanup on
2210  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2211  * the memory, the leader process will use a shared registry until it
2212  * exits.
2213  */
2215 }
2216 
2217 /*
2218  * Attach to 'registry', which must have been initialized already by another
2219  * backend. Future calls to assign_record_type_typmod and
2220  * lookup_rowtype_tupdesc_internal will use the shared registry until the
2221  * current session is detached.
2222  */
2223 void
2225 {
2226  MemoryContext old_context;
2227  dshash_table *record_table;
2228  dshash_table *typmod_table;
2229 
2231 
2232  /* We can't already be attached to a shared registry. */
2233  Assert(CurrentSession != NULL);
2234  Assert(CurrentSession->segment != NULL);
2235  Assert(CurrentSession->area != NULL);
2239 
2240  /*
2241  * We can't already have typmods in our local cache, because they'd clash
2242  * with those imported by SharedRecordTypmodRegistryInit. This should be
2243  * a freshly started parallel worker. If we ever support worker
2244  * recycling, a worker would need to zap its local cache in between
2245  * servicing different queries, in order to be able to call this and
2246  * synchronize typmods with a new leader; but that's problematic because
2247  * we can't be very sure that record-typmod-related state hasn't escaped
2248  * to anywhere else in the process.
2249  */
2250  Assert(NextRecordTypmod == 0);
2251 
2252  old_context = MemoryContextSwitchTo(TopMemoryContext);
2253 
2254  /* Attach to the two hash tables. */
2255  record_table = dshash_attach(CurrentSession->area,
2257  registry->record_table_handle,
2258  CurrentSession->area);
2259  typmod_table = dshash_attach(CurrentSession->area,
2261  registry->typmod_table_handle,
2262  NULL);
2263 
2264  MemoryContextSwitchTo(old_context);
2265 
2266  /*
2267  * Set up detach hook to run at worker exit. Currently this is the same
2268  * as the leader's detach hook, but in future they might need to be
2269  * different.
2270  */
2273  PointerGetDatum(registry));
2274 
2275  /*
2276  * Set up the session state that will tell assign_record_type_typmod and
2277  * lookup_rowtype_tupdesc_internal about the shared registry.
2278  */
2280  CurrentSession->shared_record_table = record_table;
2281  CurrentSession->shared_typmod_table = typmod_table;
2282 }
2283 
2284 /*
2285  * TypeCacheRelCallback
2286  * Relcache inval callback function
2287  *
2288  * Delete the cached tuple descriptor (if any) for the given rel's composite
2289  * type, or for all composite types if relid == InvalidOid. Also reset
2290  * whatever info we have cached about the composite type's comparability.
2291  *
2292  * This is called when a relcache invalidation event occurs for the given
2293  * relid. We must scan the whole typcache hash since we don't know the
2294  * type OID corresponding to the relid. We could do a direct search if this
2295  * were a syscache-flush callback on pg_type, but then we would need all
2296  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
2297  * invals against the rel's pg_type OID. The extra SI signaling could very
2298  * well cost more than we'd save, since in most usages there are not very
2299  * many entries in a backend's typcache. The risk of bugs-of-omission seems
2300  * high, too.
2301  *
2302  * Another possibility, with only localized impact, is to maintain a second
2303  * hashtable that indexes composite-type typcache entries by their typrelid.
2304  * But it's still not clear it's worth the trouble.
2305  */
2306 static void
/*
 * NOTE(review): the signature line (original line 2307, presumably
 * "TypeCacheRelCallback(Datum arg, Oid relid)") is missing from this
 * extract — confirm against the original file.  The header comment above
 * documents the contract: relid identifies the invalidated relation, or
 * InvalidOid to flush all composite types.
 */
2308 {
2309  HASH_SEQ_STATUS status;
2310  TypeCacheEntry *typentry;
2311 
2312  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2313  hash_seq_init(&status, TypeCacheHash);
 /* Full-table scan: we have no index from relid to typcache entry. */
2314  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2315  {
2316  if (typentry->typtype == TYPTYPE_COMPOSITE)
2317  {
2318  /* Skip if no match, unless we're zapping all composite types */
2319  if (relid != typentry->typrelid && relid != InvalidOid)
2320  continue;
2321 
2322  /* Delete tupdesc if we have it */
2323  if (typentry->tupDesc != NULL)
2324  {
2325  /*
2326  * Release our refcount, and free the tupdesc if none remain.
2327  * (Can't use DecrTupleDescRefCount because this reference is
2328  * not logged in current resource owner.)
2329  */
2330  Assert(typentry->tupDesc->tdrefcount > 0);
2331  if (--typentry->tupDesc->tdrefcount == 0)
2332  FreeTupleDesc(typentry->tupDesc);
2333  typentry->tupDesc = NULL;
2334 
2335  /*
2336  * Also clear tupDesc_identifier, so that anything watching
2337  * that will realize that the tupdesc has possibly changed.
2338  * (Alternatively, we could specify that to detect possible
2339  * tupdesc change, one must check for tupDesc != NULL as well
2340  * as tupDesc_identifier being the same as what was previously
2341  * seen. That seems error-prone.)
2342  */
2343  typentry->tupDesc_identifier = 0;
2344  }
2345 
2346  /* Reset equality/comparison/hashing validity information */
2347  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2348  }
2349  else if (typentry->typtype == TYPTYPE_DOMAIN)
2350  {
2351  /*
2352  * If it's domain over composite, reset flags. (We don't bother
2353  * trying to determine whether the specific base type needs a
2354  * reset.) Note that if we haven't determined whether the base
2355  * type is composite, we don't need to reset anything.
2356  */
2357  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2358  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2359  }
2360  }
2361 }
2362 
2363 /*
2364  * TypeCacheTypCallback
2365  * Syscache inval callback function
2366  *
2367  * This is called when a syscache invalidation event occurs for any
2368  * pg_type row. If we have information cached about that type, mark
2369  * it as needing to be reloaded.
2370  */
2371 static void
2372 TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2373 {
2374  HASH_SEQ_STATUS status;
2375  TypeCacheEntry *typentry;
2376 
2377  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2378 
2379  /*
2380  * By convention, zero hash value is passed to the callback as a sign that
2381  * it's time to invalidate the whole cache. See sinval.c, inval.c and
2382  * InvalidateSystemCachesExtended().
2383  */
2384  if (hashvalue == 0)
2385  hash_seq_init(&status, TypeCacheHash)
2386  else
2387  hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2388 
2389  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2390  {
2391  Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2392 
2393  /*
2394  * Mark the data obtained directly from pg_type as invalid. Also, if
2395  * it's a domain, typnotnull might've changed, so we'll need to
2396  * recalculate its constraints.
2397  */
2398  typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
 /*
  * NOTE(review): the continuation of this statement (original line 2399,
  * presumably "TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);") is missing from
  * this extract — confirm against the original file.
  */
2400  }
2401 }
2402 
2403 /*
2404  * TypeCacheOpcCallback
2405  * Syscache inval callback function
2406  *
2407  * This is called when a syscache invalidation event occurs for any pg_opclass
2408  * row. In principle we could probably just invalidate data dependent on the
2409  * particular opclass, but since updates on pg_opclass are rare in production
2410  * it doesn't seem worth a lot of complication: we just mark all cached data
2411  * invalid.
2412  *
2413  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2414  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2415  * is not allowed to be used to add/drop the primary operators and functions
2416  * of an opclass, only cross-type members of a family; and the latter sorts
2417  * of members are not going to get cached here.
2418  */
2419 static void
2420 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2421 {
2422  HASH_SEQ_STATUS status;
2423  TypeCacheEntry *typentry;
2424 
2425  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2426  hash_seq_init(&status, TypeCacheHash);
2427  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2428  {
2429  /* Reset equality/comparison/hashing validity information */
2430  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2431  }
2432 }
2433 
2434 /*
2435  * TypeCacheConstrCallback
2436  * Syscache inval callback function
2437  *
2438  * This is called when a syscache invalidation event occurs for any
2439  * pg_constraint row. We flush information about domain constraints
2440  * when this happens.
2441  *
2442  * It's slightly annoying that we can't tell whether the inval event was for
2443  * a domain constraint record or not; there's usually more update traffic
2444  * for table constraints than domain constraints, so we'll do a lot of
2445  * useless flushes. Still, this is better than the old no-caching-at-all
2446  * approach to domain constraints.
2447  */
2448 static void
2449 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2450 {
2451  TypeCacheEntry *typentry;
2452 
2453  /*
2454  * Because this is called very frequently, and typically very few of the
2455  * typcache entries are for domains, we don't use hash_seq_search here.
2456  * Instead we thread all the domain-type entries together so that we can
2457  * visit them cheaply.
2458  */
2459  for (typentry = firstDomainTypeEntry;
2460  typentry != NULL;
2461  typentry = typentry->nextDomain)
2462  {
2463  /* Reset domain constraint validity information */
 /*
  * NOTE(review): the loop body's reset statement (original line 2464) is
  * missing from this extract — confirm against the original file.  Given
  * the comment above, it should clear the entry's domain-constraint
  * validity state.
  */
2465  }
2466 }
2467 
2468 
2469 /*
2470  * Check if given OID is part of the subset that's sortable by comparisons
2471  */
2472 static inline bool
/*
 * NOTE(review): the signature line (original line 2473, presumably
 * "enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)") is missing
 * from this extract — confirm against the original file.  Returns true if
 * 'arg' falls inside the cached bitmap of enum member OIDs whose OID order
 * matches their sort order (see the header comment above).
 */
2474 {
2475  Oid offset;
2476 
 /* OIDs below the bitmap's base cannot be in the sorted subset. */
2477  if (arg < enumdata->bitmap_base)
2478  return false;
2479  offset = arg - enumdata->bitmap_base;
 /* Bitmapset members are ints, so reject offsets that won't fit. */
2480  if (offset > (Oid) INT_MAX)
2481  return false;
2482  return bms_is_member((int) offset, enumdata->sorted_values);
2483 }
2484 
2485 
2486 /*
2487  * compare_values_of_enum
2488  * Compare two members of an enum type.
2489  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2490  *
2491  * Note: currently, the enumData cache is refreshed only if we are asked
2492  * to compare an enum value that is not already in the cache. This is okay
2493  * because there is no support for re-ordering existing values, so comparisons
2494  * of previously cached values will return the right answer even if other
2495  * values have been added since we last loaded the cache.
2496  *
2497  * Note: the enum logic has a special-case rule about even-numbered versus
2498  * odd-numbered OIDs, but we take no account of that rule here; this
2499  * routine shouldn't even get called when that rule applies.
2500  */
2501 int
/*
 * NOTE(review): the signature line (original line 2502, presumably
 * "compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)") is
 * missing from this extract — confirm against the original file.  The
 * header comment above documents the contract: returns <0, 0, or >0 as
 * arg1 <, =, or > arg2 in the enum's sort order.
 */
2503 {
2504  TypeCacheEnumData *enumdata;
2505  EnumItem *item1;
2506  EnumItem *item2;
2507 
2508  /*
2509  * Equal OIDs are certainly equal --- this case was probably handled by
2510  * our caller, but we may as well check.
2511  */
2512  if (arg1 == arg2)
2513  return 0;
2514 
2515  /* Load up the cache if first time through */
2516  if (tcache->enumData == NULL)
2517  load_enum_cache_data(tcache);
2518  enumdata = tcache->enumData;
2519 
2520  /*
2521  * If both OIDs are known-sorted, we can just compare them directly.
2522  */
2523  if (enum_known_sorted(enumdata, arg1) &&
2524  enum_known_sorted(enumdata, arg2))
2525  {
 /* arg1 == arg2 was handled above, so exactly one ordering holds here. */
2526  if (arg1 < arg2)
2527  return -1;
2528  else
2529  return 1;
2530  }
2531 
2532  /*
2533  * Slow path: we have to identify their actual sort-order positions.
2534  */
2535  item1 = find_enumitem(enumdata, arg1);
2536  item2 = find_enumitem(enumdata, arg2);
2537 
2538  if (item1 == NULL || item2 == NULL)
2539  {
2540  /*
2541  * We couldn't find one or both values. That means the enum has
2542  * changed under us, so re-initialize the cache and try again. We
2543  * don't bother retrying the known-sorted case in this path.
2544  */
2545  load_enum_cache_data(tcache);
2546  enumdata = tcache->enumData;
2547 
2548  item1 = find_enumitem(enumdata, arg1);
2549  item2 = find_enumitem(enumdata, arg2);
2550 
2551  /*
2552  * If we still can't find the values, complain: we must have corrupt
2553  * data.
2554  */
2555  if (item1 == NULL)
2556  elog(ERROR, "enum value %u not found in cache for enum %s",
2557  arg1, format_type_be(tcache->type_id));
2558  if (item2 == NULL)
2559  elog(ERROR, "enum value %u not found in cache for enum %s",
2560  arg2, format_type_be(tcache->type_id));
2561  }
2562 
 /* Compare by cached sort_order, the authoritative ordering. */
2563  if (item1->sort_order < item2->sort_order)
2564  return -1;
2565  else if (item1->sort_order > item2->sort_order)
2566  return 1;
2567  else
2568  return 0;
2569 }
2570 
2571 /*
2572  * Load (or re-load) the enumData member of the typcache entry.
2573  */
2574 static void
2576 {
2577  TypeCacheEnumData *enumdata;
2578  Relation enum_rel;
2579  SysScanDesc enum_scan;
2580  HeapTuple enum_tuple;
2581  ScanKeyData skey;
2582  EnumItem *items;
2583  int numitems;
2584  int maxitems;
2585  Oid bitmap_base;
2586  Bitmapset *bitmap;
2587  MemoryContext oldcxt;
2588  int bm_size,
2589  start_pos;
2590 
2591  /* Check that this is actually an enum */
2592  if (tcache->typtype != TYPTYPE_ENUM)
2593  ereport(ERROR,
2594  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2595  errmsg("%s is not an enum",
2596  format_type_be(tcache->type_id))));
2597 
2598  /*
2599  * Read all the information for members of the enum type. We collect the
2600  * info in working memory in the caller's context, and then transfer it to
2601  * permanent memory in CacheMemoryContext. This minimizes the risk of
2602  * leaking memory from CacheMemoryContext in the event of an error partway
2603  * through.
2604  */
2605  maxitems = 64;
2606  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2607  numitems = 0;
2608 
2609  /* Scan pg_enum for the members of the target enum type. */
2610  ScanKeyInit(&skey,
2611  Anum_pg_enum_enumtypid,
2612  BTEqualStrategyNumber, F_OIDEQ,
2613  ObjectIdGetDatum(tcache->type_id));
2614 
2615  enum_rel = table_open(EnumRelationId, AccessShareLock);
2616  enum_scan = systable_beginscan(enum_rel,
2617  EnumTypIdLabelIndexId,
2618  true, NULL,
2619  1, &skey);
2620 
2621  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2622  {
2623  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2624 
2625  if (numitems >= maxitems)
2626  {
2627  maxitems *= 2;
2628  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2629  }
2630  items[numitems].enum_oid = en->oid;
2631  items[numitems].sort_order = en->enumsortorder;
2632  numitems++;
2633  }
2634 
2635  systable_endscan(enum_scan);
2636  table_close(enum_rel, AccessShareLock);
2637 
2638  /* Sort the items into OID order */
2639  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2640 
2641  /*
2642  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2643  * known to be in order and can thus be compared with just OID comparison.
2644  *
2645  * The point of this is that the enum's initial OIDs were certainly in
2646  * order, so there is some subset that can be compared via OID comparison;
2647  * and we'd rather not do binary searches unnecessarily.
2648  *
2649  * This is somewhat heuristic, and might identify a subset of OIDs that
2650  * isn't exactly what the type started with. That's okay as long as the
2651  * subset is correctly sorted.
2652  */
2653  bitmap_base = InvalidOid;
2654  bitmap = NULL;
2655  bm_size = 1; /* only save sets of at least 2 OIDs */
2656 
2657  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2658  {
2659  /*
2660  * Identify longest sorted subsequence starting at start_pos
2661  */
2662  Bitmapset *this_bitmap = bms_make_singleton(0);
2663  int this_bm_size = 1;
2664  Oid start_oid = items[start_pos].enum_oid;
2665  float4 prev_order = items[start_pos].sort_order;
2666  int i;
2667 
2668  for (i = start_pos + 1; i < numitems; i++)
2669  {
2670  Oid offset;
2671 
2672  offset = items[i].enum_oid - start_oid;
2673  /* quit if bitmap would be too large; cutoff is arbitrary */
2674  if (offset >= 8192)
2675  break;
2676  /* include the item if it's in-order */
2677  if (items[i].sort_order > prev_order)
2678  {
2679  prev_order = items[i].sort_order;
2680  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2681  this_bm_size++;
2682  }
2683  }
2684 
2685  /* Remember it if larger than previous best */
2686  if (this_bm_size > bm_size)
2687  {
2688  bms_free(bitmap);
2689  bitmap_base = start_oid;
2690  bitmap = this_bitmap;
2691  bm_size = this_bm_size;
2692  }
2693  else
2694  bms_free(this_bitmap);
2695 
2696  /*
2697  * Done if it's not possible to find a longer sequence in the rest of
2698  * the list. In typical cases this will happen on the first
2699  * iteration, which is why we create the bitmaps on the fly instead of
2700  * doing a second pass over the list.
2701  */
2702  if (bm_size >= (numitems - start_pos - 1))
2703  break;
2704  }
2705 
2706  /* OK, copy the data into CacheMemoryContext */
2708  enumdata = (TypeCacheEnumData *)
2709  palloc(offsetof(TypeCacheEnumData, enum_values) +
2710  numitems * sizeof(EnumItem));
2711  enumdata->bitmap_base = bitmap_base;
2712  enumdata->sorted_values = bms_copy(bitmap);
2713  enumdata->num_values = numitems;
2714  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2715  MemoryContextSwitchTo(oldcxt);
2716 
2717  pfree(items);
2718  bms_free(bitmap);
2719 
2720  /* And link the finished cache struct into the typcache */
2721  if (tcache->enumData != NULL)
2722  pfree(tcache->enumData);
2723  tcache->enumData = enumdata;
2724 }
2725 
2726 /*
2727  * Locate the EnumItem with the given OID, if present
2728  */
2729 static EnumItem *
/*
 * NOTE(review): the signature line (original line 2730, presumably
 * "find_enumitem(TypeCacheEnumData *enumdata, Oid arg)") is missing from
 * this extract — confirm against the original file.  Binary-searches the
 * OID-sorted enum_values array for 'arg'; returns NULL if not present.
 * Relies on enum_values having been sorted with enum_oid_cmp.
 */
2731 {
2732  EnumItem srch;
2733 
2734  /* On some versions of Solaris, bsearch of zero items dumps core */
2735  if (enumdata->num_values <= 0)
2736  return NULL;
2737 
2738  srch.enum_oid = arg;
2739  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2740  sizeof(EnumItem), enum_oid_cmp);
2741 }
2742 
2743 /*
2744  * qsort comparison function for OID-ordered EnumItems
2745  */
2746 static int
2747 enum_oid_cmp(const void *left, const void *right)
2748 {
2749  const EnumItem *l = (const EnumItem *) left;
2750  const EnumItem *r = (const EnumItem *) right;
2751 
2752  return pg_cmp_u32(l->enum_oid, r->enum_oid);
2753 }
2754 
2755 /*
2756  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2757  * to the given value and return a dsa_pointer.
2758  */
2759 static dsa_pointer
2760 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2761 {
2762  dsa_pointer shared_dp;
2763  TupleDesc shared;
2764 
2765  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2766  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2767  TupleDescCopy(shared, tupdesc);
2768  shared->tdtypmod = typmod;
2769 
2770  return shared_dp;
2771 }
2772 
2773 /*
2774  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2775  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2776  * Tuple descriptors returned by this function are not reference counted, and
2777  * will exist at least as long as the current backend remained attached to the
2778  * current session.
2779  */
2780 static TupleDesc
2782 {
2783  TupleDesc result;
2785  SharedRecordTableEntry *record_table_entry;
2786  SharedTypmodTableEntry *typmod_table_entry;
2787  dsa_pointer shared_dp;
2788  bool found;
2789  uint32 typmod;
2790 
2791  /* If not even attached, nothing to do. */
2793  return NULL;
2794 
2795  /* Try to find a matching tuple descriptor in the record table. */
2796  key.shared = false;
2797  key.u.local_tupdesc = tupdesc;
2798  record_table_entry = (SharedRecordTableEntry *)
2800  if (record_table_entry)
2801  {
2802  Assert(record_table_entry->key.shared);
2804  record_table_entry);
2805  result = (TupleDesc)
2807  record_table_entry->key.u.shared_tupdesc);
2808  Assert(result->tdrefcount == -1);
2809 
2810  return result;
2811  }
2812 
2813  /* Allocate a new typmod number. This will be wasted if we error out. */
2814  typmod = (int)
2816  1);
2817 
2818  /* Copy the TupleDesc into shared memory. */
2819  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2820 
2821  /*
2822  * Create an entry in the typmod table so that others will understand this
2823  * typmod number.
2824  */
2825  PG_TRY();
2826  {
2827  typmod_table_entry = (SharedTypmodTableEntry *)
2829  &typmod, &found);
2830  if (found)
2831  elog(ERROR, "cannot create duplicate shared record typmod");
2832  }
2833  PG_CATCH();
2834  {
2835  dsa_free(CurrentSession->area, shared_dp);
2836  PG_RE_THROW();
2837  }
2838  PG_END_TRY();
2839  typmod_table_entry->typmod = typmod;
2840  typmod_table_entry->shared_tupdesc = shared_dp;
2842  typmod_table_entry);
2843 
2844  /*
2845  * Finally create an entry in the record table so others with matching
2846  * tuple descriptors can reuse the typmod.
2847  */
2848  record_table_entry = (SharedRecordTableEntry *)
2850  &found);
2851  if (found)
2852  {
2853  /*
2854  * Someone concurrently inserted a matching tuple descriptor since the
2855  * first time we checked. Use that one instead.
2856  */
2858  record_table_entry);
2859 
2860  /* Might as well free up the space used by the one we created. */
2862  &typmod);
2863  Assert(found);
2864  dsa_free(CurrentSession->area, shared_dp);
2865 
2866  /* Return the one we found. */
2867  Assert(record_table_entry->key.shared);
2868  result = (TupleDesc)
2870  record_table_entry->key.u.shared_tupdesc);
2871  Assert(result->tdrefcount == -1);
2872 
2873  return result;
2874  }
2875 
2876  /* Store it and return it. */
2877  record_table_entry->key.shared = true;
2878  record_table_entry->key.u.shared_tupdesc = shared_dp;
2880  record_table_entry);
2881  result = (TupleDesc)
2882  dsa_get_address(CurrentSession->area, shared_dp);
2883  Assert(result->tdrefcount == -1);
2884 
2885  return result;
2886 }
2887 
2888 /*
2889  * On-DSM-detach hook to forget about the current shared record typmod
2890  * infrastructure. This is currently used by both leader and workers.
2891  */
2892 static void
/*
 * NOTE(review): several lines of this function are missing from this
 * extract: the signature line (2893, presumably taking the usual
 * on_dsm_detach arguments), the bodies of both if-blocks (2898-2899 and
 * 2903-2904, presumably detaching the dshash tables and clearing the
 * CurrentSession pointers), and line 2906.  Confirm against the original
 * file before relying on this text.
 */
2894 {
2895  /* Be cautious here: maybe we didn't finish initializing. */
2896  if (CurrentSession->shared_record_table != NULL)
2897  {
2900  }
2901  if (CurrentSession->shared_typmod_table != NULL)
2902  {
2905  }
2907 }
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:366
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
#define TextDatumGetCString(d)
Definition: builtins.h:98
#define NameStr(name)
Definition: c.h:749
unsigned int uint32
Definition: c.h:509
#define RegProcedureIsValid(p)
Definition: c.h:780
signed int int32
Definition: c.h:497
#define Assert(condition)
Definition: c.h:861
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:401
float float4
Definition: c.h:632
#define MemSet(start, val, len)
Definition: c.h:1023
#define OidIsValid(objectId)
Definition: c.h:778
size_t Size
Definition: c.h:608
void CreateCacheMemoryContext(void)
Definition: catcache.c:680
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:942
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:826
uint64 dsa_pointer
Definition: dsa.h:62
#define dsa_allocate(area, size)
Definition: dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition: dshash.c:590
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:307
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
dsa_pointer dshash_table_handle
Definition: dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition: dynahash.c:1405
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define PG_RE_THROW()
Definition: elog.h:412
#define PG_TRY(...)
Definition: elog.h:371
#define PG_END_TRY(...)
Definition: elog.h:396
#define ERROR
Definition: elog.h:39
#define PG_CATCH(...)
Definition: elog.h:381
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:138
@ DOM_CONSTRAINT_CHECK
Definition: execnodes.h:1013
@ DOM_CONSTRAINT_NOTNULL
Definition: execnodes.h:1012
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:604
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:511
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:387
#define HASHSTANDARD_PROC
Definition: hash.h:355
#define HASHEXTENDED_PROC
Definition: hash.h:356
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:653
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:749
#define IsParallelWorker()
Definition: parallel.h:60
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2332
long val
Definition: informix.c:689
static int pg_cmp_u32(uint32 a, uint32 b)
Definition: int.h:604
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1558
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1516
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int i
Definition: isn.c:73
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lcons(void *datum, List *list)
Definition: list.c:495
#define AccessShareLock
Definition: lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1212
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1190
Oid get_multirange_range(Oid multirangeOid)
Definition: lsyscache.c:3483
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:796
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1285
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:166
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2832
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2538
@ LWTRANCHE_PER_SESSION_RECORD_TYPMOD
Definition: lwlock.h:198
@ LWTRANCHE_PER_SESSION_RECORD_TYPE
Definition: lwlock.h:197
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:568
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:637
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void pfree(void *pointer)
Definition: mcxt.c:1521
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1215
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1541
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
MemoryContext CacheMemoryContext
Definition: mcxt.c:152
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
void * palloc(Size size)
Definition: mcxt.c:1317
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:170
#define BTORDER_PROC
Definition: nbtree.h:707
#define makeNode(_type_)
Definition: nodes.h:155
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
void * arg
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
FormData_pg_constraint * Form_pg_constraint
const void * data
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
FormData_pg_range * Form_pg_range
Definition: pg_range.h:58
FormData_pg_type * Form_pg_type
Definition: pg_type.h:261
Expr * expression_planner(Expr *expr)
Definition: planner.c:6688
#define qsort(a, b, c, d)
Definition: port.h:447
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
char * c
MemoryContextSwitchTo(old_ctx)
tree ctl
Definition: radixtree.h:1853
void * stringToNode(const char *str)
Definition: read.c:90
#define RelationGetDescr(relation)
Definition: rel.h:531
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
Session * CurrentSession
Definition: session.c:48
static pg_noinline void Size size
Definition: slab.c:607
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:47
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define HTEqualStrategyNumber
Definition: stratnum.h:41
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
MemoryContext dccContext
Definition: typcache.c:127
DomainConstraintCache * dcc
Definition: typcache.h:172
MemoryContext refctx
Definition: typcache.h:167
MemoryContextCallback callback
Definition: typcache.h:173
TypeCacheEntry * tcache
Definition: typcache.h:168
DomainConstraintType constrainttype
Definition: execnodes.h:1019
ExprState * check_exprstate
Definition: execnodes.h:1022
float4 sort_order
Definition: typcache.c:135
Oid enum_oid
Definition: typcache.c:134
Oid fn_oid
Definition: fmgr.h:59
Definition: dynahash.c:220
Definition: pg_list.h:54
MemoryContextCallbackFunction func
Definition: palloc.h:49
TupleDesc tupdesc
Definition: typcache.c:159
TupleDesc rd_att
Definition: rel.h:112
Form_pg_class rd_rel
Definition: rel.h:111
dsm_segment * segment
Definition: session.h:27
dshash_table * shared_record_table
Definition: session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
dsa_area * area
Definition: session.h:28
dshash_table * shared_typmod_table
Definition: session.h:33
SharedRecordTableKey key
Definition: typcache.c:198
TupleDesc local_tupdesc
Definition: typcache.c:186
union SharedRecordTableKey::@33 u
dsa_pointer shared_tupdesc
Definition: typcache.c:187
dshash_table_handle typmod_table_handle
Definition: typcache.c:171
pg_atomic_uint32 next_typmod
Definition: typcache.c:173
dshash_table_handle record_table_handle
Definition: typcache.c:169
dsa_pointer shared_tupdesc
Definition: typcache.c:208
int tdrefcount
Definition: tupdesc.h:84
int32 tdtypmod
Definition: tupdesc.h:83
Oid tdtypeid
Definition: tupdesc.h:82
uint32 type_id_hash
Definition: typcache.h:36
uint64 tupDesc_identifier
Definition: typcache.h:90
FmgrInfo hash_proc_finfo
Definition: typcache.h:77
int32 domainBaseTypmod
Definition: typcache.h:115
Oid hash_extended_proc
Definition: typcache.h:66
Oid typsubscript
Definition: typcache.h:45
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:101
FmgrInfo cmp_proc_finfo
Definition: typcache.h:76
Oid rng_collation
Definition: typcache.h:100
char typalign
Definition: typcache.h:41
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:98
char typtype
Definition: typcache.h:43
TupleDesc tupDesc
Definition: typcache.h:89
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:78
DomainConstraintCache * domainData
Definition: typcache.h:121
struct TypeCacheEntry * rngtype
Definition: typcache.h:108
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:103
FmgrInfo eq_opr_finfo
Definition: typcache.h:75
Oid btree_opintype
Definition: typcache.h:58
struct TypeCacheEnumData * enumData
Definition: typcache.h:130
struct TypeCacheEntry * nextDomain
Definition: typcache.h:133
bool typbyval
Definition: typcache.h:40
FmgrInfo rng_canonical_finfo
Definition: typcache.h:102
int16 typlen
Definition: typcache.h:39
Oid hash_opintype
Definition: typcache.h:60
Oid typcollation
Definition: typcache.h:47
Oid domainBaseType
Definition: typcache.h:114
char typstorage
Definition: typcache.h:42
Oid rng_opfamily
Definition: typcache.h:99
Bitmapset * sorted_values
Definition: typcache.c:141
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:143
Definition: dsa.c:348
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:269
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:221
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
static ItemArray items
Definition: test_tidstore.c:49
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:173
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:251
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:406
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:331
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:388
uint32 hashRowType(TupleDesc desc)
Definition: tupdesc.c:622
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:133
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:586
#define TupleDescSize(src)
Definition: tupdesc.h:102
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:116
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:85
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:86
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1643
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1330
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1756
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1850
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2224
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:107
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:98
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1659
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:100
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:104
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2575
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1522
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2730
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1530
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2781
static int32 NextRecordTypmod
Definition: typcache.c:287
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1906
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:266
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:89
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1683
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1292
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1867
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1506
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:88
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:91
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1271
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:99
struct SharedRecordTableEntry SharedRecordTableEntry
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:2125
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1247
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1468
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:215
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1460
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:989
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition: typcache.c:338
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:90
struct SharedTypmodTableEntry SharedTypmodTableEntry
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:97
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1691
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1444
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2760
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:931
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:2062
static RecordCacheArrayEntry * RecordCacheArray
Definition: typcache.c:285
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1651
static HTAB * RecordCacheHash
Definition: typcache.c:276
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2473
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:81
struct RecordCacheEntry RecordCacheEntry
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:2893
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:96
struct RecordCacheArrayEntry RecordCacheArrayEntry
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:92
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2372
struct TypeCacheEnumData TypeCacheEnumData
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2449
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2420
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1011
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1417
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:95
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2307
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1476
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:2103
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1699
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:93
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:94
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1452
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:84
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:241
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2502
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:103
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition: typcache.c:102
struct SharedRecordTableKey SharedRecordTableKey
static int32 RecordCacheArrayLen
Definition: typcache.c:286
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:1970
static HTAB * TypeCacheHash
Definition: typcache.c:78
static uint64 tupledesc_id_counter
Definition: typcache.c:294
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:356
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1514
#define TCFLAGS_HAVE_FIELD_HASHING
Definition: typcache.c:101
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:1954
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:256
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1884
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2747
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1260
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:87
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1368
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1727
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1538
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:1943
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:897
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:156
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:144
#define TYPECACHE_EQ_OPR
Definition: typcache.h:137
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:147
#define TYPECACHE_TUPDESC
Definition: typcache.h:145
#define TYPECACHE_MULTIRANGE_INFO
Definition: typcache.h:153
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:176
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:142
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:151
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:146
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:149
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:150
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:148
#define TYPECACHE_GT_OPR
Definition: typcache.h:139
#define TYPECACHE_CMP_PROC
Definition: typcache.h:140
struct TypeCacheEntry TypeCacheEntry
#define TYPECACHE_LT_OPR
Definition: typcache.h:138
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:152
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:143
#define TYPECACHE_HASH_PROC
Definition: typcache.h:141