PostgreSQL Source Code  git master
typcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Core data extracted from the pg_type row is updated
27  * when we detect updates to pg_type. Information dependent on opclasses is
28  * cleared if we detect updates to pg_opclass. We also support clearing the
29  * tuple descriptor and operator/function parts of a rowtype's cache entry,
30  * since those may need to change as a consequence of ALTER TABLE. Domain
31  * constraint changes are also tracked properly.
32  *
33  *
34  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
35  * Portions Copyright (c) 1994, Regents of the University of California
36  *
37  * IDENTIFICATION
38  * src/backend/utils/cache/typcache.c
39  *
40  *-------------------------------------------------------------------------
41  */
42 #include "postgres.h"
43 
44 #include <limits.h>
45 
46 #include "access/hash.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/relation.h"
51 #include "access/session.h"
52 #include "access/table.h"
53 #include "catalog/pg_am.h"
54 #include "catalog/pg_constraint.h"
55 #include "catalog/pg_enum.h"
56 #include "catalog/pg_operator.h"
57 #include "catalog/pg_range.h"
58 #include "catalog/pg_type.h"
59 #include "commands/defrem.h"
60 #include "executor/executor.h"
61 #include "lib/dshash.h"
62 #include "optimizer/optimizer.h"
63 #include "port/pg_bitutils.h"
64 #include "storage/lwlock.h"
65 #include "utils/builtins.h"
66 #include "utils/catcache.h"
67 #include "utils/fmgroids.h"
68 #include "utils/inval.h"
69 #include "utils/lsyscache.h"
70 #include "utils/memutils.h"
71 #include "utils/rel.h"
72 #include "utils/snapmgr.h"
73 #include "utils/syscache.h"
74 #include "utils/typcache.h"
75 
76 
77 /* The main type cache hashtable searched by lookup_type_cache */
78 static HTAB *TypeCacheHash = NULL;
79 
80 /* List of type cache entries for domain types */
82 
83 /* Private flag bits in the TypeCacheEntry.flags field */
84 #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
85 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
86 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
87 #define TCFLAGS_CHECKED_EQ_OPR 0x000008
88 #define TCFLAGS_CHECKED_LT_OPR 0x000010
89 #define TCFLAGS_CHECKED_GT_OPR 0x000020
90 #define TCFLAGS_CHECKED_CMP_PROC 0x000040
91 #define TCFLAGS_CHECKED_HASH_PROC 0x000080
92 #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
93 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
94 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
95 #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
96 #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
97 #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
98 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
99 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
100 #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
101 #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
102 #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
103 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
104 #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
105 
106 /* The flags associated with equality/comparison/hashing are all but these: */
107 #define TCFLAGS_OPERATOR_FLAGS \
108  (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
109  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
110  TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
111 
112 /*
113  * Data stored about a domain type's constraints. Note that we do not create
114  * this struct for the common case of a constraint-less domain; we just set
115  * domainData to NULL to indicate that.
116  *
117  * Within a DomainConstraintCache, we store expression plan trees, but the
118  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
119  * When needed, expression evaluation nodes are built by flat-copying the
120  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
121  * Such a node tree is not part of the DomainConstraintCache, but is
122  * considered to belong to a DomainConstraintRef.
123  */
125 {
126  List *constraints; /* list of DomainConstraintState nodes */
127  MemoryContext dccContext; /* memory context holding all associated data */
128  long dccRefCount; /* number of references to this struct */
129 };
130 
131 /* Private information to support comparisons of enum values */
132 typedef struct
133 {
134  Oid enum_oid; /* OID of one enum value */
135  float4 sort_order; /* its sort position */
136 } EnumItem;
137 
138 typedef struct TypeCacheEnumData
139 {
140  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
141  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
142  int num_values; /* total number of values in enum */
145 
146 /*
147  * We use a separate table for storing the definitions of non-anonymous
148  * record types. Once defined, a record type will be remembered for the
149  * life of the backend. Subsequent uses of the "same" record type (where
150  * sameness means equalTupleDescs) will refer to the existing table entry.
151  *
152  * Stored record types are remembered in a linear array of TupleDescs,
153  * which can be indexed quickly with the assigned typmod. There is also
154  * a hash table to speed searches for matching TupleDescs.
155  */
156 
157 typedef struct RecordCacheEntry
158 {
161 
162 /*
163  * To deal with non-anonymous record types that are exchanged by backends
164  * involved in a parallel query, we also need a shared version of the above.
165  */
167 {
168  /* A hash table for finding a matching TupleDesc. */
170  /* A hash table for finding a TupleDesc by typmod. */
172  /* A source of new record typmod numbers. */
174 };
175 
176 /*
177  * When using shared tuple descriptors as hash table keys we need a way to be
178  * able to search for an equal shared TupleDesc using a backend-local
179  * TupleDesc. So we use this type which can hold either, and hash and compare
180  * functions that know how to handle both.
181  */
182 typedef struct SharedRecordTableKey
183 {
184  union
185  {
188  } u;
189  bool shared;
191 
192 /*
193  * The shared version of RecordCacheEntry. This lets us look up a typmod
194  * using a TupleDesc which may be in local or shared memory.
195  */
197 {
200 
201 /*
202  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
203  * up a TupleDesc in shared memory using a typmod.
204  */
206 {
210 
211 /*
212  * A comparator function for SharedRecordTableKey.
213  */
214 static int
215 shared_record_table_compare(const void *a, const void *b, size_t size,
216  void *arg)
217 {
218  dsa_area *area = (dsa_area *) arg;
221  TupleDesc t1;
222  TupleDesc t2;
223 
224  if (k1->shared)
225  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
226  else
227  t1 = k1->u.local_tupdesc;
228 
229  if (k2->shared)
230  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
231  else
232  t2 = k2->u.local_tupdesc;
233 
234  return equalTupleDescs(t1, t2) ? 0 : 1;
235 }
236 
237 /*
238  * A hash function for SharedRecordTableKey.
239  */
240 static uint32
241 shared_record_table_hash(const void *a, size_t size, void *arg)
242 {
243  dsa_area *area = (dsa_area *) arg;
245  TupleDesc t;
246 
247  if (k->shared)
248  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
249  else
250  t = k->u.local_tupdesc;
251 
252  return hashTupleDesc(t);
253 }
254 
255 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
257  sizeof(SharedRecordTableKey), /* unused */
258  sizeof(SharedRecordTableEntry),
262 };
263 
264 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
266  sizeof(uint32),
267  sizeof(SharedTypmodTableEntry),
271 };
272 
273 /* hashtable for recognizing registered record types */
274 static HTAB *RecordCacheHash = NULL;
275 
276 typedef struct RecordCacheArrayEntry
277 {
278  uint64 id;
281 
282 /* array of info about registered record types, indexed by assigned typmod */
284 static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
285 static int32 NextRecordTypmod = 0; /* number of entries used */
286 
287 /*
288  * Process-wide counter for generating unique tupledesc identifiers.
289  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
290  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
291  */
293 
294 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
295 static void load_rangetype_info(TypeCacheEntry *typentry);
296 static void load_multirangetype_info(TypeCacheEntry *typentry);
297 static void load_domaintype_info(TypeCacheEntry *typentry);
298 static int dcs_cmp(const void *a, const void *b);
299 static void decr_dcc_refcount(DomainConstraintCache *dcc);
300 static void dccref_deletion_callback(void *arg);
301 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
302 static bool array_element_has_equality(TypeCacheEntry *typentry);
303 static bool array_element_has_compare(TypeCacheEntry *typentry);
304 static bool array_element_has_hashing(TypeCacheEntry *typentry);
306 static void cache_array_element_properties(TypeCacheEntry *typentry);
307 static bool record_fields_have_equality(TypeCacheEntry *typentry);
308 static bool record_fields_have_compare(TypeCacheEntry *typentry);
309 static bool record_fields_have_hashing(TypeCacheEntry *typentry);
311 static void cache_record_field_properties(TypeCacheEntry *typentry);
312 static bool range_element_has_hashing(TypeCacheEntry *typentry);
314 static void cache_range_element_properties(TypeCacheEntry *typentry);
315 static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
318 static void TypeCacheRelCallback(Datum arg, Oid relid);
319 static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
320 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
321 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
322 static void load_enum_cache_data(TypeCacheEntry *tcache);
323 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
324 static int enum_oid_cmp(const void *left, const void *right);
326  Datum datum);
328 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
329  uint32 typmod);
330 
331 
332 /*
333  * lookup_type_cache
334  *
335  * Fetch the type cache entry for the specified datatype, and make sure that
336  * all the fields requested by bits in 'flags' are valid.
337  *
338  * The result is never NULL --- we will ereport() if the passed type OID is
339  * invalid. Note however that we may fail to find one or more of the
340  * values requested by 'flags'; the caller needs to check whether the fields
341  * are InvalidOid or not.
342  */
344 lookup_type_cache(Oid type_id, int flags)
345 {
346  TypeCacheEntry *typentry;
347  bool found;
348 
349  if (TypeCacheHash == NULL)
350  {
351  /* First time through: initialize the hash table */
352  HASHCTL ctl;
353 
354  ctl.keysize = sizeof(Oid);
355  ctl.entrysize = sizeof(TypeCacheEntry);
356  TypeCacheHash = hash_create("Type information cache", 64,
357  &ctl, HASH_ELEM | HASH_BLOBS);
358 
359  /* Also set up callbacks for SI invalidations */
364 
365  /* Also make sure CacheMemoryContext exists */
366  if (!CacheMemoryContext)
368  }
369 
370  /* Try to look up an existing entry */
372  &type_id,
373  HASH_FIND, NULL);
374  if (typentry == NULL)
375  {
376  /*
377  * If we didn't find one, we want to make one. But first look up the
378  * pg_type row, just to make sure we don't make a cache entry for an
379  * invalid type OID. If the type OID is not valid, present a
380  * user-facing error, since some code paths such as domain_in() allow
381  * this function to be reached with a user-supplied OID.
382  */
383  HeapTuple tp;
384  Form_pg_type typtup;
385 
386  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
387  if (!HeapTupleIsValid(tp))
388  ereport(ERROR,
389  (errcode(ERRCODE_UNDEFINED_OBJECT),
390  errmsg("type with OID %u does not exist", type_id)));
391  typtup = (Form_pg_type) GETSTRUCT(tp);
392  if (!typtup->typisdefined)
393  ereport(ERROR,
394  (errcode(ERRCODE_UNDEFINED_OBJECT),
395  errmsg("type \"%s\" is only a shell",
396  NameStr(typtup->typname))));
397 
398  /* Now make the typcache entry */
400  &type_id,
401  HASH_ENTER, &found);
402  Assert(!found); /* it wasn't there a moment ago */
403 
404  MemSet(typentry, 0, sizeof(TypeCacheEntry));
405 
406  /* These fields can never change, by definition */
407  typentry->type_id = type_id;
409  ObjectIdGetDatum(type_id));
410 
411  /* Keep this part in sync with the code below */
412  typentry->typlen = typtup->typlen;
413  typentry->typbyval = typtup->typbyval;
414  typentry->typalign = typtup->typalign;
415  typentry->typstorage = typtup->typstorage;
416  typentry->typtype = typtup->typtype;
417  typentry->typrelid = typtup->typrelid;
418  typentry->typsubscript = typtup->typsubscript;
419  typentry->typelem = typtup->typelem;
420  typentry->typcollation = typtup->typcollation;
421  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
422 
423  /* If it's a domain, immediately thread it into the domain cache list */
424  if (typentry->typtype == TYPTYPE_DOMAIN)
425  {
426  typentry->nextDomain = firstDomainTypeEntry;
427  firstDomainTypeEntry = typentry;
428  }
429 
430  ReleaseSysCache(tp);
431  }
432  else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
433  {
434  /*
435  * We have an entry, but its pg_type row got changed, so reload the
436  * data obtained directly from pg_type.
437  */
438  HeapTuple tp;
439  Form_pg_type typtup;
440 
441  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
442  if (!HeapTupleIsValid(tp))
443  ereport(ERROR,
444  (errcode(ERRCODE_UNDEFINED_OBJECT),
445  errmsg("type with OID %u does not exist", type_id)));
446  typtup = (Form_pg_type) GETSTRUCT(tp);
447  if (!typtup->typisdefined)
448  ereport(ERROR,
449  (errcode(ERRCODE_UNDEFINED_OBJECT),
450  errmsg("type \"%s\" is only a shell",
451  NameStr(typtup->typname))));
452 
453  /*
454  * Keep this part in sync with the code above. Many of these fields
455  * shouldn't ever change, particularly typtype, but copy 'em anyway.
456  */
457  typentry->typlen = typtup->typlen;
458  typentry->typbyval = typtup->typbyval;
459  typentry->typalign = typtup->typalign;
460  typentry->typstorage = typtup->typstorage;
461  typentry->typtype = typtup->typtype;
462  typentry->typrelid = typtup->typrelid;
463  typentry->typsubscript = typtup->typsubscript;
464  typentry->typelem = typtup->typelem;
465  typentry->typcollation = typtup->typcollation;
466  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
467 
468  ReleaseSysCache(tp);
469  }
470 
471  /*
472  * Look up opclasses if we haven't already and any dependent info is
473  * requested.
474  */
479  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
480  {
481  Oid opclass;
482 
483  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
484  if (OidIsValid(opclass))
485  {
486  typentry->btree_opf = get_opclass_family(opclass);
487  typentry->btree_opintype = get_opclass_input_type(opclass);
488  }
489  else
490  {
491  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
492  }
493 
494  /*
495  * Reset information derived from btree opclass. Note in particular
496  * that we'll redetermine the eq_opr even if we previously found one;
497  * this matters in case a btree opclass has been added to a type that
498  * previously had only a hash opclass.
499  */
500  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
505  }
506 
507  /*
508  * If we need to look up equality operator, and there's no btree opclass,
509  * force lookup of hash opclass.
510  */
511  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
512  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
513  typentry->btree_opf == InvalidOid)
514  flags |= TYPECACHE_HASH_OPFAMILY;
515 
520  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
521  {
522  Oid opclass;
523 
524  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
525  if (OidIsValid(opclass))
526  {
527  typentry->hash_opf = get_opclass_family(opclass);
528  typentry->hash_opintype = get_opclass_input_type(opclass);
529  }
530  else
531  {
532  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
533  }
534 
535  /*
536  * Reset information derived from hash opclass. We do *not* reset the
537  * eq_opr; if we already found one from the btree opclass, that
538  * decision is still good.
539  */
540  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
542  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
543  }
544 
545  /*
546  * Look for requested operators and functions, if we haven't already.
547  */
548  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
549  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
550  {
551  Oid eq_opr = InvalidOid;
552 
553  if (typentry->btree_opf != InvalidOid)
554  eq_opr = get_opfamily_member(typentry->btree_opf,
555  typentry->btree_opintype,
556  typentry->btree_opintype,
558  if (eq_opr == InvalidOid &&
559  typentry->hash_opf != InvalidOid)
560  eq_opr = get_opfamily_member(typentry->hash_opf,
561  typentry->hash_opintype,
562  typentry->hash_opintype,
564 
565  /*
566  * If the proposed equality operator is array_eq or record_eq, check
567  * to see if the element type or column types support equality. If
568  * not, array_eq or record_eq would fail at runtime, so we don't want
569  * to report that the type has equality. (We can omit similar
570  * checking for ranges and multiranges because ranges can't be created
571  * in the first place unless their subtypes support equality.)
572  */
573  if (eq_opr == ARRAY_EQ_OP &&
574  !array_element_has_equality(typentry))
575  eq_opr = InvalidOid;
576  else if (eq_opr == RECORD_EQ_OP &&
577  !record_fields_have_equality(typentry))
578  eq_opr = InvalidOid;
579 
580  /* Force update of eq_opr_finfo only if we're changing state */
581  if (typentry->eq_opr != eq_opr)
582  typentry->eq_opr_finfo.fn_oid = InvalidOid;
583 
584  typentry->eq_opr = eq_opr;
585 
586  /*
587  * Reset info about hash functions whenever we pick up new info about
588  * equality operator. This is so we can ensure that the hash
589  * functions match the operator.
590  */
591  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
593  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
594  }
595  if ((flags & TYPECACHE_LT_OPR) &&
596  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
597  {
598  Oid lt_opr = InvalidOid;
599 
600  if (typentry->btree_opf != InvalidOid)
601  lt_opr = get_opfamily_member(typentry->btree_opf,
602  typentry->btree_opintype,
603  typentry->btree_opintype,
605 
606  /*
607  * As above, make sure array_cmp or record_cmp will succeed; but again
608  * we need no special check for ranges or multiranges.
609  */
610  if (lt_opr == ARRAY_LT_OP &&
611  !array_element_has_compare(typentry))
612  lt_opr = InvalidOid;
613  else if (lt_opr == RECORD_LT_OP &&
614  !record_fields_have_compare(typentry))
615  lt_opr = InvalidOid;
616 
617  typentry->lt_opr = lt_opr;
618  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
619  }
620  if ((flags & TYPECACHE_GT_OPR) &&
621  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
622  {
623  Oid gt_opr = InvalidOid;
624 
625  if (typentry->btree_opf != InvalidOid)
626  gt_opr = get_opfamily_member(typentry->btree_opf,
627  typentry->btree_opintype,
628  typentry->btree_opintype,
630 
631  /*
632  * As above, make sure array_cmp or record_cmp will succeed; but again
633  * we need no special check for ranges or multiranges.
634  */
635  if (gt_opr == ARRAY_GT_OP &&
636  !array_element_has_compare(typentry))
637  gt_opr = InvalidOid;
638  else if (gt_opr == RECORD_GT_OP &&
639  !record_fields_have_compare(typentry))
640  gt_opr = InvalidOid;
641 
642  typentry->gt_opr = gt_opr;
643  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
644  }
645  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
646  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
647  {
648  Oid cmp_proc = InvalidOid;
649 
650  if (typentry->btree_opf != InvalidOid)
651  cmp_proc = get_opfamily_proc(typentry->btree_opf,
652  typentry->btree_opintype,
653  typentry->btree_opintype,
654  BTORDER_PROC);
655 
656  /*
657  * As above, make sure array_cmp or record_cmp will succeed; but again
658  * we need no special check for ranges or multiranges.
659  */
660  if (cmp_proc == F_BTARRAYCMP &&
661  !array_element_has_compare(typentry))
662  cmp_proc = InvalidOid;
663  else if (cmp_proc == F_BTRECORDCMP &&
664  !record_fields_have_compare(typentry))
665  cmp_proc = InvalidOid;
666 
667  /* Force update of cmp_proc_finfo only if we're changing state */
668  if (typentry->cmp_proc != cmp_proc)
669  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
670 
671  typentry->cmp_proc = cmp_proc;
672  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
673  }
675  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
676  {
677  Oid hash_proc = InvalidOid;
678 
679  /*
680  * We insist that the eq_opr, if one has been determined, match the
681  * hash opclass; else report there is no hash function.
682  */
683  if (typentry->hash_opf != InvalidOid &&
684  (!OidIsValid(typentry->eq_opr) ||
685  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
686  typentry->hash_opintype,
687  typentry->hash_opintype,
689  hash_proc = get_opfamily_proc(typentry->hash_opf,
690  typentry->hash_opintype,
691  typentry->hash_opintype,
693 
694  /*
695  * As above, make sure hash_array, hash_record, or hash_range will
696  * succeed.
697  */
698  if (hash_proc == F_HASH_ARRAY &&
699  !array_element_has_hashing(typentry))
700  hash_proc = InvalidOid;
701  else if (hash_proc == F_HASH_RECORD &&
702  !record_fields_have_hashing(typentry))
703  hash_proc = InvalidOid;
704  else if (hash_proc == F_HASH_RANGE &&
705  !range_element_has_hashing(typentry))
706  hash_proc = InvalidOid;
707 
708  /*
709  * Likewise for hash_multirange.
710  */
711  if (hash_proc == F_HASH_MULTIRANGE &&
713  hash_proc = InvalidOid;
714 
715  /* Force update of hash_proc_finfo only if we're changing state */
716  if (typentry->hash_proc != hash_proc)
717  typentry->hash_proc_finfo.fn_oid = InvalidOid;
718 
719  typentry->hash_proc = hash_proc;
720  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
721  }
722  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
725  {
726  Oid hash_extended_proc = InvalidOid;
727 
728  /*
729  * We insist that the eq_opr, if one has been determined, match the
730  * hash opclass; else report there is no hash function.
731  */
732  if (typentry->hash_opf != InvalidOid &&
733  (!OidIsValid(typentry->eq_opr) ||
734  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
735  typentry->hash_opintype,
736  typentry->hash_opintype,
738  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
739  typentry->hash_opintype,
740  typentry->hash_opintype,
742 
743  /*
744  * As above, make sure hash_array_extended, hash_record_extended, or
745  * hash_range_extended will succeed.
746  */
747  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
749  hash_extended_proc = InvalidOid;
750  else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
752  hash_extended_proc = InvalidOid;
753  else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
755  hash_extended_proc = InvalidOid;
756 
757  /*
758  * Likewise for hash_multirange_extended.
759  */
760  if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
762  hash_extended_proc = InvalidOid;
763 
764  /* Force update of proc finfo only if we're changing state */
765  if (typentry->hash_extended_proc != hash_extended_proc)
767 
768  typentry->hash_extended_proc = hash_extended_proc;
770  }
771 
772  /*
773  * Set up fmgr lookup info as requested
774  *
775  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
776  * which is not quite right (they're really in the hash table's private
777  * memory context) but this will do for our purposes.
778  *
779  * Note: the code above avoids invalidating the finfo structs unless the
780  * referenced operator/function OID actually changes. This is to prevent
781  * unnecessary leakage of any subsidiary data attached to an finfo, since
782  * that would cause session-lifespan memory leaks.
783  */
784  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
785  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
786  typentry->eq_opr != InvalidOid)
787  {
788  Oid eq_opr_func;
789 
790  eq_opr_func = get_opcode(typentry->eq_opr);
791  if (eq_opr_func != InvalidOid)
792  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
794  }
795  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
796  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
797  typentry->cmp_proc != InvalidOid)
798  {
799  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
801  }
802  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
803  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
804  typentry->hash_proc != InvalidOid)
805  {
806  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
808  }
809  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
811  typentry->hash_extended_proc != InvalidOid)
812  {
814  &typentry->hash_extended_proc_finfo,
816  }
817 
818  /*
819  * If it's a composite type (row type), get tupdesc if requested
820  */
821  if ((flags & TYPECACHE_TUPDESC) &&
822  typentry->tupDesc == NULL &&
823  typentry->typtype == TYPTYPE_COMPOSITE)
824  {
825  load_typcache_tupdesc(typentry);
826  }
827 
828  /*
829  * If requested, get information about a range type
830  *
831  * This includes making sure that the basic info about the range element
832  * type is up-to-date.
833  */
834  if ((flags & TYPECACHE_RANGE_INFO) &&
835  typentry->typtype == TYPTYPE_RANGE)
836  {
837  if (typentry->rngelemtype == NULL)
838  load_rangetype_info(typentry);
839  else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
840  (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
841  }
842 
843  /*
844  * If requested, get information about a multirange type
845  */
846  if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
847  typentry->rngtype == NULL &&
848  typentry->typtype == TYPTYPE_MULTIRANGE)
849  {
850  load_multirangetype_info(typentry);
851  }
852 
853  /*
854  * If requested, get information about a domain type
855  */
856  if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
857  typentry->domainBaseType == InvalidOid &&
858  typentry->typtype == TYPTYPE_DOMAIN)
859  {
860  typentry->domainBaseTypmod = -1;
861  typentry->domainBaseType =
862  getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
863  }
864  if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
865  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
866  typentry->typtype == TYPTYPE_DOMAIN)
867  {
868  load_domaintype_info(typentry);
869  }
870 
871  return typentry;
872 }
873 
874 /*
875  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
876  */
877 static void
879 {
880  Relation rel;
881 
882  if (!OidIsValid(typentry->typrelid)) /* should not happen */
883  elog(ERROR, "invalid typrelid for composite type %u",
884  typentry->type_id);
885  rel = relation_open(typentry->typrelid, AccessShareLock);
886  Assert(rel->rd_rel->reltype == typentry->type_id);
887 
888  /*
889  * Link to the tupdesc and increment its refcount (we assert it's a
890  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
891  * because the reference mustn't be entered in the current resource owner;
892  * it can outlive the current query.
893  */
894  typentry->tupDesc = RelationGetDescr(rel);
895 
896  Assert(typentry->tupDesc->tdrefcount > 0);
897  typentry->tupDesc->tdrefcount++;
898 
899  /*
900  * In future, we could take some pains to not change tupDesc_identifier if
901  * the tupdesc didn't really change; but for now it's not worth it.
902  */
904 
906 }
907 
908 /*
909  * load_rangetype_info --- helper routine to set up range type information
910  */
911 static void
913 {
914  Form_pg_range pg_range;
915  HeapTuple tup;
916  Oid subtypeOid;
917  Oid opclassOid;
918  Oid canonicalOid;
919  Oid subdiffOid;
920  Oid opfamilyOid;
921  Oid opcintype;
922  Oid cmpFnOid;
923 
924  /* get information from pg_range */
926  /* should not fail, since we already checked typtype ... */
927  if (!HeapTupleIsValid(tup))
928  elog(ERROR, "cache lookup failed for range type %u",
929  typentry->type_id);
930  pg_range = (Form_pg_range) GETSTRUCT(tup);
931 
932  subtypeOid = pg_range->rngsubtype;
933  typentry->rng_collation = pg_range->rngcollation;
934  opclassOid = pg_range->rngsubopc;
935  canonicalOid = pg_range->rngcanonical;
936  subdiffOid = pg_range->rngsubdiff;
937 
938  ReleaseSysCache(tup);
939 
940  /* get opclass properties and look up the comparison function */
941  opfamilyOid = get_opclass_family(opclassOid);
942  opcintype = get_opclass_input_type(opclassOid);
943 
944  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
945  BTORDER_PROC);
946  if (!RegProcedureIsValid(cmpFnOid))
947  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
948  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
949 
950  /* set up cached fmgrinfo structs */
951  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
953  if (OidIsValid(canonicalOid))
954  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
956  if (OidIsValid(subdiffOid))
957  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
959 
960  /* Lastly, set up link to the element type --- this marks data valid */
961  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
962 }
963 
964 /*
965  * load_multirangetype_info --- helper routine to set up multirange type
966  * information
967  */
968 static void
970 {
971  Oid rangetypeOid;
972 
973  rangetypeOid = get_multirange_range(typentry->type_id);
974  if (!OidIsValid(rangetypeOid))
975  elog(ERROR, "cache lookup failed for multirange type %u",
976  typentry->type_id);
977 
978  typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
979 }
980 
981 /*
982  * load_domaintype_info --- helper routine to set up domain constraint info
983  *
984  * Note: we assume we're called in a relatively short-lived context, so it's
985  * okay to leak data into the current context while scanning pg_constraint.
986  * We build the new DomainConstraintCache data in a context underneath
987  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
988  * complete.
989  */
990 static void
992 {
993  Oid typeOid = typentry->type_id;
995  bool notNull = false;
996  DomainConstraintState **ccons;
997  int cconslen;
998  Relation conRel;
999  MemoryContext oldcxt;
1000 
1001  /*
1002  * If we're here, any existing constraint info is stale, so release it.
1003  * For safety, be sure to null the link before trying to delete the data.
1004  */
1005  if (typentry->domainData)
1006  {
1007  dcc = typentry->domainData;
1008  typentry->domainData = NULL;
1009  decr_dcc_refcount(dcc);
1010  }
1011 
1012  /*
1013  * We try to optimize the common case of no domain constraints, so don't
1014  * create the dcc object and context until we find a constraint. Likewise
1015  * for the temp sorting array.
1016  */
1017  dcc = NULL;
1018  ccons = NULL;
1019  cconslen = 0;
1020 
1021  /*
1022  * Scan pg_constraint for relevant constraints. We want to find
1023  * constraints for not just this domain, but any ancestor domains, so the
1024  * outer loop crawls up the domain stack.
1025  */
1026  conRel = table_open(ConstraintRelationId, AccessShareLock);
1027 
1028  for (;;)
1029  {
1030  HeapTuple tup;
1031  HeapTuple conTup;
1032  Form_pg_type typTup;
1033  int nccons = 0;
1034  ScanKeyData key[1];
1035  SysScanDesc scan;
1036 
1037  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1038  if (!HeapTupleIsValid(tup))
1039  elog(ERROR, "cache lookup failed for type %u", typeOid);
1040  typTup = (Form_pg_type) GETSTRUCT(tup);
1041 
1042  if (typTup->typtype != TYPTYPE_DOMAIN)
1043  {
1044  /* Not a domain, so done */
1045  ReleaseSysCache(tup);
1046  break;
1047  }
1048 
1049  /* Test for NOT NULL Constraint */
1050  if (typTup->typnotnull)
1051  notNull = true;
1052 
1053  /* Look for CHECK Constraints on this domain */
1054  ScanKeyInit(&key[0],
1055  Anum_pg_constraint_contypid,
1056  BTEqualStrategyNumber, F_OIDEQ,
1057  ObjectIdGetDatum(typeOid));
1058 
1059  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1060  NULL, 1, key);
1061 
1062  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1063  {
1065  Datum val;
1066  bool isNull;
1067  char *constring;
1068  Expr *check_expr;
1070 
1071  /* Ignore non-CHECK constraints (presently, shouldn't be any) */
1072  if (c->contype != CONSTRAINT_CHECK)
1073  continue;
1074 
1075  /* Not expecting conbin to be NULL, but we'll test for it anyway */
1076  val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1077  conRel->rd_att, &isNull);
1078  if (isNull)
1079  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1080  NameStr(typTup->typname), NameStr(c->conname));
1081 
1082  /* Convert conbin to C string in caller context */
1083  constring = TextDatumGetCString(val);
1084 
1085  /* Create the DomainConstraintCache object and context if needed */
1086  if (dcc == NULL)
1087  {
1088  MemoryContext cxt;
1089 
1091  "Domain constraints",
1093  dcc = (DomainConstraintCache *)
1095  dcc->constraints = NIL;
1096  dcc->dccContext = cxt;
1097  dcc->dccRefCount = 0;
1098  }
1099 
1100  /* Create node trees in DomainConstraintCache's context */
1101  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1102 
1103  check_expr = (Expr *) stringToNode(constring);
1104 
1105  /*
1106  * Plan the expression, since ExecInitExpr will expect that.
1107  *
1108  * Note: caching the result of expression_planner() is not very
1109  * good practice. Ideally we'd use a CachedExpression here so
1110  * that we would react promptly to, eg, changes in inlined
1111  * functions. However, because we don't support mutable domain
1112  * CHECK constraints, it's not really clear that it's worth the
1113  * extra overhead to do that.
1114  */
1115  check_expr = expression_planner(check_expr);
1116 
1119  r->name = pstrdup(NameStr(c->conname));
1120  r->check_expr = check_expr;
1121  r->check_exprstate = NULL;
1122 
1123  MemoryContextSwitchTo(oldcxt);
1124 
1125  /* Accumulate constraints in an array, for sorting below */
1126  if (ccons == NULL)
1127  {
1128  cconslen = 8;
1129  ccons = (DomainConstraintState **)
1130  palloc(cconslen * sizeof(DomainConstraintState *));
1131  }
1132  else if (nccons >= cconslen)
1133  {
1134  cconslen *= 2;
1135  ccons = (DomainConstraintState **)
1136  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1137  }
1138  ccons[nccons++] = r;
1139  }
1140 
1141  systable_endscan(scan);
1142 
1143  if (nccons > 0)
1144  {
1145  /*
1146  * Sort the items for this domain, so that CHECKs are applied in a
1147  * deterministic order.
1148  */
1149  if (nccons > 1)
1150  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1151 
1152  /*
1153  * Now attach them to the overall list. Use lcons() here because
1154  * constraints of parent domains should be applied earlier.
1155  */
1156  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1157  while (nccons > 0)
1158  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1159  MemoryContextSwitchTo(oldcxt);
1160  }
1161 
1162  /* loop to next domain in stack */
1163  typeOid = typTup->typbasetype;
1164  ReleaseSysCache(tup);
1165  }
1166 
1167  table_close(conRel, AccessShareLock);
1168 
1169  /*
1170  * Only need to add one NOT NULL check regardless of how many domains in
1171  * the stack request it.
1172  */
1173  if (notNull)
1174  {
1176 
1177  /* Create the DomainConstraintCache object and context if needed */
1178  if (dcc == NULL)
1179  {
1180  MemoryContext cxt;
1181 
1183  "Domain constraints",
1185  dcc = (DomainConstraintCache *)
1187  dcc->constraints = NIL;
1188  dcc->dccContext = cxt;
1189  dcc->dccRefCount = 0;
1190  }
1191 
1192  /* Create node trees in DomainConstraintCache's context */
1193  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1194 
1196 
1198  r->name = pstrdup("NOT NULL");
1199  r->check_expr = NULL;
1200  r->check_exprstate = NULL;
1201 
1202  /* lcons to apply the nullness check FIRST */
1203  dcc->constraints = lcons(r, dcc->constraints);
1204 
1205  MemoryContextSwitchTo(oldcxt);
1206  }
1207 
1208  /*
1209  * If we made a constraint object, move it into CacheMemoryContext and
1210  * attach it to the typcache entry.
1211  */
1212  if (dcc)
1213  {
1215  typentry->domainData = dcc;
1216  dcc->dccRefCount++; /* count the typcache's reference */
1217  }
1218 
1219  /* Either way, the typcache entry's domain data is now valid. */
1221 }
1222 
1223 /*
1224  * qsort comparator to sort DomainConstraintState pointers by name
1225  */
1226 static int
1227 dcs_cmp(const void *a, const void *b)
1228 {
1229  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1230  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1231 
1232  return strcmp((*ca)->name, (*cb)->name);
1233 }
1234 
1235 /*
1236  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1237  * and free it if no references remain
1238  */
1239 static void
1241 {
1242  Assert(dcc->dccRefCount > 0);
1243  if (--(dcc->dccRefCount) <= 0)
1245 }
1246 
1247 /*
1248  * Context reset/delete callback for a DomainConstraintRef
1249  */
1250 static void
1252 {
1254  DomainConstraintCache *dcc = ref->dcc;
1255 
1256  /* Paranoia --- be sure link is nulled before trying to release */
1257  if (dcc)
1258  {
1259  ref->constraints = NIL;
1260  ref->dcc = NULL;
1261  decr_dcc_refcount(dcc);
1262  }
1263 }
1264 
1265 /*
1266  * prep_domain_constraints --- prepare domain constraints for execution
1267  *
1268  * The expression trees stored in the DomainConstraintCache's list are
1269  * converted to executable expression state trees stored in execctx.
1270  */
1271 static List *
1273 {
1274  List *result = NIL;
1275  MemoryContext oldcxt;
1276  ListCell *lc;
1277 
1278  oldcxt = MemoryContextSwitchTo(execctx);
1279 
1280  foreach(lc, constraints)
1281  {
1283  DomainConstraintState *newr;
1284 
1286  newr->constrainttype = r->constrainttype;
1287  newr->name = r->name;
1288  newr->check_expr = r->check_expr;
1289  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1290 
1291  result = lappend(result, newr);
1292  }
1293 
1294  MemoryContextSwitchTo(oldcxt);
1295 
1296  return result;
1297 }
1298 
1299 /*
1300  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1301  *
1302  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1303  * lives. The ref will be cleaned up when that context is reset/deleted.
1304  *
1305  * Caller must also tell us whether it wants check_exprstate fields to be
1306  * computed in the DomainConstraintState nodes attached to this ref.
1307  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1308  */
1309 void
1311  MemoryContext refctx, bool need_exprstate)
1312 {
1313  /* Look up the typcache entry --- we assume it survives indefinitely */
1315  ref->need_exprstate = need_exprstate;
1316  /* For safety, establish the callback before acquiring a refcount */
1317  ref->refctx = refctx;
1318  ref->dcc = NULL;
1320  ref->callback.arg = (void *) ref;
1322  /* Acquire refcount if there are constraints, and set up exported list */
1323  if (ref->tcache->domainData)
1324  {
1325  ref->dcc = ref->tcache->domainData;
1326  ref->dcc->dccRefCount++;
1327  if (ref->need_exprstate)
1329  ref->refctx);
1330  else
1331  ref->constraints = ref->dcc->constraints;
1332  }
1333  else
1334  ref->constraints = NIL;
1335 }
1336 
1337 /*
1338  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1339  *
1340  * If the domain's constraint set changed, ref->constraints is updated to
1341  * point at a new list of cached constraints.
1342  *
1343  * In the normal case where nothing happened to the domain, this is cheap
1344  * enough that it's reasonable (and expected) to check before *each* use
1345  * of the constraint info.
1346  */
1347 void
1349 {
1350  TypeCacheEntry *typentry = ref->tcache;
1351 
1352  /* Make sure typcache entry's data is up to date */
1353  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1354  typentry->typtype == TYPTYPE_DOMAIN)
1355  load_domaintype_info(typentry);
1356 
1357  /* Transfer to ref object if there's new info, adjusting refcounts */
1358  if (ref->dcc != typentry->domainData)
1359  {
1360  /* Paranoia --- be sure link is nulled before trying to release */
1361  DomainConstraintCache *dcc = ref->dcc;
1362 
1363  if (dcc)
1364  {
1365  /*
1366  * Note: we just leak the previous list of executable domain
1367  * constraints. Alternatively, we could keep those in a child
1368  * context of ref->refctx and free that context at this point.
1369  * However, in practice this code path will be taken so seldom
1370  * that the extra bookkeeping for a child context doesn't seem
1371  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1372  */
1373  ref->constraints = NIL;
1374  ref->dcc = NULL;
1375  decr_dcc_refcount(dcc);
1376  }
1377  dcc = typentry->domainData;
1378  if (dcc)
1379  {
1380  ref->dcc = dcc;
1381  dcc->dccRefCount++;
1382  if (ref->need_exprstate)
1384  ref->refctx);
1385  else
1386  ref->constraints = dcc->constraints;
1387  }
1388  }
1389 }
1390 
1391 /*
1392  * DomainHasConstraints --- utility routine to check if a domain has constraints
1393  *
1394  * This is defined to return false, not fail, if type is not a domain.
1395  */
1396 bool
1398 {
1399  TypeCacheEntry *typentry;
1400 
1401  /*
1402  * Note: a side effect is to cause the typcache's domain data to become
1403  * valid. This is fine since we'll likely need it soon if there is any.
1404  */
1405  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1406 
1407  return (typentry->domainData != NULL);
1408 }
1409 
1410 
1411 /*
1412  * array_element_has_equality and friends are helper routines to check
1413  * whether we should believe that array_eq and related functions will work
1414  * on the given array type or composite type.
1415  *
1416  * The logic above may call these repeatedly on the same type entry, so we
1417  * make use of the typentry->flags field to cache the results once known.
1418  * Also, we assume that we'll probably want all these facts about the type
1419  * if we want any, so we cache them all using only one lookup of the
1420  * component datatype(s).
1421  */
1422 
1423 static bool
1425 {
1426  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1428  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1429 }
1430 
1431 static bool
1433 {
1434  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1436  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1437 }
1438 
1439 static bool
1441 {
1442  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1444  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1445 }
1446 
1447 static bool
1449 {
1450  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1452  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1453 }
1454 
1455 static void
1457 {
1458  Oid elem_type = get_base_element_type(typentry->type_id);
1459 
1460  if (OidIsValid(elem_type))
1461  {
1462  TypeCacheEntry *elementry;
1463 
1464  elementry = lookup_type_cache(elem_type,
1469  if (OidIsValid(elementry->eq_opr))
1470  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1471  if (OidIsValid(elementry->cmp_proc))
1472  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1473  if (OidIsValid(elementry->hash_proc))
1474  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1475  if (OidIsValid(elementry->hash_extended_proc))
1477  }
1479 }
1480 
1481 /*
1482  * Likewise, some helper functions for composite types.
1483  */
1484 
1485 static bool
1487 {
1488  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1490  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1491 }
1492 
1493 static bool
1495 {
1496  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1498  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1499 }
1500 
1501 static bool
1503 {
1504  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1506  return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1507 }
1508 
1509 static bool
1511 {
1512  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1514  return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1515 }
1516 
1517 static void
1519 {
1520  /*
1521  * For type RECORD, we can't really tell what will work, since we don't
1522  * have access here to the specific anonymous type. Just assume that
1523  * equality and comparison will (we may get a failure at runtime). We
1524  * could also claim that hashing works, but then if code that has the
1525  * option between a comparison-based (sort-based) and a hash-based plan
1526  * chooses hashing, stuff could fail that would otherwise work if it chose
1527  * a comparison-based plan. In practice more types support comparison
1528  * than hashing.
1529  */
1530  if (typentry->type_id == RECORDOID)
1531  {
1532  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1534  }
1535  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1536  {
1537  TupleDesc tupdesc;
1538  int newflags;
1539  int i;
1540 
1541  /* Fetch composite type's tupdesc if we don't have it already */
1542  if (typentry->tupDesc == NULL)
1543  load_typcache_tupdesc(typentry);
1544  tupdesc = typentry->tupDesc;
1545 
1546  /* Must bump the refcount while we do additional catalog lookups */
1547  IncrTupleDescRefCount(tupdesc);
1548 
1549  /* Have each property if all non-dropped fields have the property */
1550  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1554  for (i = 0; i < tupdesc->natts; i++)
1555  {
1556  TypeCacheEntry *fieldentry;
1557  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1558 
1559  if (attr->attisdropped)
1560  continue;
1561 
1562  fieldentry = lookup_type_cache(attr->atttypid,
1567  if (!OidIsValid(fieldentry->eq_opr))
1568  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1569  if (!OidIsValid(fieldentry->cmp_proc))
1570  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1571  if (!OidIsValid(fieldentry->hash_proc))
1572  newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1573  if (!OidIsValid(fieldentry->hash_extended_proc))
1575 
1576  /* We can drop out of the loop once we disprove all bits */
1577  if (newflags == 0)
1578  break;
1579  }
1580  typentry->flags |= newflags;
1581 
1582  DecrTupleDescRefCount(tupdesc);
1583  }
1584  else if (typentry->typtype == TYPTYPE_DOMAIN)
1585  {
1586  /* If it's domain over composite, copy base type's properties */
1587  TypeCacheEntry *baseentry;
1588 
1589  /* load up basetype info if we didn't already */
1590  if (typentry->domainBaseType == InvalidOid)
1591  {
1592  typentry->domainBaseTypmod = -1;
1593  typentry->domainBaseType =
1594  getBaseTypeAndTypmod(typentry->type_id,
1595  &typentry->domainBaseTypmod);
1596  }
1597  baseentry = lookup_type_cache(typentry->domainBaseType,
1602  if (baseentry->typtype == TYPTYPE_COMPOSITE)
1603  {
1605  typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1609  }
1610  }
1612 }
1613 
1614 /*
1615  * Likewise, some helper functions for range and multirange types.
1616  *
1617  * We can borrow the flag bits for array element properties to use for range
1618  * element properties, since those flag bits otherwise have no use in a
1619  * range or multirange type's typcache entry.
1620  */
1621 
1622 static bool
1624 {
1625  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1627  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1628 }
1629 
1630 static bool
1632 {
1633  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1635  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1636 }
1637 
1638 static void
1640 {
1641  /* load up subtype link if we didn't already */
1642  if (typentry->rngelemtype == NULL &&
1643  typentry->typtype == TYPTYPE_RANGE)
1644  load_rangetype_info(typentry);
1645 
1646  if (typentry->rngelemtype != NULL)
1647  {
1648  TypeCacheEntry *elementry;
1649 
1650  /* might need to calculate subtype's hash function properties */
1651  elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1654  if (OidIsValid(elementry->hash_proc))
1655  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1656  if (OidIsValid(elementry->hash_extended_proc))
1658  }
1660 }
1661 
1662 static bool
1664 {
1665  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1667  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1668 }
1669 
1670 static bool
1672 {
1673  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1675  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1676 }
1677 
1678 static void
1680 {
1681  /* load up range link if we didn't already */
1682  if (typentry->rngtype == NULL &&
1683  typentry->typtype == TYPTYPE_MULTIRANGE)
1684  load_multirangetype_info(typentry);
1685 
1686  if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1687  {
1688  TypeCacheEntry *elementry;
1689 
1690  /* might need to calculate subtype's hash function properties */
1691  elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1694  if (OidIsValid(elementry->hash_proc))
1695  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1696  if (OidIsValid(elementry->hash_extended_proc))
1698  }
1700 }
1701 
1702 /*
1703  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1704  * to store 'typmod'.
1705  */
1706 static void
1708 {
1709  if (RecordCacheArray == NULL)
1710  {
1713  64 * sizeof(RecordCacheArrayEntry));
1714  RecordCacheArrayLen = 64;
1715  }
1716 
1717  if (typmod >= RecordCacheArrayLen)
1718  {
1719  int32 newlen = pg_nextpower2_32(typmod + 1);
1720 
1724  newlen);
1725  RecordCacheArrayLen = newlen;
1726  }
1727 }
1728 
1729 /*
1730  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1731  *
1732  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1733  * hasn't had its refcount bumped.
1734  */
1735 static TupleDesc
1736 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1737 {
1738  if (type_id != RECORDOID)
1739  {
1740  /*
1741  * It's a named composite type, so use the regular typcache.
1742  */
1743  TypeCacheEntry *typentry;
1744 
1745  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1746  if (typentry->tupDesc == NULL && !noError)
1747  ereport(ERROR,
1748  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1749  errmsg("type %s is not composite",
1750  format_type_be(type_id))));
1751  return typentry->tupDesc;
1752  }
1753  else
1754  {
1755  /*
1756  * It's a transient record type, so look in our record-type table.
1757  */
1758  if (typmod >= 0)
1759  {
1760  /* It is already in our local cache? */
1761  if (typmod < RecordCacheArrayLen &&
1762  RecordCacheArray[typmod].tupdesc != NULL)
1763  return RecordCacheArray[typmod].tupdesc;
1764 
1765  /* Are we attached to a shared record typmod registry? */
1767  {
1768  SharedTypmodTableEntry *entry;
1769 
1770  /* Try to find it in the shared typmod index. */
1772  &typmod, false);
1773  if (entry != NULL)
1774  {
1775  TupleDesc tupdesc;
1776 
1777  tupdesc = (TupleDesc)
1779  entry->shared_tupdesc);
1780  Assert(typmod == tupdesc->tdtypmod);
1781 
1782  /* We may need to extend the local RecordCacheArray. */
1784 
1785  /*
1786  * Our local array can now point directly to the TupleDesc
1787  * in shared memory, which is non-reference-counted.
1788  */
1789  RecordCacheArray[typmod].tupdesc = tupdesc;
1790  Assert(tupdesc->tdrefcount == -1);
1791 
1792  /*
1793  * We don't share tupdesc identifiers across processes, so
1794  * assign one locally.
1795  */
1797 
1799  entry);
1800 
1801  return RecordCacheArray[typmod].tupdesc;
1802  }
1803  }
1804  }
1805 
1806  if (!noError)
1807  ereport(ERROR,
1808  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1809  errmsg("record type has not been registered")));
1810  return NULL;
1811  }
1812 }
1813 
1814 /*
1815  * lookup_rowtype_tupdesc
1816  *
1817  * Given a typeid/typmod that should describe a known composite type,
1818  * return the tuple descriptor for the type. Will ereport on failure.
1819  * (Use ereport because this is reachable with user-specified OIDs,
1820  * for example from record_in().)
1821  *
1822  * Note: on success, we increment the refcount of the returned TupleDesc,
1823  * and log the reference in CurrentResourceOwner. Caller must call
1824  * ReleaseTupleDesc when done using the tupdesc. (There are some
1825  * cases in which the returned tupdesc is not refcounted, in which
1826  * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1827  * the tupdesc is guaranteed to live till process exit.)
1828  */
1829 TupleDesc
1831 {
1832  TupleDesc tupDesc;
1833 
1834  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1835  PinTupleDesc(tupDesc);
1836  return tupDesc;
1837 }
1838 
1839 /*
1840  * lookup_rowtype_tupdesc_noerror
1841  *
1842  * As above, but if the type is not a known composite type and noError
1843  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1844  * type_id is passed, you'll get an ereport anyway.)
1845  */
1846 TupleDesc
1847 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1848 {
1849  TupleDesc tupDesc;
1850 
1851  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1852  if (tupDesc != NULL)
1853  PinTupleDesc(tupDesc);
1854  return tupDesc;
1855 }
1856 
1857 /*
1858  * lookup_rowtype_tupdesc_copy
1859  *
1860  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1861  * copied into the CurrentMemoryContext and is not reference-counted.
1862  */
1863 TupleDesc
1865 {
1866  TupleDesc tmp;
1867 
1868  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1869  return CreateTupleDescCopyConstr(tmp);
1870 }
1871 
1872 /*
1873  * lookup_rowtype_tupdesc_domain
1874  *
1875  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1876  * a domain over a named composite type; so this is effectively equivalent to
1877  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1878  * except for being a tad faster.
1879  *
1880  * Note: the reason we don't fold the look-through-domain behavior into plain
1881  * lookup_rowtype_tupdesc() is that we want callers to know they might be
1882  * dealing with a domain. Otherwise they might construct a tuple that should
1883  * be of the domain type, but not apply domain constraints.
1884  */
1885 TupleDesc
1886 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1887 {
1888  TupleDesc tupDesc;
1889 
1890  if (type_id != RECORDOID)
1891  {
1892  /*
1893  * Check for domain or named composite type. We might as well load
1894  * whichever data is needed.
1895  */
1896  TypeCacheEntry *typentry;
1897 
1898  typentry = lookup_type_cache(type_id,
1901  if (typentry->typtype == TYPTYPE_DOMAIN)
1903  typentry->domainBaseTypmod,
1904  noError);
1905  if (typentry->tupDesc == NULL && !noError)
1906  ereport(ERROR,
1907  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1908  errmsg("type %s is not composite",
1909  format_type_be(type_id))));
1910  tupDesc = typentry->tupDesc;
1911  }
1912  else
1913  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1914  if (tupDesc != NULL)
1915  PinTupleDesc(tupDesc);
1916  return tupDesc;
1917 }
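A sketch of how the noError behavior can serve as a cheap "is this usable as a row type" test; the helper name is hypothetical and not part of typcache.c:

    /* Hypothetical helper: does this type have a usable row descriptor? */
    static bool
    has_row_descriptor(Oid type_id, int32 typmod)
    {
        TupleDesc   tupdesc = lookup_rowtype_tupdesc_domain(type_id, typmod, true);

        if (tupdesc == NULL)
            return false;
        ReleaseTupleDesc(tupdesc);
        return true;
    }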
1918 
1919 /*
1920  * Hash function for the hash table of RecordCacheEntry.
1921  */
1922 static uint32
1923 record_type_typmod_hash(const void *data, size_t size)
1924 {
1925  RecordCacheEntry *entry = (RecordCacheEntry *) data;
1926 
1927  return hashTupleDesc(entry->tupdesc);
1928 }
1929 
1930 /*
1931  * Match function for the hash table of RecordCacheEntry.
1932  */
1933 static int
1934 record_type_typmod_compare(const void *a, const void *b, size_t size)
1935 {
1936  RecordCacheEntry *left = (RecordCacheEntry *) a;
1937  RecordCacheEntry *right = (RecordCacheEntry *) b;
1938 
1939  return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1940 }
1941 
1942 /*
1943  * assign_record_type_typmod
1944  *
1945  * Given a tuple descriptor for a RECORD type, find or create a cache entry
1946  * for the type, and set the tupdesc's tdtypmod field to a value that will
1947  * identify this cache entry to lookup_rowtype_tupdesc.
1948  */
1949 void
1950 assign_record_type_typmod(TupleDesc tupDesc)
1951 {
1952  RecordCacheEntry *recentry;
1953  TupleDesc entDesc;
1954  bool found;
1955  MemoryContext oldcxt;
1956 
1957  Assert(tupDesc->tdtypeid == RECORDOID);
1958 
1959  if (RecordCacheHash == NULL)
1960  {
1961  /* First time through: initialize the hash table */
1962  HASHCTL ctl;
1963 
1964  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
1965  ctl.entrysize = sizeof(RecordCacheEntry);
1966  ctl.hash = record_type_typmod_hash;
1967  ctl.match = record_type_typmod_compare;
1968  RecordCacheHash = hash_create("Record information cache", 64,
1969  &ctl,
1970  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
1971 
1972  /* Also make sure CacheMemoryContext exists */
1973  if (!CacheMemoryContext)
1974  CreateCacheMemoryContext();
1975  }
1976 
1977  /*
1978  * Find a hashtable entry for this tuple descriptor. We don't use
1979  * HASH_ENTER yet, because if it's missing, we need to make sure that all
1980  * the allocations succeed before we create the new entry.
1981  */
1982  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
1983  &tupDesc,
1984  HASH_FIND, &found);
1985  if (found && recentry->tupdesc != NULL)
1986  {
1987  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
1988  return;
1989  }
1990 
1991  /* Not present, so need to manufacture an entry */
1992  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1993 
1994  /* Look in the SharedRecordTypmodRegistry, if attached */
1995  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
1996  if (entDesc == NULL)
1997  {
1998  /*
1999  * Make sure we have room before we CreateTupleDescCopy() or advance
2000  * NextRecordTypmod.
2001  */
2002  ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
2003 
2004  /* Reference-counted local cache only. */
2005  entDesc = CreateTupleDescCopy(tupDesc);
2006  entDesc->tdrefcount = 1;
2007  entDesc->tdtypmod = NextRecordTypmod++;
2008  }
2009  else
2010  {
2011  ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
2012  }
2013 
2014  RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2015 
2016  /* Assign a unique tupdesc identifier, too. */
2017  RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
2018 
2019  /* Fully initialized; create the hash table entry */
2020  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2021  &tupDesc,
2022  HASH_ENTER, NULL);
2023  recentry->tupdesc = entDesc;
2024 
2025  /* Update the caller's tuple descriptor. */
2026  tupDesc->tdtypmod = entDesc->tdtypmod;
2027 
2028  MemoryContextSwitchTo(oldcxt);
2029 }
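As a usage sketch (assuming a backend environment with the usual headers): build an anonymous record descriptor and register it so lookup_rowtype_tupdesc can later find it. The executor's BlessTupleDesc() wraps essentially this call; the function name below is invented.

    #include "catalog/pg_type.h"

    /* Hypothetical: make and register a (int4, text) record descriptor. */
    static TupleDesc
    make_registered_record_desc(void)
    {
        TupleDesc   tupdesc = CreateTemplateTupleDesc(2);

        TupleDescInitEntry(tupdesc, (AttrNumber) 1, "a", INT4OID, -1, 0);
        TupleDescInitEntry(tupdesc, (AttrNumber) 2, "b", TEXTOID, -1, 0);

        /* tdtypeid is already RECORDOID; this fills in tdtypmod */
        assign_record_type_typmod(tupdesc);
        return tupdesc;
    }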
2030 
2031 /*
2032  * assign_record_type_identifier
2033  *
2034  * Get an identifier, which will be unique over the lifespan of this backend
2035  * process, for the current tuple descriptor of the specified composite type.
2036  * For named composite types, the value is guaranteed to change if the type's
2037  * definition does. For registered RECORD types, the value will not change
2038  * once assigned, since the registered type won't either. If an anonymous
2039  * RECORD type is specified, we return a new identifier on each call.
2040  */
2041 uint64
2042 assign_record_type_identifier(Oid type_id, int32 typmod)
2043 {
2044  if (type_id != RECORDOID)
2045  {
2046  /*
2047  * It's a named composite type, so use the regular typcache.
2048  */
2049  TypeCacheEntry *typentry;
2050 
2051  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2052  if (typentry->tupDesc == NULL)
2053  ereport(ERROR,
2054  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2055  errmsg("type %s is not composite",
2056  format_type_be(type_id))));
2057  Assert(typentry->tupDesc_identifier != 0);
2058  return typentry->tupDesc_identifier;
2059  }
2060  else
2061  {
2062  /*
2063  * It's a transient record type, so look in our record-type table.
2064  */
2065  if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2066  RecordCacheArray[typmod].tupdesc != NULL)
2067  {
2068  Assert(RecordCacheArray[typmod].id != 0);
2069  return RecordCacheArray[typmod].id;
2070  }
2071 
2072  /* For anonymous or unrecognized record type, generate a new ID */
2073  return ++tupledesc_id_counter;
2074  }
2075 }
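A sketch of the intended consumption pattern: cache the identifier and treat any change as "the tupdesc may have changed". The names here are hypothetical, not typcache.c code:

    static uint64 last_seen_id = 0;

    /* Hypothetical: has the row type's descriptor possibly changed? */
    static bool
    row_type_may_have_changed(Oid type_id, int32 typmod)
    {
        uint64      cur_id = assign_record_type_identifier(type_id, typmod);

        if (cur_id == last_seen_id)
            return false;
        last_seen_id = cur_id;
        return true;
    }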
2076 
2077 /*
2078  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2079  * This exists only to avoid exposing private innards of
2080  * SharedRecordTypmodRegistry in a header.
2081  */
2082 size_t
2083 SharedRecordTypmodRegistryEstimate(void)
2084 {
2085  return sizeof(SharedRecordTypmodRegistry);
2086 }
2087 
2088 /*
2089  * Initialize 'registry' in a pre-existing shared memory region, which must be
2090  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2091  * bytes.
2092  *
2093  * 'area' will be used to allocate shared memory space as required for the
2094  * typemod registration. The current process, expected to be a leader process
2095  * in a parallel query, will be attached automatically and its current record
2096  * types will be loaded into *registry. While attached, all calls to
2097  * assign_record_type_typmod will use the shared registry. Worker backends
2098  * will need to attach explicitly.
2099  *
2100  * Note that this function takes 'area' and 'segment' as arguments rather than
2101  * accessing them via CurrentSession, because they aren't installed there
2102  * until after this function runs.
2103  */
2104 void
2105 SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2106  dsm_segment *segment,
2107  dsa_area *area)
2108 {
2109  MemoryContext old_context;
2110  dshash_table *record_table;
2111  dshash_table *typmod_table;
2112  int32 typmod;
2113 
2114  Assert(!IsParallelWorker());
2115 
2116  /* We can't already be attached to a shared registry. */
2117  Assert(CurrentSession->shared_record_typmod_registry == NULL);
2118  Assert(CurrentSession->shared_record_table == NULL);
2119  Assert(CurrentSession->shared_typmod_table == NULL);
2120 
2121  old_context = MemoryContextSwitchTo(TopMemoryContext);
2122 
2123  /* Create the hash table of tuple descriptors indexed by themselves. */
2124  record_table = dshash_create(area, &srtr_record_table_params, area);
2125 
2126  /* Create the hash table of tuple descriptors indexed by typmod. */
2127  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2128 
2129  MemoryContextSwitchTo(old_context);
2130 
2131  /* Initialize the SharedRecordTypmodRegistry. */
2132  registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2133  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2134  pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
2135 
2136  /*
2137  * Copy all entries from this backend's private registry into the shared
2138  * registry.
2139  */
2140  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2141  {
2142  SharedTypmodTableEntry *typmod_table_entry;
2143  SharedRecordTableEntry *record_table_entry;
2144  SharedRecordTableKey record_table_key;
2145  dsa_pointer shared_dp;
2146  TupleDesc tupdesc;
2147  bool found;
2148 
2149  tupdesc = RecordCacheArray[typmod].tupdesc;
2150  if (tupdesc == NULL)
2151  continue;
2152 
2153  /* Copy the TupleDesc into shared memory. */
2154  shared_dp = share_tupledesc(area, tupdesc, typmod);
2155 
2156  /* Insert into the typmod table. */
2157  typmod_table_entry = dshash_find_or_insert(typmod_table,
2158  &tupdesc->tdtypmod,
2159  &found);
2160  if (found)
2161  elog(ERROR, "cannot create duplicate shared record typmod");
2162  typmod_table_entry->typmod = tupdesc->tdtypmod;
2163  typmod_table_entry->shared_tupdesc = shared_dp;
2164  dshash_release_lock(typmod_table, typmod_table_entry);
2165 
2166  /* Insert into the record table. */
2167  record_table_key.shared = false;
2168  record_table_key.u.local_tupdesc = tupdesc;
2169  record_table_entry = dshash_find_or_insert(record_table,
2170  &record_table_key,
2171  &found);
2172  if (!found)
2173  {
2174  record_table_entry->key.shared = true;
2175  record_table_entry->key.u.shared_tupdesc = shared_dp;
2176  }
2177  dshash_release_lock(record_table, record_table_entry);
2178  }
2179 
2180  /*
2181  * Set up the global state that will tell assign_record_type_typmod and
2182  * lookup_rowtype_tupdesc_internal about the shared registry.
2183  */
2184  CurrentSession->shared_record_table = record_table;
2185  CurrentSession->shared_typmod_table = typmod_table;
2186  CurrentSession->shared_record_typmod_registry = registry;
2187 
2188  /*
2189  * We install a detach hook in the leader, but only to handle cleanup on
2190  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2191  * the memory, the leader process will use a shared registry until it
2192  * exits.
2193  */
2194  on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2195 }
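Sketch of the leader-side call sequence, loosely modeled on the session DSM setup in session.c; 'toc', 'segment', and 'area' are assumed to come from the surrounding parallel-query infrastructure, and the function name is invented.

    #include "storage/shm_toc.h"

    static void
    setup_typmod_registry(shm_toc *toc, dsm_segment *segment, dsa_area *area)
    {
        SharedRecordTypmodRegistry *registry;

        /* Reserve space sized by the estimator, then initialize in place. */
        registry = (SharedRecordTypmodRegistry *)
            shm_toc_allocate(toc, SharedRecordTypmodRegistryEstimate());
        SharedRecordTypmodRegistryInit(registry, segment, area);
    }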
2196 
2197 /*
2198  * Attach to 'registry', which must have been initialized already by another
2199  * backend. Future calls to assign_record_type_typmod and
2200  * lookup_rowtype_tupdesc_internal will use the shared registry until the
2201  * current session is detached.
2202  */
2203 void
2204 SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
2205 {
2206  MemoryContext old_context;
2207  dshash_table *record_table;
2208  dshash_table *typmod_table;
2209 
2210  Assert(IsParallelWorker());
2211 
2212  /* We can't already be attached to a shared registry. */
2213  Assert(CurrentSession != NULL);
2214  Assert(CurrentSession->segment != NULL);
2215  Assert(CurrentSession->area != NULL);
2216  Assert(CurrentSession->shared_record_typmod_registry == NULL);
2217  Assert(CurrentSession->shared_record_table == NULL);
2218  Assert(CurrentSession->shared_typmod_table == NULL);
2219 
2220  /*
2221  * We can't already have typmods in our local cache, because they'd clash
2222  * with those imported by SharedRecordTypmodRegistryInit. This should be
2223  * a freshly started parallel worker. If we ever support worker
2224  * recycling, a worker would need to zap its local cache in between
2225  * servicing different queries, in order to be able to call this and
2226  * synchronize typmods with a new leader; but that's problematic because
2227  * we can't be very sure that record-typmod-related state hasn't escaped
2228  * to anywhere else in the process.
2229  */
2230  Assert(NextRecordTypmod == 0);
2231 
2232  old_context = MemoryContextSwitchTo(TopMemoryContext);
2233 
2234  /* Attach to the two hash tables. */
2235  record_table = dshash_attach(CurrentSession->area,
2236  &srtr_record_table_params,
2237  registry->record_table_handle,
2238  CurrentSession->area);
2239  typmod_table = dshash_attach(CurrentSession->area,
2240  &srtr_typmod_table_params,
2241  registry->typmod_table_handle,
2242  NULL);
2243 
2244  MemoryContextSwitchTo(old_context);
2245 
2246  /*
2247  * Set up detach hook to run at worker exit. Currently this is the same
2248  * as the leader's detach hook, but in future they might need to be
2249  * different.
2250  */
2251  on_dsm_detach(CurrentSession->segment,
2252  shared_record_typmod_registry_detach,
2253  PointerGetDatum(registry));
2254 
2255  /*
2256  * Set up the session state that will tell assign_record_type_typmod and
2257  * lookup_rowtype_tupdesc_internal about the shared registry.
2258  */
2259  CurrentSession->shared_record_typmod_registry = registry;
2260  CurrentSession->shared_record_table = record_table;
2261  CurrentSession->shared_typmod_table = typmod_table;
2262 }
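And the corresponding worker-side sketch; the toc key and function name are made up for illustration:

    /* Hypothetical key under which the leader published the registry. */
    #define TYPMOD_REGISTRY_TOC_KEY UINT64CONST(0xF001)

    static void
    attach_typmod_registry(shm_toc *toc)
    {
        SharedRecordTypmodRegistry *registry;

        registry = (SharedRecordTypmodRegistry *)
            shm_toc_lookup(toc, TYPMOD_REGISTRY_TOC_KEY, false);
        SharedRecordTypmodRegistryAttach(registry);
    }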
2263 
2264 /*
2265  * TypeCacheRelCallback
2266  * Relcache inval callback function
2267  *
2268  * Delete the cached tuple descriptor (if any) for the given rel's composite
2269  * type, or for all composite types if relid == InvalidOid. Also reset
2270  * whatever info we have cached about the composite type's comparability.
2271  *
2272  * This is called when a relcache invalidation event occurs for the given
2273  * relid. We must scan the whole typcache hash since we don't know the
2274  * type OID corresponding to the relid. We could do a direct search if this
2275  * were a syscache-flush callback on pg_type, but then we would need all
2276  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
2277  * invals against the rel's pg_type OID. The extra SI signaling could very
2278  * well cost more than we'd save, since in most usages there are not very
2279  * many entries in a backend's typcache. The risk of bugs-of-omission seems
2280  * high, too.
2281  *
2282  * Another possibility, with only localized impact, is to maintain a second
2283  * hashtable that indexes composite-type typcache entries by their typrelid.
2284  * But it's still not clear it's worth the trouble.
2285  */
2286 static void
2287 TypeCacheRelCallback(Datum arg, Oid relid)
2288 {
2289  HASH_SEQ_STATUS status;
2290  TypeCacheEntry *typentry;
2291 
2292  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2293  hash_seq_init(&status, TypeCacheHash);
2294  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2295  {
2296  if (typentry->typtype == TYPTYPE_COMPOSITE)
2297  {
2298  /* Skip if no match, unless we're zapping all composite types */
2299  if (relid != typentry->typrelid && relid != InvalidOid)
2300  continue;
2301 
2302  /* Delete tupdesc if we have it */
2303  if (typentry->tupDesc != NULL)
2304  {
2305  /*
2306  * Release our refcount, and free the tupdesc if none remain.
2307  * (Can't use DecrTupleDescRefCount because this reference is
2308  * not logged in current resource owner.)
2309  */
2310  Assert(typentry->tupDesc->tdrefcount > 0);
2311  if (--typentry->tupDesc->tdrefcount == 0)
2312  FreeTupleDesc(typentry->tupDesc);
2313  typentry->tupDesc = NULL;
2314 
2315  /*
2316  * Also clear tupDesc_identifier, so that anything watching
2317  * that will realize that the tupdesc has possibly changed.
2318  * (Alternatively, we could specify that to detect possible
2319  * tupdesc change, one must check for tupDesc != NULL as well
2320  * as tupDesc_identifier being the same as what was previously
2321  * seen. That seems error-prone.)
2322  */
2323  typentry->tupDesc_identifier = 0;
2324  }
2325 
2326  /* Reset equality/comparison/hashing validity information */
2327  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2328  }
2329  else if (typentry->typtype == TYPTYPE_DOMAIN)
2330  {
2331  /*
2332  * If it's domain over composite, reset flags. (We don't bother
2333  * trying to determine whether the specific base type needs a
2334  * reset.) Note that if we haven't determined whether the base
2335  * type is composite, we don't need to reset anything.
2336  */
2337  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2338  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2339  }
2340  }
2341 }
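For context, a callback of this shape is registered once per backend; lookup_type_cache does the equivalent of the following on first use:

    CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);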
2342 
2343 /*
2344  * TypeCacheTypCallback
2345  * Syscache inval callback function
2346  *
2347  * This is called when a syscache invalidation event occurs for any
2348  * pg_type row. If we have information cached about that type, mark
2349  * it as needing to be reloaded.
2350  */
2351 static void
2352 TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2353 {
2354  HASH_SEQ_STATUS status;
2355  TypeCacheEntry *typentry;
2356 
2357  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2358  hash_seq_init(&status, TypeCacheHash);
2359  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2360  {
2361  /* Is this the targeted type row (or is this a total cache flush)? */
2362  if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
2363  {
2364  /*
2365  * Mark the data obtained directly from pg_type as invalid. Also,
2366  * if it's a domain, typnotnull might've changed, so we'll need to
2367  * recalculate its constraints.
2368  */
2369  typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2370  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2371 
2372  }
2373 }
2374 
2375 /*
2376  * TypeCacheOpcCallback
2377  * Syscache inval callback function
2378  *
2379  * This is called when a syscache invalidation event occurs for any pg_opclass
2380  * row. In principle we could probably just invalidate data dependent on the
2381  * particular opclass, but since updates on pg_opclass are rare in production
2382  * it doesn't seem worth a lot of complication: we just mark all cached data
2383  * invalid.
2384  *
2385  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2386  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2387  * is not allowed to be used to add/drop the primary operators and functions
2388  * of an opclass, only cross-type members of a family; and the latter sorts
2389  * of members are not going to get cached here.
2390  */
2391 static void
2392 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2393 {
2394  HASH_SEQ_STATUS status;
2395  TypeCacheEntry *typentry;
2396 
2397  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2398  hash_seq_init(&status, TypeCacheHash);
2399  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2400  {
2401  /* Reset equality/comparison/hashing validity information */
2402  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2403  }
2404 }
2405 
2406 /*
2407  * TypeCacheConstrCallback
2408  * Syscache inval callback function
2409  *
2410  * This is called when a syscache invalidation event occurs for any
2411  * pg_constraint row. We flush information about domain constraints
2412  * when this happens.
2413  *
2414  * It's slightly annoying that we can't tell whether the inval event was for
2415  * a domain constraint record or not; there's usually more update traffic
2416  * for table constraints than domain constraints, so we'll do a lot of
2417  * useless flushes. Still, this is better than the old no-caching-at-all
2418  * approach to domain constraints.
2419  */
2420 static void
2421 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2422 {
2423  TypeCacheEntry *typentry;
2424 
2425  /*
2426  * Because this is called very frequently, and typically very few of the
2427  * typcache entries are for domains, we don't use hash_seq_search here.
2428  * Instead we thread all the domain-type entries together so that we can
2429  * visit them cheaply.
2430  */
2431  for (typentry = firstDomainTypeEntry;
2432  typentry != NULL;
2433  typentry = typentry->nextDomain)
2434  {
2435  /* Reset domain constraint validity information */
2436  typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2437  }
2438 }
2439 
2440 
2441 /*
2442  * Check if given OID is part of the subset that's sortable by comparisons
2443  */
2444 static inline bool
2445 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2446 {
2447  Oid offset;
2448 
2449  if (arg < enumdata->bitmap_base)
2450  return false;
2451  offset = arg - enumdata->bitmap_base;
2452  if (offset > (Oid) INT_MAX)
2453  return false;
2454  return bms_is_member((int) offset, enumdata->sorted_values);
2455 }
2456 
2457 
2458 /*
2459  * compare_values_of_enum
2460  * Compare two members of an enum type.
2461  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2462  *
2463  * Note: currently, the enumData cache is refreshed only if we are asked
2464  * to compare an enum value that is not already in the cache. This is okay
2465  * because there is no support for re-ordering existing values, so comparisons
2466  * of previously cached values will return the right answer even if other
2467  * values have been added since we last loaded the cache.
2468  *
2469  * Note: the enum logic has a special-case rule about even-numbered versus
2470  * odd-numbered OIDs, but we take no account of that rule here; this
2471  * routine shouldn't even get called when that rule applies.
2472  */
2473 int
2474 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2475 {
2476  TypeCacheEnumData *enumdata;
2477  EnumItem *item1;
2478  EnumItem *item2;
2479 
2480  /*
2481  * Equal OIDs are certainly equal --- this case was probably handled by
2482  * our caller, but we may as well check.
2483  */
2484  if (arg1 == arg2)
2485  return 0;
2486 
2487  /* Load up the cache if first time through */
2488  if (tcache->enumData == NULL)
2489  load_enum_cache_data(tcache);
2490  enumdata = tcache->enumData;
2491 
2492  /*
2493  * If both OIDs are known-sorted, we can just compare them directly.
2494  */
2495  if (enum_known_sorted(enumdata, arg1) &&
2496  enum_known_sorted(enumdata, arg2))
2497  {
2498  if (arg1 < arg2)
2499  return -1;
2500  else
2501  return 1;
2502  }
2503 
2504  /*
2505  * Slow path: we have to identify their actual sort-order positions.
2506  */
2507  item1 = find_enumitem(enumdata, arg1);
2508  item2 = find_enumitem(enumdata, arg2);
2509 
2510  if (item1 == NULL || item2 == NULL)
2511  {
2512  /*
2513  * We couldn't find one or both values. That means the enum has
2514  * changed under us, so re-initialize the cache and try again. We
2515  * don't bother retrying the known-sorted case in this path.
2516  */
2517  load_enum_cache_data(tcache);
2518  enumdata = tcache->enumData;
2519 
2520  item1 = find_enumitem(enumdata, arg1);
2521  item2 = find_enumitem(enumdata, arg2);
2522 
2523  /*
2524  * If we still can't find the values, complain: we must have corrupt
2525  * data.
2526  */
2527  if (item1 == NULL)
2528  elog(ERROR, "enum value %u not found in cache for enum %s",
2529  arg1, format_type_be(tcache->type_id));
2530  if (item2 == NULL)
2531  elog(ERROR, "enum value %u not found in cache for enum %s",
2532  arg2, format_type_be(tcache->type_id));
2533  }
2534 
2535  if (item1->sort_order < item2->sort_order)
2536  return -1;
2537  else if (item1->sort_order > item2->sort_order)
2538  return 1;
2539  else
2540  return 0;
2541 }
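A minimal caller sketch (the helper name is hypothetical; the enum comparison operators in the backend follow this shape after handling the even/odd-OID fast path mentioned above):

    static bool
    enum_value_lt(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
    {
        return compare_values_of_enum(tcache, arg1, arg2) < 0;
    }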
2542 
2543 /*
2544  * Load (or re-load) the enumData member of the typcache entry.
2545  */
2546 static void
2547 load_enum_cache_data(TypeCacheEntry *tcache)
2548 {
2549  TypeCacheEnumData *enumdata;
2550  Relation enum_rel;
2551  SysScanDesc enum_scan;
2552  HeapTuple enum_tuple;
2553  ScanKeyData skey;
2554  EnumItem *items;
2555  int numitems;
2556  int maxitems;
2557  Oid bitmap_base;
2558  Bitmapset *bitmap;
2559  MemoryContext oldcxt;
2560  int bm_size,
2561  start_pos;
2562 
2563  /* Check that this is actually an enum */
2564  if (tcache->typtype != TYPTYPE_ENUM)
2565  ereport(ERROR,
2566  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2567  errmsg("%s is not an enum",
2568  format_type_be(tcache->type_id))));
2569 
2570  /*
2571  * Read all the information for members of the enum type. We collect the
2572  * info in working memory in the caller's context, and then transfer it to
2573  * permanent memory in CacheMemoryContext. This minimizes the risk of
2574  * leaking memory from CacheMemoryContext in the event of an error partway
2575  * through.
2576  */
2577  maxitems = 64;
2578  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2579  numitems = 0;
2580 
2581  /* Scan pg_enum for the members of the target enum type. */
2582  ScanKeyInit(&skey,
2583  Anum_pg_enum_enumtypid,
2584  BTEqualStrategyNumber, F_OIDEQ,
2585  ObjectIdGetDatum(tcache->type_id));
2586 
2587  enum_rel = table_open(EnumRelationId, AccessShareLock);
2588  enum_scan = systable_beginscan(enum_rel,
2589  EnumTypIdLabelIndexId,
2590  true, NULL,
2591  1, &skey);
2592 
2593  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2594  {
2595  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2596 
2597  if (numitems >= maxitems)
2598  {
2599  maxitems *= 2;
2600  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2601  }
2602  items[numitems].enum_oid = en->oid;
2603  items[numitems].sort_order = en->enumsortorder;
2604  numitems++;
2605  }
2606 
2607  systable_endscan(enum_scan);
2608  table_close(enum_rel, AccessShareLock);
2609 
2610  /* Sort the items into OID order */
2611  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2612 
2613  /*
2614  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2615  * known to be in order and can thus be compared with just OID comparison.
2616  *
2617  * The point of this is that the enum's initial OIDs were certainly in
2618  * order, so there is some subset that can be compared via OID comparison;
2619  * and we'd rather not do binary searches unnecessarily.
2620  *
2621  * This is somewhat heuristic, and might identify a subset of OIDs that
2622  * isn't exactly what the type started with. That's okay as long as the
2623  * subset is correctly sorted.
2624  */
2625  bitmap_base = InvalidOid;
2626  bitmap = NULL;
2627  bm_size = 1; /* only save sets of at least 2 OIDs */
2628 
2629  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2630  {
2631  /*
2632  * Identify longest sorted subsequence starting at start_pos
2633  */
2634  Bitmapset *this_bitmap = bms_make_singleton(0);
2635  int this_bm_size = 1;
2636  Oid start_oid = items[start_pos].enum_oid;
2637  float4 prev_order = items[start_pos].sort_order;
2638  int i;
2639 
2640  for (i = start_pos + 1; i < numitems; i++)
2641  {
2642  Oid offset;
2643 
2644  offset = items[i].enum_oid - start_oid;
2645  /* quit if bitmap would be too large; cutoff is arbitrary */
2646  if (offset >= 8192)
2647  break;
2648  /* include the item if it's in-order */
2649  if (items[i].sort_order > prev_order)
2650  {
2651  prev_order = items[i].sort_order;
2652  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2653  this_bm_size++;
2654  }
2655  }
2656 
2657  /* Remember it if larger than previous best */
2658  if (this_bm_size > bm_size)
2659  {
2660  bms_free(bitmap);
2661  bitmap_base = start_oid;
2662  bitmap = this_bitmap;
2663  bm_size = this_bm_size;
2664  }
2665  else
2666  bms_free(this_bitmap);
2667 
2668  /*
2669  * Done if it's not possible to find a longer sequence in the rest of
2670  * the list. In typical cases this will happen on the first
2671  * iteration, which is why we create the bitmaps on the fly instead of
2672  * doing a second pass over the list.
2673  */
2674  if (bm_size >= (numitems - start_pos - 1))
2675  break;
2676  }
2677 
2678  /* OK, copy the data into CacheMemoryContext */
2679  oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2680  enumdata = (TypeCacheEnumData *)
2681  palloc(offsetof(TypeCacheEnumData, enum_values) +
2682  numitems * sizeof(EnumItem));
2683  enumdata->bitmap_base = bitmap_base;
2684  enumdata->sorted_values = bms_copy(bitmap);
2685  enumdata->num_values = numitems;
2686  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2687  MemoryContextSwitchTo(oldcxt);
2688 
2689  pfree(items);
2690  bms_free(bitmap);
2691 
2692  /* And link the finished cache struct into the typcache */
2693  if (tcache->enumData != NULL)
2694  pfree(tcache->enumData);
2695  tcache->enumData = enumdata;
2696 }
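A concrete (hypothetical) illustration of the bitmap heuristic above: suppose the enum's members, sorted by OID, are 17001, 17002, 17003, 17005 with sort orders 1, 2, 4, 3. Scanning from 17001, the sort order increases through 17001 (1), 17002 (2), and 17003 (4), but 17005 (3) breaks the run, so the bitmap records offsets {0, 1, 2} with bitmap_base = 17001. Comparisons among the first three values can then use bare OID comparison in enum_known_sorted's fast path, while any comparison involving 17005 falls through to the binary search in compare_values_of_enum.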
2697 
2698 /*
2699  * Locate the EnumItem with the given OID, if present
2700  */
2701 static EnumItem *
2702 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2703 {
2704  EnumItem srch;
2705 
2706  /* On some versions of Solaris, bsearch of zero items dumps core */
2707  if (enumdata->num_values <= 0)
2708  return NULL;
2709 
2710  srch.enum_oid = arg;
2711  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2712  sizeof(EnumItem), enum_oid_cmp);
2713 }
2714 
2715 /*
2716  * qsort comparison function for OID-ordered EnumItems
2717  */
2718 static int
2719 enum_oid_cmp(const void *left, const void *right)
2720 {
2721  const EnumItem *l = (const EnumItem *) left;
2722  const EnumItem *r = (const EnumItem *) right;
2723 
2724  if (l->enum_oid < r->enum_oid)
2725  return -1;
2726  else if (l->enum_oid > r->enum_oid)
2727  return 1;
2728  else
2729  return 0;
2730 }
2731 
2732 /*
2733  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2734  * to the given value and return a dsa_pointer.
2735  */
2736 static dsa_pointer
2737 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2738 {
2739  dsa_pointer shared_dp;
2740  TupleDesc shared;
2741 
2742  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2743  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2744  TupleDescCopy(shared, tupdesc);
2745  shared->tdtypmod = typmod;
2746 
2747  return shared_dp;
2748 }
2749 
2750 /*
2751  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2752  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2753  * Tuple descriptors returned by this function are not reference counted, and
2754  * will exist at least as long as the current backend remains attached to the
2755  * current session.
2756  */
2757 static TupleDesc
2758 find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2759 {
2760  TupleDesc result;
2761  SharedRecordTableKey key;
2762  SharedRecordTableEntry *record_table_entry;
2763  SharedTypmodTableEntry *typmod_table_entry;
2764  dsa_pointer shared_dp;
2765  bool found;
2766  uint32 typmod;
2767 
2768  /* If not even attached, nothing to do. */
2769  if (CurrentSession->shared_record_typmod_registry == NULL)
2770  return NULL;
2771 
2772  /* Try to find a matching tuple descriptor in the record table. */
2773  key.shared = false;
2774  key.u.local_tupdesc = tupdesc;
2775  record_table_entry = (SharedRecordTableEntry *)
2776  dshash_find(CurrentSession->shared_record_table, &key, false);
2777  if (record_table_entry)
2778  {
2779  Assert(record_table_entry->key.shared);
2780  dshash_release_lock(CurrentSession->shared_record_table,
2781  record_table_entry);
2782  result = (TupleDesc)
2783  dsa_get_address(CurrentSession->area,
2784  record_table_entry->key.u.shared_tupdesc);
2785  Assert(result->tdrefcount == -1);
2786 
2787  return result;
2788  }
2789 
2790  /* Allocate a new typmod number. This will be wasted if we error out. */
2791  typmod = (int)
2792  pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2793  1);
2794 
2795  /* Copy the TupleDesc into shared memory. */
2796  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2797 
2798  /*
2799  * Create an entry in the typmod table so that others will understand this
2800  * typmod number.
2801  */
2802  PG_TRY();
2803  {
2804  typmod_table_entry = (SharedTypmodTableEntry *)
2805  dshash_find_or_insert(CurrentSession->shared_typmod_table,
2806  &typmod, &found);
2807  if (found)
2808  elog(ERROR, "cannot create duplicate shared record typmod");
2809  }
2810  PG_CATCH();
2811  {
2812  dsa_free(CurrentSession->area, shared_dp);
2813  PG_RE_THROW();
2814  }
2815  PG_END_TRY();
2816  typmod_table_entry->typmod = typmod;
2817  typmod_table_entry->shared_tupdesc = shared_dp;
2818  dshash_release_lock(CurrentSession->shared_typmod_table,
2819  typmod_table_entry);
2820 
2821  /*
2822  * Finally create an entry in the record table so others with matching
2823  * tuple descriptors can reuse the typmod.
2824  */
2825  record_table_entry = (SharedRecordTableEntry *)
2826  dshash_find_or_insert(CurrentSession->shared_record_table, &key,
2827  &found);
2828  if (found)
2829  {
2830  /*
2831  * Someone concurrently inserted a matching tuple descriptor since the
2832  * first time we checked. Use that one instead.
2833  */
2834  dshash_release_lock(CurrentSession->shared_record_table,
2835  record_table_entry);
2836 
2837  /* Might as well free up the space used by the one we created. */
2838  found = dshash_delete_key(CurrentSession->shared_typmod_table,
2839  &typmod);
2840  Assert(found);
2841  dsa_free(CurrentSession->area, shared_dp);
2842 
2843  /* Return the one we found. */
2844  Assert(record_table_entry->key.shared);
2845  result = (TupleDesc)
2846  dsa_get_address(CurrentSession->area,
2847  record_table_entry->key.u.shared_tupdesc);
2848  Assert(result->tdrefcount == -1);
2849 
2850  return result;
2851  }
2852 
2853  /* Store it and return it. */
2854  record_table_entry->key.shared = true;
2855  record_table_entry->key.u.shared_tupdesc = shared_dp;
2856  dshash_release_lock(CurrentSession->shared_record_table,
2857  record_table_entry);
2858  result = (TupleDesc)
2859  dsa_get_address(CurrentSession->area, shared_dp);
2860  Assert(result->tdrefcount == -1);
2861 
2862  return result;
2863 }
2864 
2865 /*
2866  * On-DSM-detach hook to forget about the current shared record typmod
2867  * infrastructure. This is currently used by both leader and workers.
2868  */
2869 static void
2870 shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
2871 {
2872  /* Be cautious here: maybe we didn't finish initializing. */
2873  if (CurrentSession->shared_record_table != NULL)
2874  {
2875  dshash_detach(CurrentSession->shared_record_table);
2876  CurrentSession->shared_record_table = NULL;
2877  }
2878  if (CurrentSession->shared_typmod_table != NULL)
2879  {
2880  dshash_detach(CurrentSession->shared_typmod_table);
2881  CurrentSession->shared_typmod_table = NULL;
2882  }
2883  CurrentSession->shared_record_typmod_registry = NULL;
2884 }
Definition: typcache.h:140