PostgreSQL Source Code  git master
typcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Core data extracted from the pg_type row is updated
27  * when we detect updates to pg_type. Information dependent on opclasses is
28  * cleared if we detect updates to pg_opclass. We also support clearing the
29  * tuple descriptor and operator/function parts of a rowtype's cache entry,
30  * since those may need to change as a consequence of ALTER TABLE. Domain
31  * constraint changes are also tracked properly.
32  *
33  *
34  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
35  * Portions Copyright (c) 1994, Regents of the University of California
36  *
37  * IDENTIFICATION
38  * src/backend/utils/cache/typcache.c
39  *
40  *-------------------------------------------------------------------------
41  */
42 #include "postgres.h"
43 
44 #include <limits.h>
45 
46 #include "access/hash.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/relation.h"
51 #include "access/session.h"
52 #include "access/table.h"
53 #include "catalog/pg_am.h"
54 #include "catalog/pg_constraint.h"
55 #include "catalog/pg_enum.h"
56 #include "catalog/pg_operator.h"
57 #include "catalog/pg_range.h"
58 #include "catalog/pg_type.h"
59 #include "commands/defrem.h"
60 #include "common/int.h"
61 #include "executor/executor.h"
62 #include "lib/dshash.h"
63 #include "optimizer/optimizer.h"
64 #include "port/pg_bitutils.h"
65 #include "storage/lwlock.h"
66 #include "utils/builtins.h"
67 #include "utils/catcache.h"
68 #include "utils/fmgroids.h"
69 #include "utils/injection_point.h"
70 #include "utils/inval.h"
71 #include "utils/lsyscache.h"
72 #include "utils/memutils.h"
73 #include "utils/rel.h"
74 #include "utils/syscache.h"
75 #include "utils/typcache.h"
76 
77 
78 /* The main type cache hashtable searched by lookup_type_cache */
79 static HTAB *TypeCacheHash = NULL;
80 
81 /*
82  * The mapping of relation's OID to the corresponding composite type OID.
83  * We're keeping the map entry when the corresponding typentry has something
84  * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85  * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86  */
88 
90 {
91  Oid relid; /* OID of the relation */
92  Oid composite_typid; /* OID of the relation's composite type */
94 
95 /* List of type cache entries for domain types */
97 
/* Private flag bits in the TypeCacheEntry.flags field */
#define TCFLAGS_HAVE_PG_TYPE_DATA			0x000001
#define TCFLAGS_CHECKED_BTREE_OPCLASS		0x000002
#define TCFLAGS_CHECKED_HASH_OPCLASS		0x000004
#define TCFLAGS_CHECKED_EQ_OPR				0x000008
#define TCFLAGS_CHECKED_LT_OPR				0x000010
#define TCFLAGS_CHECKED_GT_OPR				0x000020
#define TCFLAGS_CHECKED_CMP_PROC			0x000040
#define TCFLAGS_CHECKED_HASH_PROC			0x000080
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC	0x000100
#define TCFLAGS_CHECKED_ELEM_PROPERTIES		0x000200
#define TCFLAGS_HAVE_ELEM_EQUALITY			0x000400
#define TCFLAGS_HAVE_ELEM_COMPARE			0x000800
#define TCFLAGS_HAVE_ELEM_HASHING			0x001000
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING	0x002000
#define TCFLAGS_CHECKED_FIELD_PROPERTIES	0x004000
#define TCFLAGS_HAVE_FIELD_EQUALITY			0x008000
#define TCFLAGS_HAVE_FIELD_COMPARE			0x010000
#define TCFLAGS_HAVE_FIELD_HASHING			0x020000
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING	0x040000
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS	0x080000
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE	0x100000

/* The flags associated with equality/comparison/hashing are all but these: */
#define TCFLAGS_OPERATOR_FLAGS \
	(~(TCFLAGS_HAVE_PG_TYPE_DATA | \
	   TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
	   TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126 
127 /*
128  * Data stored about a domain type's constraints. Note that we do not create
129  * this struct for the common case of a constraint-less domain; we just set
130  * domainData to NULL to indicate that.
131  *
132  * Within a DomainConstraintCache, we store expression plan trees, but the
133  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134  * When needed, expression evaluation nodes are built by flat-copying the
135  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136  * Such a node tree is not part of the DomainConstraintCache, but is
137  * considered to belong to a DomainConstraintRef.
138  */
140 {
141  List *constraints; /* list of DomainConstraintState nodes */
142  MemoryContext dccContext; /* memory context holding all associated data */
143  long dccRefCount; /* number of references to this struct */
144 };
145 
146 /* Private information to support comparisons of enum values */
147 typedef struct
148 {
149  Oid enum_oid; /* OID of one enum value */
150  float4 sort_order; /* its sort position */
151 } EnumItem;
152 
153 typedef struct TypeCacheEnumData
154 {
155  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157  int num_values; /* total number of values in enum */
160 
161 /*
162  * We use a separate table for storing the definitions of non-anonymous
163  * record types. Once defined, a record type will be remembered for the
164  * life of the backend. Subsequent uses of the "same" record type (where
165  * sameness means equalRowTypes) will refer to the existing table entry.
166  *
167  * Stored record types are remembered in a linear array of TupleDescs,
168  * which can be indexed quickly with the assigned typmod. There is also
169  * a hash table to speed searches for matching TupleDescs.
170  */
171 
172 typedef struct RecordCacheEntry
173 {
176 
177 /*
178  * To deal with non-anonymous record types that are exchanged by backends
179  * involved in a parallel query, we also need a shared version of the above.
180  */
182 {
183  /* A hash table for finding a matching TupleDesc. */
185  /* A hash table for finding a TupleDesc by typmod. */
187  /* A source of new record typmod numbers. */
189 };
190 
191 /*
192  * When using shared tuple descriptors as hash table keys we need a way to be
193  * able to search for an equal shared TupleDesc using a backend-local
194  * TupleDesc. So we use this type which can hold either, and hash and compare
195  * functions that know how to handle both.
196  */
197 typedef struct SharedRecordTableKey
198 {
199  union
200  {
203  } u;
204  bool shared;
206 
207 /*
208  * The shared version of RecordCacheEntry. This lets us look up a typmod
209  * using a TupleDesc which may be in local or shared memory.
210  */
212 {
215 
216 /*
217  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218  * up a TupleDesc in shared memory using a typmod.
219  */
221 {
225 
229 
230 /*
231  * A comparator function for SharedRecordTableKey.
232  */
233 static int
234 shared_record_table_compare(const void *a, const void *b, size_t size,
235  void *arg)
236 {
237  dsa_area *area = (dsa_area *) arg;
240  TupleDesc t1;
241  TupleDesc t2;
242 
243  if (k1->shared)
244  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245  else
246  t1 = k1->u.local_tupdesc;
247 
248  if (k2->shared)
249  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250  else
251  t2 = k2->u.local_tupdesc;
252 
253  return equalRowTypes(t1, t2) ? 0 : 1;
254 }
255 
256 /*
257  * A hash function for SharedRecordTableKey.
258  */
259 static uint32
260 shared_record_table_hash(const void *a, size_t size, void *arg)
261 {
262  dsa_area *area = (dsa_area *) arg;
264  TupleDesc t;
265 
266  if (k->shared)
267  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268  else
269  t = k->u.local_tupdesc;
270 
271  return hashRowType(t);
272 }
273 
274 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
276  sizeof(SharedRecordTableKey), /* unused */
277  sizeof(SharedRecordTableEntry),
282 };
283 
284 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
286  sizeof(uint32),
287  sizeof(SharedTypmodTableEntry),
292 };
293 
294 /* hashtable for recognizing registered record types */
295 static HTAB *RecordCacheHash = NULL;
296 
297 typedef struct RecordCacheArrayEntry
298 {
302 
303 /* array of info about registered record types, indexed by assigned typmod */
305 static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306 static int32 NextRecordTypmod = 0; /* number of entries used */
307 
308 /*
309  * Process-wide counter for generating unique tupledesc identifiers.
310  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312  */
314 
315 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316 static void load_rangetype_info(TypeCacheEntry *typentry);
317 static void load_multirangetype_info(TypeCacheEntry *typentry);
318 static void load_domaintype_info(TypeCacheEntry *typentry);
319 static int dcs_cmp(const void *a, const void *b);
320 static void decr_dcc_refcount(DomainConstraintCache *dcc);
321 static void dccref_deletion_callback(void *arg);
322 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323 static bool array_element_has_equality(TypeCacheEntry *typentry);
324 static bool array_element_has_compare(TypeCacheEntry *typentry);
325 static bool array_element_has_hashing(TypeCacheEntry *typentry);
327 static void cache_array_element_properties(TypeCacheEntry *typentry);
328 static bool record_fields_have_equality(TypeCacheEntry *typentry);
329 static bool record_fields_have_compare(TypeCacheEntry *typentry);
330 static bool record_fields_have_hashing(TypeCacheEntry *typentry);
332 static void cache_record_field_properties(TypeCacheEntry *typentry);
333 static bool range_element_has_hashing(TypeCacheEntry *typentry);
335 static void cache_range_element_properties(TypeCacheEntry *typentry);
336 static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
339 static void TypeCacheRelCallback(Datum arg, Oid relid);
340 static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
341 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
342 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
343 static void load_enum_cache_data(TypeCacheEntry *tcache);
344 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
345 static int enum_oid_cmp(const void *left, const void *right);
347  Datum datum);
349 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
350  uint32 typmod);
351 static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
352 static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);
353 
354 
355 /*
356  * Hash function compatible with one-arg system cache hash function.
357  */
358 static uint32
359 type_cache_syshash(const void *key, Size keysize)
360 {
361  Assert(keysize == sizeof(Oid));
362  return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363 }
364 
365 /*
366  * lookup_type_cache
367  *
368  * Fetch the type cache entry for the specified datatype, and make sure that
369  * all the fields requested by bits in 'flags' are valid.
370  *
371  * The result is never NULL --- we will ereport() if the passed type OID is
372  * invalid. Note however that we may fail to find one or more of the
373  * values requested by 'flags'; the caller needs to check whether the fields
374  * are InvalidOid or not.
375  *
376  * Note that while filling TypeCacheEntry we might process concurrent
377  * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
378  * invalidated. In this case, we typically only clear flags while values are
379  * still available for the caller. It's expected that the caller holds
380  * enough locks on type-depending objects that the values are still relevant.
381  * It's also important that the tupdesc is filled after all other
382  * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
383  * invalidated during the lookup_type_cache() call.
384  */
386 lookup_type_cache(Oid type_id, int flags)
387 {
388  TypeCacheEntry *typentry;
389  bool found;
390  int in_progress_offset;
391 
392  if (TypeCacheHash == NULL)
393  {
394  /* First time through: initialize the hash table */
395  HASHCTL ctl;
396  int allocsize;
397 
398  ctl.keysize = sizeof(Oid);
399  ctl.entrysize = sizeof(TypeCacheEntry);
400 
401  /*
402  * TypeCacheEntry takes hash value from the system cache. For
403  * TypeCacheHash we use the same hash in order to speedup search by
404  * hash value. This is used by hash_seq_init_with_hash_value().
405  */
406  ctl.hash = type_cache_syshash;
407 
408  TypeCacheHash = hash_create("Type information cache", 64,
410 
412 
413  ctl.keysize = sizeof(Oid);
414  ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415  RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
416  &ctl, HASH_ELEM | HASH_BLOBS);
417 
418  /* Also set up callbacks for SI invalidations */
423 
424  /* Also make sure CacheMemoryContext exists */
425  if (!CacheMemoryContext)
427 
428  /*
429  * reserve enough in_progress_list slots for many cases
430  */
431  allocsize = 4;
434  allocsize * sizeof(*in_progress_list));
435  in_progress_list_maxlen = allocsize;
436  }
437 
438  Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
439 
440  /* Register to catch invalidation messages */
442  {
443  int allocsize;
444 
445  allocsize = in_progress_list_maxlen * 2;
447  allocsize * sizeof(*in_progress_list));
448  in_progress_list_maxlen = allocsize;
449  }
450  in_progress_offset = in_progress_list_len++;
451  in_progress_list[in_progress_offset] = type_id;
452 
453  /* Try to look up an existing entry */
455  &type_id,
456  HASH_FIND, NULL);
457  if (typentry == NULL)
458  {
459  /*
460  * If we didn't find one, we want to make one. But first look up the
461  * pg_type row, just to make sure we don't make a cache entry for an
462  * invalid type OID. If the type OID is not valid, present a
463  * user-facing error, since some code paths such as domain_in() allow
464  * this function to be reached with a user-supplied OID.
465  */
466  HeapTuple tp;
467  Form_pg_type typtup;
468 
469  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
470  if (!HeapTupleIsValid(tp))
471  ereport(ERROR,
472  (errcode(ERRCODE_UNDEFINED_OBJECT),
473  errmsg("type with OID %u does not exist", type_id)));
474  typtup = (Form_pg_type) GETSTRUCT(tp);
475  if (!typtup->typisdefined)
476  ereport(ERROR,
477  (errcode(ERRCODE_UNDEFINED_OBJECT),
478  errmsg("type \"%s\" is only a shell",
479  NameStr(typtup->typname))));
480 
481  /* Now make the typcache entry */
483  &type_id,
484  HASH_ENTER, &found);
485  Assert(!found); /* it wasn't there a moment ago */
486 
487  MemSet(typentry, 0, sizeof(TypeCacheEntry));
488 
489  /* These fields can never change, by definition */
490  typentry->type_id = type_id;
491  typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492 
493  /* Keep this part in sync with the code below */
494  typentry->typlen = typtup->typlen;
495  typentry->typbyval = typtup->typbyval;
496  typentry->typalign = typtup->typalign;
497  typentry->typstorage = typtup->typstorage;
498  typentry->typtype = typtup->typtype;
499  typentry->typrelid = typtup->typrelid;
500  typentry->typsubscript = typtup->typsubscript;
501  typentry->typelem = typtup->typelem;
502  typentry->typcollation = typtup->typcollation;
503  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
504 
505  /* If it's a domain, immediately thread it into the domain cache list */
506  if (typentry->typtype == TYPTYPE_DOMAIN)
507  {
508  typentry->nextDomain = firstDomainTypeEntry;
509  firstDomainTypeEntry = typentry;
510  }
511 
512  ReleaseSysCache(tp);
513  }
514  else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
515  {
516  /*
517  * We have an entry, but its pg_type row got changed, so reload the
518  * data obtained directly from pg_type.
519  */
520  HeapTuple tp;
521  Form_pg_type typtup;
522 
523  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
524  if (!HeapTupleIsValid(tp))
525  ereport(ERROR,
526  (errcode(ERRCODE_UNDEFINED_OBJECT),
527  errmsg("type with OID %u does not exist", type_id)));
528  typtup = (Form_pg_type) GETSTRUCT(tp);
529  if (!typtup->typisdefined)
530  ereport(ERROR,
531  (errcode(ERRCODE_UNDEFINED_OBJECT),
532  errmsg("type \"%s\" is only a shell",
533  NameStr(typtup->typname))));
534 
535  /*
536  * Keep this part in sync with the code above. Many of these fields
537  * shouldn't ever change, particularly typtype, but copy 'em anyway.
538  */
539  typentry->typlen = typtup->typlen;
540  typentry->typbyval = typtup->typbyval;
541  typentry->typalign = typtup->typalign;
542  typentry->typstorage = typtup->typstorage;
543  typentry->typtype = typtup->typtype;
544  typentry->typrelid = typtup->typrelid;
545  typentry->typsubscript = typtup->typsubscript;
546  typentry->typelem = typtup->typelem;
547  typentry->typcollation = typtup->typcollation;
548  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
549 
550  ReleaseSysCache(tp);
551  }
552 
553  /*
554  * Look up opclasses if we haven't already and any dependent info is
555  * requested.
556  */
561  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
562  {
563  Oid opclass;
564 
565  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
566  if (OidIsValid(opclass))
567  {
568  typentry->btree_opf = get_opclass_family(opclass);
569  typentry->btree_opintype = get_opclass_input_type(opclass);
570  }
571  else
572  {
573  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
574  }
575 
576  /*
577  * Reset information derived from btree opclass. Note in particular
578  * that we'll redetermine the eq_opr even if we previously found one;
579  * this matters in case a btree opclass has been added to a type that
580  * previously had only a hash opclass.
581  */
582  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
587  }
588 
589  /*
590  * If we need to look up equality operator, and there's no btree opclass,
591  * force lookup of hash opclass.
592  */
593  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
594  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
595  typentry->btree_opf == InvalidOid)
596  flags |= TYPECACHE_HASH_OPFAMILY;
597 
602  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
603  {
604  Oid opclass;
605 
606  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
607  if (OidIsValid(opclass))
608  {
609  typentry->hash_opf = get_opclass_family(opclass);
610  typentry->hash_opintype = get_opclass_input_type(opclass);
611  }
612  else
613  {
614  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
615  }
616 
617  /*
618  * Reset information derived from hash opclass. We do *not* reset the
619  * eq_opr; if we already found one from the btree opclass, that
620  * decision is still good.
621  */
622  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
624  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
625  }
626 
627  /*
628  * Look for requested operators and functions, if we haven't already.
629  */
630  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
631  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
632  {
633  Oid eq_opr = InvalidOid;
634 
635  if (typentry->btree_opf != InvalidOid)
636  eq_opr = get_opfamily_member(typentry->btree_opf,
637  typentry->btree_opintype,
638  typentry->btree_opintype,
640  if (eq_opr == InvalidOid &&
641  typentry->hash_opf != InvalidOid)
642  eq_opr = get_opfamily_member(typentry->hash_opf,
643  typentry->hash_opintype,
644  typentry->hash_opintype,
646 
647  /*
648  * If the proposed equality operator is array_eq or record_eq, check
649  * to see if the element type or column types support equality. If
650  * not, array_eq or record_eq would fail at runtime, so we don't want
651  * to report that the type has equality. (We can omit similar
652  * checking for ranges and multiranges because ranges can't be created
653  * in the first place unless their subtypes support equality.)
654  */
655  if (eq_opr == ARRAY_EQ_OP &&
656  !array_element_has_equality(typentry))
657  eq_opr = InvalidOid;
658  else if (eq_opr == RECORD_EQ_OP &&
659  !record_fields_have_equality(typentry))
660  eq_opr = InvalidOid;
661 
662  /* Force update of eq_opr_finfo only if we're changing state */
663  if (typentry->eq_opr != eq_opr)
664  typentry->eq_opr_finfo.fn_oid = InvalidOid;
665 
666  typentry->eq_opr = eq_opr;
667 
668  /*
669  * Reset info about hash functions whenever we pick up new info about
670  * equality operator. This is so we can ensure that the hash
671  * functions match the operator.
672  */
673  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
675  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
676  }
677  if ((flags & TYPECACHE_LT_OPR) &&
678  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
679  {
680  Oid lt_opr = InvalidOid;
681 
682  if (typentry->btree_opf != InvalidOid)
683  lt_opr = get_opfamily_member(typentry->btree_opf,
684  typentry->btree_opintype,
685  typentry->btree_opintype,
687 
688  /*
689  * As above, make sure array_cmp or record_cmp will succeed; but again
690  * we need no special check for ranges or multiranges.
691  */
692  if (lt_opr == ARRAY_LT_OP &&
693  !array_element_has_compare(typentry))
694  lt_opr = InvalidOid;
695  else if (lt_opr == RECORD_LT_OP &&
696  !record_fields_have_compare(typentry))
697  lt_opr = InvalidOid;
698 
699  typentry->lt_opr = lt_opr;
700  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
701  }
702  if ((flags & TYPECACHE_GT_OPR) &&
703  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
704  {
705  Oid gt_opr = InvalidOid;
706 
707  if (typentry->btree_opf != InvalidOid)
708  gt_opr = get_opfamily_member(typentry->btree_opf,
709  typentry->btree_opintype,
710  typentry->btree_opintype,
712 
713  /*
714  * As above, make sure array_cmp or record_cmp will succeed; but again
715  * we need no special check for ranges or multiranges.
716  */
717  if (gt_opr == ARRAY_GT_OP &&
718  !array_element_has_compare(typentry))
719  gt_opr = InvalidOid;
720  else if (gt_opr == RECORD_GT_OP &&
721  !record_fields_have_compare(typentry))
722  gt_opr = InvalidOid;
723 
724  typentry->gt_opr = gt_opr;
725  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
726  }
727  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
728  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
729  {
730  Oid cmp_proc = InvalidOid;
731 
732  if (typentry->btree_opf != InvalidOid)
733  cmp_proc = get_opfamily_proc(typentry->btree_opf,
734  typentry->btree_opintype,
735  typentry->btree_opintype,
736  BTORDER_PROC);
737 
738  /*
739  * As above, make sure array_cmp or record_cmp will succeed; but again
740  * we need no special check for ranges or multiranges.
741  */
742  if (cmp_proc == F_BTARRAYCMP &&
743  !array_element_has_compare(typentry))
744  cmp_proc = InvalidOid;
745  else if (cmp_proc == F_BTRECORDCMP &&
746  !record_fields_have_compare(typentry))
747  cmp_proc = InvalidOid;
748 
749  /* Force update of cmp_proc_finfo only if we're changing state */
750  if (typentry->cmp_proc != cmp_proc)
751  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
752 
753  typentry->cmp_proc = cmp_proc;
754  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
755  }
757  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
758  {
759  Oid hash_proc = InvalidOid;
760 
761  /*
762  * We insist that the eq_opr, if one has been determined, match the
763  * hash opclass; else report there is no hash function.
764  */
765  if (typentry->hash_opf != InvalidOid &&
766  (!OidIsValid(typentry->eq_opr) ||
767  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
768  typentry->hash_opintype,
769  typentry->hash_opintype,
771  hash_proc = get_opfamily_proc(typentry->hash_opf,
772  typentry->hash_opintype,
773  typentry->hash_opintype,
775 
776  /*
777  * As above, make sure hash_array, hash_record, or hash_range will
778  * succeed.
779  */
780  if (hash_proc == F_HASH_ARRAY &&
781  !array_element_has_hashing(typentry))
782  hash_proc = InvalidOid;
783  else if (hash_proc == F_HASH_RECORD &&
784  !record_fields_have_hashing(typentry))
785  hash_proc = InvalidOid;
786  else if (hash_proc == F_HASH_RANGE &&
787  !range_element_has_hashing(typentry))
788  hash_proc = InvalidOid;
789 
790  /*
791  * Likewise for hash_multirange.
792  */
793  if (hash_proc == F_HASH_MULTIRANGE &&
795  hash_proc = InvalidOid;
796 
797  /* Force update of hash_proc_finfo only if we're changing state */
798  if (typentry->hash_proc != hash_proc)
799  typentry->hash_proc_finfo.fn_oid = InvalidOid;
800 
801  typentry->hash_proc = hash_proc;
802  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
803  }
804  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
807  {
808  Oid hash_extended_proc = InvalidOid;
809 
810  /*
811  * We insist that the eq_opr, if one has been determined, match the
812  * hash opclass; else report there is no hash function.
813  */
814  if (typentry->hash_opf != InvalidOid &&
815  (!OidIsValid(typentry->eq_opr) ||
816  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
817  typentry->hash_opintype,
818  typentry->hash_opintype,
820  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
821  typentry->hash_opintype,
822  typentry->hash_opintype,
824 
825  /*
826  * As above, make sure hash_array_extended, hash_record_extended, or
827  * hash_range_extended will succeed.
828  */
829  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
831  hash_extended_proc = InvalidOid;
832  else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
834  hash_extended_proc = InvalidOid;
835  else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
837  hash_extended_proc = InvalidOid;
838 
839  /*
840  * Likewise for hash_multirange_extended.
841  */
842  if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
844  hash_extended_proc = InvalidOid;
845 
846  /* Force update of proc finfo only if we're changing state */
847  if (typentry->hash_extended_proc != hash_extended_proc)
849 
850  typentry->hash_extended_proc = hash_extended_proc;
852  }
853 
854  /*
855  * Set up fmgr lookup info as requested
856  *
857  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
858  * which is not quite right (they're really in the hash table's private
859  * memory context) but this will do for our purposes.
860  *
861  * Note: the code above avoids invalidating the finfo structs unless the
862  * referenced operator/function OID actually changes. This is to prevent
863  * unnecessary leakage of any subsidiary data attached to an finfo, since
864  * that would cause session-lifespan memory leaks.
865  */
866  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
867  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
868  typentry->eq_opr != InvalidOid)
869  {
870  Oid eq_opr_func;
871 
872  eq_opr_func = get_opcode(typentry->eq_opr);
873  if (eq_opr_func != InvalidOid)
874  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
876  }
877  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
878  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
879  typentry->cmp_proc != InvalidOid)
880  {
881  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
883  }
884  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
885  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
886  typentry->hash_proc != InvalidOid)
887  {
888  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
890  }
891  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
893  typentry->hash_extended_proc != InvalidOid)
894  {
896  &typentry->hash_extended_proc_finfo,
898  }
899 
900  /*
901  * If it's a composite type (row type), get tupdesc if requested
902  */
903  if ((flags & TYPECACHE_TUPDESC) &&
904  typentry->tupDesc == NULL &&
905  typentry->typtype == TYPTYPE_COMPOSITE)
906  {
907  load_typcache_tupdesc(typentry);
908  }
909 
910  /*
911  * If requested, get information about a range type
912  *
913  * This includes making sure that the basic info about the range element
914  * type is up-to-date.
915  */
916  if ((flags & TYPECACHE_RANGE_INFO) &&
917  typentry->typtype == TYPTYPE_RANGE)
918  {
919  if (typentry->rngelemtype == NULL)
920  load_rangetype_info(typentry);
921  else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
922  (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
923  }
924 
925  /*
926  * If requested, get information about a multirange type
927  */
928  if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
929  typentry->rngtype == NULL &&
930  typentry->typtype == TYPTYPE_MULTIRANGE)
931  {
932  load_multirangetype_info(typentry);
933  }
934 
935  /*
936  * If requested, get information about a domain type
937  */
938  if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
939  typentry->domainBaseType == InvalidOid &&
940  typentry->typtype == TYPTYPE_DOMAIN)
941  {
942  typentry->domainBaseTypmod = -1;
943  typentry->domainBaseType =
944  getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
945  }
946  if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
947  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
948  typentry->typtype == TYPTYPE_DOMAIN)
949  {
950  load_domaintype_info(typentry);
951  }
952 
953  INJECTION_POINT("typecache-before-rel-type-cache-insert");
954 
955  Assert(in_progress_offset + 1 == in_progress_list_len);
957 
959 
960  return typentry;
961 }
962 
963 /*
964  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
965  */
966 static void
968 {
969  Relation rel;
970 
971  if (!OidIsValid(typentry->typrelid)) /* should not happen */
972  elog(ERROR, "invalid typrelid for composite type %u",
973  typentry->type_id);
974  rel = relation_open(typentry->typrelid, AccessShareLock);
975  Assert(rel->rd_rel->reltype == typentry->type_id);
976 
977  /*
978  * Link to the tupdesc and increment its refcount (we assert it's a
979  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
980  * because the reference mustn't be entered in the current resource owner;
981  * it can outlive the current query.
982  */
983  typentry->tupDesc = RelationGetDescr(rel);
984 
985  Assert(typentry->tupDesc->tdrefcount > 0);
986  typentry->tupDesc->tdrefcount++;
987 
988  /*
989  * In future, we could take some pains to not change tupDesc_identifier if
990  * the tupdesc didn't really change; but for now it's not worth it.
991  */
993 
995 }
996 
997 /*
998  * load_rangetype_info --- helper routine to set up range type information
999  */
1000 static void
1002 {
1003  Form_pg_range pg_range;
1004  HeapTuple tup;
1005  Oid subtypeOid;
1006  Oid opclassOid;
1007  Oid canonicalOid;
1008  Oid subdiffOid;
1009  Oid opfamilyOid;
1010  Oid opcintype;
1011  Oid cmpFnOid;
1012 
1013  /* get information from pg_range */
1014  tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
1015  /* should not fail, since we already checked typtype ... */
1016  if (!HeapTupleIsValid(tup))
1017  elog(ERROR, "cache lookup failed for range type %u",
1018  typentry->type_id);
1019  pg_range = (Form_pg_range) GETSTRUCT(tup);
1020 
1021  subtypeOid = pg_range->rngsubtype;
1022  typentry->rng_collation = pg_range->rngcollation;
1023  opclassOid = pg_range->rngsubopc;
1024  canonicalOid = pg_range->rngcanonical;
1025  subdiffOid = pg_range->rngsubdiff;
1026 
1027  ReleaseSysCache(tup);
1028 
1029  /* get opclass properties and look up the comparison function */
1030  opfamilyOid = get_opclass_family(opclassOid);
1031  opcintype = get_opclass_input_type(opclassOid);
1032  typentry->rng_opfamily = opfamilyOid;
1033 
1034  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1035  BTORDER_PROC);
1036  if (!RegProcedureIsValid(cmpFnOid))
1037  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1038  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1039 
1040  /* set up cached fmgrinfo structs */
1041  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
1043  if (OidIsValid(canonicalOid))
1044  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
1046  if (OidIsValid(subdiffOid))
1047  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
1049 
1050  /* Lastly, set up link to the element type --- this marks data valid */
1051  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
1052 }
1053 
1054 /*
1055  * load_multirangetype_info --- helper routine to set up multirange type
1056  * information
1057  */
1058 static void
1060 {
1061  Oid rangetypeOid;
1062 
1063  rangetypeOid = get_multirange_range(typentry->type_id);
1064  if (!OidIsValid(rangetypeOid))
1065  elog(ERROR, "cache lookup failed for multirange type %u",
1066  typentry->type_id);
1067 
1068  typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1069 }
1070 
1071 /*
1072  * load_domaintype_info --- helper routine to set up domain constraint info
1073  *
1074  * Note: we assume we're called in a relatively short-lived context, so it's
1075  * okay to leak data into the current context while scanning pg_constraint.
1076  * We build the new DomainConstraintCache data in a context underneath
1077  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1078  * complete.
1079  */
1080 static void
1082 {
1083  Oid typeOid = typentry->type_id;
1084  DomainConstraintCache *dcc;
1085  bool notNull = false;
1086  DomainConstraintState **ccons;
1087  int cconslen;
1088  Relation conRel;
1089  MemoryContext oldcxt;
1090 
1091  /*
1092  * If we're here, any existing constraint info is stale, so release it.
1093  * For safety, be sure to null the link before trying to delete the data.
1094  */
1095  if (typentry->domainData)
1096  {
1097  dcc = typentry->domainData;
1098  typentry->domainData = NULL;
1099  decr_dcc_refcount(dcc);
1100  }
1101 
1102  /*
1103  * We try to optimize the common case of no domain constraints, so don't
1104  * create the dcc object and context until we find a constraint. Likewise
1105  * for the temp sorting array.
1106  */
1107  dcc = NULL;
1108  ccons = NULL;
1109  cconslen = 0;
1110 
1111  /*
1112  * Scan pg_constraint for relevant constraints. We want to find
1113  * constraints for not just this domain, but any ancestor domains, so the
1114  * outer loop crawls up the domain stack.
1115  */
1116  conRel = table_open(ConstraintRelationId, AccessShareLock);
1117 
1118  for (;;)
1119  {
1120  HeapTuple tup;
1121  HeapTuple conTup;
1122  Form_pg_type typTup;
1123  int nccons = 0;
1124  ScanKeyData key[1];
1125  SysScanDesc scan;
1126 
1127  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1128  if (!HeapTupleIsValid(tup))
1129  elog(ERROR, "cache lookup failed for type %u", typeOid);
1130  typTup = (Form_pg_type) GETSTRUCT(tup);
1131 
1132  if (typTup->typtype != TYPTYPE_DOMAIN)
1133  {
1134  /* Not a domain, so done */
1135  ReleaseSysCache(tup);
1136  break;
1137  }
1138 
1139  /* Test for NOT NULL Constraint */
1140  if (typTup->typnotnull)
1141  notNull = true;
1142 
1143  /* Look for CHECK Constraints on this domain */
1144  ScanKeyInit(&key[0],
1145  Anum_pg_constraint_contypid,
1146  BTEqualStrategyNumber, F_OIDEQ,
1147  ObjectIdGetDatum(typeOid));
1148 
1149  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1150  NULL, 1, key);
1151 
1152  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1153  {
1155  Datum val;
1156  bool isNull;
1157  char *constring;
1158  Expr *check_expr;
1160 
1161  /* Ignore non-CHECK constraints */
1162  if (c->contype != CONSTRAINT_CHECK)
1163  continue;
1164 
1165  /* Not expecting conbin to be NULL, but we'll test for it anyway */
1166  val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1167  conRel->rd_att, &isNull);
1168  if (isNull)
1169  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1170  NameStr(typTup->typname), NameStr(c->conname));
1171 
1172  /* Convert conbin to C string in caller context */
1173  constring = TextDatumGetCString(val);
1174 
1175  /* Create the DomainConstraintCache object and context if needed */
1176  if (dcc == NULL)
1177  {
1178  MemoryContext cxt;
1179 
1181  "Domain constraints",
1183  dcc = (DomainConstraintCache *)
1185  dcc->constraints = NIL;
1186  dcc->dccContext = cxt;
1187  dcc->dccRefCount = 0;
1188  }
1189 
1190  /* Create node trees in DomainConstraintCache's context */
1191  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1192 
1193  check_expr = (Expr *) stringToNode(constring);
1194 
1195  /*
1196  * Plan the expression, since ExecInitExpr will expect that.
1197  *
1198  * Note: caching the result of expression_planner() is not very
1199  * good practice. Ideally we'd use a CachedExpression here so
1200  * that we would react promptly to, eg, changes in inlined
1201  * functions. However, because we don't support mutable domain
1202  * CHECK constraints, it's not really clear that it's worth the
1203  * extra overhead to do that.
1204  */
1205  check_expr = expression_planner(check_expr);
1206 
1209  r->name = pstrdup(NameStr(c->conname));
1210  r->check_expr = check_expr;
1211  r->check_exprstate = NULL;
1212 
1213  MemoryContextSwitchTo(oldcxt);
1214 
1215  /* Accumulate constraints in an array, for sorting below */
1216  if (ccons == NULL)
1217  {
1218  cconslen = 8;
1219  ccons = (DomainConstraintState **)
1220  palloc(cconslen * sizeof(DomainConstraintState *));
1221  }
1222  else if (nccons >= cconslen)
1223  {
1224  cconslen *= 2;
1225  ccons = (DomainConstraintState **)
1226  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1227  }
1228  ccons[nccons++] = r;
1229  }
1230 
1231  systable_endscan(scan);
1232 
1233  if (nccons > 0)
1234  {
1235  /*
1236  * Sort the items for this domain, so that CHECKs are applied in a
1237  * deterministic order.
1238  */
1239  if (nccons > 1)
1240  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1241 
1242  /*
1243  * Now attach them to the overall list. Use lcons() here because
1244  * constraints of parent domains should be applied earlier.
1245  */
1246  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1247  while (nccons > 0)
1248  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1249  MemoryContextSwitchTo(oldcxt);
1250  }
1251 
1252  /* loop to next domain in stack */
1253  typeOid = typTup->typbasetype;
1254  ReleaseSysCache(tup);
1255  }
1256 
1257  table_close(conRel, AccessShareLock);
1258 
1259  /*
1260  * Only need to add one NOT NULL check regardless of how many domains in
1261  * the stack request it.
1262  */
1263  if (notNull)
1264  {
1266 
1267  /* Create the DomainConstraintCache object and context if needed */
1268  if (dcc == NULL)
1269  {
1270  MemoryContext cxt;
1271 
1273  "Domain constraints",
1275  dcc = (DomainConstraintCache *)
1277  dcc->constraints = NIL;
1278  dcc->dccContext = cxt;
1279  dcc->dccRefCount = 0;
1280  }
1281 
1282  /* Create node trees in DomainConstraintCache's context */
1283  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1284 
1286 
1288  r->name = pstrdup("NOT NULL");
1289  r->check_expr = NULL;
1290  r->check_exprstate = NULL;
1291 
1292  /* lcons to apply the nullness check FIRST */
1293  dcc->constraints = lcons(r, dcc->constraints);
1294 
1295  MemoryContextSwitchTo(oldcxt);
1296  }
1297 
1298  /*
1299  * If we made a constraint object, move it into CacheMemoryContext and
1300  * attach it to the typcache entry.
1301  */
1302  if (dcc)
1303  {
1305  typentry->domainData = dcc;
1306  dcc->dccRefCount++; /* count the typcache's reference */
1307  }
1308 
1309  /* Either way, the typcache entry's domain data is now valid. */
1311 }
1312 
1313 /*
1314  * qsort comparator to sort DomainConstraintState pointers by name
1315  */
1316 static int
1317 dcs_cmp(const void *a, const void *b)
1318 {
1319  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1320  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1321 
1322  return strcmp((*ca)->name, (*cb)->name);
1323 }
1324 
1325 /*
1326  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1327  * and free it if no references remain
1328  */
1329 static void
1331 {
1332  Assert(dcc->dccRefCount > 0);
1333  if (--(dcc->dccRefCount) <= 0)
1335 }
1336 
1337 /*
1338  * Context reset/delete callback for a DomainConstraintRef
1339  */
1340 static void
1342 {
1344  DomainConstraintCache *dcc = ref->dcc;
1345 
1346  /* Paranoia --- be sure link is nulled before trying to release */
1347  if (dcc)
1348  {
1349  ref->constraints = NIL;
1350  ref->dcc = NULL;
1351  decr_dcc_refcount(dcc);
1352  }
1353 }
1354 
1355 /*
1356  * prep_domain_constraints --- prepare domain constraints for execution
1357  *
1358  * The expression trees stored in the DomainConstraintCache's list are
1359  * converted to executable expression state trees stored in execctx.
1360  */
1361 static List *
1363 {
1364  List *result = NIL;
1365  MemoryContext oldcxt;
1366  ListCell *lc;
1367 
1368  oldcxt = MemoryContextSwitchTo(execctx);
1369 
1370  foreach(lc, constraints)
1371  {
1373  DomainConstraintState *newr;
1374 
1376  newr->constrainttype = r->constrainttype;
1377  newr->name = r->name;
1378  newr->check_expr = r->check_expr;
1379  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1380 
1381  result = lappend(result, newr);
1382  }
1383 
1384  MemoryContextSwitchTo(oldcxt);
1385 
1386  return result;
1387 }
1388 
1389 /*
1390  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1391  *
1392  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1393  * lives. The ref will be cleaned up when that context is reset/deleted.
1394  *
1395  * Caller must also tell us whether it wants check_exprstate fields to be
1396  * computed in the DomainConstraintState nodes attached to this ref.
1397  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1398  */
1399 void
1401  MemoryContext refctx, bool need_exprstate)
1402 {
1403  /* Look up the typcache entry --- we assume it survives indefinitely */
1405  ref->need_exprstate = need_exprstate;
1406  /* For safety, establish the callback before acquiring a refcount */
1407  ref->refctx = refctx;
1408  ref->dcc = NULL;
1410  ref->callback.arg = ref;
1412  /* Acquire refcount if there are constraints, and set up exported list */
1413  if (ref->tcache->domainData)
1414  {
1415  ref->dcc = ref->tcache->domainData;
1416  ref->dcc->dccRefCount++;
1417  if (ref->need_exprstate)
1419  ref->refctx);
1420  else
1421  ref->constraints = ref->dcc->constraints;
1422  }
1423  else
1424  ref->constraints = NIL;
1425 }
1426 
1427 /*
1428  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1429  *
1430  * If the domain's constraint set changed, ref->constraints is updated to
1431  * point at a new list of cached constraints.
1432  *
1433  * In the normal case where nothing happened to the domain, this is cheap
1434  * enough that it's reasonable (and expected) to check before *each* use
1435  * of the constraint info.
1436  */
1437 void
1439 {
1440  TypeCacheEntry *typentry = ref->tcache;
1441 
1442  /* Make sure typcache entry's data is up to date */
1443  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1444  typentry->typtype == TYPTYPE_DOMAIN)
1445  load_domaintype_info(typentry);
1446 
1447  /* Transfer to ref object if there's new info, adjusting refcounts */
1448  if (ref->dcc != typentry->domainData)
1449  {
1450  /* Paranoia --- be sure link is nulled before trying to release */
1451  DomainConstraintCache *dcc = ref->dcc;
1452 
1453  if (dcc)
1454  {
1455  /*
1456  * Note: we just leak the previous list of executable domain
1457  * constraints. Alternatively, we could keep those in a child
1458  * context of ref->refctx and free that context at this point.
1459  * However, in practice this code path will be taken so seldom
1460  * that the extra bookkeeping for a child context doesn't seem
1461  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1462  */
1463  ref->constraints = NIL;
1464  ref->dcc = NULL;
1465  decr_dcc_refcount(dcc);
1466  }
1467  dcc = typentry->domainData;
1468  if (dcc)
1469  {
1470  ref->dcc = dcc;
1471  dcc->dccRefCount++;
1472  if (ref->need_exprstate)
1474  ref->refctx);
1475  else
1476  ref->constraints = dcc->constraints;
1477  }
1478  }
1479 }
1480 
1481 /*
1482  * DomainHasConstraints --- utility routine to check if a domain has constraints
1483  *
1484  * This is defined to return false, not fail, if type is not a domain.
1485  */
1486 bool
1488 {
1489  TypeCacheEntry *typentry;
1490 
1491  /*
1492  * Note: a side effect is to cause the typcache's domain data to become
1493  * valid. This is fine since we'll likely need it soon if there is any.
1494  */
1495  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1496 
1497  return (typentry->domainData != NULL);
1498 }
1499 
1500 
1501 /*
1502  * array_element_has_equality and friends are helper routines to check
1503  * whether we should believe that array_eq and related functions will work
1504  * on the given array type or composite type.
1505  *
1506  * The logic above may call these repeatedly on the same type entry, so we
1507  * make use of the typentry->flags field to cache the results once known.
1508  * Also, we assume that we'll probably want all these facts about the type
1509  * if we want any, so we cache them all using only one lookup of the
1510  * component datatype(s).
1511  */
1512 
1513 static bool
1515 {
1516  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1518  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1519 }
1520 
1521 static bool
1523 {
1524  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1526  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1527 }
1528 
1529 static bool
1531 {
1532  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1534  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1535 }
1536 
1537 static bool
1539 {
1540  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1542  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1543 }
1544 
1545 static void
1547 {
1548  Oid elem_type = get_base_element_type(typentry->type_id);
1549 
1550  if (OidIsValid(elem_type))
1551  {
1552  TypeCacheEntry *elementry;
1553 
1554  elementry = lookup_type_cache(elem_type,
1559  if (OidIsValid(elementry->eq_opr))
1560  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1561  if (OidIsValid(elementry->cmp_proc))
1562  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1563  if (OidIsValid(elementry->hash_proc))
1564  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1565  if (OidIsValid(elementry->hash_extended_proc))
1567  }
1569 }
1570 
1571 /*
1572  * Likewise, some helper functions for composite types.
1573  */
1574 
1575 static bool
1577 {
1578  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1580  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1581 }
1582 
1583 static bool
1585 {
1586  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1588  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1589 }
1590 
1591 static bool
1593 {
1594  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1596  return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1597 }
1598 
1599 static bool
1601 {
1602  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1604  return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1605 }
1606 
1607 static void
1609 {
1610  /*
1611  * For type RECORD, we can't really tell what will work, since we don't
1612  * have access here to the specific anonymous type. Just assume that
1613  * equality and comparison will (we may get a failure at runtime). We
1614  * could also claim that hashing works, but then if code that has the
1615  * option between a comparison-based (sort-based) and a hash-based plan
1616  * chooses hashing, stuff could fail that would otherwise work if it chose
1617  * a comparison-based plan. In practice more types support comparison
1618  * than hashing.
1619  */
1620  if (typentry->type_id == RECORDOID)
1621  {
1622  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1624  }
1625  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1626  {
1627  TupleDesc tupdesc;
1628  int newflags;
1629  int i;
1630 
1631  /* Fetch composite type's tupdesc if we don't have it already */
1632  if (typentry->tupDesc == NULL)
1633  load_typcache_tupdesc(typentry);
1634  tupdesc = typentry->tupDesc;
1635 
1636  /* Must bump the refcount while we do additional catalog lookups */
1637  IncrTupleDescRefCount(tupdesc);
1638 
1639  /* Have each property if all non-dropped fields have the property */
1640  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1644  for (i = 0; i < tupdesc->natts; i++)
1645  {
1646  TypeCacheEntry *fieldentry;
1647  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1648 
1649  if (attr->attisdropped)
1650  continue;
1651 
1652  fieldentry = lookup_type_cache(attr->atttypid,
1657  if (!OidIsValid(fieldentry->eq_opr))
1658  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1659  if (!OidIsValid(fieldentry->cmp_proc))
1660  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1661  if (!OidIsValid(fieldentry->hash_proc))
1662  newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1663  if (!OidIsValid(fieldentry->hash_extended_proc))
1665 
1666  /* We can drop out of the loop once we disprove all bits */
1667  if (newflags == 0)
1668  break;
1669  }
1670  typentry->flags |= newflags;
1671 
1672  DecrTupleDescRefCount(tupdesc);
1673  }
1674  else if (typentry->typtype == TYPTYPE_DOMAIN)
1675  {
1676  /* If it's domain over composite, copy base type's properties */
1677  TypeCacheEntry *baseentry;
1678 
1679  /* load up basetype info if we didn't already */
1680  if (typentry->domainBaseType == InvalidOid)
1681  {
1682  typentry->domainBaseTypmod = -1;
1683  typentry->domainBaseType =
1684  getBaseTypeAndTypmod(typentry->type_id,
1685  &typentry->domainBaseTypmod);
1686  }
1687  baseentry = lookup_type_cache(typentry->domainBaseType,
1692  if (baseentry->typtype == TYPTYPE_COMPOSITE)
1693  {
1695  typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1699  }
1700  }
1702 }
1703 
1704 /*
1705  * Likewise, some helper functions for range and multirange types.
1706  *
1707  * We can borrow the flag bits for array element properties to use for range
1708  * element properties, since those flag bits otherwise have no use in a
1709  * range or multirange type's typcache entry.
1710  */
1711 
1712 static bool
1714 {
1715  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1717  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1718 }
1719 
1720 static bool
1722 {
1723  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1725  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1726 }
1727 
1728 static void
1730 {
1731  /* load up subtype link if we didn't already */
1732  if (typentry->rngelemtype == NULL &&
1733  typentry->typtype == TYPTYPE_RANGE)
1734  load_rangetype_info(typentry);
1735 
1736  if (typentry->rngelemtype != NULL)
1737  {
1738  TypeCacheEntry *elementry;
1739 
1740  /* might need to calculate subtype's hash function properties */
1741  elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1744  if (OidIsValid(elementry->hash_proc))
1745  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1746  if (OidIsValid(elementry->hash_extended_proc))
1748  }
1750 }
1751 
1752 static bool
1754 {
1755  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1757  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1758 }
1759 
1760 static bool
1762 {
1763  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1765  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1766 }
1767 
1768 static void
1770 {
1771  /* load up range link if we didn't already */
1772  if (typentry->rngtype == NULL &&
1773  typentry->typtype == TYPTYPE_MULTIRANGE)
1774  load_multirangetype_info(typentry);
1775 
1776  if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1777  {
1778  TypeCacheEntry *elementry;
1779 
1780  /* might need to calculate subtype's hash function properties */
1781  elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1784  if (OidIsValid(elementry->hash_proc))
1785  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1786  if (OidIsValid(elementry->hash_extended_proc))
1788  }
1790 }
1791 
1792 /*
1793  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1794  * to store 'typmod'.
1795  */
1796 static void
1798 {
1799  if (RecordCacheArray == NULL)
1800  {
1803  64 * sizeof(RecordCacheArrayEntry));
1804  RecordCacheArrayLen = 64;
1805  }
1806 
1807  if (typmod >= RecordCacheArrayLen)
1808  {
1809  int32 newlen = pg_nextpower2_32(typmod + 1);
1810 
1814  newlen);
1815  RecordCacheArrayLen = newlen;
1816  }
1817 }
1818 
1819 /*
1820  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1821  *
1822  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1823  * hasn't had its refcount bumped.
1824  */
1825 static TupleDesc
1826 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1827 {
1828  if (type_id != RECORDOID)
1829  {
1830  /*
1831  * It's a named composite type, so use the regular typcache.
1832  */
1833  TypeCacheEntry *typentry;
1834 
1835  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1836  if (typentry->tupDesc == NULL && !noError)
1837  ereport(ERROR,
1838  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1839  errmsg("type %s is not composite",
1840  format_type_be(type_id))));
1841  return typentry->tupDesc;
1842  }
1843  else
1844  {
1845  /*
1846  * It's a transient record type, so look in our record-type table.
1847  */
1848  if (typmod >= 0)
1849  {
1850  /* It is already in our local cache? */
1851  if (typmod < RecordCacheArrayLen &&
1852  RecordCacheArray[typmod].tupdesc != NULL)
1853  return RecordCacheArray[typmod].tupdesc;
1854 
1855  /* Are we attached to a shared record typmod registry? */
1857  {
1858  SharedTypmodTableEntry *entry;
1859 
1860  /* Try to find it in the shared typmod index. */
1862  &typmod, false);
1863  if (entry != NULL)
1864  {
1865  TupleDesc tupdesc;
1866 
1867  tupdesc = (TupleDesc)
1869  entry->shared_tupdesc);
1870  Assert(typmod == tupdesc->tdtypmod);
1871 
1872  /* We may need to extend the local RecordCacheArray. */
1874 
1875  /*
1876  * Our local array can now point directly to the TupleDesc
1877  * in shared memory, which is non-reference-counted.
1878  */
1879  RecordCacheArray[typmod].tupdesc = tupdesc;
1880  Assert(tupdesc->tdrefcount == -1);
1881 
1882  /*
1883  * We don't share tupdesc identifiers across processes, so
1884  * assign one locally.
1885  */
1887 
1889  entry);
1890 
1891  return RecordCacheArray[typmod].tupdesc;
1892  }
1893  }
1894  }
1895 
1896  if (!noError)
1897  ereport(ERROR,
1898  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1899  errmsg("record type has not been registered")));
1900  return NULL;
1901  }
1902 }
1903 
1904 /*
1905  * lookup_rowtype_tupdesc
1906  *
1907  * Given a typeid/typmod that should describe a known composite type,
1908  * return the tuple descriptor for the type. Will ereport on failure.
1909  * (Use ereport because this is reachable with user-specified OIDs,
1910  * for example from record_in().)
1911  *
1912  * Note: on success, we increment the refcount of the returned TupleDesc,
1913  * and log the reference in CurrentResourceOwner. Caller must call
1914  * ReleaseTupleDesc when done using the tupdesc. (There are some
1915  * cases in which the returned tupdesc is not refcounted, in which
1916  * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1917  * the tupdesc is guaranteed to live till process exit.)
1918  */
1919 TupleDesc
1921 {
1922  TupleDesc tupDesc;
1923 
1924  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1925  PinTupleDesc(tupDesc);
1926  return tupDesc;
1927 }
1928 
1929 /*
1930  * lookup_rowtype_tupdesc_noerror
1931  *
1932  * As above, but if the type is not a known composite type and noError
1933  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1934  * type_id is passed, you'll get an ereport anyway.)
1935  */
1936 TupleDesc
1937 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1938 {
1939  TupleDesc tupDesc;
1940 
1941  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1942  if (tupDesc != NULL)
1943  PinTupleDesc(tupDesc);
1944  return tupDesc;
1945 }
1946 
1947 /*
1948  * lookup_rowtype_tupdesc_copy
1949  *
1950  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1951  * copied into the CurrentMemoryContext and is not reference-counted.
1952  */
1953 TupleDesc
1955 {
1956  TupleDesc tmp;
1957 
1958  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1959  return CreateTupleDescCopyConstr(tmp);
1960 }
1961 
1962 /*
1963  * lookup_rowtype_tupdesc_domain
1964  *
1965  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1966  * a domain over a named composite type; so this is effectively equivalent to
1967  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1968  * except for being a tad faster.
1969  *
1970  * Note: the reason we don't fold the look-through-domain behavior into plain
1971  * lookup_rowtype_tupdesc() is that we want callers to know they might be
1972  * dealing with a domain. Otherwise they might construct a tuple that should
1973  * be of the domain type, but not apply domain constraints.
1974  */
1975 TupleDesc
1976 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1977 {
1978  TupleDesc tupDesc;
1979 
1980  if (type_id != RECORDOID)
1981  {
1982  /*
1983  * Check for domain or named composite type. We might as well load
1984  * whichever data is needed.
1985  */
1986  TypeCacheEntry *typentry;
1987 
1988  typentry = lookup_type_cache(type_id,
1991  if (typentry->typtype == TYPTYPE_DOMAIN)
1993  typentry->domainBaseTypmod,
1994  noError);
1995  if (typentry->tupDesc == NULL && !noError)
1996  ereport(ERROR,
1997  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1998  errmsg("type %s is not composite",
1999  format_type_be(type_id))));
2000  tupDesc = typentry->tupDesc;
2001  }
2002  else
2003  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2004  if (tupDesc != NULL)
2005  PinTupleDesc(tupDesc);
2006  return tupDesc;
2007 }
2008 
2009 /*
2010  * Hash function for the hash table of RecordCacheEntry.
2011  */
2012 static uint32
2013 record_type_typmod_hash(const void *data, size_t size)
2014 {
2015  RecordCacheEntry *entry = (RecordCacheEntry *) data;
2016 
2017  return hashRowType(entry->tupdesc);
2018 }
2019 
2020 /*
2021  * Match function for the hash table of RecordCacheEntry.
2022  */
2023 static int
2024 record_type_typmod_compare(const void *a, const void *b, size_t size)
2025 {
2026  RecordCacheEntry *left = (RecordCacheEntry *) a;
2027  RecordCacheEntry *right = (RecordCacheEntry *) b;
2028 
2029  return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2030 }
2031 
2032 /*
2033  * assign_record_type_typmod
2034  *
2035  * Given a tuple descriptor for a RECORD type, find or create a cache entry
2036  * for the type, and set the tupdesc's tdtypmod field to a value that will
2037  * identify this cache entry to lookup_rowtype_tupdesc.
2038  */
2039 void
2041 {
2042  RecordCacheEntry *recentry;
2043  TupleDesc entDesc;
2044  bool found;
2045  MemoryContext oldcxt;
2046 
2047  Assert(tupDesc->tdtypeid == RECORDOID);
2048 
2049  if (RecordCacheHash == NULL)
2050  {
2051  /* First time through: initialize the hash table */
2052  HASHCTL ctl;
2053 
2054  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2055  ctl.entrysize = sizeof(RecordCacheEntry);
2058  RecordCacheHash = hash_create("Record information cache", 64,
2059  &ctl,
2061 
2062  /* Also make sure CacheMemoryContext exists */
2063  if (!CacheMemoryContext)
2065  }
2066 
2067  /*
2068  * Find a hashtable entry for this tuple descriptor. We don't use
2069  * HASH_ENTER yet, because if it's missing, we need to make sure that all
2070  * the allocations succeed before we create the new entry.
2071  */
2073  &tupDesc,
2074  HASH_FIND, &found);
2075  if (found && recentry->tupdesc != NULL)
2076  {
2077  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2078  return;
2079  }
2080 
2081  /* Not present, so need to manufacture an entry */
2083 
2084  /* Look in the SharedRecordTypmodRegistry, if attached */
2085  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2086  if (entDesc == NULL)
2087  {
2088  /*
2089  * Make sure we have room before we CreateTupleDescCopy() or advance
2090  * NextRecordTypmod.
2091  */
2093 
2094  /* Reference-counted local cache only. */
2095  entDesc = CreateTupleDescCopy(tupDesc);
2096  entDesc->tdrefcount = 1;
2097  entDesc->tdtypmod = NextRecordTypmod++;
2098  }
2099  else
2100  {
2102  }
2103 
2104  RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2105 
2106  /* Assign a unique tupdesc identifier, too. */
2108 
2109  /* Fully initialized; create the hash table entry */
2111  &tupDesc,
2112  HASH_ENTER, NULL);
2113  recentry->tupdesc = entDesc;
2114 
2115  /* Update the caller's tuple descriptor. */
2116  tupDesc->tdtypmod = entDesc->tdtypmod;
2117 
2118  MemoryContextSwitchTo(oldcxt);
2119 }
2120 
2121 /*
2122  * assign_record_type_identifier
2123  *
2124  * Get an identifier, which will be unique over the lifespan of this backend
2125  * process, for the current tuple descriptor of the specified composite type.
2126  * For named composite types, the value is guaranteed to change if the type's
2127  * definition does. For registered RECORD types, the value will not change
2128  * once assigned, since the registered type won't either. If an anonymous
2129  * RECORD type is specified, we return a new identifier on each call.
2130  */
2131 uint64
2133 {
2134  if (type_id != RECORDOID)
2135  {
2136  /*
2137  * It's a named composite type, so use the regular typcache.
2138  */
2139  TypeCacheEntry *typentry;
2140 
2141  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2142  if (typentry->tupDesc == NULL)
2143  ereport(ERROR,
2144  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2145  errmsg("type %s is not composite",
2146  format_type_be(type_id))));
2147  Assert(typentry->tupDesc_identifier != 0);
2148  return typentry->tupDesc_identifier;
2149  }
2150  else
2151  {
2152  /*
2153  * It's a transient record type, so look in our record-type table.
2154  */
2155  if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2156  RecordCacheArray[typmod].tupdesc != NULL)
2157  {
2158  Assert(RecordCacheArray[typmod].id != 0);
2159  return RecordCacheArray[typmod].id;
2160  }
2161 
2162  /* For anonymous or unrecognized record type, generate a new ID */
2163  return ++tupledesc_id_counter;
2164  }
2165 }
2166 
2167 /*
2168  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2169  * This exists only to avoid exposing private innards of
2170  * SharedRecordTypmodRegistry in a header.
2171  */
2172 size_t
2174 {
2175  return sizeof(SharedRecordTypmodRegistry);
2176 }
2177 
2178 /*
2179  * Initialize 'registry' in a pre-existing shared memory region, which must be
2180  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2181  * bytes.
2182  *
2183  * 'area' will be used to allocate shared memory space as required for the
2184  * typemod registration. The current process, expected to be a leader process
2185  * in a parallel query, will be attached automatically and its current record
2186  * types will be loaded into *registry. While attached, all calls to
2187  * assign_record_type_typmod will use the shared registry. Worker backends
2188  * will need to attach explicitly.
2189  *
2190  * Note that this function takes 'area' and 'segment' as arguments rather than
2191  * accessing them via CurrentSession, because they aren't installed there
2192  * until after this function runs.
2193  */
2194 void
2196  dsm_segment *segment,
2197  dsa_area *area)
2198 {
2199  MemoryContext old_context;
2200  dshash_table *record_table;
2201  dshash_table *typmod_table;
2202  int32 typmod;
2203 
2205 
2206  /* We can't already be attached to a shared registry. */
2210 
2211  old_context = MemoryContextSwitchTo(TopMemoryContext);
2212 
2213  /* Create the hash table of tuple descriptors indexed by themselves. */
2214  record_table = dshash_create(area, &srtr_record_table_params, area);
2215 
2216  /* Create the hash table of tuple descriptors indexed by typmod. */
2217  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2218 
2219  MemoryContextSwitchTo(old_context);
2220 
2221  /* Initialize the SharedRecordTypmodRegistry. */
2222  registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2223  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2225 
2226  /*
2227  * Copy all entries from this backend's private registry into the shared
2228  * registry.
2229  */
2230  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2231  {
2232  SharedTypmodTableEntry *typmod_table_entry;
2233  SharedRecordTableEntry *record_table_entry;
2234  SharedRecordTableKey record_table_key;
2235  dsa_pointer shared_dp;
2236  TupleDesc tupdesc;
2237  bool found;
2238 
2239  tupdesc = RecordCacheArray[typmod].tupdesc;
2240  if (tupdesc == NULL)
2241  continue;
2242 
2243  /* Copy the TupleDesc into shared memory. */
2244  shared_dp = share_tupledesc(area, tupdesc, typmod);
2245 
2246  /* Insert into the typmod table. */
2247  typmod_table_entry = dshash_find_or_insert(typmod_table,
2248  &tupdesc->tdtypmod,
2249  &found);
2250  if (found)
2251  elog(ERROR, "cannot create duplicate shared record typmod");
2252  typmod_table_entry->typmod = tupdesc->tdtypmod;
2253  typmod_table_entry->shared_tupdesc = shared_dp;
2254  dshash_release_lock(typmod_table, typmod_table_entry);
2255 
2256  /* Insert into the record table. */
2257  record_table_key.shared = false;
2258  record_table_key.u.local_tupdesc = tupdesc;
2259  record_table_entry = dshash_find_or_insert(record_table,
2260  &record_table_key,
2261  &found);
2262  if (!found)
2263  {
2264  record_table_entry->key.shared = true;
2265  record_table_entry->key.u.shared_tupdesc = shared_dp;
2266  }
2267  dshash_release_lock(record_table, record_table_entry);
2268  }
2269 
2270  /*
2271  * Set up the global state that will tell assign_record_type_typmod and
2272  * lookup_rowtype_tupdesc_internal about the shared registry.
2273  */
2274  CurrentSession->shared_record_table = record_table;
2275  CurrentSession->shared_typmod_table = typmod_table;
2277 
2278  /*
2279  * We install a detach hook in the leader, but only to handle cleanup on
2280  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2281  * the memory, the leader process will use a shared registry until it
2282  * exits.
2283  */
2285 }
2286 
2287 /*
2288  * Attach to 'registry', which must have been initialized already by another
2289  * backend. Future calls to assign_record_type_typmod and
2290  * lookup_rowtype_tupdesc_internal will use the shared registry until the
2291  * current session is detached.
2292  */
2293 void
2295 {
2296  MemoryContext old_context;
2297  dshash_table *record_table;
2298  dshash_table *typmod_table;
2299 
2301 
2302  /* We can't already be attached to a shared registry. */
2303  Assert(CurrentSession != NULL);
2304  Assert(CurrentSession->segment != NULL);
2305  Assert(CurrentSession->area != NULL);
2309 
2310  /*
2311  * We can't already have typmods in our local cache, because they'd clash
2312  * with those imported by SharedRecordTypmodRegistryInit. This should be
2313  * a freshly started parallel worker. If we ever support worker
2314  * recycling, a worker would need to zap its local cache in between
2315  * servicing different queries, in order to be able to call this and
2316  * synchronize typmods with a new leader; but that's problematic because
2317  * we can't be very sure that record-typmod-related state hasn't escaped
2318  * to anywhere else in the process.
2319  */
2320  Assert(NextRecordTypmod == 0);
2321 
2322  old_context = MemoryContextSwitchTo(TopMemoryContext);
2323 
2324  /* Attach to the two hash tables. */
2325  record_table = dshash_attach(CurrentSession->area,
2327  registry->record_table_handle,
2328  CurrentSession->area);
2329  typmod_table = dshash_attach(CurrentSession->area,
2331  registry->typmod_table_handle,
2332  NULL);
2333 
2334  MemoryContextSwitchTo(old_context);
2335 
2336  /*
2337  * Set up detach hook to run at worker exit. Currently this is the same
2338  * as the leader's detach hook, but in future they might need to be
2339  * different.
2340  */
2343  PointerGetDatum(registry));
2344 
2345  /*
2346  * Set up the session state that will tell assign_record_type_typmod and
2347  * lookup_rowtype_tupdesc_internal about the shared registry.
2348  */
2350  CurrentSession->shared_record_table = record_table;
2351  CurrentSession->shared_typmod_table = typmod_table;
2352 }
2353 
2354 /*
2355  * InvalidateCompositeTypeCacheEntry
2356  * Invalidate particular TypeCacheEntry on Relcache inval callback
2357  *
2358  * Delete the cached tuple descriptor (if any) for the given composite
2359  * type, and reset whatever info we have cached about the composite type's
2360  * comparability.
2361  */
2362 static void
2364 {
2365  bool hadTupDescOrOpclass;
2366 
2367  Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2368  OidIsValid(typentry->typrelid));
2369 
2370  hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2371  (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2372 
2373  /* Delete tupdesc if we have it */
2374  if (typentry->tupDesc != NULL)
2375  {
2376  /*
2377  * Release our refcount and free the tupdesc if none remain. We can't
2378  * use DecrTupleDescRefCount here because this reference is not logged
2379  * by the current resource owner.
2380  */
2381  Assert(typentry->tupDesc->tdrefcount > 0);
2382  if (--typentry->tupDesc->tdrefcount == 0)
2383  FreeTupleDesc(typentry->tupDesc);
2384  typentry->tupDesc = NULL;
2385 
2386  /*
2387  * Also clear tupDesc_identifier, so that anyone watching it will
2388  * realize that the tupdesc has changed.
2389  */
2390  typentry->tupDesc_identifier = 0;
2391  }
2392 
2393  /* Reset equality/comparison/hashing validity information */
2394  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2395 
2396  /* Call delete_rel_type_cache() if we actually cleared something */
2397  if (hadTupDescOrOpclass)
2399 }
2400 
2401 /*
2402  * TypeCacheRelCallback
2403  * Relcache inval callback function
2404  *
2405  * Delete the cached tuple descriptor (if any) for the given rel's composite
2406  * type, or for all composite types if relid == InvalidOid. Also reset
2407  * whatever info we have cached about the composite type's comparability.
2408  *
2409  * This is called when a relcache invalidation event occurs for the given
2410  * relid. We can't use syscache to find a type corresponding to the given
2411  * relation because the code can be called outside of transaction. Thus, we
2412  * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2413  */
2414 static void
2416 {
2417  TypeCacheEntry *typentry;
2418 
2419  /*
2420  * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2421  * callback wouldn't be registered
2422  */
2423  if (OidIsValid(relid))
2424  {
2425  RelIdToTypeIdCacheEntry *relentry;
2426 
2427  /*
2428  * Find an RelIdToTypeIdCacheHash entry, which should exist as soon as
2429  * corresponding typcache entry has something to clean.
2430  */
2432  &relid,
2433  HASH_FIND, NULL);
2434 
2435  if (relentry != NULL)
2436  {
2437  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2438  &relentry->composite_typid,
2439  HASH_FIND, NULL);
2440 
2441  if (typentry != NULL)
2442  {
2443  Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2444  Assert(relid == typentry->typrelid);
2445 
2447  }
2448  }
2449 
2450  /*
2451  * Visit all the domain types sequentially. Typically, this shouldn't
2452  * affect performance since domain types are less tended to bloat.
2453  * Domain types are created manually, unlike composite types which are
2454  * automatically created for every temporary table.
2455  */
2456  for (typentry = firstDomainTypeEntry;
2457  typentry != NULL;
2458  typentry = typentry->nextDomain)
2459  {
2460  /*
2461  * If it's domain over composite, reset flags. (We don't bother
2462  * trying to determine whether the specific base type needs a
2463  * reset.) Note that if we haven't determined whether the base
2464  * type is composite, we don't need to reset anything.
2465  */
2466  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2467  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2468  }
2469  }
2470  else
2471  {
2472  HASH_SEQ_STATUS status;
2473 
2474  /*
2475  * Relid is invalid. By convention, we need to reset all composite
2476  * types in cache. Also, we should reset flags for domain types, and
2477  * we loop over all entries in hash, so, do it in a single scan.
2478  */
2479  hash_seq_init(&status, TypeCacheHash);
2480  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2481  {
2482  if (typentry->typtype == TYPTYPE_COMPOSITE)
2483  {
2485  }
2486  else if (typentry->typtype == TYPTYPE_DOMAIN)
2487  {
2488  /*
2489  * If it's domain over composite, reset flags. (We don't
2490  * bother trying to determine whether the specific base type
2491  * needs a reset.) Note that if we haven't determined whether
2492  * the base type is composite, we don't need to reset
2493  * anything.
2494  */
2495  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2496  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2497  }
2498  }
2499  }
2500 }
2501 
2502 /*
2503  * TypeCacheTypCallback
2504  * Syscache inval callback function
2505  *
2506  * This is called when a syscache invalidation event occurs for any
2507  * pg_type row. If we have information cached about that type, mark
2508  * it as needing to be reloaded.
2509  */
2510 static void
2511 TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2512 {
2513  HASH_SEQ_STATUS status;
2514  TypeCacheEntry *typentry;
2515 
2516  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2517 
2518  /*
2519  * By convention, zero hash value is passed to the callback as a sign that
2520  * it's time to invalidate the whole cache. See sinval.c, inval.c and
2521  * InvalidateSystemCachesExtended().
2522  */
2523  if (hashvalue == 0)
2524  hash_seq_init(&status, TypeCacheHash);
2525  else
2526  hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2527 
2528  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2529  {
2530  bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2531 
2532  Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2533 
2534  /*
2535  * Mark the data obtained directly from pg_type as invalid. Also, if
2536  * it's a domain, typnotnull might've changed, so we'll need to
2537  * recalculate its constraints.
2538  */
2539  typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2541 
2542  /*
2543  * Call delete_rel_type_cache() if we cleaned
2544  * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2545  */
2546  if (hadPgTypeData)
2548  }
2549 }
2550 
2551 /*
2552  * TypeCacheOpcCallback
2553  * Syscache inval callback function
2554  *
2555  * This is called when a syscache invalidation event occurs for any pg_opclass
2556  * row. In principle we could probably just invalidate data dependent on the
2557  * particular opclass, but since updates on pg_opclass are rare in production
2558  * it doesn't seem worth a lot of complication: we just mark all cached data
2559  * invalid.
2560  *
2561  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2562  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2563  * is not allowed to be used to add/drop the primary operators and functions
2564  * of an opclass, only cross-type members of a family; and the latter sorts
2565  * of members are not going to get cached here.
2566  */
2567 static void
2568 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2569 {
2570  HASH_SEQ_STATUS status;
2571  TypeCacheEntry *typentry;
2572 
2573  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2574  hash_seq_init(&status, TypeCacheHash);
2575  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2576  {
2577  /* Reset equality/comparison/hashing validity information */
2578  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2579  }
2580 }
2581 
2582 /*
2583  * TypeCacheConstrCallback
2584  * Syscache inval callback function
2585  *
2586  * This is called when a syscache invalidation event occurs for any
2587  * pg_constraint row. We flush information about domain constraints
2588  * when this happens.
2589  *
2590  * It's slightly annoying that we can't tell whether the inval event was for
2591  * a domain constraint record or not; there's usually more update traffic
2592  * for table constraints than domain constraints, so we'll do a lot of
2593  * useless flushes. Still, this is better than the old no-caching-at-all
2594  * approach to domain constraints.
2595  */
2596 static void
2597 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2598 {
2599  TypeCacheEntry *typentry;
2600 
2601  /*
2602  * Because this is called very frequently, and typically very few of the
2603  * typcache entries are for domains, we don't use hash_seq_search here.
2604  * Instead we thread all the domain-type entries together so that we can
2605  * visit them cheaply.
2606  */
2607  for (typentry = firstDomainTypeEntry;
2608  typentry != NULL;
2609  typentry = typentry->nextDomain)
2610  {
2611  /* Reset domain constraint validity information */
2613  }
2614 }
2615 
2616 
2617 /*
2618  * Check if given OID is part of the subset that's sortable by comparisons
2619  */
2620 static inline bool
2622 {
2623  Oid offset;
2624 
2625  if (arg < enumdata->bitmap_base)
2626  return false;
2627  offset = arg - enumdata->bitmap_base;
2628  if (offset > (Oid) INT_MAX)
2629  return false;
2630  return bms_is_member((int) offset, enumdata->sorted_values);
2631 }
2632 
2633 
2634 /*
2635  * compare_values_of_enum
2636  * Compare two members of an enum type.
2637  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2638  *
2639  * Note: currently, the enumData cache is refreshed only if we are asked
2640  * to compare an enum value that is not already in the cache. This is okay
2641  * because there is no support for re-ordering existing values, so comparisons
2642  * of previously cached values will return the right answer even if other
2643  * values have been added since we last loaded the cache.
2644  *
2645  * Note: the enum logic has a special-case rule about even-numbered versus
2646  * odd-numbered OIDs, but we take no account of that rule here; this
2647  * routine shouldn't even get called when that rule applies.
2648  */
2649 int
2651 {
2652  TypeCacheEnumData *enumdata;
2653  EnumItem *item1;
2654  EnumItem *item2;
2655 
2656  /*
2657  * Equal OIDs are certainly equal --- this case was probably handled by
2658  * our caller, but we may as well check.
2659  */
2660  if (arg1 == arg2)
2661  return 0;
2662 
2663  /* Load up the cache if first time through */
2664  if (tcache->enumData == NULL)
2665  load_enum_cache_data(tcache);
2666  enumdata = tcache->enumData;
2667 
2668  /*
2669  * If both OIDs are known-sorted, we can just compare them directly.
2670  */
2671  if (enum_known_sorted(enumdata, arg1) &&
2672  enum_known_sorted(enumdata, arg2))
2673  {
2674  if (arg1 < arg2)
2675  return -1;
2676  else
2677  return 1;
2678  }
2679 
2680  /*
2681  * Slow path: we have to identify their actual sort-order positions.
2682  */
2683  item1 = find_enumitem(enumdata, arg1);
2684  item2 = find_enumitem(enumdata, arg2);
2685 
2686  if (item1 == NULL || item2 == NULL)
2687  {
2688  /*
2689  * We couldn't find one or both values. That means the enum has
2690  * changed under us, so re-initialize the cache and try again. We
2691  * don't bother retrying the known-sorted case in this path.
2692  */
2693  load_enum_cache_data(tcache);
2694  enumdata = tcache->enumData;
2695 
2696  item1 = find_enumitem(enumdata, arg1);
2697  item2 = find_enumitem(enumdata, arg2);
2698 
2699  /*
2700  * If we still can't find the values, complain: we must have corrupt
2701  * data.
2702  */
2703  if (item1 == NULL)
2704  elog(ERROR, "enum value %u not found in cache for enum %s",
2705  arg1, format_type_be(tcache->type_id));
2706  if (item2 == NULL)
2707  elog(ERROR, "enum value %u not found in cache for enum %s",
2708  arg2, format_type_be(tcache->type_id));
2709  }
2710 
2711  if (item1->sort_order < item2->sort_order)
2712  return -1;
2713  else if (item1->sort_order > item2->sort_order)
2714  return 1;
2715  else
2716  return 0;
2717 }
2718 
2719 /*
2720  * Load (or re-load) the enumData member of the typcache entry.
2721  */
2722 static void
2724 {
2725  TypeCacheEnumData *enumdata;
2726  Relation enum_rel;
2727  SysScanDesc enum_scan;
2728  HeapTuple enum_tuple;
2729  ScanKeyData skey;
2730  EnumItem *items;
2731  int numitems;
2732  int maxitems;
2733  Oid bitmap_base;
2734  Bitmapset *bitmap;
2735  MemoryContext oldcxt;
2736  int bm_size,
2737  start_pos;
2738 
2739  /* Check that this is actually an enum */
2740  if (tcache->typtype != TYPTYPE_ENUM)
2741  ereport(ERROR,
2742  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2743  errmsg("%s is not an enum",
2744  format_type_be(tcache->type_id))));
2745 
2746  /*
2747  * Read all the information for members of the enum type. We collect the
2748  * info in working memory in the caller's context, and then transfer it to
2749  * permanent memory in CacheMemoryContext. This minimizes the risk of
2750  * leaking memory from CacheMemoryContext in the event of an error partway
2751  * through.
2752  */
2753  maxitems = 64;
2754  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2755  numitems = 0;
2756 
2757  /* Scan pg_enum for the members of the target enum type. */
2758  ScanKeyInit(&skey,
2759  Anum_pg_enum_enumtypid,
2760  BTEqualStrategyNumber, F_OIDEQ,
2761  ObjectIdGetDatum(tcache->type_id));
2762 
2763  enum_rel = table_open(EnumRelationId, AccessShareLock);
2764  enum_scan = systable_beginscan(enum_rel,
2765  EnumTypIdLabelIndexId,
2766  true, NULL,
2767  1, &skey);
2768 
2769  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2770  {
2771  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2772 
2773  if (numitems >= maxitems)
2774  {
2775  maxitems *= 2;
2776  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2777  }
2778  items[numitems].enum_oid = en->oid;
2779  items[numitems].sort_order = en->enumsortorder;
2780  numitems++;
2781  }
2782 
2783  systable_endscan(enum_scan);
2784  table_close(enum_rel, AccessShareLock);
2785 
2786  /* Sort the items into OID order */
2787  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2788 
2789  /*
2790  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2791  * known to be in order and can thus be compared with just OID comparison.
2792  *
2793  * The point of this is that the enum's initial OIDs were certainly in
2794  * order, so there is some subset that can be compared via OID comparison;
2795  * and we'd rather not do binary searches unnecessarily.
2796  *
2797  * This is somewhat heuristic, and might identify a subset of OIDs that
2798  * isn't exactly what the type started with. That's okay as long as the
2799  * subset is correctly sorted.
2800  */
2801  bitmap_base = InvalidOid;
2802  bitmap = NULL;
2803  bm_size = 1; /* only save sets of at least 2 OIDs */
2804 
2805  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2806  {
2807  /*
2808  * Identify longest sorted subsequence starting at start_pos
2809  */
2810  Bitmapset *this_bitmap = bms_make_singleton(0);
2811  int this_bm_size = 1;
2812  Oid start_oid = items[start_pos].enum_oid;
2813  float4 prev_order = items[start_pos].sort_order;
2814  int i;
2815 
2816  for (i = start_pos + 1; i < numitems; i++)
2817  {
2818  Oid offset;
2819 
2820  offset = items[i].enum_oid - start_oid;
2821  /* quit if bitmap would be too large; cutoff is arbitrary */
2822  if (offset >= 8192)
2823  break;
2824  /* include the item if it's in-order */
2825  if (items[i].sort_order > prev_order)
2826  {
2827  prev_order = items[i].sort_order;
2828  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2829  this_bm_size++;
2830  }
2831  }
2832 
2833  /* Remember it if larger than previous best */
2834  if (this_bm_size > bm_size)
2835  {
2836  bms_free(bitmap);
2837  bitmap_base = start_oid;
2838  bitmap = this_bitmap;
2839  bm_size = this_bm_size;
2840  }
2841  else
2842  bms_free(this_bitmap);
2843 
2844  /*
2845  * Done if it's not possible to find a longer sequence in the rest of
2846  * the list. In typical cases this will happen on the first
2847  * iteration, which is why we create the bitmaps on the fly instead of
2848  * doing a second pass over the list.
2849  */
2850  if (bm_size >= (numitems - start_pos - 1))
2851  break;
2852  }
2853 
2854  /* OK, copy the data into CacheMemoryContext */
2856  enumdata = (TypeCacheEnumData *)
2857  palloc(offsetof(TypeCacheEnumData, enum_values) +
2858  numitems * sizeof(EnumItem));
2859  enumdata->bitmap_base = bitmap_base;
2860  enumdata->sorted_values = bms_copy(bitmap);
2861  enumdata->num_values = numitems;
2862  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2863  MemoryContextSwitchTo(oldcxt);
2864 
2865  pfree(items);
2866  bms_free(bitmap);
2867 
2868  /* And link the finished cache struct into the typcache */
2869  if (tcache->enumData != NULL)
2870  pfree(tcache->enumData);
2871  tcache->enumData = enumdata;
2872 }
2873 
2874 /*
2875  * Locate the EnumItem with the given OID, if present
2876  */
2877 static EnumItem *
2879 {
2880  EnumItem srch;
2881 
2882  /* On some versions of Solaris, bsearch of zero items dumps core */
2883  if (enumdata->num_values <= 0)
2884  return NULL;
2885 
2886  srch.enum_oid = arg;
2887  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2888  sizeof(EnumItem), enum_oid_cmp);
2889 }
2890 
2891 /*
2892  * qsort comparison function for OID-ordered EnumItems
2893  */
2894 static int
2895 enum_oid_cmp(const void *left, const void *right)
2896 {
2897  const EnumItem *l = (const EnumItem *) left;
2898  const EnumItem *r = (const EnumItem *) right;
2899 
2900  return pg_cmp_u32(l->enum_oid, r->enum_oid);
2901 }
2902 
2903 /*
2904  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2905  * to the given value and return a dsa_pointer.
2906  */
2907 static dsa_pointer
2908 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2909 {
2910  dsa_pointer shared_dp;
2911  TupleDesc shared;
2912 
2913  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2914  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2915  TupleDescCopy(shared, tupdesc);
2916  shared->tdtypmod = typmod;
2917 
2918  return shared_dp;
2919 }
2920 
2921 /*
2922  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2923  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2924  * Tuple descriptors returned by this function are not reference counted, and
2925  * will exist at least as long as the current backend remained attached to the
2926  * current session.
2927  */
2928 static TupleDesc
2930 {
2931  TupleDesc result;
2933  SharedRecordTableEntry *record_table_entry;
2934  SharedTypmodTableEntry *typmod_table_entry;
2935  dsa_pointer shared_dp;
2936  bool found;
2937  uint32 typmod;
2938 
2939  /* If not even attached, nothing to do. */
2941  return NULL;
2942 
2943  /* Try to find a matching tuple descriptor in the record table. */
2944  key.shared = false;
2945  key.u.local_tupdesc = tupdesc;
2946  record_table_entry = (SharedRecordTableEntry *)
2948  if (record_table_entry)
2949  {
2950  Assert(record_table_entry->key.shared);
2952  record_table_entry);
2953  result = (TupleDesc)
2955  record_table_entry->key.u.shared_tupdesc);
2956  Assert(result->tdrefcount == -1);
2957 
2958  return result;
2959  }
2960 
2961  /* Allocate a new typmod number. This will be wasted if we error out. */
2962  typmod = (int)
2964  1);
2965 
2966  /* Copy the TupleDesc into shared memory. */
2967  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2968 
2969  /*
2970  * Create an entry in the typmod table so that others will understand this
2971  * typmod number.
2972  */
2973  PG_TRY();
2974  {
2975  typmod_table_entry = (SharedTypmodTableEntry *)
2977  &typmod, &found);
2978  if (found)
2979  elog(ERROR, "cannot create duplicate shared record typmod");
2980  }
2981  PG_CATCH();
2982  {
2983  dsa_free(CurrentSession->area, shared_dp);
2984  PG_RE_THROW();
2985  }
2986  PG_END_TRY();
2987  typmod_table_entry->typmod = typmod;
2988  typmod_table_entry->shared_tupdesc = shared_dp;
2990  typmod_table_entry);
2991 
2992  /*
2993  * Finally create an entry in the record table so others with matching
2994  * tuple descriptors can reuse the typmod.
2995  */
2996  record_table_entry = (SharedRecordTableEntry *)
2998  &found);
2999  if (found)
3000  {
3001  /*
3002  * Someone concurrently inserted a matching tuple descriptor since the
3003  * first time we checked. Use that one instead.
3004  */
3006  record_table_entry);
3007 
3008  /* Might as well free up the space used by the one we created. */
3010  &typmod);
3011  Assert(found);
3012  dsa_free(CurrentSession->area, shared_dp);
3013 
3014  /* Return the one we found. */
3015  Assert(record_table_entry->key.shared);
3016  result = (TupleDesc)
3018  record_table_entry->key.u.shared_tupdesc);
3019  Assert(result->tdrefcount == -1);
3020 
3021  return result;
3022  }
3023 
3024  /* Store it and return it. */
3025  record_table_entry->key.shared = true;
3026  record_table_entry->key.u.shared_tupdesc = shared_dp;
3028  record_table_entry);
3029  result = (TupleDesc)
3030  dsa_get_address(CurrentSession->area, shared_dp);
3031  Assert(result->tdrefcount == -1);
3032 
3033  return result;
3034 }
3035 
3036 /*
3037  * On-DSM-detach hook to forget about the current shared record typmod
3038  * infrastructure. This is currently used by both leader and workers.
3039  */
3040 static void
3042 {
3043  /* Be cautious here: maybe we didn't finish initializing. */
3044  if (CurrentSession->shared_record_table != NULL)
3045  {
3048  }
3049  if (CurrentSession->shared_typmod_table != NULL)
3050  {
3053  }
3055 }
3056 
3057 /*
3058  * Insert RelIdToTypeIdCacheHash entry if needed.
3059  */
3060 static void
3062 {
3063  /* Immediately quit for non-composite types */
3064  if (typentry->typtype != TYPTYPE_COMPOSITE)
3065  return;
3066 
3067  /* typrelid should be given for composite types */
3068  Assert(OidIsValid(typentry->typrelid));
3069 
3070  /*
3071  * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3072  * information indicating it should be here.
3073  */
3074  if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3075  (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3076  typentry->tupDesc != NULL)
3077  {
3078  RelIdToTypeIdCacheEntry *relentry;
3079  bool found;
3080 
3082  &typentry->typrelid,
3083  HASH_ENTER, &found);
3084  relentry->relid = typentry->typrelid;
3085  relentry->composite_typid = typentry->type_id;
3086  }
3087 }
3088 
3089 /*
3090  * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3091  * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3092  * or tupDesc.
3093  */
3094 static void
3096 {
3097 #ifdef USE_ASSERT_CHECKING
3098  int i;
3099  bool is_in_progress = false;
3100 
3101  for (i = 0; i < in_progress_list_len; i++)
3102  {
3103  if (in_progress_list[i] == typentry->type_id)
3104  {
3105  is_in_progress = true;
3106  break;
3107  }
3108  }
3109 #endif
3110 
3111  /* Immediately quit for non-composite types */
3112  if (typentry->typtype != TYPTYPE_COMPOSITE)
3113  return;
3114 
3115  /* typrelid should be given for composite types */
3116  Assert(OidIsValid(typentry->typrelid));
3117 
3118  /*
3119  * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3120  * information indicating entry should be still there.
3121  */
3122  if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3123  !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3124  typentry->tupDesc == NULL)
3125  {
3126  bool found;
3127 
3129  &typentry->typrelid,
3130  HASH_REMOVE, &found);
3131  Assert(found || is_in_progress);
3132  }
3133  else
3134  {
3135 #ifdef USE_ASSERT_CHECKING
3136  /*
3137  * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3138  * entry if it should exist.
3139  */
3140  bool found;
3141 
3142  if (!is_in_progress)
3143  {
3145  &typentry->typrelid,
3146  HASH_FIND, &found);
3147  Assert(found);
3148  }
3149 #endif
3150  }
3151 }
3152 
3153 /*
3154  * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3155  * entries, marked as in-progress by lookup_type_cache(). It may happen
3156  * in case of an error or interruption during the lookup_type_cache() call.
3157  */
3158 static void
3160 {
3161  int i;
3162 
3163  for (i = 0; i < in_progress_list_len; i++)
3164  {
3165  TypeCacheEntry *typentry;
3166 
3167  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3168  &in_progress_list[i],
3169  HASH_FIND, NULL);
3170  if (typentry)
3172  }
3173 
3175 }
3176 
/*
 * Transaction-end hook: finish up any typcache lookups that were interrupted
 * by an error, so RelIdToTypeIdCacheHash stays consistent.
 */
void
AtEOXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}

/*
 * Subtransaction-end hook: same cleanup as AtEOXact_TypeCache.
 */
void
AtEOSubXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:366
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
#define TextDatumGetCString(d)
Definition: builtins.h:98
#define NameStr(name)
Definition: c.h:700
#define RegProcedureIsValid(p)
Definition: c.h:731
#define Assert(condition)
Definition: c.h:812
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:417
int32_t int32
Definition: c.h:481
uint64_t uint64
Definition: c.h:486
uint32_t uint32
Definition: c.h:485
float float4
Definition: c.h:583
#define MemSet(start, val, len)
Definition: c.h:974
#define OidIsValid(objectId)
Definition: c.h:729
size_t Size
Definition: c.h:559
void CreateCacheMemoryContext(void)
Definition: catcache.c:680
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:942
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:826
uint64 dsa_pointer
Definition: dsa.h:62
#define dsa_allocate(area, size)
Definition: dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition: dshash.c:590
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:307
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
dsa_pointer dshash_table_handle
Definition: dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition: dynahash.c:1405
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define PG_RE_THROW()
Definition: elog.h:412
#define PG_TRY(...)
Definition: elog.h:371
#define PG_END_TRY(...)
Definition: elog.h:396
#define ERROR
Definition: elog.h:39
#define PG_CATCH(...)
Definition: elog.h:381
#define elog(elevel,...)
Definition: elog.h:225
#define ereport(elevel,...)
Definition: elog.h:149
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:138
@ DOM_CONSTRAINT_CHECK
Definition: execnodes.h:1012
@ DOM_CONSTRAINT_NOTNULL
Definition: execnodes.h:1011
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:606
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:513
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:387
#define HASHSTANDARD_PROC
Definition: hash.h:355
#define HASHEXTENDED_PROC
Definition: hash.h:356
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define GETSTRUCT(TUP)
Definition: htup_details.h:653
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:749
#define IsParallelWorker()
Definition: parallel.h:60
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2338
long val
Definition: informix.c:689
#define INJECTION_POINT(name)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition: int.h:604
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1746
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1704
int b
Definition: isn.c:69
int a
Definition: isn.c:68
int i
Definition: isn.c:72
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lcons(void *datum, List *list)
Definition: list.c:495
#define AccessShareLock
Definition: lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1212
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1190
Oid get_multirange_range(Oid multirangeOid)
Definition: lsyscache.c:3483
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:796
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1285
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:166
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2832
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2538
@ LWTRANCHE_PER_SESSION_RECORD_TYPMOD
Definition: lwlock.h:198
@ LWTRANCHE_PER_SESSION_RECORD_TYPE
Definition: lwlock.h:197
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:568
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:637
char * pstrdup(const char *in)
Definition: mcxt.c:1696
void pfree(void *pointer)
Definition: mcxt.c:1521
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1215
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1541
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
MemoryContext CacheMemoryContext
Definition: mcxt.c:152
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:454
void * palloc(Size size)
Definition: mcxt.c:1317
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:170
#define BTORDER_PROC
Definition: nbtree.h:707
#define makeNode(_type_)
Definition: nodes.h:155
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:209
void * arg
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
FormData_pg_constraint * Form_pg_constraint
const void * data
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
FormData_pg_range * Form_pg_range
Definition: pg_range.h:58
FormData_pg_type * Form_pg_type
Definition: pg_type.h:261
Expr * expression_planner(Expr *expr)
Definition: planner.c:6735
#define qsort(a, b, c, d)
Definition: port.h:447
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:252
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
char * c
MemoryContextSwitchTo(old_ctx)
tree ctl
Definition: radixtree.h:1853
void * stringToNode(const char *str)
Definition: read.c:90
#define RelationGetDescr(relation)
Definition: rel.h:531
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
Session * CurrentSession
Definition: session.c:48
static pg_noinline void Size size
Definition: slab.c:607
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:47
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define HTEqualStrategyNumber
Definition: stratnum.h:41
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
MemoryContext dccContext
Definition: typcache.c:142
DomainConstraintCache * dcc
Definition: typcache.h:172
MemoryContext refctx
Definition: typcache.h:167
MemoryContextCallback callback
Definition: typcache.h:173
TypeCacheEntry * tcache
Definition: typcache.h:168
DomainConstraintType constrainttype
Definition: execnodes.h:1018
ExprState * check_exprstate
Definition: execnodes.h:1021
float4 sort_order
Definition: typcache.c:150
Oid enum_oid
Definition: typcache.c:149
Oid fn_oid
Definition: fmgr.h:59
Definition: dynahash.c:220
Definition: pg_list.h:54
MemoryContextCallbackFunction func
Definition: palloc.h:49
TupleDesc tupdesc
Definition: typcache.c:174
TupleDesc rd_att
Definition: rel.h:112
Form_pg_class rd_rel
Definition: rel.h:111
dsm_segment * segment
Definition: session.h:27
dshash_table * shared_record_table
Definition: session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
dsa_area * area
Definition: session.h:28
dshash_table * shared_typmod_table
Definition: session.h:33
SharedRecordTableKey key
Definition: typcache.c:213
TupleDesc local_tupdesc
Definition: typcache.c:201
dsa_pointer shared_tupdesc
Definition: typcache.c:202
union SharedRecordTableKey::@31 u
dshash_table_handle typmod_table_handle
Definition: typcache.c:186
pg_atomic_uint32 next_typmod
Definition: typcache.c:188
dshash_table_handle record_table_handle
Definition: typcache.c:184
dsa_pointer shared_tupdesc
Definition: typcache.c:223
int tdrefcount
Definition: tupdesc.h:84
int32 tdtypmod
Definition: tupdesc.h:83
Oid tdtypeid
Definition: tupdesc.h:82
uint32 type_id_hash
Definition: typcache.h:36
uint64 tupDesc_identifier
Definition: typcache.h:90
FmgrInfo hash_proc_finfo
Definition: typcache.h:77
int32 domainBaseTypmod
Definition: typcache.h:115
Oid hash_extended_proc
Definition: typcache.h:66
Oid typsubscript
Definition: typcache.h:45
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:101
FmgrInfo cmp_proc_finfo
Definition: typcache.h:76
Oid rng_collation
Definition: typcache.h:100
char typalign
Definition: typcache.h:41
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:98
char typtype
Definition: typcache.h:43
TupleDesc tupDesc
Definition: typcache.h:89
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:78
DomainConstraintCache * domainData
Definition: typcache.h:121
struct TypeCacheEntry * rngtype
Definition: typcache.h:108
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:103
FmgrInfo eq_opr_finfo
Definition: typcache.h:75
Oid btree_opintype
Definition: typcache.h:58
struct TypeCacheEnumData * enumData
Definition: typcache.h:130
struct TypeCacheEntry * nextDomain
Definition: typcache.h:133
bool typbyval
Definition: typcache.h:40
FmgrInfo rng_canonical_finfo
Definition: typcache.h:102
int16 typlen
Definition: typcache.h:39
Oid hash_opintype
Definition: typcache.h:60
Oid typcollation
Definition: typcache.h:47
Oid domainBaseType
Definition: typcache.h:114
char typstorage
Definition: typcache.h:42
Oid rng_opfamily
Definition: typcache.h:99
Bitmapset * sorted_values
Definition: typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:158
Definition: dsa.c:348
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:269
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:221
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
static ItemArray items
Definition: test_tidstore.c:48
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:173
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:251
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:406
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:331
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:388
uint32 hashRowType(TupleDesc desc)
Definition: tupdesc.c:622
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:133
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:586
#define TupleDescSize(src)
Definition: tupdesc.h:102
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:116
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1713
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3061
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1400
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1826
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1920
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2294
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1729
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:115
void AtEOXact_TypeCache(void)
Definition: typcache.c:3178
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:119
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2723
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1592
static HTAB * RelIdToTypeIdCacheHash
Definition: typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2878
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1600
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2929
static int in_progress_list_maxlen
Definition: typcache.c:228
static int32 NextRecordTypmod
Definition: typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1976
static Oid * in_progress_list
Definition: typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3095
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1753
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1362
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1937
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1576
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1341
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition: typcache.c:2363
struct SharedRecordTableEntry SharedRecordTableEntry
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:2195
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1317
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1538
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1530
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1059
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition: typcache.c:359
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:105
struct SharedTypmodTableEntry SharedTypmodTableEntry
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1761
static int in_progress_list_len
Definition: typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1514
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2908
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1001
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:2132
static RecordCacheArrayEntry * RecordCacheArray
Definition: typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1721
static HTAB * RecordCacheHash
Definition: typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2621
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:96
struct RelIdToTypeIdCacheEntry RelIdToTypeIdCacheEntry
struct RecordCacheEntry RecordCacheEntry
void AtEOSubXact_TypeCache(void)
Definition: typcache.c:3184
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:3041
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:111
struct RecordCacheArrayEntry RecordCacheArrayEntry
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:107
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2511
struct TypeCacheEnumData TypeCacheEnumData
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2597
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2568
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1081
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1487
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2415
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1546
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:2173
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1769
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:108
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1522
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:99
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2650
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:118
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition: typcache.c:117
struct SharedRecordTableKey SharedRecordTableKey
static int32 RecordCacheArrayLen
Definition: typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:2040
static HTAB * TypeCacheHash
Definition: typcache.c:79
static uint64 tupledesc_id_counter
Definition: typcache.c:313
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1584
#define TCFLAGS_HAVE_FIELD_HASHING
Definition: typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:2024
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1954
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2895
static void finalize_in_progress_typentries(void)
Definition: typcache.c:3159
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1330
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1438
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1797
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1608
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:2013
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:967
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:156
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:144
#define TYPECACHE_EQ_OPR
Definition: typcache.h:137
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:147
#define TYPECACHE_TUPDESC
Definition: typcache.h:145
#define TYPECACHE_MULTIRANGE_INFO
Definition: typcache.h:153
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:176
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:142
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:151
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:146
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:149
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:150
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:148
#define TYPECACHE_GT_OPR
Definition: typcache.h:139
#define TYPECACHE_CMP_PROC
Definition: typcache.h:140
struct TypeCacheEntry TypeCacheEntry
#define TYPECACHE_LT_OPR
Definition: typcache.h:138
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:152
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:143
#define TYPECACHE_HASH_PROC
Definition: typcache.h:141