PostgreSQL Source Code  git master
typcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Core data extracted from the pg_type row is updated
27  * when we detect updates to pg_type. Information dependent on opclasses is
28  * cleared if we detect updates to pg_opclass. We also support clearing the
29  * tuple descriptor and operator/function parts of a rowtype's cache entry,
30  * since those may need to change as a consequence of ALTER TABLE. Domain
31  * constraint changes are also tracked properly.
32  *
33  *
34  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
35  * Portions Copyright (c) 1994, Regents of the University of California
36  *
37  * IDENTIFICATION
38  * src/backend/utils/cache/typcache.c
39  *
40  *-------------------------------------------------------------------------
41  */
42 #include "postgres.h"
43 
44 #include <limits.h>
45 
46 #include "access/hash.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/relation.h"
51 #include "access/session.h"
52 #include "access/table.h"
53 #include "catalog/pg_am.h"
54 #include "catalog/pg_constraint.h"
55 #include "catalog/pg_enum.h"
56 #include "catalog/pg_operator.h"
57 #include "catalog/pg_range.h"
58 #include "catalog/pg_type.h"
59 #include "commands/defrem.h"
60 #include "executor/executor.h"
61 #include "lib/dshash.h"
62 #include "optimizer/optimizer.h"
63 #include "port/pg_bitutils.h"
64 #include "storage/lwlock.h"
65 #include "utils/builtins.h"
66 #include "utils/catcache.h"
67 #include "utils/fmgroids.h"
68 #include "utils/inval.h"
69 #include "utils/lsyscache.h"
70 #include "utils/memutils.h"
71 #include "utils/rel.h"
72 #include "utils/snapmgr.h"
73 #include "utils/syscache.h"
74 #include "utils/typcache.h"
75 
76 
77 /* The main type cache hashtable searched by lookup_type_cache */
78 static HTAB *TypeCacheHash = NULL;
79 
80 /* List of type cache entries for domain types */
82 
83 /* Private flag bits in the TypeCacheEntry.flags field */
/*
 * Two families of bits: TCFLAGS_CHECKED_* record that a lookup has been
 * performed (set after the corresponding search in lookup_type_cache, e.g.
 * TCFLAGS_CHECKED_EQ_OPR is OR'd in once eq_opr has been resolved), while
 * TCFLAGS_HAVE_* record a positive result of such a check.  Clearing a
 * CHECKED bit forces the information to be recomputed on next request.
 */
84 #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
85 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
86 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
87 #define TCFLAGS_CHECKED_EQ_OPR 0x000008
88 #define TCFLAGS_CHECKED_LT_OPR 0x000010
89 #define TCFLAGS_CHECKED_GT_OPR 0x000020
90 #define TCFLAGS_CHECKED_CMP_PROC 0x000040
91 #define TCFLAGS_CHECKED_HASH_PROC 0x000080
92 #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
93 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
94 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
95 #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
96 #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
97 #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
98 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
99 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
100 #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
101 #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
102 #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
103 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
104 #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
105 
106 /* The flags associated with equality/comparison/hashing are all but these: */
/* i.e. a mask selecting every operator/function-related bit above */
107 #define TCFLAGS_OPERATOR_FLAGS \
108  (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
109  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
110  TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
111 
112 /*
113  * Data stored about a domain type's constraints. Note that we do not create
114  * this struct for the common case of a constraint-less domain; we just set
115  * domainData to NULL to indicate that.
116  *
117  * Within a DomainConstraintCache, we store expression plan trees, but the
118  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
119  * When needed, expression evaluation nodes are built by flat-copying the
120  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
121  * Such a node tree is not part of the DomainConstraintCache, but is
122  * considered to belong to a DomainConstraintRef.
123  */
125 {
126  List *constraints; /* list of DomainConstraintState nodes */
127  MemoryContext dccContext; /* memory context holding all associated data */
128  long dccRefCount; /* number of references to this struct */
129 };
130 
131 /* Private information to support comparisons of enum values */
132 typedef struct
133 {
134  Oid enum_oid; /* OID of one enum value */
135  float4 sort_order; /* its sort position */
136 } EnumItem;
137 
138 typedef struct TypeCacheEnumData
139 {
140  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
141  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
142  int num_values; /* total number of values in enum */
145 
146 /*
147  * We use a separate table for storing the definitions of non-anonymous
148  * record types. Once defined, a record type will be remembered for the
149  * life of the backend. Subsequent uses of the "same" record type (where
150  * sameness means equalTupleDescs) will refer to the existing table entry.
151  *
152  * Stored record types are remembered in a linear array of TupleDescs,
153  * which can be indexed quickly with the assigned typmod. There is also
154  * a hash table to speed searches for matching TupleDescs.
155  */
156 
157 typedef struct RecordCacheEntry
158 {
161 
162 /*
163  * To deal with non-anonymous record types that are exchanged by backends
164  * involved in a parallel query, we also need a shared version of the above.
165  */
167 {
168  /* A hash table for finding a matching TupleDesc. */
170  /* A hash table for finding a TupleDesc by typmod. */
172  /* A source of new record typmod numbers. */
174 };
175 
176 /*
177  * When using shared tuple descriptors as hash table keys we need a way to be
178  * able to search for an equal shared TupleDesc using a backend-local
179  * TupleDesc. So we use this type which can hold either, and hash and compare
180  * functions that know how to handle both.
181  */
182 typedef struct SharedRecordTableKey
183 {
184  union
185  {
188  } u;
189  bool shared;
191 
192 /*
193  * The shared version of RecordCacheEntry. This lets us look up a typmod
194  * using a TupleDesc which may be in local or shared memory.
195  */
197 {
200 
201 /*
202  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
203  * up a TupleDesc in shared memory using a typmod.
204  */
206 {
210 
211 /*
212  * A comparator function for SharedRecordTableKey.
213  */
214 static int
215 shared_record_table_compare(const void *a, const void *b, size_t size,
216  void *arg)
217 {
218  dsa_area *area = (dsa_area *) arg;
/*
 * NOTE(review): the declarations of k1/k2 (casts of 'a'/'b' to
 * SharedRecordTableKey *, original lines 219-220) were lost in extraction;
 * restore them from the upstream file before compiling.
 */
221  TupleDesc t1;
222  TupleDesc t2;
223 
/* Resolve each key to a local TupleDesc pointer: a shared key holds a
 * dsa_pointer that must be translated via dsa_get_address(); a local key
 * already carries a backend-local TupleDesc. */
224  if (k1->shared)
225  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
226  else
227  t1 = k1->u.local_tupdesc;
228 
229  if (k2->shared)
230  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
231  else
232  t2 = k2->u.local_tupdesc;
233 
/* dshash comparator contract: 0 means equal, nonzero means not equal */
234  return equalTupleDescs(t1, t2) ? 0 : 1;
235 }
236 
237 /*
238  * A hash function for SharedRecordTableKey.
239  */
240 static uint32
241 shared_record_table_hash(const void *a, size_t size, void *arg)
242 {
243  dsa_area *area = (dsa_area *) arg;
/*
 * NOTE(review): the declaration of k (cast of 'a' to SharedRecordTableKey *,
 * original line 244) was lost in extraction; restore it from upstream.
 */
245  TupleDesc t;
246 
/* As in shared_record_table_compare: translate a shared key's dsa_pointer,
 * or use the backend-local TupleDesc directly. */
247  if (k->shared)
248  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
249  else
250  t = k->u.local_tupdesc;
251 
/* Hash the resolved descriptor so local and shared keys hash identically */
252  return hashTupleDesc(t);
253 }
254 
255 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
257  sizeof(SharedRecordTableKey), /* unused */
258  sizeof(SharedRecordTableEntry),
262 };
263 
264 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
266  sizeof(uint32),
267  sizeof(SharedTypmodTableEntry),
271 };
272 
273 /* hashtable for recognizing registered record types */
274 static HTAB *RecordCacheHash = NULL;
275 
276 /* arrays of info about registered record types, indexed by assigned typmod */
278 static uint64 *RecordIdentifierArray = NULL;
279 static int32 RecordCacheArrayLen = 0; /* allocated length of above arrays */
280 static int32 NextRecordTypmod = 0; /* number of entries used */
281 
282 /*
283  * Process-wide counter for generating unique tupledesc identifiers.
284  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
285  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
286  */
288 
289 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
290 static void load_rangetype_info(TypeCacheEntry *typentry);
291 static void load_multirangetype_info(TypeCacheEntry *typentry);
292 static void load_domaintype_info(TypeCacheEntry *typentry);
293 static int dcs_cmp(const void *a, const void *b);
294 static void decr_dcc_refcount(DomainConstraintCache *dcc);
295 static void dccref_deletion_callback(void *arg);
297 static bool array_element_has_equality(TypeCacheEntry *typentry);
298 static bool array_element_has_compare(TypeCacheEntry *typentry);
299 static bool array_element_has_hashing(TypeCacheEntry *typentry);
301 static void cache_array_element_properties(TypeCacheEntry *typentry);
302 static bool record_fields_have_equality(TypeCacheEntry *typentry);
303 static bool record_fields_have_compare(TypeCacheEntry *typentry);
304 static bool record_fields_have_hashing(TypeCacheEntry *typentry);
306 static void cache_record_field_properties(TypeCacheEntry *typentry);
307 static bool range_element_has_hashing(TypeCacheEntry *typentry);
309 static void cache_range_element_properties(TypeCacheEntry *typentry);
310 static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
313 static void TypeCacheRelCallback(Datum arg, Oid relid);
314 static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
315 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
316 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
317 static void load_enum_cache_data(TypeCacheEntry *tcache);
318 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
319 static int enum_oid_cmp(const void *left, const void *right);
321  Datum datum);
323 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
324  uint32 typmod);
325 
326 
327 /*
328  * lookup_type_cache
329  *
330  * Fetch the type cache entry for the specified datatype, and make sure that
331  * all the fields requested by bits in 'flags' are valid.
332  *
333  * The result is never NULL --- we will ereport() if the passed type OID is
334  * invalid. Note however that we may fail to find one or more of the
335  * values requested by 'flags'; the caller needs to check whether the fields
336  * are InvalidOid or not.
337  */
339 lookup_type_cache(Oid type_id, int flags)
340 {
341  TypeCacheEntry *typentry;
342  bool found;
343 
344  if (TypeCacheHash == NULL)
345  {
346  /* First time through: initialize the hash table */
347  HASHCTL ctl;
348 
349  ctl.keysize = sizeof(Oid);
350  ctl.entrysize = sizeof(TypeCacheEntry);
351  TypeCacheHash = hash_create("Type information cache", 64,
352  &ctl, HASH_ELEM | HASH_BLOBS);
353 
354  /* Also set up callbacks for SI invalidations */
359 
360  /* Also make sure CacheMemoryContext exists */
361  if (!CacheMemoryContext)
363  }
364 
365  /* Try to look up an existing entry */
366  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
367  (void *) &type_id,
368  HASH_FIND, NULL);
369  if (typentry == NULL)
370  {
371  /*
372  * If we didn't find one, we want to make one. But first look up the
373  * pg_type row, just to make sure we don't make a cache entry for an
374  * invalid type OID. If the type OID is not valid, present a
375  * user-facing error, since some code paths such as domain_in() allow
376  * this function to be reached with a user-supplied OID.
377  */
378  HeapTuple tp;
379  Form_pg_type typtup;
380 
381  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
382  if (!HeapTupleIsValid(tp))
383  ereport(ERROR,
384  (errcode(ERRCODE_UNDEFINED_OBJECT),
385  errmsg("type with OID %u does not exist", type_id)));
386  typtup = (Form_pg_type) GETSTRUCT(tp);
387  if (!typtup->typisdefined)
388  ereport(ERROR,
389  (errcode(ERRCODE_UNDEFINED_OBJECT),
390  errmsg("type \"%s\" is only a shell",
391  NameStr(typtup->typname))));
392 
393  /* Now make the typcache entry */
394  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
395  (void *) &type_id,
396  HASH_ENTER, &found);
397  Assert(!found); /* it wasn't there a moment ago */
398 
399  MemSet(typentry, 0, sizeof(TypeCacheEntry));
400 
401  /* These fields can never change, by definition */
402  typentry->type_id = type_id;
404  ObjectIdGetDatum(type_id));
405 
406  /* Keep this part in sync with the code below */
407  typentry->typlen = typtup->typlen;
408  typentry->typbyval = typtup->typbyval;
409  typentry->typalign = typtup->typalign;
410  typentry->typstorage = typtup->typstorage;
411  typentry->typtype = typtup->typtype;
412  typentry->typrelid = typtup->typrelid;
413  typentry->typsubscript = typtup->typsubscript;
414  typentry->typelem = typtup->typelem;
415  typentry->typcollation = typtup->typcollation;
416  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
417 
418  /* If it's a domain, immediately thread it into the domain cache list */
419  if (typentry->typtype == TYPTYPE_DOMAIN)
420  {
421  typentry->nextDomain = firstDomainTypeEntry;
422  firstDomainTypeEntry = typentry;
423  }
424 
425  ReleaseSysCache(tp);
426  }
427  else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
428  {
429  /*
430  * We have an entry, but its pg_type row got changed, so reload the
431  * data obtained directly from pg_type.
432  */
433  HeapTuple tp;
434  Form_pg_type typtup;
435 
436  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
437  if (!HeapTupleIsValid(tp))
438  ereport(ERROR,
439  (errcode(ERRCODE_UNDEFINED_OBJECT),
440  errmsg("type with OID %u does not exist", type_id)));
441  typtup = (Form_pg_type) GETSTRUCT(tp);
442  if (!typtup->typisdefined)
443  ereport(ERROR,
444  (errcode(ERRCODE_UNDEFINED_OBJECT),
445  errmsg("type \"%s\" is only a shell",
446  NameStr(typtup->typname))));
447 
448  /*
449  * Keep this part in sync with the code above. Many of these fields
450  * shouldn't ever change, particularly typtype, but copy 'em anyway.
451  */
452  typentry->typlen = typtup->typlen;
453  typentry->typbyval = typtup->typbyval;
454  typentry->typalign = typtup->typalign;
455  typentry->typstorage = typtup->typstorage;
456  typentry->typtype = typtup->typtype;
457  typentry->typrelid = typtup->typrelid;
458  typentry->typsubscript = typtup->typsubscript;
459  typentry->typelem = typtup->typelem;
460  typentry->typcollation = typtup->typcollation;
461  typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
462 
463  ReleaseSysCache(tp);
464  }
465 
466  /*
467  * Look up opclasses if we haven't already and any dependent info is
468  * requested.
469  */
474  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
475  {
476  Oid opclass;
477 
478  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
479  if (OidIsValid(opclass))
480  {
481  typentry->btree_opf = get_opclass_family(opclass);
482  typentry->btree_opintype = get_opclass_input_type(opclass);
483  }
484  else
485  {
486  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
487  }
488 
489  /*
490  * Reset information derived from btree opclass. Note in particular
491  * that we'll redetermine the eq_opr even if we previously found one;
492  * this matters in case a btree opclass has been added to a type that
493  * previously had only a hash opclass.
494  */
495  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
500  }
501 
502  /*
503  * If we need to look up equality operator, and there's no btree opclass,
504  * force lookup of hash opclass.
505  */
506  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
507  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
508  typentry->btree_opf == InvalidOid)
509  flags |= TYPECACHE_HASH_OPFAMILY;
510 
515  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
516  {
517  Oid opclass;
518 
519  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
520  if (OidIsValid(opclass))
521  {
522  typentry->hash_opf = get_opclass_family(opclass);
523  typentry->hash_opintype = get_opclass_input_type(opclass);
524  }
525  else
526  {
527  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
528  }
529 
530  /*
531  * Reset information derived from hash opclass. We do *not* reset the
532  * eq_opr; if we already found one from the btree opclass, that
533  * decision is still good.
534  */
535  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
537  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
538  }
539 
540  /*
541  * Look for requested operators and functions, if we haven't already.
542  */
543  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
544  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
545  {
546  Oid eq_opr = InvalidOid;
547 
548  if (typentry->btree_opf != InvalidOid)
549  eq_opr = get_opfamily_member(typentry->btree_opf,
550  typentry->btree_opintype,
551  typentry->btree_opintype,
553  if (eq_opr == InvalidOid &&
554  typentry->hash_opf != InvalidOid)
555  eq_opr = get_opfamily_member(typentry->hash_opf,
556  typentry->hash_opintype,
557  typentry->hash_opintype,
559 
560  /*
561  * If the proposed equality operator is array_eq or record_eq, check
562  * to see if the element type or column types support equality. If
563  * not, array_eq or record_eq would fail at runtime, so we don't want
564  * to report that the type has equality. (We can omit similar
565  * checking for ranges and multiranges because ranges can't be created
566  * in the first place unless their subtypes support equality.)
567  */
568  if (eq_opr == ARRAY_EQ_OP &&
569  !array_element_has_equality(typentry))
570  eq_opr = InvalidOid;
571  else if (eq_opr == RECORD_EQ_OP &&
572  !record_fields_have_equality(typentry))
573  eq_opr = InvalidOid;
574 
575  /* Force update of eq_opr_finfo only if we're changing state */
576  if (typentry->eq_opr != eq_opr)
577  typentry->eq_opr_finfo.fn_oid = InvalidOid;
578 
579  typentry->eq_opr = eq_opr;
580 
581  /*
582  * Reset info about hash functions whenever we pick up new info about
583  * equality operator. This is so we can ensure that the hash
584  * functions match the operator.
585  */
586  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
588  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
589  }
590  if ((flags & TYPECACHE_LT_OPR) &&
591  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
592  {
593  Oid lt_opr = InvalidOid;
594 
595  if (typentry->btree_opf != InvalidOid)
596  lt_opr = get_opfamily_member(typentry->btree_opf,
597  typentry->btree_opintype,
598  typentry->btree_opintype,
600 
601  /*
602  * As above, make sure array_cmp or record_cmp will succeed; but again
603  * we need no special check for ranges or multiranges.
604  */
605  if (lt_opr == ARRAY_LT_OP &&
606  !array_element_has_compare(typentry))
607  lt_opr = InvalidOid;
608  else if (lt_opr == RECORD_LT_OP &&
609  !record_fields_have_compare(typentry))
610  lt_opr = InvalidOid;
611 
612  typentry->lt_opr = lt_opr;
613  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
614  }
615  if ((flags & TYPECACHE_GT_OPR) &&
616  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
617  {
618  Oid gt_opr = InvalidOid;
619 
620  if (typentry->btree_opf != InvalidOid)
621  gt_opr = get_opfamily_member(typentry->btree_opf,
622  typentry->btree_opintype,
623  typentry->btree_opintype,
625 
626  /*
627  * As above, make sure array_cmp or record_cmp will succeed; but again
628  * we need no special check for ranges or multiranges.
629  */
630  if (gt_opr == ARRAY_GT_OP &&
631  !array_element_has_compare(typentry))
632  gt_opr = InvalidOid;
633  else if (gt_opr == RECORD_GT_OP &&
634  !record_fields_have_compare(typentry))
635  gt_opr = InvalidOid;
636 
637  typentry->gt_opr = gt_opr;
638  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
639  }
640  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
641  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
642  {
643  Oid cmp_proc = InvalidOid;
644 
645  if (typentry->btree_opf != InvalidOid)
646  cmp_proc = get_opfamily_proc(typentry->btree_opf,
647  typentry->btree_opintype,
648  typentry->btree_opintype,
649  BTORDER_PROC);
650 
651  /*
652  * As above, make sure array_cmp or record_cmp will succeed; but again
653  * we need no special check for ranges or multiranges.
654  */
655  if (cmp_proc == F_BTARRAYCMP &&
656  !array_element_has_compare(typentry))
657  cmp_proc = InvalidOid;
658  else if (cmp_proc == F_BTRECORDCMP &&
659  !record_fields_have_compare(typentry))
660  cmp_proc = InvalidOid;
661 
662  /* Force update of cmp_proc_finfo only if we're changing state */
663  if (typentry->cmp_proc != cmp_proc)
664  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
665 
666  typentry->cmp_proc = cmp_proc;
667  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
668  }
670  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
671  {
672  Oid hash_proc = InvalidOid;
673 
674  /*
675  * We insist that the eq_opr, if one has been determined, match the
676  * hash opclass; else report there is no hash function.
677  */
678  if (typentry->hash_opf != InvalidOid &&
679  (!OidIsValid(typentry->eq_opr) ||
680  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
681  typentry->hash_opintype,
682  typentry->hash_opintype,
684  hash_proc = get_opfamily_proc(typentry->hash_opf,
685  typentry->hash_opintype,
686  typentry->hash_opintype,
688 
689  /*
690  * As above, make sure hash_array, hash_record, or hash_range will
691  * succeed.
692  */
693  if (hash_proc == F_HASH_ARRAY &&
694  !array_element_has_hashing(typentry))
695  hash_proc = InvalidOid;
696  else if (hash_proc == F_HASH_RECORD &&
697  !record_fields_have_hashing(typentry))
698  hash_proc = InvalidOid;
699  else if (hash_proc == F_HASH_RANGE &&
700  !range_element_has_hashing(typentry))
701  hash_proc = InvalidOid;
702 
703  /*
704  * Likewise for hash_multirange.
705  */
706  if (hash_proc == F_HASH_MULTIRANGE &&
708  hash_proc = InvalidOid;
709 
710  /* Force update of hash_proc_finfo only if we're changing state */
711  if (typentry->hash_proc != hash_proc)
712  typentry->hash_proc_finfo.fn_oid = InvalidOid;
713 
714  typentry->hash_proc = hash_proc;
715  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
716  }
717  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
720  {
721  Oid hash_extended_proc = InvalidOid;
722 
723  /*
724  * We insist that the eq_opr, if one has been determined, match the
725  * hash opclass; else report there is no hash function.
726  */
727  if (typentry->hash_opf != InvalidOid &&
728  (!OidIsValid(typentry->eq_opr) ||
729  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
730  typentry->hash_opintype,
731  typentry->hash_opintype,
733  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
734  typentry->hash_opintype,
735  typentry->hash_opintype,
737 
738  /*
739  * As above, make sure hash_array_extended, hash_record_extended, or
740  * hash_range_extended will succeed.
741  */
742  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
744  hash_extended_proc = InvalidOid;
745  else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
747  hash_extended_proc = InvalidOid;
748  else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
750  hash_extended_proc = InvalidOid;
751 
752  /*
753  * Likewise for hash_multirange_extended.
754  */
755  if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
757  hash_extended_proc = InvalidOid;
758 
759  /* Force update of proc finfo only if we're changing state */
760  if (typentry->hash_extended_proc != hash_extended_proc)
762 
763  typentry->hash_extended_proc = hash_extended_proc;
765  }
766 
767  /*
768  * Set up fmgr lookup info as requested
769  *
770  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
771  * which is not quite right (they're really in the hash table's private
772  * memory context) but this will do for our purposes.
773  *
774  * Note: the code above avoids invalidating the finfo structs unless the
775  * referenced operator/function OID actually changes. This is to prevent
776  * unnecessary leakage of any subsidiary data attached to an finfo, since
777  * that would cause session-lifespan memory leaks.
778  */
779  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
780  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
781  typentry->eq_opr != InvalidOid)
782  {
783  Oid eq_opr_func;
784 
785  eq_opr_func = get_opcode(typentry->eq_opr);
786  if (eq_opr_func != InvalidOid)
787  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
789  }
790  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
791  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
792  typentry->cmp_proc != InvalidOid)
793  {
794  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
796  }
797  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
798  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
799  typentry->hash_proc != InvalidOid)
800  {
801  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
803  }
804  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
806  typentry->hash_extended_proc != InvalidOid)
807  {
809  &typentry->hash_extended_proc_finfo,
811  }
812 
813  /*
814  * If it's a composite type (row type), get tupdesc if requested
815  */
816  if ((flags & TYPECACHE_TUPDESC) &&
817  typentry->tupDesc == NULL &&
818  typentry->typtype == TYPTYPE_COMPOSITE)
819  {
820  load_typcache_tupdesc(typentry);
821  }
822 
823  /*
824  * If requested, get information about a range type
825  *
826  * This includes making sure that the basic info about the range element
827  * type is up-to-date.
828  */
829  if ((flags & TYPECACHE_RANGE_INFO) &&
830  typentry->typtype == TYPTYPE_RANGE)
831  {
832  if (typentry->rngelemtype == NULL)
833  load_rangetype_info(typentry);
834  else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
835  (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
836  }
837 
838  /*
839  * If requested, get information about a multirange type
840  */
841  if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
842  typentry->rngtype == NULL &&
843  typentry->typtype == TYPTYPE_MULTIRANGE)
844  {
845  load_multirangetype_info(typentry);
846  }
847 
848  /*
849  * If requested, get information about a domain type
850  */
851  if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
852  typentry->domainBaseType == InvalidOid &&
853  typentry->typtype == TYPTYPE_DOMAIN)
854  {
855  typentry->domainBaseTypmod = -1;
856  typentry->domainBaseType =
857  getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
858  }
859  if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
860  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
861  typentry->typtype == TYPTYPE_DOMAIN)
862  {
863  load_domaintype_info(typentry);
864  }
865 
866  return typentry;
867 }
868 
869 /*
870  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
871  */
872 static void
/* NOTE(review): the signature line (original 873,
 * presumably "load_typcache_tupdesc(TypeCacheEntry *typentry)" per the
 * forward declaration earlier in the file) was lost in extraction. */
874 {
875  Relation rel;
876 
877  if (!OidIsValid(typentry->typrelid)) /* should not happen */
878  elog(ERROR, "invalid typrelid for composite type %u",
879  typentry->type_id);
880  rel = relation_open(typentry->typrelid, AccessShareLock);
881  Assert(rel->rd_rel->reltype == typentry->type_id);
882 
883  /*
884  * Link to the tupdesc and increment its refcount (we assert it's a
885  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
886  * because the reference mustn't be entered in the current resource owner;
887  * it can outlive the current query.
888  */
889  typentry->tupDesc = RelationGetDescr(rel);
890 
891  Assert(typentry->tupDesc->tdrefcount > 0);
892  typentry->tupDesc->tdrefcount++;
893 
894  /*
895  * In future, we could take some pains to not change tupDesc_identifier if
896  * the tupdesc didn't really change; but for now it's not worth it.
897  */
/* NOTE(review): original line 898 (assignment of a fresh
 * tupDesc_identifier) was lost in extraction — verify against upstream. */
899 
/* NOTE(review): original line 900 (closing the relation, releasing the
 * AccessShareLock taken above) was lost in extraction. */
901 }
902 
903 /*
904  * load_rangetype_info --- helper routine to set up range type information
905  */
906 static void
/* NOTE(review): the signature line (original 907, presumably
 * "load_rangetype_info(TypeCacheEntry *typentry)" per the forward
 * declaration) was lost in extraction. */
908 {
909  Form_pg_range pg_range;
910  HeapTuple tup;
911  Oid subtypeOid;
912  Oid opclassOid;
913  Oid canonicalOid;
914  Oid subdiffOid;
915  Oid opfamilyOid;
916  Oid opcintype;
917  Oid cmpFnOid;
918 
919  /* get information from pg_range */
/* NOTE(review): original line 920 (the syscache lookup that assigns 'tup')
 * was lost in extraction — restore from upstream before compiling. */
921  /* should not fail, since we already checked typtype ... */
922  if (!HeapTupleIsValid(tup))
923  elog(ERROR, "cache lookup failed for range type %u",
924  typentry->type_id);
925  pg_range = (Form_pg_range) GETSTRUCT(tup);
926 
/* Copy out the fields we need before releasing the syscache tuple */
927  subtypeOid = pg_range->rngsubtype;
928  typentry->rng_collation = pg_range->rngcollation;
929  opclassOid = pg_range->rngsubopc;
930  canonicalOid = pg_range->rngcanonical;
931  subdiffOid = pg_range->rngsubdiff;
932 
933  ReleaseSysCache(tup);
934 
935  /* get opclass properties and look up the comparison function */
936  opfamilyOid = get_opclass_family(opclassOid);
937  opcintype = get_opclass_input_type(opclassOid);
938 
939  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
940  BTORDER_PROC);
941  if (!RegProcedureIsValid(cmpFnOid))
942  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
943  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
944 
945  /* set up cached fmgrinfo structs */
946  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
/* NOTE(review): the memory-context arguments of these three fmgr_info_cxt
 * calls (original lines 947, 950, 953) were lost in extraction. */
948  if (OidIsValid(canonicalOid))
949  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
951  if (OidIsValid(subdiffOid))
952  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
954 
955  /* Lastly, set up link to the element type --- this marks data valid */
956  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
957 }
958 
959 /*
960  * load_multirangetype_info --- helper routine to set up multirange type
961  * information
962  */
963 static void
/* NOTE(review): the signature line (original 964, presumably
 * "load_multirangetype_info(TypeCacheEntry *typentry)" per the forward
 * declaration) was lost in extraction. */
965 {
966  Oid rangetypeOid;
967 
/* Map the multirange type to its underlying range type */
968  rangetypeOid = get_multirange_range(typentry->type_id);
969  if (!OidIsValid(rangetypeOid))
970  elog(ERROR, "cache lookup failed for multirange type %u",
971  typentry->type_id);
972 
/* Cache the range type's entry, forcing its range info to be loaded too */
973  typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
974 }
975 
976 /*
977  * load_domaintype_info --- helper routine to set up domain constraint info
978  *
979  * Note: we assume we're called in a relatively short-lived context, so it's
980  * okay to leak data into the current context while scanning pg_constraint.
981  * We build the new DomainConstraintCache data in a context underneath
982  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
983  * complete.
984  */
985 static void
987 {
988  Oid typeOid = typentry->type_id;
990  bool notNull = false;
991  DomainConstraintState **ccons;
992  int cconslen;
993  Relation conRel;
994  MemoryContext oldcxt;
995 
996  /*
997  * If we're here, any existing constraint info is stale, so release it.
998  * For safety, be sure to null the link before trying to delete the data.
999  */
1000  if (typentry->domainData)
1001  {
1002  dcc = typentry->domainData;
1003  typentry->domainData = NULL;
1004  decr_dcc_refcount(dcc);
1005  }
1006 
1007  /*
1008  * We try to optimize the common case of no domain constraints, so don't
1009  * create the dcc object and context until we find a constraint. Likewise
1010  * for the temp sorting array.
1011  */
1012  dcc = NULL;
1013  ccons = NULL;
1014  cconslen = 0;
1015 
1016  /*
1017  * Scan pg_constraint for relevant constraints. We want to find
1018  * constraints for not just this domain, but any ancestor domains, so the
1019  * outer loop crawls up the domain stack.
1020  */
1021  conRel = table_open(ConstraintRelationId, AccessShareLock);
1022 
1023  for (;;)
1024  {
1025  HeapTuple tup;
1026  HeapTuple conTup;
1027  Form_pg_type typTup;
1028  int nccons = 0;
1029  ScanKeyData key[1];
1030  SysScanDesc scan;
1031 
1032  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1033  if (!HeapTupleIsValid(tup))
1034  elog(ERROR, "cache lookup failed for type %u", typeOid);
1035  typTup = (Form_pg_type) GETSTRUCT(tup);
1036 
1037  if (typTup->typtype != TYPTYPE_DOMAIN)
1038  {
1039  /* Not a domain, so done */
1040  ReleaseSysCache(tup);
1041  break;
1042  }
1043 
1044  /* Test for NOT NULL Constraint */
1045  if (typTup->typnotnull)
1046  notNull = true;
1047 
1048  /* Look for CHECK Constraints on this domain */
1049  ScanKeyInit(&key[0],
1050  Anum_pg_constraint_contypid,
1051  BTEqualStrategyNumber, F_OIDEQ,
1052  ObjectIdGetDatum(typeOid));
1053 
1054  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1055  NULL, 1, key);
1056 
1057  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1058  {
1060  Datum val;
1061  bool isNull;
1062  char *constring;
1063  Expr *check_expr;
1065 
1066  /* Ignore non-CHECK constraints (presently, shouldn't be any) */
1067  if (c->contype != CONSTRAINT_CHECK)
1068  continue;
1069 
1070  /* Not expecting conbin to be NULL, but we'll test for it anyway */
1071  val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1072  conRel->rd_att, &isNull);
1073  if (isNull)
1074  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1075  NameStr(typTup->typname), NameStr(c->conname));
1076 
1077  /* Convert conbin to C string in caller context */
1078  constring = TextDatumGetCString(val);
1079 
1080  /* Create the DomainConstraintCache object and context if needed */
1081  if (dcc == NULL)
1082  {
1083  MemoryContext cxt;
1084 
1086  "Domain constraints",
1088  dcc = (DomainConstraintCache *)
1090  dcc->constraints = NIL;
1091  dcc->dccContext = cxt;
1092  dcc->dccRefCount = 0;
1093  }
1094 
1095  /* Create node trees in DomainConstraintCache's context */
1096  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1097 
1098  check_expr = (Expr *) stringToNode(constring);
1099 
1100  /*
1101  * Plan the expression, since ExecInitExpr will expect that.
1102  *
1103  * Note: caching the result of expression_planner() is not very
1104  * good practice. Ideally we'd use a CachedExpression here so
1105  * that we would react promptly to, eg, changes in inlined
1106  * functions. However, because we don't support mutable domain
1107  * CHECK constraints, it's not really clear that it's worth the
1108  * extra overhead to do that.
1109  */
1110  check_expr = expression_planner(check_expr);
1111 
1114  r->name = pstrdup(NameStr(c->conname));
1115  r->check_expr = check_expr;
1116  r->check_exprstate = NULL;
1117 
1118  MemoryContextSwitchTo(oldcxt);
1119 
1120  /* Accumulate constraints in an array, for sorting below */
1121  if (ccons == NULL)
1122  {
1123  cconslen = 8;
1124  ccons = (DomainConstraintState **)
1125  palloc(cconslen * sizeof(DomainConstraintState *));
1126  }
1127  else if (nccons >= cconslen)
1128  {
1129  cconslen *= 2;
1130  ccons = (DomainConstraintState **)
1131  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1132  }
1133  ccons[nccons++] = r;
1134  }
1135 
1136  systable_endscan(scan);
1137 
1138  if (nccons > 0)
1139  {
1140  /*
1141  * Sort the items for this domain, so that CHECKs are applied in a
1142  * deterministic order.
1143  */
1144  if (nccons > 1)
1145  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1146 
1147  /*
1148  * Now attach them to the overall list. Use lcons() here because
1149  * constraints of parent domains should be applied earlier.
1150  */
1151  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1152  while (nccons > 0)
1153  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1154  MemoryContextSwitchTo(oldcxt);
1155  }
1156 
1157  /* loop to next domain in stack */
1158  typeOid = typTup->typbasetype;
1159  ReleaseSysCache(tup);
1160  }
1161 
1162  table_close(conRel, AccessShareLock);
1163 
1164  /*
1165  * Only need to add one NOT NULL check regardless of how many domains in
1166  * the stack request it.
1167  */
1168  if (notNull)
1169  {
1171 
1172  /* Create the DomainConstraintCache object and context if needed */
1173  if (dcc == NULL)
1174  {
1175  MemoryContext cxt;
1176 
1178  "Domain constraints",
1180  dcc = (DomainConstraintCache *)
1182  dcc->constraints = NIL;
1183  dcc->dccContext = cxt;
1184  dcc->dccRefCount = 0;
1185  }
1186 
1187  /* Create node trees in DomainConstraintCache's context */
1188  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1189 
1191 
1193  r->name = pstrdup("NOT NULL");
1194  r->check_expr = NULL;
1195  r->check_exprstate = NULL;
1196 
1197  /* lcons to apply the nullness check FIRST */
1198  dcc->constraints = lcons(r, dcc->constraints);
1199 
1200  MemoryContextSwitchTo(oldcxt);
1201  }
1202 
1203  /*
1204  * If we made a constraint object, move it into CacheMemoryContext and
1205  * attach it to the typcache entry.
1206  */
1207  if (dcc)
1208  {
1210  typentry->domainData = dcc;
1211  dcc->dccRefCount++; /* count the typcache's reference */
1212  }
1213 
1214  /* Either way, the typcache entry's domain data is now valid. */
1216 }
1217 
1218 /*
1219  * qsort comparator to sort DomainConstraintState pointers by name
1220  */
1221 static int
1222 dcs_cmp(const void *a, const void *b)
1223 {
1224  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1225  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1226 
1227  return strcmp((*ca)->name, (*cb)->name);
1228 }
1229 
1230 /*
1231  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1232  * and free it if no references remain
1233  */
1234 static void
1236 {
1237  Assert(dcc->dccRefCount > 0);
1238  if (--(dcc->dccRefCount) <= 0)
1240 }
1241 
1242 /*
1243  * Context reset/delete callback for a DomainConstraintRef
1244  */
1245 static void
1247 {
1249  DomainConstraintCache *dcc = ref->dcc;
1250 
1251  /* Paranoia --- be sure link is nulled before trying to release */
1252  if (dcc)
1253  {
1254  ref->constraints = NIL;
1255  ref->dcc = NULL;
1256  decr_dcc_refcount(dcc);
1257  }
1258 }
1259 
1260 /*
1261  * prep_domain_constraints --- prepare domain constraints for execution
1262  *
1263  * The expression trees stored in the DomainConstraintCache's list are
1264  * converted to executable expression state trees stored in execctx.
1265  */
1266 static List *
1268 {
1269  List *result = NIL;
1270  MemoryContext oldcxt;
1271  ListCell *lc;
1272 
1273  oldcxt = MemoryContextSwitchTo(execctx);
1274 
1275  foreach(lc, constraints)
1276  {
1278  DomainConstraintState *newr;
1279 
1281  newr->constrainttype = r->constrainttype;
1282  newr->name = r->name;
1283  newr->check_expr = r->check_expr;
1284  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1285 
1286  result = lappend(result, newr);
1287  }
1288 
1289  MemoryContextSwitchTo(oldcxt);
1290 
1291  return result;
1292 }
1293 
1294 /*
1295  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1296  *
1297  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1298  * lives. The ref will be cleaned up when that context is reset/deleted.
1299  *
1300  * Caller must also tell us whether it wants check_exprstate fields to be
1301  * computed in the DomainConstraintState nodes attached to this ref.
1302  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1303  */
1304 void
1306  MemoryContext refctx, bool need_exprstate)
1307 {
1308  /* Look up the typcache entry --- we assume it survives indefinitely */
1310  ref->need_exprstate = need_exprstate;
1311  /* For safety, establish the callback before acquiring a refcount */
1312  ref->refctx = refctx;
1313  ref->dcc = NULL;
1315  ref->callback.arg = (void *) ref;
1317  /* Acquire refcount if there are constraints, and set up exported list */
1318  if (ref->tcache->domainData)
1319  {
1320  ref->dcc = ref->tcache->domainData;
1321  ref->dcc->dccRefCount++;
1322  if (ref->need_exprstate)
1324  ref->refctx);
1325  else
1326  ref->constraints = ref->dcc->constraints;
1327  }
1328  else
1329  ref->constraints = NIL;
1330 }
1331 
1332 /*
1333  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1334  *
1335  * If the domain's constraint set changed, ref->constraints is updated to
1336  * point at a new list of cached constraints.
1337  *
1338  * In the normal case where nothing happened to the domain, this is cheap
1339  * enough that it's reasonable (and expected) to check before *each* use
1340  * of the constraint info.
1341  */
1342 void
1344 {
1345  TypeCacheEntry *typentry = ref->tcache;
1346 
1347  /* Make sure typcache entry's data is up to date */
1348  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1349  typentry->typtype == TYPTYPE_DOMAIN)
1350  load_domaintype_info(typentry);
1351 
1352  /* Transfer to ref object if there's new info, adjusting refcounts */
1353  if (ref->dcc != typentry->domainData)
1354  {
1355  /* Paranoia --- be sure link is nulled before trying to release */
1356  DomainConstraintCache *dcc = ref->dcc;
1357 
1358  if (dcc)
1359  {
1360  /*
1361  * Note: we just leak the previous list of executable domain
1362  * constraints. Alternatively, we could keep those in a child
1363  * context of ref->refctx and free that context at this point.
1364  * However, in practice this code path will be taken so seldom
1365  * that the extra bookkeeping for a child context doesn't seem
1366  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1367  */
1368  ref->constraints = NIL;
1369  ref->dcc = NULL;
1370  decr_dcc_refcount(dcc);
1371  }
1372  dcc = typentry->domainData;
1373  if (dcc)
1374  {
1375  ref->dcc = dcc;
1376  dcc->dccRefCount++;
1377  if (ref->need_exprstate)
1379  ref->refctx);
1380  else
1381  ref->constraints = dcc->constraints;
1382  }
1383  }
1384 }
1385 
1386 /*
1387  * DomainHasConstraints --- utility routine to check if a domain has constraints
1388  *
1389  * This is defined to return false, not fail, if type is not a domain.
1390  */
1391 bool
1393 {
1394  TypeCacheEntry *typentry;
1395 
1396  /*
1397  * Note: a side effect is to cause the typcache's domain data to become
1398  * valid. This is fine since we'll likely need it soon if there is any.
1399  */
1400  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1401 
1402  return (typentry->domainData != NULL);
1403 }
1404 
1405 
1406 /*
1407  * array_element_has_equality and friends are helper routines to check
1408  * whether we should believe that array_eq and related functions will work
1409  * on the given array type or composite type.
1410  *
1411  * The logic above may call these repeatedly on the same type entry, so we
1412  * make use of the typentry->flags field to cache the results once known.
1413  * Also, we assume that we'll probably want all these facts about the type
1414  * if we want any, so we cache them all using only one lookup of the
1415  * component datatype(s).
1416  */
1417 
1418 static bool
1420 {
1421  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1423  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1424 }
1425 
1426 static bool
1428 {
1429  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1431  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1432 }
1433 
1434 static bool
1436 {
1437  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1439  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1440 }
1441 
1442 static bool
1444 {
1445  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1447  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1448 }
1449 
1450 static void
1452 {
1453  Oid elem_type = get_base_element_type(typentry->type_id);
1454 
1455  if (OidIsValid(elem_type))
1456  {
1457  TypeCacheEntry *elementry;
1458 
1459  elementry = lookup_type_cache(elem_type,
1464  if (OidIsValid(elementry->eq_opr))
1465  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1466  if (OidIsValid(elementry->cmp_proc))
1467  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1468  if (OidIsValid(elementry->hash_proc))
1469  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1470  if (OidIsValid(elementry->hash_extended_proc))
1472  }
1474 }
1475 
1476 /*
1477  * Likewise, some helper functions for composite types.
1478  */
1479 
1480 static bool
1482 {
1483  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1485  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1486 }
1487 
1488 static bool
1490 {
1491  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1493  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1494 }
1495 
1496 static bool
1498 {
1499  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1501  return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1502 }
1503 
1504 static bool
1506 {
1507  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1509  return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1510 }
1511 
1512 static void
1514 {
1515  /*
1516  * For type RECORD, we can't really tell what will work, since we don't
1517  * have access here to the specific anonymous type. Just assume that
1518  * equality and comparison will (we may get a failure at runtime). We
1519  * could also claim that hashing works, but then if code that has the
1520  * option between a comparison-based (sort-based) and a hash-based plan
1521  * chooses hashing, stuff could fail that would otherwise work if it chose
1522  * a comparison-based plan. In practice more types support comparison
1523  * than hashing.
1524  */
1525  if (typentry->type_id == RECORDOID)
1526  {
1527  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1529  }
1530  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1531  {
1532  TupleDesc tupdesc;
1533  int newflags;
1534  int i;
1535 
1536  /* Fetch composite type's tupdesc if we don't have it already */
1537  if (typentry->tupDesc == NULL)
1538  load_typcache_tupdesc(typentry);
1539  tupdesc = typentry->tupDesc;
1540 
1541  /* Must bump the refcount while we do additional catalog lookups */
1542  IncrTupleDescRefCount(tupdesc);
1543 
1544  /* Have each property if all non-dropped fields have the property */
1545  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1549  for (i = 0; i < tupdesc->natts; i++)
1550  {
1551  TypeCacheEntry *fieldentry;
1552  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1553 
1554  if (attr->attisdropped)
1555  continue;
1556 
1557  fieldentry = lookup_type_cache(attr->atttypid,
1562  if (!OidIsValid(fieldentry->eq_opr))
1563  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1564  if (!OidIsValid(fieldentry->cmp_proc))
1565  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1566  if (!OidIsValid(fieldentry->hash_proc))
1567  newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1568  if (!OidIsValid(fieldentry->hash_extended_proc))
1570 
1571  /* We can drop out of the loop once we disprove all bits */
1572  if (newflags == 0)
1573  break;
1574  }
1575  typentry->flags |= newflags;
1576 
1577  DecrTupleDescRefCount(tupdesc);
1578  }
1579  else if (typentry->typtype == TYPTYPE_DOMAIN)
1580  {
1581  /* If it's domain over composite, copy base type's properties */
1582  TypeCacheEntry *baseentry;
1583 
1584  /* load up basetype info if we didn't already */
1585  if (typentry->domainBaseType == InvalidOid)
1586  {
1587  typentry->domainBaseTypmod = -1;
1588  typentry->domainBaseType =
1589  getBaseTypeAndTypmod(typentry->type_id,
1590  &typentry->domainBaseTypmod);
1591  }
1592  baseentry = lookup_type_cache(typentry->domainBaseType,
1597  if (baseentry->typtype == TYPTYPE_COMPOSITE)
1598  {
1600  typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1604  }
1605  }
1607 }
1608 
1609 /*
1610  * Likewise, some helper functions for range and multirange types.
1611  *
1612  * We can borrow the flag bits for array element properties to use for range
1613  * element properties, since those flag bits otherwise have no use in a
1614  * range or multirange type's typcache entry.
1615  */
1616 
1617 static bool
1619 {
1620  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1622  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1623 }
1624 
1625 static bool
1627 {
1628  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1630  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1631 }
1632 
1633 static void
1635 {
1636  /* load up subtype link if we didn't already */
1637  if (typentry->rngelemtype == NULL &&
1638  typentry->typtype == TYPTYPE_RANGE)
1639  load_rangetype_info(typentry);
1640 
1641  if (typentry->rngelemtype != NULL)
1642  {
1643  TypeCacheEntry *elementry;
1644 
1645  /* might need to calculate subtype's hash function properties */
1646  elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1649  if (OidIsValid(elementry->hash_proc))
1650  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1651  if (OidIsValid(elementry->hash_extended_proc))
1653  }
1655 }
1656 
1657 static bool
1659 {
1660  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1662  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1663 }
1664 
1665 static bool
1667 {
1668  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1670  return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1671 }
1672 
1673 static void
1675 {
1676  /* load up range link if we didn't already */
1677  if (typentry->rngtype == NULL &&
1678  typentry->typtype == TYPTYPE_MULTIRANGE)
1679  load_multirangetype_info(typentry);
1680 
1681  if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1682  {
1683  TypeCacheEntry *elementry;
1684 
1685  /* might need to calculate subtype's hash function properties */
1686  elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1689  if (OidIsValid(elementry->hash_proc))
1690  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1691  if (OidIsValid(elementry->hash_extended_proc))
1693  }
1695 }
1696 
1697 /*
1698  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1699  * to store 'typmod'.
1700  */
1701 static void
1703 {
1704  if (RecordCacheArray == NULL)
1705  {
1706  RecordCacheArray = (TupleDesc *)
1708  RecordIdentifierArray = (uint64 *)
1709  MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
1710  RecordCacheArrayLen = 64;
1711  }
1712 
1713  if (typmod >= RecordCacheArrayLen)
1714  {
1715  int32 newlen = pg_nextpower2_32(typmod + 1);
1716 
1717  RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
1718  newlen * sizeof(TupleDesc));
1719  memset(RecordCacheArray + RecordCacheArrayLen, 0,
1720  (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
1722  newlen * sizeof(uint64));
1724  (newlen - RecordCacheArrayLen) * sizeof(uint64));
1725  RecordCacheArrayLen = newlen;
1726  }
1727 }
1728 
1729 /*
1730  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1731  *
1732  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1733  * hasn't had its refcount bumped.
1734  */
1735 static TupleDesc
1736 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1737 {
1738  if (type_id != RECORDOID)
1739  {
1740  /*
1741  * It's a named composite type, so use the regular typcache.
1742  */
1743  TypeCacheEntry *typentry;
1744 
1745  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1746  if (typentry->tupDesc == NULL && !noError)
1747  ereport(ERROR,
1748  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1749  errmsg("type %s is not composite",
1750  format_type_be(type_id))));
1751  return typentry->tupDesc;
1752  }
1753  else
1754  {
1755  /*
1756  * It's a transient record type, so look in our record-type table.
1757  */
1758  if (typmod >= 0)
1759  {
1760  /* It is already in our local cache? */
1761  if (typmod < RecordCacheArrayLen &&
1762  RecordCacheArray[typmod] != NULL)
1763  return RecordCacheArray[typmod];
1764 
1765  /* Are we attached to a shared record typmod registry? */
1767  {
1768  SharedTypmodTableEntry *entry;
1769 
1770  /* Try to find it in the shared typmod index. */
1772  &typmod, false);
1773  if (entry != NULL)
1774  {
1775  TupleDesc tupdesc;
1776 
1777  tupdesc = (TupleDesc)
1779  entry->shared_tupdesc);
1780  Assert(typmod == tupdesc->tdtypmod);
1781 
1782  /* We may need to extend the local RecordCacheArray. */
1784 
1785  /*
1786  * Our local array can now point directly to the TupleDesc
1787  * in shared memory, which is non-reference-counted.
1788  */
1789  RecordCacheArray[typmod] = tupdesc;
1790  Assert(tupdesc->tdrefcount == -1);
1791 
1792  /*
1793  * We don't share tupdesc identifiers across processes, so
1794  * assign one locally.
1795  */
1797 
1799  entry);
1800 
1801  return RecordCacheArray[typmod];
1802  }
1803  }
1804  }
1805 
1806  if (!noError)
1807  ereport(ERROR,
1808  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1809  errmsg("record type has not been registered")));
1810  return NULL;
1811  }
1812 }
1813 
1814 /*
1815  * lookup_rowtype_tupdesc
1816  *
1817  * Given a typeid/typmod that should describe a known composite type,
1818  * return the tuple descriptor for the type. Will ereport on failure.
1819  * (Use ereport because this is reachable with user-specified OIDs,
1820  * for example from record_in().)
1821  *
1822  * Note: on success, we increment the refcount of the returned TupleDesc,
1823  * and log the reference in CurrentResourceOwner. Caller should call
1824  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1825  */
1826 TupleDesc
1828 {
1829  TupleDesc tupDesc;
1830 
1831  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1832  PinTupleDesc(tupDesc);
1833  return tupDesc;
1834 }
1835 
1836 /*
1837  * lookup_rowtype_tupdesc_noerror
1838  *
1839  * As above, but if the type is not a known composite type and noError
1840  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1841  * type_id is passed, you'll get an ereport anyway.)
1842  */
1843 TupleDesc
1844 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1845 {
1846  TupleDesc tupDesc;
1847 
1848  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1849  if (tupDesc != NULL)
1850  PinTupleDesc(tupDesc);
1851  return tupDesc;
1852 }
1853 
1854 /*
1855  * lookup_rowtype_tupdesc_copy
1856  *
1857  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1858  * copied into the CurrentMemoryContext and is not reference-counted.
1859  */
1860 TupleDesc
1862 {
1863  TupleDesc tmp;
1864 
1865  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1866  return CreateTupleDescCopyConstr(tmp);
1867 }
1868 
1869 /*
1870  * lookup_rowtype_tupdesc_domain
1871  *
1872  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1873  * a domain over a named composite type; so this is effectively equivalent to
1874  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1875  * except for being a tad faster.
1876  *
1877  * Note: the reason we don't fold the look-through-domain behavior into plain
1878  * lookup_rowtype_tupdesc() is that we want callers to know they might be
1879  * dealing with a domain. Otherwise they might construct a tuple that should
1880  * be of the domain type, but not apply domain constraints.
1881  */
1882 TupleDesc
1883 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1884 {
1885  TupleDesc tupDesc;
1886 
1887  if (type_id != RECORDOID)
1888  {
1889  /*
1890  * Check for domain or named composite type. We might as well load
1891  * whichever data is needed.
1892  */
1893  TypeCacheEntry *typentry;
1894 
1895  typentry = lookup_type_cache(type_id,
1898  if (typentry->typtype == TYPTYPE_DOMAIN)
1900  typentry->domainBaseTypmod,
1901  noError);
1902  if (typentry->tupDesc == NULL && !noError)
1903  ereport(ERROR,
1904  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1905  errmsg("type %s is not composite",
1906  format_type_be(type_id))));
1907  tupDesc = typentry->tupDesc;
1908  }
1909  else
1910  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1911  if (tupDesc != NULL)
1912  PinTupleDesc(tupDesc);
1913  return tupDesc;
1914 }
1915 
1916 /*
1917  * Hash function for the hash table of RecordCacheEntry.
1918  */
1919 static uint32
1920 record_type_typmod_hash(const void *data, size_t size)
1921 {
1922  RecordCacheEntry *entry = (RecordCacheEntry *) data;
1923 
1924  return hashTupleDesc(entry->tupdesc);
1925 }
1926 
1927 /*
1928  * Match function for the hash table of RecordCacheEntry.
1929  */
1930 static int
1931 record_type_typmod_compare(const void *a, const void *b, size_t size)
1932 {
1933  RecordCacheEntry *left = (RecordCacheEntry *) a;
1934  RecordCacheEntry *right = (RecordCacheEntry *) b;
1935 
1936  return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1937 }
1938 
1939 /*
1940  * assign_record_type_typmod
1941  *
1942  * Given a tuple descriptor for a RECORD type, find or create a cache entry
1943  * for the type, and set the tupdesc's tdtypmod field to a value that will
1944  * identify this cache entry to lookup_rowtype_tupdesc.
1945  */
1946 void
1948 {
1949  RecordCacheEntry *recentry;
1950  TupleDesc entDesc;
1951  bool found;
1952  MemoryContext oldcxt;
1953 
1954  Assert(tupDesc->tdtypeid == RECORDOID);
1955 
1956  if (RecordCacheHash == NULL)
1957  {
1958  /* First time through: initialize the hash table */
1959  HASHCTL ctl;
1960 
1961  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
1962  ctl.entrysize = sizeof(RecordCacheEntry);
1965  RecordCacheHash = hash_create("Record information cache", 64,
1966  &ctl,
1968 
1969  /* Also make sure CacheMemoryContext exists */
1970  if (!CacheMemoryContext)
1972  }
1973 
1974  /*
1975  * Find a hashtable entry for this tuple descriptor. We don't use
1976  * HASH_ENTER yet, because if it's missing, we need to make sure that all
1977  * the allocations succeed before we create the new entry.
1978  */
1979  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
1980  (void *) &tupDesc,
1981  HASH_FIND, &found);
1982  if (found && recentry->tupdesc != NULL)
1983  {
1984  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
1985  return;
1986  }
1987 
1988  /* Not present, so need to manufacture an entry */
1990 
1991  /* Look in the SharedRecordTypmodRegistry, if attached */
1992  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
1993  if (entDesc == NULL)
1994  {
1995  /*
1996  * Make sure we have room before we CreateTupleDescCopy() or advance
1997  * NextRecordTypmod.
1998  */
2000 
2001  /* Reference-counted local cache only. */
2002  entDesc = CreateTupleDescCopy(tupDesc);
2003  entDesc->tdrefcount = 1;
2004  entDesc->tdtypmod = NextRecordTypmod++;
2005  }
2006  else
2007  {
2009  }
2010 
2011  RecordCacheArray[entDesc->tdtypmod] = entDesc;
2012 
2013  /* Assign a unique tupdesc identifier, too. */
2015 
2016  /* Fully initialized; create the hash table entry */
2017  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2018  (void *) &tupDesc,
2019  HASH_ENTER, NULL);
2020  recentry->tupdesc = entDesc;
2021 
2022  /* Update the caller's tuple descriptor. */
2023  tupDesc->tdtypmod = entDesc->tdtypmod;
2024 
2025  MemoryContextSwitchTo(oldcxt);
2026 }
2027 
2028 /*
2029  * assign_record_type_identifier
2030  *
2031  * Get an identifier, which will be unique over the lifespan of this backend
2032  * process, for the current tuple descriptor of the specified composite type.
2033  * For named composite types, the value is guaranteed to change if the type's
2034  * definition does. For registered RECORD types, the value will not change
2035  * once assigned, since the registered type won't either. If an anonymous
2036  * RECORD type is specified, we return a new identifier on each call.
2037  */
2038 uint64
2040 {
2041  if (type_id != RECORDOID)
2042  {
2043  /*
2044  * It's a named composite type, so use the regular typcache.
2045  */
2046  TypeCacheEntry *typentry;
2047 
2048  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2049  if (typentry->tupDesc == NULL)
2050  ereport(ERROR,
2051  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2052  errmsg("type %s is not composite",
2053  format_type_be(type_id))));
2054  Assert(typentry->tupDesc_identifier != 0);
2055  return typentry->tupDesc_identifier;
2056  }
2057  else
2058  {
2059  /*
2060  * It's a transient record type, so look in our record-type table.
2061  */
2062  if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2063  RecordCacheArray[typmod] != NULL)
2064  {
2065  Assert(RecordIdentifierArray[typmod] != 0);
2066  return RecordIdentifierArray[typmod];
2067  }
2068 
2069  /* For anonymous or unrecognized record type, generate a new ID */
2070  return ++tupledesc_id_counter;
2071  }
2072 }
2073 
2074 /*
2075  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2076  * This exists only to avoid exposing private innards of
2077  * SharedRecordTypmodRegistry in a header.
2078  */
2079 size_t
2081 {
2082  return sizeof(SharedRecordTypmodRegistry);
2083 }
2084 
2085 /*
2086  * Initialize 'registry' in a pre-existing shared memory region, which must be
2087  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2088  * bytes.
2089  *
2090  * 'area' will be used to allocate shared memory space as required for the
2091  * typemod registration. The current process, expected to be a leader process
2092  * in a parallel query, will be attached automatically and its current record
2093  * types will be loaded into *registry. While attached, all calls to
2094  * assign_record_type_typmod will use the shared registry. Worker backends
2095  * will need to attach explicitly.
2096  *
2097  * Note that this function takes 'area' and 'segment' as arguments rather than
2098  * accessing them via CurrentSession, because they aren't installed there
2099  * until after this function runs.
2100  */
2101 void
2103  dsm_segment *segment,
2104  dsa_area *area)
2105 {
2106  MemoryContext old_context;
2107  dshash_table *record_table;
2108  dshash_table *typmod_table;
2109  int32 typmod;
2110 
2112 
2113  /* We can't already be attached to a shared registry. */
2117 
2118  old_context = MemoryContextSwitchTo(TopMemoryContext);
2119 
2120  /* Create the hash table of tuple descriptors indexed by themselves. */
2121  record_table = dshash_create(area, &srtr_record_table_params, area);
2122 
2123  /* Create the hash table of tuple descriptors indexed by typmod. */
2124  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2125 
2126  MemoryContextSwitchTo(old_context);
2127 
2128  /* Initialize the SharedRecordTypmodRegistry. */
2129  registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2130  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2132 
2133  /*
2134  * Copy all entries from this backend's private registry into the shared
2135  * registry.
2136  */
2137  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2138  {
2139  SharedTypmodTableEntry *typmod_table_entry;
2140  SharedRecordTableEntry *record_table_entry;
2141  SharedRecordTableKey record_table_key;
2142  dsa_pointer shared_dp;
2143  TupleDesc tupdesc;
2144  bool found;
2145 
2146  tupdesc = RecordCacheArray[typmod];
2147  if (tupdesc == NULL)
2148  continue;
2149 
2150  /* Copy the TupleDesc into shared memory. */
2151  shared_dp = share_tupledesc(area, tupdesc, typmod);
2152 
2153  /* Insert into the typmod table. */
2154  typmod_table_entry = dshash_find_or_insert(typmod_table,
2155  &tupdesc->tdtypmod,
2156  &found);
2157  if (found)
2158  elog(ERROR, "cannot create duplicate shared record typmod");
2159  typmod_table_entry->typmod = tupdesc->tdtypmod;
2160  typmod_table_entry->shared_tupdesc = shared_dp;
2161  dshash_release_lock(typmod_table, typmod_table_entry);
2162 
2163  /* Insert into the record table. */
2164  record_table_key.shared = false;
2165  record_table_key.u.local_tupdesc = tupdesc;
2166  record_table_entry = dshash_find_or_insert(record_table,
2167  &record_table_key,
2168  &found);
2169  if (!found)
2170  {
2171  record_table_entry->key.shared = true;
2172  record_table_entry->key.u.shared_tupdesc = shared_dp;
2173  }
2174  dshash_release_lock(record_table, record_table_entry);
2175  }
2176 
2177  /*
2178  * Set up the global state that will tell assign_record_type_typmod and
2179  * lookup_rowtype_tupdesc_internal about the shared registry.
2180  */
2181  CurrentSession->shared_record_table = record_table;
2182  CurrentSession->shared_typmod_table = typmod_table;
2184 
2185  /*
2186  * We install a detach hook in the leader, but only to handle cleanup on
2187  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2188  * the memory, the leader process will use a shared registry until it
2189  * exits.
2190  */
2192 }
2193 
2194 /*
2195  * Attach to 'registry', which must have been initialized already by another
2196  * backend. Future calls to assign_record_type_typmod and
2197  * lookup_rowtype_tupdesc_internal will use the shared registry until the
2198  * current session is detached.
2199  */
2200 void
2202 {
2203  MemoryContext old_context;
2204  dshash_table *record_table;
2205  dshash_table *typmod_table;
2206 
2208 
2209  /* We can't already be attached to a shared registry. */
2210  Assert(CurrentSession != NULL);
2211  Assert(CurrentSession->segment != NULL);
2212  Assert(CurrentSession->area != NULL);
2216 
2217  /*
2218  * We can't already have typmods in our local cache, because they'd clash
2219  * with those imported by SharedRecordTypmodRegistryInit. This should be
2220  * a freshly started parallel worker. If we ever support worker
2221  * recycling, a worker would need to zap its local cache in between
2222  * servicing different queries, in order to be able to call this and
2223  * synchronize typmods with a new leader; but that's problematic because
2224  * we can't be very sure that record-typmod-related state hasn't escaped
2225  * to anywhere else in the process.
2226  */
2227  Assert(NextRecordTypmod == 0);
2228 
2229  old_context = MemoryContextSwitchTo(TopMemoryContext);
2230 
2231  /* Attach to the two hash tables. */
2232  record_table = dshash_attach(CurrentSession->area,
2233  &srtr_record_table_params,
2234  registry->record_table_handle,
2235  CurrentSession->area);
2236  typmod_table = dshash_attach(CurrentSession->area,
2237  &srtr_typmod_table_params,
2238  registry->typmod_table_handle,
2239  NULL);
2240 
2241  MemoryContextSwitchTo(old_context);
2242 
2243  /*
2244  * Set up detach hook to run at worker exit. Currently this is the same
2245  * as the leader's detach hook, but in future they might need to be
2246  * different.
2247  */
2250  PointerGetDatum(registry));
2251 
2252  /*
2253  * Set up the session state that will tell assign_record_type_typmod and
2254  * lookup_rowtype_tupdesc_internal about the shared registry.
2255  */
2257  CurrentSession->shared_record_table = record_table;
2258  CurrentSession->shared_typmod_table = typmod_table;
2259 }
2260 
2261 /*
2262  * TypeCacheRelCallback
2263  * Relcache inval callback function
2264  *
2265  * Delete the cached tuple descriptor (if any) for the given rel's composite
2266  * type, or for all composite types if relid == InvalidOid. Also reset
2267  * whatever info we have cached about the composite type's comparability.
2268  *
2269  * This is called when a relcache invalidation event occurs for the given
2270  * relid. We must scan the whole typcache hash since we don't know the
2271  * type OID corresponding to the relid. We could do a direct search if this
2272  * were a syscache-flush callback on pg_type, but then we would need all
2273  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
2274  * invals against the rel's pg_type OID. The extra SI signaling could very
2275  * well cost more than we'd save, since in most usages there are not very
2276  * many entries in a backend's typcache. The risk of bugs-of-omission seems
2277  * high, too.
2278  *
2279  * Another possibility, with only localized impact, is to maintain a second
2280  * hashtable that indexes composite-type typcache entries by their typrelid.
2281  * But it's still not clear it's worth the trouble.
2282  */
2283 static void
2285 {
2287  TypeCacheEntry *typentry;
2288 
2289  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2290  hash_seq_init(&status, TypeCacheHash);
2291  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2292  {
2293  if (typentry->typtype == TYPTYPE_COMPOSITE)
2294  {
2295  /* Skip if no match, unless we're zapping all composite types */
2296  if (relid != typentry->typrelid && relid != InvalidOid)
2297  continue;
2298 
2299  /* Delete tupdesc if we have it */
2300  if (typentry->tupDesc != NULL)
2301  {
2302  /*
2303  * Release our refcount, and free the tupdesc if none remain.
2304  * (Can't use DecrTupleDescRefCount because this reference is
2305  * not logged in current resource owner.)
2306  */
2307  Assert(typentry->tupDesc->tdrefcount > 0);
2308  if (--typentry->tupDesc->tdrefcount == 0)
2309  FreeTupleDesc(typentry->tupDesc);
2310  typentry->tupDesc = NULL;
2311 
2312  /*
2313  * Also clear tupDesc_identifier, so that anything watching
2314  * that will realize that the tupdesc has possibly changed.
2315  * (Alternatively, we could specify that to detect possible
2316  * tupdesc change, one must check for tupDesc != NULL as well
2317  * as tupDesc_identifier being the same as what was previously
2318  * seen. That seems error-prone.)
2319  */
2320  typentry->tupDesc_identifier = 0;
2321  }
2322 
2323  /* Reset equality/comparison/hashing validity information */
2324  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2325  }
2326  else if (typentry->typtype == TYPTYPE_DOMAIN)
2327  {
2328  /*
2329  * If it's domain over composite, reset flags. (We don't bother
2330  * trying to determine whether the specific base type needs a
2331  * reset.) Note that if we haven't determined whether the base
2332  * type is composite, we don't need to reset anything.
2333  */
2334  if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2335  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2336  }
2337  }
2338 }
2339 
2340 /*
2341  * TypeCacheTypCallback
2342  * Syscache inval callback function
2343  *
2344  * This is called when a syscache invalidation event occurs for any
2345  * pg_type row. If we have information cached about that type, mark
2346  * it as needing to be reloaded.
2347  */
2348 static void
2349 TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2350 {
2352  TypeCacheEntry *typentry;
2353 
2354  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2355  hash_seq_init(&status, TypeCacheHash);
2356  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2357  {
2358  /* Is this the targeted type row (or it's a total cache flush)? */
2359  if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
2360  {
2361  /*
2362  * Mark the data obtained directly from pg_type as invalid. Also,
2363  * if it's a domain, typnotnull might've changed, so we'll need to
2364  * recalculate its constraints.
2365  */
2366  typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2368  }
2369  }
2370 }
2371 
2372 /*
2373  * TypeCacheOpcCallback
2374  * Syscache inval callback function
2375  *
2376  * This is called when a syscache invalidation event occurs for any pg_opclass
2377  * row. In principle we could probably just invalidate data dependent on the
2378  * particular opclass, but since updates on pg_opclass are rare in production
2379  * it doesn't seem worth a lot of complication: we just mark all cached data
2380  * invalid.
2381  *
2382  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2383  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2384  * is not allowed to be used to add/drop the primary operators and functions
2385  * of an opclass, only cross-type members of a family; and the latter sorts
2386  * of members are not going to get cached here.
2387  */
2388 static void
2389 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2390 {
2392  TypeCacheEntry *typentry;
2393 
2394  /* TypeCacheHash must exist, else this callback wouldn't be registered */
2395  hash_seq_init(&status, TypeCacheHash);
2396  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2397  {
2398  /* Reset equality/comparison/hashing validity information */
2399  typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2400  }
2401 }
2402 
2403 /*
2404  * TypeCacheConstrCallback
2405  * Syscache inval callback function
2406  *
2407  * This is called when a syscache invalidation event occurs for any
2408  * pg_constraint row. We flush information about domain constraints
2409  * when this happens.
2410  *
2411  * It's slightly annoying that we can't tell whether the inval event was for
2412  * a domain constraint record or not; there's usually more update traffic
2413  * for table constraints than domain constraints, so we'll do a lot of
2414  * useless flushes. Still, this is better than the old no-caching-at-all
2415  * approach to domain constraints.
2416  */
2417 static void
2418 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2419 {
2420  TypeCacheEntry *typentry;
2421 
2422  /*
2423  * Because this is called very frequently, and typically very few of the
2424  * typcache entries are for domains, we don't use hash_seq_search here.
2425  * Instead we thread all the domain-type entries together so that we can
2426  * visit them cheaply.
2427  */
2428  for (typentry = firstDomainTypeEntry;
2429  typentry != NULL;
2430  typentry = typentry->nextDomain)
2431  {
2432  /* Reset domain constraint validity information */
2434  }
2435 }
2436 
2437 
2438 /*
2439  * Check if given OID is part of the subset that's sortable by comparisons
2440  */
2441 static inline bool
2443 {
2444  Oid offset;
2445 
2446  if (arg < enumdata->bitmap_base)
2447  return false;
2448  offset = arg - enumdata->bitmap_base;
2449  if (offset > (Oid) INT_MAX)
2450  return false;
2451  return bms_is_member((int) offset, enumdata->sorted_values);
2452 }
2453 
2454 
2455 /*
2456  * compare_values_of_enum
2457  * Compare two members of an enum type.
2458  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2459  *
2460  * Note: currently, the enumData cache is refreshed only if we are asked
2461  * to compare an enum value that is not already in the cache. This is okay
2462  * because there is no support for re-ordering existing values, so comparisons
2463  * of previously cached values will return the right answer even if other
2464  * values have been added since we last loaded the cache.
2465  *
2466  * Note: the enum logic has a special-case rule about even-numbered versus
2467  * odd-numbered OIDs, but we take no account of that rule here; this
2468  * routine shouldn't even get called when that rule applies.
2469  */
2470 int
2472 {
2473  TypeCacheEnumData *enumdata;
2474  EnumItem *item1;
2475  EnumItem *item2;
2476 
2477  /*
2478  * Equal OIDs are certainly equal --- this case was probably handled by
2479  * our caller, but we may as well check.
2480  */
2481  if (arg1 == arg2)
2482  return 0;
2483 
2484  /* Load up the cache if first time through */
2485  if (tcache->enumData == NULL)
2486  load_enum_cache_data(tcache);
2487  enumdata = tcache->enumData;
2488 
2489  /*
2490  * If both OIDs are known-sorted, we can just compare them directly.
2491  */
2492  if (enum_known_sorted(enumdata, arg1) &&
2493  enum_known_sorted(enumdata, arg2))
2494  {
2495  if (arg1 < arg2)
2496  return -1;
2497  else
2498  return 1;
2499  }
2500 
2501  /*
2502  * Slow path: we have to identify their actual sort-order positions.
2503  */
2504  item1 = find_enumitem(enumdata, arg1);
2505  item2 = find_enumitem(enumdata, arg2);
2506 
2507  if (item1 == NULL || item2 == NULL)
2508  {
2509  /*
2510  * We couldn't find one or both values. That means the enum has
2511  * changed under us, so re-initialize the cache and try again. We
2512  * don't bother retrying the known-sorted case in this path.
2513  */
2514  load_enum_cache_data(tcache);
2515  enumdata = tcache->enumData;
2516 
2517  item1 = find_enumitem(enumdata, arg1);
2518  item2 = find_enumitem(enumdata, arg2);
2519 
2520  /*
2521  * If we still can't find the values, complain: we must have corrupt
2522  * data.
2523  */
2524  if (item1 == NULL)
2525  elog(ERROR, "enum value %u not found in cache for enum %s",
2526  arg1, format_type_be(tcache->type_id));
2527  if (item2 == NULL)
2528  elog(ERROR, "enum value %u not found in cache for enum %s",
2529  arg2, format_type_be(tcache->type_id));
2530  }
2531 
2532  if (item1->sort_order < item2->sort_order)
2533  return -1;
2534  else if (item1->sort_order > item2->sort_order)
2535  return 1;
2536  else
2537  return 0;
2538 }
2539 
2540 /*
2541  * Load (or re-load) the enumData member of the typcache entry.
2542  */
2543 static void
2545 {
2546  TypeCacheEnumData *enumdata;
2547  Relation enum_rel;
2548  SysScanDesc enum_scan;
2549  HeapTuple enum_tuple;
2550  ScanKeyData skey;
2551  EnumItem *items;
2552  int numitems;
2553  int maxitems;
2554  Oid bitmap_base;
2555  Bitmapset *bitmap;
2556  MemoryContext oldcxt;
2557  int bm_size,
2558  start_pos;
2559 
2560  /* Check that this is actually an enum */
2561  if (tcache->typtype != TYPTYPE_ENUM)
2562  ereport(ERROR,
2563  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2564  errmsg("%s is not an enum",
2565  format_type_be(tcache->type_id))));
2566 
2567  /*
2568  * Read all the information for members of the enum type. We collect the
2569  * info in working memory in the caller's context, and then transfer it to
2570  * permanent memory in CacheMemoryContext. This minimizes the risk of
2571  * leaking memory from CacheMemoryContext in the event of an error partway
2572  * through.
2573  */
2574  maxitems = 64;
2575  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2576  numitems = 0;
2577 
2578  /* Scan pg_enum for the members of the target enum type. */
2579  ScanKeyInit(&skey,
2580  Anum_pg_enum_enumtypid,
2581  BTEqualStrategyNumber, F_OIDEQ,
2582  ObjectIdGetDatum(tcache->type_id));
2583 
2584  enum_rel = table_open(EnumRelationId, AccessShareLock);
2585  enum_scan = systable_beginscan(enum_rel,
2586  EnumTypIdLabelIndexId,
2587  true, NULL,
2588  1, &skey);
2589 
2590  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2591  {
2592  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2593 
2594  if (numitems >= maxitems)
2595  {
2596  maxitems *= 2;
2597  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2598  }
2599  items[numitems].enum_oid = en->oid;
2600  items[numitems].sort_order = en->enumsortorder;
2601  numitems++;
2602  }
2603 
2604  systable_endscan(enum_scan);
2605  table_close(enum_rel, AccessShareLock);
2606 
2607  /* Sort the items into OID order */
2608  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2609 
2610  /*
2611  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2612  * known to be in order and can thus be compared with just OID comparison.
2613  *
2614  * The point of this is that the enum's initial OIDs were certainly in
2615  * order, so there is some subset that can be compared via OID comparison;
2616  * and we'd rather not do binary searches unnecessarily.
2617  *
2618  * This is somewhat heuristic, and might identify a subset of OIDs that
2619  * isn't exactly what the type started with. That's okay as long as the
2620  * subset is correctly sorted.
2621  */
2622  bitmap_base = InvalidOid;
2623  bitmap = NULL;
2624  bm_size = 1; /* only save sets of at least 2 OIDs */
2625 
2626  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2627  {
2628  /*
2629  * Identify longest sorted subsequence starting at start_pos
2630  */
2631  Bitmapset *this_bitmap = bms_make_singleton(0);
2632  int this_bm_size = 1;
2633  Oid start_oid = items[start_pos].enum_oid;
2634  float4 prev_order = items[start_pos].sort_order;
2635  int i;
2636 
2637  for (i = start_pos + 1; i < numitems; i++)
2638  {
2639  Oid offset;
2640 
2641  offset = items[i].enum_oid - start_oid;
2642  /* quit if bitmap would be too large; cutoff is arbitrary */
2643  if (offset >= 8192)
2644  break;
2645  /* include the item if it's in-order */
2646  if (items[i].sort_order > prev_order)
2647  {
2648  prev_order = items[i].sort_order;
2649  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2650  this_bm_size++;
2651  }
2652  }
2653 
2654  /* Remember it if larger than previous best */
2655  if (this_bm_size > bm_size)
2656  {
2657  bms_free(bitmap);
2658  bitmap_base = start_oid;
2659  bitmap = this_bitmap;
2660  bm_size = this_bm_size;
2661  }
2662  else
2663  bms_free(this_bitmap);
2664 
2665  /*
2666  * Done if it's not possible to find a longer sequence in the rest of
2667  * the list. In typical cases this will happen on the first
2668  * iteration, which is why we create the bitmaps on the fly instead of
2669  * doing a second pass over the list.
2670  */
2671  if (bm_size >= (numitems - start_pos - 1))
2672  break;
2673  }
2674 
2675  /* OK, copy the data into CacheMemoryContext */
2677  enumdata = (TypeCacheEnumData *)
2678  palloc(offsetof(TypeCacheEnumData, enum_values) +
2679  numitems * sizeof(EnumItem));
2680  enumdata->bitmap_base = bitmap_base;
2681  enumdata->sorted_values = bms_copy(bitmap);
2682  enumdata->num_values = numitems;
2683  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2684  MemoryContextSwitchTo(oldcxt);
2685 
2686  pfree(items);
2687  bms_free(bitmap);
2688 
2689  /* And link the finished cache struct into the typcache */
2690  if (tcache->enumData != NULL)
2691  pfree(tcache->enumData);
2692  tcache->enumData = enumdata;
2693 }
2694 
2695 /*
2696  * Locate the EnumItem with the given OID, if present
2697  */
2698 static EnumItem *
2700 {
2701  EnumItem srch;
2702 
2703  /* On some versions of Solaris, bsearch of zero items dumps core */
2704  if (enumdata->num_values <= 0)
2705  return NULL;
2706 
2707  srch.enum_oid = arg;
2708  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2709  sizeof(EnumItem), enum_oid_cmp);
2710 }
2711 
2712 /*
2713  * qsort comparison function for OID-ordered EnumItems
2714  */
2715 static int
2716 enum_oid_cmp(const void *left, const void *right)
2717 {
2718  const EnumItem *l = (const EnumItem *) left;
2719  const EnumItem *r = (const EnumItem *) right;
2720 
2721  if (l->enum_oid < r->enum_oid)
2722  return -1;
2723  else if (l->enum_oid > r->enum_oid)
2724  return 1;
2725  else
2726  return 0;
2727 }
2728 
2729 /*
2730  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2731  * to the given value and return a dsa_pointer.
2732  */
2733 static dsa_pointer
2734 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2735 {
2736  dsa_pointer shared_dp;
2737  TupleDesc shared;
2738 
2739  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2740  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2741  TupleDescCopy(shared, tupdesc);
2742  shared->tdtypmod = typmod;
2743 
2744  return shared_dp;
2745 }
2746 
2747 /*
2748  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2749  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2750  * Tuple descriptors returned by this function are not reference counted, and
2751  * will exist at least as long as the current backend remained attached to the
2752  * current session.
2753  */
2754 static TupleDesc
2756 {
2757  TupleDesc result;
2759  SharedRecordTableEntry *record_table_entry;
2760  SharedTypmodTableEntry *typmod_table_entry;
2761  dsa_pointer shared_dp;
2762  bool found;
2763  uint32 typmod;
2764 
2765  /* If not even attached, nothing to do. */
2767  return NULL;
2768 
2769  /* Try to find a matching tuple descriptor in the record table. */
2770  key.shared = false;
2771  key.u.local_tupdesc = tupdesc;
2772  record_table_entry = (SharedRecordTableEntry *)
2774  if (record_table_entry)
2775  {
2776  Assert(record_table_entry->key.shared);
2778  record_table_entry);
2779  result = (TupleDesc)
2781  record_table_entry->key.u.shared_tupdesc);
2782  Assert(result->tdrefcount == -1);
2783 
2784  return result;
2785  }
2786 
2787  /* Allocate a new typmod number. This will be wasted if we error out. */
2788  typmod = (int)
2790  1);
2791 
2792  /* Copy the TupleDesc into shared memory. */
2793  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2794 
2795  /*
2796  * Create an entry in the typmod table so that others will understand this
2797  * typmod number.
2798  */
2799  PG_TRY();
2800  {
2801  typmod_table_entry = (SharedTypmodTableEntry *)
2803  &typmod, &found);
2804  if (found)
2805  elog(ERROR, "cannot create duplicate shared record typmod");
2806  }
2807  PG_CATCH();
2808  {
2809  dsa_free(CurrentSession->area, shared_dp);
2810  PG_RE_THROW();
2811  }
2812  PG_END_TRY();
2813  typmod_table_entry->typmod = typmod;
2814  typmod_table_entry->shared_tupdesc = shared_dp;
2816  typmod_table_entry);
2817 
2818  /*
2819  * Finally create an entry in the record table so others with matching
2820  * tuple descriptors can reuse the typmod.
2821  */
2822  record_table_entry = (SharedRecordTableEntry *)
2824  &found);
2825  if (found)
2826  {
2827  /*
2828  * Someone concurrently inserted a matching tuple descriptor since the
2829  * first time we checked. Use that one instead.
2830  */
2832  record_table_entry);
2833 
2834  /* Might as well free up the space used by the one we created. */
2836  &typmod);
2837  Assert(found);
2838  dsa_free(CurrentSession->area, shared_dp);
2839 
2840  /* Return the one we found. */
2841  Assert(record_table_entry->key.shared);
2842  result = (TupleDesc)
2844  record_table_entry->key.u.shared_tupdesc);
2845  Assert(result->tdrefcount == -1);
2846 
2847  return result;
2848  }
2849 
2850  /* Store it and return it. */
2851  record_table_entry->key.shared = true;
2852  record_table_entry->key.u.shared_tupdesc = shared_dp;
2854  record_table_entry);
2855  result = (TupleDesc)
2856  dsa_get_address(CurrentSession->area, shared_dp);
2857  Assert(result->tdrefcount == -1);
2858 
2859  return result;
2860 }
2861 
2862 /*
2863  * On-DSM-detach hook to forget about the current shared record typmod
2864  * infrastructure. This is currently used by both leader and workers.
2865  */
2866 static void
2868 {
2869  /* Be cautious here: maybe we didn't finish initializing. */
2870  if (CurrentSession->shared_record_table != NULL)
2871  {
2874  }
2875  if (CurrentSession->shared_typmod_table != NULL)
2876  {
2879  }
2881 }
MemoryContextCallback callback
Definition: typcache.h:172
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2471
struct TypeCacheEnumData TypeCacheEnumData
MemoryContextCallbackFunction func
Definition: palloc.h:49
struct TypeCacheEnumData * enumData
Definition: typcache.h:129
#define NIL
Definition: pg_list.h:65
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1435
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:873
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:366
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2485
FormData_pg_range * Form_pg_range
Definition: pg_range.h:58
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:100
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:111
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:218
#define AllocSetContextCreate
Definition: memutils.h:173
#define BTORDER_PROC
Definition: nbtree.h:700
DomainConstraintCache * dcc
Definition: typcache.h:171
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:167
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:147
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:98
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2089
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:595
#define GETSTRUCT(TUP)
Definition: htup_details.h:654
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:74
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:263
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:711
Oid hash_opintype
Definition: typcache.h:60
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:87
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1343
#define HASH_ELEM
Definition: hsearch.h:95
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:81
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1827
uint32 type_id_hash
Definition: typcache.h:36
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:151
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:104
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1666
#define RelationGetDescr(relation)
Definition: rel.h:503
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:95
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1246
MemoryContext dccContext
Definition: typcache.c:127
DomainConstraintType constrainttype
Definition: execnodes.h:931
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1505
dsa_pointer dshash_table_handle
Definition: dshash.h:24
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:92
DomainConstraintCache * domainData
Definition: typcache.h:120
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2755
#define TYPECACHE_MULTIRANGE_INFO
Definition: typcache.h:152
#define PointerGetDatum(X)
Definition: postgres.h:600
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:361
struct RecordCacheEntry RecordCacheEntry
struct TypeCacheEntry TypeCacheEntry
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:92
char * pstrdup(const char *in)
Definition: mcxt.c:1299
Oid typcollation
Definition: typcache.h:47
Session * CurrentSession
Definition: session.c:48
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:141
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:205
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:256
dshash_table * shared_record_table
Definition: session.h:32
Expr * expression_planner(Expr *expr)
Definition: planner.c:5807
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:143
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define AccessShareLock
Definition: lockdefs.h:36
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1634
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:215
Size entrysize
Definition: hsearch.h:76
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:241
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:350
#define TYPECACHE_EQ_OPR
Definition: typcache.h:136
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:97
int errcode(int sqlerrcode)
Definition: elog.c:698
void * stringToNode(const char *str)
Definition: read.c:89
#define HASHEXTENDED_PROC
Definition: hash.h:354
#define MemSet(start, val, len)
Definition: c.h:1008
char * format_type_be(Oid type_oid)
Definition: format_type.c:339
static uint64 tupledesc_id_counter
Definition: typcache.c:287
uint32 hashTupleDesc(TupleDesc desc)
Definition: tupdesc.c:554
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1222
static HTAB * RecordCacheHash
Definition: typcache.c:274
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:202
SharedRecordTableKey key
Definition: typcache.c:198
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
Form_pg_class rd_rel
Definition: rel.h:109
unsigned int Oid
Definition: postgres_ext.h:31
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1096
int16 typlen
Definition: typcache.h:39
#define TupleDescSize(src)
Definition: tupdesc.h:102
#define OidIsValid(objectId)
Definition: c.h:710
bool typbyval
Definition: typcache.h:40
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:561
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:155
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2201
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:383
uint64 dsa_pointer
Definition: dsa.h:62
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:581
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1883
signed int int32
Definition: c.h:429
Oid get_multirange_range(Oid multirangeOid)
Definition: lsyscache.c:3433
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:1947
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:2102
static TupleDesc * RecordCacheArray
Definition: typcache.c:277
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1540
Oid domainBaseType
Definition: typcache.h:113
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1392
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:102
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1674
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:932
FmgrInfo cmp_proc_finfo
Definition: typcache.h:76
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1513
struct TypeCacheEntry * nextDomain
Definition: typcache.h:132
Definition: dynahash.c:219
dsa_pointer shared_tupdesc
Definition: typcache.c:208
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:502
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:93
pg_atomic_uint32 next_typmod
Definition: typcache.c:173
Bitmapset * sorted_values
Definition: typcache.c:141
void pfree(void *pointer)
Definition: mcxt.c:1169
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:89
#define ObjectIdGetDatum(X)
Definition: postgres.h:551
#define ERROR
Definition: elog.h:46
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:84
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:100
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2442
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:48
int32 tdtypmod
Definition: tupdesc.h:83
#define HTEqualStrategyNumber
Definition: stratnum.h:41
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:964
dsa_area * area
Definition: session.h:28
#define TCFLAGS_HAVE_FIELD_HASHING
Definition: typcache.c:101
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:186
char * c
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:362
char typstorage
Definition: typcache.h:42
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2284
union SharedRecordTableKey::@28 u
#define RegProcedureIsValid(p)
Definition: c.h:712
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1427
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:302
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:164
int32 domainBaseTypmod
Definition: typcache.h:114
ExprState * check_exprstate
Definition: execnodes.h:934
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition: typcache.c:102
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:207
Oid hash_extended_proc
Definition: typcache.h:66
unsigned int uint32
Definition: c.h:441
FmgrInfo hash_proc_finfo
Definition: typcache.h:77
#define TYPECACHE_GT_OPR
Definition: typcache.h:138
MemoryContext CurrentMemoryContext
Definition: mcxt.c:42
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1844
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:151
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:136
#define HASHSTANDARD_PROC
Definition: hash.h:353
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:145
dsa_pointer shared_tupdesc
Definition: typcache.c:187
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2699
#define IsParallelWorker()
Definition: parallel.h:61
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:99
MemoryContext TopMemoryContext
Definition: mcxt.c:48
FmgrInfo rng_canonical_finfo
Definition: typcache.h:101
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:143
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:265
MemoryContext refctx
Definition: typcache.h:166
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:98
List * lappend(List *list, void *datum)
Definition: list.c:336
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:148
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1451
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2349
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:1127
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1618
float float4
Definition: c.h:564
#define HASH_BLOBS
Definition: hsearch.h:97
#define TextDatumGetCString(d)
Definition: builtins.h:87
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:78
static int32 RecordCacheArrayLen
Definition: typcache.c:279
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:2867
struct SharedTypmodTableEntry SharedTypmodTableEntry
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1498
static int32 NextRecordTypmod
Definition: typcache.c:280
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
Oid enum_oid
Definition: typcache.c:134
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:150
uintptr_t Datum
Definition: postgres.h:411
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1175
Oid btree_opintype
Definition: typcache.h:58
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:196
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1702
Size keysize
Definition: hsearch.h:75
struct SharedRecordTableKey SharedRecordTableKey
TupleDesc rd_att
Definition: rel.h:110
HashCompareFunc match
Definition: hsearch.h:80
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:906
FmgrInfo eq_opr_finfo
Definition: typcache.h:75
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:339
#define InvalidOid
Definition: postgres_ext.h:36
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:907
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1256
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1658
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:175
Oid fn_oid
Definition: fmgr.h:59
#define ereport(elevel,...)
Definition: elog.h:157
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:1920
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:2080
dshash_table * shared_typmod_table
Definition: session.h:33
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:328
#define TYPECACHE_CMP_PROC
Definition: typcache.h:139
List * lcons(void *datum, List *list)
Definition: list.c:468
#define PG_CATCH()
Definition: elog.h:323
char typtype
Definition: typcache.h:43
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define makeNode(_type_)
Definition: nodes.h:584
FormData_pg_constraint * Form_pg_constraint
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:590
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:502
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:206
#define Assert(condition)
Definition: c.h:804
#define lfirst(lc)
Definition: pg_list.h:169
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1267
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:149
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:384
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:794
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:94
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2544
#define HASH_COMPARE
Definition: hsearch.h:99
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1305
TypeCacheEntry * tcache
Definition: typcache.h:167
void CreateCacheMemoryContext(void)
Definition: catcache.c:620
FormData_pg_type * Form_pg_type
Definition: pg_type.h:261
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:85
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2734
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:103
Oid rng_collation
Definition: typcache.h:99
uint64 tupDesc_identifier
Definition: typcache.h:90
struct TypeCacheEntry * rngtype
Definition: typcache.h:107
#define PG_RE_THROW()
Definition: elog.h:354
dshash_table_handle record_table_handle
Definition: typcache.c:169
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:736
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1489
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1183
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1182
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1497
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:96
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1419
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:309
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:116
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:107
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:986
Oid typsubscript
Definition: typcache.h:45
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2779
Oid tdtypeid
Definition: tupdesc.h:82
float4 sort_order
Definition: typcache.c:135
Definition: dsa.c:354
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:229
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:820
struct TupleDescData * TupleDesc
Definition: tupdesc.h:89
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:91
char typalign
Definition: typcache.h:41
void * palloc(Size size)
Definition: mcxt.c:1062
int errmsg(const char *fmt,...)
Definition: elog.c:909
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2716
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:863
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:1931
#define elog(elevel,...)
Definition: elog.h:232
int i
#define TYPECACHE_LT_OPR
Definition: typcache.h:137
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:88
#define NameStr(name)
Definition: c.h:681
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:430
bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:402
TupleDesc local_tupdesc
Definition: typcache.c:186
void * arg
TupleDesc tupdesc
Definition: typcache.c:159
int tdrefcount
Definition: tupdesc.h:84
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:292
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:123
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:142
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:385
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:86
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1626
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:146
#define qsort(a, b, c, d)
Definition: port.h:505
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1481
dshash_table_handle typmod_table_handle
Definition: typcache.c:171
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:90
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:229
#define PG_TRY()
Definition: elog.h:313
#define BTLessStrategyNumber
Definition: stratnum.h:29
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:39
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1235
struct SharedRecordTableEntry SharedRecordTableEntry
Definition: pg_list.h:50
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1443
TupleDesc tupDesc
Definition: typcache.h:89
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:2039
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2418
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1736
static HTAB * TypeCacheHash
Definition: typcache.c:78
long val
Definition: informix.c:664
#define TYPECACHE_HASH_PROC
Definition: typcache.h:140
#define TYPECACHE_TUPDESC
Definition: typcache.h:144
#define PG_END_TRY()
Definition: elog.h:338
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:727
dsm_segment * segment
Definition: session.h:27
static uint64 * RecordIdentifierArray
Definition: typcache.c:278
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2389
HashValueFunc hash
Definition: hsearch.h:78
#define HASH_FUNCTION
Definition: hsearch.h:98
#define dsa_allocate(area, size)
Definition: dsa.h:84
MemoryContext CacheMemoryContext
Definition: mcxt.c:51
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1861
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1205