PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
typcache.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  * POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row. For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array(). Because those routines are used as index
15  * support operations, they cannot leak memory. To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry. If the type is
21  * dropped, the cache entry simply becomes wasted storage. This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete. Information dependent on opclasses is cleared if we
27  * detect updates to pg_opclass. We also support clearing the tuple
28  * descriptor and operator/function parts of a rowtype's cache entry,
29  * since those may need to change as a consequence of ALTER TABLE.
30  * Domain constraint changes are also tracked properly.
31  *
32  *
33  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
34  * Portions Copyright (c) 1994, Regents of the University of California
35  *
36  * IDENTIFICATION
37  * src/backend/utils/cache/typcache.c
38  *
39  *-------------------------------------------------------------------------
40  */
41 #include "postgres.h"
42 
43 #include <limits.h>
44 
45 #include "access/hash.h"
46 #include "access/heapam.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/session.h"
51 #include "catalog/indexing.h"
52 #include "catalog/pg_am.h"
53 #include "catalog/pg_constraint.h"
54 #include "catalog/pg_enum.h"
55 #include "catalog/pg_operator.h"
56 #include "catalog/pg_range.h"
57 #include "catalog/pg_type.h"
58 #include "commands/defrem.h"
59 #include "executor/executor.h"
60 #include "lib/dshash.h"
61 #include "optimizer/planner.h"
62 #include "storage/lwlock.h"
63 #include "utils/builtins.h"
64 #include "utils/catcache.h"
65 #include "utils/fmgroids.h"
66 #include "utils/inval.h"
67 #include "utils/lsyscache.h"
68 #include "utils/memutils.h"
69 #include "utils/rel.h"
70 #include "utils/snapmgr.h"
71 #include "utils/syscache.h"
72 #include "utils/typcache.h"
73 
74 
75 /* The main type cache hashtable searched by lookup_type_cache */
76 static HTAB *TypeCacheHash = NULL;
77 
78 /* List of type cache entries for domain types */
80 
81 /* Private flag bits in the TypeCacheEntry.flags field */
82 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x0001
83 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x0002
84 #define TCFLAGS_CHECKED_EQ_OPR 0x0004
85 #define TCFLAGS_CHECKED_LT_OPR 0x0008
86 #define TCFLAGS_CHECKED_GT_OPR 0x0010
87 #define TCFLAGS_CHECKED_CMP_PROC 0x0020
88 #define TCFLAGS_CHECKED_HASH_PROC 0x0040
89 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x0080
90 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x0100
91 #define TCFLAGS_HAVE_ELEM_COMPARE 0x0200
92 #define TCFLAGS_HAVE_ELEM_HASHING 0x0400
93 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x0800
94 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x1000
95 #define TCFLAGS_HAVE_FIELD_COMPARE 0x2000
96 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x4000
97 #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x8000
98 
99 /*
100  * Data stored about a domain type's constraints. Note that we do not create
101  * this struct for the common case of a constraint-less domain; we just set
102  * domainData to NULL to indicate that.
103  *
104  * Within a DomainConstraintCache, we store expression plan trees, but the
105  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
106  * When needed, expression evaluation nodes are built by flat-copying the
107  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
108  * Such a node tree is not part of the DomainConstraintCache, but is
109  * considered to belong to a DomainConstraintRef.
110  */
112 {
113  List *constraints; /* list of DomainConstraintState nodes */
114  MemoryContext dccContext; /* memory context holding all associated data */
115  long dccRefCount; /* number of references to this struct */
116 };
117 
118 /* Private information to support comparisons of enum values */
119 typedef struct
120 {
121  Oid enum_oid; /* OID of one enum value */
122  float4 sort_order; /* its sort position */
123 } EnumItem;
124 
125 typedef struct TypeCacheEnumData
126 {
127  Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
128  Bitmapset *sorted_values; /* Set of OIDs known to be in order */
129  int num_values; /* total number of values in enum */
130  EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER];
132 
133 /*
134  * We use a separate table for storing the definitions of non-anonymous
135  * record types. Once defined, a record type will be remembered for the
136  * life of the backend. Subsequent uses of the "same" record type (where
137  * sameness means equalTupleDescs) will refer to the existing table entry.
138  *
139  * Stored record types are remembered in a linear array of TupleDescs,
140  * which can be indexed quickly with the assigned typmod. There is also
141  * a hash table to speed searches for matching TupleDescs.
142  */
143 
144 typedef struct RecordCacheEntry
145 {
148 
149 /*
150  * To deal with non-anonymous record types that are exchanged by backends
151  * involved in a parallel query, we also need a shared version of the above.
152  */
154 {
155  /* A hash table for finding a matching TupleDesc. */
157  /* A hash table for finding a TupleDesc by typmod. */
159  /* A source of new record typmod numbers. */
161 };
162 
163 /*
164  * When using shared tuple descriptors as hash table keys we need a way to be
165  * able to search for an equal shared TupleDesc using a backend-local
166  * TupleDesc. So we use this type which can hold either, and hash and compare
167  * functions that know how to handle both.
168  */
169 typedef struct SharedRecordTableKey
170 {
171  union
172  {
175  } u;
176  bool shared;
178 
179 /*
180  * The shared version of RecordCacheEntry. This lets us look up a typmod
181  * using a TupleDesc which may be in local or shared memory.
182  */
184 {
187 
188 /*
189  * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
190  * up a TupleDesc in shared memory using a typmod.
191  */
193 {
197 
198 /*
199  * A comparator function for SharedTupleDescTableKey.
200  */
201 static int
202 shared_record_table_compare(const void *a, const void *b, size_t size,
203  void *arg)
204 {
205  dsa_area *area = (dsa_area *) arg;
/*
 * NOTE(review): the declarations of k1 and k2 (upstream lines 206-207,
 * presumably SharedRecordTableKey pointers cast from a and b) were lost
 * in this extract -- confirm against the original typcache.c.
 */
208  TupleDesc t1;
209  TupleDesc t2;
210 
/*
 * Resolve each key to a backend-local TupleDesc pointer: a shared key
 * holds a dsa_pointer that must be translated through dsa_get_address(),
 * while a local key already carries a directly usable pointer.
 */
211  if (k1->shared)
212  t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
213  else
214  t1 = k1->u.local_tupdesc;
215 
216  if (k2->shared)
217  t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
218  else
219  t2 = k2->u.local_tupdesc;
220 
/* Only equality is reported (0 = equal, 1 = not equal); no ordering. */
221  return equalTupleDescs(t1, t2) ? 0 : 1;
222 }
223 
224 /*
225  * A hash function for SharedRecordTableKey.
226  */
227 static uint32
228 shared_record_table_hash(const void *a, size_t size, void *arg)
229 {
230  dsa_area *area = (dsa_area *) arg;
/*
 * NOTE(review): the declaration of k (upstream line 231, presumably a
 * SharedRecordTableKey pointer cast from a) is missing from this
 * extract -- confirm against the original typcache.c.
 */
232  TupleDesc t;
233 
/* As in the compare function: translate a shared key via the DSA area. */
234  if (k->shared)
235  t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
236  else
237  t = k->u.local_tupdesc;
238 
/* Delegate to the standard TupleDesc hash so it matches equalTupleDescs. */
239  return hashTupleDesc(t);
240 }
241 
242 /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
244  sizeof(SharedRecordTableKey), /* unused */
245  sizeof(SharedRecordTableEntry),
249 };
250 
251 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
253  sizeof(uint32),
254  sizeof(SharedTypmodTableEntry),
258 };
259 
260 static HTAB *RecordCacheHash = NULL;
261 
263 static int32 RecordCacheArrayLen = 0; /* allocated length of array */
264 static int32 NextRecordTypmod = 0; /* number of entries used */
265 
266 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
267 static void load_rangetype_info(TypeCacheEntry *typentry);
268 static void load_domaintype_info(TypeCacheEntry *typentry);
269 static int dcs_cmp(const void *a, const void *b);
270 static void decr_dcc_refcount(DomainConstraintCache *dcc);
271 static void dccref_deletion_callback(void *arg);
272 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
273 static bool array_element_has_equality(TypeCacheEntry *typentry);
274 static bool array_element_has_compare(TypeCacheEntry *typentry);
275 static bool array_element_has_hashing(TypeCacheEntry *typentry);
276 static void cache_array_element_properties(TypeCacheEntry *typentry);
277 static bool record_fields_have_equality(TypeCacheEntry *typentry);
278 static bool record_fields_have_compare(TypeCacheEntry *typentry);
279 static void cache_record_field_properties(TypeCacheEntry *typentry);
280 static void TypeCacheRelCallback(Datum arg, Oid relid);
281 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
282 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
283 static void load_enum_cache_data(TypeCacheEntry *tcache);
284 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
285 static int enum_oid_cmp(const void *left, const void *right);
287  Datum datum);
289 static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
290  uint32 typmod);
291 
292 
293 /*
294  * lookup_type_cache
295  *
296  * Fetch the type cache entry for the specified datatype, and make sure that
297  * all the fields requested by bits in 'flags' are valid.
298  *
299  * The result is never NULL --- we will ereport() if the passed type OID is
300  * invalid. Note however that we may fail to find one or more of the
301  * values requested by 'flags'; the caller needs to check whether the fields
302  * are InvalidOid or not.
303  */
/*
 * NOTE(review): this extract has gaps -- the embedded upstream line numbers
 * jump (e.g. 304, 322-325, 329, 389-392, 415-418, 429-433, 455, 471, 477,
 * 504, 516, 538, 577, 591, 595, 613-615, 628, 632-633, 645, 648, 672, 679,
 * 686, 689, 692-694 are absent), so the return type, several if-conditions,
 * function arguments, and callback registrations are missing below.
 * Confirm every such spot against the original typcache.c before use.
 */
305 lookup_type_cache(Oid type_id, int flags)
306 {
307  TypeCacheEntry *typentry;
308  bool found;
309 
310  if (TypeCacheHash == NULL)
311  {
312  /* First time through: initialize the hash table */
313  HASHCTL ctl;
314 
315  MemSet(&ctl, 0, sizeof(ctl));
316  ctl.keysize = sizeof(Oid);
317  ctl.entrysize = sizeof(TypeCacheEntry);
318  TypeCacheHash = hash_create("Type information cache", 64,
319  &ctl, HASH_ELEM | HASH_BLOBS);
320 
321  /* Also set up callbacks for SI invalidations */
/* NOTE(review): the CacheRegister*Callback calls (upstream 322-325) are missing here. */
326 
327  /* Also make sure CacheMemoryContext exists */
328  if (!CacheMemoryContext)
/* NOTE(review): the call that creates CacheMemoryContext (upstream 329) is missing here. */
330  }
331 
332  /* Try to look up an existing entry */
333  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
334  (void *) &type_id,
335  HASH_FIND, NULL);
336  if (typentry == NULL)
337  {
338  /*
339  * If we didn't find one, we want to make one. But first look up the
340  * pg_type row, just to make sure we don't make a cache entry for an
341  * invalid type OID. If the type OID is not valid, present a
342  * user-facing error, since some code paths such as domain_in() allow
343  * this function to be reached with a user-supplied OID.
344  */
345  HeapTuple tp;
346  Form_pg_type typtup;
347 
348  tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
349  if (!HeapTupleIsValid(tp))
350  ereport(ERROR,
351  (errcode(ERRCODE_UNDEFINED_OBJECT),
352  errmsg("type with OID %u does not exist", type_id)));
353  typtup = (Form_pg_type) GETSTRUCT(tp);
354  if (!typtup->typisdefined)
355  ereport(ERROR,
356  (errcode(ERRCODE_UNDEFINED_OBJECT),
357  errmsg("type \"%s\" is only a shell",
358  NameStr(typtup->typname))));
359 
360  /* Now make the typcache entry */
361  typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
362  (void *) &type_id,
363  HASH_ENTER, &found);
364  Assert(!found); /* it wasn't there a moment ago */
365 
366  MemSet(typentry, 0, sizeof(TypeCacheEntry));
367  typentry->type_id = type_id;
368  typentry->typlen = typtup->typlen;
369  typentry->typbyval = typtup->typbyval;
370  typentry->typalign = typtup->typalign;
371  typentry->typstorage = typtup->typstorage;
372  typentry->typtype = typtup->typtype;
373  typentry->typrelid = typtup->typrelid;
374 
375  /* If it's a domain, immediately thread it into the domain cache list */
376  if (typentry->typtype == TYPTYPE_DOMAIN)
377  {
378  typentry->nextDomain = firstDomainTypeEntry;
379  firstDomainTypeEntry = typentry;
380  }
381 
382  ReleaseSysCache(tp);
383  }
384 
385  /*
386  * Look up opclasses if we haven't already and any dependent info is
387  * requested.
388  */
/* NOTE(review): the flags test opening this if-statement (upstream 389-392) is missing. */
393  !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
394  {
395  Oid opclass;
396 
397  opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
398  if (OidIsValid(opclass))
399  {
400  typentry->btree_opf = get_opclass_family(opclass);
401  typentry->btree_opintype = get_opclass_input_type(opclass);
402  }
403  else
404  {
405  typentry->btree_opf = typentry->btree_opintype = InvalidOid;
406  }
407 
408  /*
409  * Reset information derived from btree opclass. Note in particular
410  * that we'll redetermine the eq_opr even if we previously found one;
411  * this matters in case a btree opclass has been added to a type that
412  * previously had only a hash opclass.
413  */
414  typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
419  }
420 
421  /*
422  * If we need to look up equality operator, and there's no btree opclass,
423  * force lookup of hash opclass.
424  */
425  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
426  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
427  typentry->btree_opf == InvalidOid)
428  flags |= TYPECACHE_HASH_OPFAMILY;
429 
/* NOTE(review): the flags test opening this if-statement (upstream 430-433) is missing. */
434  !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
435  {
436  Oid opclass;
437 
438  opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
439  if (OidIsValid(opclass))
440  {
441  typentry->hash_opf = get_opclass_family(opclass);
442  typentry->hash_opintype = get_opclass_input_type(opclass);
443  }
444  else
445  {
446  typentry->hash_opf = typentry->hash_opintype = InvalidOid;
447  }
448 
449  /*
450  * Reset information derived from hash opclass. We do *not* reset the
451  * eq_opr; if we already found one from the btree opclass, that
452  * decision is still good.
453  */
454  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
456  typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
457  }
458 
459  /*
460  * Look for requested operators and functions, if we haven't already.
461  */
462  if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
463  !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
464  {
465  Oid eq_opr = InvalidOid;
466 
467  if (typentry->btree_opf != InvalidOid)
468  eq_opr = get_opfamily_member(typentry->btree_opf,
469  typentry->btree_opintype,
470  typentry->btree_opintype,
472  if (eq_opr == InvalidOid &&
473  typentry->hash_opf != InvalidOid)
474  eq_opr = get_opfamily_member(typentry->hash_opf,
475  typentry->hash_opintype,
476  typentry->hash_opintype,
478 
479  /*
480  * If the proposed equality operator is array_eq or record_eq, check
481  * to see if the element type or column types support equality. If
482  * not, array_eq or record_eq would fail at runtime, so we don't want
483  * to report that the type has equality.
484  */
485  if (eq_opr == ARRAY_EQ_OP &&
486  !array_element_has_equality(typentry))
487  eq_opr = InvalidOid;
488  else if (eq_opr == RECORD_EQ_OP &&
489  !record_fields_have_equality(typentry))
490  eq_opr = InvalidOid;
491 
492  /* Force update of eq_opr_finfo only if we're changing state */
493  if (typentry->eq_opr != eq_opr)
494  typentry->eq_opr_finfo.fn_oid = InvalidOid;
495 
496  typentry->eq_opr = eq_opr;
497 
498  /*
499  * Reset info about hash functions whenever we pick up new info about
500  * equality operator. This is so we can ensure that the hash
501  * functions match the operator.
502  */
503  typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
505  typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
506  }
507  if ((flags & TYPECACHE_LT_OPR) &&
508  !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
509  {
510  Oid lt_opr = InvalidOid;
511 
512  if (typentry->btree_opf != InvalidOid)
513  lt_opr = get_opfamily_member(typentry->btree_opf,
514  typentry->btree_opintype,
515  typentry->btree_opintype,
517 
518  /* As above, make sure array_cmp or record_cmp will succeed */
519  if (lt_opr == ARRAY_LT_OP &&
520  !array_element_has_compare(typentry))
521  lt_opr = InvalidOid;
522  else if (lt_opr == RECORD_LT_OP &&
523  !record_fields_have_compare(typentry))
524  lt_opr = InvalidOid;
525 
526  typentry->lt_opr = lt_opr;
527  typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
528  }
529  if ((flags & TYPECACHE_GT_OPR) &&
530  !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
531  {
532  Oid gt_opr = InvalidOid;
533 
534  if (typentry->btree_opf != InvalidOid)
535  gt_opr = get_opfamily_member(typentry->btree_opf,
536  typentry->btree_opintype,
537  typentry->btree_opintype,
539 
540  /* As above, make sure array_cmp or record_cmp will succeed */
541  if (gt_opr == ARRAY_GT_OP &&
542  !array_element_has_compare(typentry))
543  gt_opr = InvalidOid;
544  else if (gt_opr == RECORD_GT_OP &&
545  !record_fields_have_compare(typentry))
546  gt_opr = InvalidOid;
547 
548  typentry->gt_opr = gt_opr;
549  typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
550  }
551  if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
552  !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
553  {
554  Oid cmp_proc = InvalidOid;
555 
556  if (typentry->btree_opf != InvalidOid)
557  cmp_proc = get_opfamily_proc(typentry->btree_opf,
558  typentry->btree_opintype,
559  typentry->btree_opintype,
560  BTORDER_PROC);
561 
562  /* As above, make sure array_cmp or record_cmp will succeed */
563  if (cmp_proc == F_BTARRAYCMP &&
564  !array_element_has_compare(typentry))
565  cmp_proc = InvalidOid;
566  else if (cmp_proc == F_BTRECORDCMP &&
567  !record_fields_have_compare(typentry))
568  cmp_proc = InvalidOid;
569 
570  /* Force update of cmp_proc_finfo only if we're changing state */
571  if (typentry->cmp_proc != cmp_proc)
572  typentry->cmp_proc_finfo.fn_oid = InvalidOid;
573 
574  typentry->cmp_proc = cmp_proc;
575  typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
576  }
/* NOTE(review): the flags test opening this if-statement (upstream 577) is missing. */
578  !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
579  {
580  Oid hash_proc = InvalidOid;
581 
582  /*
583  * We insist that the eq_opr, if one has been determined, match the
584  * hash opclass; else report there is no hash function.
585  */
586  if (typentry->hash_opf != InvalidOid &&
587  (!OidIsValid(typentry->eq_opr) ||
588  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
589  typentry->hash_opintype,
590  typentry->hash_opintype,
592  hash_proc = get_opfamily_proc(typentry->hash_opf,
593  typentry->hash_opintype,
594  typentry->hash_opintype,
596 
597  /*
598  * As above, make sure hash_array will succeed. We don't currently
599  * support hashing for composite types, but when we do, we'll need
600  * more logic here to check that case too.
601  */
602  if (hash_proc == F_HASH_ARRAY &&
603  !array_element_has_hashing(typentry))
604  hash_proc = InvalidOid;
605 
606  /* Force update of hash_proc_finfo only if we're changing state */
607  if (typentry->hash_proc != hash_proc)
608  typentry->hash_proc_finfo.fn_oid = InvalidOid;
609 
610  typentry->hash_proc = hash_proc;
611  typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
612  }
613  if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
/* NOTE(review): the rest of this condition (upstream 614-615) is missing. */
616  {
617  Oid hash_extended_proc = InvalidOid;
618 
619  /*
620  * We insist that the eq_opr, if one has been determined, match the
621  * hash opclass; else report there is no hash function.
622  */
623  if (typentry->hash_opf != InvalidOid &&
624  (!OidIsValid(typentry->eq_opr) ||
625  typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
626  typentry->hash_opintype,
627  typentry->hash_opintype,
629  hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
630  typentry->hash_opintype,
631  typentry->hash_opintype,
633 
634  /*
635  * As above, make sure hash_array_extended will succeed. We don't
636  * currently support hashing for composite types, but when we do,
637  * we'll need more logic here to check that case too.
638  */
639  if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
640  !array_element_has_hashing(typentry))
641  hash_extended_proc = InvalidOid;
642 
643  /* Force update of hash_proc_finfo only if we're changing state */
644  if (typentry->hash_extended_proc != hash_extended_proc)
646 
647  typentry->hash_extended_proc = hash_extended_proc;
649  }
650 
651  /*
652  * Set up fmgr lookup info as requested
653  *
654  * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
655  * which is not quite right (they're really in the hash table's private
656  * memory context) but this will do for our purposes.
657  *
658  * Note: the code above avoids invalidating the finfo structs unless the
659  * referenced operator/function OID actually changes. This is to prevent
660  * unnecessary leakage of any subsidiary data attached to an finfo, since
661  * that would cause session-lifespan memory leaks.
662  */
663  if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
664  typentry->eq_opr_finfo.fn_oid == InvalidOid &&
665  typentry->eq_opr != InvalidOid)
666  {
667  Oid eq_opr_func;
668 
669  eq_opr_func = get_opcode(typentry->eq_opr);
670  if (eq_opr_func != InvalidOid)
671  fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
673  }
674  if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
675  typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
676  typentry->cmp_proc != InvalidOid)
677  {
678  fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
680  }
681  if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
682  typentry->hash_proc_finfo.fn_oid == InvalidOid &&
683  typentry->hash_proc != InvalidOid)
684  {
685  fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
687  }
688  if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
690  typentry->hash_extended_proc != InvalidOid)
691  {
693  &typentry->hash_extended_proc_finfo,
695  }
696 
697  /*
698  * If it's a composite type (row type), get tupdesc if requested
699  */
700  if ((flags & TYPECACHE_TUPDESC) &&
701  typentry->tupDesc == NULL &&
702  typentry->typtype == TYPTYPE_COMPOSITE)
703  {
704  load_typcache_tupdesc(typentry);
705  }
706 
707  /*
708  * If requested, get information about a range type
709  */
710  if ((flags & TYPECACHE_RANGE_INFO) &&
711  typentry->rngelemtype == NULL &&
712  typentry->typtype == TYPTYPE_RANGE)
713  {
714  load_rangetype_info(typentry);
715  }
716 
717  /*
718  * If requested, get information about a domain type
719  */
720  if ((flags & TYPECACHE_DOMAIN_INFO) &&
721  (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
722  typentry->typtype == TYPTYPE_DOMAIN)
723  {
724  load_domaintype_info(typentry);
725  }
726 
727  return typentry;
728 }
729 
730 /*
731  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
732  */
/*
 * NOTE(review): the function name/parameter line (upstream 734) and the
 * closing relation_close call (upstream 755) are missing from this
 * extract -- confirm against the original typcache.c.
 */
733 static void
735 {
736  Relation rel;
737 
738  if (!OidIsValid(typentry->typrelid)) /* should not happen */
739  elog(ERROR, "invalid typrelid for composite type %u",
740  typentry->type_id);
741  rel = relation_open(typentry->typrelid, AccessShareLock);
742  Assert(rel->rd_rel->reltype == typentry->type_id);
743 
744  /*
745  * Link to the tupdesc and increment its refcount (we assert it's a
746  * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
747  * because the reference mustn't be entered in the current resource owner;
748  * it can outlive the current query.
749  */
750  typentry->tupDesc = RelationGetDescr(rel);
751 
752  Assert(typentry->tupDesc->tdrefcount > 0);
753  typentry->tupDesc->tdrefcount++;
754 
756 }
757 
758 /*
759  * load_rangetype_info --- helper routine to set up range type information
760  */
/*
 * NOTE(review): this extract is missing the function name/parameter line
 * (upstream 762), the syscache fetch that assigns tup (upstream 775), and
 * the memory-context argument lines of the three fmgr_info_cxt calls
 * (upstream 802, 805, 808) -- confirm against the original typcache.c.
 */
761 static void
763 {
764  Form_pg_range pg_range;
765  HeapTuple tup;
766  Oid subtypeOid;
767  Oid opclassOid;
768  Oid canonicalOid;
769  Oid subdiffOid;
770  Oid opfamilyOid;
771  Oid opcintype;
772  Oid cmpFnOid;
773 
774  /* get information from pg_range */
776  /* should not fail, since we already checked typtype ... */
777  if (!HeapTupleIsValid(tup))
778  elog(ERROR, "cache lookup failed for range type %u",
779  typentry->type_id);
780  pg_range = (Form_pg_range) GETSTRUCT(tup);
781 
782  subtypeOid = pg_range->rngsubtype;
783  typentry->rng_collation = pg_range->rngcollation;
784  opclassOid = pg_range->rngsubopc;
785  canonicalOid = pg_range->rngcanonical;
786  subdiffOid = pg_range->rngsubdiff;
787 
788  ReleaseSysCache(tup);
789 
790  /* get opclass properties and look up the comparison function */
791  opfamilyOid = get_opclass_family(opclassOid);
792  opcintype = get_opclass_input_type(opclassOid);
793 
794  cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
795  BTORDER_PROC);
796  if (!RegProcedureIsValid(cmpFnOid))
797  elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
798  BTORDER_PROC, opcintype, opcintype, opfamilyOid);
799 
800  /* set up cached fmgrinfo structs */
801  fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
803  if (OidIsValid(canonicalOid))
804  fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
806  if (OidIsValid(subdiffOid))
807  fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
809 
810  /* Lastly, set up link to the element type --- this marks data valid */
811  typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
812 }
813 
814 
815 /*
816  * load_domaintype_info --- helper routine to set up domain constraint info
817  *
818  * Note: we assume we're called in a relatively short-lived context, so it's
819  * okay to leak data into the current context while scanning pg_constraint.
820  * We build the new DomainConstraintCache data in a context underneath
821  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
822  * complete.
823  */
/*
 * NOTE(review): this extract has gaps -- the embedded upstream line numbers
 * jump (e.g. 825, 828, 860, 889, 898, 903, 910, 924/926/928, 942-943, 1000,
 * 1007/1009/1011, 1020/1022, 1039, 1045 are absent), so the function
 * name/parameter line, the dcc and r declarations, the heap_open of
 * pg_constraint, parts of the context-creation calls, and the final
 * flag-setting line are missing below. Confirm every such spot against
 * the original typcache.c before use.
 */
824 static void
826 {
827  Oid typeOid = typentry->type_id;
829  bool notNull = false;
830  DomainConstraintState **ccons;
831  int cconslen;
832  Relation conRel;
833  MemoryContext oldcxt;
834 
835  /*
836  * If we're here, any existing constraint info is stale, so release it.
837  * For safety, be sure to null the link before trying to delete the data.
838  */
839  if (typentry->domainData)
840  {
841  dcc = typentry->domainData;
842  typentry->domainData = NULL;
843  decr_dcc_refcount(dcc);
844  }
845 
846  /*
847  * We try to optimize the common case of no domain constraints, so don't
848  * create the dcc object and context until we find a constraint. Likewise
849  * for the temp sorting array.
850  */
851  dcc = NULL;
852  ccons = NULL;
853  cconslen = 0;
854 
855  /*
856  * Scan pg_constraint for relevant constraints. We want to find
857  * constraints for not just this domain, but any ancestor domains, so the
858  * outer loop crawls up the domain stack.
859  */
861 
862  for (;;)
863  {
864  HeapTuple tup;
865  HeapTuple conTup;
866  Form_pg_type typTup;
867  int nccons = 0;
868  ScanKeyData key[1];
869  SysScanDesc scan;
870 
871  tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
872  if (!HeapTupleIsValid(tup))
873  elog(ERROR, "cache lookup failed for type %u", typeOid);
874  typTup = (Form_pg_type) GETSTRUCT(tup);
875 
876  if (typTup->typtype != TYPTYPE_DOMAIN)
877  {
878  /* Not a domain, so done */
879  ReleaseSysCache(tup);
880  break;
881  }
882 
883  /* Test for NOT NULL Constraint */
884  if (typTup->typnotnull)
885  notNull = true;
886 
887  /* Look for CHECK Constraints on this domain */
888  ScanKeyInit(&key[0],
890  BTEqualStrategyNumber, F_OIDEQ,
891  ObjectIdGetDatum(typeOid));
892 
893  scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
894  NULL, 1, key);
895 
896  while (HeapTupleIsValid(conTup = systable_getnext(scan)))
897  {
899  Datum val;
900  bool isNull;
901  char *constring;
902  Expr *check_expr;
904 
905  /* Ignore non-CHECK constraints (presently, shouldn't be any) */
906  if (c->contype != CONSTRAINT_CHECK)
907  continue;
908 
909  /* Not expecting conbin to be NULL, but we'll test for it anyway */
911  conRel->rd_att, &isNull);
912  if (isNull)
913  elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
914  NameStr(typTup->typname), NameStr(c->conname));
915 
916  /* Convert conbin to C string in caller context */
917  constring = TextDatumGetCString(val);
918 
919  /* Create the DomainConstraintCache object and context if needed */
920  if (dcc == NULL)
921  {
922  MemoryContext cxt;
923 
925  "Domain constraints",
927  dcc = (DomainConstraintCache *)
929  dcc->constraints = NIL;
930  dcc->dccContext = cxt;
931  dcc->dccRefCount = 0;
932  }
933 
934  /* Create node trees in DomainConstraintCache's context */
935  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
936 
937  check_expr = (Expr *) stringToNode(constring);
938 
939  /* ExecInitExpr will assume we've planned the expression */
940  check_expr = expression_planner(check_expr);
941 
944  r->name = pstrdup(NameStr(c->conname));
945  r->check_expr = check_expr;
946  r->check_exprstate = NULL;
947 
948  MemoryContextSwitchTo(oldcxt);
949 
950  /* Accumulate constraints in an array, for sorting below */
951  if (ccons == NULL)
952  {
953  cconslen = 8;
954  ccons = (DomainConstraintState **)
955  palloc(cconslen * sizeof(DomainConstraintState *));
956  }
957  else if (nccons >= cconslen)
958  {
959  cconslen *= 2;
960  ccons = (DomainConstraintState **)
961  repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
962  }
963  ccons[nccons++] = r;
964  }
965 
966  systable_endscan(scan);
967 
968  if (nccons > 0)
969  {
970  /*
971  * Sort the items for this domain, so that CHECKs are applied in a
972  * deterministic order.
973  */
974  if (nccons > 1)
975  qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
976 
977  /*
978  * Now attach them to the overall list. Use lcons() here because
979  * constraints of parent domains should be applied earlier.
980  */
981  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
982  while (nccons > 0)
983  dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
984  MemoryContextSwitchTo(oldcxt);
985  }
986 
987  /* loop to next domain in stack */
988  typeOid = typTup->typbasetype;
989  ReleaseSysCache(tup);
990  }
991 
992  heap_close(conRel, AccessShareLock);
993 
994  /*
995  * Only need to add one NOT NULL check regardless of how many domains in
996  * the stack request it.
997  */
998  if (notNull)
999  {
1001 
1002  /* Create the DomainConstraintCache object and context if needed */
1003  if (dcc == NULL)
1004  {
1005  MemoryContext cxt;
1006 
1008  "Domain constraints",
1010  dcc = (DomainConstraintCache *)
1012  dcc->constraints = NIL;
1013  dcc->dccContext = cxt;
1014  dcc->dccRefCount = 0;
1015  }
1016 
1017  /* Create node trees in DomainConstraintCache's context */
1018  oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1019 
1021 
1023  r->name = pstrdup("NOT NULL");
1024  r->check_expr = NULL;
1025  r->check_exprstate = NULL;
1026 
1027  /* lcons to apply the nullness check FIRST */
1028  dcc->constraints = lcons(r, dcc->constraints);
1029 
1030  MemoryContextSwitchTo(oldcxt);
1031  }
1032 
1033  /*
1034  * If we made a constraint object, move it into CacheMemoryContext and
1035  * attach it to the typcache entry.
1036  */
1037  if (dcc)
1038  {
1040  typentry->domainData = dcc;
1041  dcc->dccRefCount++; /* count the typcache's reference */
1042  }
1043 
1044  /* Either way, the typcache entry's domain data is now valid. */
1046 }
1047 
1048 /*
1049  * qsort comparator to sort DomainConstraintState pointers by name
1050  */
1051 static int
1052 dcs_cmp(const void *a, const void *b)
1053 {
1054  const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1055  const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1056 
1057  return strcmp((*ca)->name, (*cb)->name);
1058 }
1059 
1060 /*
1061  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1062  * and free it if no references remain
1063  */
1064 static void
1066 {
1067  Assert(dcc->dccRefCount > 0);
1068  if (--(dcc->dccRefCount) <= 0)
1070 }
1071 
1072 /*
1073  * Context reset/delete callback for a DomainConstraintRef
1074  */
1075 static void
1077 {
1079  DomainConstraintCache *dcc = ref->dcc;
1080 
1081  /* Paranoia --- be sure link is nulled before trying to release */
1082  if (dcc)
1083  {
1084  ref->constraints = NIL;
1085  ref->dcc = NULL;
1086  decr_dcc_refcount(dcc);
1087  }
1088 }
1089 
1090 /*
1091  * prep_domain_constraints --- prepare domain constraints for execution
1092  *
1093  * The expression trees stored in the DomainConstraintCache's list are
1094  * converted to executable expression state trees stored in execctx.
1095  */
1096 static List *
1098 {
1099  List *result = NIL;
1100  MemoryContext oldcxt;
1101  ListCell *lc;
1102 
1103  oldcxt = MemoryContextSwitchTo(execctx);
1104 
1105  foreach(lc, constraints)
1106  {
1108  DomainConstraintState *newr;
1109 
1111  newr->constrainttype = r->constrainttype;
1112  newr->name = r->name;
1113  newr->check_expr = r->check_expr;
1114  newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1115 
1116  result = lappend(result, newr);
1117  }
1118 
1119  MemoryContextSwitchTo(oldcxt);
1120 
1121  return result;
1122 }
1123 
1124 /*
1125  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1126  *
1127  * Caller must tell us the MemoryContext in which the DomainConstraintRef
1128  * lives. The ref will be cleaned up when that context is reset/deleted.
1129  *
1130  * Caller must also tell us whether it wants check_exprstate fields to be
1131  * computed in the DomainConstraintState nodes attached to this ref.
1132  * If it doesn't, we need not make a copy of the DomainConstraintState list.
1133  */
1134 void
1136  MemoryContext refctx, bool need_exprstate)
1137 {
1138  /* Look up the typcache entry --- we assume it survives indefinitely */
1140  ref->need_exprstate = need_exprstate;
1141  /* For safety, establish the callback before acquiring a refcount */
1142  ref->refctx = refctx;
1143  ref->dcc = NULL;
1145  ref->callback.arg = (void *) ref;
1147  /* Acquire refcount if there are constraints, and set up exported list */
1148  if (ref->tcache->domainData)
1149  {
1150  ref->dcc = ref->tcache->domainData;
1151  ref->dcc->dccRefCount++;
1152  if (ref->need_exprstate)
1154  ref->refctx);
1155  else
1156  ref->constraints = ref->dcc->constraints;
1157  }
1158  else
1159  ref->constraints = NIL;
1160 }
1161 
1162 /*
1163  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1164  *
1165  * If the domain's constraint set changed, ref->constraints is updated to
1166  * point at a new list of cached constraints.
1167  *
1168  * In the normal case where nothing happened to the domain, this is cheap
1169  * enough that it's reasonable (and expected) to check before *each* use
1170  * of the constraint info.
1171  */
1172 void
1174 {
1175  TypeCacheEntry *typentry = ref->tcache;
1176 
1177  /* Make sure typcache entry's data is up to date */
1178  if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1179  typentry->typtype == TYPTYPE_DOMAIN)
1180  load_domaintype_info(typentry);
1181 
1182  /* Transfer to ref object if there's new info, adjusting refcounts */
1183  if (ref->dcc != typentry->domainData)
1184  {
1185  /* Paranoia --- be sure link is nulled before trying to release */
1186  DomainConstraintCache *dcc = ref->dcc;
1187 
1188  if (dcc)
1189  {
1190  /*
1191  * Note: we just leak the previous list of executable domain
1192  * constraints. Alternatively, we could keep those in a child
1193  * context of ref->refctx and free that context at this point.
1194  * However, in practice this code path will be taken so seldom
1195  * that the extra bookkeeping for a child context doesn't seem
1196  * worthwhile; we'll just allow a leak for the lifespan of refctx.
1197  */
1198  ref->constraints = NIL;
1199  ref->dcc = NULL;
1200  decr_dcc_refcount(dcc);
1201  }
1202  dcc = typentry->domainData;
1203  if (dcc)
1204  {
1205  ref->dcc = dcc;
1206  dcc->dccRefCount++;
1207  if (ref->need_exprstate)
1209  ref->refctx);
1210  else
1211  ref->constraints = dcc->constraints;
1212  }
1213  }
1214 }
1215 
1216 /*
1217  * DomainHasConstraints --- utility routine to check if a domain has constraints
1218  *
1219  * This is defined to return false, not fail, if type is not a domain.
1220  */
1221 bool
1223 {
1224  TypeCacheEntry *typentry;
1225 
1226  /*
1227  * Note: a side effect is to cause the typcache's domain data to become
1228  * valid. This is fine since we'll likely need it soon if there is any.
1229  */
1230  typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
1231 
1232  return (typentry->domainData != NULL);
1233 }
1234 
1235 
1236 /*
1237  * array_element_has_equality and friends are helper routines to check
1238  * whether we should believe that array_eq and related functions will work
1239  * on the given array type or composite type.
1240  *
1241  * The logic above may call these repeatedly on the same type entry, so we
1242  * make use of the typentry->flags field to cache the results once known.
1243  * Also, we assume that we'll probably want all these facts about the type
1244  * if we want any, so we cache them all using only one lookup of the
1245  * component datatype(s).
1246  */
1247 
1248 static bool
1250 {
1251  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1253  return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1254 }
1255 
1256 static bool
1258 {
1259  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1261  return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1262 }
1263 
1264 static bool
1266 {
1267  if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1269  return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1270 }
1271 
1272 static void
1274 {
1275  Oid elem_type = get_base_element_type(typentry->type_id);
1276 
1277  if (OidIsValid(elem_type))
1278  {
1279  TypeCacheEntry *elementry;
1280 
1281  elementry = lookup_type_cache(elem_type,
1285  if (OidIsValid(elementry->eq_opr))
1286  typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1287  if (OidIsValid(elementry->cmp_proc))
1288  typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1289  if (OidIsValid(elementry->hash_proc))
1290  typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1291  }
1293 }
1294 
1295 static bool
1297 {
1298  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1300  return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1301 }
1302 
1303 static bool
1305 {
1306  if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1308  return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1309 }
1310 
1311 static void
1313 {
1314  /*
1315  * For type RECORD, we can't really tell what will work, since we don't
1316  * have access here to the specific anonymous type. Just assume that
1317  * everything will (we may get a failure at runtime ...)
1318  */
1319  if (typentry->type_id == RECORDOID)
1320  typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1322  else if (typentry->typtype == TYPTYPE_COMPOSITE)
1323  {
1324  TupleDesc tupdesc;
1325  int newflags;
1326  int i;
1327 
1328  /* Fetch composite type's tupdesc if we don't have it already */
1329  if (typentry->tupDesc == NULL)
1330  load_typcache_tupdesc(typentry);
1331  tupdesc = typentry->tupDesc;
1332 
1333  /* Must bump the refcount while we do additional catalog lookups */
1334  IncrTupleDescRefCount(tupdesc);
1335 
1336  /* Have each property if all non-dropped fields have the property */
1337  newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1339  for (i = 0; i < tupdesc->natts; i++)
1340  {
1341  TypeCacheEntry *fieldentry;
1342  Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1343 
1344  if (attr->attisdropped)
1345  continue;
1346 
1347  fieldentry = lookup_type_cache(attr->atttypid,
1350  if (!OidIsValid(fieldentry->eq_opr))
1351  newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1352  if (!OidIsValid(fieldentry->cmp_proc))
1353  newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1354 
1355  /* We can drop out of the loop once we disprove all bits */
1356  if (newflags == 0)
1357  break;
1358  }
1359  typentry->flags |= newflags;
1360 
1361  DecrTupleDescRefCount(tupdesc);
1362  }
1364 }
1365 
1366 /*
1367  * Make sure that RecordCacheArray is large enough to store 'typmod'.
1368  */
1369 static void
1371 {
1372  if (RecordCacheArray == NULL)
1373  {
1374  RecordCacheArray = (TupleDesc *)
1376  RecordCacheArrayLen = 64;
1377  }
1378 
1379  if (typmod >= RecordCacheArrayLen)
1380  {
1381  int32 newlen = RecordCacheArrayLen * 2;
1382 
1383  while (typmod >= newlen)
1384  newlen *= 2;
1385 
1386  RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
1387  newlen * sizeof(TupleDesc));
1388  memset(RecordCacheArray + RecordCacheArrayLen, 0,
1389  (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
1390  RecordCacheArrayLen = newlen;
1391  }
1392 }
1393 
1394 /*
1395  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1396  *
1397  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1398  * hasn't had its refcount bumped.
1399  */
1400 static TupleDesc
1401 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1402 {
1403  if (type_id != RECORDOID)
1404  {
1405  /*
1406  * It's a named composite type, so use the regular typcache.
1407  */
1408  TypeCacheEntry *typentry;
1409 
1410  typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1411  if (typentry->tupDesc == NULL && !noError)
1412  ereport(ERROR,
1413  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1414  errmsg("type %s is not composite",
1415  format_type_be(type_id))));
1416  return typentry->tupDesc;
1417  }
1418  else
1419  {
1420  /*
1421  * It's a transient record type, so look in our record-type table.
1422  */
1423  if (typmod >= 0)
1424  {
1425  /* It is already in our local cache? */
1426  if (typmod < RecordCacheArrayLen &&
1427  RecordCacheArray[typmod] != NULL)
1428  return RecordCacheArray[typmod];
1429 
1430  /* Are we attached to a shared record typmod registry? */
1432  {
1433  SharedTypmodTableEntry *entry;
1434 
1435  /* Try to find it in the shared typmod index. */
1437  &typmod, false);
1438  if (entry != NULL)
1439  {
1440  TupleDesc tupdesc;
1441 
1442  tupdesc = (TupleDesc)
1444  entry->shared_tupdesc);
1445  Assert(typmod == tupdesc->tdtypmod);
1446 
1447  /* We may need to extend the local RecordCacheArray. */
1449 
1450  /*
1451  * Our local array can now point directly to the TupleDesc
1452  * in shared memory.
1453  */
1454  RecordCacheArray[typmod] = tupdesc;
1455  Assert(tupdesc->tdrefcount == -1);
1456 
1458  entry);
1459 
1460  return RecordCacheArray[typmod];
1461  }
1462  }
1463  }
1464 
1465  if (!noError)
1466  ereport(ERROR,
1467  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1468  errmsg("record type has not been registered")));
1469  return NULL;
1470  }
1471 }
1472 
1473 /*
1474  * lookup_rowtype_tupdesc
1475  *
1476  * Given a typeid/typmod that should describe a known composite type,
1477  * return the tuple descriptor for the type. Will ereport on failure.
1478  * (Use ereport because this is reachable with user-specified OIDs,
1479  * for example from record_in().)
1480  *
1481  * Note: on success, we increment the refcount of the returned TupleDesc,
1482  * and log the reference in CurrentResourceOwner. Caller should call
1483  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1484  */
1485 TupleDesc
1487 {
1488  TupleDesc tupDesc;
1489 
1490  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1491  PinTupleDesc(tupDesc);
1492  return tupDesc;
1493 }
1494 
1495 /*
1496  * lookup_rowtype_tupdesc_noerror
1497  *
1498  * As above, but if the type is not a known composite type and noError
1499  * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1500  * type_id is passed, you'll get an ereport anyway.)
1501  */
1502 TupleDesc
1503 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1504 {
1505  TupleDesc tupDesc;
1506 
1507  tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1508  if (tupDesc != NULL)
1509  PinTupleDesc(tupDesc);
1510  return tupDesc;
1511 }
1512 
1513 /*
1514  * lookup_rowtype_tupdesc_copy
1515  *
1516  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1517  * copied into the CurrentMemoryContext and is not reference-counted.
1518  */
1519 TupleDesc
1521 {
1522  TupleDesc tmp;
1523 
1524  tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1525  return CreateTupleDescCopyConstr(tmp);
1526 }
1527 
1528 /*
1529  * Hash function for the hash table of RecordCacheEntry.
1530  */
1531 static uint32
1532 record_type_typmod_hash(const void *data, size_t size)
1533 {
1534  RecordCacheEntry *entry = (RecordCacheEntry *) data;
1535 
1536  return hashTupleDesc(entry->tupdesc);
1537 }
1538 
1539 /*
1540  * Match function for the hash table of RecordCacheEntry.
1541  */
1542 static int
1543 record_type_typmod_compare(const void *a, const void *b, size_t size)
1544 {
1545  RecordCacheEntry *left = (RecordCacheEntry *) a;
1546  RecordCacheEntry *right = (RecordCacheEntry *) b;
1547 
1548  return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1549 }
1550 
1551 /*
1552  * assign_record_type_typmod
1553  *
1554  * Given a tuple descriptor for a RECORD type, find or create a cache entry
1555  * for the type, and set the tupdesc's tdtypmod field to a value that will
1556  * identify this cache entry to lookup_rowtype_tupdesc.
1557  */
1558 void
1560 {
1561  RecordCacheEntry *recentry;
1562  TupleDesc entDesc;
1563  bool found;
1564  MemoryContext oldcxt;
1565 
1566  Assert(tupDesc->tdtypeid == RECORDOID);
1567 
1568  if (RecordCacheHash == NULL)
1569  {
1570  /* First time through: initialize the hash table */
1571  HASHCTL ctl;
1572 
1573  MemSet(&ctl, 0, sizeof(ctl));
1574  ctl.keysize = sizeof(TupleDesc); /* just the pointer */
1575  ctl.entrysize = sizeof(RecordCacheEntry);
1578  RecordCacheHash = hash_create("Record information cache", 64,
1579  &ctl,
1581 
1582  /* Also make sure CacheMemoryContext exists */
1583  if (!CacheMemoryContext)
1585  }
1586 
1587  /* Find or create a hashtable entry for this tuple descriptor */
1588  recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
1589  (void *) &tupDesc,
1590  HASH_ENTER, &found);
1591  if (found && recentry->tupdesc != NULL)
1592  {
1593  tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
1594  return;
1595  }
1596 
1597  /* Not present, so need to manufacture an entry */
1598  recentry->tupdesc = NULL;
1600 
1601  /* Look in the SharedRecordTypmodRegistry, if attached */
1602  entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
1603  if (entDesc == NULL)
1604  {
1605  /* Reference-counted local cache only. */
1606  entDesc = CreateTupleDescCopy(tupDesc);
1607  entDesc->tdrefcount = 1;
1608  entDesc->tdtypmod = NextRecordTypmod++;
1609  }
1611  RecordCacheArray[entDesc->tdtypmod] = entDesc;
1612  recentry->tupdesc = entDesc;
1613 
1614  /* Update the caller's tuple descriptor. */
1615  tupDesc->tdtypmod = entDesc->tdtypmod;
1616 
1617  MemoryContextSwitchTo(oldcxt);
1618 }
1619 
1620 /*
1621  * Return the amout of shmem required to hold a SharedRecordTypmodRegistry.
1622  * This exists only to avoid exposing private innards of
1623  * SharedRecordTypmodRegistry in a header.
1624  */
1625 size_t
1627 {
1628  return sizeof(SharedRecordTypmodRegistry);
1629 }
1630 
1631 /*
1632  * Initialize 'registry' in a pre-existing shared memory region, which must be
1633  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
1634  * bytes.
1635  *
1636  * 'area' will be used to allocate shared memory space as required for the
1637  * typemod registration. The current process, expected to be a leader process
1638  * in a parallel query, will be attached automatically and its current record
1639  * types will be loaded into *registry. While attached, all calls to
1640  * assign_record_type_typmod will use the shared registry. Worker backends
1641  * will need to attach explicitly.
1642  *
1643  * Note that this function takes 'area' and 'segment' as arguments rather than
1644  * accessing them via CurrentSession, because they aren't installed there
1645  * until after this function runs.
1646  */
1647 void
1649  dsm_segment *segment,
1650  dsa_area *area)
1651 {
1652  MemoryContext old_context;
1653  dshash_table *record_table;
1654  dshash_table *typmod_table;
1655  int32 typmod;
1656 
1658 
1659  /* We can't already be attached to a shared registry. */
1663 
1664  old_context = MemoryContextSwitchTo(TopMemoryContext);
1665 
1666  /* Create the hash table of tuple descriptors indexed by themselves. */
1667  record_table = dshash_create(area, &srtr_record_table_params, area);
1668 
1669  /* Create the hash table of tuple descriptors indexed by typmod. */
1670  typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
1671 
1672  MemoryContextSwitchTo(old_context);
1673 
1674  /* Initialize the SharedRecordTypmodRegistry. */
1675  registry->record_table_handle = dshash_get_hash_table_handle(record_table);
1676  registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
1678 
1679  /*
1680  * Copy all entries from this backend's private registry into the shared
1681  * registry.
1682  */
1683  for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
1684  {
1685  SharedTypmodTableEntry *typmod_table_entry;
1686  SharedRecordTableEntry *record_table_entry;
1687  SharedRecordTableKey record_table_key;
1688  dsa_pointer shared_dp;
1689  TupleDesc tupdesc;
1690  bool found;
1691 
1692  tupdesc = RecordCacheArray[typmod];
1693  if (tupdesc == NULL)
1694  continue;
1695 
1696  /* Copy the TupleDesc into shared memory. */
1697  shared_dp = share_tupledesc(area, tupdesc, typmod);
1698 
1699  /* Insert into the typmod table. */
1700  typmod_table_entry = dshash_find_or_insert(typmod_table,
1701  &tupdesc->tdtypmod,
1702  &found);
1703  if (found)
1704  elog(ERROR, "cannot create duplicate shared record typmod");
1705  typmod_table_entry->typmod = tupdesc->tdtypmod;
1706  typmod_table_entry->shared_tupdesc = shared_dp;
1707  dshash_release_lock(typmod_table, typmod_table_entry);
1708 
1709  /* Insert into the record table. */
1710  record_table_key.shared = false;
1711  record_table_key.u.local_tupdesc = tupdesc;
1712  record_table_entry = dshash_find_or_insert(record_table,
1713  &record_table_key,
1714  &found);
1715  if (!found)
1716  {
1717  record_table_entry->key.shared = true;
1718  record_table_entry->key.u.shared_tupdesc = shared_dp;
1719  }
1720  dshash_release_lock(record_table, record_table_entry);
1721  }
1722 
1723  /*
1724  * Set up the global state that will tell assign_record_type_typmod and
1725  * lookup_rowtype_tupdesc_internal about the shared registry.
1726  */
1727  CurrentSession->shared_record_table = record_table;
1728  CurrentSession->shared_typmod_table = typmod_table;
1730 
1731  /*
1732  * We install a detach hook in the leader, but only to handle cleanup on
1733  * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
1734  * the memory, the leader process will use a shared registry until it
1735  * exits.
1736  */
1738 }
1739 
1740 /*
1741  * Attach to 'registry', which must have been initialized already by another
1742  * backend. Future calls to assign_record_type_typmod and
1743  * lookup_rowtype_tupdesc_internal will use the shared registry until the
1744  * current session is detached.
1745  */
1746 void
1748 {
1749  MemoryContext old_context;
1750  dshash_table *record_table;
1751  dshash_table *typmod_table;
1752 
1754 
1755  /* We can't already be attached to a shared registry. */
1756  Assert(CurrentSession != NULL);
1757  Assert(CurrentSession->segment != NULL);
1758  Assert(CurrentSession->area != NULL);
1762 
1763  /*
1764  * We can't already have typmods in our local cache, because they'd clash
1765  * with those imported by SharedRecordTypmodRegistryInit. This should be
1766  * a freshly started parallel worker. If we ever support worker
1767  * recycling, a worker would need to zap its local cache in between
1768  * servicing different queries, in order to be able to call this and
1769  * synchronize typmods with a new leader; but that's problematic because
1770  * we can't be very sure that record-typmod-related state hasn't escaped
1771  * to anywhere else in the process.
1772  */
1773  Assert(NextRecordTypmod == 0);
1774 
1775  old_context = MemoryContextSwitchTo(TopMemoryContext);
1776 
1777  /* Attach to the two hash tables. */
1778  record_table = dshash_attach(CurrentSession->area,
1779  &srtr_record_table_params,
1780  registry->record_table_handle,
1781  CurrentSession->area);
1782  typmod_table = dshash_attach(CurrentSession->area,
1783  &srtr_typmod_table_params,
1784  registry->typmod_table_handle,
1785  NULL);
1786 
1787  MemoryContextSwitchTo(old_context);
1788 
1789  /*
1790  * Set up detach hook to run at worker exit. Currently this is the same
1791  * as the leader's detach hook, but in future they might need to be
1792  * different.
1793  */
1796  PointerGetDatum(registry));
1797 
1798  /*
1799  * Set up the session state that will tell assign_record_type_typmod and
1800  * lookup_rowtype_tupdesc_internal about the shared registry.
1801  */
1803  CurrentSession->shared_record_table = record_table;
1804  CurrentSession->shared_typmod_table = typmod_table;
1805 }
1806 
1807 /*
1808  * TypeCacheRelCallback
1809  * Relcache inval callback function
1810  *
1811  * Delete the cached tuple descriptor (if any) for the given rel's composite
1812  * type, or for all composite types if relid == InvalidOid. Also reset
1813  * whatever info we have cached about the composite type's comparability.
1814  *
1815  * This is called when a relcache invalidation event occurs for the given
1816  * relid. We must scan the whole typcache hash since we don't know the
1817  * type OID corresponding to the relid. We could do a direct search if this
1818  * were a syscache-flush callback on pg_type, but then we would need all
1819  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
1820  * invals against the rel's pg_type OID. The extra SI signaling could very
1821  * well cost more than we'd save, since in most usages there are not very
1822  * many entries in a backend's typcache. The risk of bugs-of-omission seems
1823  * high, too.
1824  *
1825  * Another possibility, with only localized impact, is to maintain a second
1826  * hashtable that indexes composite-type typcache entries by their typrelid.
1827  * But it's still not clear it's worth the trouble.
1828  */
1829 static void
1831 {
1833  TypeCacheEntry *typentry;
1834 
1835  /* TypeCacheHash must exist, else this callback wouldn't be registered */
1836  hash_seq_init(&status, TypeCacheHash);
1837  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1838  {
1839  if (typentry->typtype != TYPTYPE_COMPOSITE)
1840  continue; /* skip non-composites */
1841 
1842  /* Skip if no match, unless we're zapping all composite types */
1843  if (relid != typentry->typrelid && relid != InvalidOid)
1844  continue;
1845 
1846  /* Delete tupdesc if we have it */
1847  if (typentry->tupDesc != NULL)
1848  {
1849  /*
1850  * Release our refcount, and free the tupdesc if none remain.
1851  * (Can't use DecrTupleDescRefCount because this reference is not
1852  * logged in current resource owner.)
1853  */
1854  Assert(typentry->tupDesc->tdrefcount > 0);
1855  if (--typentry->tupDesc->tdrefcount == 0)
1856  FreeTupleDesc(typentry->tupDesc);
1857  typentry->tupDesc = NULL;
1858  }
1859 
1860  /* Reset equality/comparison/hashing validity information */
1861  typentry->flags = 0;
1862  }
1863 }
1864 
1865 /*
1866  * TypeCacheOpcCallback
1867  * Syscache inval callback function
1868  *
1869  * This is called when a syscache invalidation event occurs for any pg_opclass
1870  * row. In principle we could probably just invalidate data dependent on the
1871  * particular opclass, but since updates on pg_opclass are rare in production
1872  * it doesn't seem worth a lot of complication: we just mark all cached data
1873  * invalid.
1874  *
1875  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
1876  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
1877  * is not allowed to be used to add/drop the primary operators and functions
1878  * of an opclass, only cross-type members of a family; and the latter sorts
1879  * of members are not going to get cached here.
1880  */
1881 static void
1882 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
1883 {
1885  TypeCacheEntry *typentry;
1886 
1887  /* TypeCacheHash must exist, else this callback wouldn't be registered */
1888  hash_seq_init(&status, TypeCacheHash);
1889  while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1890  {
1891  /* Reset equality/comparison/hashing validity information */
1892  typentry->flags = 0;
1893  }
1894 }
1895 
1896 /*
1897  * TypeCacheConstrCallback
1898  * Syscache inval callback function
1899  *
1900  * This is called when a syscache invalidation event occurs for any
1901  * pg_constraint or pg_type row. We flush information about domain
1902  * constraints when this happens.
1903  *
1904  * It's slightly annoying that we can't tell whether the inval event was for a
1905  * domain constraint/type record or not; there's usually more update traffic
1906  * for table constraints/types than domain constraints, so we'll do a lot of
1907  * useless flushes. Still, this is better than the old no-caching-at-all
1908  * approach to domain constraints.
1909  */
1910 static void
1911 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
1912 {
1913  TypeCacheEntry *typentry;
1914 
1915  /*
1916  * Because this is called very frequently, and typically very few of the
1917  * typcache entries are for domains, we don't use hash_seq_search here.
1918  * Instead we thread all the domain-type entries together so that we can
1919  * visit them cheaply.
1920  */
1921  for (typentry = firstDomainTypeEntry;
1922  typentry != NULL;
1923  typentry = typentry->nextDomain)
1924  {
1925  /* Reset domain constraint validity information */
1927  }
1928 }
1929 
1930 
1931 /*
1932  * Check if given OID is part of the subset that's sortable by comparisons
1933  */
1934 static inline bool
1936 {
1937  Oid offset;
1938 
1939  if (arg < enumdata->bitmap_base)
1940  return false;
1941  offset = arg - enumdata->bitmap_base;
1942  if (offset > (Oid) INT_MAX)
1943  return false;
1944  return bms_is_member((int) offset, enumdata->sorted_values);
1945 }
1946 
1947 
1948 /*
1949  * compare_values_of_enum
1950  * Compare two members of an enum type.
1951  * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
1952  *
1953  * Note: currently, the enumData cache is refreshed only if we are asked
1954  * to compare an enum value that is not already in the cache. This is okay
1955  * because there is no support for re-ordering existing values, so comparisons
1956  * of previously cached values will return the right answer even if other
1957  * values have been added since we last loaded the cache.
1958  *
1959  * Note: the enum logic has a special-case rule about even-numbered versus
1960  * odd-numbered OIDs, but we take no account of that rule here; this
1961  * routine shouldn't even get called when that rule applies.
1962  */
1963 int
1965 {
1966  TypeCacheEnumData *enumdata;
1967  EnumItem *item1;
1968  EnumItem *item2;
1969 
1970  /*
1971  * Equal OIDs are certainly equal --- this case was probably handled by
1972  * our caller, but we may as well check.
1973  */
1974  if (arg1 == arg2)
1975  return 0;
1976 
1977  /* Load up the cache if first time through */
1978  if (tcache->enumData == NULL)
1979  load_enum_cache_data(tcache);
1980  enumdata = tcache->enumData;
1981 
1982  /*
1983  * If both OIDs are known-sorted, we can just compare them directly.
1984  */
1985  if (enum_known_sorted(enumdata, arg1) &&
1986  enum_known_sorted(enumdata, arg2))
1987  {
1988  if (arg1 < arg2)
1989  return -1;
1990  else
1991  return 1;
1992  }
1993 
1994  /*
1995  * Slow path: we have to identify their actual sort-order positions.
1996  */
1997  item1 = find_enumitem(enumdata, arg1);
1998  item2 = find_enumitem(enumdata, arg2);
1999 
2000  if (item1 == NULL || item2 == NULL)
2001  {
2002  /*
2003  * We couldn't find one or both values. That means the enum has
2004  * changed under us, so re-initialize the cache and try again. We
2005  * don't bother retrying the known-sorted case in this path.
2006  */
2007  load_enum_cache_data(tcache);
2008  enumdata = tcache->enumData;
2009 
2010  item1 = find_enumitem(enumdata, arg1);
2011  item2 = find_enumitem(enumdata, arg2);
2012 
2013  /*
2014  * If we still can't find the values, complain: we must have corrupt
2015  * data.
2016  */
2017  if (item1 == NULL)
2018  elog(ERROR, "enum value %u not found in cache for enum %s",
2019  arg1, format_type_be(tcache->type_id));
2020  if (item2 == NULL)
2021  elog(ERROR, "enum value %u not found in cache for enum %s",
2022  arg2, format_type_be(tcache->type_id));
2023  }
2024 
2025  if (item1->sort_order < item2->sort_order)
2026  return -1;
2027  else if (item1->sort_order > item2->sort_order)
2028  return 1;
2029  else
2030  return 0;
2031 }
2032 
2033 /*
2034  * Load (or re-load) the enumData member of the typcache entry.
2035  */
2036 static void
2038 {
2039  TypeCacheEnumData *enumdata;
2040  Relation enum_rel;
2041  SysScanDesc enum_scan;
2042  HeapTuple enum_tuple;
2043  ScanKeyData skey;
2044  EnumItem *items;
2045  int numitems;
2046  int maxitems;
2047  Oid bitmap_base;
2048  Bitmapset *bitmap;
2049  MemoryContext oldcxt;
2050  int bm_size,
2051  start_pos;
2052 
2053  /* Check that this is actually an enum */
2054  if (tcache->typtype != TYPTYPE_ENUM)
2055  ereport(ERROR,
2056  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2057  errmsg("%s is not an enum",
2058  format_type_be(tcache->type_id))));
2059 
2060  /*
2061  * Read all the information for members of the enum type. We collect the
2062  * info in working memory in the caller's context, and then transfer it to
2063  * permanent memory in CacheMemoryContext. This minimizes the risk of
2064  * leaking memory from CacheMemoryContext in the event of an error partway
2065  * through.
2066  */
2067  maxitems = 64;
2068  items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2069  numitems = 0;
2070 
2071  /* Scan pg_enum for the members of the target enum type. */
2072  ScanKeyInit(&skey,
2074  BTEqualStrategyNumber, F_OIDEQ,
2075  ObjectIdGetDatum(tcache->type_id));
2076 
2078  enum_scan = systable_beginscan(enum_rel,
2080  true, NULL,
2081  1, &skey);
2082 
2083  while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2084  {
2085  Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2086 
2087  if (numitems >= maxitems)
2088  {
2089  maxitems *= 2;
2090  items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2091  }
2092  items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
2093  items[numitems].sort_order = en->enumsortorder;
2094  numitems++;
2095  }
2096 
2097  systable_endscan(enum_scan);
2098  heap_close(enum_rel, AccessShareLock);
2099 
2100  /* Sort the items into OID order */
2101  qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2102 
2103  /*
2104  * Here, we create a bitmap listing a subset of the enum's OIDs that are
2105  * known to be in order and can thus be compared with just OID comparison.
2106  *
2107  * The point of this is that the enum's initial OIDs were certainly in
2108  * order, so there is some subset that can be compared via OID comparison;
2109  * and we'd rather not do binary searches unnecessarily.
2110  *
2111  * This is somewhat heuristic, and might identify a subset of OIDs that
2112  * isn't exactly what the type started with. That's okay as long as the
2113  * subset is correctly sorted.
2114  */
2115  bitmap_base = InvalidOid;
2116  bitmap = NULL;
2117  bm_size = 1; /* only save sets of at least 2 OIDs */
2118 
2119  for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2120  {
2121  /*
2122  * Identify longest sorted subsequence starting at start_pos
2123  */
2124  Bitmapset *this_bitmap = bms_make_singleton(0);
2125  int this_bm_size = 1;
2126  Oid start_oid = items[start_pos].enum_oid;
2127  float4 prev_order = items[start_pos].sort_order;
2128  int i;
2129 
2130  for (i = start_pos + 1; i < numitems; i++)
2131  {
2132  Oid offset;
2133 
2134  offset = items[i].enum_oid - start_oid;
2135  /* quit if bitmap would be too large; cutoff is arbitrary */
2136  if (offset >= 8192)
2137  break;
2138  /* include the item if it's in-order */
2139  if (items[i].sort_order > prev_order)
2140  {
2141  prev_order = items[i].sort_order;
2142  this_bitmap = bms_add_member(this_bitmap, (int) offset);
2143  this_bm_size++;
2144  }
2145  }
2146 
2147  /* Remember it if larger than previous best */
2148  if (this_bm_size > bm_size)
2149  {
2150  bms_free(bitmap);
2151  bitmap_base = start_oid;
2152  bitmap = this_bitmap;
2153  bm_size = this_bm_size;
2154  }
2155  else
2156  bms_free(this_bitmap);
2157 
2158  /*
2159  * Done if it's not possible to find a longer sequence in the rest of
2160  * the list. In typical cases this will happen on the first
2161  * iteration, which is why we create the bitmaps on the fly instead of
2162  * doing a second pass over the list.
2163  */
2164  if (bm_size >= (numitems - start_pos - 1))
2165  break;
2166  }
2167 
2168  /* OK, copy the data into CacheMemoryContext */
2170  enumdata = (TypeCacheEnumData *)
2171  palloc(offsetof(TypeCacheEnumData, enum_values) +
2172  numitems * sizeof(EnumItem));
2173  enumdata->bitmap_base = bitmap_base;
2174  enumdata->sorted_values = bms_copy(bitmap);
2175  enumdata->num_values = numitems;
2176  memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2177  MemoryContextSwitchTo(oldcxt);
2178 
2179  pfree(items);
2180  bms_free(bitmap);
2181 
2182  /* And link the finished cache struct into the typcache */
2183  if (tcache->enumData != NULL)
2184  pfree(tcache->enumData);
2185  tcache->enumData = enumdata;
2186 }
2187 
2188 /*
2189  * Locate the EnumItem with the given OID, if present
2190  */
2191 static EnumItem *
2193 {
2194  EnumItem srch;
2195 
2196  /* On some versions of Solaris, bsearch of zero items dumps core */
2197  if (enumdata->num_values <= 0)
2198  return NULL;
2199 
2200  srch.enum_oid = arg;
2201  return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2202  sizeof(EnumItem), enum_oid_cmp);
2203 }
2204 
2205 /*
2206  * qsort comparison function for OID-ordered EnumItems
2207  */
2208 static int
2209 enum_oid_cmp(const void *left, const void *right)
2210 {
2211  const EnumItem *l = (const EnumItem *) left;
2212  const EnumItem *r = (const EnumItem *) right;
2213 
2214  if (l->enum_oid < r->enum_oid)
2215  return -1;
2216  else if (l->enum_oid > r->enum_oid)
2217  return 1;
2218  else
2219  return 0;
2220 }
2221 
2222 /*
2223  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2224  * to the given value and return a dsa_pointer.
2225  */
2226 static dsa_pointer
2227 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2228 {
2229  dsa_pointer shared_dp;
2230  TupleDesc shared;
2231 
2232  shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2233  shared = (TupleDesc) dsa_get_address(area, shared_dp);
2234  TupleDescCopy(shared, tupdesc);
2235  shared->tdtypmod = typmod;
2236 
2237  return shared_dp;
2238 }
2239 
2240 /*
2241  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2242  * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2243  * Tuple descriptors returned by this function are not reference counted, and
2244  * will exist at least as long as the current backend remained attached to the
2245  * current session.
2246  */
2247 static TupleDesc
2249 {
2250  TupleDesc result;
2252  SharedRecordTableEntry *record_table_entry;
2253  SharedTypmodTableEntry *typmod_table_entry;
2254  dsa_pointer shared_dp;
2255  bool found;
2256  uint32 typmod;
2257 
2258  /* If not even attached, nothing to do. */
2260  return NULL;
2261 
2262  /* Try to find a matching tuple descriptor in the record table. */
2263  key.shared = false;
2264  key.u.local_tupdesc = tupdesc;
2265  record_table_entry = (SharedRecordTableEntry *)
2267  if (record_table_entry)
2268  {
2269  Assert(record_table_entry->key.shared);
2271  record_table_entry);
2272  result = (TupleDesc)
2274  record_table_entry->key.u.shared_tupdesc);
2275  Assert(result->tdrefcount == -1);
2276 
2277  return result;
2278  }
2279 
2280  /* Allocate a new typmod number. This will be wasted if we error out. */
2281  typmod = (int)
2283  1);
2284 
2285  /* Copy the TupleDesc into shared memory. */
2286  shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2287 
2288  /*
2289  * Create an entry in the typmod table so that others will understand this
2290  * typmod number.
2291  */
2292  PG_TRY();
2293  {
2294  typmod_table_entry = (SharedTypmodTableEntry *)
2296  &typmod, &found);
2297  if (found)
2298  elog(ERROR, "cannot create duplicate shared record typmod");
2299  }
2300  PG_CATCH();
2301  {
2302  dsa_free(CurrentSession->area, shared_dp);
2303  PG_RE_THROW();
2304  }
2305  PG_END_TRY();
2306  typmod_table_entry->typmod = typmod;
2307  typmod_table_entry->shared_tupdesc = shared_dp;
2309  typmod_table_entry);
2310 
2311  /*
2312  * Finally create an entry in the record table so others with matching
2313  * tuple descriptors can reuse the typmod.
2314  */
2315  record_table_entry = (SharedRecordTableEntry *)
2317  &found);
2318  if (found)
2319  {
2320  /*
2321  * Someone concurrently inserted a matching tuple descriptor since the
2322  * first time we checked. Use that one instead.
2323  */
2325  record_table_entry);
2326 
2327  /* Might as well free up the space used by the one we created. */
2329  &typmod);
2330  Assert(found);
2331  dsa_free(CurrentSession->area, shared_dp);
2332 
2333  /* Return the one we found. */
2334  Assert(record_table_entry->key.shared);
2335  result = (TupleDesc)
2337  record_table_entry->key.shared);
2338  Assert(result->tdrefcount == -1);
2339 
2340  return result;
2341  }
2342 
2343  /* Store it and return it. */
2344  record_table_entry->key.shared = true;
2345  record_table_entry->key.u.shared_tupdesc = shared_dp;
2347  record_table_entry);
2348  result = (TupleDesc)
2349  dsa_get_address(CurrentSession->area, shared_dp);
2350  Assert(result->tdrefcount == -1);
2351 
2352  return result;
2353 }
2354 
2355 /*
2356  * On-DSM-detach hook to forget about the current shared record typmod
2357  * infrastructure. This is currently used by both leader and workers.
2358  */
2359 static void
2361 {
2362  /* Be cautious here: maybe we didn't finish initializing. */
2363  if (CurrentSession->shared_record_table != NULL)
2364  {
2367  }
2368  if (CurrentSession->shared_typmod_table != NULL)
2369  {
2372  }
2374 }
MemoryContextCallback callback
Definition: typcache.h:145
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:1964
struct TypeCacheEnumData TypeCacheEnumData
MemoryContextCallbackFunction func
Definition: palloc.h:49
struct TypeCacheEnumData * enumData
Definition: typcache.h:107
#define NIL
Definition: pg_list.h:69
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1265
#define TYPTYPE_DOMAIN
Definition: pg_type.h:722
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:734
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:301
void * stringToNode(char *str)
Definition: read.c:38
FormData_pg_range * Form_pg_range
Definition: pg_range.h:49
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:90
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:102
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:200
#define BTORDER_PROC
Definition: nbtree.h:229
Oid tdtypeid
Definition: tupdesc.h:74
DomainConstraintCache * dcc
Definition: typcache.h:144
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:125
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:93
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:1391
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:499
#define GETSTRUCT(TUP)
Definition: htup_details.h:656
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:111
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:263
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
Oid hash_opintype
Definition: typcache.h:55
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:84
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1173
#define HASH_ELEM
Definition: hsearch.h:87
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:79
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1486
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:128
#define RelationGetDescr(relation)
Definition: rel.h:428
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:91
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1076
MemoryContext dccContext
Definition: typcache.c:114
DomainConstraintType constrainttype
Definition: execnodes.h:809
dsa_pointer dshash_table_handle
Definition: dshash.h:24
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:97
DomainConstraintCache * domainData
Definition: typcache.h:98
#define TYPTYPE_COMPOSITE
Definition: pg_type.h:721
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2248
#define PointerGetDatum(X)
Definition: postgres.h:562
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:317
struct RecordCacheEntry RecordCacheEntry
struct TypeCacheEntry TypeCacheEntry
#define HTEqualStrategyNumber
Definition: hash.h:299
#define TupleDescAttr(tupdesc, i)
Definition: tupdesc.h:84
char * pstrdup(const char *in)
Definition: mcxt.c:1076
Session * CurrentSession
Definition: session.c:48
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:119
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:175
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:243
dshash_table * shared_record_table
Definition: session.h:32
#define BTREE_AM_OID
Definition: pg_am.h:70
Expr * expression_planner(Expr *expr)
Definition: planner.c:5987
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:121
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define AccessShareLock
Definition: lockdefs.h:36
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:202
Size entrysize
Definition: hsearch.h:73
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:228
#define TYPECACHE_EQ_OPR
Definition: typcache.h:114
int errcode(int sqlerrcode)
Definition: elog.c:575
#define HASHEXTENDED_PROC
Definition: hash.h:315
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: heapam.c:1266
#define MemSet(start, val, len)
Definition: c.h:846
char * format_type_be(Oid type_oid)
Definition: format_type.c:94
uint32 hashTupleDesc(TupleDesc desc)
Definition: tupdesc.c:475
return result
Definition: formatting.c:1633
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1052
static HTAB * RecordCacheHash
Definition: typcache.c:260
#define heap_close(r, l)
Definition: heapam.h:97
SharedRecordTableKey key
Definition: typcache.c:185
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
FormData_pg_type * Form_pg_type
Definition: pg_type.h:233
Form_pg_class rd_rel
Definition: rel.h:114
unsigned int Oid
Definition: postgres_ext.h:31
#define RECORD_EQ_OP
Definition: pg_operator.h:1720
#define EnumTypIdLabelIndexId
Definition: indexing.h:157
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1037
int16 typlen
Definition: typcache.h:37
#define TupleDescSize(src)
Definition: tupdesc.h:95
#define OidIsValid(objectId)
Definition: c.h:532
bool typbyval
Definition: typcache.h:38
#define HASH_AM_OID
Definition: pg_am.h:73
#define Anum_pg_constraint_conbin
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:561
#define ConstraintTypidIndexId
Definition: indexing.h:128
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:1747
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:328
int natts
Definition: tupdesc.h:73
#define SearchSysCache1(cacheId, key1)
Definition: syscache.h:159
uint64 dsa_pointer
Definition: dsa.h:62
int32 tdtypmod
Definition: tupdesc.h:75
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:581
signed int int32
Definition: c.h:246
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:1559
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:1648
static TupleDesc * RecordCacheArray
Definition: typcache.c:262
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1431
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1222
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:92
#define CONSTRAINT_CHECK
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:924
FmgrInfo cmp_proc_finfo
Definition: typcache.h:71
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1312
struct TypeCacheEntry * nextDomain
Definition: typcache.h:110
Definition: dynahash.c:208
dsa_pointer shared_tupdesc
Definition: typcache.c:195
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:416
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:89
pg_atomic_uint32 next_typmod
Definition: typcache.c:160
Bitmapset * sorted_values
Definition: typcache.c:128
void pfree(void *pointer)
Definition: mcxt.c:949
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:86
#define ObjectIdGetDatum(X)
Definition: postgres.h:513
#define ERROR
Definition: elog.h:43
#define EnumRelationId
Definition: pg_enum.h:32
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:95
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:1935
#define ARRAY_LT_OP
Definition: pg_operator.h:781
dsa_area * area
Definition: session.h:28
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:179
char * c
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:362
char typstorage
Definition: typcache.h:40
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:1830
#define RegProcedureIsValid(p)
Definition: c.h:534
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1257
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:302
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:163
ExprState * check_exprstate
Definition: execnodes.h:812
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:46
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:187
Oid hash_extended_proc
Definition: typcache.h:61
unsigned int uint32
Definition: c.h:258
FmgrInfo hash_proc_finfo
Definition: typcache.h:72
#define RECORD_LT_OP
Definition: pg_operator.h:1725
#define RECORDOID
Definition: pg_type.h:680
#define TYPECACHE_GT_OPR
Definition: typcache.h:116
MemoryContext CurrentMemoryContext
Definition: mcxt.c:37
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1503
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:131
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
#define HASHSTANDARD_PROC
Definition: hash.h:314
#define ereport(elevel, rest)
Definition: elog.h:122
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:123
dsa_pointer shared_tupdesc
Definition: typcache.c:174
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2192
#define IsParallelWorker()
Definition: parallel.h:52
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:94
MemoryContext TopMemoryContext
Definition: mcxt.c:43
FmgrInfo rng_canonical_finfo
Definition: typcache.h:91
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:130
#define Anum_pg_constraint_contypid
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:252
MemoryContext refctx
Definition: typcache.h:139
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:88
#define RECORD_GT_OP
Definition: pg_operator.h:1728
List * lappend(List *list, void *datum)
Definition: list.c:128
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1273
#define TYPTYPE_RANGE
Definition: pg_type.h:725
float float4
Definition: c.h:374
#define HASH_BLOBS
Definition: hsearch.h:88
#define TextDatumGetCString(d)
Definition: builtins.h:92
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:73
static int32 RecordCacheArrayLen
Definition: typcache.c:263
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:2360
MemoryContext AllocSetContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: aset.c:322
struct SharedTypmodTableEntry SharedTypmodTableEntry
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1389
static int32 NextRecordTypmod
Definition: typcache.c:264
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
Oid enum_oid
Definition: typcache.c:121
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:127
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
uintptr_t Datum
Definition: postgres.h:372
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:1117
Oid btree_opintype
Definition: typcache.h:53
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:196
Relation heap_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1290
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1370
Size keysize
Definition: hsearch.h:72
struct SharedRecordTableKey SharedRecordTableKey
TupleDesc rd_att
Definition: rel.h:115
HashCompareFunc match
Definition: hsearch.h:75
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:741
FmgrInfo eq_opr_finfo
Definition: typcache.h:70
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:305
#define InvalidOid
Definition: postgres_ext.h:36
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:762
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1094
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:148
Oid fn_oid
Definition: fmgr.h:59
struct tupleDesc * TupleDesc
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:1532
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:1626
dshash_table * shared_typmod_table
Definition: session.h:33
union SharedRecordTableKey::@36 u
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:339
#define TYPECACHE_CMP_PROC
Definition: typcache.h:117
List * lcons(void *datum, List *list)
Definition: list.c:259
#define PG_CATCH()
Definition: elog.h:293
char typtype
Definition: typcache.h:41
void bms_free(Bitmapset *a)
Definition: bitmapset.c:201
#define makeNode(_type_)
Definition: nodes.h:557
FormData_pg_constraint * Form_pg_constraint
#define HeapTupleIsValid(tuple)
Definition: htup.h:77
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:590
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:502
#define Assert(condition)
Definition: c.h:664
#define lfirst(lc)
Definition: pg_list.h:106
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1097
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:319
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:744
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:90
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2037
#define HASH_COMPARE
Definition: hsearch.h:90
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1135
#define ARRAY_GT_OP
Definition: pg_operator.h:784
TypeCacheEntry * tcache
Definition: typcache.h:140
void CreateCacheMemoryContext(void)
Definition: catcache.c:511
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:82
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2227
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:96
Oid rng_collation
Definition: typcache.h:89
#define PG_RE_THROW()
Definition: elog.h:314
dshash_table_handle record_table_handle
Definition: typcache.c:156
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:698
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1304
#define ARRAY_EQ_OP
Definition: pg_operator.h:776
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1047
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:962
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:92
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1249
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:251
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:109
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:825
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2557
#define Anum_pg_enum_enumtypid
Definition: pg_enum.h:53
float4 sort_order
Definition: typcache.c:122
Definition: dsa.c:354
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:196
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:812
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:88
char typalign
Definition: typcache.h:39
void * palloc(Size size)
Definition: mcxt.c:848
int errmsg(const char *fmt,...)
Definition: elog.c:797
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2209
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:706
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:1543
int tdrefcount
Definition: tupdesc.h:77
int i
#define TYPECACHE_LT_OPR
Definition: typcache.h:115
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:85
#define NameStr(name)
Definition: c.h:493
#define TYPTYPE_ENUM
Definition: pg_type.h:723
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:430
bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:337
TupleDesc local_tupdesc
Definition: typcache.c:173
void * arg
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: heapam.c:1120
TupleDesc tupdesc
Definition: typcache.c:146
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:265
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:113
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:120
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:385
#define ConstraintRelationId
Definition: pg_constraint.h:29
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:83
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:234
#define elog
Definition: elog.h:219
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:124
#define TYPECACHE_DOMAIN_INFO
Definition: typcache.h:126
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
#define qsort(a, b, c, d)
Definition: port.h:443
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1296
dshash_table_handle typmod_table_handle
Definition: typcache.c:158
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:87
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
#define PG_TRY()
Definition: elog.h:284
#define BTLessStrategyNumber
Definition: stratnum.h:29
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1065
struct SharedRecordTableEntry SharedRecordTableEntry
Definition: pg_list.h:45
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:420
TupleDesc tupDesc
Definition: typcache.h:80
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:1911
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1401
static HTAB * TypeCacheHash
Definition: typcache.c:76
long val
Definition: informix.c:689
#define TYPECACHE_HASH_PROC
Definition: typcache.h:118
#define TYPECACHE_TUPDESC
Definition: typcache.h:122
#define PG_END_TRY()
Definition: elog.h:300
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define offsetof(type, field)
Definition: c.h:549
dsm_segment * segment
Definition: session.h:27
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:1882
HashValueFunc hash
Definition: hsearch.h:74
#define HASH_FUNCTION
Definition: hsearch.h:89
#define dsa_allocate(area, size)
Definition: dsa.h:84
MemoryContext CacheMemoryContext
Definition: mcxt.c:46
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1520
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1069