PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
typcache.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Core data extracted from the pg_type row is updated
27 * when we detect updates to pg_type. Information dependent on opclasses is
28 * cleared if we detect updates to pg_opclass. We also support clearing the
29 * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 * since those may need to change as a consequence of ALTER TABLE. Domain
31 * constraint changes are also tracked properly.
32 *
33 *
34 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
35 * Portions Copyright (c) 1994, Regents of the University of California
36 *
37 * IDENTIFICATION
38 * src/backend/utils/cache/typcache.c
39 *
40 *-------------------------------------------------------------------------
41 */
42#include "postgres.h"
43
44#include <limits.h>
45
46#include "access/hash.h"
47#include "access/htup_details.h"
48#include "access/nbtree.h"
49#include "access/parallel.h"
50#include "access/relation.h"
51#include "access/session.h"
52#include "access/table.h"
53#include "catalog/pg_am.h"
55#include "catalog/pg_enum.h"
56#include "catalog/pg_operator.h"
57#include "catalog/pg_range.h"
58#include "catalog/pg_type.h"
59#include "commands/defrem.h"
60#include "common/int.h"
61#include "executor/executor.h"
62#include "lib/dshash.h"
63#include "optimizer/optimizer.h"
64#include "port/pg_bitutils.h"
65#include "storage/lwlock.h"
66#include "utils/builtins.h"
67#include "utils/catcache.h"
68#include "utils/fmgroids.h"
70#include "utils/inval.h"
71#include "utils/lsyscache.h"
72#include "utils/memutils.h"
73#include "utils/rel.h"
74#include "utils/syscache.h"
75#include "utils/typcache.h"
76
77
78/* The main type cache hashtable searched by lookup_type_cache */
79static HTAB *TypeCacheHash = NULL;
80
81/*
82 * The mapping of relation's OID to the corresponding composite type OID.
83 * We're keeping the map entry when the corresponding typentry has something
84 * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 */
88
90{
91 Oid relid; /* OID of the relation */
92 Oid composite_typid; /* OID of the relation's composite type */
94
95/* List of type cache entries for domain types */
97
98/* Private flag bits in the TypeCacheEntry.flags field */
99#define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100#define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101#define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102#define TCFLAGS_CHECKED_EQ_OPR 0x000008
103#define TCFLAGS_CHECKED_LT_OPR 0x000010
104#define TCFLAGS_CHECKED_GT_OPR 0x000020
105#define TCFLAGS_CHECKED_CMP_PROC 0x000040
106#define TCFLAGS_CHECKED_HASH_PROC 0x000080
107#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108#define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109#define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110#define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111#define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113#define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114#define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115#define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116#define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120
121/* The flags associated with equality/comparison/hashing are all but these: */
122#define TCFLAGS_OPERATOR_FLAGS \
123 (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126
127/*
128 * Data stored about a domain type's constraints. Note that we do not create
129 * this struct for the common case of a constraint-less domain; we just set
130 * domainData to NULL to indicate that.
131 *
132 * Within a DomainConstraintCache, we store expression plan trees, but the
133 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 * When needed, expression evaluation nodes are built by flat-copying the
135 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 * Such a node tree is not part of the DomainConstraintCache, but is
137 * considered to belong to a DomainConstraintRef.
138 */
140{
141 List *constraints; /* list of DomainConstraintState nodes */
142 MemoryContext dccContext; /* memory context holding all associated data */
143 long dccRefCount; /* number of references to this struct */
144};
145
146/* Private information to support comparisons of enum values */
/* One (enum value OID, sort position) pair, used when comparing enum values. */
147typedef struct
148{
149 Oid enum_oid; /* OID of one enum value */
150 float4 sort_order; /* its sort position */
151} EnumItem;
152
153typedef struct TypeCacheEnumData
154{
155 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 int num_values; /* total number of values in enum */
160
161/*
162 * We use a separate table for storing the definitions of non-anonymous
163 * record types. Once defined, a record type will be remembered for the
164 * life of the backend. Subsequent uses of the "same" record type (where
165 * sameness means equalRowTypes) will refer to the existing table entry.
166 *
167 * Stored record types are remembered in a linear array of TupleDescs,
168 * which can be indexed quickly with the assigned typmod. There is also
169 * a hash table to speed searches for matching TupleDescs.
170 */
171
172typedef struct RecordCacheEntry
173{
176
177/*
178 * To deal with non-anonymous record types that are exchanged by backends
179 * involved in a parallel query, we also need a shared version of the above.
180 */
182{
183 /* A hash table for finding a matching TupleDesc. */
185 /* A hash table for finding a TupleDesc by typmod. */
187 /* A source of new record typmod numbers. */
189};
190
191/*
192 * When using shared tuple descriptors as hash table keys we need a way to be
193 * able to search for an equal shared TupleDesc using a backend-local
194 * TupleDesc. So we use this type which can hold either, and hash and compare
195 * functions that know how to handle both.
196 */
198{
199 union
200 {
203 } u;
204 bool shared;
206
207/*
208 * The shared version of RecordCacheEntry. This lets us look up a typmod
209 * using a TupleDesc which may be in local or shared memory.
210 */
212{
215
216/*
217 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 * up a TupleDesc in shared memory using a typmod.
219 */
221{
225
229
230/*
231 * A comparator function for SharedRecordTableKey.
232 */
233static int
234shared_record_table_compare(const void *a, const void *b, size_t size,
235 void *arg)
236{
 /* 'arg' carries the dsa_area in which shared tupdescs live */
237 dsa_area *area = (dsa_area *) arg;
 /*
  * NOTE(review): the declarations of k1/k2 (presumably casts of a/b to
  * SharedRecordTableKey pointers) are elided in this rendering — confirm
  * against the full source.
  */
240 TupleDesc t1;
241 TupleDesc t2;
242
 /* Resolve each key to a backend-local TupleDesc pointer before comparing. */
243 if (k1->shared)
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
250 else
251 t2 = k2->u.local_tupdesc;
252
 /* Hash-table compare convention: 0 means "equal", nonzero otherwise. */
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}
255
256/*
257 * A hash function for SharedRecordTableKey.
258 */
259static uint32
260shared_record_table_hash(const void *a, size_t size, void *arg)
261{
 /* 'arg' carries the dsa_area in which shared tupdescs live */
262 dsa_area *area = (dsa_area *) arg;
 /*
  * NOTE(review): the declaration of k (presumably a cast of 'a' to a
  * SharedRecordTableKey pointer) and the shared-case assignment of 't'
  * are elided in this rendering — confirm against the full source.
  */
264 TupleDesc t;
265
 /* Resolve the key to a backend-local TupleDesc pointer, then hash it. */
266 if (k->shared)
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}
273
274/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
276 sizeof(SharedRecordTableKey), /* unused */
282};
283
284/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
286 sizeof(uint32),
292};
293
294/* hashtable for recognizing registered record types */
295static HTAB *RecordCacheHash = NULL;
296
298{
302
303/* array of info about registered record types, indexed by assigned typmod */
305static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306static int32 NextRecordTypmod = 0; /* number of entries used */
307
308/*
309 * Process-wide counter for generating unique tupledesc identifiers.
310 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 */
314
315static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316static void load_rangetype_info(TypeCacheEntry *typentry);
317static void load_multirangetype_info(TypeCacheEntry *typentry);
318static void load_domaintype_info(TypeCacheEntry *typentry);
319static int dcs_cmp(const void *a, const void *b);
321static void dccref_deletion_callback(void *arg);
322static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323static bool array_element_has_equality(TypeCacheEntry *typentry);
324static bool array_element_has_compare(TypeCacheEntry *typentry);
325static bool array_element_has_hashing(TypeCacheEntry *typentry);
328static bool record_fields_have_equality(TypeCacheEntry *typentry);
329static bool record_fields_have_compare(TypeCacheEntry *typentry);
330static bool record_fields_have_hashing(TypeCacheEntry *typentry);
332static void cache_record_field_properties(TypeCacheEntry *typentry);
333static bool range_element_has_hashing(TypeCacheEntry *typentry);
339static void TypeCacheRelCallback(Datum arg, Oid relid);
340static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
341static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
342static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
343static void load_enum_cache_data(TypeCacheEntry *tcache);
345static int enum_oid_cmp(const void *left, const void *right);
347 Datum datum);
350 uint32 typmod);
353
354
355/*
356 * Hash function compatible with one-arg system cache hash function.
357 */
358static uint32
359type_cache_syshash(const void *key, Size keysize)
360{
361 Assert(keysize == sizeof(Oid));
362 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363}
364
365/*
366 * lookup_type_cache
367 *
368 * Fetch the type cache entry for the specified datatype, and make sure that
369 * all the fields requested by bits in 'flags' are valid.
370 *
371 * The result is never NULL --- we will ereport() if the passed type OID is
372 * invalid. Note however that we may fail to find one or more of the
373 * values requested by 'flags'; the caller needs to check whether the fields
374 * are InvalidOid or not.
375 *
376 * Note that while filling TypeCacheEntry we might process concurrent
377 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
378 * invalidated. In this case, we typically only clear flags while values are
379 * still available for the caller. It's expected that the caller holds
380 * enough locks on type-depending objects that the values are still relevant.
381 * It's also important that the tupdesc is filled after all other
382 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
383 * invalidated during the lookup_type_cache() call.
384 */
386lookup_type_cache(Oid type_id, int flags)
387{
388 TypeCacheEntry *typentry;
389 bool found;
390 int in_progress_offset;
391
392 if (TypeCacheHash == NULL)
393 {
394 /* First time through: initialize the hash table */
395 HASHCTL ctl;
396 int allocsize;
397
398 ctl.keysize = sizeof(Oid);
399 ctl.entrysize = sizeof(TypeCacheEntry);
400
401 /*
402 * TypeCacheEntry takes hash value from the system cache. For
403 * TypeCacheHash we use the same hash in order to speedup search by
404 * hash value. This is used by hash_seq_init_with_hash_value().
405 */
406 ctl.hash = type_cache_syshash;
407
408 TypeCacheHash = hash_create("Type information cache", 64,
410
412
413 ctl.keysize = sizeof(Oid);
414 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
417
418 /* Also set up callbacks for SI invalidations */
423
424 /* Also make sure CacheMemoryContext exists */
427
428 /*
429 * reserve enough in_progress_list slots for many cases
430 */
431 allocsize = 4;
434 allocsize * sizeof(*in_progress_list));
435 in_progress_list_maxlen = allocsize;
436 }
437
438 Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
439
440 /* Register to catch invalidation messages */
442 {
443 int allocsize;
444
445 allocsize = in_progress_list_maxlen * 2;
447 allocsize * sizeof(*in_progress_list));
448 in_progress_list_maxlen = allocsize;
449 }
450 in_progress_offset = in_progress_list_len++;
451 in_progress_list[in_progress_offset] = type_id;
452
453 /* Try to look up an existing entry */
455 &type_id,
456 HASH_FIND, NULL);
457 if (typentry == NULL)
458 {
459 /*
460 * If we didn't find one, we want to make one. But first look up the
461 * pg_type row, just to make sure we don't make a cache entry for an
462 * invalid type OID. If the type OID is not valid, present a
463 * user-facing error, since some code paths such as domain_in() allow
464 * this function to be reached with a user-supplied OID.
465 */
466 HeapTuple tp;
467 Form_pg_type typtup;
468
469 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
470 if (!HeapTupleIsValid(tp))
472 (errcode(ERRCODE_UNDEFINED_OBJECT),
473 errmsg("type with OID %u does not exist", type_id)));
474 typtup = (Form_pg_type) GETSTRUCT(tp);
475 if (!typtup->typisdefined)
477 (errcode(ERRCODE_UNDEFINED_OBJECT),
478 errmsg("type \"%s\" is only a shell",
479 NameStr(typtup->typname))));
480
481 /* Now make the typcache entry */
483 &type_id,
484 HASH_ENTER, &found);
485 Assert(!found); /* it wasn't there a moment ago */
486
487 MemSet(typentry, 0, sizeof(TypeCacheEntry));
488
489 /* These fields can never change, by definition */
490 typentry->type_id = type_id;
491 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492
493 /* Keep this part in sync with the code below */
494 typentry->typlen = typtup->typlen;
495 typentry->typbyval = typtup->typbyval;
496 typentry->typalign = typtup->typalign;
497 typentry->typstorage = typtup->typstorage;
498 typentry->typtype = typtup->typtype;
499 typentry->typrelid = typtup->typrelid;
500 typentry->typsubscript = typtup->typsubscript;
501 typentry->typelem = typtup->typelem;
502 typentry->typarray = typtup->typarray;
503 typentry->typcollation = typtup->typcollation;
504 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
505
506 /* If it's a domain, immediately thread it into the domain cache list */
507 if (typentry->typtype == TYPTYPE_DOMAIN)
508 {
510 firstDomainTypeEntry = typentry;
511 }
512
513 ReleaseSysCache(tp);
514 }
515 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
516 {
517 /*
518 * We have an entry, but its pg_type row got changed, so reload the
519 * data obtained directly from pg_type.
520 */
521 HeapTuple tp;
522 Form_pg_type typtup;
523
524 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
525 if (!HeapTupleIsValid(tp))
527 (errcode(ERRCODE_UNDEFINED_OBJECT),
528 errmsg("type with OID %u does not exist", type_id)));
529 typtup = (Form_pg_type) GETSTRUCT(tp);
530 if (!typtup->typisdefined)
532 (errcode(ERRCODE_UNDEFINED_OBJECT),
533 errmsg("type \"%s\" is only a shell",
534 NameStr(typtup->typname))));
535
536 /*
537 * Keep this part in sync with the code above. Many of these fields
538 * shouldn't ever change, particularly typtype, but copy 'em anyway.
539 */
540 typentry->typlen = typtup->typlen;
541 typentry->typbyval = typtup->typbyval;
542 typentry->typalign = typtup->typalign;
543 typentry->typstorage = typtup->typstorage;
544 typentry->typtype = typtup->typtype;
545 typentry->typrelid = typtup->typrelid;
546 typentry->typsubscript = typtup->typsubscript;
547 typentry->typelem = typtup->typelem;
548 typentry->typarray = typtup->typarray;
549 typentry->typcollation = typtup->typcollation;
550 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
551
552 ReleaseSysCache(tp);
553 }
554
555 /*
556 * Look up opclasses if we haven't already and any dependent info is
557 * requested.
558 */
564 {
565 Oid opclass;
566
567 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
568 if (OidIsValid(opclass))
569 {
570 typentry->btree_opf = get_opclass_family(opclass);
571 typentry->btree_opintype = get_opclass_input_type(opclass);
572 }
573 else
574 {
575 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
576 }
577
578 /*
579 * Reset information derived from btree opclass. Note in particular
580 * that we'll redetermine the eq_opr even if we previously found one;
581 * this matters in case a btree opclass has been added to a type that
582 * previously had only a hash opclass.
583 */
584 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
589 }
590
591 /*
592 * If we need to look up equality operator, and there's no btree opclass,
593 * force lookup of hash opclass.
594 */
595 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
596 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
597 typentry->btree_opf == InvalidOid)
599
604 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
605 {
606 Oid opclass;
607
608 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
609 if (OidIsValid(opclass))
610 {
611 typentry->hash_opf = get_opclass_family(opclass);
612 typentry->hash_opintype = get_opclass_input_type(opclass);
613 }
614 else
615 {
616 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
617 }
618
619 /*
620 * Reset information derived from hash opclass. We do *not* reset the
621 * eq_opr; if we already found one from the btree opclass, that
622 * decision is still good.
623 */
624 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
627 }
628
629 /*
630 * Look for requested operators and functions, if we haven't already.
631 */
632 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
633 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
634 {
635 Oid eq_opr = InvalidOid;
636
637 if (typentry->btree_opf != InvalidOid)
638 eq_opr = get_opfamily_member(typentry->btree_opf,
639 typentry->btree_opintype,
640 typentry->btree_opintype,
642 if (eq_opr == InvalidOid &&
643 typentry->hash_opf != InvalidOid)
644 eq_opr = get_opfamily_member(typentry->hash_opf,
645 typentry->hash_opintype,
646 typentry->hash_opintype,
648
649 /*
650 * If the proposed equality operator is array_eq or record_eq, check
651 * to see if the element type or column types support equality. If
652 * not, array_eq or record_eq would fail at runtime, so we don't want
653 * to report that the type has equality. (We can omit similar
654 * checking for ranges and multiranges because ranges can't be created
655 * in the first place unless their subtypes support equality.)
656 */
657 if (eq_opr == ARRAY_EQ_OP &&
659 eq_opr = InvalidOid;
660 else if (eq_opr == RECORD_EQ_OP &&
662 eq_opr = InvalidOid;
663
664 /* Force update of eq_opr_finfo only if we're changing state */
665 if (typentry->eq_opr != eq_opr)
666 typentry->eq_opr_finfo.fn_oid = InvalidOid;
667
668 typentry->eq_opr = eq_opr;
669
670 /*
671 * Reset info about hash functions whenever we pick up new info about
672 * equality operator. This is so we can ensure that the hash
673 * functions match the operator.
674 */
675 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
677 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
678 }
679 if ((flags & TYPECACHE_LT_OPR) &&
680 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
681 {
682 Oid lt_opr = InvalidOid;
683
684 if (typentry->btree_opf != InvalidOid)
685 lt_opr = get_opfamily_member(typentry->btree_opf,
686 typentry->btree_opintype,
687 typentry->btree_opintype,
689
690 /*
691 * As above, make sure array_cmp or record_cmp will succeed; but again
692 * we need no special check for ranges or multiranges.
693 */
694 if (lt_opr == ARRAY_LT_OP &&
695 !array_element_has_compare(typentry))
696 lt_opr = InvalidOid;
697 else if (lt_opr == RECORD_LT_OP &&
699 lt_opr = InvalidOid;
700
701 typentry->lt_opr = lt_opr;
702 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
703 }
704 if ((flags & TYPECACHE_GT_OPR) &&
705 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
706 {
707 Oid gt_opr = InvalidOid;
708
709 if (typentry->btree_opf != InvalidOid)
710 gt_opr = get_opfamily_member(typentry->btree_opf,
711 typentry->btree_opintype,
712 typentry->btree_opintype,
714
715 /*
716 * As above, make sure array_cmp or record_cmp will succeed; but again
717 * we need no special check for ranges or multiranges.
718 */
719 if (gt_opr == ARRAY_GT_OP &&
720 !array_element_has_compare(typentry))
721 gt_opr = InvalidOid;
722 else if (gt_opr == RECORD_GT_OP &&
724 gt_opr = InvalidOid;
725
726 typentry->gt_opr = gt_opr;
727 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
728 }
730 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
731 {
732 Oid cmp_proc = InvalidOid;
733
734 if (typentry->btree_opf != InvalidOid)
735 cmp_proc = get_opfamily_proc(typentry->btree_opf,
736 typentry->btree_opintype,
737 typentry->btree_opintype,
739
740 /*
741 * As above, make sure array_cmp or record_cmp will succeed; but again
742 * we need no special check for ranges or multiranges.
743 */
744 if (cmp_proc == F_BTARRAYCMP &&
745 !array_element_has_compare(typentry))
746 cmp_proc = InvalidOid;
747 else if (cmp_proc == F_BTRECORDCMP &&
749 cmp_proc = InvalidOid;
750
751 /* Force update of cmp_proc_finfo only if we're changing state */
752 if (typentry->cmp_proc != cmp_proc)
753 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
754
755 typentry->cmp_proc = cmp_proc;
756 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
757 }
759 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
760 {
761 Oid hash_proc = InvalidOid;
762
763 /*
764 * We insist that the eq_opr, if one has been determined, match the
765 * hash opclass; else report there is no hash function.
766 */
767 if (typentry->hash_opf != InvalidOid &&
768 (!OidIsValid(typentry->eq_opr) ||
769 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
770 typentry->hash_opintype,
771 typentry->hash_opintype,
773 hash_proc = get_opfamily_proc(typentry->hash_opf,
774 typentry->hash_opintype,
775 typentry->hash_opintype,
777
778 /*
779 * As above, make sure hash_array, hash_record, or hash_range will
780 * succeed.
781 */
782 if (hash_proc == F_HASH_ARRAY &&
783 !array_element_has_hashing(typentry))
784 hash_proc = InvalidOid;
785 else if (hash_proc == F_HASH_RECORD &&
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RANGE &&
789 !range_element_has_hashing(typentry))
790 hash_proc = InvalidOid;
791
792 /*
793 * Likewise for hash_multirange.
794 */
795 if (hash_proc == F_HASH_MULTIRANGE &&
797 hash_proc = InvalidOid;
798
799 /* Force update of hash_proc_finfo only if we're changing state */
800 if (typentry->hash_proc != hash_proc)
802
803 typentry->hash_proc = hash_proc;
804 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
805 }
806 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
809 {
810 Oid hash_extended_proc = InvalidOid;
811
812 /*
813 * We insist that the eq_opr, if one has been determined, match the
814 * hash opclass; else report there is no hash function.
815 */
816 if (typentry->hash_opf != InvalidOid &&
817 (!OidIsValid(typentry->eq_opr) ||
818 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
819 typentry->hash_opintype,
820 typentry->hash_opintype,
822 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
823 typentry->hash_opintype,
824 typentry->hash_opintype,
826
827 /*
828 * As above, make sure hash_array_extended, hash_record_extended, or
829 * hash_range_extended will succeed.
830 */
831 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
833 hash_extended_proc = InvalidOid;
834 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840
841 /*
842 * Likewise for hash_multirange_extended.
843 */
844 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
846 hash_extended_proc = InvalidOid;
847
848 /* Force update of proc finfo only if we're changing state */
849 if (typentry->hash_extended_proc != hash_extended_proc)
851
852 typentry->hash_extended_proc = hash_extended_proc;
854 }
855
856 /*
857 * Set up fmgr lookup info as requested
858 *
859 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
860 * which is not quite right (they're really in the hash table's private
861 * memory context) but this will do for our purposes.
862 *
863 * Note: the code above avoids invalidating the finfo structs unless the
864 * referenced operator/function OID actually changes. This is to prevent
865 * unnecessary leakage of any subsidiary data attached to an finfo, since
866 * that would cause session-lifespan memory leaks.
867 */
868 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
869 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
870 typentry->eq_opr != InvalidOid)
871 {
872 Oid eq_opr_func;
873
874 eq_opr_func = get_opcode(typentry->eq_opr);
875 if (eq_opr_func != InvalidOid)
876 fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
878 }
879 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
880 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
881 typentry->cmp_proc != InvalidOid)
882 {
883 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
885 }
886 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
887 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
888 typentry->hash_proc != InvalidOid)
889 {
890 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
892 }
895 typentry->hash_extended_proc != InvalidOid)
896 {
898 &typentry->hash_extended_proc_finfo,
900 }
901
902 /*
903 * If it's a composite type (row type), get tupdesc if requested
904 */
905 if ((flags & TYPECACHE_TUPDESC) &&
906 typentry->tupDesc == NULL &&
907 typentry->typtype == TYPTYPE_COMPOSITE)
908 {
909 load_typcache_tupdesc(typentry);
910 }
911
912 /*
913 * If requested, get information about a range type
914 *
915 * This includes making sure that the basic info about the range element
916 * type is up-to-date.
917 */
918 if ((flags & TYPECACHE_RANGE_INFO) &&
919 typentry->typtype == TYPTYPE_RANGE)
920 {
921 if (typentry->rngelemtype == NULL)
922 load_rangetype_info(typentry);
923 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
924 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
925 }
926
927 /*
928 * If requested, get information about a multirange type
929 */
930 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
931 typentry->rngtype == NULL &&
932 typentry->typtype == TYPTYPE_MULTIRANGE)
933 {
934 load_multirangetype_info(typentry);
935 }
936
937 /*
938 * If requested, get information about a domain type
939 */
940 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
941 typentry->domainBaseType == InvalidOid &&
942 typentry->typtype == TYPTYPE_DOMAIN)
943 {
944 typentry->domainBaseTypmod = -1;
945 typentry->domainBaseType =
946 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
947 }
948 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
949 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
950 typentry->typtype == TYPTYPE_DOMAIN)
951 {
952 load_domaintype_info(typentry);
953 }
954
955 INJECTION_POINT("typecache-before-rel-type-cache-insert");
956
957 Assert(in_progress_offset + 1 == in_progress_list_len);
959
961
962 return typentry;
963}
964
965/*
966 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
967 */
968static void
 /*
  * Takes a single TypeCacheEntry *typentry argument (per the forward
  * declaration earlier in this file); the parameter line is elided in this
  * rendering.
  */
970{
971 Relation rel;
972
973 if (!OidIsValid(typentry->typrelid)) /* should not happen */
974 elog(ERROR, "invalid typrelid for composite type %u",
975 typentry->type_id);
 /* Open the underlying relation to get at its row-type descriptor */
976 rel = relation_open(typentry->typrelid, AccessShareLock);
977 Assert(rel->rd_rel->reltype == typentry->type_id);
978
979 /*
980 * Link to the tupdesc and increment its refcount (we assert it's a
981 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
982 * because the reference mustn't be entered in the current resource owner;
983 * it can outlive the current query.
984 */
985 typentry->tupDesc = RelationGetDescr(rel);
986
987 Assert(typentry->tupDesc->tdrefcount > 0);
988 typentry->tupDesc->tdrefcount++;
989
990 /*
991 * In future, we could take some pains to not change tupDesc_identifier if
992 * the tupdesc didn't really change; but for now it's not worth it.
993 */
 /*
  * NOTE(review): the closing statements (presumably assigning a fresh
  * tupDesc_identifier and closing 'rel' with relation_close) are elided in
  * this rendering — confirm against the full source.
  */
995
997}
998
999/*
1000 * load_rangetype_info --- helper routine to set up range type information
1001 */
1002static void
 /*
  * Takes a single TypeCacheEntry *typentry argument (per the forward
  * declaration earlier in this file); the parameter line is elided in this
  * rendering.
  */
1004{
1005 Form_pg_range pg_range;
1006 HeapTuple tup;
1007 Oid subtypeOid;
1008 Oid opclassOid;
1009 Oid canonicalOid;
1010 Oid subdiffOid;
1011 Oid opfamilyOid;
1012 Oid opcintype;
1013 Oid cmpFnOid;
1014
1015 /* get information from pg_range */
1016 tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
1017 /* should not fail, since we already checked typtype ... */
1018 if (!HeapTupleIsValid(tup))
1019 elog(ERROR, "cache lookup failed for range type %u",
1020 typentry->type_id);
1021 pg_range = (Form_pg_range) GETSTRUCT(tup);
1022
 /* Copy what we need out of the tuple before releasing it */
1023 subtypeOid = pg_range->rngsubtype;
1024 typentry->rng_collation = pg_range->rngcollation;
1025 opclassOid = pg_range->rngsubopc;
1026 canonicalOid = pg_range->rngcanonical;
1027 subdiffOid = pg_range->rngsubdiff;
1028
1029 ReleaseSysCache(tup);
1030
1031 /* get opclass properties and look up the comparison function */
1032 opfamilyOid = get_opclass_family(opclassOid);
1033 opcintype = get_opclass_input_type(opclassOid);
1034 typentry->rng_opfamily = opfamilyOid;
1035
1036 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1037 BTORDER_PROC);
1038 if (!RegProcedureIsValid(cmpFnOid))
1039 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1040 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1041
1042 /* set up cached fmgrinfo structs */
 /*
  * NOTE(review): the third (memory context) argument of these three
  * fmgr_info_cxt calls is elided in this rendering — presumably
  * CacheMemoryContext, since these finfos must outlive the query; confirm
  * against the full source.
  */
1043 fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
1045 if (OidIsValid(canonicalOid))
1046 fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
1048 if (OidIsValid(subdiffOid))
1049 fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
1051
1052 /* Lastly, set up link to the element type --- this marks data valid */
1053 typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
1054}
1055
1056/*
1057 * load_multirangetype_info --- helper routine to set up multirange type
1058 * information
1059 */
1060static void
1062{
1063 Oid rangetypeOid;
1064
1065 rangetypeOid = get_multirange_range(typentry->type_id);
1066 if (!OidIsValid(rangetypeOid))
1067 elog(ERROR, "cache lookup failed for multirange type %u",
1068 typentry->type_id);
1069
1070 typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1071}
1072
1073/*
1074 * load_domaintype_info --- helper routine to set up domain constraint info
1075 *
1076 * Note: we assume we're called in a relatively short-lived context, so it's
1077 * okay to leak data into the current context while scanning pg_constraint.
1078 * We build the new DomainConstraintCache data in a context underneath
1079 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1080 * complete.
1081 */
1082static void
1084{
1085 Oid typeOid = typentry->type_id;
1087 bool notNull = false;
1088 DomainConstraintState **ccons;
1089 int cconslen;
1090 Relation conRel;
1091 MemoryContext oldcxt;
1092
1093 /*
1094 * If we're here, any existing constraint info is stale, so release it.
1095 * For safety, be sure to null the link before trying to delete the data.
1096 */
1097 if (typentry->domainData)
1098 {
1099 dcc = typentry->domainData;
1100 typentry->domainData = NULL;
1101 decr_dcc_refcount(dcc);
1102 }
1103
1104 /*
1105 * We try to optimize the common case of no domain constraints, so don't
1106 * create the dcc object and context until we find a constraint. Likewise
1107 * for the temp sorting array.
1108 */
1109 dcc = NULL;
1110 ccons = NULL;
1111 cconslen = 0;
1112
1113 /*
1114 * Scan pg_constraint for relevant constraints. We want to find
1115 * constraints for not just this domain, but any ancestor domains, so the
1116 * outer loop crawls up the domain stack.
1117 */
1118 conRel = table_open(ConstraintRelationId, AccessShareLock);
1119
1120 for (;;)
1121 {
1122 HeapTuple tup;
1123 HeapTuple conTup;
1124 Form_pg_type typTup;
1125 int nccons = 0;
1126 ScanKeyData key[1];
1127 SysScanDesc scan;
1128
1129 tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1130 if (!HeapTupleIsValid(tup))
1131 elog(ERROR, "cache lookup failed for type %u", typeOid);
1132 typTup = (Form_pg_type) GETSTRUCT(tup);
1133
1134 if (typTup->typtype != TYPTYPE_DOMAIN)
1135 {
1136 /* Not a domain, so done */
1137 ReleaseSysCache(tup);
1138 break;
1139 }
1140
1141 /* Test for NOT NULL Constraint */
1142 if (typTup->typnotnull)
1143 notNull = true;
1144
1145 /* Look for CHECK Constraints on this domain */
1146 ScanKeyInit(&key[0],
1147 Anum_pg_constraint_contypid,
1148 BTEqualStrategyNumber, F_OIDEQ,
1149 ObjectIdGetDatum(typeOid));
1150
1151 scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1152 NULL, 1, key);
1153
1154 while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1155 {
1157 Datum val;
1158 bool isNull;
1159 char *constring;
1160 Expr *check_expr;
1162
1163 /* Ignore non-CHECK constraints */
1164 if (c->contype != CONSTRAINT_CHECK)
1165 continue;
1166
1167 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1168 val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1169 conRel->rd_att, &isNull);
1170 if (isNull)
1171 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1172 NameStr(typTup->typname), NameStr(c->conname));
1173
1174 /* Convert conbin to C string in caller context */
1175 constring = TextDatumGetCString(val);
1176
1177 /* Create the DomainConstraintCache object and context if needed */
1178 if (dcc == NULL)
1179 {
1180 MemoryContext cxt;
1181
1183 "Domain constraints",
1185 dcc = (DomainConstraintCache *)
1187 dcc->constraints = NIL;
1188 dcc->dccContext = cxt;
1189 dcc->dccRefCount = 0;
1190 }
1191
1192 /* Create node trees in DomainConstraintCache's context */
1193 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1194
1195 check_expr = (Expr *) stringToNode(constring);
1196
1197 /*
1198 * Plan the expression, since ExecInitExpr will expect that.
1199 *
1200 * Note: caching the result of expression_planner() is not very
1201 * good practice. Ideally we'd use a CachedExpression here so
1202 * that we would react promptly to, eg, changes in inlined
1203 * functions. However, because we don't support mutable domain
1204 * CHECK constraints, it's not really clear that it's worth the
1205 * extra overhead to do that.
1206 */
1207 check_expr = expression_planner(check_expr);
1208
1211 r->name = pstrdup(NameStr(c->conname));
1212 r->check_expr = check_expr;
1213 r->check_exprstate = NULL;
1214
1215 MemoryContextSwitchTo(oldcxt);
1216
1217 /* Accumulate constraints in an array, for sorting below */
1218 if (ccons == NULL)
1219 {
1220 cconslen = 8;
1221 ccons = (DomainConstraintState **)
1222 palloc(cconslen * sizeof(DomainConstraintState *));
1223 }
1224 else if (nccons >= cconslen)
1225 {
1226 cconslen *= 2;
1227 ccons = (DomainConstraintState **)
1228 repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1229 }
1230 ccons[nccons++] = r;
1231 }
1232
1233 systable_endscan(scan);
1234
1235 if (nccons > 0)
1236 {
1237 /*
1238 * Sort the items for this domain, so that CHECKs are applied in a
1239 * deterministic order.
1240 */
1241 if (nccons > 1)
1242 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1243
1244 /*
1245 * Now attach them to the overall list. Use lcons() here because
1246 * constraints of parent domains should be applied earlier.
1247 */
1248 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1249 while (nccons > 0)
1250 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1251 MemoryContextSwitchTo(oldcxt);
1252 }
1253
1254 /* loop to next domain in stack */
1255 typeOid = typTup->typbasetype;
1256 ReleaseSysCache(tup);
1257 }
1258
1260
1261 /*
1262 * Only need to add one NOT NULL check regardless of how many domains in
1263 * the stack request it.
1264 */
1265 if (notNull)
1266 {
1268
1269 /* Create the DomainConstraintCache object and context if needed */
1270 if (dcc == NULL)
1271 {
1272 MemoryContext cxt;
1273
1275 "Domain constraints",
1277 dcc = (DomainConstraintCache *)
1279 dcc->constraints = NIL;
1280 dcc->dccContext = cxt;
1281 dcc->dccRefCount = 0;
1282 }
1283
1284 /* Create node trees in DomainConstraintCache's context */
1285 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1286
1288
1290 r->name = pstrdup("NOT NULL");
1291 r->check_expr = NULL;
1292 r->check_exprstate = NULL;
1293
1294 /* lcons to apply the nullness check FIRST */
1295 dcc->constraints = lcons(r, dcc->constraints);
1296
1297 MemoryContextSwitchTo(oldcxt);
1298 }
1299
1300 /*
1301 * If we made a constraint object, move it into CacheMemoryContext and
1302 * attach it to the typcache entry.
1303 */
1304 if (dcc)
1305 {
1307 typentry->domainData = dcc;
1308 dcc->dccRefCount++; /* count the typcache's reference */
1309 }
1310
1311 /* Either way, the typcache entry's domain data is now valid. */
1313}
1314
1315/*
1316 * qsort comparator to sort DomainConstraintState pointers by name
1317 */
1318static int
1319dcs_cmp(const void *a, const void *b)
1320{
1321 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1322 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1323
1324 return strcmp((*ca)->name, (*cb)->name);
1325}
1326
1327/*
1328 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1329 * and free it if no references remain
1330 */
1331static void
1333{
1334 Assert(dcc->dccRefCount > 0);
1335 if (--(dcc->dccRefCount) <= 0)
1337}
1338
1339/*
1340 * Context reset/delete callback for a DomainConstraintRef
1341 */
1342static void
1344{
1346 DomainConstraintCache *dcc = ref->dcc;
1347
1348 /* Paranoia --- be sure link is nulled before trying to release */
1349 if (dcc)
1350 {
1351 ref->constraints = NIL;
1352 ref->dcc = NULL;
1353 decr_dcc_refcount(dcc);
1354 }
1355}
1356
1357/*
1358 * prep_domain_constraints --- prepare domain constraints for execution
1359 *
1360 * The expression trees stored in the DomainConstraintCache's list are
1361 * converted to executable expression state trees stored in execctx.
1362 */
1363static List *
1365{
1366 List *result = NIL;
1367 MemoryContext oldcxt;
1368 ListCell *lc;
1369
1370 oldcxt = MemoryContextSwitchTo(execctx);
1371
1372 foreach(lc, constraints)
1373 {
1376
1378 newr->constrainttype = r->constrainttype;
1379 newr->name = r->name;
1380 newr->check_expr = r->check_expr;
1381 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1382
1383 result = lappend(result, newr);
1384 }
1385
1386 MemoryContextSwitchTo(oldcxt);
1387
1388 return result;
1389}
1390
1391/*
1392 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1393 *
1394 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1395 * lives. The ref will be cleaned up when that context is reset/deleted.
1396 *
1397 * Caller must also tell us whether it wants check_exprstate fields to be
1398 * computed in the DomainConstraintState nodes attached to this ref.
1399 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1400 */
1401void
1403 MemoryContext refctx, bool need_exprstate)
1404{
1405 /* Look up the typcache entry --- we assume it survives indefinitely */
1407 ref->need_exprstate = need_exprstate;
1408 /* For safety, establish the callback before acquiring a refcount */
1409 ref->refctx = refctx;
1410 ref->dcc = NULL;
1412 ref->callback.arg = ref;
1414 /* Acquire refcount if there are constraints, and set up exported list */
1415 if (ref->tcache->domainData)
1416 {
1417 ref->dcc = ref->tcache->domainData;
1418 ref->dcc->dccRefCount++;
1419 if (ref->need_exprstate)
1421 ref->refctx);
1422 else
1423 ref->constraints = ref->dcc->constraints;
1424 }
1425 else
1426 ref->constraints = NIL;
1427}
1428
1429/*
1430 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1431 *
1432 * If the domain's constraint set changed, ref->constraints is updated to
1433 * point at a new list of cached constraints.
1434 *
1435 * In the normal case where nothing happened to the domain, this is cheap
1436 * enough that it's reasonable (and expected) to check before *each* use
1437 * of the constraint info.
1438 */
1439void
1441{
1442 TypeCacheEntry *typentry = ref->tcache;
1443
1444 /* Make sure typcache entry's data is up to date */
1445 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1446 typentry->typtype == TYPTYPE_DOMAIN)
1447 load_domaintype_info(typentry);
1448
1449 /* Transfer to ref object if there's new info, adjusting refcounts */
1450 if (ref->dcc != typentry->domainData)
1451 {
1452 /* Paranoia --- be sure link is nulled before trying to release */
1453 DomainConstraintCache *dcc = ref->dcc;
1454
1455 if (dcc)
1456 {
1457 /*
1458 * Note: we just leak the previous list of executable domain
1459 * constraints. Alternatively, we could keep those in a child
1460 * context of ref->refctx and free that context at this point.
1461 * However, in practice this code path will be taken so seldom
1462 * that the extra bookkeeping for a child context doesn't seem
1463 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1464 */
1465 ref->constraints = NIL;
1466 ref->dcc = NULL;
1467 decr_dcc_refcount(dcc);
1468 }
1469 dcc = typentry->domainData;
1470 if (dcc)
1471 {
1472 ref->dcc = dcc;
1473 dcc->dccRefCount++;
1474 if (ref->need_exprstate)
1476 ref->refctx);
1477 else
1478 ref->constraints = dcc->constraints;
1479 }
1480 }
1481}
1482
1483/*
1484 * DomainHasConstraints --- utility routine to check if a domain has constraints
1485 *
1486 * This is defined to return false, not fail, if type is not a domain.
1487 */
1488bool
1490{
1491 TypeCacheEntry *typentry;
1492
1493 /*
1494 * Note: a side effect is to cause the typcache's domain data to become
1495 * valid. This is fine since we'll likely need it soon if there is any.
1496 */
1498
1499 return (typentry->domainData != NULL);
1500}
1501
1502
/*
 * array_element_has_equality and friends are helper routines to check
 * whether we should believe that array_eq and related functions will work
 * on the given array type or composite type.
 *
 * The logic above may call these repeatedly on the same type entry, so we
 * make use of the typentry->flags field to cache the results once known.
 * Also, we assume that we'll probably want all these facts about the type
 * if we want any, so we cache them all using only one lookup of the
 * component datatype(s).
 */
1514
1515static bool
1517{
1518 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1520 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1521}
1522
1523static bool
1525{
1526 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1528 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1529}
1530
1531static bool
1533{
1534 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1536 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1537}
1538
1539static bool
1541{
1542 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1544 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1545}
1546
1547static void
1549{
1550 Oid elem_type = get_base_element_type(typentry->type_id);
1551
1552 if (OidIsValid(elem_type))
1553 {
1554 TypeCacheEntry *elementry;
1555
1556 elementry = lookup_type_cache(elem_type,
1561 if (OidIsValid(elementry->eq_opr))
1562 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1563 if (OidIsValid(elementry->cmp_proc))
1564 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1565 if (OidIsValid(elementry->hash_proc))
1566 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1567 if (OidIsValid(elementry->hash_extended_proc))
1569 }
1571}
1572
/*
 * Likewise, some helper functions for composite types.
 */
1576
1577static bool
1579{
1580 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1582 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1583}
1584
1585static bool
1587{
1588 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1590 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1591}
1592
1593static bool
1595{
1596 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1598 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1599}
1600
1601static bool
1603{
1604 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1606 return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1607}
1608
1609static void
1611{
1612 /*
1613 * For type RECORD, we can't really tell what will work, since we don't
1614 * have access here to the specific anonymous type. Just assume that
1615 * equality and comparison will (we may get a failure at runtime). We
1616 * could also claim that hashing works, but then if code that has the
1617 * option between a comparison-based (sort-based) and a hash-based plan
1618 * chooses hashing, stuff could fail that would otherwise work if it chose
1619 * a comparison-based plan. In practice more types support comparison
1620 * than hashing.
1621 */
1622 if (typentry->type_id == RECORDOID)
1623 {
1624 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1626 }
1627 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1628 {
1629 TupleDesc tupdesc;
1630 int newflags;
1631 int i;
1632
1633 /* Fetch composite type's tupdesc if we don't have it already */
1634 if (typentry->tupDesc == NULL)
1635 load_typcache_tupdesc(typentry);
1636 tupdesc = typentry->tupDesc;
1637
1638 /* Must bump the refcount while we do additional catalog lookups */
1639 IncrTupleDescRefCount(tupdesc);
1640
1641 /* Have each property if all non-dropped fields have the property */
1642 newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1646 for (i = 0; i < tupdesc->natts; i++)
1647 {
1648 TypeCacheEntry *fieldentry;
1649 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1650
1651 if (attr->attisdropped)
1652 continue;
1653
1654 fieldentry = lookup_type_cache(attr->atttypid,
1659 if (!OidIsValid(fieldentry->eq_opr))
1660 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1661 if (!OidIsValid(fieldentry->cmp_proc))
1662 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1663 if (!OidIsValid(fieldentry->hash_proc))
1664 newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1665 if (!OidIsValid(fieldentry->hash_extended_proc))
1666 newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;
1667
1668 /* We can drop out of the loop once we disprove all bits */
1669 if (newflags == 0)
1670 break;
1671 }
1672 typentry->flags |= newflags;
1673
1674 DecrTupleDescRefCount(tupdesc);
1675 }
1676 else if (typentry->typtype == TYPTYPE_DOMAIN)
1677 {
1678 /* If it's domain over composite, copy base type's properties */
1679 TypeCacheEntry *baseentry;
1680
1681 /* load up basetype info if we didn't already */
1682 if (typentry->domainBaseType == InvalidOid)
1683 {
1684 typentry->domainBaseTypmod = -1;
1685 typentry->domainBaseType =
1686 getBaseTypeAndTypmod(typentry->type_id,
1687 &typentry->domainBaseTypmod);
1688 }
1689 baseentry = lookup_type_cache(typentry->domainBaseType,
1694 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1695 {
1697 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1701 }
1702 }
1704}
1705
/*
 * Likewise, some helper functions for range and multirange types.
 *
 * We can borrow the flag bits for array element properties to use for range
 * element properties, since those flag bits otherwise have no use in a
 * range or multirange type's typcache entry.
 */
1713
1714static bool
1716{
1717 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1719 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1720}
1721
1722static bool
1724{
1725 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1727 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1728}
1729
1730static void
1732{
1733 /* load up subtype link if we didn't already */
1734 if (typentry->rngelemtype == NULL &&
1735 typentry->typtype == TYPTYPE_RANGE)
1736 load_rangetype_info(typentry);
1737
1738 if (typentry->rngelemtype != NULL)
1739 {
1740 TypeCacheEntry *elementry;
1741
1742 /* might need to calculate subtype's hash function properties */
1743 elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1746 if (OidIsValid(elementry->hash_proc))
1747 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1748 if (OidIsValid(elementry->hash_extended_proc))
1750 }
1752}
1753
1754static bool
1756{
1757 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1759 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1760}
1761
1762static bool
1764{
1765 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1767 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1768}
1769
1770static void
1772{
1773 /* load up range link if we didn't already */
1774 if (typentry->rngtype == NULL &&
1775 typentry->typtype == TYPTYPE_MULTIRANGE)
1776 load_multirangetype_info(typentry);
1777
1778 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1779 {
1780 TypeCacheEntry *elementry;
1781
1782 /* might need to calculate subtype's hash function properties */
1783 elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1786 if (OidIsValid(elementry->hash_proc))
1787 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1788 if (OidIsValid(elementry->hash_extended_proc))
1790 }
1792}
1793
1794/*
1795 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1796 * to store 'typmod'.
1797 */
1798static void
1800{
1801 if (RecordCacheArray == NULL)
1802 {
1805 64 * sizeof(RecordCacheArrayEntry));
1807 }
1808
1809 if (typmod >= RecordCacheArrayLen)
1810 {
1811 int32 newlen = pg_nextpower2_32(typmod + 1);
1812
1816 newlen);
1817 RecordCacheArrayLen = newlen;
1818 }
1819}
1820
1821/*
1822 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1823 *
1824 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1825 * hasn't had its refcount bumped.
1826 */
1827static TupleDesc
1828lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1829{
1830 if (type_id != RECORDOID)
1831 {
1832 /*
1833 * It's a named composite type, so use the regular typcache.
1834 */
1835 TypeCacheEntry *typentry;
1836
1837 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1838 if (typentry->tupDesc == NULL && !noError)
1839 ereport(ERROR,
1840 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1841 errmsg("type %s is not composite",
1842 format_type_be(type_id))));
1843 return typentry->tupDesc;
1844 }
1845 else
1846 {
1847 /*
1848 * It's a transient record type, so look in our record-type table.
1849 */
1850 if (typmod >= 0)
1851 {
1852 /* It is already in our local cache? */
1853 if (typmod < RecordCacheArrayLen &&
1854 RecordCacheArray[typmod].tupdesc != NULL)
1855 return RecordCacheArray[typmod].tupdesc;
1856
1857 /* Are we attached to a shared record typmod registry? */
1859 {
1861
1862 /* Try to find it in the shared typmod index. */
1864 &typmod, false);
1865 if (entry != NULL)
1866 {
1867 TupleDesc tupdesc;
1868
1869 tupdesc = (TupleDesc)
1871 entry->shared_tupdesc);
1872 Assert(typmod == tupdesc->tdtypmod);
1873
1874 /* We may need to extend the local RecordCacheArray. */
1876
1877 /*
1878 * Our local array can now point directly to the TupleDesc
1879 * in shared memory, which is non-reference-counted.
1880 */
1881 RecordCacheArray[typmod].tupdesc = tupdesc;
1882 Assert(tupdesc->tdrefcount == -1);
1883
1884 /*
1885 * We don't share tupdesc identifiers across processes, so
1886 * assign one locally.
1887 */
1889
1891 entry);
1892
1893 return RecordCacheArray[typmod].tupdesc;
1894 }
1895 }
1896 }
1897
1898 if (!noError)
1899 ereport(ERROR,
1900 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1901 errmsg("record type has not been registered")));
1902 return NULL;
1903 }
1904}
1905
1906/*
1907 * lookup_rowtype_tupdesc
1908 *
1909 * Given a typeid/typmod that should describe a known composite type,
1910 * return the tuple descriptor for the type. Will ereport on failure.
1911 * (Use ereport because this is reachable with user-specified OIDs,
1912 * for example from record_in().)
1913 *
1914 * Note: on success, we increment the refcount of the returned TupleDesc,
1915 * and log the reference in CurrentResourceOwner. Caller must call
1916 * ReleaseTupleDesc when done using the tupdesc. (There are some
1917 * cases in which the returned tupdesc is not refcounted, in which
1918 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1919 * the tupdesc is guaranteed to live till process exit.)
1920 */
1923{
1924 TupleDesc tupDesc;
1925
1926 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1927 PinTupleDesc(tupDesc);
1928 return tupDesc;
1929}
1930
1931/*
1932 * lookup_rowtype_tupdesc_noerror
1933 *
1934 * As above, but if the type is not a known composite type and noError
1935 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1936 * type_id is passed, you'll get an ereport anyway.)
1937 */
1939lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1940{
1941 TupleDesc tupDesc;
1942
1943 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1944 if (tupDesc != NULL)
1945 PinTupleDesc(tupDesc);
1946 return tupDesc;
1947}
1948
1949/*
1950 * lookup_rowtype_tupdesc_copy
1951 *
1952 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1953 * copied into the CurrentMemoryContext and is not reference-counted.
1954 */
1957{
1958 TupleDesc tmp;
1959
1960 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1961 return CreateTupleDescCopyConstr(tmp);
1962}
1963
1964/*
1965 * lookup_rowtype_tupdesc_domain
1966 *
1967 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1968 * a domain over a named composite type; so this is effectively equivalent to
1969 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1970 * except for being a tad faster.
1971 *
1972 * Note: the reason we don't fold the look-through-domain behavior into plain
1973 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1974 * dealing with a domain. Otherwise they might construct a tuple that should
1975 * be of the domain type, but not apply domain constraints.
1976 */
1978lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1979{
1980 TupleDesc tupDesc;
1981
1982 if (type_id != RECORDOID)
1983 {
1984 /*
1985 * Check for domain or named composite type. We might as well load
1986 * whichever data is needed.
1987 */
1988 TypeCacheEntry *typentry;
1989
1990 typentry = lookup_type_cache(type_id,
1993 if (typentry->typtype == TYPTYPE_DOMAIN)
1995 typentry->domainBaseTypmod,
1996 noError);
1997 if (typentry->tupDesc == NULL && !noError)
1998 ereport(ERROR,
1999 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2000 errmsg("type %s is not composite",
2001 format_type_be(type_id))));
2002 tupDesc = typentry->tupDesc;
2003 }
2004 else
2005 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2006 if (tupDesc != NULL)
2007 PinTupleDesc(tupDesc);
2008 return tupDesc;
2009}
2010
2011/*
2012 * Hash function for the hash table of RecordCacheEntry.
2013 */
2014static uint32
2015record_type_typmod_hash(const void *data, size_t size)
2016{
2018
2019 return hashRowType(entry->tupdesc);
2020}
2021
2022/*
2023 * Match function for the hash table of RecordCacheEntry.
2024 */
2025static int
2026record_type_typmod_compare(const void *a, const void *b, size_t size)
2027{
2029 RecordCacheEntry *right = (RecordCacheEntry *) b;
2030
2031 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2032}
2033
2034/*
2035 * assign_record_type_typmod
2036 *
2037 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2038 * for the type, and set the tupdesc's tdtypmod field to a value that will
2039 * identify this cache entry to lookup_rowtype_tupdesc.
2040 */
2041void
2043{
2044 RecordCacheEntry *recentry;
2045 TupleDesc entDesc;
2046 bool found;
2047 MemoryContext oldcxt;
2048
2049 Assert(tupDesc->tdtypeid == RECORDOID);
2050
2051 if (RecordCacheHash == NULL)
2052 {
2053 /* First time through: initialize the hash table */
2054 HASHCTL ctl;
2055
2056 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2057 ctl.entrysize = sizeof(RecordCacheEntry);
2060 RecordCacheHash = hash_create("Record information cache", 64,
2061 &ctl,
2063
2064 /* Also make sure CacheMemoryContext exists */
2065 if (!CacheMemoryContext)
2067 }
2068
2069 /*
2070 * Find a hashtable entry for this tuple descriptor. We don't use
2071 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2072 * the allocations succeed before we create the new entry.
2073 */
2075 &tupDesc,
2076 HASH_FIND, &found);
2077 if (found && recentry->tupdesc != NULL)
2078 {
2079 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2080 return;
2081 }
2082
2083 /* Not present, so need to manufacture an entry */
2085
2086 /* Look in the SharedRecordTypmodRegistry, if attached */
2087 entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2088 if (entDesc == NULL)
2089 {
2090 /*
2091 * Make sure we have room before we CreateTupleDescCopy() or advance
2092 * NextRecordTypmod.
2093 */
2095
2096 /* Reference-counted local cache only. */
2097 entDesc = CreateTupleDescCopy(tupDesc);
2098 entDesc->tdrefcount = 1;
2099 entDesc->tdtypmod = NextRecordTypmod++;
2100 }
2101 else
2102 {
2104 }
2105
2106 RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2107
2108 /* Assign a unique tupdesc identifier, too. */
2110
2111 /* Fully initialized; create the hash table entry */
2113 &tupDesc,
2114 HASH_ENTER, NULL);
2115 recentry->tupdesc = entDesc;
2116
2117 /* Update the caller's tuple descriptor. */
2118 tupDesc->tdtypmod = entDesc->tdtypmod;
2119
2120 MemoryContextSwitchTo(oldcxt);
2121}
2122
2123/*
2124 * assign_record_type_identifier
2125 *
2126 * Get an identifier, which will be unique over the lifespan of this backend
2127 * process, for the current tuple descriptor of the specified composite type.
2128 * For named composite types, the value is guaranteed to change if the type's
2129 * definition does. For registered RECORD types, the value will not change
2130 * once assigned, since the registered type won't either. If an anonymous
2131 * RECORD type is specified, we return a new identifier on each call.
2132 */
2133uint64
2135{
2136 if (type_id != RECORDOID)
2137 {
2138 /*
2139 * It's a named composite type, so use the regular typcache.
2140 */
2141 TypeCacheEntry *typentry;
2142
2143 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2144 if (typentry->tupDesc == NULL)
2145 ereport(ERROR,
2146 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2147 errmsg("type %s is not composite",
2148 format_type_be(type_id))));
2149 Assert(typentry->tupDesc_identifier != 0);
2150 return typentry->tupDesc_identifier;
2151 }
2152 else
2153 {
2154 /*
2155 * It's a transient record type, so look in our record-type table.
2156 */
2157 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2158 RecordCacheArray[typmod].tupdesc != NULL)
2159 {
2160 Assert(RecordCacheArray[typmod].id != 0);
2161 return RecordCacheArray[typmod].id;
2162 }
2163
2164 /* For anonymous or unrecognized record type, generate a new ID */
2165 return ++tupledesc_id_counter;
2166 }
2167}
2168
2169/*
2170 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2171 * This exists only to avoid exposing private innards of
2172 * SharedRecordTypmodRegistry in a header.
2173 */
2174size_t
2176{
2177 return sizeof(SharedRecordTypmodRegistry);
2178}
2179
2180/*
2181 * Initialize 'registry' in a pre-existing shared memory region, which must be
2182 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2183 * bytes.
2184 *
2185 * 'area' will be used to allocate shared memory space as required for the
2186 * typemod registration. The current process, expected to be a leader process
2187 * in a parallel query, will be attached automatically and its current record
2188 * types will be loaded into *registry. While attached, all calls to
2189 * assign_record_type_typmod will use the shared registry. Worker backends
2190 * will need to attach explicitly.
2191 *
2192 * Note that this function takes 'area' and 'segment' as arguments rather than
2193 * accessing them via CurrentSession, because they aren't installed there
2194 * until after this function runs.
2195 */
2196void
2198 dsm_segment *segment,
2199 dsa_area *area)
2200{
2201 MemoryContext old_context;
2202 dshash_table *record_table;
2203 dshash_table *typmod_table;
2204 int32 typmod;
2205
2207
2208 /* We can't already be attached to a shared registry. */
2212
2214
2215 /* Create the hash table of tuple descriptors indexed by themselves. */
2217
2218 /* Create the hash table of tuple descriptors indexed by typmod. */
2219 typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2220
2221 MemoryContextSwitchTo(old_context);
2222
2223 /* Initialize the SharedRecordTypmodRegistry. */
2224 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2225 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2227
2228 /*
2229 * Copy all entries from this backend's private registry into the shared
2230 * registry.
2231 */
2232 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2233 {
2234 SharedTypmodTableEntry *typmod_table_entry;
2235 SharedRecordTableEntry *record_table_entry;
2236 SharedRecordTableKey record_table_key;
2237 dsa_pointer shared_dp;
2238 TupleDesc tupdesc;
2239 bool found;
2240
2241 tupdesc = RecordCacheArray[typmod].tupdesc;
2242 if (tupdesc == NULL)
2243 continue;
2244
2245 /* Copy the TupleDesc into shared memory. */
2246 shared_dp = share_tupledesc(area, tupdesc, typmod);
2247
2248 /* Insert into the typmod table. */
2249 typmod_table_entry = dshash_find_or_insert(typmod_table,
2250 &tupdesc->tdtypmod,
2251 &found);
2252 if (found)
2253 elog(ERROR, "cannot create duplicate shared record typmod");
2254 typmod_table_entry->typmod = tupdesc->tdtypmod;
2255 typmod_table_entry->shared_tupdesc = shared_dp;
2256 dshash_release_lock(typmod_table, typmod_table_entry);
2257
2258 /* Insert into the record table. */
2259 record_table_key.shared = false;
2260 record_table_key.u.local_tupdesc = tupdesc;
2261 record_table_entry = dshash_find_or_insert(record_table,
2262 &record_table_key,
2263 &found);
2264 if (!found)
2265 {
2266 record_table_entry->key.shared = true;
2267 record_table_entry->key.u.shared_tupdesc = shared_dp;
2268 }
2269 dshash_release_lock(record_table, record_table_entry);
2270 }
2271
2272 /*
2273 * Set up the global state that will tell assign_record_type_typmod and
2274 * lookup_rowtype_tupdesc_internal about the shared registry.
2275 */
2276 CurrentSession->shared_record_table = record_table;
2277 CurrentSession->shared_typmod_table = typmod_table;
2279
2280 /*
2281 * We install a detach hook in the leader, but only to handle cleanup on
2282 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2283 * the memory, the leader process will use a shared registry until it
2284 * exits.
2285 */
2287}
2288
/*
 * Attach to 'registry', which must have been initialized already by another
 * backend.  Future calls to assign_record_type_typmod and
 * lookup_rowtype_tupdesc_internal will use the shared registry until the
 * current session is detached.
 *
 * NOTE(review): several lines are missing from this view of the file: the
 * function's parameter list, the statement that initializes 'old_context'
 * (implied by the later MemoryContextSwitchTo(old_context)), the dshash
 * parameter arguments of the two dshash_attach() calls, and the
 * on_dsm_detach() registration that must precede the dangling
 * PointerGetDatum(registry) argument.  Surviving code is kept verbatim.
 */
void
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession != NULL);
	Assert(CurrentSession->segment != NULL);
	Assert(CurrentSession->area != NULL);

	/*
	 * We can't already have typmods in our local cache, because they'd clash
	 * with those imported by SharedRecordTypmodRegistryInit.  This should be
	 * a freshly started parallel worker.  If we ever support worker
	 * recycling, a worker would need to zap its local cache in between
	 * servicing different queries, in order to be able to call this and
	 * synchronize typmods with a new leader; but that's problematic because
	 * we can't be very sure that record-typmod-related state hasn't escaped
	 * to anywhere else in the process.
	 */

	/* Attach to the two hash tables. */
	record_table = dshash_attach(CurrentSession->area,
								 registry->record_table_handle,
	typmod_table = dshash_attach(CurrentSession->area,
								 registry->typmod_table_handle,
								 NULL);

	MemoryContextSwitchTo(old_context);

	/*
	 * Set up detach hook to run at worker exit.  Currently this is the same
	 * as the leader's detach hook, but in future they might need to be
	 * different.
	 */
								 PointerGetDatum(registry));

	/*
	 * Set up the session state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
}
2355
/*
 * InvalidateCompositeTypeCacheEntry
 *		Invalidate particular TypeCacheEntry on Relcache inval callback
 *
 * Delete the cached tuple descriptor (if any) for the given composite
 * type, and reset whatever info we have cached about the composite type's
 * comparability.
 *
 * NOTE(review): the parameter list (presumably a TypeCacheEntry *typentry,
 * given the body) and the statement under the final "if" are missing from
 * this view; surviving code is kept verbatim.
 */
static void
{
	bool		hadTupDescOrOpclass;

	/* Only composite types (which always carry a typrelid) belong here */
	Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
		   OidIsValid(typentry->typrelid));

	/* Remember whether there is anything to clear, for the test at the end */
	hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
		(typentry->flags & TCFLAGS_OPERATOR_FLAGS);

	/* Delete tupdesc if we have it */
	if (typentry->tupDesc != NULL)
	{
		/*
		 * Release our refcount and free the tupdesc if none remain.  We
		 * can't use DecrTupleDescRefCount here because this reference is not
		 * logged by the current resource owner.
		 */
		Assert(typentry->tupDesc->tdrefcount > 0);
		if (--typentry->tupDesc->tdrefcount == 0)
			FreeTupleDesc(typentry->tupDesc);
		typentry->tupDesc = NULL;

		/*
		 * Also clear tupDesc_identifier, so that anyone watching it will
		 * realize that the tupdesc has changed.
		 */
		typentry->tupDesc_identifier = 0;
	}

	/* Reset equality/comparison/hashing validity information */
	typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;

	/* Call delete_rel_type_cache() if we actually cleared something */
	/* NOTE(review): the call statement itself is missing from this view */
	if (hadTupDescOrOpclass)
}
2402
/*
 * TypeCacheRelCallback
 *		Relcache inval callback function
 *
 * Delete the cached tuple descriptor (if any) for the given rel's composite
 * type, or for all composite types if relid == InvalidOid.  Also reset
 * whatever info we have cached about the composite type's comparability.
 *
 * This is called when a relcache invalidation event occurs for the given
 * relid.  We can't use syscache to find a type corresponding to the given
 * relation because the code can be called outside of transaction.  Thus, we
 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
 *
 * NOTE(review): this view of the function is missing the parameter list,
 * the target-pointer halves of two hash_search() calls, the per-entry
 * invalidation calls, and the "if (...)" conditions guarding the two
 * domain-flag resets; surviving code is kept verbatim.
 */
static void
{
	TypeCacheEntry *typentry;

	/*
	 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
	 * callback wouldn't be registered
	 */
	if (OidIsValid(relid))
	{
		RelIdToTypeIdCacheEntry *relentry;

		/*
		 * Find an RelIdToTypeIdCacheHash entry, which should exist as soon as
		 * corresponding typcache entry has something to clean.
		 */
											  &relid,
											  HASH_FIND, NULL);

		if (relentry != NULL)
		{
											   &relentry->composite_typid,
											   HASH_FIND, NULL);

			if (typentry != NULL)
			{
				Assert(typentry->typtype == TYPTYPE_COMPOSITE);
				Assert(relid == typentry->typrelid);

			}
		}

		/*
		 * Visit all the domain types sequentially.  Typically, this shouldn't
		 * affect performance since domain types are less tended to bloat.
		 * Domain types are created manually, unlike composite types which are
		 * automatically created for every temporary table.
		 */
		for (typentry = firstDomainTypeEntry;
			 typentry != NULL;
			 typentry = typentry->nextDomain)
		{
			/*
			 * If it's domain over composite, reset flags.  (We don't bother
			 * trying to determine whether the specific base type needs a
			 * reset.)  Note that if we haven't determined whether the base
			 * type is composite, we don't need to reset anything.
			 */
				typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
		}
	}
	else
	{
		HASH_SEQ_STATUS status;

		/*
		 * Relid is invalid.  By convention, we need to reset all composite
		 * types in cache.  Also, we should reset flags for domain types, and
		 * we loop over all entries in hash, so, do it in a single scan.
		 */
		hash_seq_init(&status, TypeCacheHash);
		while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
		{
			if (typentry->typtype == TYPTYPE_COMPOSITE)
			{
			}
			else if (typentry->typtype == TYPTYPE_DOMAIN)
			{
				/*
				 * If it's domain over composite, reset flags.  (We don't
				 * bother trying to determine whether the specific base type
				 * needs a reset.)  Note that if we haven't determined whether
				 * the base type is composite, we don't need to reset
				 * anything.
				 */
					typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
			}
		}
	}
}
2503
/*
 * TypeCacheTypCallback
 *		Syscache inval callback function
 *
 * This is called when a syscache invalidation event occurs for any
 * pg_type row.  If we have information cached about that type, mark
 * it as needing to be reloaded.
 *
 * NOTE(review): two statement lines are missing from this view: the second
 * half of the flag-clearing mask (left dangling after the '|'), and the
 * call made when hadPgTypeData is true; surviving code is kept verbatim.
 */
static void
TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
{
	HASH_SEQ_STATUS status;
	TypeCacheEntry *typentry;

	/* TypeCacheHash must exist, else this callback wouldn't be registered */

	/*
	 * By convention, zero hash value is passed to the callback as a sign that
	 * it's time to invalidate the whole cache.  See sinval.c, inval.c and
	 * InvalidateSystemCachesExtended().
	 */
	if (hashvalue == 0)
		hash_seq_init(&status, TypeCacheHash);
	else
		hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);

	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
	{
		/* Remember whether pg_type-derived data was valid before clearing */
		bool		hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);

		Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);

		/*
		 * Mark the data obtained directly from pg_type as invalid.  Also, if
		 * it's a domain, typnotnull might've changed, so we'll need to
		 * recalculate its constraints.
		 */
		typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |

		/*
		 * Call delete_rel_type_cache() if we cleaned
		 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
		 */
		if (hadPgTypeData)
	}
}
2552
2553/*
2554 * TypeCacheOpcCallback
2555 * Syscache inval callback function
2556 *
2557 * This is called when a syscache invalidation event occurs for any pg_opclass
2558 * row. In principle we could probably just invalidate data dependent on the
2559 * particular opclass, but since updates on pg_opclass are rare in production
2560 * it doesn't seem worth a lot of complication: we just mark all cached data
2561 * invalid.
2562 *
2563 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2564 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2565 * is not allowed to be used to add/drop the primary operators and functions
2566 * of an opclass, only cross-type members of a family; and the latter sorts
2567 * of members are not going to get cached here.
2568 */
2569static void
2570TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2571{
2572 HASH_SEQ_STATUS status;
2573 TypeCacheEntry *typentry;
2574
2575 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2576 hash_seq_init(&status, TypeCacheHash);
2577 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2578 {
2579 /* Reset equality/comparison/hashing validity information */
2580 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2581 }
2582}
2583
2584/*
2585 * TypeCacheConstrCallback
2586 * Syscache inval callback function
2587 *
2588 * This is called when a syscache invalidation event occurs for any
2589 * pg_constraint row. We flush information about domain constraints
2590 * when this happens.
2591 *
2592 * It's slightly annoying that we can't tell whether the inval event was for
2593 * a domain constraint record or not; there's usually more update traffic
2594 * for table constraints than domain constraints, so we'll do a lot of
2595 * useless flushes. Still, this is better than the old no-caching-at-all
2596 * approach to domain constraints.
2597 */
2598static void
2599TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2600{
2601 TypeCacheEntry *typentry;
2602
2603 /*
2604 * Because this is called very frequently, and typically very few of the
2605 * typcache entries are for domains, we don't use hash_seq_search here.
2606 * Instead we thread all the domain-type entries together so that we can
2607 * visit them cheaply.
2608 */
2609 for (typentry = firstDomainTypeEntry;
2610 typentry != NULL;
2611 typentry = typentry->nextDomain)
2612 {
2613 /* Reset domain constraint validity information */
2614 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2615 }
2616}
2617
2618
/*
 * Check if given OID is part of the subset that's sortable by comparisons
 *
 * Returns true only when the OID lies at or above the bitmap's base and its
 * offset bit is set in enumdata->sorted_values.
 *
 * NOTE(review): the parameter list is missing from this view; from the body
 * it takes the enum cache data ('enumdata') and the enum value OID ('arg').
 */
static inline bool
{
	Oid			offset;

	/* OIDs below the bitmap base cannot be in the sorted subset */
	if (arg < enumdata->bitmap_base)
		return false;
	offset = arg - enumdata->bitmap_base;
	/* bms_is_member takes an int, so reject offsets that would not fit */
	if (offset > (Oid) INT_MAX)
		return false;
	return bms_is_member((int) offset, enumdata->sorted_values);
}
2634
2635
/*
 * compare_values_of_enum
 *		Compare two members of an enum type.
 *		Return <0, 0, or >0 according as arg1 <, =, or > arg2.
 *
 * Note: currently, the enumData cache is refreshed only if we are asked
 * to compare an enum value that is not already in the cache.  This is okay
 * because there is no support for re-ordering existing values, so comparisons
 * of previously cached values will return the right answer even if other
 * values have been added since we last loaded the cache.
 *
 * Note: the enum logic has a special-case rule about even-numbered versus
 * odd-numbered OIDs, but we take no account of that rule here; this
 * routine shouldn't even get called when that rule applies.
 *
 * NOTE(review): the parameter list is missing from this view; from the body
 * it takes the enum's typcache entry ('tcache') and the two value OIDs
 * ('arg1', 'arg2').
 */
int
{
	TypeCacheEnumData *enumdata;
	EnumItem   *item1;
	EnumItem   *item2;

	/*
	 * Equal OIDs are certainly equal --- this case was probably handled by
	 * our caller, but we may as well check.
	 */
	if (arg1 == arg2)
		return 0;

	/* Load up the cache if first time through */
	if (tcache->enumData == NULL)
		load_enum_cache_data(tcache);
	enumdata = tcache->enumData;

	/*
	 * If both OIDs are known-sorted, we can just compare them directly.
	 */
	if (enum_known_sorted(enumdata, arg1) &&
		enum_known_sorted(enumdata, arg2))
	{
		/* arg1 == arg2 was handled above, so strict comparison suffices */
		if (arg1 < arg2)
			return -1;
		else
			return 1;
	}

	/*
	 * Slow path: we have to identify their actual sort-order positions.
	 */
	item1 = find_enumitem(enumdata, arg1);
	item2 = find_enumitem(enumdata, arg2);

	if (item1 == NULL || item2 == NULL)
	{
		/*
		 * We couldn't find one or both values.  That means the enum has
		 * changed under us, so re-initialize the cache and try again.  We
		 * don't bother retrying the known-sorted case in this path.
		 */
		load_enum_cache_data(tcache);
		enumdata = tcache->enumData;

		item1 = find_enumitem(enumdata, arg1);
		item2 = find_enumitem(enumdata, arg2);

		/*
		 * If we still can't find the values, complain: we must have corrupt
		 * data.
		 */
		if (item1 == NULL)
			elog(ERROR, "enum value %u not found in cache for enum %s",
				 arg1, format_type_be(tcache->type_id));
		if (item2 == NULL)
			elog(ERROR, "enum value %u not found in cache for enum %s",
				 arg2, format_type_be(tcache->type_id));
	}

	/* Compare by cached sort order, not by OID */
	if (item1->sort_order < item2->sort_order)
		return -1;
	else if (item1->sort_order > item2->sort_order)
		return 1;
	else
		return 0;
}
2720
/*
 * Load (or re-load) the enumData member of the typcache entry.
 *
 * NOTE(review): the parameter list (from the body, a TypeCacheEntry *tcache)
 * and one statement line are missing from this view: 'oldcxt' must be
 * initialized by a MemoryContextSwitchTo(CacheMemoryContext) call before the
 * final palloc, given the later MemoryContextSwitchTo(oldcxt).  Surviving
 * code is kept verbatim.
 */
static void
{
	TypeCacheEnumData *enumdata;
	Relation	enum_rel;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	EnumItem   *items;
	int			numitems;
	int			maxitems;
	Oid			bitmap_base;
	Bitmapset  *bitmap;
	MemoryContext oldcxt;
	int			bm_size,
				start_pos;

	/* Check that this is actually an enum */
	if (tcache->typtype != TYPTYPE_ENUM)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("%s is not an enum",
						format_type_be(tcache->type_id))));

	/*
	 * Read all the information for members of the enum type.  We collect the
	 * info in working memory in the caller's context, and then transfer it to
	 * permanent memory in CacheMemoryContext.  This minimizes the risk of
	 * leaking memory from CacheMemoryContext in the event of an error partway
	 * through.
	 */
	maxitems = 64;
	items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
	numitems = 0;

	/* Scan pg_enum for the members of the target enum type. */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(tcache->type_id));

	enum_rel = table_open(EnumRelationId, AccessShareLock);
	enum_scan = systable_beginscan(enum_rel,
								   EnumTypIdLabelIndexId,
								   true, NULL,
								   1, &skey);

	while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
	{
		Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

		/* Double the working array whenever it fills up */
		if (numitems >= maxitems)
		{
			maxitems *= 2;
			items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
		}
		items[numitems].enum_oid = en->oid;
		items[numitems].sort_order = en->enumsortorder;
		numitems++;
	}

	systable_endscan(enum_scan);
	table_close(enum_rel, AccessShareLock);

	/* Sort the items into OID order */
	qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

	/*
	 * Here, we create a bitmap listing a subset of the enum's OIDs that are
	 * known to be in order and can thus be compared with just OID comparison.
	 *
	 * The point of this is that the enum's initial OIDs were certainly in
	 * order, so there is some subset that can be compared via OID comparison;
	 * and we'd rather not do binary searches unnecessarily.
	 *
	 * This is somewhat heuristic, and might identify a subset of OIDs that
	 * isn't exactly what the type started with.  That's okay as long as the
	 * subset is correctly sorted.
	 */
	bitmap_base = InvalidOid;
	bitmap = NULL;
	bm_size = 1;				/* only save sets of at least 2 OIDs */

	for (start_pos = 0; start_pos < numitems - 1; start_pos++)
	{
		/*
		 * Identify longest sorted subsequence starting at start_pos
		 */
		Bitmapset  *this_bitmap = bms_make_singleton(0);
		int			this_bm_size = 1;
		Oid			start_oid = items[start_pos].enum_oid;
		float4		prev_order = items[start_pos].sort_order;
		int			i;

		for (i = start_pos + 1; i < numitems; i++)
		{
			Oid			offset;

			offset = items[i].enum_oid - start_oid;
			/* quit if bitmap would be too large; cutoff is arbitrary */
			if (offset >= 8192)
				break;
			/* include the item if it's in-order */
			if (items[i].sort_order > prev_order)
			{
				prev_order = items[i].sort_order;
				this_bitmap = bms_add_member(this_bitmap, (int) offset);
				this_bm_size++;
			}
		}

		/* Remember it if larger than previous best */
		if (this_bm_size > bm_size)
		{
			bms_free(bitmap);
			bitmap_base = start_oid;
			bitmap = this_bitmap;
			bm_size = this_bm_size;
		}
		else
			bms_free(this_bitmap);

		/*
		 * Done if it's not possible to find a longer sequence in the rest of
		 * the list.  In typical cases this will happen on the first
		 * iteration, which is why we create the bitmaps on the fly instead of
		 * doing a second pass over the list.
		 */
		if (bm_size >= (numitems - start_pos - 1))
			break;
	}

	/* OK, copy the data into CacheMemoryContext */
	/*
	 * NOTE(review): the MemoryContextSwitchTo(CacheMemoryContext) assignment
	 * to 'oldcxt' is missing from this view; it must precede this palloc.
	 */
	enumdata = (TypeCacheEnumData *)
		palloc(offsetof(TypeCacheEnumData, enum_values) +
			   numitems * sizeof(EnumItem));
	enumdata->bitmap_base = bitmap_base;
	enumdata->sorted_values = bms_copy(bitmap);
	enumdata->num_values = numitems;
	memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
	MemoryContextSwitchTo(oldcxt);

	/* Release the working copies made in the caller's context */
	pfree(items);
	bms_free(bitmap);

	/* And link the finished cache struct into the typcache */
	if (tcache->enumData != NULL)
		pfree(tcache->enumData);
	tcache->enumData = enumdata;
}
2875
/*
 * Locate the EnumItem with the given OID, if present
 *
 * Returns NULL if the OID is not in the cached array.
 *
 * NOTE(review): the parameter list is missing from this view; from the body
 * it takes the enum cache data ('enumdata') and the target OID ('arg').
 */
static EnumItem *
{
	EnumItem	srch;

	/* On some versions of Solaris, bsearch of zero items dumps core */
	if (enumdata->num_values <= 0)
		return NULL;

	/* enum_values is kept sorted by OID, so a binary search works */
	srch.enum_oid = arg;
	return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
				   sizeof(EnumItem), enum_oid_cmp);
}
2892
2893/*
2894 * qsort comparison function for OID-ordered EnumItems
2895 */
2896static int
2897enum_oid_cmp(const void *left, const void *right)
2898{
2899 const EnumItem *l = (const EnumItem *) left;
2900 const EnumItem *r = (const EnumItem *) right;
2901
2902 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2903}
2904
/*
 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
 * to the given value and return a dsa_pointer.
 *
 * NOTE(review): the parameter list is missing from this view; from the body
 * it takes 'area', 'tupdesc', and 'typmod'.
 */
static dsa_pointer
{
	dsa_pointer shared_dp;
	TupleDesc	shared;

	/* Allocate DSA space and deep-copy the descriptor into it */
	shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
	shared = (TupleDesc) dsa_get_address(area, shared_dp);
	TupleDescCopy(shared, tupdesc);
	shared->tdtypmod = typmod;

	return shared_dp;
}
2922
/*
 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
 * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
 * Tuple descriptors returned by this function are not reference counted, and
 * will exist at least as long as the current backend remained attached to the
 * current session.
 *
 * NOTE(review): this view of the function is missing its signature and many
 * statement lines: the 'key' variable declaration, the attached-to-registry
 * check guarding the early "return NULL", the dshash_find and
 * dshash_find_or_insert calls, the dshash_release_lock calls, the
 * dsa_get_address calls, the typmod counter fetch-add, and the
 * dshash_delete_key call.  Surviving code is kept verbatim.
 */
static TupleDesc
{
	TupleDesc	result;
	SharedRecordTableEntry *record_table_entry;
	SharedTypmodTableEntry *typmod_table_entry;
	dsa_pointer shared_dp;
	bool		found;
	uint32		typmod;

	/* If not even attached, nothing to do. */
		return NULL;

	/* Try to find a matching tuple descriptor in the record table. */
	key.shared = false;
	key.u.local_tupdesc = tupdesc;
	record_table_entry = (SharedRecordTableEntry *)
	if (record_table_entry)
	{
		Assert(record_table_entry->key.shared);
						record_table_entry);
		result = (TupleDesc)
					record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Allocate a new typmod number.  This will be wasted if we error out. */
	typmod = (int)
		1);

	/* Copy the TupleDesc into shared memory. */
	shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);

	/*
	 * Create an entry in the typmod table so that others will understand this
	 * typmod number.
	 */
	PG_TRY();
	{
		typmod_table_entry = (SharedTypmodTableEntry *)
			&typmod, &found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
	}
	PG_CATCH();
	{
		/* Don't leak the shared-memory copy if the insert errors out */
		dsa_free(CurrentSession->area, shared_dp);
		PG_RE_THROW();
	}
	PG_END_TRY();
	typmod_table_entry->typmod = typmod;
	typmod_table_entry->shared_tupdesc = shared_dp;
		typmod_table_entry);

	/*
	 * Finally create an entry in the record table so others with matching
	 * tuple descriptors can reuse the typmod.
	 */
	record_table_entry = (SharedRecordTableEntry *)
		&found);
	if (found)
	{
		/*
		 * Someone concurrently inserted a matching tuple descriptor since the
		 * first time we checked.  Use that one instead.
		 */
						record_table_entry);

		/* Might as well free up the space used by the one we created. */
				&typmod);
		Assert(found);
		dsa_free(CurrentSession->area, shared_dp);

		/* Return the one we found. */
		Assert(record_table_entry->key.shared);
		result = (TupleDesc)
					record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Store it and return it. */
	record_table_entry->key.shared = true;
	record_table_entry->key.u.shared_tupdesc = shared_dp;
		record_table_entry);
	result = (TupleDesc)
		dsa_get_address(CurrentSession->area, shared_dp);
	Assert(result->tdrefcount == -1);

	return result;
}
3037
/*
 * On-DSM-detach hook to forget about the current shared record typmod
 * infrastructure.  This is currently used by both leader and workers.
 *
 * NOTE(review): the function's signature and the condition/statement lines
 * inside both inner blocks are missing from this view; only the comment and
 * braces survive, and they are kept verbatim.
 */
static void
{
	/* Be cautious here: maybe we didn't finish initializing. */
	{
	}
	{
	}
}
3058
/*
 * Insert RelIdToTypeIdCacheHash entry if needed.
 *
 * NOTE(review): the parameter list (from the body, a TypeCacheEntry
 * *typentry) and the first line of the hash_search(..., HASH_ENTER, ...)
 * call are missing from this view; surviving code is kept verbatim.
 */
static void
{
	/* Immediately quit for non-composite types */
	if (typentry->typtype != TYPTYPE_COMPOSITE)
		return;

	/* typrelid should be given for composite types */
	Assert(OidIsValid(typentry->typrelid));

	/*
	 * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
	 * information indicating it should be here.
	 */
	if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
		(typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
		typentry->tupDesc != NULL)
	{
		RelIdToTypeIdCacheEntry *relentry;
		bool		found;

		/* NOTE(review): the hash_search() call's first line is missing here */
									 &typentry->typrelid,
									 HASH_ENTER, &found);
		relentry->relid = typentry->typrelid;
		relentry->composite_typid = typentry->type_id;
	}
}
3090
/*
 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
 * or tupDesc.
 *
 * NOTE(review): the parameter list (from the body, a TypeCacheEntry
 * *typentry) and the first lines of the two hash_search() calls are missing
 * from this view; surviving code is kept verbatim.
 */
static void
{
#ifdef USE_ASSERT_CHECKING
	int			i;
	bool		is_in_progress = false;

	/* See whether a lookup of this type is currently in progress */
	for (i = 0; i < in_progress_list_len; i++)
	{
		if (in_progress_list[i] == typentry->type_id)
		{
			is_in_progress = true;
			break;
		}
	}
#endif

	/* Immediately quit for non-composite types */
	if (typentry->typtype != TYPTYPE_COMPOSITE)
		return;

	/* typrelid should be given for composite types */
	Assert(OidIsValid(typentry->typrelid));

	/*
	 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
	 * information indicating entry should be still there.
	 */
	if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
		!(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
		typentry->tupDesc == NULL)
	{
		bool		found;

		/* NOTE(review): the hash_search() call's first line is missing here */
					&typentry->typrelid,
					HASH_REMOVE, &found);
		Assert(found || is_in_progress);
	}
	else
	{
#ifdef USE_ASSERT_CHECKING

		/*
		 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
		 * entry if it should exist.
		 */
		bool		found;

		if (!is_in_progress)
		{
			/* NOTE(review): the hash_search() call's first line is missing */
						&typentry->typrelid,
						HASH_FIND, &found);
			Assert(found);
		}
#endif
	}
}
3154
/*
 * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
 * entries, marked as in-progress by lookup_type_cache().  It may happen
 * in case of an error or interruption during the lookup_type_cache() call.
 *
 * NOTE(review): the function name/signature and several statement lines (the
 * hash_search() lookup, the per-entry fixup call under "if (typentry)", and
 * the final statement resetting the in-progress list) are missing from this
 * view; surviving code is kept verbatim.
 */
static void
{
	int			i;

	/* Visit every type OID recorded as in-progress */
	for (i = 0; i < in_progress_list_len; i++)
	{
		TypeCacheEntry *typentry;

						HASH_FIND, NULL);
		if (typentry)
	}

}
3178
3179void
3181{
3183}
3184
3185void
3187{
3189}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:366
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
#define TextDatumGetCString(d)
Definition: builtins.h:98
#define NameStr(name)
Definition: c.h:717
#define RegProcedureIsValid(p)
Definition: c.h:748
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:434
int32_t int32
Definition: c.h:498
uint64_t uint64
Definition: c.h:503
uint32_t uint32
Definition: c.h:502
float float4
Definition: c.h:600
#define MemSet(start, val, len)
Definition: c.h:991
#define OidIsValid(objectId)
Definition: c.h:746
size_t Size
Definition: c.h:576
void CreateCacheMemoryContext(void)
Definition: catcache.c:708
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:942
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:826
uint64 dsa_pointer
Definition: dsa.h:62
#define dsa_allocate(area, size)
Definition: dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition: dshash.c:590
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:307
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
dsa_pointer dshash_table_handle
Definition: dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition: dynahash.c:1405
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define PG_RE_THROW()
Definition: elog.h:405
#define PG_TRY(...)
Definition: elog.h:372
#define PG_END_TRY(...)
Definition: elog.h:397
#define ERROR
Definition: elog.h:39
#define PG_CATCH(...)
Definition: elog.h:382
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:149
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:143
@ DOM_CONSTRAINT_CHECK
Definition: execnodes.h:1044
@ DOM_CONSTRAINT_NOTNULL
Definition: execnodes.h:1043
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:388
#define HASHSTANDARD_PROC
Definition: hash.h:355
#define HASHEXTENDED_PROC
Definition: hash.h:356
Assert(PointerIsAligned(start, uint64))
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
Definition: htup_details.h:728
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:861
#define IsParallelWorker()
Definition: parallel.h:60
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2345
long val
Definition: informix.c:689
#define INJECTION_POINT(name)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition: int.h:652
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1844
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1802
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int i
Definition: isn.c:77
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lcons(void *datum, List *list)
Definition: list.c:495
#define AccessShareLock
Definition: lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1304
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1282
Oid get_multirange_range(Oid multirangeOid)
Definition: lsyscache.c:3623
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:888
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1425
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:167
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2972
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2678
@ LWTRANCHE_PER_SESSION_RECORD_TYPMOD
Definition: lwlock.h:201
@ LWTRANCHE_PER_SESSION_RECORD_TYPE
Definition: lwlock.h:200
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1256
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1290
char * pstrdup(const char *in)
Definition: mcxt.c:2322
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:599
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:668
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:2167
void pfree(void *pointer)
Definition: mcxt.c:2147
MemoryContext TopMemoryContext
Definition: mcxt.c:165
void * palloc(Size size)
Definition: mcxt.c:1940
MemoryContext CurrentMemoryContext
Definition: mcxt.c:159
MemoryContext CacheMemoryContext
Definition: mcxt.c:168
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:485
dsa_area * area
Definition: mcxt.c:175
#define AllocSetContextCreate
Definition: memutils.h:149
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:190
#define BTORDER_PROC
Definition: nbtree.h:717
#define makeNode(_type_)
Definition: nodes.h:161
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:202
void * arg
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
FormData_pg_constraint * Form_pg_constraint
const void * data
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
FormData_pg_range * Form_pg_range
Definition: pg_range.h:58
FormData_pg_type * Form_pg_type
Definition: pg_type.h:261
Expr * expression_planner(Expr *expr)
Definition: planner.c:6645
#define qsort(a, b, c, d)
Definition: port.h:479
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
uintptr_t Datum
Definition: postgres.h:69
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:257
#define InvalidOid
Definition: postgres_ext.h:35
unsigned int Oid
Definition: postgres_ext.h:30
char * c
tree ctl
Definition: radixtree.h:1838
void * stringToNode(const char *str)
Definition: read.c:90
#define RelationGetDescr(relation)
Definition: rel.h:542
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
Session * CurrentSession
Definition: session.c:48
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:47
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define HTEqualStrategyNumber
Definition: stratnum.h:41
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
MemoryContext dccContext
Definition: typcache.c:142
DomainConstraintCache * dcc
Definition: typcache.h:173
MemoryContext refctx
Definition: typcache.h:168
MemoryContextCallback callback
Definition: typcache.h:174
TypeCacheEntry * tcache
Definition: typcache.h:169
DomainConstraintType constrainttype
Definition: execnodes.h:1050
ExprState * check_exprstate
Definition: execnodes.h:1053
float4 sort_order
Definition: typcache.c:150
Oid enum_oid
Definition: typcache.c:149
Oid fn_oid
Definition: fmgr.h:59
Definition: dynahash.c:220
Definition: pg_list.h:54
MemoryContextCallbackFunction func
Definition: palloc.h:49
TupleDesc tupdesc
Definition: typcache.c:174
TupleDesc rd_att
Definition: rel.h:112
Form_pg_class rd_rel
Definition: rel.h:111
dsm_segment * segment
Definition: session.h:27
dshash_table * shared_record_table
Definition: session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
dsa_area * area
Definition: session.h:28
dshash_table * shared_typmod_table
Definition: session.h:33
SharedRecordTableKey key
Definition: typcache.c:213
TupleDesc local_tupdesc
Definition: typcache.c:201
dsa_pointer shared_tupdesc
Definition: typcache.c:202
union SharedRecordTableKey::@31 u
dshash_table_handle typmod_table_handle
Definition: typcache.c:186
pg_atomic_uint32 next_typmod
Definition: typcache.c:188
dshash_table_handle record_table_handle
Definition: typcache.c:184
dsa_pointer shared_tupdesc
Definition: typcache.c:223
int tdrefcount
Definition: tupdesc.h:140
int32 tdtypmod
Definition: tupdesc.h:139
Oid tdtypeid
Definition: tupdesc.h:138
uint32 type_id_hash
Definition: typcache.h:36
uint64 tupDesc_identifier
Definition: typcache.h:91
FmgrInfo hash_proc_finfo
Definition: typcache.h:78
int32 domainBaseTypmod
Definition: typcache.h:116
Oid hash_extended_proc
Definition: typcache.h:67
Oid typsubscript
Definition: typcache.h:45
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:102
FmgrInfo cmp_proc_finfo
Definition: typcache.h:77
Oid rng_collation
Definition: typcache.h:101
char typalign
Definition: typcache.h:41
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:99
char typtype
Definition: typcache.h:43
TupleDesc tupDesc
Definition: typcache.h:90
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:79
DomainConstraintCache * domainData
Definition: typcache.h:122
struct TypeCacheEntry * rngtype
Definition: typcache.h:109
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:104
FmgrInfo eq_opr_finfo
Definition: typcache.h:76
Oid btree_opintype
Definition: typcache.h:59
struct TypeCacheEnumData * enumData
Definition: typcache.h:131
struct TypeCacheEntry * nextDomain
Definition: typcache.h:134
bool typbyval
Definition: typcache.h:40
FmgrInfo rng_canonical_finfo
Definition: typcache.h:103
int16 typlen
Definition: typcache.h:39
Oid hash_opintype
Definition: typcache.h:61
Oid typcollation
Definition: typcache.h:48
Oid domainBaseType
Definition: typcache.h:115
char typstorage
Definition: typcache.h:42
Oid rng_opfamily
Definition: typcache.h:100
Bitmapset * sorted_values
Definition: typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:158
Definition: dsa.c:348
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:269
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:221
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
static ItemArray items
Definition: test_tidstore.c:48
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:333
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:421
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:570
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:495
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:552
uint32 hashRowType(TupleDesc desc)
Definition: tupdesc.c:806
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:245
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:770
#define TupleDescSize(src)
Definition: tupdesc.h:198
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:213
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:160
struct TupleDescData * TupleDesc
Definition: tupdesc.h:145
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1715
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3063
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1402
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1828
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1922
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2296
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1731
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:115
void AtEOXact_TypeCache(void)
Definition: typcache.c:3180
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:119
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2725
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1594
static HTAB * RelIdToTypeIdCacheHash
Definition: typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2880
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1602
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2931
static int in_progress_list_maxlen
Definition: typcache.c:228
static int32 NextRecordTypmod
Definition: typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1978
static Oid * in_progress_list
Definition: typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3097
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1755
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1364
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1939
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1578
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1343
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition: typcache.c:2365
struct SharedRecordTableEntry SharedRecordTableEntry
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:2197
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1319
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1540
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1532
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1061
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition: typcache.c:359
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:105
struct SharedTypmodTableEntry SharedTypmodTableEntry
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1763
static int in_progress_list_len
Definition: typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1516
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2910
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1003
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:2134
static RecordCacheArrayEntry * RecordCacheArray
Definition: typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1723
static HTAB * RecordCacheHash
Definition: typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2623
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:96
struct RelIdToTypeIdCacheEntry RelIdToTypeIdCacheEntry
struct RecordCacheEntry RecordCacheEntry
void AtEOSubXact_TypeCache(void)
Definition: typcache.c:3186
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:3043
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:111
struct RecordCacheArrayEntry RecordCacheArrayEntry
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:107
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2513
struct TypeCacheEnumData TypeCacheEnumData
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2599
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2570
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1083
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1489
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2417
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1548
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:2175
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1771
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:108
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1524
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:99
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2652
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:118
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition: typcache.c:117
struct SharedRecordTableKey SharedRecordTableKey
static int32 RecordCacheArrayLen
Definition: typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:2042
static HTAB * TypeCacheHash
Definition: typcache.c:79
static uint64 tupledesc_id_counter
Definition: typcache.c:313
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1586
#define TCFLAGS_HAVE_FIELD_HASHING
Definition: typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:2026
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1956
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2897
static void finalize_in_progress_typentries(void)
Definition: typcache.c:3161
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1332
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1440
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1799
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1610
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:2015
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:969
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:157
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:145
#define TYPECACHE_EQ_OPR
Definition: typcache.h:138
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:148
#define TYPECACHE_TUPDESC
Definition: typcache.h:146
#define TYPECACHE_MULTIRANGE_INFO
Definition: typcache.h:154
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:177
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:143
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:152
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:147
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:150
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:151
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:149
#define TYPECACHE_GT_OPR
Definition: typcache.h:140
#define TYPECACHE_CMP_PROC
Definition: typcache.h:141
struct TypeCacheEntry TypeCacheEntry
#define TYPECACHE_LT_OPR
Definition: typcache.h:139
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:153
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:144
#define TYPECACHE_HASH_PROC
Definition: typcache.h:142