PostgreSQL Source Code git master
Loading...
Searching...
No Matches
typcache.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Core data extracted from the pg_type row is updated
27 * when we detect updates to pg_type. Information dependent on opclasses is
28 * cleared if we detect updates to pg_opclass. We also support clearing the
29 * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 * since those may need to change as a consequence of ALTER TABLE. Domain
31 * constraint changes are also tracked properly.
32 *
33 *
34 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
35 * Portions Copyright (c) 1994, Regents of the University of California
36 *
37 * IDENTIFICATION
38 * src/backend/utils/cache/typcache.c
39 *
40 *-------------------------------------------------------------------------
41 */
42#include "postgres.h"
43
44#include <limits.h>
45
46#include "access/hash.h"
47#include "access/htup_details.h"
48#include "access/nbtree.h"
49#include "access/parallel.h"
50#include "access/relation.h"
51#include "access/session.h"
52#include "access/table.h"
53#include "catalog/pg_am.h"
55#include "catalog/pg_enum.h"
56#include "catalog/pg_operator.h"
57#include "catalog/pg_range.h"
58#include "catalog/pg_type.h"
59#include "commands/defrem.h"
60#include "common/int.h"
61#include "executor/executor.h"
62#include "lib/dshash.h"
63#include "optimizer/optimizer.h"
64#include "port/pg_bitutils.h"
65#include "storage/lwlock.h"
66#include "utils/builtins.h"
67#include "utils/catcache.h"
68#include "utils/fmgroids.h"
70#include "utils/inval.h"
71#include "utils/lsyscache.h"
72#include "utils/memutils.h"
73#include "utils/rel.h"
74#include "utils/syscache.h"
75#include "utils/typcache.h"
76
77
78/* The main type cache hashtable searched by lookup_type_cache */
80
81/*
82 * The mapping of relation's OID to the corresponding composite type OID.
83 * We're keeping the map entry when the corresponding typentry has something
84 * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 */
88
90{
91 Oid relid; /* OID of the relation */
92 Oid composite_typid; /* OID of the relation's composite type */
94
95/* List of type cache entries for domain types */
97
98/* Private flag bits in the TypeCacheEntry.flags field */
99#define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100#define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101#define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102#define TCFLAGS_CHECKED_EQ_OPR 0x000008
103#define TCFLAGS_CHECKED_LT_OPR 0x000010
104#define TCFLAGS_CHECKED_GT_OPR 0x000020
105#define TCFLAGS_CHECKED_CMP_PROC 0x000040
106#define TCFLAGS_CHECKED_HASH_PROC 0x000080
107#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108#define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109#define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110#define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111#define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113#define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114#define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115#define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116#define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120
121/* The flags associated with equality/comparison/hashing are all but these: */
122#define TCFLAGS_OPERATOR_FLAGS \
123 (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126
127/*
128 * Data stored about a domain type's constraints. Note that we do not create
129 * this struct for the common case of a constraint-less domain; we just set
130 * domainData to NULL to indicate that.
131 *
132 * Within a DomainConstraintCache, we store expression plan trees, but the
133 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 * When needed, expression evaluation nodes are built by flat-copying the
135 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 * Such a node tree is not part of the DomainConstraintCache, but is
137 * considered to belong to a DomainConstraintRef.
138 */
140{
141 List *constraints; /* list of DomainConstraintState nodes */
142 MemoryContext dccContext; /* memory context holding all associated data */
143 long dccRefCount; /* number of references to this struct */
144};
145
146/* Private information to support comparisons of enum values */
147typedef struct
148{
149 Oid enum_oid; /* OID of one enum value */
150 float4 sort_order; /* its sort position */
151} EnumItem;
152
153typedef struct TypeCacheEnumData
154{
155 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 int num_values; /* total number of values in enum */
160
161/*
162 * We use a separate table for storing the definitions of non-anonymous
163 * record types. Once defined, a record type will be remembered for the
164 * life of the backend. Subsequent uses of the "same" record type (where
165 * sameness means equalRowTypes) will refer to the existing table entry.
166 *
167 * Stored record types are remembered in a linear array of TupleDescs,
168 * which can be indexed quickly with the assigned typmod. There is also
169 * a hash table to speed searches for matching TupleDescs.
170 */
171
176
177/*
178 * To deal with non-anonymous record types that are exchanged by backends
179 * involved in a parallel query, we also need a shared version of the above.
180 */
182{
183 /* A hash table for finding a matching TupleDesc. */
185 /* A hash table for finding a TupleDesc by typmod. */
187 /* A source of new record typmod numbers. */
189};
190
191/*
192 * When using shared tuple descriptors as hash table keys we need a way to be
193 * able to search for an equal shared TupleDesc using a backend-local
194 * TupleDesc. So we use this type which can hold either, and hash and compare
195 * functions that know how to handle both.
196 */
206
207/*
208 * The shared version of RecordCacheEntry. This lets us look up a typmod
209 * using a TupleDesc which may be in local or shared memory.
210 */
215
216/*
217 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 * up a TupleDesc in shared memory using a typmod.
219 */
225
229
230/*
231 * A comparator function for SharedRecordTableKey.
232 */
233static int
234shared_record_table_compare(const void *a, const void *b, size_t size,
235 void *arg)
236{
237 dsa_area *area = (dsa_area *) arg;
238 const SharedRecordTableKey *k1 = a;
239 const SharedRecordTableKey *k2 = b;
242
243 if (k1->shared)
244 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
249 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 else
251 t2 = k2->u.local_tupdesc;
252
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}
255
256/*
257 * A hash function for SharedRecordTableKey.
258 */
259static uint32
260shared_record_table_hash(const void *a, size_t size, void *arg)
261{
262 dsa_area *area = arg;
263 const SharedRecordTableKey *k = a;
264 TupleDesc t;
265
266 if (k->shared)
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}
273
274/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
283
284/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
293
294/* hashtable for recognizing registered record types */
296
302
303/* array of info about registered record types, indexed by assigned typmod */
305static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306static int32 NextRecordTypmod = 0; /* number of entries used */
307
308/*
309 * Process-wide counter for generating unique tupledesc identifiers.
310 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 */
314
315static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316static void load_rangetype_info(TypeCacheEntry *typentry);
317static void load_multirangetype_info(TypeCacheEntry *typentry);
318static void load_domaintype_info(TypeCacheEntry *typentry);
319static int dcs_cmp(const void *a, const void *b);
321static void dccref_deletion_callback(void *arg);
323static bool array_element_has_equality(TypeCacheEntry *typentry);
324static bool array_element_has_compare(TypeCacheEntry *typentry);
325static bool array_element_has_hashing(TypeCacheEntry *typentry);
328static bool record_fields_have_equality(TypeCacheEntry *typentry);
329static bool record_fields_have_compare(TypeCacheEntry *typentry);
330static bool record_fields_have_hashing(TypeCacheEntry *typentry);
332static void cache_record_field_properties(TypeCacheEntry *typentry);
333static bool range_element_has_hashing(TypeCacheEntry *typentry);
339static void TypeCacheRelCallback(Datum arg, Oid relid);
341 uint32 hashvalue);
343 uint32 hashvalue);
345 uint32 hashvalue);
346static void load_enum_cache_data(TypeCacheEntry *tcache);
348static int enum_oid_cmp(const void *left, const void *right);
350 Datum datum);
352static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
353 uint32 typmod);
356
357
358/*
359 * Hash function compatible with one-arg system cache hash function.
360 */
361static uint32
362type_cache_syshash(const void *key, Size keysize)
363{
364 Assert(keysize == sizeof(Oid));
365 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
366}
367
368/*
369 * lookup_type_cache
370 *
371 * Fetch the type cache entry for the specified datatype, and make sure that
372 * all the fields requested by bits in 'flags' are valid.
373 *
374 * The result is never NULL --- we will ereport() if the passed type OID is
375 * invalid. Note however that we may fail to find one or more of the
376 * values requested by 'flags'; the caller needs to check whether the fields
377 * are InvalidOid or not.
378 *
379 * Note that while filling TypeCacheEntry we might process concurrent
380 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
381 * invalidated. In this case, we typically only clear flags while values are
382 * still available for the caller. It's expected that the caller holds
383 * enough locks on type-depending objects that the values are still relevant.
384 * It's also important that the tupdesc is filled after all other
385 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
386 * invalidated during the lookup_type_cache() call.
387 */
389lookup_type_cache(Oid type_id, int flags)
390{
391 TypeCacheEntry *typentry;
392 bool found;
394
395 if (TypeCacheHash == NULL)
396 {
397 /* First time through: initialize the hash table */
398 HASHCTL ctl;
399 int allocsize;
400
401 ctl.keysize = sizeof(Oid);
402 ctl.entrysize = sizeof(TypeCacheEntry);
403
404 /*
405 * TypeCacheEntry takes hash value from the system cache. For
406 * TypeCacheHash we use the same hash in order to speedup search by
407 * hash value. This is used by hash_seq_init_with_hash_value().
408 */
409 ctl.hash = type_cache_syshash;
410
411 TypeCacheHash = hash_create("Type information cache", 64,
413
415
416 ctl.keysize = sizeof(Oid);
417 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
418 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
420
421 /* Also set up callbacks for SI invalidations */
426
427 /* Also make sure CacheMemoryContext exists */
430
431 /*
432 * reserve enough in_progress_list slots for many cases
433 */
434 allocsize = 4;
437 allocsize * sizeof(*in_progress_list));
438 in_progress_list_maxlen = allocsize;
439 }
440
442
443 /* Register to catch invalidation messages */
445 {
446 int allocsize;
447
448 allocsize = in_progress_list_maxlen * 2;
450 allocsize * sizeof(*in_progress_list));
451 in_progress_list_maxlen = allocsize;
452 }
455
456 /* Try to look up an existing entry */
458 &type_id,
459 HASH_FIND, NULL);
460 if (typentry == NULL)
461 {
462 /*
463 * If we didn't find one, we want to make one. But first look up the
464 * pg_type row, just to make sure we don't make a cache entry for an
465 * invalid type OID. If the type OID is not valid, present a
466 * user-facing error, since some code paths such as domain_in() allow
467 * this function to be reached with a user-supplied OID.
468 */
469 HeapTuple tp;
471
473 if (!HeapTupleIsValid(tp))
476 errmsg("type with OID %u does not exist", type_id)));
478 if (!typtup->typisdefined)
481 errmsg("type \"%s\" is only a shell",
482 NameStr(typtup->typname))));
483
484 /* Now make the typcache entry */
486 &type_id,
487 HASH_ENTER, &found);
488 Assert(!found); /* it wasn't there a moment ago */
489
490 MemSet(typentry, 0, sizeof(TypeCacheEntry));
491
492 /* These fields can never change, by definition */
493 typentry->type_id = type_id;
494 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
495
496 /* Keep this part in sync with the code below */
497 typentry->typlen = typtup->typlen;
498 typentry->typbyval = typtup->typbyval;
499 typentry->typalign = typtup->typalign;
500 typentry->typstorage = typtup->typstorage;
501 typentry->typtype = typtup->typtype;
502 typentry->typrelid = typtup->typrelid;
503 typentry->typsubscript = typtup->typsubscript;
504 typentry->typelem = typtup->typelem;
505 typentry->typarray = typtup->typarray;
506 typentry->typcollation = typtup->typcollation;
507 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
508
509 /* If it's a domain, immediately thread it into the domain cache list */
510 if (typentry->typtype == TYPTYPE_DOMAIN)
511 {
513 firstDomainTypeEntry = typentry;
514 }
515
516 ReleaseSysCache(tp);
517 }
518 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
519 {
520 /*
521 * We have an entry, but its pg_type row got changed, so reload the
522 * data obtained directly from pg_type.
523 */
524 HeapTuple tp;
526
528 if (!HeapTupleIsValid(tp))
531 errmsg("type with OID %u does not exist", type_id)));
533 if (!typtup->typisdefined)
536 errmsg("type \"%s\" is only a shell",
537 NameStr(typtup->typname))));
538
539 /*
540 * Keep this part in sync with the code above. Many of these fields
541 * shouldn't ever change, particularly typtype, but copy 'em anyway.
542 */
543 typentry->typlen = typtup->typlen;
544 typentry->typbyval = typtup->typbyval;
545 typentry->typalign = typtup->typalign;
546 typentry->typstorage = typtup->typstorage;
547 typentry->typtype = typtup->typtype;
548 typentry->typrelid = typtup->typrelid;
549 typentry->typsubscript = typtup->typsubscript;
550 typentry->typelem = typtup->typelem;
551 typentry->typarray = typtup->typarray;
552 typentry->typcollation = typtup->typcollation;
553 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
554
555 ReleaseSysCache(tp);
556 }
557
558 /*
559 * Look up opclasses if we haven't already and any dependent info is
560 * requested.
561 */
567 {
568 Oid opclass;
569
570 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
571 if (OidIsValid(opclass))
572 {
573 typentry->btree_opf = get_opclass_family(opclass);
574 typentry->btree_opintype = get_opclass_input_type(opclass);
575 }
576 else
577 {
578 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
579 }
580
581 /*
582 * Reset information derived from btree opclass. Note in particular
583 * that we'll redetermine the eq_opr even if we previously found one;
584 * this matters in case a btree opclass has been added to a type that
585 * previously had only a hash opclass.
586 */
587 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
592 }
593
594 /*
595 * If we need to look up equality operator, and there's no btree opclass,
596 * force lookup of hash opclass.
597 */
598 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
599 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
600 typentry->btree_opf == InvalidOid)
602
607 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
608 {
609 Oid opclass;
610
611 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
612 if (OidIsValid(opclass))
613 {
614 typentry->hash_opf = get_opclass_family(opclass);
615 typentry->hash_opintype = get_opclass_input_type(opclass);
616 }
617 else
618 {
619 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
620 }
621
622 /*
623 * Reset information derived from hash opclass. We do *not* reset the
624 * eq_opr; if we already found one from the btree opclass, that
625 * decision is still good.
626 */
627 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
630 }
631
632 /*
633 * Look for requested operators and functions, if we haven't already.
634 */
635 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
636 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
637 {
638 Oid eq_opr = InvalidOid;
639
640 if (typentry->btree_opf != InvalidOid)
641 eq_opr = get_opfamily_member(typentry->btree_opf,
642 typentry->btree_opintype,
643 typentry->btree_opintype,
645 if (eq_opr == InvalidOid &&
646 typentry->hash_opf != InvalidOid)
647 eq_opr = get_opfamily_member(typentry->hash_opf,
648 typentry->hash_opintype,
649 typentry->hash_opintype,
651
652 /*
653 * If the proposed equality operator is array_eq or record_eq, check
654 * to see if the element type or column types support equality. If
655 * not, array_eq or record_eq would fail at runtime, so we don't want
656 * to report that the type has equality. (We can omit similar
657 * checking for ranges and multiranges because ranges can't be created
658 * in the first place unless their subtypes support equality.)
659 */
660 if (eq_opr == ARRAY_EQ_OP &&
662 eq_opr = InvalidOid;
663 else if (eq_opr == RECORD_EQ_OP &&
665 eq_opr = InvalidOid;
666
667 /* Force update of eq_opr_finfo only if we're changing state */
668 if (typentry->eq_opr != eq_opr)
669 typentry->eq_opr_finfo.fn_oid = InvalidOid;
670
671 typentry->eq_opr = eq_opr;
672
673 /*
674 * Reset info about hash functions whenever we pick up new info about
675 * equality operator. This is so we can ensure that the hash
676 * functions match the operator.
677 */
678 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
680 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
681 }
682 if ((flags & TYPECACHE_LT_OPR) &&
683 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
684 {
685 Oid lt_opr = InvalidOid;
686
687 if (typentry->btree_opf != InvalidOid)
688 lt_opr = get_opfamily_member(typentry->btree_opf,
689 typentry->btree_opintype,
690 typentry->btree_opintype,
692
693 /*
694 * As above, make sure array_cmp or record_cmp will succeed; but again
695 * we need no special check for ranges or multiranges.
696 */
697 if (lt_opr == ARRAY_LT_OP &&
698 !array_element_has_compare(typentry))
699 lt_opr = InvalidOid;
700 else if (lt_opr == RECORD_LT_OP &&
702 lt_opr = InvalidOid;
703
704 typentry->lt_opr = lt_opr;
705 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
706 }
707 if ((flags & TYPECACHE_GT_OPR) &&
708 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
709 {
710 Oid gt_opr = InvalidOid;
711
712 if (typentry->btree_opf != InvalidOid)
713 gt_opr = get_opfamily_member(typentry->btree_opf,
714 typentry->btree_opintype,
715 typentry->btree_opintype,
717
718 /*
719 * As above, make sure array_cmp or record_cmp will succeed; but again
720 * we need no special check for ranges or multiranges.
721 */
722 if (gt_opr == ARRAY_GT_OP &&
723 !array_element_has_compare(typentry))
724 gt_opr = InvalidOid;
725 else if (gt_opr == RECORD_GT_OP &&
727 gt_opr = InvalidOid;
728
729 typentry->gt_opr = gt_opr;
730 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
731 }
733 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
734 {
735 Oid cmp_proc = InvalidOid;
736
737 if (typentry->btree_opf != InvalidOid)
738 cmp_proc = get_opfamily_proc(typentry->btree_opf,
739 typentry->btree_opintype,
740 typentry->btree_opintype,
742
743 /*
744 * As above, make sure array_cmp or record_cmp will succeed; but again
745 * we need no special check for ranges or multiranges.
746 */
747 if (cmp_proc == F_BTARRAYCMP &&
748 !array_element_has_compare(typentry))
749 cmp_proc = InvalidOid;
750 else if (cmp_proc == F_BTRECORDCMP &&
752 cmp_proc = InvalidOid;
753
754 /* Force update of cmp_proc_finfo only if we're changing state */
755 if (typentry->cmp_proc != cmp_proc)
756 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
757
758 typentry->cmp_proc = cmp_proc;
759 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
760 }
762 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
763 {
764 Oid hash_proc = InvalidOid;
765
766 /*
767 * We insist that the eq_opr, if one has been determined, match the
768 * hash opclass; else report there is no hash function.
769 */
770 if (typentry->hash_opf != InvalidOid &&
771 (!OidIsValid(typentry->eq_opr) ||
772 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
773 typentry->hash_opintype,
774 typentry->hash_opintype,
776 hash_proc = get_opfamily_proc(typentry->hash_opf,
777 typentry->hash_opintype,
778 typentry->hash_opintype,
780
781 /*
782 * As above, make sure hash_array, hash_record, or hash_range will
783 * succeed.
784 */
785 if (hash_proc == F_HASH_ARRAY &&
786 !array_element_has_hashing(typentry))
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RECORD &&
790 hash_proc = InvalidOid;
791 else if (hash_proc == F_HASH_RANGE &&
792 !range_element_has_hashing(typentry))
793 hash_proc = InvalidOid;
794
795 /*
796 * Likewise for hash_multirange.
797 */
798 if (hash_proc == F_HASH_MULTIRANGE &&
800 hash_proc = InvalidOid;
801
802 /* Force update of hash_proc_finfo only if we're changing state */
803 if (typentry->hash_proc != hash_proc)
805
806 typentry->hash_proc = hash_proc;
807 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
808 }
809 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
812 {
813 Oid hash_extended_proc = InvalidOid;
814
815 /*
816 * We insist that the eq_opr, if one has been determined, match the
817 * hash opclass; else report there is no hash function.
818 */
819 if (typentry->hash_opf != InvalidOid &&
820 (!OidIsValid(typentry->eq_opr) ||
821 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
822 typentry->hash_opintype,
823 typentry->hash_opintype,
825 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
826 typentry->hash_opintype,
827 typentry->hash_opintype,
829
830 /*
831 * As above, make sure hash_array_extended, hash_record_extended, or
832 * hash_range_extended will succeed.
833 */
834 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
842 hash_extended_proc = InvalidOid;
843
844 /*
845 * Likewise for hash_multirange_extended.
846 */
847 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
849 hash_extended_proc = InvalidOid;
850
851 /* Force update of proc finfo only if we're changing state */
852 if (typentry->hash_extended_proc != hash_extended_proc)
854
855 typentry->hash_extended_proc = hash_extended_proc;
857 }
858
859 /*
860 * Set up fmgr lookup info as requested
861 *
862 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
863 * which is not quite right (they're really in the hash table's private
864 * memory context) but this will do for our purposes.
865 *
866 * Note: the code above avoids invalidating the finfo structs unless the
867 * referenced operator/function OID actually changes. This is to prevent
868 * unnecessary leakage of any subsidiary data attached to an finfo, since
869 * that would cause session-lifespan memory leaks.
870 */
871 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
872 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
873 typentry->eq_opr != InvalidOid)
874 {
876
877 eq_opr_func = get_opcode(typentry->eq_opr);
878 if (eq_opr_func != InvalidOid)
881 }
882 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
883 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
884 typentry->cmp_proc != InvalidOid)
885 {
886 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
888 }
889 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
890 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
891 typentry->hash_proc != InvalidOid)
892 {
893 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
895 }
898 typentry->hash_extended_proc != InvalidOid)
899 {
901 &typentry->hash_extended_proc_finfo,
903 }
904
905 /*
906 * If it's a composite type (row type), get tupdesc if requested
907 */
908 if ((flags & TYPECACHE_TUPDESC) &&
909 typentry->tupDesc == NULL &&
910 typentry->typtype == TYPTYPE_COMPOSITE)
911 {
912 load_typcache_tupdesc(typentry);
913 }
914
915 /*
916 * If requested, get information about a range type
917 *
918 * This includes making sure that the basic info about the range element
919 * type is up-to-date.
920 */
921 if ((flags & TYPECACHE_RANGE_INFO) &&
922 typentry->typtype == TYPTYPE_RANGE)
923 {
924 if (typentry->rngelemtype == NULL)
925 load_rangetype_info(typentry);
926 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
927 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
928 }
929
930 /*
931 * If requested, get information about a multirange type
932 */
933 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
934 typentry->rngtype == NULL &&
935 typentry->typtype == TYPTYPE_MULTIRANGE)
936 {
937 load_multirangetype_info(typentry);
938 }
939
940 /*
941 * If requested, get information about a domain type
942 */
943 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
944 typentry->domainBaseType == InvalidOid &&
945 typentry->typtype == TYPTYPE_DOMAIN)
946 {
947 typentry->domainBaseTypmod = -1;
948 typentry->domainBaseType =
949 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
950 }
951 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
952 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
953 typentry->typtype == TYPTYPE_DOMAIN)
954 {
955 load_domaintype_info(typentry);
956 }
957
958 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
959
962
964
965 return typentry;
966}
967
968/*
969 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
970 */
971static void
973{
974 Relation rel;
975
976 if (!OidIsValid(typentry->typrelid)) /* should not happen */
977 elog(ERROR, "invalid typrelid for composite type %u",
978 typentry->type_id);
979 rel = relation_open(typentry->typrelid, AccessShareLock);
980 Assert(rel->rd_rel->reltype == typentry->type_id);
981
982 /*
983 * Link to the tupdesc and increment its refcount (we assert it's a
984 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
985 * because the reference mustn't be entered in the current resource owner;
986 * it can outlive the current query.
987 */
988 typentry->tupDesc = RelationGetDescr(rel);
989
990 Assert(typentry->tupDesc->tdrefcount > 0);
991 typentry->tupDesc->tdrefcount++;
992
993 /*
994 * In future, we could take some pains to not change tupDesc_identifier if
995 * the tupdesc didn't really change; but for now it's not worth it.
996 */
998
1000}
1001
1002/*
1003 * load_rangetype_info --- helper routine to set up range type information
1004 */
1005static void
1007{
1009 HeapTuple tup;
1015 Oid opcintype;
1016 Oid cmpFnOid;
1017
1018 /* get information from pg_range */
1020 /* should not fail, since we already checked typtype ... */
1021 if (!HeapTupleIsValid(tup))
1022 elog(ERROR, "cache lookup failed for range type %u",
1023 typentry->type_id);
1025
1026 subtypeOid = pg_range->rngsubtype;
1027 typentry->rng_collation = pg_range->rngcollation;
1028 opclassOid = pg_range->rngsubopc;
1029 canonicalOid = pg_range->rngcanonical;
1030 subdiffOid = pg_range->rngsubdiff;
1031
1033
1034 /* get opclass properties and look up the comparison function */
1037 typentry->rng_opfamily = opfamilyOid;
1038
1039 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1040 BTORDER_PROC);
1042 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1043 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1044
1045 /* set up cached fmgrinfo structs */
1054
1055 /* Lastly, set up link to the element type --- this marks data valid */
1057}
1058
1059/*
1060 * load_multirangetype_info --- helper routine to set up multirange type
1061 * information
1062 */
1063static void
1065{
1067
1070 elog(ERROR, "cache lookup failed for multirange type %u",
1071 typentry->type_id);
1072
1074}
1075
1076/*
1077 * load_domaintype_info --- helper routine to set up domain constraint info
1078 *
1079 * Note: we assume we're called in a relatively short-lived context, so it's
1080 * okay to leak data into the current context while scanning pg_constraint.
1081 * We build the new DomainConstraintCache data in a context underneath
1082 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1083 * complete.
1084 */
1085static void
1087{
1088 Oid typeOid = typentry->type_id;
1090 bool notNull = false;
1092 int cconslen;
1095
1096 /*
1097 * If we're here, any existing constraint info is stale, so release it.
1098 * For safety, be sure to null the link before trying to delete the data.
1099 */
1100 if (typentry->domainData)
1101 {
1102 dcc = typentry->domainData;
1103 typentry->domainData = NULL;
1104 decr_dcc_refcount(dcc);
1105 }
1106
1107 /*
1108 * We try to optimize the common case of no domain constraints, so don't
1109 * create the dcc object and context until we find a constraint. Likewise
1110 * for the temp sorting array.
1111 */
1112 dcc = NULL;
1113 ccons = NULL;
1114 cconslen = 0;
1115
1116 /*
1117 * Scan pg_constraint for relevant constraints. We want to find
1118 * constraints for not just this domain, but any ancestor domains, so the
1119 * outer loop crawls up the domain stack.
1120 */
1122
1123 for (;;)
1124 {
1125 HeapTuple tup;
1128 int nccons = 0;
1129 ScanKeyData key[1];
1130 SysScanDesc scan;
1131
1133 if (!HeapTupleIsValid(tup))
1134 elog(ERROR, "cache lookup failed for type %u", typeOid);
1136
1137 if (typTup->typtype != TYPTYPE_DOMAIN)
1138 {
1139 /* Not a domain, so done */
1141 break;
1142 }
1143
1144 /* Test for NOT NULL Constraint */
1145 if (typTup->typnotnull)
1146 notNull = true;
1147
1148 /* Look for CHECK Constraints on this domain */
1149 ScanKeyInit(&key[0],
1152 ObjectIdGetDatum(typeOid));
1153
1155 NULL, 1, key);
1156
1158 {
1160 Datum val;
1161 bool isNull;
1162 char *constring;
1163 Expr *check_expr;
1165
1166 /* Ignore non-CHECK constraints */
1167 if (c->contype != CONSTRAINT_CHECK)
1168 continue;
1169
1170 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1172 conRel->rd_att, &isNull);
1173 if (isNull)
1174 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1175 NameStr(typTup->typname), NameStr(c->conname));
1176
1177 /* Create the DomainConstraintCache object and context if needed */
1178 if (dcc == NULL)
1179 {
1180 MemoryContext cxt;
1181
1183 "Domain constraints",
1185 dcc = (DomainConstraintCache *)
1187 dcc->constraints = NIL;
1188 dcc->dccContext = cxt;
1189 dcc->dccRefCount = 0;
1190 }
1191
1192 /* Convert conbin to a node tree, still in caller's context */
1194 check_expr = (Expr *) stringToNode(constring);
1195
1196 /*
1197 * Plan the expression, since ExecInitExpr will expect that.
1198 *
1199 * Note: caching the result of expression_planner() is not very
1200 * good practice. Ideally we'd use a CachedExpression here so
1201 * that we would react promptly to, eg, changes in inlined
1202 * functions. However, because we don't support mutable domain
1203 * CHECK constraints, it's not really clear that it's worth the
1204 * extra overhead to do that.
1205 */
1206 check_expr = expression_planner(check_expr);
1207
1208 /* Create only the minimally needed stuff in dccContext */
1210
1213 r->name = pstrdup(NameStr(c->conname));
1214 r->check_expr = copyObject(check_expr);
1215 r->check_exprstate = NULL;
1216
1218
1219 /* Accumulate constraints in an array, for sorting below */
1220 if (ccons == NULL)
1221 {
1222 cconslen = 8;
1225 }
1226 else if (nccons >= cconslen)
1227 {
1228 cconslen *= 2;
1231 }
1232 ccons[nccons++] = r;
1233 }
1234
1235 systable_endscan(scan);
1236
1237 if (nccons > 0)
1238 {
1239 /*
1240 * Sort the items for this domain, so that CHECKs are applied in a
1241 * deterministic order.
1242 */
1243 if (nccons > 1)
1245
1246 /*
1247 * Now attach them to the overall list. Use lcons() here because
1248 * constraints of parent domains should be applied earlier.
1249 */
1251 while (nccons > 0)
1252 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1254 }
1255
1256 /* loop to next domain in stack */
1257 typeOid = typTup->typbasetype;
1259 }
1260
1262
1263 /*
1264 * Only need to add one NOT NULL check regardless of how many domains in
1265 * the stack request it.
1266 */
1267 if (notNull)
1268 {
1270
1271 /* Create the DomainConstraintCache object and context if needed */
1272 if (dcc == NULL)
1273 {
1274 MemoryContext cxt;
1275
1277 "Domain constraints",
1279 dcc = (DomainConstraintCache *)
1281 dcc->constraints = NIL;
1282 dcc->dccContext = cxt;
1283 dcc->dccRefCount = 0;
1284 }
1285
1286 /* Create node trees in DomainConstraintCache's context */
1288
1290
1292 r->name = pstrdup("NOT NULL");
1293 r->check_expr = NULL;
1294 r->check_exprstate = NULL;
1295
1296 /* lcons to apply the nullness check FIRST */
1297 dcc->constraints = lcons(r, dcc->constraints);
1298
1300 }
1301
1302 /*
1303 * If we made a constraint object, move it into CacheMemoryContext and
1304 * attach it to the typcache entry.
1305 */
1306 if (dcc)
1307 {
1309 typentry->domainData = dcc;
1310 dcc->dccRefCount++; /* count the typcache's reference */
1311 }
1312
1313 /* Either way, the typcache entry's domain data is now valid. */
1315}
1316
1317/*
1318 * qsort comparator to sort DomainConstraintState pointers by name
1319 */
1320static int
1321dcs_cmp(const void *a, const void *b)
1322{
1323 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1324 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1325
1326 return strcmp((*ca)->name, (*cb)->name);
1327}
1328
1329/*
1330 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1331 * and free it if no references remain
1332 */
1333static void
1335{
	/* A dcc must never be released more times than it was referenced */
1336 Assert(dcc->dccRefCount > 0);
	/*
	 * Free the cache when the last reference goes away.  NOTE(review): the
	 * statement performing the free was dropped from this extract; it
	 * presumably deletes dcc's memory context -- confirm against full source.
	 */
1337 if (--(dcc->dccRefCount) <= 0)
1339}
1340
1341/*
1342 * Context reset/delete callback for a DomainConstraintRef
1343 */
1344static void
1346{
	/* NOTE(review): the line recovering the DomainConstraintRef from the
	 * callback argument was dropped from this extract; 'ref' presumably
	 * comes from the registered callback.arg -- confirm against full source. */
1348 DomainConstraintCache *dcc = ref->dcc;
1349
1350 /* Paranoia --- be sure link is nulled before trying to release */
1351 if (dcc)
1352 {
1353 ref->constraints = NIL;
1354 ref->dcc = NULL;
	/* Drop this ref's count; frees the cache if no references remain */
1355 decr_dcc_refcount(dcc);
1356 }
1357}
1358
1359/*
1360 * prep_domain_constraints --- prepare domain constraints for execution
1361 *
1362 * The expression trees stored in the DomainConstraintCache's list are
1363 * converted to executable expression state trees stored in execctx.
1364 */
1365static List *
1367{
1368 List *result = NIL;
1370 ListCell *lc;
1371
	/*
	 * NOTE(review): this extract is missing the line that switches into the
	 * caller-supplied execution context (and, inside the loop, the switch
	 * back and the allocation of each new DomainConstraintState) -- confirm
	 * allocation placement against the full source.
	 */
1373
1374 foreach(lc, constraints)
1375 {
1378
	/* Shallow-copy the cached constraint; only the exprstate is per-ref */
1380 newr->constrainttype = r->constrainttype;
1381 newr->name = r->name;
1382 newr->check_expr = r->check_expr;
	/* Build an executable expression state tree for the CHECK expression */
1383 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1384
1385 result = lappend(result, newr);
1386 }
1387
1389
1390 return result;
1391}
1392
1393/*
1394 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1395 *
1396 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1397 * lives. The ref will be cleaned up when that context is reset/deleted.
1398 *
1399 * Caller must also tell us whether it wants check_exprstate fields to be
1400 * computed in the DomainConstraintState nodes attached to this ref.
1401 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1402 */
1403void
1405 MemoryContext refctx, bool need_exprstate)
1406{
1407 /* Look up the typcache entry --- we assume it survives indefinitely */
	/* NOTE(review): the lookup_type_cache() call filling ref->tcache was
	 * dropped from this extract -- confirm against the full source. */
1409 ref->need_exprstate = need_exprstate;
1410 /* For safety, establish the callback before acquiring a refcount */
1411 ref->refctx = refctx;
1412 ref->dcc = NULL;
1413 ref->callback.func = dccref_deletion_callback;
1414 ref->callback.arg = ref;
	/* From here on, resetting refctx releases whatever we acquire below */
1415 MemoryContextRegisterResetCallback(refctx, &ref->callback);
1416 /* Acquire refcount if there are constraints, and set up exported list */
1417 if (ref->tcache->domainData)
1418 {
1419 ref->dcc = ref->tcache->domainData;
1420 ref->dcc->dccRefCount++;
1421 if (ref->need_exprstate)
	/* Build executable state trees, allocated in the caller's context */
1422 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1423 ref->refctx);
1424 else
	/* Caller doesn't need exprstates; expose the cached list directly */
1425 ref->constraints = ref->dcc->constraints;
1426 }
1427 else
1428 ref->constraints = NIL;
1429}
1430
1431/*
1432 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1433 *
1434 * If the domain's constraint set changed, ref->constraints is updated to
1435 * point at a new list of cached constraints.
1436 *
1437 * In the normal case where nothing happened to the domain, this is cheap
1438 * enough that it's reasonable (and expected) to check before *each* use
1439 * of the constraint info.
1440 */
1441void
1443{
1444 TypeCacheEntry *typentry = ref->tcache;
1445
1446 /* Make sure typcache entry's data is up to date */
1447 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1448 typentry->typtype == TYPTYPE_DOMAIN)
1449 load_domaintype_info(typentry);
1450
1451 /* Transfer to ref object if there's new info, adjusting refcounts */
1452 if (ref->dcc != typentry->domainData)
1453 {
1454 /* Paranoia --- be sure link is nulled before trying to release */
1455 DomainConstraintCache *dcc = ref->dcc;
1456
1457 if (dcc)
1458 {
1459 /*
1460 * Note: we just leak the previous list of executable domain
1461 * constraints. Alternatively, we could keep those in a child
1462 * context of ref->refctx and free that context at this point.
1463 * However, in practice this code path will be taken so seldom
1464 * that the extra bookkeeping for a child context doesn't seem
1465 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1466 */
1467 ref->constraints = NIL;
1468 ref->dcc = NULL;
1469 decr_dcc_refcount(dcc);
1470 }
	/* Adopt the typcache's current constraint cache, if any */
1471 dcc = typentry->domainData;
1472 if (dcc)
1473 {
1474 ref->dcc = dcc;
	/* Count this ref's reference */
1475 dcc->dccRefCount++;
1476 if (ref->need_exprstate)
1477 ref->constraints = prep_domain_constraints(dcc->constraints,
1478 ref->refctx);
1479 else
1480 ref->constraints = dcc->constraints;
1481 }
1482 }
1483}
1484
1485/*
1486 * DomainHasConstraints --- utility routine to check if a domain has constraints
1487 *
1488 * Returns true if the domain has any constraints at all. If has_volatile
1489 * is not NULL, also checks whether any CHECK constraint contains a volatile
1490 * expression and sets *has_volatile accordingly.
1491 *
1492 * This is defined to return false, not fail, if type is not a domain.
1493 */
1494bool
1496{
1497 TypeCacheEntry *typentry;
1498
1499 /*
1500 * Note: a side effect is to cause the typcache's domain data to become
1501 * valid. This is fine since we'll likely need it soon if there is any.
1502 */
	/* NOTE(review): the lookup_type_cache() call that fills 'typentry' was
	 * dropped from this extract -- confirm against the full source. */
1504
1505 if (typentry->domainData == NULL)
1506 return false;
1507
1508 if (has_volatile)
1509 {
1510 *has_volatile = false;
1511
	/* NOTE(review): the foreach(...) loop header was dropped here; only
	 * its second argument survives on the next line. */
1513 typentry->domainData->constraints)
1514 {
	/*
	 * Only CHECK constraints carry expressions; the volatility test
	 * itself was dropped from this extract (presumably a call like
	 * contain_volatile_functions() -- TODO confirm).
	 */
1515 if (constrstate->constrainttype == DOM_CONSTRAINT_CHECK &&
1517 {
1518 *has_volatile = true;
1519 break;
1520 }
1521 }
1522 }
1523
	/* Reaching here means domainData exists, i.e. at least one constraint */
1524 return true;
1525}
1526
1527
1528/*
1529 * array_element_has_equality and friends are helper routines to check
1530 * whether we should believe that array_eq and related functions will work
1531 * on the given array type or composite type.
1532 *
1533 * The logic above may call these repeatedly on the same type entry, so we
1534 * make use of the typentry->flags field to cache the results once known.
1535 * Also, we assume that we'll probably want all these facts about the type
1536 * if we want any, so we cache them all using only one lookup of the
1537 * component datatype(s).
1538 */
1539
1540static bool
1542{
	/* Lazily fill the element-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1543 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1545 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1546}
1547
1548static bool
1550{
	/* Lazily fill the element-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1551 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1553 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1554}
1555
1556static bool
1558{
	/* Lazily fill the element-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1559 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1561 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1562}
1563
1564static bool
1571
1572static void
1574{
	/* NOTE(review): the line fetching the array's element type OID
	 * ('elem_type') was dropped from this extract. */
1576
1577 if (OidIsValid(elem_type))
1578 {
	/* NOTE(review): the lookup_type_cache() call that fills 'elementry'
	 * was dropped from this extract. */
1580
	/* Copy the element type's operator availability into our flag bits */
1586 if (OidIsValid(elementry->eq_opr))
1587 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1588 if (OidIsValid(elementry->cmp_proc))
1589 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1590 if (OidIsValid(elementry->hash_proc))
1591 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1592 if (OidIsValid(elementry->hash_extended_proc))
	/* (the extended-hashing flag assignment was dropped from this extract) */
1594 }
1596}
1597
1598/*
1599 * Likewise, some helper functions for composite types.
1600 */
1601
1602static bool
1604{
	/* Lazily fill the field-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1605 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1607 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1608}
1609
1610static bool
1612{
	/* Lazily fill the field-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1613 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1615 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1616}
1617
1618static bool
1620{
	/* Lazily fill the field-property flag bits on first use (the call
	 * that computes them was dropped from this extract). */
1621 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1623 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1624}
1625
1626static bool
1633
1634static void
1636{
1637 /*
1638 * For type RECORD, we can't really tell what will work, since we don't
1639 * have access here to the specific anonymous type. Just assume that
1640 * equality and comparison will (we may get a failure at runtime). We
1641 * could also claim that hashing works, but then if code that has the
1642 * option between a comparison-based (sort-based) and a hash-based plan
1643 * chooses hashing, stuff could fail that would otherwise work if it chose
1644 * a comparison-based plan. In practice more types support comparison
1645 * than hashing.
1646 */
1647 if (typentry->type_id == RECORDOID)
1648 {
1649 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1651 }
1652 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1653 {
1654 TupleDesc tupdesc;
1655 int newflags;
1656 int i;
1657
1658 /* Fetch composite type's tupdesc if we don't have it already */
1659 if (typentry->tupDesc == NULL)
1660 load_typcache_tupdesc(typentry);
1661 tupdesc = typentry->tupDesc;
1662
1663 /* Must bump the refcount while we do additional catalog lookups */
1664 IncrTupleDescRefCount(tupdesc);
1665
1666 /* Have each property if all non-dropped fields have the property */
1671 for (i = 0; i < tupdesc->natts; i++)
1672 {
1674 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1675
1676 if (attr->attisdropped)
1677 continue;
1678
1679 fieldentry = lookup_type_cache(attr->atttypid,
1684 if (!OidIsValid(fieldentry->eq_opr))
1686 if (!OidIsValid(fieldentry->cmp_proc))
1688 if (!OidIsValid(fieldentry->hash_proc))
1690 if (!OidIsValid(fieldentry->hash_extended_proc))
1692
1693 /* We can drop out of the loop once we disprove all bits */
1694 if (newflags == 0)
1695 break;
1696 }
1697 typentry->flags |= newflags;
1698
1699 DecrTupleDescRefCount(tupdesc);
1700 }
1701 else if (typentry->typtype == TYPTYPE_DOMAIN)
1702 {
1703 /* If it's domain over composite, copy base type's properties */
1705
1706 /* load up basetype info if we didn't already */
1707 if (typentry->domainBaseType == InvalidOid)
1708 {
1709 typentry->domainBaseTypmod = -1;
1710 typentry->domainBaseType =
1711 getBaseTypeAndTypmod(typentry->type_id,
1712 &typentry->domainBaseTypmod);
1713 }
1719 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1720 {
1722 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1726 }
1727 }
1729}
1730
1731/*
1732 * Likewise, some helper functions for range and multirange types.
1733 *
1734 * We can borrow the flag bits for array element properties to use for range
1735 * element properties, since those flag bits otherwise have no use in a
1736 * range or multirange type's typcache entry.
1737 */
1738
1739static bool
1741{
	/* Element-property flag bits are borrowed from the array case (see
	 * comment above); lazily filled on first use (the computing call was
	 * dropped from this extract). */
1742 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1744 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1745}
1746
1747static bool
1754
1755static void
1757{
1758 /* load up subtype link if we didn't already */
1759 if (typentry->rngelemtype == NULL &&
1760 typentry->typtype == TYPTYPE_RANGE)
1761 load_rangetype_info(typentry);
1762
1763 if (typentry->rngelemtype != NULL)
1764 {
	/* NOTE(review): the declaration and lookup of 'elementry' (the range
	 * subtype's typcache entry) were dropped from this extract. */
1766
1767 /* might need to calculate subtype's hash function properties */
1771 if (OidIsValid(elementry->hash_proc))
1772 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1773 if (OidIsValid(elementry->hash_extended_proc))
	/* (the extended-hashing flag assignment was dropped from this extract) */
1775 }
1777}
1778
1779static bool
1781{
	/* Element-property flag bits are borrowed from the array case (see
	 * comment above); lazily filled on first use (the computing call was
	 * dropped from this extract). */
1782 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1784 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1785}
1786
1787static bool
1794
1795static void
1797{
1798 /* load up range link if we didn't already */
1799 if (typentry->rngtype == NULL &&
1800 typentry->typtype == TYPTYPE_MULTIRANGE)
1801 load_multirangetype_info(typentry);
1802
1803 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1804 {
	/* NOTE(review): the declaration and lookup of 'elementry' (the range
	 * subtype's typcache entry) were dropped from this extract. */
1806
1807 /* might need to calculate subtype's hash function properties */
1811 if (OidIsValid(elementry->hash_proc))
1812 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1813 if (OidIsValid(elementry->hash_extended_proc))
	/* (the extended-hashing flag assignment was dropped from this extract) */
1815 }
1817}
1818
1819/*
1820 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1821 * to store 'typmod'.
1822 */
1823static void
1845
1846/*
1847 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1848 *
1849 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1850 * hasn't had its refcount bumped.
1851 */
1852static TupleDesc
1854{
1855 if (type_id != RECORDOID)
1856 {
1857 /*
1858 * It's a named composite type, so use the regular typcache.
1859 */
1860 TypeCacheEntry *typentry;
1861
1862 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1863 if (typentry->tupDesc == NULL && !noError)
1864 ereport(ERROR,
1866 errmsg("type %s is not composite",
1867 format_type_be(type_id))));
1868 return typentry->tupDesc;
1869 }
1870 else
1871 {
1872 /*
1873 * It's a transient record type, so look in our record-type table.
1874 */
1875 if (typmod >= 0)
1876 {
1877 /* Is it already in our local cache? */
1878 if (typmod < RecordCacheArrayLen &&
1879 RecordCacheArray[typmod].tupdesc != NULL)
1880 return RecordCacheArray[typmod].tupdesc;
1881
1882 /* Are we attached to a shared record typmod registry? */
	/* NOTE(review): the registry test and part of the dshash_find() call
	 * were dropped from this extract -- confirm against the full source. */
1884 {
1886
1887 /* Try to find it in the shared typmod index. */
1889 &typmod, false);
1890 if (entry != NULL)
1891 {
1892 TupleDesc tupdesc;
1893
	/* Map the shared descriptor into this backend's address space */
1894 tupdesc = (TupleDesc)
1896 entry->shared_tupdesc);
1897 Assert(typmod == tupdesc->tdtypmod);
1898
1899 /* We may need to extend the local RecordCacheArray. */
1901
1902 /*
1903 * Our local array can now point directly to the TupleDesc
1904 * in shared memory, which is non-reference-counted.
1905 */
1906 RecordCacheArray[typmod].tupdesc = tupdesc;
1907 Assert(tupdesc->tdrefcount == -1);
1908
1909 /*
1910 * We don't share tupdesc identifiers across processes, so
1911 * assign one locally.
1912 */
1914
	/* NOTE(review): the call releasing the shared hash entry was
	 * partially dropped here; only its last argument survives. */
1916 entry);
1917
1918 return RecordCacheArray[typmod].tupdesc;
1919 }
1920 }
1921 }
1922
	/* Typmod unknown (or negative): this record was never registered */
1923 if (!noError)
1924 ereport(ERROR,
1926 errmsg("record type has not been registered")));
1927 return NULL;
1928 }
1929}
1930
1931/*
1932 * lookup_rowtype_tupdesc
1933 *
1934 * Given a typeid/typmod that should describe a known composite type,
1935 * return the tuple descriptor for the type. Will ereport on failure.
1936 * (Use ereport because this is reachable with user-specified OIDs,
1937 * for example from record_in().)
1938 *
1939 * Note: on success, we increment the refcount of the returned TupleDesc,
1940 * and log the reference in CurrentResourceOwner. Caller must call
1941 * ReleaseTupleDesc when done using the tupdesc. (There are some
1942 * cases in which the returned tupdesc is not refcounted, in which
1943 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1944 * the tupdesc is guaranteed to live till process exit.)
1945 */
1948{
1949 TupleDesc tupDesc;
1950
	/* noError = false: ereports if the rowtype isn't known */
1951 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	/* Count the reference (a no-op for non-refcounted descriptors) */
1952 PinTupleDesc(tupDesc);
1953 return tupDesc;
1954}
1955
1956/*
1957 * lookup_rowtype_tupdesc_noerror
1958 *
1959 * As above, but if the type is not a known composite type and noError
1960 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1961 * type_id is passed, you'll get an ereport anyway.)
1962 */
1965{
1966 TupleDesc tupDesc;
1967
1968 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* May be NULL when noError is true; only pin a real descriptor */
1969 if (tupDesc != NULL)
1970 PinTupleDesc(tupDesc);
1971 return tupDesc;
1972}
1973
1974/*
1975 * lookup_rowtype_tupdesc_copy
1976 *
1977 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1978 * copied into the CurrentMemoryContext and is not reference-counted.
1979 */
1982{
1983 TupleDesc tmp;
1984
1985 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	/* Copy into CurrentMemoryContext; the result is not refcounted, so
	 * the caller need not release it */
1986 return CreateTupleDescCopyConstr(tmp);
1987}
1988
1989/*
1990 * lookup_rowtype_tupdesc_domain
1991 *
1992 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1993 * a domain over a named composite type; so this is effectively equivalent to
1994 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1995 * except for being a tad faster.
1996 *
1997 * Note: the reason we don't fold the look-through-domain behavior into plain
1998 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1999 * dealing with a domain. Otherwise they might construct a tuple that should
2000 * be of the domain type, but not apply domain constraints.
2001 */
2004{
2005 TupleDesc tupDesc;
2006
2007 if (type_id != RECORDOID)
2008 {
2009 /*
2010 * Check for domain or named composite type. We might as well load
2011 * whichever data is needed.
2012 */
2013 TypeCacheEntry *typentry;
2014
	/* NOTE(review): the flag arguments of this lookup_type_cache() call
	 * were dropped from this extract. */
2015 typentry = lookup_type_cache(type_id,
	/* NOTE(review): for domains, the recursive lookup of the base type's
	 * tupdesc was dropped here; only its trailing arguments survive on
	 * the next two lines -- confirm against the full source. */
2018 if (typentry->typtype == TYPTYPE_DOMAIN)
2020 typentry->domainBaseTypmod,
2021 noError);
2022 if (typentry->tupDesc == NULL && !noError)
2023 ereport(ERROR,
2025 errmsg("type %s is not composite",
2026 format_type_be(type_id))));
2027 tupDesc = typentry->tupDesc;
2028 }
2029 else
2030 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* Pin only a real descriptor; NULL flows through when noError */
2031 if (tupDesc != NULL)
2032 PinTupleDesc(tupDesc);
2033 return tupDesc;
2034}
2035
2036/*
2037 * Hash function for the hash table of RecordCacheEntry.
2038 */
2039static uint32
2040record_type_typmod_hash(const void *data, size_t size)
2041{
2042 const RecordCacheEntry *entry = data;
2043
2044 return hashRowType(entry->tupdesc);
2045}
2046
2047/*
2048 * Match function for the hash table of RecordCacheEntry.
2049 */
2050static int
2051record_type_typmod_compare(const void *a, const void *b, size_t size)
2052{
2053 const RecordCacheEntry *left = a;
2054 const RecordCacheEntry *right = b;
2055
2056 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2057}
2058
2059/*
2060 * assign_record_type_typmod
2061 *
2062 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2063 * for the type, and set the tupdesc's tdtypmod field to a value that will
2064 * identify this cache entry to lookup_rowtype_tupdesc.
2065 */
2066void
2068{
	/* NOTE(review): declarations of 'recentry', 'entDesc' and the
	 * memory-context bookkeeping were dropped from this extract. */
2071 bool found;
2073
2074 Assert(tupDesc->tdtypeid == RECORDOID);
2075
2076 if (RecordCacheHash == NULL)
2077 {
2078 /* First time through: initialize the hash table */
2079 HASHCTL ctl;
2080
2081 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2082 ctl.entrysize = sizeof(RecordCacheEntry);
	/* NOTE(review): the ctl.hash/ctl.match assignments (presumably using
	 * record_type_typmod_hash/compare) were dropped from this extract. */
2085 RecordCacheHash = hash_create("Record information cache", 64,
2086 &ctl,
2088
2089 /* Also make sure CacheMemoryContext exists */
2090 if (!CacheMemoryContext)
2092 }
2093
2094 /*
2095 * Find a hashtable entry for this tuple descriptor. We don't use
2096 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2097 * the allocations succeed before we create the new entry.
2098 */
2100 &tupDesc,
2101 HASH_FIND, &found);
2102 if (found && recentry->tupdesc != NULL)
2103 {
	/* Already known: just hand back the previously assigned typmod */
2104 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2105 return;
2106 }
2107
2108 /* Not present, so need to manufacture an entry */
2110
2111 /* Look in the SharedRecordTypmodRegistry, if attached */
2113 if (entDesc == NULL)
2114 {
2115 /*
2116 * Make sure we have room before we CreateTupleDescCopy() or advance
2117 * NextRecordTypmod.
2118 */
2120
2121 /* Reference-counted local cache only. */
2122 entDesc = CreateTupleDescCopy(tupDesc);
2123 entDesc->tdrefcount = 1;
2124 entDesc->tdtypmod = NextRecordTypmod++;
2125 }
2126 else
2127 {
	/* NOTE(review): the shared-registry branch body was dropped from
	 * this extract. */
2129 }
2130
2132
2133 /* Assign a unique tupdesc identifier, too. */
2135
2136 /* Fully initialized; create the hash table entry */
2138 &tupDesc,
2139 HASH_ENTER, NULL);
2140 recentry->tupdesc = entDesc;
2141
2142 /* Update the caller's tuple descriptor. */
2143 tupDesc->tdtypmod = entDesc->tdtypmod;
2144
2146}
2147
2148/*
2149 * assign_record_type_identifier
2150 *
2151 * Get an identifier, which will be unique over the lifespan of this backend
2152 * process, for the current tuple descriptor of the specified composite type.
2153 * For named composite types, the value is guaranteed to change if the type's
2154 * definition does. For registered RECORD types, the value will not change
2155 * once assigned, since the registered type won't either. If an anonymous
2156 * RECORD type is specified, we return a new identifier on each call.
2157 */
2158uint64
2160{
2161 if (type_id != RECORDOID)
2162 {
2163 /*
2164 * It's a named composite type, so use the regular typcache.
2165 */
2166 TypeCacheEntry *typentry;
2167
2168 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2169 if (typentry->tupDesc == NULL)
2170 ereport(ERROR,
2172 errmsg("type %s is not composite",
2173 format_type_be(type_id))));
	/* A cached tupdesc must already carry a nonzero identifier */
2174 Assert(typentry->tupDesc_identifier != 0);
2175 return typentry->tupDesc_identifier;
2176 }
2177 else
2178 {
2179 /*
2180 * It's a transient record type, so look in our record-type table.
2181 */
2182 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2183 RecordCacheArray[typmod].tupdesc != NULL)
2184 {
2185 Assert(RecordCacheArray[typmod].id != 0);
2186 return RecordCacheArray[typmod].id;
2187 }
2188
2189 /* For anonymous or unrecognized record type, generate a new ID */
2190 return ++tupledesc_id_counter;
2191 }
2192}
2193
2194/*
2195 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2196 * This exists only to avoid exposing private innards of
2197 * SharedRecordTypmodRegistry in a header.
2198 */
2199size_t
2204
2205/*
2206 * Initialize 'registry' in a pre-existing shared memory region, which must be
2207 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2208 * bytes.
2209 *
2210 * 'area' will be used to allocate shared memory space as required for the
2211 * typemod registration. The current process, expected to be a leader process
2212 * in a parallel query, will be attached automatically and its current record
2213 * types will be loaded into *registry. While attached, all calls to
2214 * assign_record_type_typmod will use the shared registry. Worker backends
2215 * will need to attach explicitly.
2216 *
2217 * Note that this function takes 'area' and 'segment' as arguments rather than
2218 * accessing them via CurrentSession, because they aren't installed there
2219 * until after this function runs.
2220 */
2221void
2223 dsm_segment *segment,
2224 dsa_area *area)
2225{
2229 int32 typmod;
2230
2232
2233 /* We can't already be attached to a shared registry. */
2237
2239
2240 /* Create the hash table of tuple descriptors indexed by themselves. */
2242
2243 /* Create the hash table of tuple descriptors indexed by typmod. */
2245
2247
2248 /* Initialize the SharedRecordTypmodRegistry. */
2249 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2250 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2252
2253 /*
2254 * Copy all entries from this backend's private registry into the shared
2255 * registry.
2256 */
2257 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2258 {
2263 TupleDesc tupdesc;
2264 bool found;
2265
2266 tupdesc = RecordCacheArray[typmod].tupdesc;
2267 if (tupdesc == NULL)
2268 continue;
2269
2270 /* Copy the TupleDesc into shared memory. */
2271 shared_dp = share_tupledesc(area, tupdesc, typmod);
2272
2273 /* Insert into the typmod table. */
2275 &tupdesc->tdtypmod,
2276 &found);
2277 if (found)
2278 elog(ERROR, "cannot create duplicate shared record typmod");
2279 typmod_table_entry->typmod = tupdesc->tdtypmod;
2280 typmod_table_entry->shared_tupdesc = shared_dp;
2282
2283 /* Insert into the record table. */
2284 record_table_key.shared = false;
2285 record_table_key.u.local_tupdesc = tupdesc;
2288 &found);
2289 if (!found)
2290 {
2291 record_table_entry->key.shared = true;
2292 record_table_entry->key.u.shared_tupdesc = shared_dp;
2293 }
2295 }
2296
2297 /*
2298 * Set up the global state that will tell assign_record_type_typmod and
2299 * lookup_rowtype_tupdesc_internal about the shared registry.
2300 */
2304
2305 /*
2306 * We install a detach hook in the leader, but only to handle cleanup on
2307 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2308 * the memory, the leader process will use a shared registry until it
2309 * exits.
2310 */
2312}
2313
2314/*
2315 * Attach to 'registry', which must have been initialized already by another
2316 * backend. Future calls to assign_record_type_typmod and
2317 * lookup_rowtype_tupdesc_internal will use the shared registry until the
2318 * current session is detached.
2319 */
2320void
2322{
2326
2328
2329 /* We can't already be attached to a shared registry. */
2336
2337 /*
2338 * We can't already have typmods in our local cache, because they'd clash
2339 * with those imported by SharedRecordTypmodRegistryInit. This should be
2340 * a freshly started parallel worker. If we ever support worker
2341 * recycling, a worker would need to zap its local cache in between
2342 * servicing different queries, in order to be able to call this and
2343 * synchronize typmods with a new leader; but that's problematic because
2344 * we can't be very sure that record-typmod-related state hasn't escaped
2345 * to anywhere else in the process.
2346 */
2348
2350
2351 /* Attach to the two hash tables. */
2354 registry->record_table_handle,
2358 registry->typmod_table_handle,
2359 NULL);
2360
2362
2363 /*
2364 * Set up detach hook to run at worker exit. Currently this is the same
2365 * as the leader's detach hook, but in future they might need to be
2366 * different.
2367 */
2371
2372 /*
2373 * Set up the session state that will tell assign_record_type_typmod and
2374 * lookup_rowtype_tupdesc_internal about the shared registry.
2375 */
2379}
2380
2381/*
2382 * InvalidateCompositeTypeCacheEntry
2383 * Invalidate particular TypeCacheEntry on Relcache inval callback
2384 *
2385 * Delete the cached tuple descriptor (if any) for the given composite
2386 * type, and reset whatever info we have cached about the composite type's
2387 * comparability.
2388 */
2389static void
2391{
	/* NOTE(review): the declaration of 'hadTupDescOrOpclass' was dropped
	 * from this extract. */
2393
2394 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2395 OidIsValid(typentry->typrelid));
2396
	/* Remember whether there is anything to clear, for the final check */
2397 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2398 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2399
2400 /* Delete tupdesc if we have it */
2401 if (typentry->tupDesc != NULL)
2402 {
2403 /*
2404 * Release our refcount and free the tupdesc if none remain. We can't
2405 * use DecrTupleDescRefCount here because this reference is not logged
2406 * by the current resource owner.
2407 */
2408 Assert(typentry->tupDesc->tdrefcount > 0);
2409 if (--typentry->tupDesc->tdrefcount == 0)
2410 FreeTupleDesc(typentry->tupDesc);
2411 typentry->tupDesc = NULL;
2412
2413 /*
2414 * Also clear tupDesc_identifier, so that anyone watching it will
2415 * realize that the tupdesc has changed.
2416 */
2417 typentry->tupDesc_identifier = 0;
2418 }
2419
2420 /* Reset equality/comparison/hashing validity information */
2421 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2422
2423 /*
2424 * Call delete_rel_type_cache_if_needed() if we actually cleared
2425 * something.
2426 */
	/* NOTE(review): the conditional call itself was dropped from this
	 * extract; presumably it tests hadTupDescOrOpclass -- confirm. */
2429}
2430
2431/*
2432 * TypeCacheRelCallback
2433 * Relcache inval callback function
2434 *
2435 * Delete the cached tuple descriptor (if any) for the given rel's composite
2436 * type, or for all composite types if relid == InvalidOid. Also reset
2437 * whatever info we have cached about the composite type's comparability.
2438 *
2439 * This is called when a relcache invalidation event occurs for the given
2440 * relid. We can't use syscache to find a type corresponding to the given
2441 * relation because the code can be called outside of transaction. Thus, we
2442 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2443 */
2444static void
2446{
2447 TypeCacheEntry *typentry;
2448
2449 /*
2450 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2451 * callback wouldn't be registered
2452 */
2453 if (OidIsValid(relid))
2454 {
	/* NOTE(review): the declaration of 'relentry' and the start of the
	 * hash_search() call were dropped from this extract. */
2456
2457 /*
2458 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2459 * corresponding typcache entry has something to clean.
2460 */
2462 &relid,
2463 HASH_FIND, NULL);
2464
2465 if (relentry != NULL)
2466 {
	/* NOTE(review): the start of the TypeCacheHash hash_search() call
	 * was dropped here; only its trailing arguments survive. */
2468 &relentry->composite_typid,
2469 HASH_FIND, NULL);
2470
2471 if (typentry != NULL)
2472 {
2473 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2474 Assert(relid == typentry->typrelid);
2475
	/* NOTE(review): the InvalidateCompositeTypeCacheEntry() call was
	 * dropped from this extract. */
2477 }
2478 }
2479
2480 /*
2481 * Visit all the domain types sequentially. Typically, this shouldn't
2482 * affect performance since domain types are less prone to bloat.
2483 * Domain types are created manually, unlike composite types which are
2484 * automatically created for every temporary table.
2485 */
2486 for (typentry = firstDomainTypeEntry;
2487 typentry != NULL;
2488 typentry = typentry->nextDomain)
2489 {
2490 /*
2491 * If it's domain over composite, reset flags. (We don't bother
2492 * trying to determine whether the specific base type needs a
2493 * reset.) Note that if we haven't determined whether the base
2494 * type is composite, we don't need to reset anything.
2495 */
	/* NOTE(review): the domain-over-composite flag test guarding this
	 * reset was dropped from this extract. */
2497 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2498 }
2499 }
2500 else
2501 {
2502 HASH_SEQ_STATUS status;
2503
2504 /*
2505 * Relid is invalid. By convention, we need to reset all composite
2506 * types in cache. Also, we should reset flags for domain types, and
2507 * we loop over all entries in hash, so, do it in a single scan.
2508 */
2509 hash_seq_init(&status, TypeCacheHash);
2510 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2511 {
2512 if (typentry->typtype == TYPTYPE_COMPOSITE)
2513 {
	/* NOTE(review): the per-entry invalidation call was dropped from
	 * this extract. */
2515 }
2516 else if (typentry->typtype == TYPTYPE_DOMAIN)
2517 {
2518 /*
2519 * If it's domain over composite, reset flags. (We don't
2520 * bother trying to determine whether the specific base type
2521 * needs a reset.) Note that if we haven't determined whether
2522 * the base type is composite, we don't need to reset
2523 * anything.
2524 */
	/* NOTE(review): the domain-over-composite flag test guarding
	 * this reset was dropped from this extract. */
2526 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2527 }
2528 }
2529 }
2530}
2531
2532/*
2533 * TypeCacheTypCallback
2534 * Syscache inval callback function
2535 *
2536 * This is called when a syscache invalidation event occurs for any
2537 * pg_type row. If we have information cached about that type, mark
2538 * it as needing to be reloaded.
2539 */
2540static void
2542{
2543 HASH_SEQ_STATUS status;
2544 TypeCacheEntry *typentry;
2545
2546 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2547
2548 /*
2549 * By convention, zero hash value is passed to the callback as a sign that
2550 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2551 * InvalidateSystemCachesExtended().
2552 */
2553 if (hashvalue == 0)
2554 hash_seq_init(&status, TypeCacheHash);
2555 else
2556 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2557
2558 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2559 {
2560 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2561
2562 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2563
2564 /*
2565 * Mark the data obtained directly from pg_type as invalid. Also, if
2566 * it's a domain, typnotnull might've changed, so we'll need to
2567 * recalculate its constraints.
2568 */
2569 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2571
2572 /*
2573 * Call delete_rel_type_cache_if_needed() if we cleaned
2574 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2575 */
2576 if (hadPgTypeData)
2578 }
2579}
2580
2581/*
2582 * TypeCacheOpcCallback
2583 * Syscache inval callback function
2584 *
2585 * This is called when a syscache invalidation event occurs for any pg_opclass
2586 * row. In principle we could probably just invalidate data dependent on the
2587 * particular opclass, but since updates on pg_opclass are rare in production
2588 * it doesn't seem worth a lot of complication: we just mark all cached data
2589 * invalid.
2590 *
2591 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2592 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2593 * is not allowed to be used to add/drop the primary operators and functions
2594 * of an opclass, only cross-type members of a family; and the latter sorts
2595 * of members are not going to get cached here.
2596 */
2597static void
2599{
2600 HASH_SEQ_STATUS status;
2601 TypeCacheEntry *typentry;
2602
2603 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2604 hash_seq_init(&status, TypeCacheHash);
2605 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2606 {
2607 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2608
2609 /* Reset equality/comparison/hashing validity information */
2610 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2611
2612 /*
2613 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2614 * of TCFLAGS_OPERATOR_FLAGS.
2615 */
2616 if (hadOpclass)
2618 }
2619}
2620
2621/*
2622 * TypeCacheConstrCallback
2623 * Syscache inval callback function
2624 *
2625 * This is called when a syscache invalidation event occurs for any
2626 * pg_constraint row. We flush information about domain constraints
2627 * when this happens.
2628 *
2629 * It's slightly annoying that we can't tell whether the inval event was for
2630 * a domain constraint record or not; there's usually more update traffic
2631 * for table constraints than domain constraints, so we'll do a lot of
2632 * useless flushes. Still, this is better than the old no-caching-at-all
2633 * approach to domain constraints.
2634 */
2635static void
2637{
2638 TypeCacheEntry *typentry;
2639
2640 /*
2641 * Because this is called very frequently, and typically very few of the
2642 * typcache entries are for domains, we don't use hash_seq_search here.
2643 * Instead we thread all the domain-type entries together so that we can
2644 * visit them cheaply.
2645 */
2646 for (typentry = firstDomainTypeEntry;
2647 typentry != NULL;
2648 typentry = typentry->nextDomain)
2649 {
2650 /* Reset domain constraint validity information */
2652 }
2653}
2654
2655
2656/*
2657 * Check if given OID is part of the subset that's sortable by comparisons
2658 */
2659static inline bool
2661{
2662 Oid offset;
2663
2664 if (arg < enumdata->bitmap_base)
2665 return false;
2666 offset = arg - enumdata->bitmap_base;
2667 if (offset > (Oid) INT_MAX)
2668 return false;
2669 return bms_is_member((int) offset, enumdata->sorted_values);
2670}
2671
2672
2673/*
2674 * compare_values_of_enum
2675 * Compare two members of an enum type.
2676 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2677 *
2678 * Note: currently, the enumData cache is refreshed only if we are asked
2679 * to compare an enum value that is not already in the cache. This is okay
2680 * because there is no support for re-ordering existing values, so comparisons
2681 * of previously cached values will return the right answer even if other
2682 * values have been added since we last loaded the cache.
2683 *
2684 * Note: the enum logic has a special-case rule about even-numbered versus
2685 * odd-numbered OIDs, but we take no account of that rule here; this
2686 * routine shouldn't even get called when that rule applies.
2687 */
2688int
2690{
2692 EnumItem *item1;
2693 EnumItem *item2;
2694
2695 /*
2696 * Equal OIDs are certainly equal --- this case was probably handled by
2697 * our caller, but we may as well check.
2698 */
2699 if (arg1 == arg2)
2700 return 0;
2701
2702 /* Load up the cache if first time through */
2703 if (tcache->enumData == NULL)
2704 load_enum_cache_data(tcache);
2705 enumdata = tcache->enumData;
2706
2707 /*
2708 * If both OIDs are known-sorted, we can just compare them directly.
2709 */
2712 {
2713 if (arg1 < arg2)
2714 return -1;
2715 else
2716 return 1;
2717 }
2718
2719 /*
2720 * Slow path: we have to identify their actual sort-order positions.
2721 */
2724
2725 if (item1 == NULL || item2 == NULL)
2726 {
2727 /*
2728 * We couldn't find one or both values. That means the enum has
2729 * changed under us, so re-initialize the cache and try again. We
2730 * don't bother retrying the known-sorted case in this path.
2731 */
2732 load_enum_cache_data(tcache);
2733 enumdata = tcache->enumData;
2734
2737
2738 /*
2739 * If we still can't find the values, complain: we must have corrupt
2740 * data.
2741 */
2742 if (item1 == NULL)
2743 elog(ERROR, "enum value %u not found in cache for enum %s",
2744 arg1, format_type_be(tcache->type_id));
2745 if (item2 == NULL)
2746 elog(ERROR, "enum value %u not found in cache for enum %s",
2747 arg2, format_type_be(tcache->type_id));
2748 }
2749
2750 if (item1->sort_order < item2->sort_order)
2751 return -1;
2752 else if (item1->sort_order > item2->sort_order)
2753 return 1;
2754 else
2755 return 0;
2756}
2757
2758/*
2759 * Load (or re-load) the enumData member of the typcache entry.
2760 */
2761static void
2763{
2769 EnumItem *items;
2770 int numitems;
2771 int maxitems;
2772 Oid bitmap_base;
2773 Bitmapset *bitmap;
2775 int bm_size,
2776 start_pos;
2777
2778 /* Check that this is actually an enum */
2779 if (tcache->typtype != TYPTYPE_ENUM)
2780 ereport(ERROR,
2782 errmsg("%s is not an enum",
2783 format_type_be(tcache->type_id))));
2784
2785 /*
2786 * Read all the information for members of the enum type. We collect the
2787 * info in working memory in the caller's context, and then transfer it to
2788 * permanent memory in CacheMemoryContext. This minimizes the risk of
2789 * leaking memory from CacheMemoryContext in the event of an error partway
2790 * through.
2791 */
2792 maxitems = 64;
2793 items = palloc_array(EnumItem, maxitems);
2794 numitems = 0;
2795
2796 /* Scan pg_enum for the members of the target enum type. */
2800 ObjectIdGetDatum(tcache->type_id));
2801
2805 true, NULL,
2806 1, &skey);
2807
2809 {
2811
2812 if (numitems >= maxitems)
2813 {
2814 maxitems *= 2;
2815 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2816 }
2817 items[numitems].enum_oid = en->oid;
2818 items[numitems].sort_order = en->enumsortorder;
2819 numitems++;
2820 }
2821
2824
2825 /* Sort the items into OID order */
2826 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2827
2828 /*
2829 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2830 * known to be in order and can thus be compared with just OID comparison.
2831 *
2832 * The point of this is that the enum's initial OIDs were certainly in
2833 * order, so there is some subset that can be compared via OID comparison;
2834 * and we'd rather not do binary searches unnecessarily.
2835 *
2836 * This is somewhat heuristic, and might identify a subset of OIDs that
2837 * isn't exactly what the type started with. That's okay as long as the
2838 * subset is correctly sorted.
2839 */
2840 bitmap_base = InvalidOid;
2841 bitmap = NULL;
2842 bm_size = 1; /* only save sets of at least 2 OIDs */
2843
2844 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2845 {
2846 /*
2847 * Identify longest sorted subsequence starting at start_pos
2848 */
2850 int this_bm_size = 1;
2851 Oid start_oid = items[start_pos].enum_oid;
2852 float4 prev_order = items[start_pos].sort_order;
2853 int i;
2854
2855 for (i = start_pos + 1; i < numitems; i++)
2856 {
2857 Oid offset;
2858
2859 offset = items[i].enum_oid - start_oid;
2860 /* quit if bitmap would be too large; cutoff is arbitrary */
2861 if (offset >= 8192)
2862 break;
2863 /* include the item if it's in-order */
2864 if (items[i].sort_order > prev_order)
2865 {
2866 prev_order = items[i].sort_order;
2867 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2868 this_bm_size++;
2869 }
2870 }
2871
2872 /* Remember it if larger than previous best */
2873 if (this_bm_size > bm_size)
2874 {
2875 bms_free(bitmap);
2876 bitmap_base = start_oid;
2877 bitmap = this_bitmap;
2879 }
2880 else
2882
2883 /*
2884 * Done if it's not possible to find a longer sequence in the rest of
2885 * the list. In typical cases this will happen on the first
2886 * iteration, which is why we create the bitmaps on the fly instead of
2887 * doing a second pass over the list.
2888 */
2889 if (bm_size >= (numitems - start_pos - 1))
2890 break;
2891 }
2892
2893 /* OK, copy the data into CacheMemoryContext */
2896 palloc(offsetof(TypeCacheEnumData, enum_values) +
2897 numitems * sizeof(EnumItem));
2898 enumdata->bitmap_base = bitmap_base;
2899 enumdata->sorted_values = bms_copy(bitmap);
2900 enumdata->num_values = numitems;
2901 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2903
2904 pfree(items);
2905 bms_free(bitmap);
2906
2907 /* And link the finished cache struct into the typcache */
2908 if (tcache->enumData != NULL)
2909 pfree(tcache->enumData);
2910 tcache->enumData = enumdata;
2911}
2912
2913/*
2914 * Locate the EnumItem with the given OID, if present
2915 */
2916static EnumItem *
2918{
2919 EnumItem srch;
2920
2921 /* On some versions of Solaris, bsearch of zero items dumps core */
2922 if (enumdata->num_values <= 0)
2923 return NULL;
2924
2925 srch.enum_oid = arg;
2926 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2927 sizeof(EnumItem), enum_oid_cmp);
2928}
2929
2930/*
2931 * qsort comparison function for OID-ordered EnumItems
2932 */
2933static int
2934enum_oid_cmp(const void *left, const void *right)
2935{
2936 const EnumItem *l = (const EnumItem *) left;
2937 const EnumItem *r = (const EnumItem *) right;
2938
2939 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2940}
2941
2942/*
2943 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2944 * to the given value and return a dsa_pointer.
2945 */
2946static dsa_pointer
2948{
2950 TupleDesc shared;
2951
2952 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2953 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2954 TupleDescCopy(shared, tupdesc);
2955 shared->tdtypmod = typmod;
2956
2957 return shared_dp;
2958}
2959
2960/*
2961 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2962 * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2963 * Tuple descriptors returned by this function are not reference counted, and
2964 * will exist at least as long as the current backend remained attached to the
2965 * current session.
2966 */
2967static TupleDesc
2969{
2970 TupleDesc result;
2975 bool found;
2976 uint32 typmod;
2977
2978 /* If not even attached, nothing to do. */
2980 return NULL;
2981
2982 /* Try to find a matching tuple descriptor in the record table. */
2983 key.shared = false;
2984 key.u.local_tupdesc = tupdesc;
2988 {
2989 Assert(record_table_entry->key.shared);
2992 result = (TupleDesc)
2994 record_table_entry->key.u.shared_tupdesc);
2995 Assert(result->tdrefcount == -1);
2996
2997 return result;
2998 }
2999
3000 /* Allocate a new typmod number. This will be wasted if we error out. */
3001 typmod = (int)
3003 1);
3004
3005 /* Copy the TupleDesc into shared memory. */
3006 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
3007
3008 /*
3009 * Create an entry in the typmod table so that others will understand this
3010 * typmod number.
3011 */
3012 PG_TRY();
3013 {
3016 &typmod, &found);
3017 if (found)
3018 elog(ERROR, "cannot create duplicate shared record typmod");
3019 }
3020 PG_CATCH();
3021 {
3023 PG_RE_THROW();
3024 }
3025 PG_END_TRY();
3026 typmod_table_entry->typmod = typmod;
3027 typmod_table_entry->shared_tupdesc = shared_dp;
3030
3031 /*
3032 * Finally create an entry in the record table so others with matching
3033 * tuple descriptors can reuse the typmod.
3034 */
3037 &found);
3038 if (found)
3039 {
3040 /*
3041 * Someone concurrently inserted a matching tuple descriptor since the
3042 * first time we checked. Use that one instead.
3043 */
3046
3047 /* Might as well free up the space used by the one we created. */
3049 &typmod);
3050 Assert(found);
3052
3053 /* Return the one we found. */
3054 Assert(record_table_entry->key.shared);
3055 result = (TupleDesc)
3057 record_table_entry->key.u.shared_tupdesc);
3058 Assert(result->tdrefcount == -1);
3059
3060 return result;
3061 }
3062
3063 /* Store it and return it. */
3064 record_table_entry->key.shared = true;
3065 record_table_entry->key.u.shared_tupdesc = shared_dp;
3068 result = (TupleDesc)
3070 Assert(result->tdrefcount == -1);
3071
3072 return result;
3073}
3074
3075/*
3076 * On-DSM-detach hook to forget about the current shared record typmod
3077 * infrastructure. This is currently used by both leader and workers.
3078 */
3079static void
3095
3096/*
3097 * Insert RelIdToTypeIdCacheHash entry if needed.
3098 */
3099static void
3101{
3102 /* Immediately quit for non-composite types */
3103 if (typentry->typtype != TYPTYPE_COMPOSITE)
3104 return;
3105
3106 /* typrelid should be given for composite types */
3107 Assert(OidIsValid(typentry->typrelid));
3108
3109 /*
3110 * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3111 * information indicating it should be here.
3112 */
3113 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3114 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3115 typentry->tupDesc != NULL)
3116 {
3118 bool found;
3119
3121 &typentry->typrelid,
3122 HASH_ENTER, &found);
3123 relentry->relid = typentry->typrelid;
3124 relentry->composite_typid = typentry->type_id;
3125 }
3126}
3127
3128/*
3129 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3130 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3131 * or tupDesc.
3132 */
3133static void
3135{
3136#ifdef USE_ASSERT_CHECKING
3137 int i;
3138 bool is_in_progress = false;
3139
3140 for (i = 0; i < in_progress_list_len; i++)
3141 {
3142 if (in_progress_list[i] == typentry->type_id)
3143 {
3144 is_in_progress = true;
3145 break;
3146 }
3147 }
3148#endif
3149
3150 /* Immediately quit for non-composite types */
3151 if (typentry->typtype != TYPTYPE_COMPOSITE)
3152 return;
3153
3154 /* typrelid should be given for composite types */
3155 Assert(OidIsValid(typentry->typrelid));
3156
3157 /*
3158 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3159 * information indicating entry should be still there.
3160 */
3161 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3162 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3163 typentry->tupDesc == NULL)
3164 {
3165 bool found;
3166
3168 &typentry->typrelid,
3169 HASH_REMOVE, &found);
3170 Assert(found || is_in_progress);
3171 }
3172 else
3173 {
3174#ifdef USE_ASSERT_CHECKING
3175 /*
3176 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3177 * entry if it should exist.
3178 */
3179 bool found;
3180
3181 if (!is_in_progress)
3182 {
3184 &typentry->typrelid,
3185 HASH_FIND, &found);
3186 Assert(found);
3187 }
3188#endif
3189 }
3190}
3191
3192/*
3193 * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3194 * entries, marked as in-progress by lookup_type_cache(). It may happen
3195 * in case of an error or interruption during the lookup_type_cache() call.
3196 */
3197static void
3199{
3200 int i;
3201
3202 for (i = 0; i < in_progress_list_len; i++)
3203 {
3204 TypeCacheEntry *typentry;
3205
3208 HASH_FIND, NULL);
3209 if (typentry)
3211 }
3212
3214}
3215
3216void
3221
3222void
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition atomics.h:366
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
Bitmapset * bms_copy(const Bitmapset *a)
Definition bitmapset.c:122
#define TextDatumGetCString(d)
Definition builtins.h:99
#define NameStr(name)
Definition c.h:837
#define RegProcedureIsValid(p)
Definition c.h:864
#define Assert(condition)
Definition c.h:945
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:552
int32_t int32
Definition c.h:614
uint64_t uint64
Definition c.h:619
uint32_t uint32
Definition c.h:618
float float4
Definition c.h:715
#define MemSet(start, val, len)
Definition c.h:1109
#define OidIsValid(objectId)
Definition c.h:860
size_t Size
Definition c.h:691
void CreateCacheMemoryContext(void)
Definition catcache.c:715
bool contain_volatile_functions(Node *clause)
Definition clauses.c:549
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition dsa.c:957
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition dsa.c:841
uint64 dsa_pointer
Definition dsa.h:62
#define dsa_allocate(area, size)
Definition dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition dshash.c:524
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition dshash.c:611
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition dshash.c:579
void dshash_detach(dshash_table *hash_table)
Definition dshash.c:311
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition dshash.c:394
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition dshash.c:371
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition dshash.c:274
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition dshash.c:602
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition dshash.c:210
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition dshash.c:593
dsa_pointer dshash_table_handle
Definition dshash.h:24
#define dshash_find_or_insert(hash_table, key, found)
Definition dshash.h:109
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition dynahash.c:1400
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:908
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
#define PG_RE_THROW()
Definition elog.h:405
#define PG_TRY(...)
Definition elog.h:372
#define PG_END_TRY(...)
Definition elog.h:397
#define ERROR
Definition elog.h:39
#define PG_CATCH(...)
Definition elog.h:382
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition execExpr.c:143
@ DOM_CONSTRAINT_CHECK
Definition execnodes.h:1062
@ DOM_CONSTRAINT_NOTNULL
Definition execnodes.h:1061
#define palloc_array(type, count)
Definition fe_memutils.h:76
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition fmgr.c:139
char * format_type_be(Oid type_oid)
void systable_endscan(SysScanDesc sysscan)
Definition genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition genam.c:388
#define HASHSTANDARD_PROC
Definition hash.h:355
#define HASHEXTENDED_PROC
Definition hash.h:356
@ HASH_FIND
Definition hsearch.h:113
@ HASH_REMOVE
Definition hsearch.h:115
@ HASH_ENTER
Definition hsearch.h:114
#define HASH_ELEM
Definition hsearch.h:95
#define HASH_COMPARE
Definition hsearch.h:99
#define HASH_FUNCTION
Definition hsearch.h:98
#define HASH_BLOBS
Definition hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
#define IsParallelWorker()
Definition parallel.h:62
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition indexcmds.c:2369
long val
Definition informix.c:689
#define INJECTION_POINT(name, arg)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition int.h:719
void CacheRegisterSyscacheCallback(SysCacheIdentifier cacheid, SyscacheCallbackFunction func, Datum arg)
Definition inval.c:1816
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition inval.c:1858
int b
Definition isn.c:74
int a
Definition isn.c:73
int i
Definition isn.c:77
List * lappend(List *list, void *datum)
Definition list.c:339
List * lcons(void *datum, List *list)
Definition list.c:495
#define AccessShareLock
Definition lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition lsyscache.c:1384
Oid get_opclass_family(Oid opclass)
Definition lsyscache.c:1362
Oid get_multirange_range(Oid multirangeOid)
Definition lsyscache.c:3705
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition lsyscache.c:915
RegProcedure get_opcode(Oid opno)
Definition lsyscache.c:1505
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition lsyscache.c:170
Oid get_base_element_type(Oid typid)
Definition lsyscache.c:3054
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition lsyscache.c:2760
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
char * pstrdup(const char *in)
Definition mcxt.c:1781
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition mcxt.c:582
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition mcxt.c:686
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
MemoryContext CacheMemoryContext
Definition mcxt.c:169
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition memutils.h:170
#define BTORDER_PROC
Definition nbtree.h:717
#define copyObject(obj)
Definition nodes.h:232
#define makeNode(_type_)
Definition nodes.h:161
static char * errmsg
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition palloc.h:109
FormData_pg_attribute * Form_pg_attribute
static uint32 pg_nextpower2_32(uint32 num)
END_CATALOG_STRUCT typedef FormData_pg_constraint * Form_pg_constraint
const void * data
END_CATALOG_STRUCT typedef FormData_pg_enum * Form_pg_enum
Definition pg_enum.h:48
#define lfirst(lc)
Definition pg_list.h:172
#define NIL
Definition pg_list.h:68
#define foreach_node(type, var, lst)
Definition pg_list.h:496
END_CATALOG_STRUCT typedef FormData_pg_range * Form_pg_range
Definition pg_range.h:71
END_CATALOG_STRUCT typedef FormData_pg_type * Form_pg_type
Definition pg_type.h:265
Expr * expression_planner(Expr *expr)
Definition planner.c:6819
#define qsort(a, b, c, d)
Definition port.h:495
static Datum PointerGetDatum(const void *X)
Definition postgres.h:342
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:252
uint64_t Datum
Definition postgres.h:70
#define InvalidOid
unsigned int Oid
char * c
static int fb(int x)
tree ctl
Definition radixtree.h:1838
void * stringToNode(const char *str)
Definition read.c:90
#define RelationGetDescr(relation)
Definition rel.h:540
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition scankey.c:76
Session * CurrentSession
Definition session.c:48
void relation_close(Relation relation, LOCKMODE lockmode)
Definition relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition relation.c:47
#define BTGreaterStrategyNumber
Definition stratnum.h:33
#define HTEqualStrategyNumber
Definition stratnum.h:41
#define BTLessStrategyNumber
Definition stratnum.h:29
#define BTEqualStrategyNumber
Definition stratnum.h:31
MemoryContext dccContext
Definition typcache.c:142
DomainConstraintType constrainttype
Definition execnodes.h:1068
ExprState * check_exprstate
Definition execnodes.h:1071
float4 sort_order
Definition typcache.c:150
Oid enum_oid
Definition typcache.c:149
Oid fn_oid
Definition fmgr.h:59
Size keysize
Definition hsearch.h:75
Definition pg_list.h:54
Definition nodes.h:135
TupleDesc tupdesc
Definition typcache.c:174
Form_pg_class rd_rel
Definition rel.h:111
dsm_segment * segment
Definition session.h:27
dshash_table * shared_record_table
Definition session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition session.h:31
dsa_area * area
Definition session.h:28
dshash_table * shared_typmod_table
Definition session.h:33
SharedRecordTableKey key
Definition typcache.c:213
TupleDesc local_tupdesc
Definition typcache.c:201
union SharedRecordTableKey::@33 u
dsa_pointer shared_tupdesc
Definition typcache.c:202
dshash_table_handle typmod_table_handle
Definition typcache.c:186
pg_atomic_uint32 next_typmod
Definition typcache.c:188
dshash_table_handle record_table_handle
Definition typcache.c:184
dsa_pointer shared_tupdesc
Definition typcache.c:223
int32 tdtypmod
Definition tupdesc.h:152
uint32 type_id_hash
Definition typcache.h:36
uint64 tupDesc_identifier
Definition typcache.h:91
FmgrInfo hash_proc_finfo
Definition typcache.h:78
int32 domainBaseTypmod
Definition typcache.h:116
Oid hash_extended_proc
Definition typcache.h:67
FmgrInfo rng_cmp_proc_finfo
Definition typcache.h:102
FmgrInfo cmp_proc_finfo
Definition typcache.h:77
struct TypeCacheEntry * rngelemtype
Definition typcache.h:99
TupleDesc tupDesc
Definition typcache.h:90
FmgrInfo hash_extended_proc_finfo
Definition typcache.h:79
DomainConstraintCache * domainData
Definition typcache.h:122
struct TypeCacheEntry * rngtype
Definition typcache.h:109
FmgrInfo rng_subdiff_finfo
Definition typcache.h:104
FmgrInfo eq_opr_finfo
Definition typcache.h:76
Oid btree_opintype
Definition typcache.h:59
struct TypeCacheEnumData * enumData
Definition typcache.h:131
struct TypeCacheEntry * nextDomain
Definition typcache.h:134
FmgrInfo rng_canonical_finfo
Definition typcache.h:103
Oid hash_opintype
Definition typcache.h:61
char typstorage
Definition typcache.h:42
Bitmapset * sorted_values
Definition typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition typcache.c:158
void ReleaseSysCache(HeapTuple tuple)
Definition syscache.c:264
HeapTuple SearchSysCache1(SysCacheIdentifier cacheId, Datum key1)
Definition syscache.c:220
#define GetSysCacheHashValue1(cacheId, key1)
Definition syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
static ItemArray items
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition tupdesc.c:334
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition tupdesc.c:424
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:632
void FreeTupleDesc(TupleDesc tupdesc)
Definition tupdesc.c:557
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:614
uint32 hashRowType(TupleDesc desc)
Definition tupdesc.c:868
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition tupdesc.c:242
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition tupdesc.c:832
#define TupleDescSize(src)
Definition tupdesc.h:216
#define PinTupleDesc(tupdesc)
Definition tupdesc.h:232
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:178
struct TupleDescData * TupleDesc
Definition tupdesc.h:163
bool DomainHasConstraints(Oid type_id, bool *has_volatile)
Definition typcache.c:1495
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1740
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3100
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition typcache.c:1404
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1853
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition typcache.c:1947
static void TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2598
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition typcache.c:2321
#define TCFLAGS_OPERATOR_FLAGS
Definition typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1756
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition typcache.c:115
void AtEOXact_TypeCache(void)
Definition typcache.c:3217
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition typcache.c:119
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition typcache.c:2762
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1619
static HTAB * RelIdToTypeIdCacheHash
Definition typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2917
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1627
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition typcache.c:2968
static int in_progress_list_maxlen
Definition typcache.c:228
static int32 NextRecordTypmod
Definition typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:2003
static Oid * in_progress_list
Definition typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3134
#define TCFLAGS_CHECKED_GT_OPR
Definition typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1780
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition typcache.c:1366
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1964
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition typcache.c:1603
#define TCFLAGS_CHECKED_LT_OPR
Definition typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition typcache.c:1345
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition typcache.c:2390
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition typcache.c:2222
static int dcs_cmp(const void *a, const void *b)
Definition typcache.c:1321
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1565
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1557
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1064
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition typcache.c:362
#define TCFLAGS_CHECKED_CMP_PROC
Definition typcache.c:105
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1788
static int in_progress_list_len
Definition typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition typcache.c:1541
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition typcache.c:2947
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1006
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition typcache.c:2159
static RecordCacheArrayEntry * RecordCacheArray
Definition typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1748
static HTAB * RecordCacheHash
Definition typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2660
static TypeCacheEntry * firstDomainTypeEntry
Definition typcache.c:96
void AtEOSubXact_TypeCache(void)
Definition typcache.c:3223
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition typcache.c:3080
#define TCFLAGS_HAVE_ELEM_HASHING
Definition typcache.c:111
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition typcache.c:107
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition typcache.c:1086
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition typcache.c:2445
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1573
static void TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2541
size_t SharedRecordTypmodRegistryEstimate(void)
Definition typcache.c:2200
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1796
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition typcache.c:108
static void TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2636
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition typcache.c:1549
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition typcache.c:99
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition typcache.c:2689
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition typcache.c:118
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition typcache.c:117
static int32 RecordCacheArrayLen
Definition typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition typcache.c:2067
static HTAB * TypeCacheHash
Definition typcache.c:79
static uint64 tupledesc_id_counter
Definition typcache.c:313
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition typcache.c:1611
#define TCFLAGS_HAVE_FIELD_HASHING
Definition typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition typcache.c:2051
static const dshash_parameters srtr_record_table_params
Definition typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition typcache.c:1981
static int enum_oid_cmp(const void *left, const void *right)
Definition typcache.c:2934
static void finalize_in_progress_typentries(void)
Definition typcache.c:3198
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition typcache.c:1334
#define TCFLAGS_CHECKED_EQ_OPR
Definition typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition typcache.c:1442
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition typcache.c:389
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition typcache.c:1824
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition typcache.c:1635
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition typcache.c:2040
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition typcache.c:972
#define INVALID_TUPLEDESC_IDENTIFIER
Definition typcache.h:157
#define TYPECACHE_HASH_PROC_FINFO
Definition typcache.h:145
#define TYPECACHE_EQ_OPR
Definition typcache.h:138
#define TYPECACHE_HASH_OPFAMILY
Definition typcache.h:148
#define TYPECACHE_TUPDESC
Definition typcache.h:146
#define TYPECACHE_MULTIRANGE_INFO
Definition typcache.h:154
#define TYPECACHE_EQ_OPR_FINFO
Definition typcache.h:143
#define TYPECACHE_HASH_EXTENDED_PROC
Definition typcache.h:152
#define TYPECACHE_BTREE_OPFAMILY
Definition typcache.h:147
#define TYPECACHE_DOMAIN_BASE_INFO
Definition typcache.h:150
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition typcache.h:151
#define TYPECACHE_RANGE_INFO
Definition typcache.h:149
#define TYPECACHE_GT_OPR
Definition typcache.h:140
#define TYPECACHE_CMP_PROC
Definition typcache.h:141
#define TYPECACHE_LT_OPR
Definition typcache.h:139
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition typcache.h:153
#define TYPECACHE_CMP_PROC_FINFO
Definition typcache.h:144
#define TYPECACHE_HASH_PROC
Definition typcache.h:142