PostgreSQL Source Code git master
Loading...
Searching...
No Matches
typcache.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Core data extracted from the pg_type row is updated
27 * when we detect updates to pg_type. Information dependent on opclasses is
28 * cleared if we detect updates to pg_opclass. We also support clearing the
29 * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 * since those may need to change as a consequence of ALTER TABLE. Domain
31 * constraint changes are also tracked properly.
32 *
33 *
34 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
35 * Portions Copyright (c) 1994, Regents of the University of California
36 *
37 * IDENTIFICATION
38 * src/backend/utils/cache/typcache.c
39 *
40 *-------------------------------------------------------------------------
41 */
42#include "postgres.h"
43
44#include <limits.h>
45
46#include "access/hash.h"
47#include "access/htup_details.h"
48#include "access/nbtree.h"
49#include "access/parallel.h"
50#include "access/relation.h"
51#include "access/session.h"
52#include "access/table.h"
53#include "catalog/pg_am.h"
55#include "catalog/pg_enum.h"
56#include "catalog/pg_operator.h"
57#include "catalog/pg_range.h"
58#include "catalog/pg_type.h"
59#include "commands/defrem.h"
60#include "common/int.h"
61#include "executor/executor.h"
62#include "lib/dshash.h"
63#include "optimizer/optimizer.h"
64#include "port/pg_bitutils.h"
65#include "storage/lwlock.h"
66#include "utils/builtins.h"
67#include "utils/catcache.h"
68#include "utils/fmgroids.h"
70#include "utils/inval.h"
71#include "utils/lsyscache.h"
72#include "utils/memutils.h"
73#include "utils/rel.h"
74#include "utils/syscache.h"
75#include "utils/typcache.h"
76
77
78/* The main type cache hashtable searched by lookup_type_cache */
80
81/*
82 * The mapping of relation's OID to the corresponding composite type OID.
83 * We're keeping the map entry when the corresponding typentry has something
84 * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 */
88
90{
91 Oid relid; /* OID of the relation */
92 Oid composite_typid; /* OID of the relation's composite type */
94
95/* List of type cache entries for domain types */
97
98/* Private flag bits in the TypeCacheEntry.flags field */
99#define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100#define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101#define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102#define TCFLAGS_CHECKED_EQ_OPR 0x000008
103#define TCFLAGS_CHECKED_LT_OPR 0x000010
104#define TCFLAGS_CHECKED_GT_OPR 0x000020
105#define TCFLAGS_CHECKED_CMP_PROC 0x000040
106#define TCFLAGS_CHECKED_HASH_PROC 0x000080
107#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108#define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109#define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110#define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111#define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113#define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114#define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115#define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116#define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120
121/* The flags associated with equality/comparison/hashing are all but these: */
122#define TCFLAGS_OPERATOR_FLAGS \
123 (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126
127/*
128 * Data stored about a domain type's constraints. Note that we do not create
129 * this struct for the common case of a constraint-less domain; we just set
130 * domainData to NULL to indicate that.
131 *
132 * Within a DomainConstraintCache, we store expression plan trees, but the
133 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 * When needed, expression evaluation nodes are built by flat-copying the
135 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 * Such a node tree is not part of the DomainConstraintCache, but is
137 * considered to belong to a DomainConstraintRef.
138 */
140{
141 List *constraints; /* list of DomainConstraintState nodes */
142 MemoryContext dccContext; /* memory context holding all associated data */
143 long dccRefCount; /* number of references to this struct */
144};
145
146/* Private information to support comparisons of enum values */
/* One enum member: its OID plus its sort position (presumably mirroring
 * pg_enum.enumsortorder — confirm against load_enum_cache_data). */
147typedef struct
148{
149 Oid enum_oid; /* OID of one enum value */
150 float4 sort_order; /* its sort position */
151} EnumItem;
152
153typedef struct TypeCacheEnumData
154{
155 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 int num_values; /* total number of values in enum */
160
161/*
162 * We use a separate table for storing the definitions of non-anonymous
163 * record types. Once defined, a record type will be remembered for the
164 * life of the backend. Subsequent uses of the "same" record type (where
165 * sameness means equalRowTypes) will refer to the existing table entry.
166 *
167 * Stored record types are remembered in a linear array of TupleDescs,
168 * which can be indexed quickly with the assigned typmod. There is also
169 * a hash table to speed searches for matching TupleDescs.
170 */
171
176
177/*
178 * To deal with non-anonymous record types that are exchanged by backends
179 * involved in a parallel query, we also need a shared version of the above.
180 */
182{
183 /* A hash table for finding a matching TupleDesc. */
185 /* A hash table for finding a TupleDesc by typmod. */
187 /* A source of new record typmod numbers. */
189};
190
191/*
192 * When using shared tuple descriptors as hash table keys we need a way to be
193 * able to search for an equal shared TupleDesc using a backend-local
194 * TupleDesc. So we use this type which can hold either, and hash and compare
195 * functions that know how to handle both.
196 */
206
207/*
208 * The shared version of RecordCacheEntry. This lets us look up a typmod
209 * using a TupleDesc which may be in local or shared memory.
210 */
215
216/*
217 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 * up a TupleDesc in shared memory using a typmod.
219 */
225
229
230/*
231 * A comparator function for SharedRecordTableKey.
232 */
233static int
234shared_record_table_compare(const void *a, const void *b, size_t size,
235 void *arg)
236{
237 dsa_area *area = (dsa_area *) arg;
238 const SharedRecordTableKey *k1 = a;
239 const SharedRecordTableKey *k2 = b;
242
243 if (k1->shared)
244 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
249 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 else
251 t2 = k2->u.local_tupdesc;
252
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}
255
256/*
257 * A hash function for SharedRecordTableKey.
258 */
259static uint32
260shared_record_table_hash(const void *a, size_t size, void *arg)
261{
262 dsa_area *area = arg;
263 const SharedRecordTableKey *k = a;
264 TupleDesc t;
265
266 if (k->shared)
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}
273
274/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
283
284/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
293
294/* hashtable for recognizing registered record types */
296
302
303/* array of info about registered record types, indexed by assigned typmod */
305static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306static int32 NextRecordTypmod = 0; /* number of entries used */
307
308/*
309 * Process-wide counter for generating unique tupledesc identifiers.
310 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 */
314
315static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316static void load_rangetype_info(TypeCacheEntry *typentry);
317static void load_multirangetype_info(TypeCacheEntry *typentry);
318static void load_domaintype_info(TypeCacheEntry *typentry);
319static int dcs_cmp(const void *a, const void *b);
321static void dccref_deletion_callback(void *arg);
323static bool array_element_has_equality(TypeCacheEntry *typentry);
324static bool array_element_has_compare(TypeCacheEntry *typentry);
325static bool array_element_has_hashing(TypeCacheEntry *typentry);
328static bool record_fields_have_equality(TypeCacheEntry *typentry);
329static bool record_fields_have_compare(TypeCacheEntry *typentry);
330static bool record_fields_have_hashing(TypeCacheEntry *typentry);
332static void cache_record_field_properties(TypeCacheEntry *typentry);
333static bool range_element_has_hashing(TypeCacheEntry *typentry);
339static void TypeCacheRelCallback(Datum arg, Oid relid);
341 uint32 hashvalue);
343 uint32 hashvalue);
345 uint32 hashvalue);
346static void load_enum_cache_data(TypeCacheEntry *tcache);
348static int enum_oid_cmp(const void *left, const void *right);
350 Datum datum);
352static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
353 uint32 typmod);
356
357
358/*
359 * Hash function compatible with one-arg system cache hash function.
360 */
361static uint32
362type_cache_syshash(const void *key, Size keysize)
363{
364 Assert(keysize == sizeof(Oid));
365 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
366}
367
368/*
369 * lookup_type_cache
370 *
371 * Fetch the type cache entry for the specified datatype, and make sure that
372 * all the fields requested by bits in 'flags' are valid.
373 *
374 * The result is never NULL --- we will ereport() if the passed type OID is
375 * invalid. Note however that we may fail to find one or more of the
376 * values requested by 'flags'; the caller needs to check whether the fields
377 * are InvalidOid or not.
378 *
379 * Note that while filling TypeCacheEntry we might process concurrent
380 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
381 * invalidated. In this case, we typically only clear flags while values are
382 * still available for the caller. It's expected that the caller holds
383 * enough locks on type-depending objects that the values are still relevant.
384 * It's also important that the tupdesc is filled after all other
385 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
386 * invalidated during the lookup_type_cache() call.
387 */
/*
 * NOTE(review): this extracted listing is missing many physical source lines
 * within this function (local declarations, ereport() bodies, hash_create()
 * and callback-registration arguments, several multi-line call tails).  The
 * code below is therefore kept byte-identical to the extract rather than
 * restyled; recover the dropped lines from the upstream repository before
 * compiling.
 */
389lookup_type_cache(Oid type_id, int flags)
390{
391 TypeCacheEntry *typentry;
392 bool found;
394
395 if (TypeCacheHash == NULL)
396 {
397 /* First time through: initialize the hash table */
398 HASHCTL ctl;
399 int allocsize;
400
401 ctl.keysize = sizeof(Oid);
402 ctl.entrysize = sizeof(TypeCacheEntry);
403
404 /*
405 * TypeCacheEntry takes hash value from the system cache. For
406 * TypeCacheHash we use the same hash in order to speedup search by
407 * hash value. This is used by hash_seq_init_with_hash_value().
408 */
409 ctl.hash = type_cache_syshash;
410
411 TypeCacheHash = hash_create("Type information cache", 64,
413
415
416 ctl.keysize = sizeof(Oid);
417 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
418 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
420
421 /* Also set up callbacks for SI invalidations */
426
427 /* Also make sure CacheMemoryContext exists */
430
431 /*
432 * reserve enough in_progress_list slots for many cases
433 */
434 allocsize = 4;
437 allocsize * sizeof(*in_progress_list));
438 in_progress_list_maxlen = allocsize;
439 }
440
442
443 /* Register to catch invalidation messages */
445 {
446 int allocsize;
447
448 allocsize = in_progress_list_maxlen * 2;
450 allocsize * sizeof(*in_progress_list));
451 in_progress_list_maxlen = allocsize;
452 }
455
456 /* Try to look up an existing entry */
458 &type_id,
459 HASH_FIND, NULL);
460 if (typentry == NULL)
461 {
462 /*
463 * If we didn't find one, we want to make one. But first look up the
464 * pg_type row, just to make sure we don't make a cache entry for an
465 * invalid type OID. If the type OID is not valid, present a
466 * user-facing error, since some code paths such as domain_in() allow
467 * this function to be reached with a user-supplied OID.
468 */
469 HeapTuple tp;
471
473 if (!HeapTupleIsValid(tp))
476 errmsg("type with OID %u does not exist", type_id)));
478 if (!typtup->typisdefined)
481 errmsg("type \"%s\" is only a shell",
482 NameStr(typtup->typname))));
483
484 /* Now make the typcache entry */
486 &type_id,
487 HASH_ENTER, &found);
488 Assert(!found); /* it wasn't there a moment ago */
489
490 MemSet(typentry, 0, sizeof(TypeCacheEntry));
491
492 /* These fields can never change, by definition */
493 typentry->type_id = type_id;
494 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
495
496 /* Keep this part in sync with the code below */
497 typentry->typlen = typtup->typlen;
498 typentry->typbyval = typtup->typbyval;
499 typentry->typalign = typtup->typalign;
500 typentry->typstorage = typtup->typstorage;
501 typentry->typtype = typtup->typtype;
502 typentry->typrelid = typtup->typrelid;
503 typentry->typsubscript = typtup->typsubscript;
504 typentry->typelem = typtup->typelem;
505 typentry->typarray = typtup->typarray;
506 typentry->typcollation = typtup->typcollation;
507 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
508
509 /* If it's a domain, immediately thread it into the domain cache list */
510 if (typentry->typtype == TYPTYPE_DOMAIN)
511 {
513 firstDomainTypeEntry = typentry;
514 }
515
516 ReleaseSysCache(tp);
517 }
518 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
519 {
520 /*
521 * We have an entry, but its pg_type row got changed, so reload the
522 * data obtained directly from pg_type.
523 */
524 HeapTuple tp;
526
528 if (!HeapTupleIsValid(tp))
531 errmsg("type with OID %u does not exist", type_id)));
533 if (!typtup->typisdefined)
536 errmsg("type \"%s\" is only a shell",
537 NameStr(typtup->typname))));
538
539 /*
540 * Keep this part in sync with the code above. Many of these fields
541 * shouldn't ever change, particularly typtype, but copy 'em anyway.
542 */
543 typentry->typlen = typtup->typlen;
544 typentry->typbyval = typtup->typbyval;
545 typentry->typalign = typtup->typalign;
546 typentry->typstorage = typtup->typstorage;
547 typentry->typtype = typtup->typtype;
548 typentry->typrelid = typtup->typrelid;
549 typentry->typsubscript = typtup->typsubscript;
550 typentry->typelem = typtup->typelem;
551 typentry->typarray = typtup->typarray;
552 typentry->typcollation = typtup->typcollation;
553 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
554
555 ReleaseSysCache(tp);
556 }
557
558 /*
559 * Look up opclasses if we haven't already and any dependent info is
560 * requested.
561 */
567 {
568 Oid opclass;
569
570 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
571 if (OidIsValid(opclass))
572 {
573 typentry->btree_opf = get_opclass_family(opclass);
574 typentry->btree_opintype = get_opclass_input_type(opclass);
575 }
576 else
577 {
578 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
579 }
580
581 /*
582 * Reset information derived from btree opclass. Note in particular
583 * that we'll redetermine the eq_opr even if we previously found one;
584 * this matters in case a btree opclass has been added to a type that
585 * previously had only a hash opclass.
586 */
587 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
592 }
593
594 /*
595 * If we need to look up equality operator, and there's no btree opclass,
596 * force lookup of hash opclass.
597 */
598 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
599 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
600 typentry->btree_opf == InvalidOid)
602
607 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
608 {
609 Oid opclass;
610
611 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
612 if (OidIsValid(opclass))
613 {
614 typentry->hash_opf = get_opclass_family(opclass);
615 typentry->hash_opintype = get_opclass_input_type(opclass);
616 }
617 else
618 {
619 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
620 }
621
622 /*
623 * Reset information derived from hash opclass. We do *not* reset the
624 * eq_opr; if we already found one from the btree opclass, that
625 * decision is still good.
626 */
627 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
630 }
631
632 /*
633 * Look for requested operators and functions, if we haven't already.
634 */
635 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
636 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
637 {
638 Oid eq_opr = InvalidOid;
639
640 if (typentry->btree_opf != InvalidOid)
641 eq_opr = get_opfamily_member(typentry->btree_opf,
642 typentry->btree_opintype,
643 typentry->btree_opintype,
645 if (eq_opr == InvalidOid &&
646 typentry->hash_opf != InvalidOid)
647 eq_opr = get_opfamily_member(typentry->hash_opf,
648 typentry->hash_opintype,
649 typentry->hash_opintype,
651
652 /*
653 * If the proposed equality operator is array_eq or record_eq, check
654 * to see if the element type or column types support equality. If
655 * not, array_eq or record_eq would fail at runtime, so we don't want
656 * to report that the type has equality. (We can omit similar
657 * checking for ranges and multiranges because ranges can't be created
658 * in the first place unless their subtypes support equality.)
659 */
660 if (eq_opr == ARRAY_EQ_OP &&
662 eq_opr = InvalidOid;
663 else if (eq_opr == RECORD_EQ_OP &&
665 eq_opr = InvalidOid;
666
667 /* Force update of eq_opr_finfo only if we're changing state */
668 if (typentry->eq_opr != eq_opr)
669 typentry->eq_opr_finfo.fn_oid = InvalidOid;
670
671 typentry->eq_opr = eq_opr;
672
673 /*
674 * Reset info about hash functions whenever we pick up new info about
675 * equality operator. This is so we can ensure that the hash
676 * functions match the operator.
677 */
678 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
680 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
681 }
682 if ((flags & TYPECACHE_LT_OPR) &&
683 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
684 {
685 Oid lt_opr = InvalidOid;
686
687 if (typentry->btree_opf != InvalidOid)
688 lt_opr = get_opfamily_member(typentry->btree_opf,
689 typentry->btree_opintype,
690 typentry->btree_opintype,
692
693 /*
694 * As above, make sure array_cmp or record_cmp will succeed; but again
695 * we need no special check for ranges or multiranges.
696 */
697 if (lt_opr == ARRAY_LT_OP &&
698 !array_element_has_compare(typentry))
699 lt_opr = InvalidOid;
700 else if (lt_opr == RECORD_LT_OP &&
702 lt_opr = InvalidOid;
703
704 typentry->lt_opr = lt_opr;
705 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
706 }
707 if ((flags & TYPECACHE_GT_OPR) &&
708 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
709 {
710 Oid gt_opr = InvalidOid;
711
712 if (typentry->btree_opf != InvalidOid)
713 gt_opr = get_opfamily_member(typentry->btree_opf,
714 typentry->btree_opintype,
715 typentry->btree_opintype,
717
718 /*
719 * As above, make sure array_cmp or record_cmp will succeed; but again
720 * we need no special check for ranges or multiranges.
721 */
722 if (gt_opr == ARRAY_GT_OP &&
723 !array_element_has_compare(typentry))
724 gt_opr = InvalidOid;
725 else if (gt_opr == RECORD_GT_OP &&
727 gt_opr = InvalidOid;
728
729 typentry->gt_opr = gt_opr;
730 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
731 }
733 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
734 {
735 Oid cmp_proc = InvalidOid;
736
737 if (typentry->btree_opf != InvalidOid)
738 cmp_proc = get_opfamily_proc(typentry->btree_opf,
739 typentry->btree_opintype,
740 typentry->btree_opintype,
742
743 /*
744 * As above, make sure array_cmp or record_cmp will succeed; but again
745 * we need no special check for ranges or multiranges.
746 */
747 if (cmp_proc == F_BTARRAYCMP &&
748 !array_element_has_compare(typentry))
749 cmp_proc = InvalidOid;
750 else if (cmp_proc == F_BTRECORDCMP &&
752 cmp_proc = InvalidOid;
753
754 /* Force update of cmp_proc_finfo only if we're changing state */
755 if (typentry->cmp_proc != cmp_proc)
756 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
757
758 typentry->cmp_proc = cmp_proc;
759 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
760 }
762 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
763 {
764 Oid hash_proc = InvalidOid;
765
766 /*
767 * We insist that the eq_opr, if one has been determined, match the
768 * hash opclass; else report there is no hash function.
769 */
770 if (typentry->hash_opf != InvalidOid &&
771 (!OidIsValid(typentry->eq_opr) ||
772 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
773 typentry->hash_opintype,
774 typentry->hash_opintype,
776 hash_proc = get_opfamily_proc(typentry->hash_opf,
777 typentry->hash_opintype,
778 typentry->hash_opintype,
780
781 /*
782 * As above, make sure hash_array, hash_record, or hash_range will
783 * succeed.
784 */
785 if (hash_proc == F_HASH_ARRAY &&
786 !array_element_has_hashing(typentry))
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RECORD &&
790 hash_proc = InvalidOid;
791 else if (hash_proc == F_HASH_RANGE &&
792 !range_element_has_hashing(typentry))
793 hash_proc = InvalidOid;
794
795 /*
796 * Likewise for hash_multirange.
797 */
798 if (hash_proc == F_HASH_MULTIRANGE &&
800 hash_proc = InvalidOid;
801
802 /* Force update of hash_proc_finfo only if we're changing state */
803 if (typentry->hash_proc != hash_proc)
805
806 typentry->hash_proc = hash_proc;
807 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
808 }
809 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
812 {
813 Oid hash_extended_proc = InvalidOid;
814
815 /*
816 * We insist that the eq_opr, if one has been determined, match the
817 * hash opclass; else report there is no hash function.
818 */
819 if (typentry->hash_opf != InvalidOid &&
820 (!OidIsValid(typentry->eq_opr) ||
821 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
822 typentry->hash_opintype,
823 typentry->hash_opintype,
825 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
826 typentry->hash_opintype,
827 typentry->hash_opintype,
829
830 /*
831 * As above, make sure hash_array_extended, hash_record_extended, or
832 * hash_range_extended will succeed.
833 */
834 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
842 hash_extended_proc = InvalidOid;
843
844 /*
845 * Likewise for hash_multirange_extended.
846 */
847 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
849 hash_extended_proc = InvalidOid;
850
851 /* Force update of proc finfo only if we're changing state */
852 if (typentry->hash_extended_proc != hash_extended_proc)
854
855 typentry->hash_extended_proc = hash_extended_proc;
857 }
858
859 /*
860 * Set up fmgr lookup info as requested
861 *
862 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
863 * which is not quite right (they're really in the hash table's private
864 * memory context) but this will do for our purposes.
865 *
866 * Note: the code above avoids invalidating the finfo structs unless the
867 * referenced operator/function OID actually changes. This is to prevent
868 * unnecessary leakage of any subsidiary data attached to an finfo, since
869 * that would cause session-lifespan memory leaks.
870 */
871 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
872 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
873 typentry->eq_opr != InvalidOid)
874 {
876
877 eq_opr_func = get_opcode(typentry->eq_opr);
878 if (eq_opr_func != InvalidOid)
881 }
882 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
883 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
884 typentry->cmp_proc != InvalidOid)
885 {
886 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
888 }
889 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
890 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
891 typentry->hash_proc != InvalidOid)
892 {
893 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
895 }
898 typentry->hash_extended_proc != InvalidOid)
899 {
901 &typentry->hash_extended_proc_finfo,
903 }
904
905 /*
906 * If it's a composite type (row type), get tupdesc if requested
907 */
908 if ((flags & TYPECACHE_TUPDESC) &&
909 typentry->tupDesc == NULL &&
910 typentry->typtype == TYPTYPE_COMPOSITE)
911 {
912 load_typcache_tupdesc(typentry);
913 }
914
915 /*
916 * If requested, get information about a range type
917 *
918 * This includes making sure that the basic info about the range element
919 * type is up-to-date.
920 */
921 if ((flags & TYPECACHE_RANGE_INFO) &&
922 typentry->typtype == TYPTYPE_RANGE)
923 {
924 if (typentry->rngelemtype == NULL)
925 load_rangetype_info(typentry);
926 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
927 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
928 }
929
930 /*
931 * If requested, get information about a multirange type
932 */
933 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
934 typentry->rngtype == NULL &&
935 typentry->typtype == TYPTYPE_MULTIRANGE)
936 {
937 load_multirangetype_info(typentry);
938 }
939
940 /*
941 * If requested, get information about a domain type
942 */
943 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
944 typentry->domainBaseType == InvalidOid &&
945 typentry->typtype == TYPTYPE_DOMAIN)
946 {
947 typentry->domainBaseTypmod = -1;
948 typentry->domainBaseType =
949 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
950 }
951 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
952 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
953 typentry->typtype == TYPTYPE_DOMAIN)
954 {
955 load_domaintype_info(typentry);
956 }
957
/* NOTE(review): the relid->composite-type map maintenance and the
 * in-progress-list pop that follow this injection point appear to have been
 * lost in extraction — recover from upstream. */
958 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
959
962
964
965 return typentry;
966}
967
968/*
969 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
970 */
971static void
973{
974 Relation rel;
975
976 if (!OidIsValid(typentry->typrelid)) /* should not happen */
977 elog(ERROR, "invalid typrelid for composite type %u",
978 typentry->type_id);
979 rel = relation_open(typentry->typrelid, AccessShareLock);
980 Assert(rel->rd_rel->reltype == typentry->type_id);
981
982 /*
983 * Link to the tupdesc and increment its refcount (we assert it's a
984 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
985 * because the reference mustn't be entered in the current resource owner;
986 * it can outlive the current query.
987 */
988 typentry->tupDesc = RelationGetDescr(rel);
989
990 Assert(typentry->tupDesc->tdrefcount > 0);
991 typentry->tupDesc->tdrefcount++;
992
993 /*
994 * In future, we could take some pains to not change tupDesc_identifier if
995 * the tupdesc didn't really change; but for now it's not worth it.
996 */
998
1000}
1001
1002/*
1003 * load_rangetype_info --- helper routine to set up range type information
1004 */
1005static void
1007{
1009 HeapTuple tup;
1015 Oid opcintype;
1016 Oid cmpFnOid;
1017
1018 /* get information from pg_range */
1020 /* should not fail, since we already checked typtype ... */
1021 if (!HeapTupleIsValid(tup))
1022 elog(ERROR, "cache lookup failed for range type %u",
1023 typentry->type_id);
1025
1026 subtypeOid = pg_range->rngsubtype;
1027 typentry->rng_collation = pg_range->rngcollation;
1028 opclassOid = pg_range->rngsubopc;
1029 canonicalOid = pg_range->rngcanonical;
1030 subdiffOid = pg_range->rngsubdiff;
1031
1033
1034 /* get opclass properties and look up the comparison function */
1037 typentry->rng_opfamily = opfamilyOid;
1038
1039 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1040 BTORDER_PROC);
1042 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1043 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1044
1045 /* set up cached fmgrinfo structs */
1054
1055 /* Lastly, set up link to the element type --- this marks data valid */
1057}
1058
1059/*
1060 * load_multirangetype_info --- helper routine to set up multirange type
1061 * information
1062 */
1063static void
1065{
1067
1070 elog(ERROR, "cache lookup failed for multirange type %u",
1071 typentry->type_id);
1072
1074}
1075
1076/*
1077 * load_domaintype_info --- helper routine to set up domain constraint info
1078 *
1079 * Note: we assume we're called in a relatively short-lived context, so it's
1080 * okay to leak data into the current context while scanning pg_constraint.
1081 * We build the new DomainConstraintCache data in a context underneath
1082 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1083 * complete.
1084 */
1085static void
1087{
1088 Oid typeOid = typentry->type_id;
1090 bool notNull = false;
1092 int cconslen;
1095
1096 /*
1097 * If we're here, any existing constraint info is stale, so release it.
1098 * For safety, be sure to null the link before trying to delete the data.
1099 */
1100 if (typentry->domainData)
1101 {
1102 dcc = typentry->domainData;
1103 typentry->domainData = NULL;
1104 decr_dcc_refcount(dcc);
1105 }
1106
1107 /*
1108 * We try to optimize the common case of no domain constraints, so don't
1109 * create the dcc object and context until we find a constraint. Likewise
1110 * for the temp sorting array.
1111 */
1112 dcc = NULL;
1113 ccons = NULL;
1114 cconslen = 0;
1115
1116 /*
1117 * Scan pg_constraint for relevant constraints. We want to find
1118 * constraints for not just this domain, but any ancestor domains, so the
1119 * outer loop crawls up the domain stack.
1120 */
1122
1123 for (;;)
1124 {
1125 HeapTuple tup;
1128 int nccons = 0;
1129 ScanKeyData key[1];
1130 SysScanDesc scan;
1131
1133 if (!HeapTupleIsValid(tup))
1134 elog(ERROR, "cache lookup failed for type %u", typeOid);
1136
1137 if (typTup->typtype != TYPTYPE_DOMAIN)
1138 {
1139 /* Not a domain, so done */
1141 break;
1142 }
1143
1144 /* Test for NOT NULL Constraint */
1145 if (typTup->typnotnull)
1146 notNull = true;
1147
1148 /* Look for CHECK Constraints on this domain */
1149 ScanKeyInit(&key[0],
1152 ObjectIdGetDatum(typeOid));
1153
1155 NULL, 1, key);
1156
1158 {
1160 Datum val;
1161 bool isNull;
1162 char *constring;
1163 Expr *check_expr;
1165
1166 /* Ignore non-CHECK constraints */
1167 if (c->contype != CONSTRAINT_CHECK)
1168 continue;
1169
1170 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1172 conRel->rd_att, &isNull);
1173 if (isNull)
1174 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1175 NameStr(typTup->typname), NameStr(c->conname));
1176
1177 /* Create the DomainConstraintCache object and context if needed */
1178 if (dcc == NULL)
1179 {
1180 MemoryContext cxt;
1181
1183 "Domain constraints",
1185 dcc = (DomainConstraintCache *)
1187 dcc->constraints = NIL;
1188 dcc->dccContext = cxt;
1189 dcc->dccRefCount = 0;
1190 }
1191
1192 /* Convert conbin to a node tree, still in caller's context */
1194 check_expr = (Expr *) stringToNode(constring);
1195
1196 /*
1197 * Plan the expression, since ExecInitExpr will expect that.
1198 *
1199 * Note: caching the result of expression_planner() is not very
1200 * good practice. Ideally we'd use a CachedExpression here so
1201 * that we would react promptly to, eg, changes in inlined
1202 * functions. However, because we don't support mutable domain
1203 * CHECK constraints, it's not really clear that it's worth the
1204 * extra overhead to do that.
1205 */
1206 check_expr = expression_planner(check_expr);
1207
1208 /* Create only the minimally needed stuff in dccContext */
1210
1213 r->name = pstrdup(NameStr(c->conname));
1214 r->check_expr = copyObject(check_expr);
1215 r->check_exprstate = NULL;
1216
1218
1219 /* Accumulate constraints in an array, for sorting below */
1220 if (ccons == NULL)
1221 {
1222 cconslen = 8;
1225 }
1226 else if (nccons >= cconslen)
1227 {
1228 cconslen *= 2;
1231 }
1232 ccons[nccons++] = r;
1233 }
1234
1235 systable_endscan(scan);
1236
1237 if (nccons > 0)
1238 {
1239 /*
1240 * Sort the items for this domain, so that CHECKs are applied in a
1241 * deterministic order.
1242 */
1243 if (nccons > 1)
1245
1246 /*
1247 * Now attach them to the overall list. Use lcons() here because
1248 * constraints of parent domains should be applied earlier.
1249 */
1251 while (nccons > 0)
1252 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1254 }
1255
1256 /* loop to next domain in stack */
1257 typeOid = typTup->typbasetype;
1259 }
1260
1262
1263 /*
1264 * Only need to add one NOT NULL check regardless of how many domains in
1265 * the stack request it.
1266 */
1267 if (notNull)
1268 {
1270
1271 /* Create the DomainConstraintCache object and context if needed */
1272 if (dcc == NULL)
1273 {
1274 MemoryContext cxt;
1275
1277 "Domain constraints",
1279 dcc = (DomainConstraintCache *)
1281 dcc->constraints = NIL;
1282 dcc->dccContext = cxt;
1283 dcc->dccRefCount = 0;
1284 }
1285
1286 /* Create node trees in DomainConstraintCache's context */
1288
1290
1292 r->name = pstrdup("NOT NULL");
1293 r->check_expr = NULL;
1294 r->check_exprstate = NULL;
1295
1296 /* lcons to apply the nullness check FIRST */
1297 dcc->constraints = lcons(r, dcc->constraints);
1298
1300 }
1301
1302 /*
1303 * If we made a constraint object, move it into CacheMemoryContext and
1304 * attach it to the typcache entry.
1305 */
1306 if (dcc)
1307 {
1309 typentry->domainData = dcc;
1310 dcc->dccRefCount++; /* count the typcache's reference */
1311 }
1312
1313 /* Either way, the typcache entry's domain data is now valid. */
1315}
1316
1317/*
1318 * qsort comparator to sort DomainConstraintState pointers by name
1319 */
1320static int
1321dcs_cmp(const void *a, const void *b)
1322{
1323 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1324 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1325
1326 return strcmp((*ca)->name, (*cb)->name);
1327}
1328
1329/*
1330 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1331 * and free it if no references remain
1332 */
1333static void
1335{
1336 Assert(dcc->dccRefCount > 0);
1337 if (--(dcc->dccRefCount) <= 0)
1339}
1340
1341/*
1342 * Context reset/delete callback for a DomainConstraintRef
1343 */
1344static void
1346{
1348 DomainConstraintCache *dcc = ref->dcc;
1349
1350 /* Paranoia --- be sure link is nulled before trying to release */
1351 if (dcc)
1352 {
1353 ref->constraints = NIL;
1354 ref->dcc = NULL;
1355 decr_dcc_refcount(dcc);
1356 }
1357}
1358
1359/*
1360 * prep_domain_constraints --- prepare domain constraints for execution
1361 *
1362 * The expression trees stored in the DomainConstraintCache's list are
1363 * converted to executable expression state trees stored in execctx.
1364 */
1365static List *
1367{
1368 List *result = NIL;
1370 ListCell *lc;
1371
1373
1374 foreach(lc, constraints)
1375 {
1378
1380 newr->constrainttype = r->constrainttype;
1381 newr->name = r->name;
1382 newr->check_expr = r->check_expr;
1383 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1384
1385 result = lappend(result, newr);
1386 }
1387
1389
1390 return result;
1391}
1392
1393/*
1394 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1395 *
1396 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1397 * lives. The ref will be cleaned up when that context is reset/deleted.
1398 *
1399 * Caller must also tell us whether it wants check_exprstate fields to be
1400 * computed in the DomainConstraintState nodes attached to this ref.
1401 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1402 */
1403void
1405 MemoryContext refctx, bool need_exprstate)
1406{
1407 /* Look up the typcache entry --- we assume it survives indefinitely */
1409 ref->need_exprstate = need_exprstate;
1410 /* For safety, establish the callback before acquiring a refcount */
1411 ref->refctx = refctx;
1412 ref->dcc = NULL;
1413 ref->callback.func = dccref_deletion_callback;
1414 ref->callback.arg = ref;
1415 MemoryContextRegisterResetCallback(refctx, &ref->callback);
1416 /* Acquire refcount if there are constraints, and set up exported list */
1417 if (ref->tcache->domainData)
1418 {
1419 ref->dcc = ref->tcache->domainData;
1420 ref->dcc->dccRefCount++;
1421 if (ref->need_exprstate)
1422 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1423 ref->refctx);
1424 else
1425 ref->constraints = ref->dcc->constraints;
1426 }
1427 else
1428 ref->constraints = NIL;
1429}
1430
1431/*
1432 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1433 *
1434 * If the domain's constraint set changed, ref->constraints is updated to
1435 * point at a new list of cached constraints.
1436 *
1437 * In the normal case where nothing happened to the domain, this is cheap
1438 * enough that it's reasonable (and expected) to check before *each* use
1439 * of the constraint info.
1440 */
1441void
1443{
1444 TypeCacheEntry *typentry = ref->tcache;
1445
1446 /* Make sure typcache entry's data is up to date */
1447 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1448 typentry->typtype == TYPTYPE_DOMAIN)
1449 load_domaintype_info(typentry);
1450
1451 /* Transfer to ref object if there's new info, adjusting refcounts */
1452 if (ref->dcc != typentry->domainData)
1453 {
1454 /* Paranoia --- be sure link is nulled before trying to release */
1455 DomainConstraintCache *dcc = ref->dcc;
1456
1457 if (dcc)
1458 {
1459 /*
1460 * Note: we just leak the previous list of executable domain
1461 * constraints. Alternatively, we could keep those in a child
1462 * context of ref->refctx and free that context at this point.
1463 * However, in practice this code path will be taken so seldom
1464 * that the extra bookkeeping for a child context doesn't seem
1465 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1466 */
1467 ref->constraints = NIL;
1468 ref->dcc = NULL;
1469 decr_dcc_refcount(dcc);
1470 }
1471 dcc = typentry->domainData;
1472 if (dcc)
1473 {
1474 ref->dcc = dcc;
1475 dcc->dccRefCount++;
1476 if (ref->need_exprstate)
1477 ref->constraints = prep_domain_constraints(dcc->constraints,
1478 ref->refctx);
1479 else
1480 ref->constraints = dcc->constraints;
1481 }
1482 }
1483}
1484
1485/*
1486 * DomainHasConstraints --- utility routine to check if a domain has constraints
1487 *
1488 * This is defined to return false, not fail, if type is not a domain.
1489 */
1490bool
1492{
1493 TypeCacheEntry *typentry;
1494
1495 /*
1496 * Note: a side effect is to cause the typcache's domain data to become
1497 * valid. This is fine since we'll likely need it soon if there is any.
1498 */
1500
1501 return (typentry->domainData != NULL);
1502}
1503
1504
1505/*
1506 * array_element_has_equality and friends are helper routines to check
1507 * whether we should believe that array_eq and related functions will work
1508 * on the given array type or composite type.
1509 *
1510 * The logic above may call these repeatedly on the same type entry, so we
1511 * make use of the typentry->flags field to cache the results once known.
1512 * Also, we assume that we'll probably want all these facts about the type
1513 * if we want any, so we cache them all using only one lookup of the
1514 * component datatype(s).
1515 */
1516
1517static bool
1519{
1520 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1522 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1523}
1524
1525static bool
1527{
1528 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1530 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1531}
1532
1533static bool
1535{
1536 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1538 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1539}
1540
1541static bool
1548
1549static void
1551{
1553
1554 if (OidIsValid(elem_type))
1555 {
1557
1563 if (OidIsValid(elementry->eq_opr))
1564 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1565 if (OidIsValid(elementry->cmp_proc))
1566 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1567 if (OidIsValid(elementry->hash_proc))
1568 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1569 if (OidIsValid(elementry->hash_extended_proc))
1571 }
1573}
1574
1575/*
1576 * Likewise, some helper functions for composite types.
1577 */
1578
1579static bool
1581{
1582 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1584 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1585}
1586
1587static bool
1589{
1590 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1592 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1593}
1594
1595static bool
1597{
1598 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1600 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1601}
1602
1603static bool
1610
1611static void
1613{
1614 /*
1615 * For type RECORD, we can't really tell what will work, since we don't
1616 * have access here to the specific anonymous type. Just assume that
1617 * equality and comparison will (we may get a failure at runtime). We
1618 * could also claim that hashing works, but then if code that has the
1619 * option between a comparison-based (sort-based) and a hash-based plan
1620 * chooses hashing, stuff could fail that would otherwise work if it chose
1621 * a comparison-based plan. In practice more types support comparison
1622 * than hashing.
1623 */
1624 if (typentry->type_id == RECORDOID)
1625 {
1626 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1628 }
1629 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1630 {
1631 TupleDesc tupdesc;
1632 int newflags;
1633 int i;
1634
1635 /* Fetch composite type's tupdesc if we don't have it already */
1636 if (typentry->tupDesc == NULL)
1637 load_typcache_tupdesc(typentry);
1638 tupdesc = typentry->tupDesc;
1639
1640 /* Must bump the refcount while we do additional catalog lookups */
1641 IncrTupleDescRefCount(tupdesc);
1642
1643 /* Have each property if all non-dropped fields have the property */
1648 for (i = 0; i < tupdesc->natts; i++)
1649 {
1651 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1652
1653 if (attr->attisdropped)
1654 continue;
1655
1656 fieldentry = lookup_type_cache(attr->atttypid,
1661 if (!OidIsValid(fieldentry->eq_opr))
1663 if (!OidIsValid(fieldentry->cmp_proc))
1665 if (!OidIsValid(fieldentry->hash_proc))
1667 if (!OidIsValid(fieldentry->hash_extended_proc))
1669
1670 /* We can drop out of the loop once we disprove all bits */
1671 if (newflags == 0)
1672 break;
1673 }
1674 typentry->flags |= newflags;
1675
1676 DecrTupleDescRefCount(tupdesc);
1677 }
1678 else if (typentry->typtype == TYPTYPE_DOMAIN)
1679 {
1680 /* If it's domain over composite, copy base type's properties */
1682
1683 /* load up basetype info if we didn't already */
1684 if (typentry->domainBaseType == InvalidOid)
1685 {
1686 typentry->domainBaseTypmod = -1;
1687 typentry->domainBaseType =
1688 getBaseTypeAndTypmod(typentry->type_id,
1689 &typentry->domainBaseTypmod);
1690 }
1696 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1697 {
1699 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1703 }
1704 }
1706}
1707
1708/*
1709 * Likewise, some helper functions for range and multirange types.
1710 *
1711 * We can borrow the flag bits for array element properties to use for range
1712 * element properties, since those flag bits otherwise have no use in a
1713 * range or multirange type's typcache entry.
1714 */
1715
1716static bool
1718{
1719 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1721 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1722}
1723
1724static bool
1731
1732static void
1734{
1735 /* load up subtype link if we didn't already */
1736 if (typentry->rngelemtype == NULL &&
1737 typentry->typtype == TYPTYPE_RANGE)
1738 load_rangetype_info(typentry);
1739
1740 if (typentry->rngelemtype != NULL)
1741 {
1743
1744 /* might need to calculate subtype's hash function properties */
1748 if (OidIsValid(elementry->hash_proc))
1749 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1750 if (OidIsValid(elementry->hash_extended_proc))
1752 }
1754}
1755
1756static bool
1758{
1759 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1761 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1762}
1763
1764static bool
1771
1772static void
1774{
1775 /* load up range link if we didn't already */
1776 if (typentry->rngtype == NULL &&
1777 typentry->typtype == TYPTYPE_MULTIRANGE)
1778 load_multirangetype_info(typentry);
1779
1780 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1781 {
1783
1784 /* might need to calculate subtype's hash function properties */
1788 if (OidIsValid(elementry->hash_proc))
1789 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1790 if (OidIsValid(elementry->hash_extended_proc))
1792 }
1794}
1795
1796/*
1797 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1798 * to store 'typmod'.
1799 */
1800static void
1822
1823/*
1824 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1825 *
1826 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1827 * hasn't had its refcount bumped.
1828 */
1829static TupleDesc
1831{
1832 if (type_id != RECORDOID)
1833 {
1834 /*
1835 * It's a named composite type, so use the regular typcache.
1836 */
1837 TypeCacheEntry *typentry;
1838
1839 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1840 if (typentry->tupDesc == NULL && !noError)
1841 ereport(ERROR,
1843 errmsg("type %s is not composite",
1844 format_type_be(type_id))));
1845 return typentry->tupDesc;
1846 }
1847 else
1848 {
1849 /*
1850 * It's a transient record type, so look in our record-type table.
1851 */
1852 if (typmod >= 0)
1853 {
1854 /* It is already in our local cache? */
1855 if (typmod < RecordCacheArrayLen &&
1856 RecordCacheArray[typmod].tupdesc != NULL)
1857 return RecordCacheArray[typmod].tupdesc;
1858
1859 /* Are we attached to a shared record typmod registry? */
1861 {
1863
1864 /* Try to find it in the shared typmod index. */
1866 &typmod, false);
1867 if (entry != NULL)
1868 {
1869 TupleDesc tupdesc;
1870
1871 tupdesc = (TupleDesc)
1873 entry->shared_tupdesc);
1874 Assert(typmod == tupdesc->tdtypmod);
1875
1876 /* We may need to extend the local RecordCacheArray. */
1878
1879 /*
1880 * Our local array can now point directly to the TupleDesc
1881 * in shared memory, which is non-reference-counted.
1882 */
1883 RecordCacheArray[typmod].tupdesc = tupdesc;
1884 Assert(tupdesc->tdrefcount == -1);
1885
1886 /*
1887 * We don't share tupdesc identifiers across processes, so
1888 * assign one locally.
1889 */
1891
1893 entry);
1894
1895 return RecordCacheArray[typmod].tupdesc;
1896 }
1897 }
1898 }
1899
1900 if (!noError)
1901 ereport(ERROR,
1903 errmsg("record type has not been registered")));
1904 return NULL;
1905 }
1906}
1907
1908/*
1909 * lookup_rowtype_tupdesc
1910 *
1911 * Given a typeid/typmod that should describe a known composite type,
1912 * return the tuple descriptor for the type. Will ereport on failure.
1913 * (Use ereport because this is reachable with user-specified OIDs,
1914 * for example from record_in().)
1915 *
1916 * Note: on success, we increment the refcount of the returned TupleDesc,
1917 * and log the reference in CurrentResourceOwner. Caller must call
1918 * ReleaseTupleDesc when done using the tupdesc. (There are some
1919 * cases in which the returned tupdesc is not refcounted, in which
1920 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1921 * the tupdesc is guaranteed to live till process exit.)
1922 */
1925{
1926 TupleDesc tupDesc;
1927
1928 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1929 PinTupleDesc(tupDesc);
1930 return tupDesc;
1931}
1932
1933/*
1934 * lookup_rowtype_tupdesc_noerror
1935 *
1936 * As above, but if the type is not a known composite type and noError
1937 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1938 * type_id is passed, you'll get an ereport anyway.)
1939 */
1942{
1943 TupleDesc tupDesc;
1944
1945 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1946 if (tupDesc != NULL)
1947 PinTupleDesc(tupDesc);
1948 return tupDesc;
1949}
1950
1951/*
1952 * lookup_rowtype_tupdesc_copy
1953 *
1954 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1955 * copied into the CurrentMemoryContext and is not reference-counted.
1956 */
1959{
1960 TupleDesc tmp;
1961
1962 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1963 return CreateTupleDescCopyConstr(tmp);
1964}
1965
1966/*
1967 * lookup_rowtype_tupdesc_domain
1968 *
1969 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1970 * a domain over a named composite type; so this is effectively equivalent to
1971 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1972 * except for being a tad faster.
1973 *
1974 * Note: the reason we don't fold the look-through-domain behavior into plain
1975 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1976 * dealing with a domain. Otherwise they might construct a tuple that should
1977 * be of the domain type, but not apply domain constraints.
1978 */
1981{
1982 TupleDesc tupDesc;
1983
1984 if (type_id != RECORDOID)
1985 {
1986 /*
1987 * Check for domain or named composite type. We might as well load
1988 * whichever data is needed.
1989 */
1990 TypeCacheEntry *typentry;
1991
1992 typentry = lookup_type_cache(type_id,
1995 if (typentry->typtype == TYPTYPE_DOMAIN)
1997 typentry->domainBaseTypmod,
1998 noError);
1999 if (typentry->tupDesc == NULL && !noError)
2000 ereport(ERROR,
2002 errmsg("type %s is not composite",
2003 format_type_be(type_id))));
2004 tupDesc = typentry->tupDesc;
2005 }
2006 else
2007 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2008 if (tupDesc != NULL)
2009 PinTupleDesc(tupDesc);
2010 return tupDesc;
2011}
2012
2013/*
2014 * Hash function for the hash table of RecordCacheEntry.
2015 */
2016static uint32
2017record_type_typmod_hash(const void *data, size_t size)
2018{
2019 const RecordCacheEntry *entry = data;
2020
2021 return hashRowType(entry->tupdesc);
2022}
2023
2024/*
2025 * Match function for the hash table of RecordCacheEntry.
2026 */
2027static int
2028record_type_typmod_compare(const void *a, const void *b, size_t size)
2029{
2030 const RecordCacheEntry *left = a;
2031 const RecordCacheEntry *right = b;
2032
2033 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2034}
2035
2036/*
2037 * assign_record_type_typmod
2038 *
2039 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2040 * for the type, and set the tupdesc's tdtypmod field to a value that will
2041 * identify this cache entry to lookup_rowtype_tupdesc.
2042 */
2043void
2045{
2048 bool found;
2050
2051 Assert(tupDesc->tdtypeid == RECORDOID);
2052
2053 if (RecordCacheHash == NULL)
2054 {
2055 /* First time through: initialize the hash table */
2056 HASHCTL ctl;
2057
2058 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2059 ctl.entrysize = sizeof(RecordCacheEntry);
2062 RecordCacheHash = hash_create("Record information cache", 64,
2063 &ctl,
2065
2066 /* Also make sure CacheMemoryContext exists */
2067 if (!CacheMemoryContext)
2069 }
2070
2071 /*
2072 * Find a hashtable entry for this tuple descriptor. We don't use
2073 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2074 * the allocations succeed before we create the new entry.
2075 */
2077 &tupDesc,
2078 HASH_FIND, &found);
2079 if (found && recentry->tupdesc != NULL)
2080 {
2081 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2082 return;
2083 }
2084
2085 /* Not present, so need to manufacture an entry */
2087
2088 /* Look in the SharedRecordTypmodRegistry, if attached */
2090 if (entDesc == NULL)
2091 {
2092 /*
2093 * Make sure we have room before we CreateTupleDescCopy() or advance
2094 * NextRecordTypmod.
2095 */
2097
2098 /* Reference-counted local cache only. */
2099 entDesc = CreateTupleDescCopy(tupDesc);
2100 entDesc->tdrefcount = 1;
2101 entDesc->tdtypmod = NextRecordTypmod++;
2102 }
2103 else
2104 {
2106 }
2107
2109
2110 /* Assign a unique tupdesc identifier, too. */
2112
2113 /* Fully initialized; create the hash table entry */
2115 &tupDesc,
2116 HASH_ENTER, NULL);
2117 recentry->tupdesc = entDesc;
2118
2119 /* Update the caller's tuple descriptor. */
2120 tupDesc->tdtypmod = entDesc->tdtypmod;
2121
2123}
2124
2125/*
2126 * assign_record_type_identifier
2127 *
2128 * Get an identifier, which will be unique over the lifespan of this backend
2129 * process, for the current tuple descriptor of the specified composite type.
2130 * For named composite types, the value is guaranteed to change if the type's
2131 * definition does. For registered RECORD types, the value will not change
2132 * once assigned, since the registered type won't either. If an anonymous
2133 * RECORD type is specified, we return a new identifier on each call.
2134 */
2135uint64
2137{
2138 if (type_id != RECORDOID)
2139 {
2140 /*
2141 * It's a named composite type, so use the regular typcache.
2142 */
2143 TypeCacheEntry *typentry;
2144
2145 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2146 if (typentry->tupDesc == NULL)
2147 ereport(ERROR,
2149 errmsg("type %s is not composite",
2150 format_type_be(type_id))));
2151 Assert(typentry->tupDesc_identifier != 0);
2152 return typentry->tupDesc_identifier;
2153 }
2154 else
2155 {
2156 /*
2157 * It's a transient record type, so look in our record-type table.
2158 */
2159 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2160 RecordCacheArray[typmod].tupdesc != NULL)
2161 {
2162 Assert(RecordCacheArray[typmod].id != 0);
2163 return RecordCacheArray[typmod].id;
2164 }
2165
2166 /* For anonymous or unrecognized record type, generate a new ID */
2167 return ++tupledesc_id_counter;
2168 }
2169}
2170
2171/*
2172 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2173 * This exists only to avoid exposing private innards of
2174 * SharedRecordTypmodRegistry in a header.
2175 */
2176size_t
2181
2182/*
2183 * Initialize 'registry' in a pre-existing shared memory region, which must be
2184 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2185 * bytes.
2186 *
2187 * 'area' will be used to allocate shared memory space as required for the
2188 * typemod registration. The current process, expected to be a leader process
2189 * in a parallel query, will be attached automatically and its current record
2190 * types will be loaded into *registry. While attached, all calls to
2191 * assign_record_type_typmod will use the shared registry. Worker backends
2192 * will need to attach explicitly.
2193 *
2194 * Note that this function takes 'area' and 'segment' as arguments rather than
2195 * accessing them via CurrentSession, because they aren't installed there
2196 * until after this function runs.
2197 */
2198void
2200 dsm_segment *segment,
2201 dsa_area *area)
2202{
2206 int32 typmod;
2207
2209
2210 /* We can't already be attached to a shared registry. */
2214
2216
2217 /* Create the hash table of tuple descriptors indexed by themselves. */
2219
2220 /* Create the hash table of tuple descriptors indexed by typmod. */
2222
2224
2225 /* Initialize the SharedRecordTypmodRegistry. */
2226 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2227 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2229
2230 /*
2231 * Copy all entries from this backend's private registry into the shared
2232 * registry.
2233 */
2234 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2235 {
2240 TupleDesc tupdesc;
2241 bool found;
2242
2243 tupdesc = RecordCacheArray[typmod].tupdesc;
2244 if (tupdesc == NULL)
2245 continue;
2246
2247 /* Copy the TupleDesc into shared memory. */
2248 shared_dp = share_tupledesc(area, tupdesc, typmod);
2249
2250 /* Insert into the typmod table. */
2252 &tupdesc->tdtypmod,
2253 &found);
2254 if (found)
2255 elog(ERROR, "cannot create duplicate shared record typmod");
2256 typmod_table_entry->typmod = tupdesc->tdtypmod;
2257 typmod_table_entry->shared_tupdesc = shared_dp;
2259
2260 /* Insert into the record table. */
2261 record_table_key.shared = false;
2262 record_table_key.u.local_tupdesc = tupdesc;
2265 &found);
2266 if (!found)
2267 {
2268 record_table_entry->key.shared = true;
2269 record_table_entry->key.u.shared_tupdesc = shared_dp;
2270 }
2272 }
2273
2274 /*
2275 * Set up the global state that will tell assign_record_type_typmod and
2276 * lookup_rowtype_tupdesc_internal about the shared registry.
2277 */
2281
2282 /*
2283 * We install a detach hook in the leader, but only to handle cleanup on
2284 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2285 * the memory, the leader process will use a shared registry until it
2286 * exits.
2287 */
2289}
2290
2291/*
2292 * Attach to 'registry', which must have been initialized already by another
2293 * backend. Future calls to assign_record_type_typmod and
2294 * lookup_rowtype_tupdesc_internal will use the shared registry until the
2295 * current session is detached.
2296 */
2297void
2299{
2303
2305
2306 /* We can't already be attached to a shared registry. */
2313
2314 /*
2315 * We can't already have typmods in our local cache, because they'd clash
2316 * with those imported by SharedRecordTypmodRegistryInit. This should be
2317 * a freshly started parallel worker. If we ever support worker
2318 * recycling, a worker would need to zap its local cache in between
2319 * servicing different queries, in order to be able to call this and
2320 * synchronize typmods with a new leader; but that's problematic because
2321 * we can't be very sure that record-typmod-related state hasn't escaped
2322 * to anywhere else in the process.
2323 */
2325
2327
2328 /* Attach to the two hash tables. */
2331 registry->record_table_handle,
2335 registry->typmod_table_handle,
2336 NULL);
2337
2339
2340 /*
2341 * Set up detach hook to run at worker exit. Currently this is the same
2342 * as the leader's detach hook, but in future they might need to be
2343 * different.
2344 */
2348
2349 /*
2350 * Set up the session state that will tell assign_record_type_typmod and
2351 * lookup_rowtype_tupdesc_internal about the shared registry.
2352 */
2356}
2357
2358/*
2359 * InvalidateCompositeTypeCacheEntry
2360 * Invalidate particular TypeCacheEntry on Relcache inval callback
2361 *
2362 * Delete the cached tuple descriptor (if any) for the given composite
2363 * type, and reset whatever info we have cached about the composite type's
2364 * comparability.
2365 */
2366static void
2368{
2370
2371 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2372 OidIsValid(typentry->typrelid));
2373
2374 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2375 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2376
2377 /* Delete tupdesc if we have it */
2378 if (typentry->tupDesc != NULL)
2379 {
2380 /*
2381 * Release our refcount and free the tupdesc if none remain. We can't
2382 * use DecrTupleDescRefCount here because this reference is not logged
2383 * by the current resource owner.
2384 */
2385 Assert(typentry->tupDesc->tdrefcount > 0);
2386 if (--typentry->tupDesc->tdrefcount == 0)
2387 FreeTupleDesc(typentry->tupDesc);
2388 typentry->tupDesc = NULL;
2389
2390 /*
2391 * Also clear tupDesc_identifier, so that anyone watching it will
2392 * realize that the tupdesc has changed.
2393 */
2394 typentry->tupDesc_identifier = 0;
2395 }
2396
2397 /* Reset equality/comparison/hashing validity information */
2398 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2399
2400 /*
2401 * Call delete_rel_type_cache_if_needed() if we actually cleared
2402 * something.
2403 */
2406}
2407
2408/*
2409 * TypeCacheRelCallback
2410 * Relcache inval callback function
2411 *
2412 * Delete the cached tuple descriptor (if any) for the given rel's composite
2413 * type, or for all composite types if relid == InvalidOid. Also reset
2414 * whatever info we have cached about the composite type's comparability.
2415 *
2416 * This is called when a relcache invalidation event occurs for the given
2417 * relid. We can't use syscache to find a type corresponding to the given
2418 * relation because the code can be called outside of transaction. Thus, we
2419 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2420 */
/*
 * Relcache invalidation callback (presumably TypeCacheRelCallback(Datum
 * arg, Oid relid) -- the signature line was lost in extraction; TODO
 * confirm against upstream typcache.c).
 *
 * When 'relid' is valid, invalidate the typcache data for the composite
 * type whose underlying relation it is; when invalid, reset all composite
 * types and all domain-over-composite operator flags.
 *
 * NOTE(review): the 'relentry' declaration, both hash_search() lookup
 * calls, the InvalidateCompositeTypeCacheEntry() calls, and the
 * domain-base-is-composite flag tests were dropped by extraction here.
 */
2421static void
2423{
2424 TypeCacheEntry *typentry;
2425
2426 /*
2427 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2428 * callback wouldn't be registered
2429 */
2430 if (OidIsValid(relid))
2431 {
2433
2434 /*
2435 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2436 * corresponding typcache entry has something to clean.
2437 */
2439 &relid,
2440 HASH_FIND, NULL);
2441
2442 if (relentry != NULL)
2443 {
2445 &relentry->composite_typid,
2446 HASH_FIND, NULL);
2447
2448 if (typentry != NULL)
2449 {
2450 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2451 Assert(relid == typentry->typrelid);
2452
2454 }
2455 }
2456
2457 /*
2458 * Visit all the domain types sequentially. Typically, this shouldn't
2459 * affect performance since domain types are less tended to bloat.
2460 * Domain types are created manually, unlike composite types which are
2461 * automatically created for every temporary table.
2462 */
2463 for (typentry = firstDomainTypeEntry;
2464 typentry != NULL;
2465 typentry = typentry->nextDomain)
2466 {
2467 /*
2468 * If it's domain over composite, reset flags. (We don't bother
2469 * trying to determine whether the specific base type needs a
2470 * reset.) Note that if we haven't determined whether the base
2471 * type is composite, we don't need to reset anything.
2472 */
2474 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2475 }
2476 }
2477 else
2478 {
2479 HASH_SEQ_STATUS status;
2480
2481 /*
2482 * Relid is invalid. By convention, we need to reset all composite
2483 * types in cache. Also, we should reset flags for domain types, and
2484 * we loop over all entries in hash, so, do it in a single scan.
2485 */
2486 hash_seq_init(&status, TypeCacheHash);
2487 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2488 {
2489 if (typentry->typtype == TYPTYPE_COMPOSITE)
2490 {
2492 }
2493 else if (typentry->typtype == TYPTYPE_DOMAIN)
2494 {
2495 /*
2496 * If it's domain over composite, reset flags. (We don't
2497 * bother trying to determine whether the specific base type
2498 * needs a reset.) Note that if we haven't determined whether
2499 * the base type is composite, we don't need to reset
2500 * anything.
2501 */
2503 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2504 }
2505 }
2506 }
2507}
2508
2509/*
2510 * TypeCacheTypCallback
2511 * Syscache inval callback function
2512 *
2513 * This is called when a syscache invalidation event occurs for any
2514 * pg_type row. If we have information cached about that type, mark
2515 * it as needing to be reloaded.
2516 */
/*
 * NOTE(review): the parameter-list line, the second half of the flag mask
 * (the TCFLAGS_* constant cleared together with TCFLAGS_HAVE_PG_TYPE_DATA),
 * and the delete_rel_type_cache_if_needed() call line were dropped by
 * extraction -- compare with upstream before relying on this text.
 */
2517static void
2519{
2520 HASH_SEQ_STATUS status;
2521 TypeCacheEntry *typentry;
2522
2523 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2524
2525 /*
2526 * By convention, zero hash value is passed to the callback as a sign that
2527 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2528 * InvalidateSystemCachesExtended().
2529 */
2530 if (hashvalue == 0)
2531 hash_seq_init(&status, TypeCacheHash);
2532 else
2533 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2534
2535 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2536 {
2537 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2538
2539 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2540
2541 /*
2542 * Mark the data obtained directly from pg_type as invalid. Also, if
2543 * it's a domain, typnotnull might've changed, so we'll need to
2544 * recalculate its constraints.
2545 */
2546 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2548
2549 /*
2550 * Call delete_rel_type_cache_if_needed() if we cleaned
2551 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2552 */
2553 if (hadPgTypeData)
2555 }
2556}
2557
2558/*
2559 * TypeCacheOpcCallback
2560 * Syscache inval callback function
2561 *
2562 * This is called when a syscache invalidation event occurs for any pg_opclass
2563 * row. In principle we could probably just invalidate data dependent on the
2564 * particular opclass, but since updates on pg_opclass are rare in production
2565 * it doesn't seem worth a lot of complication: we just mark all cached data
2566 * invalid.
2567 *
2568 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2569 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2570 * is not allowed to be used to add/drop the primary operators and functions
2571 * of an opclass, only cross-type members of a family; and the latter sorts
2572 * of members are not going to get cached here.
2573 */
/*
 * NOTE(review): the parameter-list line and the
 * delete_rel_type_cache_if_needed(typentry) call line were dropped by
 * extraction.
 */
2574static void
2576{
2577 HASH_SEQ_STATUS status;
2578 TypeCacheEntry *typentry;
2579
2580 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2581 hash_seq_init(&status, TypeCacheHash);
2582 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2583 {
2584 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2585
2586 /* Reset equality/comparison/hashing validity information */
2587 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2588
2589 /*
2590 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2591 * of TCFLAGS_OPERATOR_FLAGS.
2592 */
2593 if (hadOpclass)
2595 }
2596}
2597
2598/*
2599 * TypeCacheConstrCallback
2600 * Syscache inval callback function
2601 *
2602 * This is called when a syscache invalidation event occurs for any
2603 * pg_constraint row. We flush information about domain constraints
2604 * when this happens.
2605 *
2606 * It's slightly annoying that we can't tell whether the inval event was for
2607 * a domain constraint record or not; there's usually more update traffic
2608 * for table constraints than domain constraints, so we'll do a lot of
2609 * useless flushes. Still, this is better than the old no-caching-at-all
2610 * approach to domain constraints.
2611 */
/*
 * NOTE(review): the parameter-list line and the statement that actually
 * clears the domain-constraint validity flag inside the loop were dropped
 * by extraction.
 */
2612static void
2614{
2615 TypeCacheEntry *typentry;
2616
2617 /*
2618 * Because this is called very frequently, and typically very few of the
2619 * typcache entries are for domains, we don't use hash_seq_search here.
2620 * Instead we thread all the domain-type entries together so that we can
2621 * visit them cheaply.
2622 */
2623 for (typentry = firstDomainTypeEntry;
2624 typentry != NULL;
2625 typentry = typentry->nextDomain)
2626 {
2627 /* Reset domain constraint validity information */
2629 }
2630}
2631
2632
2633/*
2634 * Check if given OID is part of the subset that's sortable by comparisons
2635 */
2636static inline bool
2638{
2639 Oid offset;
2640
2641 if (arg < enumdata->bitmap_base)
2642 return false;
2643 offset = arg - enumdata->bitmap_base;
2644 if (offset > (Oid) INT_MAX)
2645 return false;
2646 return bms_is_member((int) offset, enumdata->sorted_values);
2647}
2648
2649
2650/*
2651 * compare_values_of_enum
2652 * Compare two members of an enum type.
2653 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2654 *
2655 * Note: currently, the enumData cache is refreshed only if we are asked
2656 * to compare an enum value that is not already in the cache. This is okay
2657 * because there is no support for re-ordering existing values, so comparisons
2658 * of previously cached values will return the right answer even if other
2659 * values have been added since we last loaded the cache.
2660 *
2661 * Note: the enum logic has a special-case rule about even-numbered versus
2662 * odd-numbered OIDs, but we take no account of that rule here; this
2663 * routine shouldn't even get called when that rule applies.
2664 */
/*
 * NOTE(review): the parameter-list line, the 'enumdata' declaration, the
 * enum_known_sorted() fast-path condition, and both pairs of
 * find_enumitem() calls were dropped by extraction -- compare with
 * upstream typcache.c.
 */
2665int
2667{
2669 EnumItem *item1;
2670 EnumItem *item2;
2671
2672 /*
2673 * Equal OIDs are certainly equal --- this case was probably handled by
2674 * our caller, but we may as well check.
2675 */
2676 if (arg1 == arg2)
2677 return 0;
2678
2679 /* Load up the cache if first time through */
2680 if (tcache->enumData == NULL)
2681 load_enum_cache_data(tcache);
2682 enumdata = tcache->enumData;
2683
2684 /*
2685 * If both OIDs are known-sorted, we can just compare them directly.
2686 */
2689 {
2690 if (arg1 < arg2)
2691 return -1;
2692 else
2693 return 1;
2694 }
2695
2696 /*
2697 * Slow path: we have to identify their actual sort-order positions.
2698 */
2701
2702 if (item1 == NULL || item2 == NULL)
2703 {
2704 /*
2705 * We couldn't find one or both values. That means the enum has
2706 * changed under us, so re-initialize the cache and try again. We
2707 * don't bother retrying the known-sorted case in this path.
2708 */
2709 load_enum_cache_data(tcache);
2710 enumdata = tcache->enumData;
2711
2714
2715 /*
2716 * If we still can't find the values, complain: we must have corrupt
2717 * data.
2718 */
2719 if (item1 == NULL)
2720 elog(ERROR, "enum value %u not found in cache for enum %s",
2721 arg1, format_type_be(tcache->type_id));
2722 if (item2 == NULL)
2723 elog(ERROR, "enum value %u not found in cache for enum %s",
2724 arg2, format_type_be(tcache->type_id));
2725 }
2726
2727 if (item1->sort_order < item2->sort_order)
2728 return -1;
2729 else if (item1->sort_order > item2->sort_order)
2730 return 1;
2731 else
2732 return 0;
2733}
2734
2735/*
2736 * Load (or re-load) the enumData member of the typcache entry.
2737 */
/*
 * Scans pg_enum for all members of tcache's enum type, sorts them by OID,
 * heuristically identifies the longest OID-sorted run (stored as a bitmap
 * so those values can be compared by raw OID), and installs the result in
 * CacheMemoryContext as tcache->enumData.
 *
 * NOTE(review): the Relation/SysScanDesc/HeapTuple/ScanKeyData/enumdata
 * declarations, the ScanKeyInit()/systable_beginscan() calls, the scan
 * loop header and Form_pg_enum line, the systable_endscan()/table_close()
 * lines, the 'this_bitmap' declaration, and the memory-context switch
 * lines were dropped by extraction.
 */
2738static void
2740{
2746 EnumItem *items;
2747 int numitems;
2748 int maxitems;
2749 Oid bitmap_base;
2750 Bitmapset *bitmap;
2752 int bm_size,
2753 start_pos;
2754
2755 /* Check that this is actually an enum */
2756 if (tcache->typtype != TYPTYPE_ENUM)
2757 ereport(ERROR,
2759 errmsg("%s is not an enum",
2760 format_type_be(tcache->type_id))));
2761
2762 /*
2763 * Read all the information for members of the enum type. We collect the
2764 * info in working memory in the caller's context, and then transfer it to
2765 * permanent memory in CacheMemoryContext. This minimizes the risk of
2766 * leaking memory from CacheMemoryContext in the event of an error partway
2767 * through.
2768 */
2769 maxitems = 64;
2770 items = palloc_array(EnumItem, maxitems);
2771 numitems = 0;
2772
2773 /* Scan pg_enum for the members of the target enum type. */
2777 ObjectIdGetDatum(tcache->type_id));
2778
2782 true, NULL,
2783 1, &skey);
2784
2786 {
2788
2789 if (numitems >= maxitems)
2790 {
2791 maxitems *= 2;
2792 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2793 }
2794 items[numitems].enum_oid = en->oid;
2795 items[numitems].sort_order = en->enumsortorder;
2796 numitems++;
2797 }
2798
2801
2802 /* Sort the items into OID order */
2803 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2804
2805 /*
2806 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2807 * known to be in order and can thus be compared with just OID comparison.
2808 *
2809 * The point of this is that the enum's initial OIDs were certainly in
2810 * order, so there is some subset that can be compared via OID comparison;
2811 * and we'd rather not do binary searches unnecessarily.
2812 *
2813 * This is somewhat heuristic, and might identify a subset of OIDs that
2814 * isn't exactly what the type started with. That's okay as long as the
2815 * subset is correctly sorted.
2816 */
2817 bitmap_base = InvalidOid;
2818 bitmap = NULL;
2819 bm_size = 1; /* only save sets of at least 2 OIDs */
2820
2821 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2822 {
2823 /*
2824 * Identify longest sorted subsequence starting at start_pos
2825 */
2827 int this_bm_size = 1;
2828 Oid start_oid = items[start_pos].enum_oid;
2829 float4 prev_order = items[start_pos].sort_order;
2830 int i;
2831
2832 for (i = start_pos + 1; i < numitems; i++)
2833 {
2834 Oid offset;
2835
2836 offset = items[i].enum_oid - start_oid;
2837 /* quit if bitmap would be too large; cutoff is arbitrary */
2838 if (offset >= 8192)
2839 break;
2840 /* include the item if it's in-order */
2841 if (items[i].sort_order > prev_order)
2842 {
2843 prev_order = items[i].sort_order;
2844 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2845 this_bm_size++;
2846 }
2847 }
2848
2849 /* Remember it if larger than previous best */
2850 if (this_bm_size > bm_size)
2851 {
2852 bms_free(bitmap);
2853 bitmap_base = start_oid;
2854 bitmap = this_bitmap;
2856 }
2857 else
2859
2860 /*
2861 * Done if it's not possible to find a longer sequence in the rest of
2862 * the list. In typical cases this will happen on the first
2863 * iteration, which is why we create the bitmaps on the fly instead of
2864 * doing a second pass over the list.
2865 */
2866 if (bm_size >= (numitems - start_pos - 1))
2867 break;
2868 }
2869
2870 /* OK, copy the data into CacheMemoryContext */
2873 palloc(offsetof(TypeCacheEnumData, enum_values) +
2874 numitems * sizeof(EnumItem));
2875 enumdata->bitmap_base = bitmap_base;
2876 enumdata->sorted_values = bms_copy(bitmap);
2877 enumdata->num_values = numitems;
2878 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2880
2881 pfree(items);
2882 bms_free(bitmap);
2883
2884 /* And link the finished cache struct into the typcache */
2885 if (tcache->enumData != NULL)
2886 pfree(tcache->enumData);
2887 tcache->enumData = enumdata;
2888}
2889
2890/*
2891 * Locate the EnumItem with the given OID, if present
2892 */
/*
 * find_enumitem(TypeCacheEnumData *enumdata, Oid arg) -- the
 * parameter-list line was dropped by extraction (signature per the
 * file's forward-reference index).
 *
 * Binary-searches the OID-sorted enum_values array; returns NULL if the
 * OID is absent or the array is empty.
 */
2893static EnumItem *
2895{
2896 EnumItem srch;
2897
2898 /* On some versions of Solaris, bsearch of zero items dumps core */
2899 if (enumdata->num_values <= 0)
2900 return NULL;
2901
2902 srch.enum_oid = arg;
2903 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2904 sizeof(EnumItem), enum_oid_cmp);
2905}
2906
2907/*
2908 * qsort comparison function for OID-ordered EnumItems
2909 */
2910static int
2911enum_oid_cmp(const void *left, const void *right)
2912{
2913 const EnumItem *l = (const EnumItem *) left;
2914 const EnumItem *r = (const EnumItem *) right;
2915
2916 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2917}
2918
2919/*
2920 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2921 * to the given value and return a dsa_pointer.
2922 */
/*
 * NOTE(review): the parameter-list line (dsa_area *area, TupleDesc
 * tupdesc, uint32 typmod per the file's forward-reference index) and the
 * 'dsa_pointer shared_dp' declaration were dropped by extraction.
 */
2923static dsa_pointer
2925{
2927 TupleDesc shared;
2928
2929 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2930 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2931 TupleDescCopy(shared, tupdesc);
2932 shared->tdtypmod = typmod;
2933
2934 return shared_dp;
2935}
2936
2937/*
2938 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2939 * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2940 * Tuple descriptors returned by this function are not reference counted, and
2941 * will exist at least as long as the current backend remained attached to the
2942 * current session.
2943 */
/*
 * NOTE(review): this copy is heavily truncated by extraction -- missing
 * are the parameter-list line, several local declarations (key, record
 * and typmod table entry pointers, shared_dp), the dshash_find()/
 * dshash_find_or_insert() calls, the pg_atomic_fetch_add_u32() typmod
 * allocation, the dsa_free() cleanup in the PG_CATCH block, the
 * dshash_release_lock()/dshash_delete_key() calls, and the
 * dsa_get_address() line in the final return path. Do not reconstruct
 * from this text; consult upstream typcache.c.
 */
2944static TupleDesc
2946{
2947 TupleDesc result;
2952 bool found;
2953 uint32 typmod;
2954
2955 /* If not even attached, nothing to do. */
2957 return NULL;
2958
2959 /* Try to find a matching tuple descriptor in the record table. */
2960 key.shared = false;
2961 key.u.local_tupdesc = tupdesc;
2965 {
2966 Assert(record_table_entry->key.shared);
2969 result = (TupleDesc)
2971 record_table_entry->key.u.shared_tupdesc);
2972 Assert(result->tdrefcount == -1);
2973
2974 return result;
2975 }
2976
2977 /* Allocate a new typmod number. This will be wasted if we error out. */
2978 typmod = (int)
2980 1);
2981
2982 /* Copy the TupleDesc into shared memory. */
2983 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2984
2985 /*
2986 * Create an entry in the typmod table so that others will understand this
2987 * typmod number.
2988 */
2989 PG_TRY();
2990 {
2993 &typmod, &found);
2994 if (found)
2995 elog(ERROR, "cannot create duplicate shared record typmod");
2996 }
2997 PG_CATCH();
2998 {
3000 PG_RE_THROW();
3001 }
3002 PG_END_TRY();
3003 typmod_table_entry->typmod = typmod;
3004 typmod_table_entry->shared_tupdesc = shared_dp;
3007
3008 /*
3009 * Finally create an entry in the record table so others with matching
3010 * tuple descriptors can reuse the typmod.
3011 */
3014 &found);
3015 if (found)
3016 {
3017 /*
3018 * Someone concurrently inserted a matching tuple descriptor since the
3019 * first time we checked. Use that one instead.
3020 */
3023
3024 /* Might as well free up the space used by the one we created. */
3026 &typmod);
3027 Assert(found);
3029
3030 /* Return the one we found. */
3031 Assert(record_table_entry->key.shared);
3032 result = (TupleDesc)
3034 record_table_entry->key.u.shared_tupdesc);
3035 Assert(result->tdrefcount == -1);
3036
3037 return result;
3038 }
3039
3040 /* Store it and return it. */
3041 record_table_entry->key.shared = true;
3042 record_table_entry->key.u.shared_tupdesc = shared_dp;
3045 result = (TupleDesc)
3047 Assert(result->tdrefcount == -1);
3048
3049 return result;
3050}
3051
3052/*
3053 * On-DSM-detach hook to forget about the current shared record typmod
3054 * infrastructure. This is currently used by both leader and workers.
3055 */
3056static void
3072
3073/*
3074 * Insert RelIdToTypeIdCacheHash entry if needed.
3075 */
/*
 * NOTE(review): the parameter-list line (TypeCacheEntry *typentry per the
 * file's forward-reference index), the 'relentry' declaration, and the
 * hash_search(RelIdToTypeIdCacheHash, ...) call line were dropped by
 * extraction.
 */
3076static void
3078{
3079 /* Immediately quit for non-composite types */
3080 if (typentry->typtype != TYPTYPE_COMPOSITE)
3081 return;
3082
3083 /* typrelid should be given for composite types */
3084 Assert(OidIsValid(typentry->typrelid));
3085
3086 /*
3087 * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3088 * information indicating it should be here.
3089 */
3090 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3091 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3092 typentry->tupDesc != NULL)
3093 {
3095 bool found;
3096
3098 &typentry->typrelid,
3099 HASH_ENTER, &found);
3100 relentry->relid = typentry->typrelid;
3101 relentry->composite_typid = typentry->type_id;
3102 }
3103}
3104
3105/*
3106 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3107 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3108 * or tupDesc.
3109 */
/*
 * NOTE(review): the parameter-list line and both
 * hash_search(RelIdToTypeIdCacheHash, ...) call lines (HASH_REMOVE and
 * the assert-only HASH_FIND) were dropped by extraction.
 */
3110static void
3112{
3113#ifdef USE_ASSERT_CHECKING
3114 int i;
3115 bool is_in_progress = false;
3116
3117 for (i = 0; i < in_progress_list_len; i++)
3118 {
3119 if (in_progress_list[i] == typentry->type_id)
3120 {
3121 is_in_progress = true;
3122 break;
3123 }
3124 }
3125#endif
3126
3127 /* Immediately quit for non-composite types */
3128 if (typentry->typtype != TYPTYPE_COMPOSITE)
3129 return;
3130
3131 /* typrelid should be given for composite types */
3132 Assert(OidIsValid(typentry->typrelid));
3133
3134 /*
3135 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3136 * information indicating entry should be still there.
3137 */
3138 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3139 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3140 typentry->tupDesc == NULL)
3141 {
3142 bool found;
3143
3145 &typentry->typrelid,
3146 HASH_REMOVE, &found);
3147 Assert(found || is_in_progress);
3148 }
3149 else
3150 {
3151#ifdef USE_ASSERT_CHECKING
3152 /*
3153 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3154 * entry if it should exist.
3155 */
3156 bool found;
3157
3158 if (!is_in_progress)
3159 {
3161 &typentry->typrelid,
3162 HASH_FIND, &found);
3163 Assert(found);
3164 }
3165#endif
3166 }
3167}
3168
3169/*
3170 * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3171 * entries, marked as in-progress by lookup_type_cache(). It may happen
3172 * in case of an error or interruption during the lookup_type_cache() call.
3173 */
/*
 * NOTE(review): the function name line is missing (presumably
 * finalize_in_progress_typentries(void)); also dropped were the
 * hash_search(TypeCacheHash, &in_progress_list[i], ...) lookup lines,
 * the insert_rel_type_cache_if_needed(typentry) call, and the final
 * reset of in_progress_list_len -- confirm against upstream.
 */
3174static void
3176{
3177 int i;
3178
3179 for (i = 0; i < in_progress_list_len; i++)
3180 {
3181 TypeCacheEntry *typentry;
3182
3185 HASH_FIND, NULL);
3186 if (typentry)
3188 }
3189
3191}
3192
3193void
3198
3199void
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition atomics.h:366
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:799
Bitmapset * bms_copy(const Bitmapset *a)
Definition bitmapset.c:122
#define TextDatumGetCString(d)
Definition builtins.h:99
#define NameStr(name)
Definition c.h:777
#define RegProcedureIsValid(p)
Definition c.h:804
#define Assert(condition)
Definition c.h:885
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:492
int32_t int32
Definition c.h:554
uint64_t uint64
Definition c.h:559
uint32_t uint32
Definition c.h:558
float float4
Definition c.h:655
#define MemSet(start, val, len)
Definition c.h:1035
#define OidIsValid(objectId)
Definition c.h:800
size_t Size
Definition c.h:631
void CreateCacheMemoryContext(void)
Definition catcache.c:715
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition dsa.c:957
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition dsa.c:841
uint64 dsa_pointer
Definition dsa.h:62
#define dsa_allocate(area, size)
Definition dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition dshash.c:505
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition dshash.c:592
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition dshash.c:560
void dshash_detach(dshash_table *hash_table)
Definition dshash.c:309
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition dshash.c:392
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition dshash.c:369
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition dshash.c:272
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition dshash.c:435
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition dshash.c:583
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition dshash.c:208
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition dshash.c:574
dsa_pointer dshash_table_handle
Definition dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition dynahash.c:1400
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:908
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
#define PG_RE_THROW()
Definition elog.h:405
#define PG_TRY(...)
Definition elog.h:372
#define PG_END_TRY(...)
Definition elog.h:397
#define ERROR
Definition elog.h:39
#define PG_CATCH(...)
Definition elog.h:382
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition execExpr.c:143
@ DOM_CONSTRAINT_CHECK
Definition execnodes.h:1053
@ DOM_CONSTRAINT_NOTNULL
Definition execnodes.h:1052
#define palloc_array(type, count)
Definition fe_memutils.h:76
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition fmgr.c:138
char * format_type_be(Oid type_oid)
void systable_endscan(SysScanDesc sysscan)
Definition genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition genam.c:388
#define HASHSTANDARD_PROC
Definition hash.h:355
#define HASHEXTENDED_PROC
Definition hash.h:356
@ HASH_FIND
Definition hsearch.h:113
@ HASH_REMOVE
Definition hsearch.h:115
@ HASH_ENTER
Definition hsearch.h:114
#define HASH_ELEM
Definition hsearch.h:95
#define HASH_COMPARE
Definition hsearch.h:99
#define HASH_FUNCTION
Definition hsearch.h:98
#define HASH_BLOBS
Definition hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
#define IsParallelWorker()
Definition parallel.h:62
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition indexcmds.c:2368
long val
Definition informix.c:689
#define INJECTION_POINT(name, arg)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition int.h:719
void CacheRegisterSyscacheCallback(SysCacheIdentifier cacheid, SyscacheCallbackFunction func, Datum arg)
Definition inval.c:1816
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition inval.c:1858
int b
Definition isn.c:74
int a
Definition isn.c:73
int i
Definition isn.c:77
List * lappend(List *list, void *datum)
Definition list.c:339
List * lcons(void *datum, List *list)
Definition list.c:495
#define AccessShareLock
Definition lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition lsyscache.c:1314
Oid get_opclass_family(Oid opclass)
Definition lsyscache.c:1292
Oid get_multirange_range(Oid multirangeOid)
Definition lsyscache.c:3635
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition lsyscache.c:872
RegProcedure get_opcode(Oid opno)
Definition lsyscache.c:1435
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition lsyscache.c:168
Oid get_base_element_type(Oid typid)
Definition lsyscache.c:2984
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition lsyscache.c:2690
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
char * pstrdup(const char *in)
Definition mcxt.c:1781
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition mcxt.c:582
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition mcxt.c:686
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
MemoryContext CacheMemoryContext
Definition mcxt.c:169
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition memutils.h:170
#define BTORDER_PROC
Definition nbtree.h:717
#define copyObject(obj)
Definition nodes.h:232
#define makeNode(_type_)
Definition nodes.h:161
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition palloc.h:109
FormData_pg_attribute * Form_pg_attribute
static uint32 pg_nextpower2_32(uint32 num)
END_CATALOG_STRUCT typedef FormData_pg_constraint * Form_pg_constraint
const void * data
END_CATALOG_STRUCT typedef FormData_pg_enum * Form_pg_enum
Definition pg_enum.h:48
#define lfirst(lc)
Definition pg_list.h:172
#define NIL
Definition pg_list.h:68
END_CATALOG_STRUCT typedef FormData_pg_range * Form_pg_range
Definition pg_range.h:71
END_CATALOG_STRUCT typedef FormData_pg_type * Form_pg_type
Definition pg_type.h:265
Expr * expression_planner(Expr *expr)
Definition planner.c:6819
#define qsort(a, b, c, d)
Definition port.h:495
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:262
uint64_t Datum
Definition postgres.h:70
#define InvalidOid
unsigned int Oid
char * c
static int fb(int x)
tree ctl
Definition radixtree.h:1838
void * stringToNode(const char *str)
Definition read.c:90
#define RelationGetDescr(relation)
Definition rel.h:540
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition scankey.c:76
Session * CurrentSession
Definition session.c:48
void relation_close(Relation relation, LOCKMODE lockmode)
Definition relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition relation.c:47
#define BTGreaterStrategyNumber
Definition stratnum.h:33
#define HTEqualStrategyNumber
Definition stratnum.h:41
#define BTLessStrategyNumber
Definition stratnum.h:29
#define BTEqualStrategyNumber
Definition stratnum.h:31
MemoryContext dccContext
Definition typcache.c:142
DomainConstraintType constrainttype
Definition execnodes.h:1059
ExprState * check_exprstate
Definition execnodes.h:1062
float4 sort_order
Definition typcache.c:150
Oid enum_oid
Definition typcache.c:149
Oid fn_oid
Definition fmgr.h:59
Size keysize
Definition hsearch.h:75
Definition pg_list.h:54
TupleDesc tupdesc
Definition typcache.c:174
Form_pg_class rd_rel
Definition rel.h:111
dsm_segment * segment
Definition session.h:27
dshash_table * shared_record_table
Definition session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition session.h:31
dsa_area * area
Definition session.h:28
dshash_table * shared_typmod_table
Definition session.h:33
SharedRecordTableKey key
Definition typcache.c:213
TupleDesc local_tupdesc
Definition typcache.c:201
union SharedRecordTableKey::@33 u
dsa_pointer shared_tupdesc
Definition typcache.c:202
dshash_table_handle typmod_table_handle
Definition typcache.c:186
pg_atomic_uint32 next_typmod
Definition typcache.c:188
dshash_table_handle record_table_handle
Definition typcache.c:184
dsa_pointer shared_tupdesc
Definition typcache.c:223
int32 tdtypmod
Definition tupdesc.h:139
uint32 type_id_hash
Definition typcache.h:36
uint64 tupDesc_identifier
Definition typcache.h:91
FmgrInfo hash_proc_finfo
Definition typcache.h:78
int32 domainBaseTypmod
Definition typcache.h:116
Oid hash_extended_proc
Definition typcache.h:67
FmgrInfo rng_cmp_proc_finfo
Definition typcache.h:102
FmgrInfo cmp_proc_finfo
Definition typcache.h:77
struct TypeCacheEntry * rngelemtype
Definition typcache.h:99
TupleDesc tupDesc
Definition typcache.h:90
FmgrInfo hash_extended_proc_finfo
Definition typcache.h:79
DomainConstraintCache * domainData
Definition typcache.h:122
struct TypeCacheEntry * rngtype
Definition typcache.h:109
FmgrInfo rng_subdiff_finfo
Definition typcache.h:104
FmgrInfo eq_opr_finfo
Definition typcache.h:76
Oid btree_opintype
Definition typcache.h:59
struct TypeCacheEnumData * enumData
Definition typcache.h:131
struct TypeCacheEntry * nextDomain
Definition typcache.h:134
FmgrInfo rng_canonical_finfo
Definition typcache.h:103
Oid hash_opintype
Definition typcache.h:61
char typstorage
Definition typcache.h:42
Bitmapset * sorted_values
Definition typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition typcache.c:158
void ReleaseSysCache(HeapTuple tuple)
Definition syscache.c:264
HeapTuple SearchSysCache1(SysCacheIdentifier cacheId, Datum key1)
Definition syscache.c:220
#define GetSysCacheHashValue1(cacheId, key1)
Definition syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
static ItemArray items
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition tupdesc.c:323
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition tupdesc.c:411
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:560
void FreeTupleDesc(TupleDesc tupdesc)
Definition tupdesc.c:485
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:542
uint32 hashRowType(TupleDesc desc)
Definition tupdesc.c:796
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition tupdesc.c:235
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition tupdesc.c:760
#define TupleDescSize(src)
Definition tupdesc.h:198
#define PinTupleDesc(tupdesc)
Definition tupdesc.h:213
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:160
struct TupleDescData * TupleDesc
Definition tupdesc.h:145
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1717
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3077
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition typcache.c:1404
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1830
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition typcache.c:1924
static void TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2575
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition typcache.c:2298
#define TCFLAGS_OPERATOR_FLAGS
Definition typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1733
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition typcache.c:115
void AtEOXact_TypeCache(void)
Definition typcache.c:3194
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition typcache.c:119
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition typcache.c:2739
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1596
static HTAB * RelIdToTypeIdCacheHash
Definition typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2894
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1604
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition typcache.c:2945
static int in_progress_list_maxlen
Definition typcache.c:228
static int32 NextRecordTypmod
Definition typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1980
static Oid * in_progress_list
Definition typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3111
#define TCFLAGS_CHECKED_GT_OPR
Definition typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1757
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition typcache.c:1366
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1941
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition typcache.c:1580
#define TCFLAGS_CHECKED_LT_OPR
Definition typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition typcache.c:1345
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition typcache.c:2367
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition typcache.c:2199
static int dcs_cmp(const void *a, const void *b)
Definition typcache.c:1321
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1542
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1534
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1064
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition typcache.c:362
#define TCFLAGS_CHECKED_CMP_PROC
Definition typcache.c:105
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1765
static int in_progress_list_len
Definition typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition typcache.c:1518
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition typcache.c:2924
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1006
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition typcache.c:2136
static RecordCacheArrayEntry * RecordCacheArray
Definition typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1725
static HTAB * RecordCacheHash
Definition typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2637
static TypeCacheEntry * firstDomainTypeEntry
Definition typcache.c:96
void AtEOSubXact_TypeCache(void)
Definition typcache.c:3200
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition typcache.c:3057
#define TCFLAGS_HAVE_ELEM_HASHING
Definition typcache.c:111
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition typcache.c:107
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition typcache.c:1086
bool DomainHasConstraints(Oid type_id)
Definition typcache.c:1491
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition typcache.c:2422
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1550
static void TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2518
size_t SharedRecordTypmodRegistryEstimate(void)
Definition typcache.c:2177
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1773
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition typcache.c:108
static void TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
Definition typcache.c:2613
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition typcache.c:1526
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition typcache.c:99
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition typcache.c:2666
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition typcache.c:118
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition typcache.c:117
static int32 RecordCacheArrayLen
Definition typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition typcache.c:2044
static HTAB * TypeCacheHash
Definition typcache.c:79
static uint64 tupledesc_id_counter
Definition typcache.c:313
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition typcache.c:1588
#define TCFLAGS_HAVE_FIELD_HASHING
Definition typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition typcache.c:2028
static const dshash_parameters srtr_record_table_params
Definition typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition typcache.c:1958
static int enum_oid_cmp(const void *left, const void *right)
Definition typcache.c:2911
static void finalize_in_progress_typentries(void)
Definition typcache.c:3175
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition typcache.c:1334
#define TCFLAGS_CHECKED_EQ_OPR
Definition typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition typcache.c:1442
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition typcache.c:389
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition typcache.c:1801
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition typcache.c:1612
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition typcache.c:2017
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition typcache.c:972
#define INVALID_TUPLEDESC_IDENTIFIER
Definition typcache.h:157
#define TYPECACHE_HASH_PROC_FINFO
Definition typcache.h:145
#define TYPECACHE_EQ_OPR
Definition typcache.h:138
#define TYPECACHE_HASH_OPFAMILY
Definition typcache.h:148
#define TYPECACHE_TUPDESC
Definition typcache.h:146
#define TYPECACHE_MULTIRANGE_INFO
Definition typcache.h:154
#define TYPECACHE_EQ_OPR_FINFO
Definition typcache.h:143
#define TYPECACHE_HASH_EXTENDED_PROC
Definition typcache.h:152
#define TYPECACHE_BTREE_OPFAMILY
Definition typcache.h:147
#define TYPECACHE_DOMAIN_BASE_INFO
Definition typcache.h:150
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition typcache.h:151
#define TYPECACHE_RANGE_INFO
Definition typcache.h:149
#define TYPECACHE_GT_OPR
Definition typcache.h:140
#define TYPECACHE_CMP_PROC
Definition typcache.h:141
#define TYPECACHE_LT_OPR
Definition typcache.h:139
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition typcache.h:153
#define TYPECACHE_CMP_PROC_FINFO
Definition typcache.h:144
#define TYPECACHE_HASH_PROC
Definition typcache.h:142