PostgreSQL Source Code git master
typcache.c File Reference
#include "postgres.h"
#include <limits.h>
#include "access/hash.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/parallel.h"
#include "access/relation.h"
#include "access/session.h"
#include "access/table.h"
#include "catalog/pg_am.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_enum.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_range.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "common/int.h"
#include "executor/executor.h"
#include "lib/dshash.h"
#include "optimizer/optimizer.h"
#include "port/pg_bitutils.h"
#include "storage/lwlock.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/fmgroids.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/typcache.h"


Data Structures

struct  RelIdToTypeIdCacheEntry
 
struct  DomainConstraintCache
 
struct  EnumItem
 
struct  TypeCacheEnumData
 
struct  RecordCacheEntry
 
struct  SharedRecordTypmodRegistry
 
struct  SharedRecordTableKey
 
struct  SharedRecordTableEntry
 
struct  SharedTypmodTableEntry
 
struct  RecordCacheArrayEntry
 

Macros

#define TCFLAGS_HAVE_PG_TYPE_DATA   0x000001
 
#define TCFLAGS_CHECKED_BTREE_OPCLASS   0x000002
 
#define TCFLAGS_CHECKED_HASH_OPCLASS   0x000004
 
#define TCFLAGS_CHECKED_EQ_OPR   0x000008
 
#define TCFLAGS_CHECKED_LT_OPR   0x000010
 
#define TCFLAGS_CHECKED_GT_OPR   0x000020
 
#define TCFLAGS_CHECKED_CMP_PROC   0x000040
 
#define TCFLAGS_CHECKED_HASH_PROC   0x000080
 
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC   0x000100
 
#define TCFLAGS_CHECKED_ELEM_PROPERTIES   0x000200
 
#define TCFLAGS_HAVE_ELEM_EQUALITY   0x000400
 
#define TCFLAGS_HAVE_ELEM_COMPARE   0x000800
 
#define TCFLAGS_HAVE_ELEM_HASHING   0x001000
 
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING   0x002000
 
#define TCFLAGS_CHECKED_FIELD_PROPERTIES   0x004000
 
#define TCFLAGS_HAVE_FIELD_EQUALITY   0x008000
 
#define TCFLAGS_HAVE_FIELD_COMPARE   0x010000
 
#define TCFLAGS_HAVE_FIELD_HASHING   0x020000
 
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING   0x040000
 
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS   0x080000
 
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE   0x100000
 
#define TCFLAGS_OPERATOR_FLAGS
 

Typedefs

typedef struct RelIdToTypeIdCacheEntry RelIdToTypeIdCacheEntry
 
typedef struct TypeCacheEnumData TypeCacheEnumData
 
typedef struct RecordCacheEntry RecordCacheEntry
 
typedef struct SharedRecordTableKey SharedRecordTableKey
 
typedef struct SharedRecordTableEntry SharedRecordTableEntry
 
typedef struct SharedTypmodTableEntry SharedTypmodTableEntry
 
typedef struct RecordCacheArrayEntry RecordCacheArrayEntry
 

Functions

static int shared_record_table_compare (const void *a, const void *b, size_t size, void *arg)
 
static uint32 shared_record_table_hash (const void *a, size_t size, void *arg)
 
static void load_typcache_tupdesc (TypeCacheEntry *typentry)
 
static void load_rangetype_info (TypeCacheEntry *typentry)
 
static void load_multirangetype_info (TypeCacheEntry *typentry)
 
static void load_domaintype_info (TypeCacheEntry *typentry)
 
static int dcs_cmp (const void *a, const void *b)
 
static void decr_dcc_refcount (DomainConstraintCache *dcc)
 
static void dccref_deletion_callback (void *arg)
 
static List * prep_domain_constraints (List *constraints, MemoryContext execctx)
 
static bool array_element_has_equality (TypeCacheEntry *typentry)
 
static bool array_element_has_compare (TypeCacheEntry *typentry)
 
static bool array_element_has_hashing (TypeCacheEntry *typentry)
 
static bool array_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_array_element_properties (TypeCacheEntry *typentry)
 
static bool record_fields_have_equality (TypeCacheEntry *typentry)
 
static bool record_fields_have_compare (TypeCacheEntry *typentry)
 
static bool record_fields_have_hashing (TypeCacheEntry *typentry)
 
static bool record_fields_have_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_record_field_properties (TypeCacheEntry *typentry)
 
static bool range_element_has_hashing (TypeCacheEntry *typentry)
 
static bool range_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_range_element_properties (TypeCacheEntry *typentry)
 
static bool multirange_element_has_hashing (TypeCacheEntry *typentry)
 
static bool multirange_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_multirange_element_properties (TypeCacheEntry *typentry)
 
static void TypeCacheRelCallback (Datum arg, Oid relid)
 
static void TypeCacheTypCallback (Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 
static void TypeCacheOpcCallback (Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 
static void TypeCacheConstrCallback (Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 
static void load_enum_cache_data (TypeCacheEntry *tcache)
 
static EnumItem * find_enumitem (TypeCacheEnumData *enumdata, Oid arg)
 
static int enum_oid_cmp (const void *left, const void *right)
 
static void shared_record_typmod_registry_detach (dsm_segment *segment, Datum datum)
 
static TupleDesc find_or_make_matching_shared_tupledesc (TupleDesc tupdesc)
 
static dsa_pointer share_tupledesc (dsa_area *area, TupleDesc tupdesc, uint32 typmod)
 
static void insert_rel_type_cache_if_needed (TypeCacheEntry *typentry)
 
static void delete_rel_type_cache_if_needed (TypeCacheEntry *typentry)
 
static uint32 type_cache_syshash (const void *key, Size keysize)
 
TypeCacheEntry * lookup_type_cache (Oid type_id, int flags)
 
void InitDomainConstraintRef (Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
 
void UpdateDomainConstraintRef (DomainConstraintRef *ref)
 
bool DomainHasConstraints (Oid type_id)
 
static void ensure_record_cache_typmod_slot_exists (int32 typmod)
 
static TupleDesc lookup_rowtype_tupdesc_internal (Oid type_id, int32 typmod, bool noError)
 
TupleDesc lookup_rowtype_tupdesc (Oid type_id, int32 typmod)
 
TupleDesc lookup_rowtype_tupdesc_noerror (Oid type_id, int32 typmod, bool noError)
 
TupleDesc lookup_rowtype_tupdesc_copy (Oid type_id, int32 typmod)
 
TupleDesc lookup_rowtype_tupdesc_domain (Oid type_id, int32 typmod, bool noError)
 
static uint32 record_type_typmod_hash (const void *data, size_t size)
 
static int record_type_typmod_compare (const void *a, const void *b, size_t size)
 
void assign_record_type_typmod (TupleDesc tupDesc)
 
uint64 assign_record_type_identifier (Oid type_id, int32 typmod)
 
size_t SharedRecordTypmodRegistryEstimate (void)
 
void SharedRecordTypmodRegistryInit (SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
 
void SharedRecordTypmodRegistryAttach (SharedRecordTypmodRegistry *registry)
 
static void InvalidateCompositeTypeCacheEntry (TypeCacheEntry *typentry)
 
static bool enum_known_sorted (TypeCacheEnumData *enumdata, Oid arg)
 
int compare_values_of_enum (TypeCacheEntry *tcache, Oid arg1, Oid arg2)
 
static void finalize_in_progress_typentries (void)
 
void AtEOXact_TypeCache (void)
 
void AtEOSubXact_TypeCache (void)
 

Variables

static HTAB * TypeCacheHash = NULL
 
static HTAB * RelIdToTypeIdCacheHash = NULL
 
static TypeCacheEntry * firstDomainTypeEntry = NULL
 
static Oid * in_progress_list
 
static int in_progress_list_len
 
static int in_progress_list_maxlen
 
static const dshash_parameters srtr_record_table_params
 
static const dshash_parameters srtr_typmod_table_params
 
static HTAB * RecordCacheHash = NULL
 
static RecordCacheArrayEntry * RecordCacheArray = NULL
 
static int32 RecordCacheArrayLen = 0
 
static int32 NextRecordTypmod = 0
 
static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER
 

Macro Definition Documentation

◆ TCFLAGS_CHECKED_BTREE_OPCLASS

#define TCFLAGS_CHECKED_BTREE_OPCLASS   0x000002

Definition at line 100 of file typcache.c.

◆ TCFLAGS_CHECKED_CMP_PROC

#define TCFLAGS_CHECKED_CMP_PROC   0x000040

Definition at line 105 of file typcache.c.

◆ TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS

#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS   0x080000

Definition at line 118 of file typcache.c.

◆ TCFLAGS_CHECKED_ELEM_PROPERTIES

#define TCFLAGS_CHECKED_ELEM_PROPERTIES   0x000200

Definition at line 108 of file typcache.c.

◆ TCFLAGS_CHECKED_EQ_OPR

#define TCFLAGS_CHECKED_EQ_OPR   0x000008

Definition at line 102 of file typcache.c.

◆ TCFLAGS_CHECKED_FIELD_PROPERTIES

#define TCFLAGS_CHECKED_FIELD_PROPERTIES   0x004000

Definition at line 113 of file typcache.c.

◆ TCFLAGS_CHECKED_GT_OPR

#define TCFLAGS_CHECKED_GT_OPR   0x000020

Definition at line 104 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_EXTENDED_PROC

#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC   0x000100

Definition at line 107 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_OPCLASS

#define TCFLAGS_CHECKED_HASH_OPCLASS   0x000004

Definition at line 101 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_PROC

#define TCFLAGS_CHECKED_HASH_PROC   0x000080

Definition at line 106 of file typcache.c.

◆ TCFLAGS_CHECKED_LT_OPR

#define TCFLAGS_CHECKED_LT_OPR   0x000010

Definition at line 103 of file typcache.c.

◆ TCFLAGS_DOMAIN_BASE_IS_COMPOSITE

#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE   0x100000

Definition at line 119 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_COMPARE

#define TCFLAGS_HAVE_ELEM_COMPARE   0x000800

Definition at line 110 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_EQUALITY

#define TCFLAGS_HAVE_ELEM_EQUALITY   0x000400

Definition at line 109 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_EXTENDED_HASHING

#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING   0x002000

Definition at line 112 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_HASHING

#define TCFLAGS_HAVE_ELEM_HASHING   0x001000

Definition at line 111 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_COMPARE

#define TCFLAGS_HAVE_FIELD_COMPARE   0x010000

Definition at line 115 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_EQUALITY

#define TCFLAGS_HAVE_FIELD_EQUALITY   0x008000

Definition at line 114 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_EXTENDED_HASHING

#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING   0x040000

Definition at line 117 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_HASHING

#define TCFLAGS_HAVE_FIELD_HASHING   0x020000

Definition at line 116 of file typcache.c.

◆ TCFLAGS_HAVE_PG_TYPE_DATA

#define TCFLAGS_HAVE_PG_TYPE_DATA   0x000001

Definition at line 99 of file typcache.c.

◆ TCFLAGS_OPERATOR_FLAGS

#define TCFLAGS_OPERATOR_FLAGS
Value:
(~(TCFLAGS_HAVE_PG_TYPE_DATA | \
   TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
   TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))

Definition at line 122 of file typcache.c.
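
A hedged illustration (not verbatim typcache.c code) of how these flag bits are used: each TCFLAGS_CHECKED_* bit records that the corresponding lookup has already been done, the matching TCFLAGS_HAVE_* bit records its outcome, and TCFLAGS_OPERATOR_FLAGS names every operator-related bit so that invalidation code can clear them all in one step.

 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
     cache_array_element_properties(typentry);   /* sets the HAVE_ELEM_* bits */
 bool have_eq = (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;

 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;     /* forget all cached operator info */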

 138struct DomainConstraintCache
 139{
140 List *constraints; /* list of DomainConstraintState nodes */
141 MemoryContext dccContext; /* memory context holding all associated data */
142 long dccRefCount; /* number of references to this struct */
143};
144
145/* Private information to support comparisons of enum values */
146typedef struct
147{
148 Oid enum_oid; /* OID of one enum value */
149 float4 sort_order; /* its sort position */
150} EnumItem;
151
152typedef struct TypeCacheEnumData
153{
154 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
155 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
156 int num_values; /* total number of values in enum */
 157 EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER];
 158} TypeCacheEnumData;
 159
160/*
161 * We use a separate table for storing the definitions of non-anonymous
162 * record types. Once defined, a record type will be remembered for the
163 * life of the backend. Subsequent uses of the "same" record type (where
164 * sameness means equalRowTypes) will refer to the existing table entry.
165 *
166 * Stored record types are remembered in a linear array of TupleDescs,
167 * which can be indexed quickly with the assigned typmod. There is also
168 * a hash table to speed searches for matching TupleDescs.
169 */
170
171typedef struct RecordCacheEntry
172{
 173 TupleDesc tupdesc;
 174} RecordCacheEntry;
 175
176/*
177 * To deal with non-anonymous record types that are exchanged by backends
178 * involved in a parallel query, we also need a shared version of the above.
179 */
 180struct SharedRecordTypmodRegistry
 181{
 182 /* A hash table for finding a matching TupleDesc. */
 183 dshash_table_handle record_table_handle;
 184 /* A hash table for finding a TupleDesc by typmod. */
 185 dshash_table_handle typmod_table_handle;
 186 /* A source of new record typmod numbers. */
 187 pg_atomic_uint32 next_typmod;
 188};
189
190/*
191 * When using shared tuple descriptors as hash table keys we need a way to be
192 * able to search for an equal shared TupleDesc using a backend-local
193 * TupleDesc. So we use this type which can hold either, and hash and compare
194 * functions that know how to handle both.
195 */
196typedef struct SharedRecordTableKey
197{
198 union
199 {
 200 dsa_pointer shared_tupdesc;
 201 TupleDesc local_tupdesc;
 202 } u;
 203 bool shared;
 204} SharedRecordTableKey;
 205
206/*
207 * The shared version of RecordCacheEntry. This lets us look up a typmod
208 * using a TupleDesc which may be in local or shared memory.
209 */
210typedef struct SharedRecordTableEntry
211{
 212 SharedRecordTableKey key;
 213} SharedRecordTableEntry;
 214
215/*
216 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
217 * up a TupleDesc in shared memory using a typmod.
218 */
219typedef struct SharedTypmodTableEntry
220{
 221 uint32 typmod;
 222 dsa_pointer shared_tupdesc;
 223} SharedTypmodTableEntry;
 224
225static Oid *in_progress_list;
226static int in_progress_list_len;
227static int in_progress_list_maxlen;
228
229/*
230 * A comparator function for SharedRecordTableKey.
231 */
232static int
233shared_record_table_compare(const void *a, const void *b, size_t size,
234 void *arg)
235{
236 dsa_area *area = (dsa_area *) arg;
237 const SharedRecordTableKey *k1 = a;
238 const SharedRecordTableKey *k2 = b;
241
242 if (k1->shared)
243 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
244 else
245 t1 = k1->u.local_tupdesc;
246
247 if (k2->shared)
248 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
249 else
250 t2 = k2->u.local_tupdesc;
251
252 return equalRowTypes(t1, t2) ? 0 : 1;
253}
254
255/*
256 * A hash function for SharedRecordTableKey.
257 */
258static uint32
259shared_record_table_hash(const void *a, size_t size, void *arg)
260{
261 dsa_area *area = arg;
262 const SharedRecordTableKey *k = a;
263 TupleDesc t;
264
265 if (k->shared)
267 else
268 t = k->u.local_tupdesc;
269
270 return hashRowType(t);
271}
272
273/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
275 sizeof(SharedRecordTableKey), /* unused */
281};
282
283/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
285 sizeof(uint32),
291};
292
293/* hashtable for recognizing registered record types */
294static HTAB *RecordCacheHash = NULL;
295
296typedef struct RecordCacheArrayEntry
297{
298 uint64 id;
 299 TupleDesc tupdesc;
 300} RecordCacheArrayEntry;
 301
302/* array of info about registered record types, indexed by assigned typmod */
 303static RecordCacheArrayEntry *RecordCacheArray = NULL;
 304static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
305static int32 NextRecordTypmod = 0; /* number of entries used */
306
307/*
308 * Process-wide counter for generating unique tupledesc identifiers.
309 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
310 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
311 */
 312static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
 313
314static void load_typcache_tupdesc(TypeCacheEntry *typentry);
315static void load_rangetype_info(TypeCacheEntry *typentry);
316static void load_multirangetype_info(TypeCacheEntry *typentry);
317static void load_domaintype_info(TypeCacheEntry *typentry);
318static int dcs_cmp(const void *a, const void *b);
320static void dccref_deletion_callback(void *arg);
322static bool array_element_has_equality(TypeCacheEntry *typentry);
323static bool array_element_has_compare(TypeCacheEntry *typentry);
324static bool array_element_has_hashing(TypeCacheEntry *typentry);
327static bool record_fields_have_equality(TypeCacheEntry *typentry);
328static bool record_fields_have_compare(TypeCacheEntry *typentry);
329static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331static void cache_record_field_properties(TypeCacheEntry *typentry);
332static bool range_element_has_hashing(TypeCacheEntry *typentry);
338static void TypeCacheRelCallback(Datum arg, Oid relid);
340 uint32 hashvalue);
342 uint32 hashvalue);
344 uint32 hashvalue);
345static void load_enum_cache_data(TypeCacheEntry *tcache);
347static int enum_oid_cmp(const void *left, const void *right);
349 Datum datum);
351static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
352 uint32 typmod);
355
356
357/*
358 * Hash function compatible with one-arg system cache hash function.
359 */
360static uint32
361type_cache_syshash(const void *key, Size keysize)
362{
363 Assert(keysize == sizeof(Oid));
364 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
365}
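
/*
 * Illustrative sketch (an assumption, not part of typcache.c): because
 * TypeCacheHash uses this syscache-compatible hash function, an invalidation
 * callback can visit just the entries whose hash value matches the incoming
 * syscache message instead of scanning the whole table.
 */
static void
example_invalidate_matching_entries(uint32 hashvalue)
{
	HASH_SEQ_STATUS status;
	TypeCacheEntry *typentry;

	hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
	{
		/* ... mark this entry's cached state as needing recomputation ... */
	}
}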
366
367/*
368 * lookup_type_cache
369 *
370 * Fetch the type cache entry for the specified datatype, and make sure that
371 * all the fields requested by bits in 'flags' are valid.
372 *
373 * The result is never NULL --- we will ereport() if the passed type OID is
374 * invalid. Note however that we may fail to find one or more of the
375 * values requested by 'flags'; the caller needs to check whether the fields
376 * are InvalidOid or not.
377 *
378 * Note that while filling TypeCacheEntry we might process concurrent
379 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
380 * invalidated. In this case, we typically only clear flags while values are
381 * still available for the caller. It's expected that the caller holds
382 * enough locks on type-depending objects that the values are still relevant.
383 * It's also important that the tupdesc is filled after all other
384 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
385 * invalidated during the lookup_type_cache() call.
386 */
388lookup_type_cache(Oid type_id, int flags)
389{
390 TypeCacheEntry *typentry;
391 bool found;
393
394 if (TypeCacheHash == NULL)
395 {
396 /* First time through: initialize the hash table */
397 HASHCTL ctl;
398 int allocsize;
399
400 ctl.keysize = sizeof(Oid);
401 ctl.entrysize = sizeof(TypeCacheEntry);
402
403 /*
404 * TypeCacheEntry takes hash value from the system cache. For
405 * TypeCacheHash we use the same hash in order to speedup search by
406 * hash value. This is used by hash_seq_init_with_hash_value().
407 */
408 ctl.hash = type_cache_syshash;
409
410 TypeCacheHash = hash_create("Type information cache", 64,
412
414
415 ctl.keysize = sizeof(Oid);
416 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
417 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
419
420 /* Also set up callbacks for SI invalidations */
425
426 /* Also make sure CacheMemoryContext exists */
429
430 /*
431 * reserve enough in_progress_list slots for many cases
432 */
433 allocsize = 4;
436 allocsize * sizeof(*in_progress_list));
437 in_progress_list_maxlen = allocsize;
438 }
439
441
442 /* Register to catch invalidation messages */
444 {
445 int allocsize;
446
447 allocsize = in_progress_list_maxlen * 2;
449 allocsize * sizeof(*in_progress_list));
450 in_progress_list_maxlen = allocsize;
451 }
454
455 /* Try to look up an existing entry */
457 &type_id,
458 HASH_FIND, NULL);
459 if (typentry == NULL)
460 {
461 /*
462 * If we didn't find one, we want to make one. But first look up the
463 * pg_type row, just to make sure we don't make a cache entry for an
464 * invalid type OID. If the type OID is not valid, present a
465 * user-facing error, since some code paths such as domain_in() allow
466 * this function to be reached with a user-supplied OID.
467 */
468 HeapTuple tp;
470
472 if (!HeapTupleIsValid(tp))
475 errmsg("type with OID %u does not exist", type_id)));
477 if (!typtup->typisdefined)
480 errmsg("type \"%s\" is only a shell",
481 NameStr(typtup->typname))));
482
483 /* Now make the typcache entry */
485 &type_id,
486 HASH_ENTER, &found);
487 Assert(!found); /* it wasn't there a moment ago */
488
489 MemSet(typentry, 0, sizeof(TypeCacheEntry));
490
491 /* These fields can never change, by definition */
492 typentry->type_id = type_id;
493 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
494
495 /* Keep this part in sync with the code below */
496 typentry->typlen = typtup->typlen;
497 typentry->typbyval = typtup->typbyval;
498 typentry->typalign = typtup->typalign;
499 typentry->typstorage = typtup->typstorage;
500 typentry->typtype = typtup->typtype;
501 typentry->typrelid = typtup->typrelid;
502 typentry->typsubscript = typtup->typsubscript;
503 typentry->typelem = typtup->typelem;
504 typentry->typarray = typtup->typarray;
505 typentry->typcollation = typtup->typcollation;
506 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
507
508 /* If it's a domain, immediately thread it into the domain cache list */
509 if (typentry->typtype == TYPTYPE_DOMAIN)
510 {
512 firstDomainTypeEntry = typentry;
513 }
514
515 ReleaseSysCache(tp);
516 }
517 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
518 {
519 /*
520 * We have an entry, but its pg_type row got changed, so reload the
521 * data obtained directly from pg_type.
522 */
523 HeapTuple tp;
525
527 if (!HeapTupleIsValid(tp))
530 errmsg("type with OID %u does not exist", type_id)));
532 if (!typtup->typisdefined)
535 errmsg("type \"%s\" is only a shell",
536 NameStr(typtup->typname))));
537
538 /*
539 * Keep this part in sync with the code above. Many of these fields
540 * shouldn't ever change, particularly typtype, but copy 'em anyway.
541 */
542 typentry->typlen = typtup->typlen;
543 typentry->typbyval = typtup->typbyval;
544 typentry->typalign = typtup->typalign;
545 typentry->typstorage = typtup->typstorage;
546 typentry->typtype = typtup->typtype;
547 typentry->typrelid = typtup->typrelid;
548 typentry->typsubscript = typtup->typsubscript;
549 typentry->typelem = typtup->typelem;
550 typentry->typarray = typtup->typarray;
551 typentry->typcollation = typtup->typcollation;
552 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
553
554 ReleaseSysCache(tp);
555 }
556
557 /*
558 * Look up opclasses if we haven't already and any dependent info is
559 * requested.
560 */
566 {
567 Oid opclass;
568
569 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
570 if (OidIsValid(opclass))
571 {
572 typentry->btree_opf = get_opclass_family(opclass);
573 typentry->btree_opintype = get_opclass_input_type(opclass);
574 }
575 else
576 {
577 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
578 }
579
580 /*
581 * Reset information derived from btree opclass. Note in particular
582 * that we'll redetermine the eq_opr even if we previously found one;
583 * this matters in case a btree opclass has been added to a type that
584 * previously had only a hash opclass.
585 */
586 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
591 }
592
593 /*
594 * If we need to look up equality operator, and there's no btree opclass,
595 * force lookup of hash opclass.
596 */
597 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
598 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
599 typentry->btree_opf == InvalidOid)
601
606 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
607 {
608 Oid opclass;
609
610 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
611 if (OidIsValid(opclass))
612 {
613 typentry->hash_opf = get_opclass_family(opclass);
614 typentry->hash_opintype = get_opclass_input_type(opclass);
615 }
616 else
617 {
618 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
619 }
620
621 /*
622 * Reset information derived from hash opclass. We do *not* reset the
623 * eq_opr; if we already found one from the btree opclass, that
624 * decision is still good.
625 */
626 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
629 }
630
631 /*
632 * Look for requested operators and functions, if we haven't already.
633 */
634 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
635 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
636 {
637 Oid eq_opr = InvalidOid;
638
639 if (typentry->btree_opf != InvalidOid)
640 eq_opr = get_opfamily_member(typentry->btree_opf,
641 typentry->btree_opintype,
642 typentry->btree_opintype,
644 if (eq_opr == InvalidOid &&
645 typentry->hash_opf != InvalidOid)
646 eq_opr = get_opfamily_member(typentry->hash_opf,
647 typentry->hash_opintype,
648 typentry->hash_opintype,
650
651 /*
652 * If the proposed equality operator is array_eq or record_eq, check
653 * to see if the element type or column types support equality. If
654 * not, array_eq or record_eq would fail at runtime, so we don't want
655 * to report that the type has equality. (We can omit similar
656 * checking for ranges and multiranges because ranges can't be created
657 * in the first place unless their subtypes support equality.)
658 */
659 if (eq_opr == ARRAY_EQ_OP &&
661 eq_opr = InvalidOid;
662 else if (eq_opr == RECORD_EQ_OP &&
664 eq_opr = InvalidOid;
665
666 /* Force update of eq_opr_finfo only if we're changing state */
667 if (typentry->eq_opr != eq_opr)
668 typentry->eq_opr_finfo.fn_oid = InvalidOid;
669
670 typentry->eq_opr = eq_opr;
671
672 /*
673 * Reset info about hash functions whenever we pick up new info about
674 * equality operator. This is so we can ensure that the hash
675 * functions match the operator.
676 */
677 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
679 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
680 }
681 if ((flags & TYPECACHE_LT_OPR) &&
682 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
683 {
684 Oid lt_opr = InvalidOid;
685
686 if (typentry->btree_opf != InvalidOid)
687 lt_opr = get_opfamily_member(typentry->btree_opf,
688 typentry->btree_opintype,
689 typentry->btree_opintype,
691
692 /*
693 * As above, make sure array_cmp or record_cmp will succeed; but again
694 * we need no special check for ranges or multiranges.
695 */
696 if (lt_opr == ARRAY_LT_OP &&
697 !array_element_has_compare(typentry))
698 lt_opr = InvalidOid;
699 else if (lt_opr == RECORD_LT_OP &&
701 lt_opr = InvalidOid;
702
703 typentry->lt_opr = lt_opr;
704 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
705 }
706 if ((flags & TYPECACHE_GT_OPR) &&
707 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
708 {
709 Oid gt_opr = InvalidOid;
710
711 if (typentry->btree_opf != InvalidOid)
712 gt_opr = get_opfamily_member(typentry->btree_opf,
713 typentry->btree_opintype,
714 typentry->btree_opintype,
716
717 /*
718 * As above, make sure array_cmp or record_cmp will succeed; but again
719 * we need no special check for ranges or multiranges.
720 */
721 if (gt_opr == ARRAY_GT_OP &&
722 !array_element_has_compare(typentry))
723 gt_opr = InvalidOid;
724 else if (gt_opr == RECORD_GT_OP &&
726 gt_opr = InvalidOid;
727
728 typentry->gt_opr = gt_opr;
729 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
730 }
732 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
733 {
734 Oid cmp_proc = InvalidOid;
735
736 if (typentry->btree_opf != InvalidOid)
737 cmp_proc = get_opfamily_proc(typentry->btree_opf,
738 typentry->btree_opintype,
739 typentry->btree_opintype,
741
742 /*
743 * As above, make sure array_cmp or record_cmp will succeed; but again
744 * we need no special check for ranges or multiranges.
745 */
746 if (cmp_proc == F_BTARRAYCMP &&
747 !array_element_has_compare(typentry))
748 cmp_proc = InvalidOid;
749 else if (cmp_proc == F_BTRECORDCMP &&
751 cmp_proc = InvalidOid;
752
753 /* Force update of cmp_proc_finfo only if we're changing state */
754 if (typentry->cmp_proc != cmp_proc)
755 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
756
757 typentry->cmp_proc = cmp_proc;
758 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
759 }
761 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
762 {
763 Oid hash_proc = InvalidOid;
764
765 /*
766 * We insist that the eq_opr, if one has been determined, match the
767 * hash opclass; else report there is no hash function.
768 */
769 if (typentry->hash_opf != InvalidOid &&
770 (!OidIsValid(typentry->eq_opr) ||
771 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
772 typentry->hash_opintype,
773 typentry->hash_opintype,
775 hash_proc = get_opfamily_proc(typentry->hash_opf,
776 typentry->hash_opintype,
777 typentry->hash_opintype,
779
780 /*
781 * As above, make sure hash_array, hash_record, or hash_range will
782 * succeed.
783 */
784 if (hash_proc == F_HASH_ARRAY &&
785 !array_element_has_hashing(typentry))
786 hash_proc = InvalidOid;
787 else if (hash_proc == F_HASH_RECORD &&
789 hash_proc = InvalidOid;
790 else if (hash_proc == F_HASH_RANGE &&
791 !range_element_has_hashing(typentry))
792 hash_proc = InvalidOid;
793
794 /*
795 * Likewise for hash_multirange.
796 */
797 if (hash_proc == F_HASH_MULTIRANGE &&
799 hash_proc = InvalidOid;
800
801 /* Force update of hash_proc_finfo only if we're changing state */
802 if (typentry->hash_proc != hash_proc)
804
805 typentry->hash_proc = hash_proc;
806 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
807 }
808 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
811 {
812 Oid hash_extended_proc = InvalidOid;
813
814 /*
815 * We insist that the eq_opr, if one has been determined, match the
816 * hash opclass; else report there is no hash function.
817 */
818 if (typentry->hash_opf != InvalidOid &&
819 (!OidIsValid(typentry->eq_opr) ||
820 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
821 typentry->hash_opintype,
822 typentry->hash_opintype,
824 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
825 typentry->hash_opintype,
826 typentry->hash_opintype,
828
829 /*
830 * As above, make sure hash_array_extended, hash_record_extended, or
831 * hash_range_extended will succeed.
832 */
833 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
835 hash_extended_proc = InvalidOid;
836 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
838 hash_extended_proc = InvalidOid;
839 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
841 hash_extended_proc = InvalidOid;
842
843 /*
844 * Likewise for hash_multirange_extended.
845 */
846 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
848 hash_extended_proc = InvalidOid;
849
850 /* Force update of proc finfo only if we're changing state */
851 if (typentry->hash_extended_proc != hash_extended_proc)
853
854 typentry->hash_extended_proc = hash_extended_proc;
856 }
857
858 /*
859 * Set up fmgr lookup info as requested
860 *
861 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
862 * which is not quite right (they're really in the hash table's private
863 * memory context) but this will do for our purposes.
864 *
865 * Note: the code above avoids invalidating the finfo structs unless the
866 * referenced operator/function OID actually changes. This is to prevent
867 * unnecessary leakage of any subsidiary data attached to an finfo, since
868 * that would cause session-lifespan memory leaks.
869 */
870 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
871 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
872 typentry->eq_opr != InvalidOid)
873 {
875
876 eq_opr_func = get_opcode(typentry->eq_opr);
877 if (eq_opr_func != InvalidOid)
880 }
881 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
882 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
883 typentry->cmp_proc != InvalidOid)
884 {
885 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
887 }
888 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
889 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
890 typentry->hash_proc != InvalidOid)
891 {
892 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
894 }
897 typentry->hash_extended_proc != InvalidOid)
898 {
900 &typentry->hash_extended_proc_finfo,
902 }
903
904 /*
905 * If it's a composite type (row type), get tupdesc if requested
906 */
907 if ((flags & TYPECACHE_TUPDESC) &&
908 typentry->tupDesc == NULL &&
909 typentry->typtype == TYPTYPE_COMPOSITE)
910 {
911 load_typcache_tupdesc(typentry);
912 }
913
914 /*
915 * If requested, get information about a range type
916 *
917 * This includes making sure that the basic info about the range element
918 * type is up-to-date.
919 */
920 if ((flags & TYPECACHE_RANGE_INFO) &&
921 typentry->typtype == TYPTYPE_RANGE)
922 {
923 if (typentry->rngelemtype == NULL)
924 load_rangetype_info(typentry);
925 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
926 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
927 }
928
929 /*
930 * If requested, get information about a multirange type
931 */
932 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
933 typentry->rngtype == NULL &&
934 typentry->typtype == TYPTYPE_MULTIRANGE)
935 {
936 load_multirangetype_info(typentry);
937 }
938
939 /*
940 * If requested, get information about a domain type
941 */
942 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
943 typentry->domainBaseType == InvalidOid &&
944 typentry->typtype == TYPTYPE_DOMAIN)
945 {
946 typentry->domainBaseTypmod = -1;
947 typentry->domainBaseType =
948 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
949 }
950 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
951 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
952 typentry->typtype == TYPTYPE_DOMAIN)
953 {
954 load_domaintype_info(typentry);
955 }
956
957 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
958
961
963
964 return typentry;
965}
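
/*
 * Illustrative caller sketch (an assumption, not part of typcache.c): fetch
 * the equality operator for a type via lookup_type_cache() and verify that
 * one actually exists, as the header comment above requires of callers.
 */
static Oid
example_get_eq_opr(Oid element_type)
{
	TypeCacheEntry *typentry;

	typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR);
	if (!OidIsValid(typentry->eq_opr))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_FUNCTION),
				 errmsg("could not identify an equality operator for type %s",
						format_type_be(element_type))));
	return typentry->eq_opr;
}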
966
967/*
968 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
969 */
970static void
972{
973 Relation rel;
974
975 if (!OidIsValid(typentry->typrelid)) /* should not happen */
976 elog(ERROR, "invalid typrelid for composite type %u",
977 typentry->type_id);
978 rel = relation_open(typentry->typrelid, AccessShareLock);
979 Assert(rel->rd_rel->reltype == typentry->type_id);
980
981 /*
982 * Link to the tupdesc and increment its refcount (we assert it's a
983 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
984 * because the reference mustn't be entered in the current resource owner;
985 * it can outlive the current query.
986 */
987 typentry->tupDesc = RelationGetDescr(rel);
988
989 Assert(typentry->tupDesc->tdrefcount > 0);
990 typentry->tupDesc->tdrefcount++;
991
992 /*
993 * In future, we could take some pains to not change tupDesc_identifier if
994 * the tupdesc didn't really change; but for now it's not worth it.
995 */
997
999}
1000
1001/*
1002 * load_rangetype_info --- helper routine to set up range type information
1003 */
1004static void
1006{
1008 HeapTuple tup;
1014 Oid opcintype;
1015 Oid cmpFnOid;
1016
1017 /* get information from pg_range */
1019 /* should not fail, since we already checked typtype ... */
1020 if (!HeapTupleIsValid(tup))
1021 elog(ERROR, "cache lookup failed for range type %u",
1022 typentry->type_id);
1024
1025 subtypeOid = pg_range->rngsubtype;
1026 typentry->rng_collation = pg_range->rngcollation;
1027 opclassOid = pg_range->rngsubopc;
1028 canonicalOid = pg_range->rngcanonical;
1029 subdiffOid = pg_range->rngsubdiff;
1030
1032
1033 /* get opclass properties and look up the comparison function */
1036 typentry->rng_opfamily = opfamilyOid;
1037
1038 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1039 BTORDER_PROC);
1041 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1042 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1043
1044 /* set up cached fmgrinfo structs */
1053
1054 /* Lastly, set up link to the element type --- this marks data valid */
1056}
1057
1058/*
1059 * load_multirangetype_info --- helper routine to set up multirange type
1060 * information
1061 */
1062static void
1064{
1066
1069 elog(ERROR, "cache lookup failed for multirange type %u",
1070 typentry->type_id);
1071
1073}
1074
1075/*
1076 * load_domaintype_info --- helper routine to set up domain constraint info
1077 *
1078 * Note: we assume we're called in a relatively short-lived context, so it's
1079 * okay to leak data into the current context while scanning pg_constraint.
1080 * We build the new DomainConstraintCache data in a context underneath
1081 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1082 * complete.
1083 */
1084static void
1086{
1087 Oid typeOid = typentry->type_id;
1089 bool notNull = false;
1091 int cconslen;
1094
1095 /*
1096 * If we're here, any existing constraint info is stale, so release it.
1097 * For safety, be sure to null the link before trying to delete the data.
1098 */
1099 if (typentry->domainData)
1100 {
1101 dcc = typentry->domainData;
1102 typentry->domainData = NULL;
1103 decr_dcc_refcount(dcc);
1104 }
1105
1106 /*
1107 * We try to optimize the common case of no domain constraints, so don't
1108 * create the dcc object and context until we find a constraint. Likewise
1109 * for the temp sorting array.
1110 */
1111 dcc = NULL;
1112 ccons = NULL;
1113 cconslen = 0;
1114
1115 /*
1116 * Scan pg_constraint for relevant constraints. We want to find
1117 * constraints for not just this domain, but any ancestor domains, so the
1118 * outer loop crawls up the domain stack.
1119 */
1121
1122 for (;;)
1123 {
1124 HeapTuple tup;
1127 int nccons = 0;
1128 ScanKeyData key[1];
1129 SysScanDesc scan;
1130
1132 if (!HeapTupleIsValid(tup))
1133 elog(ERROR, "cache lookup failed for type %u", typeOid);
1135
1136 if (typTup->typtype != TYPTYPE_DOMAIN)
1137 {
1138 /* Not a domain, so done */
1140 break;
1141 }
1142
1143 /* Test for NOT NULL Constraint */
1144 if (typTup->typnotnull)
1145 notNull = true;
1146
1147 /* Look for CHECK Constraints on this domain */
1148 ScanKeyInit(&key[0],
1151 ObjectIdGetDatum(typeOid));
1152
1154 NULL, 1, key);
1155
1157 {
1159 Datum val;
1160 bool isNull;
1161 char *constring;
1162 Expr *check_expr;
1164
1165 /* Ignore non-CHECK constraints */
1166 if (c->contype != CONSTRAINT_CHECK)
1167 continue;
1168
1169 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1171 conRel->rd_att, &isNull);
1172 if (isNull)
1173 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1174 NameStr(typTup->typname), NameStr(c->conname));
1175
1176 /* Create the DomainConstraintCache object and context if needed */
1177 if (dcc == NULL)
1178 {
1179 MemoryContext cxt;
1180
1182 "Domain constraints",
1184 dcc = (DomainConstraintCache *)
1186 dcc->constraints = NIL;
1187 dcc->dccContext = cxt;
1188 dcc->dccRefCount = 0;
1189 }
1190
1191 /* Convert conbin to a node tree, still in caller's context */
1193 check_expr = (Expr *) stringToNode(constring);
1194
1195 /*
1196 * Plan the expression, since ExecInitExpr will expect that.
1197 *
1198 * Note: caching the result of expression_planner() is not very
1199 * good practice. Ideally we'd use a CachedExpression here so
1200 * that we would react promptly to, eg, changes in inlined
1201 * functions. However, because we don't support mutable domain
1202 * CHECK constraints, it's not really clear that it's worth the
1203 * extra overhead to do that.
1204 */
1205 check_expr = expression_planner(check_expr);
1206
1207 /* Create only the minimally needed stuff in dccContext */
1209
1212 r->name = pstrdup(NameStr(c->conname));
1213 r->check_expr = copyObject(check_expr);
1214 r->check_exprstate = NULL;
1215
1217
1218 /* Accumulate constraints in an array, for sorting below */
1219 if (ccons == NULL)
1220 {
1221 cconslen = 8;
1224 }
1225 else if (nccons >= cconslen)
1226 {
1227 cconslen *= 2;
1230 }
1231 ccons[nccons++] = r;
1232 }
1233
1234 systable_endscan(scan);
1235
1236 if (nccons > 0)
1237 {
1238 /*
1239 * Sort the items for this domain, so that CHECKs are applied in a
1240 * deterministic order.
1241 */
1242 if (nccons > 1)
1244
1245 /*
1246 * Now attach them to the overall list. Use lcons() here because
1247 * constraints of parent domains should be applied earlier.
1248 */
1250 while (nccons > 0)
1251 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1253 }
1254
1255 /* loop to next domain in stack */
1256 typeOid = typTup->typbasetype;
1258 }
1259
1261
1262 /*
1263 * Only need to add one NOT NULL check regardless of how many domains in
1264 * the stack request it.
1265 */
1266 if (notNull)
1267 {
1269
1270 /* Create the DomainConstraintCache object and context if needed */
1271 if (dcc == NULL)
1272 {
1273 MemoryContext cxt;
1274
1276 "Domain constraints",
1278 dcc = (DomainConstraintCache *)
1280 dcc->constraints = NIL;
1281 dcc->dccContext = cxt;
1282 dcc->dccRefCount = 0;
1283 }
1284
1285 /* Create node trees in DomainConstraintCache's context */
1287
1289
1291 r->name = pstrdup("NOT NULL");
1292 r->check_expr = NULL;
1293 r->check_exprstate = NULL;
1294
1295 /* lcons to apply the nullness check FIRST */
1296 dcc->constraints = lcons(r, dcc->constraints);
1297
1299 }
1300
1301 /*
1302 * If we made a constraint object, move it into CacheMemoryContext and
1303 * attach it to the typcache entry.
1304 */
1305 if (dcc)
1306 {
1308 typentry->domainData = dcc;
1309 dcc->dccRefCount++; /* count the typcache's reference */
1310 }
1311
1312 /* Either way, the typcache entry's domain data is now valid. */
1314}
1315
1316/*
1317 * qsort comparator to sort DomainConstraintState pointers by name
1318 */
1319static int
1320dcs_cmp(const void *a, const void *b)
1321{
1322 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1323 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1324
1325 return strcmp((*ca)->name, (*cb)->name);
1326}
1327
1328/*
1329 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1330 * and free it if no references remain
1331 */
1332static void
1334{
1335 Assert(dcc->dccRefCount > 0);
1336 if (--(dcc->dccRefCount) <= 0)
1338}
1339
1340/*
1341 * Context reset/delete callback for a DomainConstraintRef
1342 */
1343static void
1345{
1347 DomainConstraintCache *dcc = ref->dcc;
1348
1349 /* Paranoia --- be sure link is nulled before trying to release */
1350 if (dcc)
1351 {
1352 ref->constraints = NIL;
1353 ref->dcc = NULL;
1354 decr_dcc_refcount(dcc);
1355 }
1356}
1357
1358/*
1359 * prep_domain_constraints --- prepare domain constraints for execution
1360 *
1361 * The expression trees stored in the DomainConstraintCache's list are
1362 * converted to executable expression state trees stored in execctx.
1363 */
1364static List *
1366{
1367 List *result = NIL;
1369 ListCell *lc;
1370
1372
1373 foreach(lc, constraints)
1374 {
1377
1379 newr->constrainttype = r->constrainttype;
1380 newr->name = r->name;
1381 newr->check_expr = r->check_expr;
1382 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1383
1384 result = lappend(result, newr);
1385 }
1386
1388
1389 return result;
1390}
1391
1392/*
1393 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1394 *
1395 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1396 * lives. The ref will be cleaned up when that context is reset/deleted.
1397 *
1398 * Caller must also tell us whether it wants check_exprstate fields to be
1399 * computed in the DomainConstraintState nodes attached to this ref.
1400 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1401 */
1402void
1404 MemoryContext refctx, bool need_exprstate)
1405{
1406 /* Look up the typcache entry --- we assume it survives indefinitely */
1408 ref->need_exprstate = need_exprstate;
1409 /* For safety, establish the callback before acquiring a refcount */
1410 ref->refctx = refctx;
1411 ref->dcc = NULL;
1412 ref->callback.func = dccref_deletion_callback;
1413 ref->callback.arg = ref;
1414 MemoryContextRegisterResetCallback(refctx, &ref->callback);
1415 /* Acquire refcount if there are constraints, and set up exported list */
1416 if (ref->tcache->domainData)
1417 {
1418 ref->dcc = ref->tcache->domainData;
1419 ref->dcc->dccRefCount++;
1420 if (ref->need_exprstate)
1421 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1422 ref->refctx);
1423 else
1424 ref->constraints = ref->dcc->constraints;
1425 }
1426 else
1427 ref->constraints = NIL;
1428}
1429
1430/*
1431 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1432 *
1433 * If the domain's constraint set changed, ref->constraints is updated to
1434 * point at a new list of cached constraints.
1435 *
1436 * In the normal case where nothing happened to the domain, this is cheap
1437 * enough that it's reasonable (and expected) to check before *each* use
1438 * of the constraint info.
1439 */
1440void
1442{
1443 TypeCacheEntry *typentry = ref->tcache;
1444
1445 /* Make sure typcache entry's data is up to date */
1446 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1447 typentry->typtype == TYPTYPE_DOMAIN)
1448 load_domaintype_info(typentry);
1449
1450 /* Transfer to ref object if there's new info, adjusting refcounts */
1451 if (ref->dcc != typentry->domainData)
1452 {
1453 /* Paranoia --- be sure link is nulled before trying to release */
1454 DomainConstraintCache *dcc = ref->dcc;
1455
1456 if (dcc)
1457 {
1458 /*
1459 * Note: we just leak the previous list of executable domain
1460 * constraints. Alternatively, we could keep those in a child
1461 * context of ref->refctx and free that context at this point.
1462 * However, in practice this code path will be taken so seldom
1463 * that the extra bookkeeping for a child context doesn't seem
1464 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1465 */
1466 ref->constraints = NIL;
1467 ref->dcc = NULL;
1468 decr_dcc_refcount(dcc);
1469 }
1470 dcc = typentry->domainData;
1471 if (dcc)
1472 {
1473 ref->dcc = dcc;
1474 dcc->dccRefCount++;
1475 if (ref->need_exprstate)
1476 ref->constraints = prep_domain_constraints(dcc->constraints,
1477 ref->refctx);
1478 else
1479 ref->constraints = dcc->constraints;
1480 }
1481 }
1482}
1483
1484/*
1485 * DomainHasConstraints --- utility routine to check if a domain has constraints
1486 *
1487 * This is defined to return false, not fail, if type is not a domain.
1488 */
1489bool
1491{
1492 TypeCacheEntry *typentry;
1493
1494 /*
1495 * Note: a side effect is to cause the typcache's domain data to become
1496 * valid. This is fine since we'll likely need it soon if there is any.
1497 */
1499
1500 return (typentry->domainData != NULL);
1501}
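
/*
 * Illustrative caller sketch (an assumption, not part of typcache.c): the
 * usual DomainConstraintRef lifecycle.  InitDomainConstraintRef() runs once,
 * in a context that lives as long as the cached state, and
 * UpdateDomainConstraintRef() runs before each use so that ref->constraints
 * reflects any concurrent change to the domain's constraint set.
 */
static void
example_apply_domain_constraints(Oid domain_type, MemoryContext mcxt,
								 DomainConstraintRef *ref, bool first_time)
{
	ListCell   *lc;

	if (first_time)
		InitDomainConstraintRef(domain_type, ref, mcxt, true);

	UpdateDomainConstraintRef(ref);

	foreach(lc, ref->constraints)
	{
		DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);

		if (con->constrainttype == DOM_CONSTRAINT_NOTNULL)
			continue;			/* ... enforce NOT NULL elsewhere ... */
		/* ... evaluate con->check_exprstate against the datum being checked ... */
	}
}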
1502
1503
1504/*
1505 * array_element_has_equality and friends are helper routines to check
1506 * whether we should believe that array_eq and related functions will work
1507 * on the given array type or composite type.
1508 *
1509 * The logic above may call these repeatedly on the same type entry, so we
1510 * make use of the typentry->flags field to cache the results once known.
1511 * Also, we assume that we'll probably want all these facts about the type
1512 * if we want any, so we cache them all using only one lookup of the
1513 * component datatype(s).
1514 */
1515
1516static bool
1518{
1519 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1521 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1522}
1523
1524static bool
1526{
1527 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1529 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1530}
1531
1532static bool
1534{
1535 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1537 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1538}
1539
1540static bool
1542{
1543 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1545 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1546}
1547
1548static void
1550{
1552
1553 if (OidIsValid(elem_type))
1554 {
1556
1562 if (OidIsValid(elementry->eq_opr))
1563 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1564 if (OidIsValid(elementry->cmp_proc))
1565 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1566 if (OidIsValid(elementry->hash_proc))
1567 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1568 if (OidIsValid(elementry->hash_extended_proc))
1570 }
1572}
1573
1574/*
1575 * Likewise, some helper functions for composite types.
1576 */
1577
1578static bool
1580{
1581 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1583 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1584}
1585
1586static bool
1588{
1589 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1591 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1592}
1593
1594static bool
1596{
1597 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1599 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1600}
1601
1602static bool
1604{
1605 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1607 return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1608}
1609
1610static void
1612{
1613 /*
1614 * For type RECORD, we can't really tell what will work, since we don't
1615 * have access here to the specific anonymous type. Just assume that
1616 * equality and comparison will (we may get a failure at runtime). We
1617 * could also claim that hashing works, but then if code that has the
1618 * option between a comparison-based (sort-based) and a hash-based plan
1619 * chooses hashing, stuff could fail that would otherwise work if it chose
1620 * a comparison-based plan. In practice more types support comparison
1621 * than hashing.
1622 */
1623 if (typentry->type_id == RECORDOID)
1624 {
1625 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1627 }
1628 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1629 {
1630 TupleDesc tupdesc;
1631 int newflags;
1632 int i;
1633
1634 /* Fetch composite type's tupdesc if we don't have it already */
1635 if (typentry->tupDesc == NULL)
1636 load_typcache_tupdesc(typentry);
1637 tupdesc = typentry->tupDesc;
1638
1639 /* Must bump the refcount while we do additional catalog lookups */
1640 IncrTupleDescRefCount(tupdesc);
1641
1642 /* Have each property if all non-dropped fields have the property */
1647 for (i = 0; i < tupdesc->natts; i++)
1648 {
1650 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1651
1652 if (attr->attisdropped)
1653 continue;
1654
1655 fieldentry = lookup_type_cache(attr->atttypid,
1660 if (!OidIsValid(fieldentry->eq_opr))
1662 if (!OidIsValid(fieldentry->cmp_proc))
1664 if (!OidIsValid(fieldentry->hash_proc))
1666 if (!OidIsValid(fieldentry->hash_extended_proc))
1668
1669 /* We can drop out of the loop once we disprove all bits */
1670 if (newflags == 0)
1671 break;
1672 }
1673 typentry->flags |= newflags;
1674
1675 DecrTupleDescRefCount(tupdesc);
1676 }
1677 else if (typentry->typtype == TYPTYPE_DOMAIN)
1678 {
1679 /* If it's domain over composite, copy base type's properties */
1681
1682 /* load up basetype info if we didn't already */
1683 if (typentry->domainBaseType == InvalidOid)
1684 {
1685 typentry->domainBaseTypmod = -1;
1686 typentry->domainBaseType =
1687 getBaseTypeAndTypmod(typentry->type_id,
1688 &typentry->domainBaseTypmod);
1689 }
1695 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1696 {
1698 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1702 }
1703 }
1705}
1706
1707/*
1708 * Likewise, some helper functions for range and multirange types.
1709 *
1710 * We can borrow the flag bits for array element properties to use for range
1711 * element properties, since those flag bits otherwise have no use in a
1712 * range or multirange type's typcache entry.
1713 */
1714
1715static bool
1717{
1718 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1720 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1721}
1722
1723static bool
1725{
1726 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1728 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1729}
1730
1731static void
1733{
1734 /* load up subtype link if we didn't already */
1735 if (typentry->rngelemtype == NULL &&
1736 typentry->typtype == TYPTYPE_RANGE)
1737 load_rangetype_info(typentry);
1738
1739 if (typentry->rngelemtype != NULL)
1740 {
1742
1743 /* might need to calculate subtype's hash function properties */
1747 if (OidIsValid(elementry->hash_proc))
1748 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1749 if (OidIsValid(elementry->hash_extended_proc))
1751 }
1753}
1754
1755static bool
1757{
1758 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1760 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1761}
1762
1763static bool
1765{
1766 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1768 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1769}
1770
1771static void
1773{
1774 /* load up range link if we didn't already */
1775 if (typentry->rngtype == NULL &&
1776 typentry->typtype == TYPTYPE_MULTIRANGE)
1777 load_multirangetype_info(typentry);
1778
1779 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1780 {
1782
1783 /* might need to calculate subtype's hash function properties */
1787 if (OidIsValid(elementry->hash_proc))
1788 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1789 if (OidIsValid(elementry->hash_extended_proc))
1791 }
1793}
1794
1795/*
1796 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1797 * to store 'typmod'.
1798 */
1799static void
1801{
1802 if (RecordCacheArray == NULL)
1803 {
1806 64 * sizeof(RecordCacheArrayEntry));
1808 }
1809
1810 if (typmod >= RecordCacheArrayLen)
1811 {
1812 int32 newlen = pg_nextpower2_32(typmod + 1);
1813
1817 newlen);
1819 }
1820}
1821
1822/*
1823 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1824 *
1825 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1826 * hasn't had its refcount bumped.
1827 */
1828static TupleDesc
1829lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1830{
1831 if (type_id != RECORDOID)
1832 {
1833 /*
1834 * It's a named composite type, so use the regular typcache.
1835 */
1836 TypeCacheEntry *typentry;
1837
1838 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1839 if (typentry->tupDesc == NULL && !noError)
1840 ereport(ERROR,
1842 errmsg("type %s is not composite",
1843 format_type_be(type_id))));
1844 return typentry->tupDesc;
1845 }
1846 else
1847 {
1848 /*
1849 * It's a transient record type, so look in our record-type table.
1850 */
1851 if (typmod >= 0)
1852 {
1853 /* It is already in our local cache? */
1854 if (typmod < RecordCacheArrayLen &&
1855 RecordCacheArray[typmod].tupdesc != NULL)
1856 return RecordCacheArray[typmod].tupdesc;
1857
1858 /* Are we attached to a shared record typmod registry? */
1860 {
1862
1863 /* Try to find it in the shared typmod index. */
1865 &typmod, false);
1866 if (entry != NULL)
1867 {
1868 TupleDesc tupdesc;
1869
1870 tupdesc = (TupleDesc)
1872 entry->shared_tupdesc);
1873 Assert(typmod == tupdesc->tdtypmod);
1874
1875 /* We may need to extend the local RecordCacheArray. */
1877
1878 /*
1879 * Our local array can now point directly to the TupleDesc
1880 * in shared memory, which is non-reference-counted.
1881 */
1882 RecordCacheArray[typmod].tupdesc = tupdesc;
1883 Assert(tupdesc->tdrefcount == -1);
1884
1885 /*
1886 * We don't share tupdesc identifiers across processes, so
1887 * assign one locally.
1888 */
1890
1892 entry);
1893
1894 return RecordCacheArray[typmod].tupdesc;
1895 }
1896 }
1897 }
1898
1899 if (!noError)
1900 ereport(ERROR,
1902 errmsg("record type has not been registered")));
1903 return NULL;
1904 }
1905}
1906
1907/*
1908 * lookup_rowtype_tupdesc
1909 *
1910 * Given a typeid/typmod that should describe a known composite type,
1911 * return the tuple descriptor for the type. Will ereport on failure.
1912 * (Use ereport because this is reachable with user-specified OIDs,
1913 * for example from record_in().)
1914 *
1915 * Note: on success, we increment the refcount of the returned TupleDesc,
1916 * and log the reference in CurrentResourceOwner. Caller must call
1917 * ReleaseTupleDesc when done using the tupdesc. (There are some
1918 * cases in which the returned tupdesc is not refcounted, in which
1919 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1920 * the tupdesc is guaranteed to live till process exit.)
1921 */
1923lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1924{
1925 TupleDesc tupDesc;
1926
1927 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1928 PinTupleDesc(tupDesc);
1929 return tupDesc;
1930}
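
/*
 * Illustrative caller sketch (an assumption, not part of typcache.c): use the
 * descriptor only while it is pinned, then release it as required by the
 * comment above.
 */
static int
example_rowtype_natts(Oid type_id, int32 typmod)
{
	TupleDesc	tupdesc;
	int			natts;

	tupdesc = lookup_rowtype_tupdesc(type_id, typmod);
	natts = tupdesc->natts;
	ReleaseTupleDesc(tupdesc);
	return natts;
}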
1931
1932/*
1933 * lookup_rowtype_tupdesc_noerror
1934 *
1935 * As above, but if the type is not a known composite type and noError
1936 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1937 * type_id is passed, you'll get an ereport anyway.)
1938 */
1940lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1941{
1942 TupleDesc tupDesc;
1943
1944 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1945 if (tupDesc != NULL)
1946 PinTupleDesc(tupDesc);
1947 return tupDesc;
1948}
1949
1950/*
1951 * lookup_rowtype_tupdesc_copy
1952 *
1953 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1954 * copied into the CurrentMemoryContext and is not reference-counted.
1955 */
1957lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1958{
1959 TupleDesc tmp;
1960
1961 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1962 return CreateTupleDescCopyConstr(tmp);
1963}
1964
1965/*
1966 * lookup_rowtype_tupdesc_domain
1967 *
1968 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1969 * a domain over a named composite type; so this is effectively equivalent to
1970 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1971 * except for being a tad faster.
1972 *
1973 * Note: the reason we don't fold the look-through-domain behavior into plain
1974 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1975 * dealing with a domain. Otherwise they might construct a tuple that should
1976 * be of the domain type, but not apply domain constraints.
1977 */
1978 TupleDesc
1979 lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1980{
1981 TupleDesc tupDesc;
1982
1983 if (type_id != RECORDOID)
1984 {
1985 /*
1986 * Check for domain or named composite type. We might as well load
1987 * whichever data is needed.
1988 */
1989 TypeCacheEntry *typentry;
1990
1991 typentry = lookup_type_cache(type_id,
1992 TYPECACHE_TUPDESC |
1993 TYPECACHE_DOMAIN_BASE_INFO);
1994 if (typentry->typtype == TYPTYPE_DOMAIN)
1995 return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
1996 typentry->domainBaseTypmod,
1997 noError);
1998 if (typentry->tupDesc == NULL && !noError)
1999 ereport(ERROR,
2000 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2001 errmsg("type %s is not composite",
2002 format_type_be(type_id))));
2003 tupDesc = typentry->tupDesc;
2004 }
2005 else
2006 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2007 if (tupDesc != NULL)
2008 PinTupleDesc(tupDesc);
2009 return tupDesc;
2010}
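/*
 * [Editor's note: illustrative sketch; not part of typcache.c.]
 * A caller building a value of a domain over a composite type would
 * typically fetch the base rowtype's descriptor here and then enforce the
 * domain constraints itself, along these lines:
 *
 *     TupleDesc td = lookup_rowtype_tupdesc_domain(typid, -1, false);
 *     ... form the tuple using td ...
 *     ReleaseTupleDesc(td);
 *     ... check the domain's constraints (e.g. with domain_check()) before
 *         treating the result as a value of the domain type ...
 */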
2011
2012/*
2013 * Hash function for the hash table of RecordCacheEntry.
2014 */
2015static uint32
2016record_type_typmod_hash(const void *data, size_t size)
2017{
2018 const RecordCacheEntry *entry = data;
2019
2020 return hashRowType(entry->tupdesc);
2021}
2022
2023/*
2024 * Match function for the hash table of RecordCacheEntry.
2025 */
2026static int
2027record_type_typmod_compare(const void *a, const void *b, size_t size)
2028{
2029 const RecordCacheEntry *left = a;
2030 const RecordCacheEntry *right = b;
2031
2032 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2033}
2034
2035/*
2036 * assign_record_type_typmod
2037 *
2038 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2039 * for the type, and set the tupdesc's tdtypmod field to a value that will
2040 * identify this cache entry to lookup_rowtype_tupdesc.
2041 */
2042 void
2043 assign_record_type_typmod(TupleDesc tupDesc)
2044{
2045 RecordCacheEntry *recentry;
2046 TupleDesc entDesc;
2047 bool found;
2048 MemoryContext oldcxt;
2049
2050 Assert(tupDesc->tdtypeid == RECORDOID);
2051
2052 if (RecordCacheHash == NULL)
2053 {
2054 /* First time through: initialize the hash table */
2055 HASHCTL ctl;
2056
2057 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2058 ctl.entrysize = sizeof(RecordCacheEntry);
2059 ctl.hash = record_type_typmod_hash;
2060 ctl.match = record_type_typmod_compare;
2061 RecordCacheHash = hash_create("Record information cache", 64,
2062 &ctl,
2063 HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
2064
2065 /* Also make sure CacheMemoryContext exists */
2066 if (!CacheMemoryContext)
2067 CreateCacheMemoryContext();
2068 }
2069
2070 /*
2071 * Find a hashtable entry for this tuple descriptor. We don't use
2072 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2073 * the allocations succeed before we create the new entry.
2074 */
2075 recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2076 &tupDesc,
2077 HASH_FIND, &found);
2078 if (found && recentry->tupdesc != NULL)
2079 {
2080 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2081 return;
2082 }
2083
2084 /* Not present, so need to manufacture an entry */
2085 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2086
2087 /* Look in the SharedRecordTypmodRegistry, if attached */
2088 entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2089 if (entDesc == NULL)
2090 {
2091 /*
2092 * Make sure we have room before we CreateTupleDescCopy() or advance
2093 * NextRecordTypmod.
2094 */
2095 ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
2096
2097 /* Reference-counted local cache only. */
2098 entDesc = CreateTupleDescCopy(tupDesc);
2099 entDesc->tdrefcount = 1;
2100 entDesc->tdtypmod = NextRecordTypmod++;
2101 }
2102 else
2103 {
2104 ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
2105 }
2106
2107 RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2108
2109 /* Assign a unique tupdesc identifier, too. */
2110 RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
2111
2112 /* Fully initialized; create the hash table entry */
2113 recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2114 &tupDesc,
2115 HASH_ENTER, NULL);
2116 recentry->tupdesc = entDesc;
2117
2118 /* Update the caller's tuple descriptor. */
2119 tupDesc->tdtypmod = entDesc->tdtypmod;
2120
2121 MemoryContextSwitchTo(oldcxt);
2122}
2123
2124/*
2125 * assign_record_type_identifier
2126 *
2127 * Get an identifier, which will be unique over the lifespan of this backend
2128 * process, for the current tuple descriptor of the specified composite type.
2129 * For named composite types, the value is guaranteed to change if the type's
2130 * definition does. For registered RECORD types, the value will not change
2131 * once assigned, since the registered type won't either. If an anonymous
2132 * RECORD type is specified, we return a new identifier on each call.
2133 */
2134 uint64
2135 assign_record_type_identifier(Oid type_id, int32 typmod)
2136{
2137 if (type_id != RECORDOID)
2138 {
2139 /*
2140 * It's a named composite type, so use the regular typcache.
2141 */
2142 TypeCacheEntry *typentry;
2143
2144 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2145 if (typentry->tupDesc == NULL)
2146 ereport(ERROR,
2147 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2148 errmsg("type %s is not composite",
2149 format_type_be(type_id))));
2150 Assert(typentry->tupDesc_identifier != 0);
2151 return typentry->tupDesc_identifier;
2152 }
2153 else
2154 {
2155 /*
2156 * It's a transient record type, so look in our record-type table.
2157 */
2158 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2159 RecordCacheArray[typmod].tupdesc != NULL)
2160 {
2161 Assert(RecordCacheArray[typmod].id != 0);
2162 return RecordCacheArray[typmod].id;
2163 }
2164
2165 /* For anonymous or unrecognized record type, generate a new ID */
2166 return ++tupledesc_id_counter;
2167 }
2168}
2169
2170/*
2171 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2172 * This exists only to avoid exposing private innards of
2173 * SharedRecordTypmodRegistry in a header.
2174 */
2175 size_t
2176 SharedRecordTypmodRegistryEstimate(void)
2177{
2178 return sizeof(SharedRecordTypmodRegistry);
2179}
2180
2181/*
2182 * Initialize 'registry' in a pre-existing shared memory region, which must be
2183 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2184 * bytes.
2185 *
2186 * 'area' will be used to allocate shared memory space as required for the
2187 * typemod registration. The current process, expected to be a leader process
2188 * in a parallel query, will be attached automatically and its current record
2189 * types will be loaded into *registry. While attached, all calls to
2190 * assign_record_type_typmod will use the shared registry. Worker backends
2191 * will need to attach explicitly.
2192 *
2193 * Note that this function takes 'area' and 'segment' as arguments rather than
2194 * accessing them via CurrentSession, because they aren't installed there
2195 * until after this function runs.
2196 */
2197 void
2198 SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2199 dsm_segment *segment,
2200 dsa_area *area)
2201{
2202 MemoryContext old_context;
2203 dshash_table *record_table;
2204 dshash_table *typmod_table;
2205 int32 typmod;
2206
2207 Assert(!IsParallelWorker());
2208
2209 /* We can't already be attached to a shared registry. */
2210 Assert(CurrentSession->shared_typmod_registry == NULL);
2211 Assert(CurrentSession->shared_record_table == NULL);
2212 Assert(CurrentSession->shared_typmod_table == NULL);
2213
2214 old_context = MemoryContextSwitchTo(TopMemoryContext);
2215
2216 /* Create the hash table of tuple descriptors indexed by themselves. */
2217 record_table = dshash_create(area, &srtr_record_table_params, area);
2218
2219 /* Create the hash table of tuple descriptors indexed by typmod. */
2220 typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2221
2222 MemoryContextSwitchTo(old_context);
2223
2224 /* Initialize the SharedRecordTypmodRegistry. */
2225 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2226 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2228
2229 /*
2230 * Copy all entries from this backend's private registry into the shared
2231 * registry.
2232 */
2233 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2234 {
2235 SharedTypmodTableEntry *typmod_table_entry;
2236 SharedRecordTableEntry *record_table_entry;
2237 SharedRecordTableKey record_table_key;
2238 dsa_pointer shared_dp;
2239 TupleDesc tupdesc;
2240 bool found;
2241
2242 tupdesc = RecordCacheArray[typmod].tupdesc;
2243 if (tupdesc == NULL)
2244 continue;
2245
2246 /* Copy the TupleDesc into shared memory. */
2247 shared_dp = share_tupledesc(area, tupdesc, typmod);
2248
2249 /* Insert into the typmod table. */
2250 typmod_table_entry = dshash_find_or_insert(typmod_table,
2251 &tupdesc->tdtypmod,
2252 &found);
2253 if (found)
2254 elog(ERROR, "cannot create duplicate shared record typmod");
2255 typmod_table_entry->typmod = tupdesc->tdtypmod;
2256 typmod_table_entry->shared_tupdesc = shared_dp;
2257 dshash_release_lock(typmod_table, typmod_table_entry);
2258
2259 /* Insert into the record table. */
2260 record_table_key.shared = false;
2261 record_table_key.u.local_tupdesc = tupdesc;
2262 record_table_entry = dshash_find_or_insert(record_table,
2263 &record_table_key,
2264 &found);
2265 if (!found)
2266 {
2267 record_table_entry->key.shared = true;
2268 record_table_entry->key.u.shared_tupdesc = shared_dp;
2269 }
2270 dshash_release_lock(record_table, record_table_entry);
2271 }
2272
2273 /*
2274 * Set up the global state that will tell assign_record_type_typmod and
2275 * lookup_rowtype_tupdesc_internal about the shared registry.
2276 */
2277 CurrentSession->shared_record_table = record_table;
2278 CurrentSession->shared_typmod_table = typmod_table;
2279 CurrentSession->shared_typmod_registry = registry;
2280
2281 /*
2282 * We install a detach hook in the leader, but only to handle cleanup on
2283 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2284 * the memory, the leader process will use a shared registry until it
2285 * exits.
2286 */
2287 on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2288}
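/*
 * [Editor's note: hedged sketch of the leader-side sequence, loosely modeled
 * on GetSessionDsmHandle() in session.c; variable names are illustrative.]
 *
 *     size_t size = SharedRecordTypmodRegistryEstimate();
 *     ... reserve "size" maxaligned bytes, plus room for a dsa_area, in a
 *         DSM segment, and create the dsa_area ...
 *     SharedRecordTypmodRegistryInit(registry, segment, area);
 *     ... parallel workers later call SharedRecordTypmodRegistryAttach()
 *         on the same registry after attaching to the segment and area ...
 */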
2289
2290/*
2291 * Attach to 'registry', which must have been initialized already by another
2292 * backend. Future calls to assign_record_type_typmod and
2293 * lookup_rowtype_tupdesc_internal will use the shared registry until the
2294 * current session is detached.
2295 */
2296 void
2297 SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
2298{
2299 MemoryContext old_context;
2300 dshash_table *record_table;
2301 dshash_table *typmod_table;
2302
2303 Assert(IsParallelWorker());
2304
2305 /* We can't already be attached to a shared registry. */
2306 Assert(CurrentSession != NULL);
2307 Assert(CurrentSession->segment != NULL);
2308 Assert(CurrentSession->area != NULL);
2309 Assert(CurrentSession->shared_typmod_registry == NULL);
2310 Assert(CurrentSession->shared_record_table == NULL);
2311 Assert(CurrentSession->shared_typmod_table == NULL);
2312
2313 /*
2314 * We can't already have typmods in our local cache, because they'd clash
2315 * with those imported by SharedRecordTypmodRegistryInit. This should be
2316 * a freshly started parallel worker. If we ever support worker
2317 * recycling, a worker would need to zap its local cache in between
2318 * servicing different queries, in order to be able to call this and
2319 * synchronize typmods with a new leader; but that's problematic because
2320 * we can't be very sure that record-typmod-related state hasn't escaped
2321 * to anywhere else in the process.
2322 */
2323 Assert(NextRecordTypmod == 0);
2324
2325 old_context = MemoryContextSwitchTo(TopMemoryContext);
2326
2327 /* Attach to the two hash tables. */
2328 record_table = dshash_attach(CurrentSession->area,
2329 &srtr_record_table_params,
2330 registry->record_table_handle,
2331 CurrentSession->area);
2332 typmod_table = dshash_attach(CurrentSession->area,
2333 &srtr_typmod_table_params,
2334 registry->typmod_table_handle,
2335 NULL);
2336
2337 MemoryContextSwitchTo(old_context);
2338
2339 /*
2340 * Set up detach hook to run at worker exit. Currently this is the same
2341 * as the leader's detach hook, but in future they might need to be
2342 * different.
2343 */
2344 on_dsm_detach(CurrentSession->segment,
2345 shared_record_typmod_registry_detach,
2346 PointerGetDatum(registry));
2347
2348 /*
2349 * Set up the session state that will tell assign_record_type_typmod and
2350 * lookup_rowtype_tupdesc_internal about the shared registry.
2351 */
2352 CurrentSession->shared_typmod_registry = registry;
2353 CurrentSession->shared_record_table = record_table;
2354 CurrentSession->shared_typmod_table = typmod_table;
2355}
2356
2357/*
2358 * InvalidateCompositeTypeCacheEntry
2359 * Invalidate particular TypeCacheEntry on Relcache inval callback
2360 *
2361 * Delete the cached tuple descriptor (if any) for the given composite
2362 * type, and reset whatever info we have cached about the composite type's
2363 * comparability.
2364 */
2365 static void
2366 InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
2367{
2368 bool hadTupDescOrOpclass;
2369
2370 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2371 OidIsValid(typentry->typrelid));
2372
2373 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2374 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2375
2376 /* Delete tupdesc if we have it */
2377 if (typentry->tupDesc != NULL)
2378 {
2379 /*
2380 * Release our refcount and free the tupdesc if none remain. We can't
2381 * use DecrTupleDescRefCount here because this reference is not logged
2382 * by the current resource owner.
2383 */
2384 Assert(typentry->tupDesc->tdrefcount > 0);
2385 if (--typentry->tupDesc->tdrefcount == 0)
2386 FreeTupleDesc(typentry->tupDesc);
2387 typentry->tupDesc = NULL;
2388
2389 /*
2390 * Also clear tupDesc_identifier, so that anyone watching it will
2391 * realize that the tupdesc has changed.
2392 */
2393 typentry->tupDesc_identifier = 0;
2394 }
2395
2396 /* Reset equality/comparison/hashing validity information */
2397 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2398
2399 /*
2400 * Call delete_rel_type_cache_if_needed() if we actually cleared
2401 * something.
2402 */
2403 if (hadTupDescOrOpclass)
2404 delete_rel_type_cache_if_needed(typentry);
2405}
2406
2407/*
2408 * TypeCacheRelCallback
2409 * Relcache inval callback function
2410 *
2411 * Delete the cached tuple descriptor (if any) for the given rel's composite
2412 * type, or for all composite types if relid == InvalidOid. Also reset
2413 * whatever info we have cached about the composite type's comparability.
2414 *
2415 * This is called when a relcache invalidation event occurs for the given
2416 * relid. We can't use syscache to find a type corresponding to the given
2417 * relation because the code can be called outside of transaction. Thus, we
2418 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2419 */
2420 static void
2421 TypeCacheRelCallback(Datum arg, Oid relid)
2422{
2423 TypeCacheEntry *typentry;
2424
2425 /*
2426 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2427 * callback wouldn't be registered
2428 */
2429 if (OidIsValid(relid))
2430 {
2431 RelIdToTypeIdCacheEntry *relentry;
2432
2433 /*
2434 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as the
2435 * corresponding typcache entry has something to clean.
2436 */
2437 relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
2438 &relid,
2439 HASH_FIND, NULL);
2440
2441 if (relentry != NULL)
2442 {
2443 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2444 &relentry->composite_typid,
2445 HASH_FIND, NULL);
2446
2447 if (typentry != NULL)
2448 {
2449 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2450 Assert(relid == typentry->typrelid);
2451
2453 }
2454 }
2455
2456 /*
2457 * Visit all the domain types sequentially. Typically, this shouldn't
2458 * affect performance since domain types are less prone to bloat.
2459 * Domain types are created manually, unlike composite types which are
2460 * automatically created for every temporary table.
2461 */
2462 for (typentry = firstDomainTypeEntry;
2463 typentry != NULL;
2464 typentry = typentry->nextDomain)
2465 {
2466 /*
2467 * If it's domain over composite, reset flags. (We don't bother
2468 * trying to determine whether the specific base type needs a
2469 * reset.) Note that if we haven't determined whether the base
2470 * type is composite, we don't need to reset anything.
2471 */
2472 if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2473 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2474 }
2475 }
2476 else
2477 {
2478 HASH_SEQ_STATUS status;
2479
2480 /*
2481 * Relid is invalid. By convention, we need to reset all composite
2482 * types in the cache. We should also reset flags for domain types;
2483 * since we loop over all hash entries anyway, do both in a single scan.
2484 */
2485 hash_seq_init(&status, TypeCacheHash);
2486 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2487 {
2488 if (typentry->typtype == TYPTYPE_COMPOSITE)
2489 {
2490 InvalidateCompositeTypeCacheEntry(typentry);
2491 }
2492 else if (typentry->typtype == TYPTYPE_DOMAIN)
2493 {
2494 /*
2495 * If it's domain over composite, reset flags. (We don't
2496 * bother trying to determine whether the specific base type
2497 * needs a reset.) Note that if we haven't determined whether
2498 * the base type is composite, we don't need to reset
2499 * anything.
2500 */
2501 if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2502 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2503 }
2504 }
2505 }
2506}
2507
2508/*
2509 * TypeCacheTypCallback
2510 * Syscache inval callback function
2511 *
2512 * This is called when a syscache invalidation event occurs for any
2513 * pg_type row. If we have information cached about that type, mark
2514 * it as needing to be reloaded.
2515 */
2516 static void
2517 TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2518{
2519 HASH_SEQ_STATUS status;
2520 TypeCacheEntry *typentry;
2521
2522 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2523
2524 /*
2525 * By convention, zero hash value is passed to the callback as a sign that
2526 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2527 * InvalidateSystemCachesExtended().
2528 */
2529 if (hashvalue == 0)
2530 hash_seq_init(&status, TypeCacheHash);
2531 else
2532 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2533
2534 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2535 {
2536 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2537
2538 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2539
2540 /*
2541 * Mark the data obtained directly from pg_type as invalid. Also, if
2542 * it's a domain, typnotnull might've changed, so we'll need to
2543 * recalculate its constraints.
2544 */
2545 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2546 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2547
2548 /*
2549 * Call delete_rel_type_cache_if_needed() if we cleaned
2550 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2551 */
2552 if (hadPgTypeData)
2553 delete_rel_type_cache_if_needed(typentry);
2554 }
2555}
2556
2557/*
2558 * TypeCacheOpcCallback
2559 * Syscache inval callback function
2560 *
2561 * This is called when a syscache invalidation event occurs for any pg_opclass
2562 * row. In principle we could probably just invalidate data dependent on the
2563 * particular opclass, but since updates on pg_opclass are rare in production
2564 * it doesn't seem worth a lot of complication: we just mark all cached data
2565 * invalid.
2566 *
2567 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2568 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2569 * is not allowed to be used to add/drop the primary operators and functions
2570 * of an opclass, only cross-type members of a family; and the latter sorts
2571 * of members are not going to get cached here.
2572 */
2573 static void
2574 TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2575{
2576 HASH_SEQ_STATUS status;
2577 TypeCacheEntry *typentry;
2578
2579 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2580 hash_seq_init(&status, TypeCacheHash);
2581 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2582 {
2583 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2584
2585 /* Reset equality/comparison/hashing validity information */
2586 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2587
2588 /*
2589 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2590 * of TCFLAGS_OPERATOR_FLAGS.
2591 */
2592 if (hadOpclass)
2593 delete_rel_type_cache_if_needed(typentry);
2594 }
2595}
2596
2597/*
2598 * TypeCacheConstrCallback
2599 * Syscache inval callback function
2600 *
2601 * This is called when a syscache invalidation event occurs for any
2602 * pg_constraint row. We flush information about domain constraints
2603 * when this happens.
2604 *
2605 * It's slightly annoying that we can't tell whether the inval event was for
2606 * a domain constraint record or not; there's usually more update traffic
2607 * for table constraints than domain constraints, so we'll do a lot of
2608 * useless flushes. Still, this is better than the old no-caching-at-all
2609 * approach to domain constraints.
2610 */
2611 static void
2612 TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2613{
2614 TypeCacheEntry *typentry;
2615
2616 /*
2617 * Because this is called very frequently, and typically very few of the
2618 * typcache entries are for domains, we don't use hash_seq_search here.
2619 * Instead we thread all the domain-type entries together so that we can
2620 * visit them cheaply.
2621 */
2622 for (typentry = firstDomainTypeEntry;
2623 typentry != NULL;
2624 typentry = typentry->nextDomain)
2625 {
2626 /* Reset domain constraint validity information */
2627 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2628 }
2629}
2630
2631
2632/*
2633 * Check if given OID is part of the subset that's sortable by comparisons
2634 */
2635 static inline bool
2636 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2637{
2638 Oid offset;
2639
2640 if (arg < enumdata->bitmap_base)
2641 return false;
2642 offset = arg - enumdata->bitmap_base;
2643 if (offset > (Oid) INT_MAX)
2644 return false;
2645 return bms_is_member((int) offset, enumdata->sorted_values);
2646}
2647
2648
2649/*
2650 * compare_values_of_enum
2651 * Compare two members of an enum type.
2652 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2653 *
2654 * Note: currently, the enumData cache is refreshed only if we are asked
2655 * to compare an enum value that is not already in the cache. This is okay
2656 * because there is no support for re-ordering existing values, so comparisons
2657 * of previously cached values will return the right answer even if other
2658 * values have been added since we last loaded the cache.
2659 *
2660 * Note: the enum logic has a special-case rule about even-numbered versus
2661 * odd-numbered OIDs, but we take no account of that rule here; this
2662 * routine shouldn't even get called when that rule applies.
2663 */
2664 int
2665 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2666{
2667 TypeCacheEnumData *enumdata;
2668 EnumItem *item1;
2669 EnumItem *item2;
2670
2671 /*
2672 * Equal OIDs are certainly equal --- this case was probably handled by
2673 * our caller, but we may as well check.
2674 */
2675 if (arg1 == arg2)
2676 return 0;
2677
2678 /* Load up the cache if first time through */
2679 if (tcache->enumData == NULL)
2680 load_enum_cache_data(tcache);
2681 enumdata = tcache->enumData;
2682
2683 /*
2684 * If both OIDs are known-sorted, we can just compare them directly.
2685 */
2686 if (enum_known_sorted(enumdata, arg1) &&
2687 enum_known_sorted(enumdata, arg2))
2688 {
2689 if (arg1 < arg2)
2690 return -1;
2691 else
2692 return 1;
2693 }
2694
2695 /*
2696 * Slow path: we have to identify their actual sort-order positions.
2697 */
2698 item1 = find_enumitem(enumdata, arg1);
2699 item2 = find_enumitem(enumdata, arg2);
2700
2701 if (item1 == NULL || item2 == NULL)
2702 {
2703 /*
2704 * We couldn't find one or both values. That means the enum has
2705 * changed under us, so re-initialize the cache and try again. We
2706 * don't bother retrying the known-sorted case in this path.
2707 */
2708 load_enum_cache_data(tcache);
2709 enumdata = tcache->enumData;
2710
2711 item1 = find_enumitem(enumdata, arg1);
2712 item2 = find_enumitem(enumdata, arg2);
2713
2714 /*
2715 * If we still can't find the values, complain: we must have corrupt
2716 * data.
2717 */
2718 if (item1 == NULL)
2719 elog(ERROR, "enum value %u not found in cache for enum %s",
2720 arg1, format_type_be(tcache->type_id));
2721 if (item2 == NULL)
2722 elog(ERROR, "enum value %u not found in cache for enum %s",
2723 arg2, format_type_be(tcache->type_id));
2724 }
2725
2726 if (item1->sort_order < item2->sort_order)
2727 return -1;
2728 else if (item1->sort_order > item2->sort_order)
2729 return 1;
2730 else
2731 return 0;
2732}
2733
2734/*
2735 * Load (or re-load) the enumData member of the typcache entry.
2736 */
2737 static void
2738 load_enum_cache_data(TypeCacheEntry *tcache)
2739{
2740 TypeCacheEnumData *enumdata;
2741 Relation enum_rel;
2742 SysScanDesc enum_scan;
2743 HeapTuple enum_tuple;
2744 ScanKeyData skey;
2745 EnumItem *items;
2746 int numitems;
2747 int maxitems;
2748 Oid bitmap_base;
2749 Bitmapset *bitmap;
2750 MemoryContext oldcxt;
2751 int bm_size,
2752 start_pos;
2753
2754 /* Check that this is actually an enum */
2755 if (tcache->typtype != TYPTYPE_ENUM)
2756 ereport(ERROR,
2757 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2758 errmsg("%s is not an enum",
2759 format_type_be(tcache->type_id))));
2760
2761 /*
2762 * Read all the information for members of the enum type. We collect the
2763 * info in working memory in the caller's context, and then transfer it to
2764 * permanent memory in CacheMemoryContext. This minimizes the risk of
2765 * leaking memory from CacheMemoryContext in the event of an error partway
2766 * through.
2767 */
2768 maxitems = 64;
2769 items = palloc_array(EnumItem, maxitems);
2770 numitems = 0;
2771
2772 /* Scan pg_enum for the members of the target enum type. */
2773 ScanKeyInit(&skey,
2774 Anum_pg_enum_enumtypid,
2775 BTEqualStrategyNumber, F_OIDEQ,
2776 ObjectIdGetDatum(tcache->type_id));
2777
2778 enum_rel = table_open(EnumRelationId, AccessShareLock);
2779 enum_scan = systable_beginscan(enum_rel,
2780 EnumTypIdLabelIndexId,
2781 true, NULL,
2782 1, &skey);
2783
2784 while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2785 {
2786 Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2787
2788 if (numitems >= maxitems)
2789 {
2790 maxitems *= 2;
2791 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2792 }
2793 items[numitems].enum_oid = en->oid;
2794 items[numitems].sort_order = en->enumsortorder;
2795 numitems++;
2796 }
2797
2798 systable_endscan(enum_scan);
2799 table_close(enum_rel, AccessShareLock);
2800
2801 /* Sort the items into OID order */
2802 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2803
2804 /*
2805 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2806 * known to be in order and can thus be compared with just OID comparison.
2807 *
2808 * The point of this is that the enum's initial OIDs were certainly in
2809 * order, so there is some subset that can be compared via OID comparison;
2810 * and we'd rather not do binary searches unnecessarily.
2811 *
2812 * This is somewhat heuristic, and might identify a subset of OIDs that
2813 * isn't exactly what the type started with. That's okay as long as the
2814 * subset is correctly sorted.
2815 */
2816 bitmap_base = InvalidOid;
2817 bitmap = NULL;
2818 bm_size = 1; /* only save sets of at least 2 OIDs */
2819
2820 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2821 {
2822 /*
2823 * Identify longest sorted subsequence starting at start_pos
2824 */
2825 Bitmapset *this_bitmap = bms_make_singleton(0);
2826 int this_bm_size = 1;
2827 Oid start_oid = items[start_pos].enum_oid;
2828 float4 prev_order = items[start_pos].sort_order;
2829 int i;
2830
2831 for (i = start_pos + 1; i < numitems; i++)
2832 {
2833 Oid offset;
2834
2835 offset = items[i].enum_oid - start_oid;
2836 /* quit if bitmap would be too large; cutoff is arbitrary */
2837 if (offset >= 8192)
2838 break;
2839 /* include the item if it's in-order */
2840 if (items[i].sort_order > prev_order)
2841 {
2842 prev_order = items[i].sort_order;
2843 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2844 this_bm_size++;
2845 }
2846 }
2847
2848 /* Remember it if larger than previous best */
2849 if (this_bm_size > bm_size)
2850 {
2851 bms_free(bitmap);
2852 bitmap_base = start_oid;
2853 bitmap = this_bitmap;
2854 bm_size = this_bm_size;
2855 }
2856 else
2857 bms_free(this_bitmap);
2858
2859 /*
2860 * Done if it's not possible to find a longer sequence in the rest of
2861 * the list. In typical cases this will happen on the first
2862 * iteration, which is why we create the bitmaps on the fly instead of
2863 * doing a second pass over the list.
2864 */
2865 if (bm_size >= (numitems - start_pos - 1))
2866 break;
2867 }
2868
2869 /* OK, copy the data into CacheMemoryContext */
2870 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2871 enumdata = (TypeCacheEnumData *)
2872 palloc(offsetof(TypeCacheEnumData, enum_values) +
2873 numitems * sizeof(EnumItem));
2874 enumdata->bitmap_base = bitmap_base;
2875 enumdata->sorted_values = bms_copy(bitmap);
2876 enumdata->num_values = numitems;
2877 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2878 MemoryContextSwitchTo(oldcxt);
2879
2880 pfree(items);
2881 bms_free(bitmap);
2882
2883 /* And link the finished cache struct into the typcache */
2884 if (tcache->enumData != NULL)
2885 pfree(tcache->enumData);
2886 tcache->enumData = enumdata;
2887}
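/*
 * [Editor's note: worked example of the bitmap heuristic above; the OIDs are
 * invented.]  Suppose pg_enum holds (enum_oid, sort_order) pairs
 * (7001, 1), (7002, 2), (7003, 3), (7005, 2.5), where 7005 was added later
 * with ALTER TYPE ... ADD VALUE ... BEFORE.  Scanning from 7001, the longest
 * run ascending in both OID and sort order is {7001, 7002, 7003}; 7005 is
 * excluded because its sort order (2.5) is below its predecessor's (3).  The
 * cache then stores bitmap_base = 7001 with offsets {0, 1, 2} set, so
 * comparisons among the first three values use plain OID comparison, while
 * any comparison involving 7005 falls through to the bsearch path.
 */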
2888
2889/*
2890 * Locate the EnumItem with the given OID, if present
2891 */
2892 static EnumItem *
2893 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2894{
2895 EnumItem srch;
2896
2897 /* On some versions of Solaris, bsearch of zero items dumps core */
2898 if (enumdata->num_values <= 0)
2899 return NULL;
2900
2901 srch.enum_oid = arg;
2902 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2903 sizeof(EnumItem), enum_oid_cmp);
2904}
2905
2906/*
2907 * qsort comparison function for OID-ordered EnumItems
2908 */
2909static int
2910enum_oid_cmp(const void *left, const void *right)
2911{
2912 const EnumItem *l = (const EnumItem *) left;
2913 const EnumItem *r = (const EnumItem *) right;
2914
2915 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2916}
2917
2918/*
2919 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2920 * to the given value and return a dsa_pointer.
2921 */
2922static dsa_pointer
2923share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2924{
2925 dsa_pointer shared_dp;
2926 TupleDesc shared;
2927
2928 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2929 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2930 TupleDescCopy(shared, tupdesc);
2931 shared->tdtypmod = typmod;
2932
2933 return shared_dp;
2934}
2935
2936/*
2937 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2938 * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2939 * Tuple descriptors returned by this function are not reference counted, and
2941 * will exist at least as long as the current backend remains attached to the
2941 * current session.
2942 */
2943 static TupleDesc
2944 find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2945{
2946 TupleDesc result;
2947 SharedRecordTableKey key;
2948 SharedRecordTableEntry *record_table_entry;
2949 SharedTypmodTableEntry *typmod_table_entry;
2950 dsa_pointer shared_dp;
2951 bool found;
2952 uint32 typmod;
2953
2954 /* If not even attached, nothing to do. */
2955 if (CurrentSession->shared_typmod_registry == NULL)
2956 return NULL;
2957
2958 /* Try to find a matching tuple descriptor in the record table. */
2959 key.shared = false;
2960 key.u.local_tupdesc = tupdesc;
2961 record_table_entry = (SharedRecordTableEntry *)
2962 dshash_find(CurrentSession->shared_record_table, &key, false);
2963 if (record_table_entry)
2964 {
2965 Assert(record_table_entry->key.shared);
2966 dshash_release_lock(CurrentSession->shared_record_table,
2967 record_table_entry);
2968 result = (TupleDesc)
2969 dsa_get_address(CurrentSession->area,
2970 record_table_entry->key.u.shared_tupdesc);
2971 Assert(result->tdrefcount == -1);
2972
2973 return result;
2974 }
2975
2976 /* Allocate a new typmod number. This will be wasted if we error out. */
2977 typmod = (int)
2978 pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2979 1);
2980
2981 /* Copy the TupleDesc into shared memory. */
2982 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2983
2984 /*
2985 * Create an entry in the typmod table so that others will understand this
2986 * typmod number.
2987 */
2988 PG_TRY();
2989 {
2990 typmod_table_entry = (SharedTypmodTableEntry *)
2991 dshash_find_or_insert(CurrentSession->shared_typmod_table,
2992 &typmod, &found);
2993 if (found)
2994 elog(ERROR, "cannot create duplicate shared record typmod");
2995 }
2996 PG_CATCH();
2997 {
2998 dsa_free(CurrentSession->area, shared_dp);
2999 PG_RE_THROW();
3000 }
3001 PG_END_TRY();
3002 typmod_table_entry->typmod = typmod;
3003 typmod_table_entry->shared_tupdesc = shared_dp;
3004 dshash_release_lock(CurrentSession->shared_typmod_table,
3005 typmod_table_entry);
3006
3007 /*
3008 * Finally create an entry in the record table so others with matching
3009 * tuple descriptors can reuse the typmod.
3010 */
3011 record_table_entry = (SharedRecordTableEntry *)
3012 dshash_find_or_insert(CurrentSession->shared_record_table, &key,
3013 &found);
3014 if (found)
3015 {
3016 /*
3017 * Someone concurrently inserted a matching tuple descriptor since the
3018 * first time we checked. Use that one instead.
3019 */
3020 dshash_release_lock(CurrentSession->shared_record_table,
3021 record_table_entry);
3022
3023 /* Might as well free up the space used by the one we created. */
3024 found = dshash_delete_key(CurrentSession->shared_typmod_table,
3025 &typmod);
3026 Assert(found);
3027 dsa_free(CurrentSession->area, shared_dp);
3028
3029 /* Return the one we found. */
3030 Assert(record_table_entry->key.shared);
3031 result = (TupleDesc)
3033 record_table_entry->key.u.shared_tupdesc);
3034 Assert(result->tdrefcount == -1);
3035
3036 return result;
3037 }
3038
3039 /* Store it and return it. */
3040 record_table_entry->key.shared = true;
3041 record_table_entry->key.u.shared_tupdesc = shared_dp;
3042 dshash_release_lock(CurrentSession->shared_record_table,
3043 record_table_entry);
3044 result = (TupleDesc)
3045 dsa_get_address(CurrentSession->area, shared_dp);
3046 Assert(result->tdrefcount == -1);
3047
3048 return result;
3049}
3050
3051/*
3052 * On-DSM-detach hook to forget about the current shared record typmod
3053 * infrastructure. This is currently used by both leader and workers.
3054 */
3055 static void
3056 shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3057{
3058 /* Be cautious here: maybe we didn't finish initializing. */
3059 if (CurrentSession->shared_record_table != NULL)
3060 {
3061 dshash_detach(CurrentSession->shared_record_table);
3062 CurrentSession->shared_record_table = NULL;
3063 }
3064 if (CurrentSession->shared_typmod_table != NULL)
3065 {
3066 dshash_detach(CurrentSession->shared_typmod_table);
3067 CurrentSession->shared_typmod_table = NULL;
3068 }
3069 CurrentSession->shared_typmod_registry = NULL;
3070}
3071
3072/*
3073 * Insert RelIdToTypeIdCacheHash entry if needed.
3074 */
3075 static void
3076 insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3077{
3078 /* Immediately quit for non-composite types */
3079 if (typentry->typtype != TYPTYPE_COMPOSITE)
3080 return;
3081
3082 /* typrelid should be given for composite types */
3083 Assert(OidIsValid(typentry->typrelid));
3084
3085 /*
3086 * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3087 * information indicating it should be here.
3088 */
3089 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3090 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3091 typentry->tupDesc != NULL)
3092 {
3093 RelIdToTypeIdCacheEntry *relentry;
3094 bool found;
3095
3096 relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3097 &typentry->typrelid,
3098 HASH_ENTER, &found);
3099 relentry->relid = typentry->typrelid;
3100 relentry->composite_typid = typentry->type_id;
3101 }
3102}
3103
3104/*
3105 * Delete the RelIdToTypeIdCacheHash entry if needed after resetting the
3106 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3107 * or tupDesc.
3108 */
3109 static void
3110 delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3111{
3112#ifdef USE_ASSERT_CHECKING
3113 int i;
3114 bool is_in_progress = false;
3115
3116 for (i = 0; i < in_progress_list_len; i++)
3117 {
3118 if (in_progress_list[i] == typentry->type_id)
3119 {
3120 is_in_progress = true;
3121 break;
3122 }
3123 }
3124#endif
3125
3126 /* Immediately quit for non-composite types */
3127 if (typentry->typtype != TYPTYPE_COMPOSITE)
3128 return;
3129
3130 /* typrelid should be given for composite types */
3131 Assert(OidIsValid(typentry->typrelid));
3132
3133 /*
3134 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3135 * information indicating the entry should still be there.
3136 */
3137 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3138 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3139 typentry->tupDesc == NULL)
3140 {
3141 bool found;
3142
3143 (void) hash_search(RelIdToTypeIdCacheHash,
3144 &typentry->typrelid,
3145 HASH_REMOVE, &found);
3146 Assert(found || is_in_progress);
3147 }
3148 else
3149 {
3150#ifdef USE_ASSERT_CHECKING
3151 /*
3152 * Otherwise, in assert-enabled builds, check that the
3153 * RelIdToTypeIdCacheHash entry exists when it is supposed to.
3154 */
3155 bool found;
3156
3157 if (!is_in_progress)
3158 {
3159 (void) hash_search(RelIdToTypeIdCacheHash,
3160 &typentry->typrelid,
3161 HASH_FIND, &found);
3162 Assert(found);
3163 }
3164#endif
3165 }
3166}
3167
3168/*
3169 * Add possibly missing RelIdToTypeIdCacheHash entries for TypeCacheHash
3170 * entries that were marked as in-progress by lookup_type_cache(). This can
3171 * happen after an error or interruption during the lookup_type_cache() call.
3172 */
3173 static void
3174 finalize_in_progress_typentries(void)
3175{
3176 int i;
3177
3178 for (i = 0; i < in_progress_list_len; i++)
3179 {
3180 TypeCacheEntry *typentry;
3181
3182 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3183 &in_progress_list[i],
3184 HASH_FIND, NULL);
3185 if (typentry)
3186 insert_rel_type_cache_if_needed(typentry);
3187 }
3188
3189 in_progress_list_len = 0;
3190}
3191
3192 void
3193 AtEOXact_TypeCache(void)
3194{
3195 finalize_in_progress_typentries();
3196}
3197
3198 void
3199 AtEOSubXact_TypeCache(void)
3200{
3201 finalize_in_progress_typentries();
3202}

Typedef Documentation

◆ RecordCacheArrayEntry

◆ RecordCacheEntry

◆ RelIdToTypeIdCacheEntry

◆ SharedRecordTableEntry

◆ SharedRecordTableKey

◆ SharedTypmodTableEntry

◆ TypeCacheEnumData

Function Documentation

◆ array_element_has_compare()

static bool array_element_has_compare ( TypeCacheEntry typentry)
static

Definition at line 1526 of file typcache.c.

1527{
1528 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1529 cache_array_element_properties(typentry);
1530 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1531}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_COMPARE.

Referenced by lookup_type_cache().

◆ array_element_has_equality()

static bool array_element_has_equality ( TypeCacheEntry typentry)
static

Definition at line 1518 of file typcache.c.

1519{
1520 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1521 cache_array_element_properties(typentry);
1522 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1523}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_EQUALITY.

Referenced by lookup_type_cache().

◆ array_element_has_extended_hashing()

static bool array_element_has_extended_hashing ( TypeCacheEntry typentry)
static

◆ array_element_has_hashing()

static bool array_element_has_hashing ( TypeCacheEntry typentry)
static

Definition at line 1534 of file typcache.c.

1535{
1536 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1537 cache_array_element_properties(typentry);
1538 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1539}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_HASHING.

Referenced by lookup_type_cache().

◆ assign_record_type_identifier()

uint64 assign_record_type_identifier ( Oid  type_id,
int32  typmod 
)

Definition at line 2136 of file typcache.c.

2137{
2138 if (type_id != RECORDOID)
2139 {
2140 /*
2141 * It's a named composite type, so use the regular typcache.
2142 */
2143 TypeCacheEntry *typentry;
2144
2145 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2146 if (typentry->tupDesc == NULL)
2147 ereport(ERROR,
2148 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2149 errmsg("type %s is not composite",
2150 format_type_be(type_id))));
2151 Assert(typentry->tupDesc_identifier != 0);
2152 return typentry->tupDesc_identifier;
2153 }
2154 else
2155 {
2156 /*
2157 * It's a transient record type, so look in our record-type table.
2158 */
2159 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2160 RecordCacheArray[typmod].tupdesc != NULL)
2161 {
2162 Assert(RecordCacheArray[typmod].id != 0);
2163 return RecordCacheArray[typmod].id;
2164 }
2165
2166 /* For anonymous or unrecognized record type, generate a new ID */
2167 return ++tupledesc_id_counter;
2168 }
2169}

References Assert, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), RecordCacheArrayEntry::id, lookup_type_cache(), RecordCacheArray, RecordCacheArrayLen, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, tupledesc_id_counter, and TYPECACHE_TUPDESC.

Referenced by expanded_record_fetch_tupdesc(), make_expanded_record_from_tupdesc(), and make_expanded_record_from_typeid().
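The identifier is mainly useful for cheap change detection. A minimal sketch of that pattern, with hypothetical caller-side names (cached_id, rebuild_metadata), might look like this:

 uint64 cur_id = assign_record_type_identifier(typid, typmod);

 if (cur_id != cached_id)
 {
     /* tuple descriptor changed, or was never seen; refresh cached state */
     rebuild_metadata(typid, typmod);    /* hypothetical helper */
     cached_id = cur_id;
 }

This is roughly how the expanded-record callers listed above use the value: they remember the identifier alongside per-tupdesc metadata and rebuild that metadata only when the identifier no longer matches.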

◆ assign_record_type_typmod()

void assign_record_type_typmod ( TupleDesc  tupDesc)

Definition at line 2044 of file typcache.c.

2045{
2048 bool found;
2050
2051 Assert(tupDesc->tdtypeid == RECORDOID);
2052
2053 if (RecordCacheHash == NULL)
2054 {
2055 /* First time through: initialize the hash table */
2056 HASHCTL ctl;
2057
2058 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2059 ctl.entrysize = sizeof(RecordCacheEntry);
2062 RecordCacheHash = hash_create("Record information cache", 64,
2063 &ctl,
2065
2066 /* Also make sure CacheMemoryContext exists */
2067 if (!CacheMemoryContext)
2069 }
2070
2071 /*
2072 * Find a hashtable entry for this tuple descriptor. We don't use
2073 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2074 * the allocations succeed before we create the new entry.
2075 */
2077 &tupDesc,
2078 HASH_FIND, &found);
2079 if (found && recentry->tupdesc != NULL)
2080 {
2081 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2082 return;
2083 }
2084
2085 /* Not present, so need to manufacture an entry */
2087
2088 /* Look in the SharedRecordTypmodRegistry, if attached */
2090 if (entDesc == NULL)
2091 {
2092 /*
2093 * Make sure we have room before we CreateTupleDescCopy() or advance
2094 * NextRecordTypmod.
2095 */
2097
2098 /* Reference-counted local cache only. */
2099 entDesc = CreateTupleDescCopy(tupDesc);
2100 entDesc->tdrefcount = 1;
2101 entDesc->tdtypmod = NextRecordTypmod++;
2102 }
2103 else
2104 {
2106 }
2107
2109
2110 /* Assign a unique tupdesc identifier, too. */
2112
2113 /* Fully initialized; create the hash table entry */
2115 &tupDesc,
2116 HASH_ENTER, NULL);
2117 recentry->tupdesc = entDesc;
2118
2119 /* Update the caller's tuple descriptor. */
2120 tupDesc->tdtypmod = entDesc->tdtypmod;
2121
2123}

References Assert, CacheMemoryContext, CreateCacheMemoryContext(), CreateTupleDescCopy(), ctl, ensure_record_cache_typmod_slot_exists(), fb(), find_or_make_matching_shared_tupledesc(), HASH_COMPARE, hash_create(), HASH_ELEM, HASH_ENTER, HASH_FIND, HASH_FUNCTION, hash_search(), RecordCacheArrayEntry::id, HASHCTL::keysize, MemoryContextSwitchTo(), NextRecordTypmod, record_type_typmod_compare(), record_type_typmod_hash(), RecordCacheArray, RecordCacheHash, TupleDescData::tdtypeid, TupleDescData::tdtypmod, RecordCacheArrayEntry::tupdesc, and tupledesc_id_counter.

Referenced by BlessTupleDesc(), ER_get_flat_size(), internal_get_result_type(), and SPI_returntuple().
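
Usage sketch (a hypothetical helper, not part of typcache.c): build an anonymous two-column record, register a typmod for it, and form a tuple that can be returned as a composite Datum. Most extension code reaches this through BlessTupleDesc(), which calls assign_record_type_typmod() internally.

#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "utils/typcache.h"

/* Hypothetical helper: return (i, t) as an anonymous record Datum. */
static Datum
make_int_text_record(int32 ival, text *tval)
{
    TupleDesc   tupdesc;
    Datum       values[2];
    bool        nulls[2] = {false, false};
    HeapTuple   tuple;

    tupdesc = CreateTemplateTupleDesc(2);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "i", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "t", TEXTOID, -1, 0);

    /* Registers the descriptor in the record cache and sets tdtypmod. */
    assign_record_type_typmod(tupdesc);

    values[0] = Int32GetDatum(ival);
    values[1] = PointerGetDatum(tval);
    tuple = heap_form_tuple(tupdesc, values, nulls);

    return HeapTupleGetDatum(tuple);
}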

◆ AtEOSubXact_TypeCache()

void AtEOSubXact_TypeCache ( void  )

Definition at line 3200 of file typcache.c.

3201{
3202 finalize_in_progress_typentries();
3203}

References finalize_in_progress_typentries().

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_TypeCache()

void AtEOXact_TypeCache ( void  )

◆ cache_array_element_properties()

◆ cache_multirange_element_properties()

static void cache_multirange_element_properties ( TypeCacheEntry typentry)
static

Definition at line 1773 of file typcache.c.

1774{
1775 /* load up range link if we didn't already */
1776 if (typentry->rngtype == NULL &&
1777 typentry->typtype == TYPTYPE_MULTIRANGE)
1778 load_multirangetype_info(typentry);
1779
1780 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1781 {
1782 TypeCacheEntry *elementry;
1783
1784 /* might need to calculate subtype's hash function properties */
1785 elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1786 TYPECACHE_HASH_PROC |
1787 TYPECACHE_HASH_EXTENDED_PROC);
1788 if (OidIsValid(elementry->hash_proc))
1789 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1790 if (OidIsValid(elementry->hash_extended_proc))
1791 typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1792 }
1793 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1794}

References fb(), TypeCacheEntry::flags, load_multirangetype_info(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngelemtype, TypeCacheEntry::rngtype, TCFLAGS_CHECKED_ELEM_PROPERTIES, TCFLAGS_HAVE_ELEM_EXTENDED_HASHING, TCFLAGS_HAVE_ELEM_HASHING, TypeCacheEntry::type_id, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by multirange_element_has_extended_hashing(), and multirange_element_has_hashing().

◆ cache_range_element_properties()

static void cache_range_element_properties ( TypeCacheEntry typentry)
static

Definition at line 1733 of file typcache.c.

1734{
1735 /* load up subtype link if we didn't already */
1736 if (typentry->rngelemtype == NULL &&
1737 typentry->typtype == TYPTYPE_RANGE)
1738 load_rangetype_info(typentry);
1739
1740 if (typentry->rngelemtype != NULL)
1741 {
1742 TypeCacheEntry *elementry;
1743
1744 /* might need to calculate subtype's hash function properties */
1745 elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1746 TYPECACHE_HASH_PROC |
1747 TYPECACHE_HASH_EXTENDED_PROC);
1748 if (OidIsValid(elementry->hash_proc))
1749 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1750 if (OidIsValid(elementry->hash_extended_proc))
1751 typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1752 }
1753 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1754}

References fb(), TypeCacheEntry::flags, load_rangetype_info(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngelemtype, TCFLAGS_CHECKED_ELEM_PROPERTIES, TCFLAGS_HAVE_ELEM_EXTENDED_HASHING, TCFLAGS_HAVE_ELEM_HASHING, TypeCacheEntry::type_id, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by range_element_has_extended_hashing(), and range_element_has_hashing().

◆ cache_record_field_properties()

static void cache_record_field_properties ( TypeCacheEntry typentry)
static

Definition at line 1612 of file typcache.c.

1613{
1614 /*
1615 * For type RECORD, we can't really tell what will work, since we don't
1616 * have access here to the specific anonymous type. Just assume that
1617 * equality and comparison will (we may get a failure at runtime). We
1618 * could also claim that hashing works, but then if code that has the
1619 * option between a comparison-based (sort-based) and a hash-based plan
1620 * chooses hashing, stuff could fail that would otherwise work if it chose
1621 * a comparison-based plan. In practice more types support comparison
1622 * than hashing.
1623 */
1624 if (typentry->type_id == RECORDOID)
1625 {
1626 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1628 }
1629 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1630 {
1631 TupleDesc tupdesc;
1632 int newflags;
1633 int i;
1634
1635 /* Fetch composite type's tupdesc if we don't have it already */
1636 if (typentry->tupDesc == NULL)
1637 load_typcache_tupdesc(typentry);
1638 tupdesc = typentry->tupDesc;
1639
1640 /* Must bump the refcount while we do additional catalog lookups */
1641 IncrTupleDescRefCount(tupdesc);
1642
1643 /* Have each property if all non-dropped fields have the property */
1648 for (i = 0; i < tupdesc->natts; i++)
1649 {
1651 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1652
1653 if (attr->attisdropped)
1654 continue;
1655
1656 fieldentry = lookup_type_cache(attr->atttypid,
1661 if (!OidIsValid(fieldentry->eq_opr))
1663 if (!OidIsValid(fieldentry->cmp_proc))
1665 if (!OidIsValid(fieldentry->hash_proc))
1667 if (!OidIsValid(fieldentry->hash_extended_proc))
1669
1670 /* We can drop out of the loop once we disprove all bits */
1671 if (newflags == 0)
1672 break;
1673 }
1674 typentry->flags |= newflags;
1675
1676 DecrTupleDescRefCount(tupdesc);
1677 }
1678 else if (typentry->typtype == TYPTYPE_DOMAIN)
1679 {
1680 /* If it's domain over composite, copy base type's properties */
1682
1683 /* load up basetype info if we didn't already */
1684 if (typentry->domainBaseType == InvalidOid)
1685 {
1686 typentry->domainBaseTypmod = -1;
1687 typentry->domainBaseType =
1688 getBaseTypeAndTypmod(typentry->type_id,
1689 &typentry->domainBaseTypmod);
1690 }
1696 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1697 {
1699 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1703 }
1704 }
1706}

References DecrTupleDescRefCount(), TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, fb(), TypeCacheEntry::flags, getBaseTypeAndTypmod(), i, IncrTupleDescRefCount(), InvalidOid, load_typcache_tupdesc(), lookup_type_cache(), TupleDescData::natts, OidIsValid, TCFLAGS_CHECKED_FIELD_PROPERTIES, TCFLAGS_DOMAIN_BASE_IS_COMPOSITE, TCFLAGS_HAVE_FIELD_COMPARE, TCFLAGS_HAVE_FIELD_EQUALITY, TCFLAGS_HAVE_FIELD_EXTENDED_HASHING, TCFLAGS_HAVE_FIELD_HASHING, TypeCacheEntry::tupDesc, TupleDescAttr(), TypeCacheEntry::type_id, TYPECACHE_CMP_PROC, TYPECACHE_EQ_OPR, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by record_fields_have_compare(), record_fields_have_equality(), record_fields_have_extended_hashing(), and record_fields_have_hashing().

◆ compare_values_of_enum()

int compare_values_of_enum ( TypeCacheEntry tcache,
Oid  arg1,
Oid  arg2 
)

Definition at line 2666 of file typcache.c.

2667{
2668 TypeCacheEnumData *enumdata;
2669 EnumItem *item1;
2670 EnumItem *item2;
2671
2672 /*
2673 * Equal OIDs are certainly equal --- this case was probably handled by
2674 * our caller, but we may as well check.
2675 */
2676 if (arg1 == arg2)
2677 return 0;
2678
2679 /* Load up the cache if first time through */
2680 if (tcache->enumData == NULL)
2681 load_enum_cache_data(tcache);
2682 enumdata = tcache->enumData;
2683
2684 /*
2685 * If both OIDs are known-sorted, we can just compare them directly.
2686 */
2687 if (enum_known_sorted(enumdata, arg1) &&
2688 enum_known_sorted(enumdata, arg2))
2689 {
2690 if (arg1 < arg2)
2691 return -1;
2692 else
2693 return 1;
2694 }
2695
2696 /*
2697 * Slow path: we have to identify their actual sort-order positions.
2698 */
2699 item1 = find_enumitem(enumdata, arg1);
2700 item2 = find_enumitem(enumdata, arg2);
2701
2702 if (item1 == NULL || item2 == NULL)
2703 {
2704 /*
2705 * We couldn't find one or both values. That means the enum has
2706 * changed under us, so re-initialize the cache and try again. We
2707 * don't bother retrying the known-sorted case in this path.
2708 */
2709 load_enum_cache_data(tcache);
2710 enumdata = tcache->enumData;
2711
2712 item1 = find_enumitem(enumdata, arg1);
2713 item2 = find_enumitem(enumdata, arg2);
2714
2715 /*
2716 * If we still can't find the values, complain: we must have corrupt
2717 * data.
2718 */
2719 if (item1 == NULL)
2720 elog(ERROR, "enum value %u not found in cache for enum %s",
2721 arg1, format_type_be(tcache->type_id));
2722 if (item2 == NULL)
2723 elog(ERROR, "enum value %u not found in cache for enum %s",
2724 arg2, format_type_be(tcache->type_id));
2725 }
2726
2727 if (item1->sort_order < item2->sort_order)
2728 return -1;
2729 else if (item1->sort_order > item2->sort_order)
2730 return 1;
2731 else
2732 return 0;
2733}

References elog, enum_known_sorted(), TypeCacheEntry::enumData, ERROR, fb(), find_enumitem(), format_type_be(), load_enum_cache_data(), and TypeCacheEntry::type_id.

Referenced by enum_cmp_internal().
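
Usage sketch modeled on enum_cmp_internal() (the helper name is hypothetical): the typcache entry is looked up once and cached in fn_extra, since the pointer remains valid for the life of the backend.

/* Hypothetical comparator for two values of the same enum type. */
static int
my_enum_cmp(FunctionCallInfo fcinfo, Oid arg1, Oid arg2)
{
    TypeCacheEntry *tcache = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;

    if (tcache == NULL)
    {
        Oid typeoid = get_fn_expr_argtype(fcinfo->flinfo, 0);

        tcache = lookup_type_cache(typeoid, 0);
        fcinfo->flinfo->fn_extra = tcache;
    }

    if (arg1 == arg2)
        return 0;
    return compare_values_of_enum(tcache, arg1, arg2);
}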

◆ dccref_deletion_callback()

static void dccref_deletion_callback ( void arg)
static

Definition at line 1345 of file typcache.c.

1346{
1347 DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1348 DomainConstraintCache *dcc = ref->dcc;
1349
1350 /* Paranoia --- be sure link is nulled before trying to release */
1351 if (dcc)
1352 {
1353 ref->constraints = NIL;
1354 ref->dcc = NULL;
1355 decr_dcc_refcount(dcc);
1356 }
1357}

References arg, DomainConstraintCache::constraints, decr_dcc_refcount(), fb(), and NIL.

Referenced by InitDomainConstraintRef().

◆ dcs_cmp()

static int dcs_cmp ( const void a,
const void b 
)
static

Definition at line 1321 of file typcache.c.

1322{
1323 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1324 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1325
1326 return strcmp((*ca)->name, (*cb)->name);
1327}

References a, b, and fb().

Referenced by load_domaintype_info().

◆ decr_dcc_refcount()

static void decr_dcc_refcount ( DomainConstraintCache dcc)
static

◆ delete_rel_type_cache_if_needed()

static void delete_rel_type_cache_if_needed ( TypeCacheEntry typentry)
static

Definition at line 3111 of file typcache.c.

3112{
3113#ifdef USE_ASSERT_CHECKING
3114 int i;
3115 bool is_in_progress = false;
3116
3117 for (i = 0; i < in_progress_list_len; i++)
3118 {
3119 if (in_progress_list[i] == typentry->type_id)
3120 {
3121 is_in_progress = true;
3122 break;
3123 }
3124 }
3125#endif
3126
3127 /* Immediately quit for non-composite types */
3128 if (typentry->typtype != TYPTYPE_COMPOSITE)
3129 return;
3130
3131 /* typrelid should be given for composite types */
3132 Assert(OidIsValid(typentry->typrelid));
3133
3134 /*
3135 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3136 * information indicating that the entry should still be there.
3137 */
3138 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3139 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3140 typentry->tupDesc == NULL)
3141 {
3142 bool found;
3143
3144 (void) hash_search(RelIdToTypeIdCacheHash,
3145 &typentry->typrelid,
3146 HASH_REMOVE, &found);
3147 Assert(found || is_in_progress);
3148 }
3149 else
3150 {
3151#ifdef USE_ASSERT_CHECKING
3152 /*
3153 * In assert-enabled builds, otherwise check that the expected
3154 * RelIdToTypeIdCacheHash entry does exist.
3155 */
3156 bool found;
3157
3158 if (!is_in_progress)
3159 {
3160 (void) hash_search(RelIdToTypeIdCacheHash,
3161 &typentry->typrelid,
3162 HASH_FIND, &found);
3163 Assert(found);
3164 }
3165#endif
3166 }
3167}

References Assert, fb(), TypeCacheEntry::flags, HASH_FIND, HASH_REMOVE, hash_search(), i, in_progress_list, in_progress_list_len, OidIsValid, RelIdToTypeIdCacheHash, TCFLAGS_HAVE_PG_TYPE_DATA, TCFLAGS_OPERATOR_FLAGS, TypeCacheEntry::tupDesc, TypeCacheEntry::type_id, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by InvalidateCompositeTypeCacheEntry(), TypeCacheOpcCallback(), and TypeCacheTypCallback().

◆ DomainHasConstraints()

bool DomainHasConstraints ( Oid  type_id)

Definition at line 1491 of file typcache.c.

1492{
1493 TypeCacheEntry *typentry;
1494
1495 /*
1496 * Note: a side effect is to cause the typcache's domain data to become
1497 * valid. This is fine since we'll likely need it soon if there is any.
1498 */
1499 typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1500
1501 return (typentry->domainData != NULL);
1502}

References TypeCacheEntry::domainData, fb(), lookup_type_cache(), and TYPECACHE_DOMAIN_CONSTR_INFO.

Referenced by ATColumnChangeRequiresRewrite(), ATExecAddColumn(), eval_const_expressions_mutator(), ExecInitJsonCoercion(), and transformJsonFuncExpr().
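
Usage sketch (variable names are illustrative): callers typically use this to decide up front whether per-row domain checking can be skipped.

if (!DomainHasConstraints(domainOid))
{
    /* Only the base-type coercion is needed; no CHECK/NOT NULL tests. */
    need_domain_check = false;
}
else
{
    /* Set up a DomainConstraintRef; see InitDomainConstraintRef(). */
    need_domain_check = true;
}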

◆ ensure_record_cache_typmod_slot_exists()

static void ensure_record_cache_typmod_slot_exists ( int32  typmod)
static

◆ enum_known_sorted()

static bool enum_known_sorted ( TypeCacheEnumData enumdata,
Oid  arg 
)
inlinestatic

Definition at line 2637 of file typcache.c.

2638{
2639 Oid offset;
2640
2641 if (arg < enumdata->bitmap_base)
2642 return false;
2643 offset = arg - enumdata->bitmap_base;
2644 if (offset > (Oid) INT_MAX)
2645 return false;
2646 return bms_is_member((int) offset, enumdata->sorted_values);
2647}

References arg, bms_is_member(), and fb().

Referenced by compare_values_of_enum().

◆ enum_oid_cmp()

static int enum_oid_cmp ( const void left,
const void right 
)
static

Definition at line 2911 of file typcache.c.

2912{
2913 const EnumItem *l = (const EnumItem *) left;
2914 const EnumItem *r = (const EnumItem *) right;
2915
2916 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2917}

References EnumItem::enum_oid, and pg_cmp_u32().

Referenced by find_enumitem(), and load_enum_cache_data().

◆ finalize_in_progress_typentries()

static void finalize_in_progress_typentries ( void  )
static

Definition at line 3175 of file typcache.c.

3176{
3177 int i;
3178
3179 for (i = 0; i < in_progress_list_len; i++)
3180 {
3181 TypeCacheEntry *typentry;
3182
3183 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3184 &in_progress_list[i],
3185 HASH_FIND, NULL);
3186 if (typentry)
3187 insert_rel_type_cache_if_needed(typentry);
3188 }
3189
3190 in_progress_list_len = 0;
3191}

References fb(), HASH_FIND, hash_search(), i, in_progress_list, in_progress_list_len, insert_rel_type_cache_if_needed(), and TypeCacheHash.

Referenced by AtEOSubXact_TypeCache(), and AtEOXact_TypeCache().

◆ find_enumitem()

static EnumItem * find_enumitem ( TypeCacheEnumData enumdata,
Oid  arg 
)
static

Definition at line 2894 of file typcache.c.

2895{
2896 EnumItem srch;
2897
2898 /* On some versions of Solaris, bsearch of zero items dumps core */
2899 if (enumdata->num_values <= 0)
2900 return NULL;
2901
2902 srch.enum_oid = arg;
2903 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2904 sizeof(EnumItem), enum_oid_cmp);
2905}

References arg, EnumItem::enum_oid, enum_oid_cmp(), and fb().

Referenced by compare_values_of_enum().

◆ find_or_make_matching_shared_tupledesc()

static TupleDesc find_or_make_matching_shared_tupledesc ( TupleDesc  tupdesc)
static

Definition at line 2945 of file typcache.c.

2946{
2947 TupleDesc result;
2952 bool found;
2953 uint32 typmod;
2954
2955 /* If not even attached, nothing to do. */
2957 return NULL;
2958
2959 /* Try to find a matching tuple descriptor in the record table. */
2960 key.shared = false;
2961 key.u.local_tupdesc = tupdesc;
2965 {
2966 Assert(record_table_entry->key.shared);
2969 result = (TupleDesc)
2971 record_table_entry->key.u.shared_tupdesc);
2972 Assert(result->tdrefcount == -1);
2973
2974 return result;
2975 }
2976
2977 /* Allocate a new typmod number. This will be wasted if we error out. */
2978 typmod = (int)
2980 1);
2981
2982 /* Copy the TupleDesc into shared memory. */
2983 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2984
2985 /*
2986 * Create an entry in the typmod table so that others will understand this
2987 * typmod number.
2988 */
2989 PG_TRY();
2990 {
2993 &typmod, &found);
2994 if (found)
2995 elog(ERROR, "cannot create duplicate shared record typmod");
2996 }
2997 PG_CATCH();
2998 {
3000 PG_RE_THROW();
3001 }
3002 PG_END_TRY();
3003 typmod_table_entry->typmod = typmod;
3004 typmod_table_entry->shared_tupdesc = shared_dp;
3007
3008 /*
3009 * Finally create an entry in the record table so others with matching
3010 * tuple descriptors can reuse the typmod.
3011 */
3014 &found);
3015 if (found)
3016 {
3017 /*
3018 * Someone concurrently inserted a matching tuple descriptor since the
3019 * first time we checked. Use that one instead.
3020 */
3023
3024 /* Might as well free up the space used by the one we created. */
3026 &typmod);
3027 Assert(found);
3029
3030 /* Return the one we found. */
3031 Assert(record_table_entry->key.shared);
3032 result = (TupleDesc)
3034 record_table_entry->key.u.shared_tupdesc);
3035 Assert(result->tdrefcount == -1);
3036
3037 return result;
3038 }
3039
3040 /* Store it and return it. */
3041 record_table_entry->key.shared = true;
3042 record_table_entry->key.u.shared_tupdesc = shared_dp;
3045 result = (TupleDesc)
3047 Assert(result->tdrefcount == -1);
3048
3049 return result;
3050}

References Session::area, Assert, CurrentSession, dsa_free(), dsa_get_address(), dshash_delete_key(), dshash_find(), dshash_find_or_insert(), dshash_release_lock(), elog, ERROR, fb(), SharedRecordTypmodRegistry::next_typmod, pg_atomic_fetch_add_u32(), PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, share_tupledesc(), Session::shared_record_table, Session::shared_typmod_registry, Session::shared_typmod_table, and TupleDescData::tdrefcount.

Referenced by assign_record_type_typmod().

◆ InitDomainConstraintRef()

void InitDomainConstraintRef ( Oid  type_id,
DomainConstraintRef ref,
MemoryContext  refctx,
bool  need_exprstate 
)

Definition at line 1404 of file typcache.c.

1406{
1407 /* Look up the typcache entry --- we assume it survives indefinitely */
1408 ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1409 ref->need_exprstate = need_exprstate;
1410 /* For safety, establish the callback before acquiring a refcount */
1411 ref->refctx = refctx;
1412 ref->dcc = NULL;
1413 ref->callback.func = dccref_deletion_callback;
1414 ref->callback.arg = ref;
1415 MemoryContextRegisterResetCallback(refctx, &ref->callback);
1416 /* Acquire refcount if there are constraints, and set up exported list */
1417 if (ref->tcache->domainData)
1418 {
1419 ref->dcc = ref->tcache->domainData;
1420 ref->dcc->dccRefCount++;
1421 if (ref->need_exprstate)
1422 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1423 ref->refctx);
1424 else
1425 ref->constraints = ref->dcc->constraints;
1426 }
1427 else
1428 ref->constraints = NIL;
1429}

References dccref_deletion_callback(), fb(), lookup_type_cache(), MemoryContextRegisterResetCallback(), NIL, prep_domain_constraints(), and TYPECACHE_DOMAIN_CONSTR_INFO.

Referenced by domain_state_setup(), and ExecInitCoerceToDomain().
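
Usage sketch modeled on domain_state_setup() (the struct and helper names are hypothetical): keep a long-lived DomainConstraintRef in caller-managed state, and refresh it before each use so committed constraint changes are picked up.

typedef struct MyDomainCheckState
{
    DomainConstraintRef constraint_ref;
    /* ... other per-call state ... */
} MyDomainCheckState;

static void
my_domain_check_setup(MyDomainCheckState *state, Oid domainType,
                      MemoryContext mcxt)
{
    /* need_exprstate = true builds executable check_exprstate trees. */
    InitDomainConstraintRef(domainType, &state->constraint_ref, mcxt, true);
}

static void
my_domain_check_refresh(MyDomainCheckState *state)
{
    /* Pick up any committed changes to the domain's constraints... */
    UpdateDomainConstraintRef(&state->constraint_ref);
    /* ...then walk state->constraint_ref.constraints and evaluate each. */
}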

◆ insert_rel_type_cache_if_needed()

static void insert_rel_type_cache_if_needed ( TypeCacheEntry typentry)
static

Definition at line 3077 of file typcache.c.

3078{
3079 /* Immediately quit for non-composite types */
3080 if (typentry->typtype != TYPTYPE_COMPOSITE)
3081 return;
3082
3083 /* typrelid should be given for composite types */
3084 Assert(OidIsValid(typentry->typrelid));
3085
3086 /*
3087 * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3088 * information indicating it should be here.
3089 */
3090 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3091 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3092 typentry->tupDesc != NULL)
3093 {
3094 RelIdToTypeIdCacheEntry *relentry;
3095 bool found;
3096
3097 relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3098 &typentry->typrelid,
3099 HASH_ENTER, &found);
3100 relentry->relid = typentry->typrelid;
3101 relentry->composite_typid = typentry->type_id;
3102 }
3103}

References Assert, fb(), TypeCacheEntry::flags, HASH_ENTER, hash_search(), OidIsValid, RelIdToTypeIdCacheEntry::relid, RelIdToTypeIdCacheHash, TCFLAGS_HAVE_PG_TYPE_DATA, TCFLAGS_OPERATOR_FLAGS, TypeCacheEntry::tupDesc, TypeCacheEntry::type_id, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by finalize_in_progress_typentries(), and lookup_type_cache().

◆ InvalidateCompositeTypeCacheEntry()

static void InvalidateCompositeTypeCacheEntry ( TypeCacheEntry typentry)
static

Definition at line 2367 of file typcache.c.

2368{
2370
2371 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2372 OidIsValid(typentry->typrelid));
2373
2374 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2375 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2376
2377 /* Delete tupdesc if we have it */
2378 if (typentry->tupDesc != NULL)
2379 {
2380 /*
2381 * Release our refcount and free the tupdesc if none remain. We can't
2382 * use DecrTupleDescRefCount here because this reference is not logged
2383 * by the current resource owner.
2384 */
2385 Assert(typentry->tupDesc->tdrefcount > 0);
2386 if (--typentry->tupDesc->tdrefcount == 0)
2387 FreeTupleDesc(typentry->tupDesc);
2388 typentry->tupDesc = NULL;
2389
2390 /*
2391 * Also clear tupDesc_identifier, so that anyone watching it will
2392 * realize that the tupdesc has changed.
2393 */
2394 typentry->tupDesc_identifier = 0;
2395 }
2396
2397 /* Reset equality/comparison/hashing validity information */
2398 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2399
2400 /*
2401 * Call delete_rel_type_cache_if_needed() if we actually cleared
2402 * something.
2403 */
2406}

References Assert, delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, FreeTupleDesc(), OidIsValid, TCFLAGS_OPERATOR_FLAGS, TupleDescData::tdrefcount, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by TypeCacheRelCallback().

◆ load_domaintype_info()

static void load_domaintype_info ( TypeCacheEntry typentry)
static

Definition at line 1086 of file typcache.c.

1087{
1088 Oid typeOid = typentry->type_id;
1090 bool notNull = false;
1092 int cconslen;
1095
1096 /*
1097 * If we're here, any existing constraint info is stale, so release it.
1098 * For safety, be sure to null the link before trying to delete the data.
1099 */
1100 if (typentry->domainData)
1101 {
1102 dcc = typentry->domainData;
1103 typentry->domainData = NULL;
1104 decr_dcc_refcount(dcc);
1105 }
1106
1107 /*
1108 * We try to optimize the common case of no domain constraints, so don't
1109 * create the dcc object and context until we find a constraint. Likewise
1110 * for the temp sorting array.
1111 */
1112 dcc = NULL;
1113 ccons = NULL;
1114 cconslen = 0;
1115
1116 /*
1117 * Scan pg_constraint for relevant constraints. We want to find
1118 * constraints for not just this domain, but any ancestor domains, so the
1119 * outer loop crawls up the domain stack.
1120 */
1122
1123 for (;;)
1124 {
1125 HeapTuple tup;
1128 int nccons = 0;
1129 ScanKeyData key[1];
1130 SysScanDesc scan;
1131
1133 if (!HeapTupleIsValid(tup))
1134 elog(ERROR, "cache lookup failed for type %u", typeOid);
1136
1137 if (typTup->typtype != TYPTYPE_DOMAIN)
1138 {
1139 /* Not a domain, so done */
1141 break;
1142 }
1143
1144 /* Test for NOT NULL Constraint */
1145 if (typTup->typnotnull)
1146 notNull = true;
1147
1148 /* Look for CHECK Constraints on this domain */
1149 ScanKeyInit(&key[0],
1152 ObjectIdGetDatum(typeOid));
1153
1155 NULL, 1, key);
1156
1158 {
1160 Datum val;
1161 bool isNull;
1162 char *constring;
1163 Expr *check_expr;
1165
1166 /* Ignore non-CHECK constraints */
1167 if (c->contype != CONSTRAINT_CHECK)
1168 continue;
1169
1170 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1172 conRel->rd_att, &isNull);
1173 if (isNull)
1174 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1175 NameStr(typTup->typname), NameStr(c->conname));
1176
1177 /* Create the DomainConstraintCache object and context if needed */
1178 if (dcc == NULL)
1179 {
1180 MemoryContext cxt;
1181
1183 "Domain constraints",
1185 dcc = (DomainConstraintCache *)
1187 dcc->constraints = NIL;
1188 dcc->dccContext = cxt;
1189 dcc->dccRefCount = 0;
1190 }
1191
1192 /* Convert conbin to a node tree, still in caller's context */
1194 check_expr = (Expr *) stringToNode(constring);
1195
1196 /*
1197 * Plan the expression, since ExecInitExpr will expect that.
1198 *
1199 * Note: caching the result of expression_planner() is not very
1200 * good practice. Ideally we'd use a CachedExpression here so
1201 * that we would react promptly to, eg, changes in inlined
1202 * functions. However, because we don't support mutable domain
1203 * CHECK constraints, it's not really clear that it's worth the
1204 * extra overhead to do that.
1205 */
1206 check_expr = expression_planner(check_expr);
1207
1208 /* Create only the minimally needed stuff in dccContext */
1210
1213 r->name = pstrdup(NameStr(c->conname));
1214 r->check_expr = copyObject(check_expr);
1215 r->check_exprstate = NULL;
1216
1218
1219 /* Accumulate constraints in an array, for sorting below */
1220 if (ccons == NULL)
1221 {
1222 cconslen = 8;
1225 }
1226 else if (nccons >= cconslen)
1227 {
1228 cconslen *= 2;
1231 }
1232 ccons[nccons++] = r;
1233 }
1234
1235 systable_endscan(scan);
1236
1237 if (nccons > 0)
1238 {
1239 /*
1240 * Sort the items for this domain, so that CHECKs are applied in a
1241 * deterministic order.
1242 */
1243 if (nccons > 1)
1245
1246 /*
1247 * Now attach them to the overall list. Use lcons() here because
1248 * constraints of parent domains should be applied earlier.
1249 */
1251 while (nccons > 0)
1252 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1254 }
1255
1256 /* loop to next domain in stack */
1257 typeOid = typTup->typbasetype;
1259 }
1260
1262
1263 /*
1264 * Only need to add one NOT NULL check regardless of how many domains in
1265 * the stack request it.
1266 */
1267 if (notNull)
1268 {
1270
1271 /* Create the DomainConstraintCache object and context if needed */
1272 if (dcc == NULL)
1273 {
1274 MemoryContext cxt;
1275
1277 "Domain constraints",
1279 dcc = (DomainConstraintCache *)
1281 dcc->constraints = NIL;
1282 dcc->dccContext = cxt;
1283 dcc->dccRefCount = 0;
1284 }
1285
1286 /* Create node trees in DomainConstraintCache's context */
1288
1290
1292 r->name = pstrdup("NOT NULL");
1293 r->check_expr = NULL;
1294 r->check_exprstate = NULL;
1295
1296 /* lcons to apply the nullness check FIRST */
1297 dcc->constraints = lcons(r, dcc->constraints);
1298
1300 }
1301
1302 /*
1303 * If we made a constraint object, move it into CacheMemoryContext and
1304 * attach it to the typcache entry.
1305 */
1306 if (dcc)
1307 {
1309 typentry->domainData = dcc;
1310 dcc->dccRefCount++; /* count the typcache's reference */
1311 }
1312
1313 /* Either way, the typcache entry's domain data is now valid. */
1315}

References AccessShareLock, ALLOCSET_SMALL_SIZES, AllocSetContextCreate, BTEqualStrategyNumber, CacheMemoryContext, DomainConstraintState::check_expr, DomainConstraintState::check_exprstate, DomainConstraintCache::constraints, DomainConstraintState::constrainttype, copyObject, CurrentMemoryContext, DomainConstraintCache::dccContext, DomainConstraintCache::dccRefCount, dcs_cmp(), decr_dcc_refcount(), DOM_CONSTRAINT_CHECK, DOM_CONSTRAINT_NOTNULL, TypeCacheEntry::domainData, elog, ERROR, expression_planner(), fastgetattr(), fb(), TypeCacheEntry::flags, GETSTRUCT(), HeapTupleIsValid, lcons(), makeNode, MemoryContextAlloc(), MemoryContextSetParent(), MemoryContextSwitchTo(), DomainConstraintState::name, NameStr, NIL, ObjectIdGetDatum(), palloc(), pstrdup(), qsort, ReleaseSysCache(), repalloc(), ScanKeyInit(), SearchSysCache1(), stringToNode(), systable_beginscan(), systable_endscan(), systable_getnext(), table_close(), table_open(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TextDatumGetCString, TypeCacheEntry::type_id, and val.

Referenced by lookup_type_cache(), and UpdateDomainConstraintRef().

◆ load_enum_cache_data()

static void load_enum_cache_data ( TypeCacheEntry tcache)
static

Definition at line 2739 of file typcache.c.

2740{
2746 EnumItem *items;
2747 int numitems;
2748 int maxitems;
2749 Oid bitmap_base;
2750 Bitmapset *bitmap;
2752 int bm_size,
2753 start_pos;
2754
2755 /* Check that this is actually an enum */
2756 if (tcache->typtype != TYPTYPE_ENUM)
2757 ereport(ERROR,
2759 errmsg("%s is not an enum",
2760 format_type_be(tcache->type_id))));
2761
2762 /*
2763 * Read all the information for members of the enum type. We collect the
2764 * info in working memory in the caller's context, and then transfer it to
2765 * permanent memory in CacheMemoryContext. This minimizes the risk of
2766 * leaking memory from CacheMemoryContext in the event of an error partway
2767 * through.
2768 */
2769 maxitems = 64;
2770 items = palloc_array(EnumItem, maxitems);
2771 numitems = 0;
2772
2773 /* Scan pg_enum for the members of the target enum type. */
2777 ObjectIdGetDatum(tcache->type_id));
2778
2782 true, NULL,
2783 1, &skey);
2784
2786 {
2788
2789 if (numitems >= maxitems)
2790 {
2791 maxitems *= 2;
2792 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2793 }
2794 items[numitems].enum_oid = en->oid;
2795 items[numitems].sort_order = en->enumsortorder;
2796 numitems++;
2797 }
2798
2801
2802 /* Sort the items into OID order */
2803 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2804
2805 /*
2806 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2807 * known to be in order and can thus be compared with just OID comparison.
2808 *
2809 * The point of this is that the enum's initial OIDs were certainly in
2810 * order, so there is some subset that can be compared via OID comparison;
2811 * and we'd rather not do binary searches unnecessarily.
2812 *
2813 * This is somewhat heuristic, and might identify a subset of OIDs that
2814 * isn't exactly what the type started with. That's okay as long as the
2815 * subset is correctly sorted.
2816 */
2817 bitmap_base = InvalidOid;
2818 bitmap = NULL;
2819 bm_size = 1; /* only save sets of at least 2 OIDs */
2820
2821 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2822 {
2823 /*
2824 * Identify longest sorted subsequence starting at start_pos
2825 */
2827 int this_bm_size = 1;
2828 Oid start_oid = items[start_pos].enum_oid;
2829 float4 prev_order = items[start_pos].sort_order;
2830 int i;
2831
2832 for (i = start_pos + 1; i < numitems; i++)
2833 {
2834 Oid offset;
2835
2836 offset = items[i].enum_oid - start_oid;
2837 /* quit if bitmap would be too large; cutoff is arbitrary */
2838 if (offset >= 8192)
2839 break;
2840 /* include the item if it's in-order */
2841 if (items[i].sort_order > prev_order)
2842 {
2843 prev_order = items[i].sort_order;
2844 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2845 this_bm_size++;
2846 }
2847 }
2848
2849 /* Remember it if larger than previous best */
2850 if (this_bm_size > bm_size)
2851 {
2852 bms_free(bitmap);
2853 bitmap_base = start_oid;
2854 bitmap = this_bitmap;
2856 }
2857 else
2859
2860 /*
2861 * Done if it's not possible to find a longer sequence in the rest of
2862 * the list. In typical cases this will happen on the first
2863 * iteration, which is why we create the bitmaps on the fly instead of
2864 * doing a second pass over the list.
2865 */
2866 if (bm_size >= (numitems - start_pos - 1))
2867 break;
2868 }
2869
2870 /* OK, copy the data into CacheMemoryContext */
2873 palloc(offsetof(TypeCacheEnumData, enum_values) +
2874 numitems * sizeof(EnumItem));
2875 enumdata->bitmap_base = bitmap_base;
2876 enumdata->sorted_values = bms_copy(bitmap);
2877 enumdata->num_values = numitems;
2878 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2880
2881 pfree(items);
2882 bms_free(bitmap);
2883
2884 /* And link the finished cache struct into the typcache */
2885 if (tcache->enumData != NULL)
2886 pfree(tcache->enumData);
2887 tcache->enumData = enumdata;
2888}

References AccessShareLock, bms_add_member(), bms_copy(), bms_free(), bms_make_singleton(), BTEqualStrategyNumber, CacheMemoryContext, enum_oid_cmp(), TypeCacheEntry::enumData, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), GETSTRUCT(), HeapTupleIsValid, i, InvalidOid, items, MemoryContextSwitchTo(), ObjectIdGetDatum(), palloc(), palloc_array, pfree(), qsort, repalloc(), ScanKeyInit(), systable_beginscan(), systable_endscan(), systable_getnext(), table_close(), table_open(), TypeCacheEntry::type_id, and TypeCacheEntry::typtype.

Referenced by compare_values_of_enum().

◆ load_multirangetype_info()

static void load_multirangetype_info ( TypeCacheEntry typentry)
static

Definition at line 1064 of file typcache.c.

1065{
1066 Oid rangetypeId;
1067
1068 rangetypeId = get_multirange_range(typentry->type_id);
1069 if (!OidIsValid(rangetypeId))
1070 elog(ERROR, "cache lookup failed for multirange type %u",
1071 typentry->type_id);
1072
1073 typentry->rngtype = lookup_type_cache(rangetypeId, TYPECACHE_RANGE_INFO);
1074}

References elog, ERROR, fb(), get_multirange_range(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngtype, TypeCacheEntry::type_id, and TYPECACHE_RANGE_INFO.

Referenced by cache_multirange_element_properties(), and lookup_type_cache().

◆ load_rangetype_info()

static void load_rangetype_info ( TypeCacheEntry typentry)
static

Definition at line 1006 of file typcache.c.

1007{
1009 HeapTuple tup;
1015 Oid opcintype;
1016 Oid cmpFnOid;
1017
1018 /* get information from pg_range */
1020 /* should not fail, since we already checked typtype ... */
1021 if (!HeapTupleIsValid(tup))
1022 elog(ERROR, "cache lookup failed for range type %u",
1023 typentry->type_id);
1025
1026 subtypeOid = pg_range->rngsubtype;
1027 typentry->rng_collation = pg_range->rngcollation;
1028 opclassOid = pg_range->rngsubopc;
1029 canonicalOid = pg_range->rngcanonical;
1030 subdiffOid = pg_range->rngsubdiff;
1031
1033
1034 /* get opclass properties and look up the comparison function */
1037 typentry->rng_opfamily = opfamilyOid;
1038
1039 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1040 BTORDER_PROC);
1042 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1043 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1044
1045 /* set up cached fmgrinfo structs */
1054
1055 /* Lastly, set up link to the element type --- this marks data valid */
1057}

References BTORDER_PROC, CacheMemoryContext, elog, ERROR, fb(), fmgr_info_cxt(), get_opclass_family(), get_opclass_input_type(), get_opfamily_proc(), GETSTRUCT(), HeapTupleIsValid, lookup_type_cache(), ObjectIdGetDatum(), OidIsValid, RegProcedureIsValid, ReleaseSysCache(), TypeCacheEntry::rng_canonical_finfo, TypeCacheEntry::rng_cmp_proc_finfo, TypeCacheEntry::rng_collation, TypeCacheEntry::rng_opfamily, TypeCacheEntry::rng_subdiff_finfo, TypeCacheEntry::rngelemtype, SearchSysCache1(), and TypeCacheEntry::type_id.

Referenced by cache_range_element_properties(), and lookup_type_cache().
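
Callers reach this indirectly through lookup_type_cache(..., TYPECACHE_RANGE_INFO). A sketch of using the cached support information (rangetypid, d1 and d2 are assumed to be supplied; cmp follows the usual btree convention):

TypeCacheEntry *typcache = lookup_type_cache(rangetypid, TYPECACHE_RANGE_INFO);
int32           cmp;

if (typcache->rngelemtype == NULL)
    elog(ERROR, "type %u is not a range type", rangetypid);

/* Compare two subtype values with the cached btree support function. */
cmp = DatumGetInt32(FunctionCall2Coll(&typcache->rng_cmp_proc_finfo,
                                      typcache->rng_collation,
                                      d1, d2));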

◆ load_typcache_tupdesc()

static void load_typcache_tupdesc ( TypeCacheEntry typentry)
static

Definition at line 972 of file typcache.c.

973{
974 Relation rel;
975
976 if (!OidIsValid(typentry->typrelid)) /* should not happen */
977 elog(ERROR, "invalid typrelid for composite type %u",
978 typentry->type_id);
979 rel = relation_open(typentry->typrelid, AccessShareLock);
980 Assert(rel->rd_rel->reltype == typentry->type_id);
981
982 /*
983 * Link to the tupdesc and increment its refcount (we assert it's a
984 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
985 * because the reference mustn't be entered in the current resource owner;
986 * it can outlive the current query.
987 */
988 typentry->tupDesc = RelationGetDescr(rel);
989
990 Assert(typentry->tupDesc->tdrefcount > 0);
991 typentry->tupDesc->tdrefcount++;
992
993 /*
994 * In future, we could take some pains to not change tupDesc_identifier if
995 * the tupdesc didn't really change; but for now it's not worth it.
996 */
998
1000}

References AccessShareLock, Assert, elog, ERROR, OidIsValid, RelationData::rd_rel, relation_close(), relation_open(), RelationGetDescr, TupleDescData::tdrefcount, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, tupledesc_id_counter, TypeCacheEntry::type_id, and TypeCacheEntry::typrelid.

Referenced by cache_record_field_properties(), and lookup_type_cache().

◆ lookup_rowtype_tupdesc()

◆ lookup_rowtype_tupdesc_copy()

TupleDesc lookup_rowtype_tupdesc_copy ( Oid  type_id,
int32  typmod 
)

◆ lookup_rowtype_tupdesc_domain()

TupleDesc lookup_rowtype_tupdesc_domain ( Oid  type_id,
int32  typmod,
bool  noError 
)

Definition at line 1980 of file typcache.c.

1981{
1982 TupleDesc tupDesc;
1983
1984 if (type_id != RECORDOID)
1985 {
1986 /*
1987 * Check for domain or named composite type. We might as well load
1988 * whichever data is needed.
1989 */
1990 TypeCacheEntry *typentry;
1991
1992 typentry = lookup_type_cache(type_id,
1995 if (typentry->typtype == TYPTYPE_DOMAIN)
1997 typentry->domainBaseTypmod,
1998 noError);
1999 if (typentry->tupDesc == NULL && !noError)
2000 ereport(ERROR,
2002 errmsg("type %s is not composite",
2003 format_type_be(type_id))));
2004 tupDesc = typentry->tupDesc;
2005 }
2006 else
2007 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2008 if (tupDesc != NULL)
2009 PinTupleDesc(tupDesc);
2010 return tupDesc;
2011}

References TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), lookup_rowtype_tupdesc_internal(), lookup_rowtype_tupdesc_noerror(), lookup_type_cache(), PinTupleDesc, TypeCacheEntry::tupDesc, TYPECACHE_DOMAIN_BASE_INFO, TYPECACHE_TUPDESC, and TypeCacheEntry::typtype.

Referenced by ExecEvalWholeRowVar(), hstore_from_record(), hstore_populate_record(), plperl_sv_to_datum(), and rowtype_field_matches().
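
Usage sketch (compositeOid is assumed to be supplied by the caller): the returned descriptor is pinned for the caller, so it must be released when no longer needed.

TupleDesc   tupdesc;

tupdesc = lookup_rowtype_tupdesc_domain(compositeOid, -1, true);
if (tupdesc == NULL)
    return false;               /* noError = true: not a composite type */

for (int i = 0; i < tupdesc->natts; i++)
{
    Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

    if (!attr->attisdropped)
        elog(DEBUG1, "column \"%s\" has type %u",
             NameStr(attr->attname), attr->atttypid);
}

ReleaseTupleDesc(tupdesc);      /* drop the pin taken by the lookup */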

◆ lookup_rowtype_tupdesc_internal()

static TupleDesc lookup_rowtype_tupdesc_internal ( Oid  type_id,
int32  typmod,
bool  noError 
)
static

Definition at line 1830 of file typcache.c.

1831{
1832 if (type_id != RECORDOID)
1833 {
1834 /*
1835 * It's a named composite type, so use the regular typcache.
1836 */
1837 TypeCacheEntry *typentry;
1838
1839 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1840 if (typentry->tupDesc == NULL && !noError)
1841 ereport(ERROR,
1843 errmsg("type %s is not composite",
1844 format_type_be(type_id))));
1845 return typentry->tupDesc;
1846 }
1847 else
1848 {
1849 /*
1850 * It's a transient record type, so look in our record-type table.
1851 */
1852 if (typmod >= 0)
1853 {
1854 /* Is it already in our local cache? */
1855 if (typmod < RecordCacheArrayLen &&
1856 RecordCacheArray[typmod].tupdesc != NULL)
1857 return RecordCacheArray[typmod].tupdesc;
1858
1859 /* Are we attached to a shared record typmod registry? */
1861 {
1863
1864 /* Try to find it in the shared typmod index. */
1866 &typmod, false);
1867 if (entry != NULL)
1868 {
1869 TupleDesc tupdesc;
1870
1871 tupdesc = (TupleDesc)
1873 entry->shared_tupdesc);
1874 Assert(typmod == tupdesc->tdtypmod);
1875
1876 /* We may need to extend the local RecordCacheArray. */
1878
1879 /*
1880 * Our local array can now point directly to the TupleDesc
1881 * in shared memory, which is non-reference-counted.
1882 */
1883 RecordCacheArray[typmod].tupdesc = tupdesc;
1884 Assert(tupdesc->tdrefcount == -1);
1885
1886 /*
1887 * We don't share tupdesc identifiers across processes, so
1888 * assign one locally.
1889 */
1891
1893 entry);
1894
1895 return RecordCacheArray[typmod].tupdesc;
1896 }
1897 }
1898 }
1899
1900 if (!noError)
1901 ereport(ERROR,
1903 errmsg("record type has not been registered")));
1904 return NULL;
1905 }
1906}

References Session::area, Assert, CurrentSession, dsa_get_address(), dshash_find(), dshash_release_lock(), ensure_record_cache_typmod_slot_exists(), ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), RecordCacheArrayEntry::id, lookup_type_cache(), RecordCacheArray, RecordCacheArrayLen, SharedTypmodTableEntry::shared_tupdesc, Session::shared_typmod_registry, Session::shared_typmod_table, TupleDescData::tdrefcount, TupleDescData::tdtypmod, RecordCacheArrayEntry::tupdesc, TypeCacheEntry::tupDesc, tupledesc_id_counter, and TYPECACHE_TUPDESC.

Referenced by lookup_rowtype_tupdesc(), lookup_rowtype_tupdesc_copy(), lookup_rowtype_tupdesc_domain(), and lookup_rowtype_tupdesc_noerror().

◆ lookup_rowtype_tupdesc_noerror()

TupleDesc lookup_rowtype_tupdesc_noerror ( Oid  type_id,
int32  typmod,
bool  noError 
)

Definition at line 1941 of file typcache.c.

1942{
1943 TupleDesc tupDesc;
1944
1945 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1946 if (tupDesc != NULL)
1947 PinTupleDesc(tupDesc);
1948 return tupDesc;
1949}

References fb(), lookup_rowtype_tupdesc_internal(), and PinTupleDesc.

Referenced by lookup_rowtype_tupdesc_domain().

◆ lookup_type_cache()

TypeCacheEntry * lookup_type_cache ( Oid  type_id,
int  flags 
)

Definition at line 389 of file typcache.c.

390{
391 TypeCacheEntry *typentry;
392 bool found;
394
395 if (TypeCacheHash == NULL)
396 {
397 /* First time through: initialize the hash table */
398 HASHCTL ctl;
399 int allocsize;
400
401 ctl.keysize = sizeof(Oid);
402 ctl.entrysize = sizeof(TypeCacheEntry);
403
404 /*
405 * TypeCacheEntry takes its hash value from the system cache. For
406 * TypeCacheHash we use the same hash in order to speed up searches by
407 * hash value. This is used by hash_seq_init_with_hash_value().
408 */
409 ctl.hash = type_cache_syshash;
410
411 TypeCacheHash = hash_create("Type information cache", 64,
413
415
416 ctl.keysize = sizeof(Oid);
417 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
418 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
420
421 /* Also set up callbacks for SI invalidations */
426
427 /* Also make sure CacheMemoryContext exists */
430
431 /*
432 * reserve enough in_progress_list slots for many cases
433 */
434 allocsize = 4;
437 allocsize * sizeof(*in_progress_list));
438 in_progress_list_maxlen = allocsize;
439 }
440
442
443 /* Register to catch invalidation messages */
445 {
446 int allocsize;
447
448 allocsize = in_progress_list_maxlen * 2;
450 allocsize * sizeof(*in_progress_list));
451 in_progress_list_maxlen = allocsize;
452 }
455
456 /* Try to look up an existing entry */
458 &type_id,
459 HASH_FIND, NULL);
460 if (typentry == NULL)
461 {
462 /*
463 * If we didn't find one, we want to make one. But first look up the
464 * pg_type row, just to make sure we don't make a cache entry for an
465 * invalid type OID. If the type OID is not valid, present a
466 * user-facing error, since some code paths such as domain_in() allow
467 * this function to be reached with a user-supplied OID.
468 */
469 HeapTuple tp;
471
473 if (!HeapTupleIsValid(tp))
476 errmsg("type with OID %u does not exist", type_id)));
478 if (!typtup->typisdefined)
481 errmsg("type \"%s\" is only a shell",
482 NameStr(typtup->typname))));
483
484 /* Now make the typcache entry */
486 &type_id,
487 HASH_ENTER, &found);
488 Assert(!found); /* it wasn't there a moment ago */
489
490 MemSet(typentry, 0, sizeof(TypeCacheEntry));
491
492 /* These fields can never change, by definition */
493 typentry->type_id = type_id;
494 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
495
496 /* Keep this part in sync with the code below */
497 typentry->typlen = typtup->typlen;
498 typentry->typbyval = typtup->typbyval;
499 typentry->typalign = typtup->typalign;
500 typentry->typstorage = typtup->typstorage;
501 typentry->typtype = typtup->typtype;
502 typentry->typrelid = typtup->typrelid;
503 typentry->typsubscript = typtup->typsubscript;
504 typentry->typelem = typtup->typelem;
505 typentry->typarray = typtup->typarray;
506 typentry->typcollation = typtup->typcollation;
507 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
508
509 /* If it's a domain, immediately thread it into the domain cache list */
510 if (typentry->typtype == TYPTYPE_DOMAIN)
511 {
513 firstDomainTypeEntry = typentry;
514 }
515
516 ReleaseSysCache(tp);
517 }
518 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
519 {
520 /*
521 * We have an entry, but its pg_type row got changed, so reload the
522 * data obtained directly from pg_type.
523 */
524 HeapTuple tp;
526
528 if (!HeapTupleIsValid(tp))
531 errmsg("type with OID %u does not exist", type_id)));
533 if (!typtup->typisdefined)
536 errmsg("type \"%s\" is only a shell",
537 NameStr(typtup->typname))));
538
539 /*
540 * Keep this part in sync with the code above. Many of these fields
541 * shouldn't ever change, particularly typtype, but copy 'em anyway.
542 */
543 typentry->typlen = typtup->typlen;
544 typentry->typbyval = typtup->typbyval;
545 typentry->typalign = typtup->typalign;
546 typentry->typstorage = typtup->typstorage;
547 typentry->typtype = typtup->typtype;
548 typentry->typrelid = typtup->typrelid;
549 typentry->typsubscript = typtup->typsubscript;
550 typentry->typelem = typtup->typelem;
551 typentry->typarray = typtup->typarray;
552 typentry->typcollation = typtup->typcollation;
553 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
554
555 ReleaseSysCache(tp);
556 }
557
558 /*
559 * Look up opclasses if we haven't already and any dependent info is
560 * requested.
561 */
567 {
568 Oid opclass;
569
570 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
571 if (OidIsValid(opclass))
572 {
573 typentry->btree_opf = get_opclass_family(opclass);
574 typentry->btree_opintype = get_opclass_input_type(opclass);
575 }
576 else
577 {
578 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
579 }
580
581 /*
582 * Reset information derived from btree opclass. Note in particular
583 * that we'll redetermine the eq_opr even if we previously found one;
584 * this matters in case a btree opclass has been added to a type that
585 * previously had only a hash opclass.
586 */
587 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
592 }
593
594 /*
595 * If we need to look up equality operator, and there's no btree opclass,
596 * force lookup of hash opclass.
597 */
598 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
599 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
600 typentry->btree_opf == InvalidOid)
602
607 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
608 {
609 Oid opclass;
610
611 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
612 if (OidIsValid(opclass))
613 {
614 typentry->hash_opf = get_opclass_family(opclass);
615 typentry->hash_opintype = get_opclass_input_type(opclass);
616 }
617 else
618 {
619 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
620 }
621
622 /*
623 * Reset information derived from hash opclass. We do *not* reset the
624 * eq_opr; if we already found one from the btree opclass, that
625 * decision is still good.
626 */
627 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
630 }
631
632 /*
633 * Look for requested operators and functions, if we haven't already.
634 */
635 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
636 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
637 {
638 Oid eq_opr = InvalidOid;
639
640 if (typentry->btree_opf != InvalidOid)
641 eq_opr = get_opfamily_member(typentry->btree_opf,
642 typentry->btree_opintype,
643 typentry->btree_opintype,
645 if (eq_opr == InvalidOid &&
646 typentry->hash_opf != InvalidOid)
647 eq_opr = get_opfamily_member(typentry->hash_opf,
648 typentry->hash_opintype,
649 typentry->hash_opintype,
651
652 /*
653 * If the proposed equality operator is array_eq or record_eq, check
654 * to see if the element type or column types support equality. If
655 * not, array_eq or record_eq would fail at runtime, so we don't want
656 * to report that the type has equality. (We can omit similar
657 * checking for ranges and multiranges because ranges can't be created
658 * in the first place unless their subtypes support equality.)
659 */
660 if (eq_opr == ARRAY_EQ_OP &&
662 eq_opr = InvalidOid;
663 else if (eq_opr == RECORD_EQ_OP &&
665 eq_opr = InvalidOid;
666
667 /* Force update of eq_opr_finfo only if we're changing state */
668 if (typentry->eq_opr != eq_opr)
669 typentry->eq_opr_finfo.fn_oid = InvalidOid;
670
671 typentry->eq_opr = eq_opr;
672
673 /*
674 * Reset info about hash functions whenever we pick up new info about
675 * equality operator. This is so we can ensure that the hash
676 * functions match the operator.
677 */
678 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
680 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
681 }
682 if ((flags & TYPECACHE_LT_OPR) &&
683 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
684 {
685 Oid lt_opr = InvalidOid;
686
687 if (typentry->btree_opf != InvalidOid)
688 lt_opr = get_opfamily_member(typentry->btree_opf,
689 typentry->btree_opintype,
690 typentry->btree_opintype,
692
693 /*
694 * As above, make sure array_cmp or record_cmp will succeed; but again
695 * we need no special check for ranges or multiranges.
696 */
697 if (lt_opr == ARRAY_LT_OP &&
698 !array_element_has_compare(typentry))
699 lt_opr = InvalidOid;
700 else if (lt_opr == RECORD_LT_OP &&
702 lt_opr = InvalidOid;
703
704 typentry->lt_opr = lt_opr;
705 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
706 }
707 if ((flags & TYPECACHE_GT_OPR) &&
708 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
709 {
710 Oid gt_opr = InvalidOid;
711
712 if (typentry->btree_opf != InvalidOid)
713 gt_opr = get_opfamily_member(typentry->btree_opf,
714 typentry->btree_opintype,
715 typentry->btree_opintype,
717
718 /*
719 * As above, make sure array_cmp or record_cmp will succeed; but again
720 * we need no special check for ranges or multiranges.
721 */
722 if (gt_opr == ARRAY_GT_OP &&
723 !array_element_has_compare(typentry))
724 gt_opr = InvalidOid;
725 else if (gt_opr == RECORD_GT_OP &&
727 gt_opr = InvalidOid;
728
729 typentry->gt_opr = gt_opr;
730 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
731 }
733 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
734 {
735 Oid cmp_proc = InvalidOid;
736
737 if (typentry->btree_opf != InvalidOid)
738 cmp_proc = get_opfamily_proc(typentry->btree_opf,
739 typentry->btree_opintype,
740 typentry->btree_opintype,
742
743 /*
744 * As above, make sure array_cmp or record_cmp will succeed; but again
745 * we need no special check for ranges or multiranges.
746 */
747 if (cmp_proc == F_BTARRAYCMP &&
748 !array_element_has_compare(typentry))
749 cmp_proc = InvalidOid;
750 else if (cmp_proc == F_BTRECORDCMP &&
752 cmp_proc = InvalidOid;
753
754 /* Force update of cmp_proc_finfo only if we're changing state */
755 if (typentry->cmp_proc != cmp_proc)
756 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
757
758 typentry->cmp_proc = cmp_proc;
759 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
760 }
762 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
763 {
764 Oid hash_proc = InvalidOid;
765
766 /*
767 * We insist that the eq_opr, if one has been determined, match the
768 * hash opclass; else report there is no hash function.
769 */
770 if (typentry->hash_opf != InvalidOid &&
771 (!OidIsValid(typentry->eq_opr) ||
772 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
773 typentry->hash_opintype,
774 typentry->hash_opintype,
776 hash_proc = get_opfamily_proc(typentry->hash_opf,
777 typentry->hash_opintype,
778 typentry->hash_opintype,
780
781 /*
782 * As above, make sure hash_array, hash_record, or hash_range will
783 * succeed.
784 */
785 if (hash_proc == F_HASH_ARRAY &&
786 !array_element_has_hashing(typentry))
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RECORD &&
790 hash_proc = InvalidOid;
791 else if (hash_proc == F_HASH_RANGE &&
792 !range_element_has_hashing(typentry))
793 hash_proc = InvalidOid;
794
795 /*
796 * Likewise for hash_multirange.
797 */
798 if (hash_proc == F_HASH_MULTIRANGE &&
800 hash_proc = InvalidOid;
801
802 /* Force update of hash_proc_finfo only if we're changing state */
803 if (typentry->hash_proc != hash_proc)
805
806 typentry->hash_proc = hash_proc;
807 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
808 }
809 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
812 {
813 Oid hash_extended_proc = InvalidOid;
814
815 /*
816 * We insist that the eq_opr, if one has been determined, match the
817 * hash opclass; else report there is no hash function.
818 */
819 if (typentry->hash_opf != InvalidOid &&
820 (!OidIsValid(typentry->eq_opr) ||
821 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
822 typentry->hash_opintype,
823 typentry->hash_opintype,
825 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
826 typentry->hash_opintype,
827 typentry->hash_opintype,
829
830 /*
831 * As above, make sure hash_array_extended, hash_record_extended, or
832 * hash_range_extended will succeed.
833 */
834 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
842 hash_extended_proc = InvalidOid;
843
844 /*
845 * Likewise for hash_multirange_extended.
846 */
847 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
849 hash_extended_proc = InvalidOid;
850
851 /* Force update of proc finfo only if we're changing state */
852 if (typentry->hash_extended_proc != hash_extended_proc)
854
855 typentry->hash_extended_proc = hash_extended_proc;
857 }
858
859 /*
860 * Set up fmgr lookup info as requested
861 *
862 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
863 * which is not quite right (they're really in the hash table's private
864 * memory context) but this will do for our purposes.
865 *
866 * Note: the code above avoids invalidating the finfo structs unless the
867 * referenced operator/function OID actually changes. This is to prevent
868 * unnecessary leakage of any subsidiary data attached to an finfo, since
869 * that would cause session-lifespan memory leaks.
870 */
871 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
872 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
873 typentry->eq_opr != InvalidOid)
874 {
876
877 eq_opr_func = get_opcode(typentry->eq_opr);
878 if (eq_opr_func != InvalidOid)
881 }
882 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
883 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
884 typentry->cmp_proc != InvalidOid)
885 {
886 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
888 }
889 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
890 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
891 typentry->hash_proc != InvalidOid)
892 {
893 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
895 }
898 typentry->hash_extended_proc != InvalidOid)
899 {
901 &typentry->hash_extended_proc_finfo,
903 }
904
905 /*
906 * If it's a composite type (row type), get tupdesc if requested
907 */
908 if ((flags & TYPECACHE_TUPDESC) &&
909 typentry->tupDesc == NULL &&
910 typentry->typtype == TYPTYPE_COMPOSITE)
911 {
912 load_typcache_tupdesc(typentry);
913 }
914
915 /*
916 * If requested, get information about a range type
917 *
918 * This includes making sure that the basic info about the range element
919 * type is up-to-date.
920 */
921 if ((flags & TYPECACHE_RANGE_INFO) &&
922 typentry->typtype == TYPTYPE_RANGE)
923 {
924 if (typentry->rngelemtype == NULL)
925 load_rangetype_info(typentry);
926 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
927 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
928 }
929
930 /*
931 * If requested, get information about a multirange type
932 */
933 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
934 typentry->rngtype == NULL &&
935 typentry->typtype == TYPTYPE_MULTIRANGE)
936 {
937 load_multirangetype_info(typentry);
938 }
939
940 /*
941 * If requested, get information about a domain type
942 */
943 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
944 typentry->domainBaseType == InvalidOid &&
945 typentry->typtype == TYPTYPE_DOMAIN)
946 {
947 typentry->domainBaseTypmod = -1;
948 typentry->domainBaseType =
949 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
950 }
951 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
952 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
953 typentry->typtype == TYPTYPE_DOMAIN)
954 {
955 load_domaintype_info(typentry);
956 }
957
958 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
959
962
964
965 return typentry;
966}

References array_element_has_compare(), array_element_has_equality(), array_element_has_extended_hashing(), array_element_has_hashing(), Assert, BTEqualStrategyNumber, BTGreaterStrategyNumber, BTLessStrategyNumber, BTORDER_PROC, TypeCacheEntry::btree_opf, TypeCacheEntry::btree_opintype, CacheMemoryContext, CacheRegisterRelcacheCallback(), CacheRegisterSyscacheCallback(), TypeCacheEntry::cmp_proc, TypeCacheEntry::cmp_proc_finfo, CreateCacheMemoryContext(), ctl, TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, TypeCacheEntry::eq_opr, TypeCacheEntry::eq_opr_finfo, ereport, errcode(), errmsg(), ERROR, fb(), firstDomainTypeEntry, TypeCacheEntry::flags, fmgr_info_cxt(), FmgrInfo::fn_oid, get_hash_value(), get_opclass_family(), get_opclass_input_type(), get_opcode(), get_opfamily_member(), get_opfamily_proc(), getBaseTypeAndTypmod(), GetDefaultOpClass(), GETSTRUCT(), TypeCacheEntry::gt_opr, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, TypeCacheEntry::hash_extended_proc, TypeCacheEntry::hash_extended_proc_finfo, HASH_FIND, HASH_FUNCTION, TypeCacheEntry::hash_opf, TypeCacheEntry::hash_opintype, TypeCacheEntry::hash_proc, TypeCacheEntry::hash_proc_finfo, hash_search(), HASHEXTENDED_PROC, HASHSTANDARD_PROC, HeapTupleIsValid, HTEqualStrategyNumber, in_progress_list, in_progress_list_len, in_progress_list_maxlen, INJECTION_POINT, insert_rel_type_cache_if_needed(), InvalidOid, HASHCTL::keysize, load_domaintype_info(), load_multirangetype_info(), load_rangetype_info(), load_typcache_tupdesc(), lookup_type_cache(), TypeCacheEntry::lt_opr, MemoryContextAlloc(), MemSet, multirange_element_has_extended_hashing(), multirange_element_has_hashing(), NameStr, TypeCacheEntry::nextDomain, ObjectIdGetDatum(), OidIsValid, range_element_has_extended_hashing(), range_element_has_hashing(), record_fields_have_compare(), record_fields_have_equality(), record_fields_have_extended_hashing(), record_fields_have_hashing(), ReleaseSysCache(), RelIdToTypeIdCacheHash, repalloc(), TypeCacheEntry::rngelemtype, TypeCacheEntry::rngtype, SearchSysCache1(), TCFLAGS_CHECKED_BTREE_OPCLASS, TCFLAGS_CHECKED_CMP_PROC, TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TCFLAGS_CHECKED_EQ_OPR, TCFLAGS_CHECKED_GT_OPR, TCFLAGS_CHECKED_HASH_EXTENDED_PROC, TCFLAGS_CHECKED_HASH_OPCLASS, TCFLAGS_CHECKED_HASH_PROC, TCFLAGS_CHECKED_LT_OPR, TCFLAGS_HAVE_PG_TYPE_DATA, TypeCacheEntry::tupDesc, TypeCacheEntry::typalign, TypeCacheEntry::typarray, TypeCacheEntry::typbyval, TypeCacheEntry::typcollation, type_cache_syshash(), TypeCacheEntry::type_id, TypeCacheEntry::type_id_hash, TYPECACHE_BTREE_OPFAMILY, TYPECACHE_CMP_PROC, TYPECACHE_CMP_PROC_FINFO, TYPECACHE_DOMAIN_BASE_INFO, TYPECACHE_DOMAIN_CONSTR_INFO, TYPECACHE_EQ_OPR, TYPECACHE_EQ_OPR_FINFO, TYPECACHE_GT_OPR, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_EXTENDED_PROC_FINFO, TYPECACHE_HASH_OPFAMILY, TYPECACHE_HASH_PROC, TYPECACHE_HASH_PROC_FINFO, TYPECACHE_LT_OPR, TYPECACHE_MULTIRANGE_INFO, TYPECACHE_RANGE_INFO, TYPECACHE_TUPDESC, TypeCacheConstrCallback(), TypeCacheHash, TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), TypeCacheEntry::typelem, TypeCacheEntry::typlen, TypeCacheEntry::typrelid, TypeCacheEntry::typstorage, TypeCacheEntry::typsubscript, and TypeCacheEntry::typtype.

Referenced by analyzeCTE(), appendOrderBySuffix(), array_cmp(), array_contain_compare(), array_eq(), array_position_common(), array_positions(), array_replace_internal(), array_reverse(), array_sample(), array_shuffle(), array_sort_internal(), array_typanalyze(), assign_record_type_identifier(), brin_bloom_opcinfo(), brin_inclusion_opcinfo(), brin_minmax_multi_opcinfo(), brin_minmax_opcinfo(), build_datatype(), build_mss(), cache_array_element_properties(), cache_multirange_element_properties(), cache_range_element_properties(), cache_record_field_properties(), calc_arraycontsel(), check_exclusion_or_unique_constraint(), check_memoizable(), contain_leaked_vars_walker(), create_grouping_expr_infos(), CreateStatistics(), dependency_degree(), domain_state_setup(), DomainHasConstraints(), enum_cmp_internal(), ExecInitExprRec(), find_simplified_clause(), foreign_expr_walker(), get_cached_rowtype(), get_multirange_io_data(), get_range_io_data(), get_rule_orderby(), get_sort_group_operators(), GinBufferInit(), hash_array(), hash_array_extended(), hash_multirange(), hash_multirange_extended(), hash_range(), hash_range_extended(), hash_record(), hash_record_extended(), init_grouping_targets(), InitDomainConstraintRef(), initGinState(), IsIndexUsableForReplicaIdentityFull(), load_multirangetype_info(), load_rangetype_info(), lookup_rowtype_tupdesc_domain(), lookup_rowtype_tupdesc_internal(), lookup_type_cache(), make_expanded_record_from_tupdesc(), make_expanded_record_from_typeid(), multirange_get_typcache(), multirange_minus_multi(), multirange_unnest(), ndistinct_for_combination(), op_hashjoinable(), op_mergejoinable(), paraminfo_get_equal_hashops(), PLy_input_setup_func(), PLy_output_setup_func(), range_fast_cmp(), range_get_typcache(), range_minus_multi(), record_cmp(), record_eq(), revalidate_rectypeid(), scalararraysel(), scalararraysel_containment(), show_sortorder_options(), statatt_get_elem_type(), statatt_get_type(), statext_mcv_serialize(), tuples_equal(), tuplesort_begin_index_gin(), and width_bucket_array().
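
As a usage illustration (not part of typcache.c), a caller that needs ordering support for a type, loosely modeled on array_cmp() and the other callers listed above, might look like the following sketch; example_get_compare_info is an invented name:

	#include "postgres.h"

	#include "utils/builtins.h"
	#include "utils/typcache.h"

	/* Hypothetical helper: look up btree comparison support for a type. */
	static TypeCacheEntry *
	example_get_compare_info(Oid element_type)
	{
		TypeCacheEntry *typentry;

		/* One call can request several kinds of cached information at once. */
		typentry = lookup_type_cache(element_type,
									 TYPECACHE_EQ_OPR |
									 TYPECACHE_CMP_PROC_FINFO);

		if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_FUNCTION),
					 errmsg("could not identify a comparison function for type %s",
							format_type_be(element_type))));

		/* typentry->cmp_proc_finfo can now be passed to FunctionCall2Coll(). */
		return typentry;
	}

The returned pointer remains valid for the rest of the backend's lifetime: typcache entries are never removed, only their contents are invalidated and refetched on demand.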

◆ multirange_element_has_extended_hashing()

static bool multirange_element_has_extended_hashing ( TypeCacheEntry *typentry)
static

◆ multirange_element_has_hashing()

static bool multirange_element_has_hashing ( TypeCacheEntry *typentry)
static

◆ prep_domain_constraints()

static List * prep_domain_constraints ( List *constraints,
MemoryContext  execctx 
)
static

Definition at line 1366 of file typcache.c.

1367{
1368 List *result = NIL;
1369 MemoryContext oldcxt;
1370 ListCell *lc;
1371
1372 oldcxt = MemoryContextSwitchTo(execctx);
1373
1374 foreach(lc, constraints)
1375 {
1376 DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1377 DomainConstraintState *newr;
1378
1379 newr = makeNode(DomainConstraintState);
1380 newr->constrainttype = r->constrainttype;
1381 newr->name = r->name;
1382 newr->check_expr = r->check_expr;
1383 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1384
1385 result = lappend(result, newr);
1386 }
1387
1388 MemoryContextSwitchTo(oldcxt);
1389
1390 return result;
1391}

References DomainConstraintState::check_expr, DomainConstraintState::constrainttype, ExecInitExpr(), fb(), lappend(), lfirst, makeNode, MemoryContextSwitchTo(), DomainConstraintState::name, and NIL.

Referenced by InitDomainConstraintRef(), and UpdateDomainConstraintRef().

◆ range_element_has_extended_hashing()

static bool range_element_has_extended_hashing ( TypeCacheEntry *typentry)
static

◆ range_element_has_hashing()

static bool range_element_has_hashing ( TypeCacheEntry *typentry)
static

Definition at line 1717 of file typcache.c.

1718{
1719 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1720 cache_range_element_properties(typentry);
1721 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1722}

References cache_range_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_HASHING.

Referenced by lookup_type_cache().

◆ record_fields_have_compare()

static bool record_fields_have_compare ( TypeCacheEntry *typentry)
static

Definition at line 1588 of file typcache.c.

1589{
1590 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1591 cache_record_field_properties(typentry);
1592 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1593}

References cache_record_field_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_FIELD_PROPERTIES, and TCFLAGS_HAVE_FIELD_COMPARE.

Referenced by lookup_type_cache().

◆ record_fields_have_equality()

static bool record_fields_have_equality ( TypeCacheEntry *typentry)
static

◆ record_fields_have_extended_hashing()

static bool record_fields_have_extended_hashing ( TypeCacheEntry *typentry)
static

◆ record_fields_have_hashing()

static bool record_fields_have_hashing ( TypeCacheEntry *typentry)
static

Definition at line 1596 of file typcache.c.

1597{
1598 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1599 cache_record_field_properties(typentry);
1600 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1601}

References cache_record_field_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_FIELD_PROPERTIES, and TCFLAGS_HAVE_FIELD_HASHING.

Referenced by lookup_type_cache().

◆ record_type_typmod_compare()

static int record_type_typmod_compare ( const void *a,
const void *b,
size_t  size 
)
static

Definition at line 2028 of file typcache.c.

2029{
2030 const RecordCacheEntry *left = a;
2031 const RecordCacheEntry *right = b;
2032
2033 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2034}

References a, b, equalRowTypes(), and RecordCacheEntry::tupdesc.

Referenced by assign_record_type_typmod().

◆ record_type_typmod_hash()

static uint32 record_type_typmod_hash ( const void *data,
size_t  size 
)
static

Definition at line 2017 of file typcache.c.

2018{
2019 const RecordCacheEntry *entry = data;
2020
2021 return hashRowType(entry->tupdesc);
2022}

References data, hashRowType(), and RecordCacheEntry::tupdesc.

Referenced by assign_record_type_typmod().
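
record_type_typmod_hash() and record_type_typmod_compare() exist only to serve as dynahash callbacks for RecordCacheHash, which is keyed by a TupleDesc pointer but hashed and compared by row-type content. A rough sketch of the hash_create() call that wires them in (the real call lives in assign_record_type_typmod(); the table name and initial size shown here are illustrative):

	HASHCTL		ctl;

	ctl.keysize = sizeof(TupleDesc);	/* just the pointer, not the struct */
	ctl.entrysize = sizeof(RecordCacheEntry);
	ctl.hash = record_type_typmod_hash;
	ctl.match = record_type_typmod_compare;
	RecordCacheHash = hash_create("Record information cache", 64,
								  &ctl,
								  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);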

◆ share_tupledesc()

static dsa_pointer share_tupledesc ( dsa_area *area,
TupleDesc  tupdesc,
uint32  typmod 
)
static

Definition at line 2924 of file typcache.c.

2925{
2926 dsa_pointer shared_dp;
2927 TupleDesc shared;
2928
2929 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2930 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2931 TupleDescCopy(shared, tupdesc);
2932 shared->tdtypmod = typmod;
2933
2934 return shared_dp;
2935}

References dsa_allocate, dsa_get_address(), fb(), TupleDescData::tdtypmod, TupleDescCopy(), and TupleDescSize.

Referenced by find_or_make_matching_shared_tupledesc(), and SharedRecordTypmodRegistryInit().
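
The returned dsa_pointer is only meaningful together with the dsa_area it was allocated in; any backend attached to that area can map it back to a usable TupleDesc, which is what the shared record table code does. A minimal sketch (variable names invented):

	dsa_pointer dp = share_tupledesc(area, tupdesc, typmod);
	TupleDesc	shared_copy = (TupleDesc) dsa_get_address(area, dp);

	Assert(shared_copy->tdtypmod == (int32) typmod);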

◆ shared_record_table_compare()

static int shared_record_table_compare ( const void *a,
const void *b,
size_t  size,
void *arg 
)
static

Definition at line 234 of file typcache.c.

236{
237 dsa_area *area = (dsa_area *) arg;
238 const SharedRecordTableKey *k1 = a;
239 const SharedRecordTableKey *k2 = b;
240 TupleDesc t1;
241 TupleDesc t2;
242
243 if (k1->shared)
244 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
249 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 else
251 t2 = k2->u.local_tupdesc;
252
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}

References a, arg, b, dsa_get_address(), equalRowTypes(), and fb().

◆ shared_record_table_hash()

static uint32 shared_record_table_hash ( const void *a,
size_t  size,
void *arg 
)
static

Definition at line 260 of file typcache.c.

261{
262 dsa_area *area = arg;
263 const SharedRecordTableKey *k = a;
264 TupleDesc t;
265
266 if (k->shared)
267 t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}

References a, arg, dsa_get_address(), hashRowType(), SharedRecordTableKey::local_tupdesc, SharedRecordTableKey::shared, SharedRecordTableKey::shared_tupdesc, and SharedRecordTableKey::u.
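
Both callbacks accept a key in either representation, which is what lets a backend probe the shared table with a backend-local TupleDesc before any shared copy exists. A sketch of building a local-side key, following the pattern visible in SharedRecordTypmodRegistryInit():

	SharedRecordTableKey key;

	key.shared = false;				/* not (yet) copied into the dsa_area */
	key.u.local_tupdesc = tupdesc;	/* probe with the backend-local TupleDesc */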

◆ shared_record_typmod_registry_detach()

static void shared_record_typmod_registry_detach ( dsm_segment *segment,
Datum  datum 
)
static

◆ SharedRecordTypmodRegistryAttach()

void SharedRecordTypmodRegistryAttach ( SharedRecordTypmodRegistry *registry)

Definition at line 2298 of file typcache.c.

2299{
2300 MemoryContext old_context;
2301 dshash_table *record_table;
2302 dshash_table *typmod_table;
2303
2304 Assert(IsParallelWorker());
2305
2306 /* We can't already be attached to a shared registry. */
2307 Assert(CurrentSession != NULL);
2308 Assert(CurrentSession->segment != NULL);
2309 Assert(CurrentSession->area != NULL);
2310 Assert(CurrentSession->shared_record_table == NULL);
2311 Assert(CurrentSession->shared_typmod_table == NULL);
2312 Assert(CurrentSession->shared_typmod_registry == NULL);
2313
2314 /*
2315 * We can't already have typmods in our local cache, because they'd clash
2316 * with those imported by SharedRecordTypmodRegistryInit. This should be
2317 * a freshly started parallel worker. If we ever support worker
2318 * recycling, a worker would need to zap its local cache in between
2319 * servicing different queries, in order to be able to call this and
2320 * synchronize typmods with a new leader; but that's problematic because
2321 * we can't be very sure that record-typmod-related state hasn't escaped
2322 * to anywhere else in the process.
2323 */
2324 Assert(NextRecordTypmod == 0);
2325
2326 old_context = MemoryContextSwitchTo(TopMemoryContext);
2327
2328 /* Attach to the two hash tables. */
2329 record_table = dshash_attach(CurrentSession->area,
2330 &srtr_record_table_params,
2331 registry->record_table_handle,
2332 CurrentSession->area);
2333 typmod_table = dshash_attach(CurrentSession->area,
2334 &srtr_typmod_table_params,
2335 registry->typmod_table_handle,
2336 NULL);
2337
2338 MemoryContextSwitchTo(old_context);
2339
2340 /*
2341 * Set up detach hook to run at worker exit. Currently this is the same
2342 * as the leader's detach hook, but in future they might need to be
2343 * different.
2344 */
2345 on_dsm_detach(CurrentSession->segment,
2346 shared_record_typmod_registry_detach,
2347 PointerGetDatum(registry));
2348
2349 /*
2350 * Set up the session state that will tell assign_record_type_typmod and
2351 * lookup_rowtype_tupdesc_internal about the shared registry.
2352 */
2353 CurrentSession->shared_typmod_registry = registry;
2354 CurrentSession->shared_record_table = record_table;
2355 CurrentSession->shared_typmod_table = typmod_table;
2356}

References Session::area, Assert, CurrentSession, dshash_attach(), fb(), IsParallelWorker, MemoryContextSwitchTo(), NextRecordTypmod, on_dsm_detach(), PointerGetDatum(), Session::segment, Session::shared_record_table, shared_record_typmod_registry_detach(), Session::shared_typmod_registry, Session::shared_typmod_table, srtr_record_table_params, srtr_typmod_table_params, and TopMemoryContext.

Referenced by AttachSession().

◆ SharedRecordTypmodRegistryEstimate()

size_t SharedRecordTypmodRegistryEstimate ( void  )

Definition at line 2177 of file typcache.c.

2178{
2179 return sizeof(SharedRecordTypmodRegistry);
2180}

Referenced by GetSessionDsmHandle().

◆ SharedRecordTypmodRegistryInit()

void SharedRecordTypmodRegistryInit ( SharedRecordTypmodRegistry *registry,
dsm_segment *segment,
dsa_area *area 
)

Definition at line 2199 of file typcache.c.

2202{
2203 MemoryContext old_context;
2204 dshash_table *record_table;
2205 dshash_table *typmod_table;
2206 int32 typmod;
2207
2208 Assert(!IsParallelWorker());
2209
2210 /* We can't already be attached to a shared registry. */
2211 Assert(CurrentSession->shared_typmod_registry == NULL);
2212 Assert(CurrentSession->shared_record_table == NULL);
2213 Assert(CurrentSession->shared_typmod_table == NULL);
2214
2215 old_context = MemoryContextSwitchTo(TopMemoryContext);
2216
2217 /* Create the hash table of tuple descriptors indexed by themselves. */
2218 record_table = dshash_create(area, &srtr_record_table_params, area);
2219
2220 /* Create the hash table of tuple descriptors indexed by typmod. */
2221 typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2222
2223 MemoryContextSwitchTo(old_context);
2224
2225 /* Initialize the SharedRecordTypmodRegistry. */
2226 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2227 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2228 pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
2229
2230 /*
2231 * Copy all entries from this backend's private registry into the shared
2232 * registry.
2233 */
2234 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2235 {
2236 SharedTypmodTableEntry *typmod_table_entry;
2237 SharedRecordTableEntry *record_table_entry;
2238 SharedRecordTableKey record_table_key;
2239 dsa_pointer shared_dp;
2240 TupleDesc tupdesc;
2241 bool found;
2242
2243 tupdesc = RecordCacheArray[typmod].tupdesc;
2244 if (tupdesc == NULL)
2245 continue;
2246
2247 /* Copy the TupleDesc into shared memory. */
2248 shared_dp = share_tupledesc(area, tupdesc, typmod);
2249
2250 /* Insert into the typmod table. */
2251 typmod_table_entry = dshash_find_or_insert(typmod_table,
2252 &tupdesc->tdtypmod,
2253 &found);
2254 if (found)
2255 elog(ERROR, "cannot create duplicate shared record typmod");
2256 typmod_table_entry->typmod = tupdesc->tdtypmod;
2257 typmod_table_entry->shared_tupdesc = shared_dp;
2258 dshash_release_lock(typmod_table, typmod_table_entry);
2259
2260 /* Insert into the record table. */
2261 record_table_key.shared = false;
2262 record_table_key.u.local_tupdesc = tupdesc;
2263 record_table_entry = dshash_find_or_insert(record_table,
2264 &record_table_key,
2265 &found);
2266 if (!found)
2267 {
2268 record_table_entry->key.shared = true;
2269 record_table_entry->key.u.shared_tupdesc = shared_dp;
2270 }
2271 dshash_release_lock(record_table, record_table_entry);
2272 }
2273
2274 /*
2275 * Set up the global state that will tell assign_record_type_typmod and
2276 * lookup_rowtype_tupdesc_internal about the shared registry.
2277 */
2278 CurrentSession->shared_typmod_registry = registry;
2279 CurrentSession->shared_record_table = record_table;
2280 CurrentSession->shared_typmod_table = typmod_table;
2281
2282 /*
2283 * We install a detach hook in the leader, but only to handle cleanup on
2284 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2285 * the memory, the leader process will use a shared registry until it
2286 * exits.
2287 */
2288 on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2289}

References Assert, CurrentSession, dshash_create(), dshash_find_or_insert(), dshash_get_hash_table_handle(), dshash_release_lock(), elog, ERROR, fb(), IsParallelWorker, MemoryContextSwitchTo(), NextRecordTypmod, on_dsm_detach(), pg_atomic_init_u32(), RecordCacheArray, share_tupledesc(), Session::shared_record_table, shared_record_typmod_registry_detach(), Session::shared_typmod_registry, Session::shared_typmod_table, srtr_record_table_params, srtr_typmod_table_params, TupleDescData::tdtypmod, TopMemoryContext, and RecordCacheArrayEntry::tupdesc.

Referenced by GetSessionDsmHandle().
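
Taken together with SharedRecordTypmodRegistryEstimate() and SharedRecordTypmodRegistryAttach(), the intended call sequence is roughly the sketch below. In core this is driven by GetSessionDsmHandle() and AttachSession(); the shm_toc usage and EXAMPLE_KEY_REGISTRY here are only illustrative:

	/* Leader, while building the per-session DSM segment: */
	SharedRecordTypmodRegistry *registry;

	registry = shm_toc_allocate(toc, SharedRecordTypmodRegistryEstimate());
	SharedRecordTypmodRegistryInit(registry, segment, area);
	shm_toc_insert(toc, EXAMPLE_KEY_REGISTRY, registry);

	/* Parallel worker, after attaching to the same segment and dsa_area: */
	registry = shm_toc_lookup(toc, EXAMPLE_KEY_REGISTRY, false);
	SharedRecordTypmodRegistryAttach(registry);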

◆ type_cache_syshash()

static uint32 type_cache_syshash ( const void *key,
Size  keysize 
)
static

Definition at line 362 of file typcache.c.

363{
364 Assert(keysize == sizeof(Oid));
365 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
366}

References Assert, fb(), GetSysCacheHashValue1, and ObjectIdGetDatum().

Referenced by lookup_type_cache().
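
Using the TYPEOID syscache hash here means each TypeCacheHash entry is stored under the same hash value that invalidation messages for its pg_type row carry, so TypeCacheTypCallback() can use hash_seq_init_with_hash_value() to visit only the affected entries. A rough sketch of the corresponding hash_create() call in lookup_type_cache() (the initial size shown is illustrative):

	HASHCTL		ctl;

	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(TypeCacheEntry);
	ctl.hash = type_cache_syshash;	/* must match TYPEOID syscache hashing */
	TypeCacheHash = hash_create("Type information cache", 64,
								&ctl, HASH_ELEM | HASH_FUNCTION);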

◆ TypeCacheConstrCallback()

static void TypeCacheConstrCallback ( Datum  arg,
SysCacheIdentifier  cacheid,
uint32  hashvalue 
)
static

Definition at line 2613 of file typcache.c.

2614{
2615 TypeCacheEntry *typentry;
2616
2617 /*
2618 * Because this is called very frequently, and typically very few of the
2619 * typcache entries are for domains, we don't use hash_seq_search here.
2620 * Instead we thread all the domain-type entries together so that we can
2621 * visit them cheaply.
2622 */
2623 for (typentry = firstDomainTypeEntry;
2624 typentry != NULL;
2625 typentry = typentry->nextDomain)
2626 {
2627 /* Reset domain constraint validity information */
2628 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2629 }
2630}

References fb(), firstDomainTypeEntry, TypeCacheEntry::flags, and TypeCacheEntry::nextDomain.

Referenced by lookup_type_cache().

◆ TypeCacheOpcCallback()

static void TypeCacheOpcCallback ( Datum  arg,
SysCacheIdentifier  cacheid,
uint32  hashvalue 
)
static

Definition at line 2575 of file typcache.c.

2576{
2577 HASH_SEQ_STATUS status;
2578 TypeCacheEntry *typentry;
2579
2580 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2581 hash_seq_init(&status, TypeCacheHash);
2582 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2583 {
2584 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2585
2586 /* Reset equality/comparison/hashing validity information */
2587 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2588
2589 /*
2590 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2591 * of TCFLAGS_OPERATOR_FLAGS.
2592 */
2593 if (hadOpclass)
2594 delete_rel_type_cache_if_needed(typentry);
2595 }
2596}

References delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, hash_seq_init(), hash_seq_search(), TCFLAGS_OPERATOR_FLAGS, and TypeCacheHash.

Referenced by lookup_type_cache().

◆ TypeCacheRelCallback()

static void TypeCacheRelCallback ( Datum  arg,
Oid  relid 
)
static

Definition at line 2422 of file typcache.c.

2423{
2424 TypeCacheEntry *typentry;
2425
2426 /*
2427 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2428 * callback wouldn't be registered
2429 */
2430 if (OidIsValid(relid))
2431 {
2432 RelIdToTypeIdCacheEntry *relentry;
2433
2434 /*
2435 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2436 * corresponding typcache entry has something to clean.
2437 */
2438 relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
2439 &relid,
2440 HASH_FIND, NULL);
2441
2442 if (relentry != NULL)
2443 {
2444 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2445 &relentry->composite_typid,
2446 HASH_FIND, NULL);
2447
2448 if (typentry != NULL)
2449 {
2450 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2451 Assert(relid == typentry->typrelid);
2452
2453 InvalidateCompositeTypeCacheEntry(typentry);
2454 }
2455 }
2456
2457 /*
2458 * Visit all the domain types sequentially. Typically, this shouldn't
2459 * affect performance since domain types are less tended to bloat.
2460 * Domain types are created manually, unlike composite types which are
2461 * automatically created for every temporary table.
2462 */
2463 for (typentry = firstDomainTypeEntry;
2464 typentry != NULL;
2465 typentry = typentry->nextDomain)
2466 {
2467 /*
2468 * If it's domain over composite, reset flags. (We don't bother
2469 * trying to determine whether the specific base type needs a
2470 * reset.) Note that if we haven't determined whether the base
2471 * type is composite, we don't need to reset anything.
2472 */
2473 if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2474 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2475 }
2476 }
2477 else
2478 {
2479 HASH_SEQ_STATUS status;
2480
2481 /*
2482 * Relid is invalid. By convention, we need to reset all composite
2483 * types in cache. Also, we should reset flags for domain types, and
2484 * we loop over all entries in hash, so, do it in a single scan.
2485 */
2486 hash_seq_init(&status, TypeCacheHash);
2487 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2488 {
2489 if (typentry->typtype == TYPTYPE_COMPOSITE)
2490 {
2491 InvalidateCompositeTypeCacheEntry(typentry);
2492 }
2493 else if (typentry->typtype == TYPTYPE_DOMAIN)
2494 {
2495 /*
2496 * If it's domain over composite, reset flags. (We don't
2497 * bother trying to determine whether the specific base type
2498 * needs a reset.) Note that if we haven't determined whether
2499 * the base type is composite, we don't need to reset
2500 * anything.
2501 */
2502 if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2503 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2504 }
2505 }
2506 }
2507}

References Assert, fb(), firstDomainTypeEntry, TypeCacheEntry::flags, HASH_FIND, hash_search(), hash_seq_init(), hash_seq_search(), InvalidateCompositeTypeCacheEntry(), TypeCacheEntry::nextDomain, OidIsValid, RelIdToTypeIdCacheHash, TCFLAGS_DOMAIN_BASE_IS_COMPOSITE, TypeCacheHash, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by lookup_type_cache().

◆ TypeCacheTypCallback()

static void TypeCacheTypCallback ( Datum  arg,
SysCacheIdentifier  cacheid,
uint32  hashvalue 
)
static

Definition at line 2518 of file typcache.c.

2519{
2520 HASH_SEQ_STATUS status;
2521 TypeCacheEntry *typentry;
2522
2523 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2524
2525 /*
2526 * By convention, zero hash value is passed to the callback as a sign that
2527 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2528 * InvalidateSystemCachesExtended().
2529 */
2530 if (hashvalue == 0)
2531 hash_seq_init(&status, TypeCacheHash);
2532 else
2533 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2534
2535 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2536 {
2537 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2538
2539 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2540
2541 /*
2542 * Mark the data obtained directly from pg_type as invalid. Also, if
2543 * it's a domain, typnotnull might've changed, so we'll need to
2544 * recalculate its constraints.
2545 */
2546 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2547 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2548
2549 /*
2550 * Call delete_rel_type_cache_if_needed() if we cleaned
2551 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2552 */
2553 if (hadPgTypeData)
2554 delete_rel_type_cache_if_needed(typentry);
2555 }
2556}

References Assert, delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, hash_seq_init(), hash_seq_init_with_hash_value(), hash_seq_search(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TCFLAGS_HAVE_PG_TYPE_DATA, TypeCacheEntry::type_id_hash, and TypeCacheHash.

Referenced by lookup_type_cache().
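
All four invalidation callbacks in this file are registered once, when lookup_type_cache() first creates TypeCacheHash. The registrations amount to the following sketch, reconstructed from the References list of lookup_type_cache(); see that function for the exact context:

	/* relcache events: composite types and domains over composites */
	CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
	/* pg_type changes */
	CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
	/* pg_opclass changes */
	CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
	/* pg_constraint changes (domain constraints) */
	CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);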

◆ UpdateDomainConstraintRef()

void UpdateDomainConstraintRef ( DomainConstraintRef *ref)

Definition at line 1442 of file typcache.c.

1443{
1444 TypeCacheEntry *typentry = ref->tcache;
1445
1446 /* Make sure typcache entry's data is up to date */
1447 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1448 typentry->typtype == TYPTYPE_DOMAIN)
1449 load_domaintype_info(typentry);
1450
1451 /* Transfer to ref object if there's new info, adjusting refcounts */
1452 if (ref->dcc != typentry->domainData)
1453 {
1454 /* Paranoia --- be sure link is nulled before trying to release */
1455 DomainConstraintCache *dcc = ref->dcc;
1456
1457 if (dcc)
1458 {
1459 /*
1460 * Note: we just leak the previous list of executable domain
1461 * constraints. Alternatively, we could keep those in a child
1462 * context of ref->refctx and free that context at this point.
1463 * However, in practice this code path will be taken so seldom
1464 * that the extra bookkeeping for a child context doesn't seem
1465 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1466 */
1467 ref->constraints = NIL;
1468 ref->dcc = NULL;
1469 decr_dcc_refcount(dcc);
1470 }
1471 dcc = typentry->domainData;
1472 if (dcc)
1473 {
1474 ref->dcc = dcc;
1475 dcc->dccRefCount++;
1476 if (ref->need_exprstate)
1477 ref->constraints = prep_domain_constraints(dcc->constraints,
1478 ref->refctx);
1479 else
1480 ref->constraints = dcc->constraints;
1481 }
1482 }
1483}

References DomainConstraintCache::constraints, DomainConstraintCache::dccRefCount, decr_dcc_refcount(), TypeCacheEntry::domainData, fb(), TypeCacheEntry::flags, load_domaintype_info(), NIL, prep_domain_constraints(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, and TypeCacheEntry::typtype.

Referenced by domain_check_input().
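
The expected call pattern, loosely modeled on the domain_check_input() code path in domains.c, is to refresh the ref before each use and then walk ref->constraints. In the sketch below, ref, econtext, value, and isnull are assumed to have been set up by the caller:

	ListCell   *lc;

	UpdateDomainConstraintRef(ref);		/* pick up any constraint changes */

	foreach(lc, ref->constraints)
	{
		DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);

		if (con->constrainttype == DOM_CONSTRAINT_NOTNULL && isnull)
			ereport(ERROR,
					(errcode(ERRCODE_NOT_NULL_VIOLATION),
					 errmsg("domain does not allow null values")));
		else if (con->constrainttype == DOM_CONSTRAINT_CHECK)
		{
			Datum		res;
			bool		resIsNull;

			/* expose the value being tested to CoerceToDomainValue nodes */
			econtext->domainValue_datum = value;
			econtext->domainValue_isNull = isnull;

			res = ExecEvalExprSwitchContext(con->check_exprstate, econtext,
											&resIsNull);
			if (!resIsNull && !DatumGetBool(res))
				ereport(ERROR,
						(errcode(ERRCODE_CHECK_VIOLATION),
						 errmsg("value for domain violates check constraint \"%s\"",
								con->name)));
		}
	}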

Variable Documentation

◆ firstDomainTypeEntry

TypeCacheEntry* firstDomainTypeEntry = NULL
static

Definition at line 96 of file typcache.c.

Referenced by lookup_type_cache(), TypeCacheConstrCallback(), and TypeCacheRelCallback().

◆ in_progress_list

Oid* in_progress_list
static

◆ in_progress_list_len

int in_progress_list_len
static

◆ in_progress_list_maxlen

int in_progress_list_maxlen
static

Definition at line 228 of file typcache.c.

Referenced by lookup_type_cache().

◆ NextRecordTypmod

int32 NextRecordTypmod = 0
static

◆ RecordCacheArray

◆ RecordCacheArrayLen

int32 RecordCacheArrayLen = 0
static

◆ RecordCacheHash

HTAB* RecordCacheHash = NULL
static

Definition at line 295 of file typcache.c.

Referenced by assign_record_type_typmod().

◆ RelIdToTypeIdCacheHash

HTAB* RelIdToTypeIdCacheHash = NULL
static

◆ srtr_record_table_params

◆ srtr_typmod_table_params

◆ tupledesc_id_counter

◆ TypeCacheHash