PostgreSQL Source Code git master
Loading...
Searching...
No Matches
typcache.c File Reference
#include "postgres.h"
#include <limits.h>
#include "access/hash.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/parallel.h"
#include "access/relation.h"
#include "access/session.h"
#include "access/table.h"
#include "catalog/pg_am.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_enum.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_range.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "common/int.h"
#include "executor/executor.h"
#include "lib/dshash.h"
#include "optimizer/optimizer.h"
#include "port/pg_bitutils.h"
#include "storage/lwlock.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/fmgroids.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/typcache.h"
Include dependency graph for typcache.c:

Go to the source code of this file.

Data Structures

struct  RelIdToTypeIdCacheEntry
 
struct  DomainConstraintCache
 
struct  EnumItem
 
struct  TypeCacheEnumData
 
struct  RecordCacheEntry
 
struct  SharedRecordTypmodRegistry
 
struct  SharedRecordTableKey
 
struct  SharedRecordTableEntry
 
struct  SharedTypmodTableEntry
 
struct  RecordCacheArrayEntry
 

Macros

#define TCFLAGS_HAVE_PG_TYPE_DATA   0x000001
 
#define TCFLAGS_CHECKED_BTREE_OPCLASS   0x000002
 
#define TCFLAGS_CHECKED_HASH_OPCLASS   0x000004
 
#define TCFLAGS_CHECKED_EQ_OPR   0x000008
 
#define TCFLAGS_CHECKED_LT_OPR   0x000010
 
#define TCFLAGS_CHECKED_GT_OPR   0x000020
 
#define TCFLAGS_CHECKED_CMP_PROC   0x000040
 
#define TCFLAGS_CHECKED_HASH_PROC   0x000080
 
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC   0x000100
 
#define TCFLAGS_CHECKED_ELEM_PROPERTIES   0x000200
 
#define TCFLAGS_HAVE_ELEM_EQUALITY   0x000400
 
#define TCFLAGS_HAVE_ELEM_COMPARE   0x000800
 
#define TCFLAGS_HAVE_ELEM_HASHING   0x001000
 
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING   0x002000
 
#define TCFLAGS_CHECKED_FIELD_PROPERTIES   0x004000
 
#define TCFLAGS_HAVE_FIELD_EQUALITY   0x008000
 
#define TCFLAGS_HAVE_FIELD_COMPARE   0x010000
 
#define TCFLAGS_HAVE_FIELD_HASHING   0x020000
 
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING   0x040000
 
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS   0x080000
 
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE   0x100000
 
#define TCFLAGS_OPERATOR_FLAGS
 

Typedefs

typedef struct RelIdToTypeIdCacheEntry RelIdToTypeIdCacheEntry
 
typedef struct TypeCacheEnumData TypeCacheEnumData
 
typedef struct RecordCacheEntry RecordCacheEntry
 
typedef struct SharedRecordTableKey SharedRecordTableKey
 
typedef struct SharedRecordTableEntry SharedRecordTableEntry
 
typedef struct SharedTypmodTableEntry SharedTypmodTableEntry
 
typedef struct RecordCacheArrayEntry RecordCacheArrayEntry
 

Functions

static int shared_record_table_compare (const void *a, const void *b, size_t size, void *arg)
 
static uint32 shared_record_table_hash (const void *a, size_t size, void *arg)
 
static void load_typcache_tupdesc (TypeCacheEntry *typentry)
 
static void load_rangetype_info (TypeCacheEntry *typentry)
 
static void load_multirangetype_info (TypeCacheEntry *typentry)
 
static void load_domaintype_info (TypeCacheEntry *typentry)
 
static int dcs_cmp (const void *a, const void *b)
 
static void decr_dcc_refcount (DomainConstraintCache *dcc)
 
static void dccref_deletion_callback (void *arg)
 
static Listprep_domain_constraints (List *constraints, MemoryContext execctx)
 
static bool array_element_has_equality (TypeCacheEntry *typentry)
 
static bool array_element_has_compare (TypeCacheEntry *typentry)
 
static bool array_element_has_hashing (TypeCacheEntry *typentry)
 
static bool array_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_array_element_properties (TypeCacheEntry *typentry)
 
static bool record_fields_have_equality (TypeCacheEntry *typentry)
 
static bool record_fields_have_compare (TypeCacheEntry *typentry)
 
static bool record_fields_have_hashing (TypeCacheEntry *typentry)
 
static bool record_fields_have_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_record_field_properties (TypeCacheEntry *typentry)
 
static bool range_element_has_hashing (TypeCacheEntry *typentry)
 
static bool range_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_range_element_properties (TypeCacheEntry *typentry)
 
static bool multirange_element_has_hashing (TypeCacheEntry *typentry)
 
static bool multirange_element_has_extended_hashing (TypeCacheEntry *typentry)
 
static void cache_multirange_element_properties (TypeCacheEntry *typentry)
 
static void TypeCacheRelCallback (Datum arg, Oid relid)
 
static void TypeCacheTypCallback (Datum arg, int cacheid, uint32 hashvalue)
 
static void TypeCacheOpcCallback (Datum arg, int cacheid, uint32 hashvalue)
 
static void TypeCacheConstrCallback (Datum arg, int cacheid, uint32 hashvalue)
 
static void load_enum_cache_data (TypeCacheEntry *tcache)
 
static EnumItemfind_enumitem (TypeCacheEnumData *enumdata, Oid arg)
 
static int enum_oid_cmp (const void *left, const void *right)
 
static void shared_record_typmod_registry_detach (dsm_segment *segment, Datum datum)
 
static TupleDesc find_or_make_matching_shared_tupledesc (TupleDesc tupdesc)
 
static dsa_pointer share_tupledesc (dsa_area *area, TupleDesc tupdesc, uint32 typmod)
 
static void insert_rel_type_cache_if_needed (TypeCacheEntry *typentry)
 
static void delete_rel_type_cache_if_needed (TypeCacheEntry *typentry)
 
static uint32 type_cache_syshash (const void *key, Size keysize)
 
TypeCacheEntrylookup_type_cache (Oid type_id, int flags)
 
void InitDomainConstraintRef (Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
 
void UpdateDomainConstraintRef (DomainConstraintRef *ref)
 
bool DomainHasConstraints (Oid type_id)
 
static void ensure_record_cache_typmod_slot_exists (int32 typmod)
 
static TupleDesc lookup_rowtype_tupdesc_internal (Oid type_id, int32 typmod, bool noError)
 
TupleDesc lookup_rowtype_tupdesc (Oid type_id, int32 typmod)
 
TupleDesc lookup_rowtype_tupdesc_noerror (Oid type_id, int32 typmod, bool noError)
 
TupleDesc lookup_rowtype_tupdesc_copy (Oid type_id, int32 typmod)
 
TupleDesc lookup_rowtype_tupdesc_domain (Oid type_id, int32 typmod, bool noError)
 
static uint32 record_type_typmod_hash (const void *data, size_t size)
 
static int record_type_typmod_compare (const void *a, const void *b, size_t size)
 
void assign_record_type_typmod (TupleDesc tupDesc)
 
uint64 assign_record_type_identifier (Oid type_id, int32 typmod)
 
size_t SharedRecordTypmodRegistryEstimate (void)
 
void SharedRecordTypmodRegistryInit (SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
 
void SharedRecordTypmodRegistryAttach (SharedRecordTypmodRegistry *registry)
 
static void InvalidateCompositeTypeCacheEntry (TypeCacheEntry *typentry)
 
static bool enum_known_sorted (TypeCacheEnumData *enumdata, Oid arg)
 
int compare_values_of_enum (TypeCacheEntry *tcache, Oid arg1, Oid arg2)
 
static void finalize_in_progress_typentries (void)
 
void AtEOXact_TypeCache (void)
 
void AtEOSubXact_TypeCache (void)
 

Variables

static HTABTypeCacheHash = NULL
 
static HTABRelIdToTypeIdCacheHash = NULL
 
static TypeCacheEntryfirstDomainTypeEntry = NULL
 
static Oidin_progress_list
 
static int in_progress_list_len
 
static int in_progress_list_maxlen
 
static const dshash_parameters srtr_record_table_params
 
static const dshash_parameters srtr_typmod_table_params
 
static HTABRecordCacheHash = NULL
 
static RecordCacheArrayEntryRecordCacheArray = NULL
 
static int32 RecordCacheArrayLen = 0
 
static int32 NextRecordTypmod = 0
 
static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER
 

Macro Definition Documentation

◆ TCFLAGS_CHECKED_BTREE_OPCLASS

#define TCFLAGS_CHECKED_BTREE_OPCLASS   0x000002

Definition at line 100 of file typcache.c.

◆ TCFLAGS_CHECKED_CMP_PROC

#define TCFLAGS_CHECKED_CMP_PROC   0x000040

Definition at line 105 of file typcache.c.

◆ TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS

#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS   0x080000

Definition at line 118 of file typcache.c.

◆ TCFLAGS_CHECKED_ELEM_PROPERTIES

#define TCFLAGS_CHECKED_ELEM_PROPERTIES   0x000200

Definition at line 108 of file typcache.c.

◆ TCFLAGS_CHECKED_EQ_OPR

#define TCFLAGS_CHECKED_EQ_OPR   0x000008

Definition at line 102 of file typcache.c.

◆ TCFLAGS_CHECKED_FIELD_PROPERTIES

#define TCFLAGS_CHECKED_FIELD_PROPERTIES   0x004000

Definition at line 113 of file typcache.c.

◆ TCFLAGS_CHECKED_GT_OPR

#define TCFLAGS_CHECKED_GT_OPR   0x000020

Definition at line 104 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_EXTENDED_PROC

#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC   0x000100

Definition at line 107 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_OPCLASS

#define TCFLAGS_CHECKED_HASH_OPCLASS   0x000004

Definition at line 101 of file typcache.c.

◆ TCFLAGS_CHECKED_HASH_PROC

#define TCFLAGS_CHECKED_HASH_PROC   0x000080

Definition at line 106 of file typcache.c.

◆ TCFLAGS_CHECKED_LT_OPR

#define TCFLAGS_CHECKED_LT_OPR   0x000010

Definition at line 103 of file typcache.c.

◆ TCFLAGS_DOMAIN_BASE_IS_COMPOSITE

#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE   0x100000

Definition at line 119 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_COMPARE

#define TCFLAGS_HAVE_ELEM_COMPARE   0x000800

Definition at line 110 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_EQUALITY

#define TCFLAGS_HAVE_ELEM_EQUALITY   0x000400

Definition at line 109 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_EXTENDED_HASHING

#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING   0x002000

Definition at line 112 of file typcache.c.

◆ TCFLAGS_HAVE_ELEM_HASHING

#define TCFLAGS_HAVE_ELEM_HASHING   0x001000

Definition at line 111 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_COMPARE

#define TCFLAGS_HAVE_FIELD_COMPARE   0x010000

Definition at line 115 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_EQUALITY

#define TCFLAGS_HAVE_FIELD_EQUALITY   0x008000

Definition at line 114 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_EXTENDED_HASHING

#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING   0x040000

Definition at line 117 of file typcache.c.

◆ TCFLAGS_HAVE_FIELD_HASHING

#define TCFLAGS_HAVE_FIELD_HASHING   0x020000

Definition at line 116 of file typcache.c.

◆ TCFLAGS_HAVE_PG_TYPE_DATA

#define TCFLAGS_HAVE_PG_TYPE_DATA   0x000001

Definition at line 99 of file typcache.c.

◆ TCFLAGS_OPERATOR_FLAGS

#define TCFLAGS_OPERATOR_FLAGS
Value:
(~(TCFLAGS_HAVE_PG_TYPE_DATA | \
   TCFLAGS_CHECKED_BTREE_OPCLASS | \
   TCFLAGS_CHECKED_HASH_OPCLASS | \
   TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
   TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))

Definition at line 122 of file typcache.c.

139{
140 List *constraints; /* list of DomainConstraintState nodes */
141 MemoryContext dccContext; /* memory context holding all associated data */
142 long dccRefCount; /* number of references to this struct */
143};
144
145/* Private information to support comparisons of enum values */
146typedef struct
147{
148 Oid enum_oid; /* OID of one enum value */
149 float4 sort_order; /* its sort position */
150} EnumItem;
151
152typedef struct TypeCacheEnumData
153{
154 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
155 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
156 int num_values; /* total number of values in enum */
159
160/*
161 * We use a separate table for storing the definitions of non-anonymous
162 * record types. Once defined, a record type will be remembered for the
163 * life of the backend. Subsequent uses of the "same" record type (where
164 * sameness means equalRowTypes) will refer to the existing table entry.
165 *
166 * Stored record types are remembered in a linear array of TupleDescs,
167 * which can be indexed quickly with the assigned typmod. There is also
168 * a hash table to speed searches for matching TupleDescs.
169 */
170
171typedef struct RecordCacheEntry
172{
175
176/*
177 * To deal with non-anonymous record types that are exchanged by backends
178 * involved in a parallel query, we also need a shared version of the above.
179 */
181{
182 /* A hash table for finding a matching TupleDesc. */
184 /* A hash table for finding a TupleDesc by typmod. */
186 /* A source of new record typmod numbers. */
188};
189
190/*
191 * When using shared tuple descriptors as hash table keys we need a way to be
192 * able to search for an equal shared TupleDesc using a backend-local
193 * TupleDesc. So we use this type which can hold either, and hash and compare
194 * functions that know how to handle both.
195 */
196typedef struct SharedRecordTableKey
197{
198 union
199 {
202 } u;
203 bool shared;
205
206/*
207 * The shared version of RecordCacheEntry. This lets us look up a typmod
208 * using a TupleDesc which may be in local or shared memory.
209 */
210typedef struct SharedRecordTableEntry
211{
214
215/*
216 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
217 * up a TupleDesc in shared memory using a typmod.
218 */
219typedef struct SharedTypmodTableEntry
220{
224
225static Oid *in_progress_list;
226static int in_progress_list_len;
227static int in_progress_list_maxlen;
228
229/*
230 * A comparator function for SharedRecordTableKey.
231 */
232static int
233shared_record_table_compare(const void *a, const void *b, size_t size,
234 void *arg)
235{
236 dsa_area *area = (dsa_area *) arg;
237 const SharedRecordTableKey *k1 = a;
238 const SharedRecordTableKey *k2 = b;
241
242 if (k1->shared)
243 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
244 else
245 t1 = k1->u.local_tupdesc;
246
247 if (k2->shared)
248 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
249 else
250 t2 = k2->u.local_tupdesc;
251
252 return equalRowTypes(t1, t2) ? 0 : 1;
253}
254
255/*
256 * A hash function for SharedRecordTableKey.
257 */
258static uint32
259shared_record_table_hash(const void *a, size_t size, void *arg)
260{
261 dsa_area *area = arg;
262 const SharedRecordTableKey *k = a;
263 TupleDesc t;
264
265 if (k->shared)
267 else
268 t = k->u.local_tupdesc;
269
270 return hashRowType(t);
271}
272
273/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
275 sizeof(SharedRecordTableKey), /* unused */
281};
282
283/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
285 sizeof(uint32),
291};
292
293/* hashtable for recognizing registered record types */
294static HTAB *RecordCacheHash = NULL;
295
296typedef struct RecordCacheArrayEntry
297{
298 uint64 id;
301
302/* array of info about registered record types, indexed by assigned typmod */
304static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
305static int32 NextRecordTypmod = 0; /* number of entries used */
306
307/*
308 * Process-wide counter for generating unique tupledesc identifiers.
309 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
310 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
311 */
313
314static void load_typcache_tupdesc(TypeCacheEntry *typentry);
315static void load_rangetype_info(TypeCacheEntry *typentry);
316static void load_multirangetype_info(TypeCacheEntry *typentry);
317static void load_domaintype_info(TypeCacheEntry *typentry);
318static int dcs_cmp(const void *a, const void *b);
320static void dccref_deletion_callback(void *arg);
322static bool array_element_has_equality(TypeCacheEntry *typentry);
323static bool array_element_has_compare(TypeCacheEntry *typentry);
324static bool array_element_has_hashing(TypeCacheEntry *typentry);
327static bool record_fields_have_equality(TypeCacheEntry *typentry);
328static bool record_fields_have_compare(TypeCacheEntry *typentry);
329static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331static void cache_record_field_properties(TypeCacheEntry *typentry);
332static bool range_element_has_hashing(TypeCacheEntry *typentry);
338static void TypeCacheRelCallback(Datum arg, Oid relid);
339static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
340static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
341static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
342static void load_enum_cache_data(TypeCacheEntry *tcache);
344static int enum_oid_cmp(const void *left, const void *right);
346 Datum datum);
348static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
349 uint32 typmod);
352
353
354/*
355 * Hash function compatible with one-arg system cache hash function.
356 */
357static uint32
358type_cache_syshash(const void *key, Size keysize)
359{
360 Assert(keysize == sizeof(Oid));
361 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
362}
363
364/*
365 * lookup_type_cache
366 *
367 * Fetch the type cache entry for the specified datatype, and make sure that
368 * all the fields requested by bits in 'flags' are valid.
369 *
370 * The result is never NULL --- we will ereport() if the passed type OID is
371 * invalid. Note however that we may fail to find one or more of the
372 * values requested by 'flags'; the caller needs to check whether the fields
373 * are InvalidOid or not.
374 *
375 * Note that while filling TypeCacheEntry we might process concurrent
376 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
377 * invalidated. In this case, we typically only clear flags while values are
378 * still available for the caller. It's expected that the caller holds
379 * enough locks on type-depending objects that the values are still relevant.
380 * It's also important that the tupdesc is filled after all other
381 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
382 * invalidated during the lookup_type_cache() call.
383 */
385lookup_type_cache(Oid type_id, int flags)
386{
387 TypeCacheEntry *typentry;
388 bool found;
390
391 if (TypeCacheHash == NULL)
392 {
393 /* First time through: initialize the hash table */
394 HASHCTL ctl;
395 int allocsize;
396
397 ctl.keysize = sizeof(Oid);
398 ctl.entrysize = sizeof(TypeCacheEntry);
399
400 /*
401 * TypeCacheEntry takes hash value from the system cache. For
402 * TypeCacheHash we use the same hash in order to speedup search by
403 * hash value. This is used by hash_seq_init_with_hash_value().
404 */
405 ctl.hash = type_cache_syshash;
406
407 TypeCacheHash = hash_create("Type information cache", 64,
409
411
412 ctl.keysize = sizeof(Oid);
413 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
414 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
416
417 /* Also set up callbacks for SI invalidations */
422
423 /* Also make sure CacheMemoryContext exists */
426
427 /*
428 * reserve enough in_progress_list slots for many cases
429 */
430 allocsize = 4;
433 allocsize * sizeof(*in_progress_list));
434 in_progress_list_maxlen = allocsize;
435 }
436
438
439 /* Register to catch invalidation messages */
441 {
442 int allocsize;
443
444 allocsize = in_progress_list_maxlen * 2;
446 allocsize * sizeof(*in_progress_list));
447 in_progress_list_maxlen = allocsize;
448 }
451
452 /* Try to look up an existing entry */
454 &type_id,
455 HASH_FIND, NULL);
456 if (typentry == NULL)
457 {
458 /*
459 * If we didn't find one, we want to make one. But first look up the
460 * pg_type row, just to make sure we don't make a cache entry for an
461 * invalid type OID. If the type OID is not valid, present a
462 * user-facing error, since some code paths such as domain_in() allow
463 * this function to be reached with a user-supplied OID.
464 */
465 HeapTuple tp;
467
469 if (!HeapTupleIsValid(tp))
472 errmsg("type with OID %u does not exist", type_id)));
474 if (!typtup->typisdefined)
477 errmsg("type \"%s\" is only a shell",
478 NameStr(typtup->typname))));
479
480 /* Now make the typcache entry */
482 &type_id,
483 HASH_ENTER, &found);
484 Assert(!found); /* it wasn't there a moment ago */
485
486 MemSet(typentry, 0, sizeof(TypeCacheEntry));
487
488 /* These fields can never change, by definition */
489 typentry->type_id = type_id;
490 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
491
492 /* Keep this part in sync with the code below */
493 typentry->typlen = typtup->typlen;
494 typentry->typbyval = typtup->typbyval;
495 typentry->typalign = typtup->typalign;
496 typentry->typstorage = typtup->typstorage;
497 typentry->typtype = typtup->typtype;
498 typentry->typrelid = typtup->typrelid;
499 typentry->typsubscript = typtup->typsubscript;
500 typentry->typelem = typtup->typelem;
501 typentry->typarray = typtup->typarray;
502 typentry->typcollation = typtup->typcollation;
503 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
504
505 /* If it's a domain, immediately thread it into the domain cache list */
506 if (typentry->typtype == TYPTYPE_DOMAIN)
507 {
509 firstDomainTypeEntry = typentry;
510 }
511
512 ReleaseSysCache(tp);
513 }
514 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
515 {
516 /*
517 * We have an entry, but its pg_type row got changed, so reload the
518 * data obtained directly from pg_type.
519 */
520 HeapTuple tp;
522
524 if (!HeapTupleIsValid(tp))
527 errmsg("type with OID %u does not exist", type_id)));
529 if (!typtup->typisdefined)
532 errmsg("type \"%s\" is only a shell",
533 NameStr(typtup->typname))));
534
535 /*
536 * Keep this part in sync with the code above. Many of these fields
537 * shouldn't ever change, particularly typtype, but copy 'em anyway.
538 */
539 typentry->typlen = typtup->typlen;
540 typentry->typbyval = typtup->typbyval;
541 typentry->typalign = typtup->typalign;
542 typentry->typstorage = typtup->typstorage;
543 typentry->typtype = typtup->typtype;
544 typentry->typrelid = typtup->typrelid;
545 typentry->typsubscript = typtup->typsubscript;
546 typentry->typelem = typtup->typelem;
547 typentry->typarray = typtup->typarray;
548 typentry->typcollation = typtup->typcollation;
549 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
550
551 ReleaseSysCache(tp);
552 }
553
554 /*
555 * Look up opclasses if we haven't already and any dependent info is
556 * requested.
557 */
563 {
564 Oid opclass;
565
566 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
567 if (OidIsValid(opclass))
568 {
569 typentry->btree_opf = get_opclass_family(opclass);
570 typentry->btree_opintype = get_opclass_input_type(opclass);
571 }
572 else
573 {
574 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
575 }
576
577 /*
578 * Reset information derived from btree opclass. Note in particular
579 * that we'll redetermine the eq_opr even if we previously found one;
580 * this matters in case a btree opclass has been added to a type that
581 * previously had only a hash opclass.
582 */
583 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
588 }
589
590 /*
591 * If we need to look up equality operator, and there's no btree opclass,
592 * force lookup of hash opclass.
593 */
594 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
595 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
596 typentry->btree_opf == InvalidOid)
598
603 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
604 {
605 Oid opclass;
606
607 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
608 if (OidIsValid(opclass))
609 {
610 typentry->hash_opf = get_opclass_family(opclass);
611 typentry->hash_opintype = get_opclass_input_type(opclass);
612 }
613 else
614 {
615 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
616 }
617
618 /*
619 * Reset information derived from hash opclass. We do *not* reset the
620 * eq_opr; if we already found one from the btree opclass, that
621 * decision is still good.
622 */
623 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
626 }
627
628 /*
629 * Look for requested operators and functions, if we haven't already.
630 */
631 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
632 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
633 {
634 Oid eq_opr = InvalidOid;
635
636 if (typentry->btree_opf != InvalidOid)
637 eq_opr = get_opfamily_member(typentry->btree_opf,
638 typentry->btree_opintype,
639 typentry->btree_opintype,
641 if (eq_opr == InvalidOid &&
642 typentry->hash_opf != InvalidOid)
643 eq_opr = get_opfamily_member(typentry->hash_opf,
644 typentry->hash_opintype,
645 typentry->hash_opintype,
647
648 /*
649 * If the proposed equality operator is array_eq or record_eq, check
650 * to see if the element type or column types support equality. If
651 * not, array_eq or record_eq would fail at runtime, so we don't want
652 * to report that the type has equality. (We can omit similar
653 * checking for ranges and multiranges because ranges can't be created
654 * in the first place unless their subtypes support equality.)
655 */
656 if (eq_opr == ARRAY_EQ_OP &&
658 eq_opr = InvalidOid;
659 else if (eq_opr == RECORD_EQ_OP &&
661 eq_opr = InvalidOid;
662
663 /* Force update of eq_opr_finfo only if we're changing state */
664 if (typentry->eq_opr != eq_opr)
665 typentry->eq_opr_finfo.fn_oid = InvalidOid;
666
667 typentry->eq_opr = eq_opr;
668
669 /*
670 * Reset info about hash functions whenever we pick up new info about
671 * equality operator. This is so we can ensure that the hash
672 * functions match the operator.
673 */
674 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
676 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
677 }
678 if ((flags & TYPECACHE_LT_OPR) &&
679 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
680 {
681 Oid lt_opr = InvalidOid;
682
683 if (typentry->btree_opf != InvalidOid)
684 lt_opr = get_opfamily_member(typentry->btree_opf,
685 typentry->btree_opintype,
686 typentry->btree_opintype,
688
689 /*
690 * As above, make sure array_cmp or record_cmp will succeed; but again
691 * we need no special check for ranges or multiranges.
692 */
693 if (lt_opr == ARRAY_LT_OP &&
694 !array_element_has_compare(typentry))
695 lt_opr = InvalidOid;
696 else if (lt_opr == RECORD_LT_OP &&
698 lt_opr = InvalidOid;
699
700 typentry->lt_opr = lt_opr;
701 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
702 }
703 if ((flags & TYPECACHE_GT_OPR) &&
704 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
705 {
706 Oid gt_opr = InvalidOid;
707
708 if (typentry->btree_opf != InvalidOid)
709 gt_opr = get_opfamily_member(typentry->btree_opf,
710 typentry->btree_opintype,
711 typentry->btree_opintype,
713
714 /*
715 * As above, make sure array_cmp or record_cmp will succeed; but again
716 * we need no special check for ranges or multiranges.
717 */
718 if (gt_opr == ARRAY_GT_OP &&
719 !array_element_has_compare(typentry))
720 gt_opr = InvalidOid;
721 else if (gt_opr == RECORD_GT_OP &&
723 gt_opr = InvalidOid;
724
725 typentry->gt_opr = gt_opr;
726 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
727 }
729 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
730 {
731 Oid cmp_proc = InvalidOid;
732
733 if (typentry->btree_opf != InvalidOid)
734 cmp_proc = get_opfamily_proc(typentry->btree_opf,
735 typentry->btree_opintype,
736 typentry->btree_opintype,
738
739 /*
740 * As above, make sure array_cmp or record_cmp will succeed; but again
741 * we need no special check for ranges or multiranges.
742 */
743 if (cmp_proc == F_BTARRAYCMP &&
744 !array_element_has_compare(typentry))
745 cmp_proc = InvalidOid;
746 else if (cmp_proc == F_BTRECORDCMP &&
748 cmp_proc = InvalidOid;
749
750 /* Force update of cmp_proc_finfo only if we're changing state */
751 if (typentry->cmp_proc != cmp_proc)
752 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
753
754 typentry->cmp_proc = cmp_proc;
755 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
756 }
758 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
759 {
760 Oid hash_proc = InvalidOid;
761
762 /*
763 * We insist that the eq_opr, if one has been determined, match the
764 * hash opclass; else report there is no hash function.
765 */
766 if (typentry->hash_opf != InvalidOid &&
767 (!OidIsValid(typentry->eq_opr) ||
768 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
769 typentry->hash_opintype,
770 typentry->hash_opintype,
772 hash_proc = get_opfamily_proc(typentry->hash_opf,
773 typentry->hash_opintype,
774 typentry->hash_opintype,
776
777 /*
778 * As above, make sure hash_array, hash_record, or hash_range will
779 * succeed.
780 */
781 if (hash_proc == F_HASH_ARRAY &&
782 !array_element_has_hashing(typentry))
783 hash_proc = InvalidOid;
784 else if (hash_proc == F_HASH_RECORD &&
786 hash_proc = InvalidOid;
787 else if (hash_proc == F_HASH_RANGE &&
788 !range_element_has_hashing(typentry))
789 hash_proc = InvalidOid;
790
791 /*
792 * Likewise for hash_multirange.
793 */
794 if (hash_proc == F_HASH_MULTIRANGE &&
796 hash_proc = InvalidOid;
797
798 /* Force update of hash_proc_finfo only if we're changing state */
799 if (typentry->hash_proc != hash_proc)
801
802 typentry->hash_proc = hash_proc;
803 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
804 }
805 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
808 {
809 Oid hash_extended_proc = InvalidOid;
810
811 /*
812 * We insist that the eq_opr, if one has been determined, match the
813 * hash opclass; else report there is no hash function.
814 */
815 if (typentry->hash_opf != InvalidOid &&
816 (!OidIsValid(typentry->eq_opr) ||
817 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
818 typentry->hash_opintype,
819 typentry->hash_opintype,
821 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
822 typentry->hash_opintype,
823 typentry->hash_opintype,
825
826 /*
827 * As above, make sure hash_array_extended, hash_record_extended, or
828 * hash_range_extended will succeed.
829 */
830 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
832 hash_extended_proc = InvalidOid;
833 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
835 hash_extended_proc = InvalidOid;
836 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
838 hash_extended_proc = InvalidOid;
839
840 /*
841 * Likewise for hash_multirange_extended.
842 */
843 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
845 hash_extended_proc = InvalidOid;
846
847 /* Force update of proc finfo only if we're changing state */
848 if (typentry->hash_extended_proc != hash_extended_proc)
850
851 typentry->hash_extended_proc = hash_extended_proc;
853 }
854
855 /*
856 * Set up fmgr lookup info as requested
857 *
858 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
859 * which is not quite right (they're really in the hash table's private
860 * memory context) but this will do for our purposes.
861 *
862 * Note: the code above avoids invalidating the finfo structs unless the
863 * referenced operator/function OID actually changes. This is to prevent
864 * unnecessary leakage of any subsidiary data attached to an finfo, since
865 * that would cause session-lifespan memory leaks.
866 */
867 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
868 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
869 typentry->eq_opr != InvalidOid)
870 {
872
873 eq_opr_func = get_opcode(typentry->eq_opr);
874 if (eq_opr_func != InvalidOid)
877 }
878 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
879 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
880 typentry->cmp_proc != InvalidOid)
881 {
882 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
884 }
885 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
886 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
887 typentry->hash_proc != InvalidOid)
888 {
889 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
891 }
894 typentry->hash_extended_proc != InvalidOid)
895 {
897 &typentry->hash_extended_proc_finfo,
899 }
900
901 /*
902 * If it's a composite type (row type), get tupdesc if requested
903 */
904 if ((flags & TYPECACHE_TUPDESC) &&
905 typentry->tupDesc == NULL &&
906 typentry->typtype == TYPTYPE_COMPOSITE)
907 {
908 load_typcache_tupdesc(typentry);
909 }
910
911 /*
912 * If requested, get information about a range type
913 *
914 * This includes making sure that the basic info about the range element
915 * type is up-to-date.
916 */
917 if ((flags & TYPECACHE_RANGE_INFO) &&
918 typentry->typtype == TYPTYPE_RANGE)
919 {
920 if (typentry->rngelemtype == NULL)
921 load_rangetype_info(typentry);
922 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
923 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
924 }
925
926 /*
927 * If requested, get information about a multirange type
928 */
929 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
930 typentry->rngtype == NULL &&
931 typentry->typtype == TYPTYPE_MULTIRANGE)
932 {
933 load_multirangetype_info(typentry);
934 }
935
936 /*
937 * If requested, get information about a domain type
938 */
939 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
940 typentry->domainBaseType == InvalidOid &&
941 typentry->typtype == TYPTYPE_DOMAIN)
942 {
943 typentry->domainBaseTypmod = -1;
944 typentry->domainBaseType =
945 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
946 }
947 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
948 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
949 typentry->typtype == TYPTYPE_DOMAIN)
950 {
951 load_domaintype_info(typentry);
952 }
953
954 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
955
958
960
961 return typentry;
962}
963
964/*
965 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
966 */
static void
/* NOTE(review): signature line elided by extraction — operates on a TypeCacheEntry *typentry */
{
	Relation	rel;

	/* Composite types must carry a valid pg_class OID */
	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/*
	 * In future, we could take some pains to not change tupDesc_identifier if
	 * the tupdesc didn't really change; but for now it's not worth it.
	 */
	/* NOTE(review): identifier assignment and relation_close() lines elided by extraction */

}
997
998/*
999 * load_rangetype_info --- helper routine to set up range type information
1000 */
static void
/* NOTE(review): signature and several local declarations (pg_range tuple, sub-type/opclass Oids) elided by extraction */
{
	HeapTuple	tup;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	/* NOTE(review): SearchSysCache1(RANGETYPE, ...) call elided by extraction */
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);

	/* copy fields of interest out of the pg_range row */
	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	/* get opclass properties and look up the comparison function */
	typentry->rng_opfamily = opfamilyOid;

	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	/* NOTE(review): the validity test on cmpFnOid is elided by extraction */
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs */
	/* NOTE(review): fmgr_info_cxt() calls for cmp/canonical/subdiff elided by extraction */

	/* Lastly, set up link to the element type --- this marks data valid */
}
1054
1055/*
1056 * load_multirangetype_info --- helper routine to set up multirange type
1057 * information
1058 */
static void
/* NOTE(review): signature and the catalog lookup establishing the range type Oid elided by extraction */
{

	/* error out if the multirange's underlying range type can't be found */
		elog(ERROR, "cache lookup failed for multirange type %u",
			 typentry->type_id);

}
1071
1072/*
1073 * load_domaintype_info --- helper routine to set up domain constraint info
1074 *
1075 * Note: we assume we're called in a relatively short-lived context, so it's
1076 * okay to leak data into the current context while scanning pg_constraint.
1077 * We build the new DomainConstraintCache data in a context underneath
1078 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1079 * complete.
1080 */
static void
/*
 * NOTE(review): signature line elided by extraction — takes TypeCacheEntry *typentry.
 * Several declarations (dcc, ccons, conRel, notNullExists, scan tuples) and
 * catalog-access calls are also elided below; elision points are marked.
 */
{
	Oid			typeOid = typentry->type_id;
	bool		notNull = false;
	int			cconslen;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint. Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints. We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	/* NOTE(review): table_open(ConstraintRelationId, ...) elided by extraction */

	for (;;)
	{
		HeapTuple	tup;
		int			nccons = 0;
		ScanKeyData key[1];
		SysScanDesc scan;

		/* NOTE(review): SearchSysCache1(TYPEOID, typeOid) elided by extraction */
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			break;
		}

		/* Test for NOT NULL Constraint */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					ObjectIdGetDatum(typeOid));

					NULL, 1, key);

		{
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;

			/* Ignore non-CHECK constraints */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			/* NOTE(review): fastgetattr(..., Anum_pg_constraint_conbin, ...) elided */
				conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

								"Domain constraints",
				dcc = (DomainConstraintCache *)
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				dcc->dccRefCount = 0;
			}

			/* Convert conbin to a node tree, still in caller's context */
			check_expr = (Expr *) stringToNode(constring);

			/*
			 * Plan the expression, since ExecInitExpr will expect that.
			 *
			 * Note: caching the result of expression_planner() is not very
			 * good practice. Ideally we'd use a CachedExpression here so
			 * that we would react promptly to, eg, changes in inlined
			 * functions. However, because we don't support mutable domain
			 * CHECK constraints, it's not really clear that it's worth the
			 * extra overhead to do that.
			 */
			check_expr = expression_planner(check_expr);

			/* Create only the minimally needed stuff in dccContext */
			/* NOTE(review): MemoryContextSwitchTo + makeNode lines elided */

			r->name = pstrdup(NameStr(c->conname));
			r->check_expr = copyObject(check_expr);
			r->check_exprstate = NULL;

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				cconslen = 8;
			}
			else if (nccons >= cconslen)
			{
				cconslen *= 2;
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
			/* NOTE(review): qsort(ccons, nccons, ..., dcs_cmp) call elided */

			/*
			 * Now attach them to the overall list. Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
		}

		/* loop to next domain in stack */
		typeOid = typTup->typbasetype;
	}

	/* NOTE(review): table_close(conRel, ...) elided by extraction */

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

							"Domain constraints",
			dcc = (DomainConstraintCache *)
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		/* NOTE(review): MemoryContextSwitchTo + makeNode lines elided */

		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;
		r->check_exprstate = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		typentry->domainData = dcc;
		dcc->dccRefCount++;		/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
}
1312
1313/*
1314 * qsort comparator to sort DomainConstraintState pointers by name
1315 */
1316static int
1317dcs_cmp(const void *a, const void *b)
1318{
1319 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1320 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1321
1322 return strcmp((*ca)->name, (*cb)->name);
1323}
1324
1325/*
1326 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1327 * and free it if no references remain
1328 */
static void
/* NOTE(review): signature line elided — takes DomainConstraintCache *dcc */
{
	/* Caller must hold at least one reference */
	Assert(dcc->dccRefCount > 0);
	if (--(dcc->dccRefCount) <= 0)
	/* NOTE(review): MemoryContextDelete(dcc->dccContext) body of the if elided by extraction */
}
1336
1337/*
1338 * Context reset/delete callback for a DomainConstraintRef
1339 */
static void
/* NOTE(review): signature (void *arg) and the cast of arg to DomainConstraintRef *ref elided */
{
	DomainConstraintCache *dcc = ref->dcc;

	/* Paranoia --- be sure link is nulled before trying to release */
	if (dcc)
	{
		ref->constraints = NIL;
		ref->dcc = NULL;
		decr_dcc_refcount(dcc);
	}
}
1354
1355/*
1356 * prep_domain_constraints --- prepare domain constraints for execution
1357 *
1358 * The expression trees stored in the DomainConstraintCache's list are
1359 * converted to executable expression state trees stored in execctx.
1360 */
static List *
/* NOTE(review): signature (List *constraints, MemoryContext execctx) and the
 * MemoryContextSwitchTo(execctx) / switch-back lines are elided by extraction */
{
	List	   *result = NIL;
	ListCell   *lc;

	foreach(lc, constraints)
	{

		/* Shallow-copy the cached node, then build a fresh exprstate for it */
		newr->constrainttype = r->constrainttype;
		newr->name = r->name;
		newr->check_expr = r->check_expr;
		newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);

		result = lappend(result, newr);
	}

	return result;
}
1388
1389/*
1390 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1391 *
1392 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1393 * lives. The ref will be cleaned up when that context is reset/deleted.
1394 *
1395 * Caller must also tell us whether it wants check_exprstate fields to be
1396 * computed in the DomainConstraintState nodes attached to this ref.
1397 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1398 */
void
/* NOTE(review): first signature line (Oid type_id, DomainConstraintRef *ref,) elided by extraction */
					   MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	/* NOTE(review): the lookup_type_cache(...) assignment to ref->tcache is elided here */
	ref->need_exprstate = need_exprstate;
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			ref->constraints = ref->dcc->constraints;
	}
	else
		ref->constraints = NIL;
}
1426
1427/*
1428 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1429 *
1430 * If the domain's constraint set changed, ref->constraints is updated to
1431 * point at a new list of cached constraints.
1432 *
1433 * In the normal case where nothing happened to the domain, this is cheap
1434 * enough that it's reasonable (and expected) to check before *each* use
1435 * of the constraint info.
1436 */
void
/* NOTE(review): signature line elided — takes DomainConstraintRef *ref */
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints. Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		dcc = typentry->domainData;
		if (dcc)
		{
			/* Adopt the new cache object and re-export its constraint list */
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1480
1481/*
1482 * DomainHasConstraints --- utility routine to check if a domain has constraints
1483 *
1484 * This is defined to return false, not fail, if type is not a domain.
1485 */
bool
/* NOTE(review): signature (Oid type_id) and the lookup_type_cache() call that
 * populates 'typentry' are elided by extraction */
{
	TypeCacheEntry *typentry;

	/*
	 * Note: a side effect is to cause the typcache's domain data to become
	 * valid. This is fine since we'll likely need it soon if there is any.
	 */

	return (typentry->domainData != NULL);
}
1499
1500
1501/*
1502 * array_element_has_equality and friends are helper routines to check
1503 * whether we should believe that array_eq and related functions will work
1504 * on the given array type or composite type.
1505 *
1506 * The logic above may call these repeatedly on the same type entry, so we
1507 * make use of the typentry->flags field to cache the results once known.
1508 * Also, we assume that we'll probably want all these facts about the type
1509 * if we want any, so we cache them all using only one lookup of the
1510 * component datatype(s).
1511 */
1512
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the cache_array_element_properties(typentry) call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
}
1520
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
}
1528
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
}
1536
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
}
1544
static void
/* NOTE(review): signature, the get_base_element_type() assignment to elem_type,
 * and the lookup_type_cache() call populating 'elementry' are elided by extraction */
{

	if (OidIsValid(elem_type))
	{

		/* Record which operations the element type itself supports */
		if (OidIsValid(elementry->eq_opr))
			typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
		if (OidIsValid(elementry->cmp_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
		if (OidIsValid(elementry->hash_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
		if (OidIsValid(elementry->hash_extended_proc))
		/* NOTE(review): extended-hashing flag set and CHECKED-flag set lines elided */
	}
}
1570
1571/*
1572 * Likewise, some helper functions for composite types.
1573 */
1574
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the cache_record_field_properties(typentry) call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
}
1582
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
}
1590
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
}
1598
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
}
1606
static void
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry. Several
 * declarations (fieldentry, baseentry) and flag-manipulation lines are elided
 * below; elision points are marked. */
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type. Just assume that
	 * equality and comparison will (we may get a failure at runtime). We
	 * could also claim that hashing works, but then if code that has the
	 * option between a comparison-based (sort-based) and a hash-based plan
	 * chooses hashing, stuff could fail that would otherwise work if it chose
	 * a comparison-based plan. In practice more types support comparison
	 * than hashing.
	 */
	if (typentry->type_id == RECORDOID)
	{
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
		/* NOTE(review): rest of the flag OR-expression elided by extraction */
	}
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		/* NOTE(review): initialization of 'newflags' to all candidate bits elided */
		for (i = 0; i < tupdesc->natts; i++)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			if (attr->attisdropped)
				continue;

			/* NOTE(review): lookup flags and the flag-clearing statements under each if elided */
			fieldentry = lookup_type_cache(attr->atttypid,
			if (!OidIsValid(fieldentry->eq_opr))
			if (!OidIsValid(fieldentry->cmp_proc))
			if (!OidIsValid(fieldentry->hash_proc))
			if (!OidIsValid(fieldentry->hash_extended_proc))

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		/* NOTE(review): the lookup_type_cache() call populating 'baseentry' elided */
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
			/* NOTE(review): rest of the masked flag expression elided */
		}
	}
}
1703
1704/*
1705 * Likewise, some helper functions for range and multirange types.
1706 *
1707 * We can borrow the flag bits for array element properties to use for range
1708 * element properties, since those flag bits otherwise have no use in a
1709 * range or multirange type's typcache entry.
1710 */
1711
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the cache_range_element_properties(typentry) call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
}
1719
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
}
1727
static void
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* load up subtype link if we didn't already */
	if (typentry->rngelemtype == NULL &&
		typentry->typtype == TYPTYPE_RANGE)
		load_rangetype_info(typentry);

	if (typentry->rngelemtype != NULL)
	{
		/* NOTE(review): lookup_type_cache() call populating 'elementry' elided */

		/* might need to calculate subtype's hash function properties */
		if (OidIsValid(elementry->hash_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
		if (OidIsValid(elementry->hash_extended_proc))
		/* NOTE(review): extended-hashing flag set and CHECKED-flag set lines elided */
	}
}
1751
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the cache_multirange_element_properties(typentry) call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
}
1759
static bool
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* NOTE(review): the property-computing call is elided after the if */
	if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
		return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
}
1767
static void
/* NOTE(review): signature elided — takes TypeCacheEntry *typentry */
{
	/* load up range link if we didn't already */
	if (typentry->rngtype == NULL &&
		typentry->typtype == TYPTYPE_MULTIRANGE)
		load_multirangetype_info(typentry);

	if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
	{
		/* NOTE(review): lookup_type_cache() call populating 'elementry' elided */

		/* might need to calculate subtype's hash function properties */
		if (OidIsValid(elementry->hash_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
		if (OidIsValid(elementry->hash_extended_proc))
		/* NOTE(review): extended-hashing flag set and CHECKED-flag set lines elided */
	}
}
1791
1792/*
1793 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1794 * to store 'typmod'.
1795 */
static void
/* NOTE(review): signature elided — takes int32 typmod */
{
	if (RecordCacheArray == NULL)
	{
		/* NOTE(review): initial MemoryContextAllocZero of the arrays elided; only the size expression remains */
					  64 * sizeof(RecordCacheArrayEntry));
	}

	if (typmod >= RecordCacheArrayLen)
	{
		/* Grow to the next power of two that can index 'typmod' */
		int32		newlen = pg_nextpower2_32(typmod + 1);

		/* NOTE(review): repalloc0_array call and RecordCacheArrayLen update elided */
					  newlen);
	}
}
1818
1819/*
1820 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1821 *
1822 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1823 * hasn't had its refcount bumped.
1824 */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					/* NOTE(review): errcode(ERRCODE_WRONG_OBJECT_TYPE)-style line elided */
					errmsg("type %s is not composite",
						   format_type_be(type_id))));
		return typentry->tupDesc;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0)
		{
			/* It is already in our local cache? */
			if (typmod < RecordCacheArrayLen &&
				RecordCacheArray[typmod].tupdesc != NULL)
				return RecordCacheArray[typmod].tupdesc;

			/* Are we attached to a shared record typmod registry? */
			/* NOTE(review): the CurrentSession->shared_typmod_* test line elided */
			{
				/* NOTE(review): SharedTypmodTableEntry declaration elided */

				/* Try to find it in the shared typmod index. */
				/* NOTE(review): dshash_find(...) call start elided */
					&typmod, false);
				if (entry != NULL)
				{
					TupleDesc	tupdesc;

					tupdesc = (TupleDesc)
					/* NOTE(review): dsa_get_address(...) call start elided */
						entry->shared_tupdesc);
					Assert(typmod == tupdesc->tdtypmod);

					/* We may need to extend the local RecordCacheArray. */
					/* NOTE(review): ensure_record_cache_typmod_slot_exists(typmod) call elided */

					/*
					 * Our local array can now point directly to the TupleDesc
					 * in shared memory, which is non-reference-counted.
					 */
					RecordCacheArray[typmod].tupdesc = tupdesc;
					Assert(tupdesc->tdrefcount == -1);

					/*
					 * We don't share tupdesc identifiers across processes, so
					 * assign one locally.
					 */
					/* NOTE(review): id assignment and dshash_release_lock start elided */

						entry);

					return RecordCacheArray[typmod].tupdesc;
				}
			}
		}

		if (!noError)
			ereport(ERROR,
					/* NOTE(review): errcode(...) line elided */
					errmsg("record type has not been registered")));
		return NULL;
	}
}
1903
1904/*
1905 * lookup_rowtype_tupdesc
1906 *
1907 * Given a typeid/typmod that should describe a known composite type,
1908 * return the tuple descriptor for the type. Will ereport on failure.
1909 * (Use ereport because this is reachable with user-specified OIDs,
1910 * for example from record_in().)
1911 *
1912 * Note: on success, we increment the refcount of the returned TupleDesc,
1913 * and log the reference in CurrentResourceOwner. Caller must call
1914 * ReleaseTupleDesc when done using the tupdesc. (There are some
1915 * cases in which the returned tupdesc is not refcounted, in which
1916 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1917 * the tupdesc is guaranteed to live till process exit.)
1918 */
/* NOTE(review): 'TupleDesc' return-type line elided by extraction */
lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
{
	TupleDesc	tupDesc;

	/* noError = false: ereports if the rowtype is unknown */
	tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	/* Bump refcount / register with CurrentResourceOwner per header comment */
	PinTupleDesc(tupDesc);
	return tupDesc;
}
1928
1929/*
1930 * lookup_rowtype_tupdesc_noerror
1931 *
1932 * As above, but if the type is not a known composite type and noError
1933 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1934 * type_id is passed, you'll get an ereport anyway.)
1935 */
/* NOTE(review): 'TupleDesc' return-type line elided by extraction */
lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* Only pin when we actually found a descriptor */
	if (tupDesc != NULL)
		PinTupleDesc(tupDesc);
	return tupDesc;
}
1946
1947/*
1948 * lookup_rowtype_tupdesc_copy
1949 *
1950 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1951 * copied into the CurrentMemoryContext and is not reference-counted.
1952 */
/* NOTE(review): 'TupleDesc' return-type line elided by extraction */
lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
{
	TupleDesc	tmp;

	tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
	/* Copy (with constraints) into CurrentMemoryContext; no refcounting needed */
	return CreateTupleDescCopyConstr(tmp);
}
1961
1962/*
1963 * lookup_rowtype_tupdesc_domain
1964 *
1965 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1966 * a domain over a named composite type; so this is effectively equivalent to
1967 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1968 * except for being a tad faster.
1969 *
1970 * Note: the reason we don't fold the look-through-domain behavior into plain
1971 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1972 * dealing with a domain. Otherwise they might construct a tuple that should
1973 * be of the domain type, but not apply domain constraints.
1974 */
/* NOTE(review): 'TupleDesc' return-type line elided by extraction */
lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	if (type_id != RECORDOID)
	{
		/*
		 * Check for domain or named composite type. We might as well load
		 * whichever data is needed.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id,
		/* NOTE(review): requested-flags argument (TUPDESC | DOMAIN_BASE_INFO) elided */
		if (typentry->typtype == TYPTYPE_DOMAIN)
			/* NOTE(review): recursive noerror lookup on the domain base type partially elided */
											typentry->domainBaseTypmod,
											noError);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					/* NOTE(review): errcode(...) line elided */
					errmsg("type %s is not composite",
						   format_type_be(type_id))));
		tupDesc = typentry->tupDesc;
	}
	else
		tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	if (tupDesc != NULL)
		PinTupleDesc(tupDesc);
	return tupDesc;
}
2008
2009/*
2010 * Hash function for the hash table of RecordCacheEntry.
2011 */
2012static uint32
2013record_type_typmod_hash(const void *data, size_t size)
2014{
2015 const RecordCacheEntry *entry = data;
2016
2017 return hashRowType(entry->tupdesc);
2018}
2019
2020/*
2021 * Match function for the hash table of RecordCacheEntry.
2022 */
2023static int
2024record_type_typmod_compare(const void *a, const void *b, size_t size)
2025{
2026 const RecordCacheEntry *left = a;
2027 const RecordCacheEntry *right = b;
2028
2029 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2030}
2031
2032/*
2033 * assign_record_type_typmod
2034 *
2035 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2036 * for the type, and set the tupdesc's tdtypmod field to a value that will
2037 * identify this cache entry to lookup_rowtype_tupdesc.
2038 */
void
/* NOTE(review): signature elided — takes TupleDesc tupDesc. Declarations of
 * 'recentry', 'entDesc' and an old-context variable are also elided below. */
{
	bool		found;

	Assert(tupDesc->tdtypeid == RECORDOID);

	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		ctl.keysize = sizeof(TupleDesc);	/* just the pointer */
		ctl.entrysize = sizeof(RecordCacheEntry);
		/* NOTE(review): ctl.hash / ctl.match assignments elided */
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl,
		/* NOTE(review): HASH_ELEM | HASH_FUNCTION | HASH_COMPARE flags argument elided */

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
		/* NOTE(review): CreateCacheMemoryContext() call elided */
	}

	/*
	 * Find a hashtable entry for this tuple descriptor. We don't use
	 * HASH_ENTER yet, because if it's missing, we need to make sure that all
	 * the allocations succeed before we create the new entry.
	 */
	/* NOTE(review): hash_search(RecordCacheHash, ...) call start elided */
		&tupDesc,
		HASH_FIND, &found);
	if (found && recentry->tupdesc != NULL)
	{
		/* Already registered: just propagate the assigned typmod */
		tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
		return;
	}

	/* Not present, so need to manufacture an entry */
	/* NOTE(review): MemoryContextSwitchTo(CacheMemoryContext) elided */

	/* Look in the SharedRecordTypmodRegistry, if attached */
	/* NOTE(review): find_or_make_matching_shared_tupledesc() call elided */
	if (entDesc == NULL)
	{
		/*
		 * Make sure we have room before we CreateTupleDescCopy() or advance
		 * NextRecordTypmod.
		 */
		/* NOTE(review): ensure_record_cache_typmod_slot_exists(NextRecordTypmod) elided */

		/* Reference-counted local cache only. */
		entDesc = CreateTupleDescCopy(tupDesc);
		entDesc->tdrefcount = 1;
		entDesc->tdtypmod = NextRecordTypmod++;
	}
	else
	{
		/* NOTE(review): slot-exists call for the shared typmod elided */
	}

	/* NOTE(review): RecordCacheArray[...].tupdesc = entDesc; line elided */

	/* Assign a unique tupdesc identifier, too. */
	/* NOTE(review): RecordCacheArray[...].id = ++tupledesc_id_counter; line elided */

	/* Fully initialized; create the hash table entry */
	/* NOTE(review): hash_search(..., HASH_ENTER, ...) call start elided */
		&tupDesc,
		HASH_ENTER, NULL);
	recentry->tupdesc = entDesc;

	/* Update the caller's tuple descriptor. */
	tupDesc->tdtypmod = entDesc->tdtypmod;

	/* NOTE(review): MemoryContextSwitchTo(oldcxt) elided */
}
2120
2121/*
2122 * assign_record_type_identifier
2123 *
2124 * Get an identifier, which will be unique over the lifespan of this backend
2125 * process, for the current tuple descriptor of the specified composite type.
2126 * For named composite types, the value is guaranteed to change if the type's
2127 * definition does. For registered RECORD types, the value will not change
2128 * once assigned, since the registered type won't either. If an anonymous
2129 * RECORD type is specified, we return a new identifier on each call.
2130 */
uint64
/* NOTE(review): signature line elided — takes (Oid type_id, int32 typmod) */
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL)
			ereport(ERROR,
					/* NOTE(review): errcode(...) line elided */
					errmsg("type %s is not composite",
						   format_type_be(type_id))));
		Assert(typentry->tupDesc_identifier != 0);
		return typentry->tupDesc_identifier;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0 && typmod < RecordCacheArrayLen &&
			RecordCacheArray[typmod].tupdesc != NULL)
		{
			Assert(RecordCacheArray[typmod].id != 0);
			return RecordCacheArray[typmod].id;
		}

		/* For anonymous or unrecognized record type, generate a new ID */
		return ++tupledesc_id_counter;
	}
}
2166
2167/*
2168 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2169 * This exists only to avoid exposing private innards of
2170 * SharedRecordTypmodRegistry in a header.
2171 */
size_t
/* NOTE(review): signature line elided by extraction */
{
	/* Just the fixed-size struct; hash tables live in DSA, not here */
	return sizeof(SharedRecordTypmodRegistry);
}
2177
2178/*
2179 * Initialize 'registry' in a pre-existing shared memory region, which must be
2180 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2181 * bytes.
2182 *
2183 * 'area' will be used to allocate shared memory space as required for the
2184 * typemod registration. The current process, expected to be a leader process
2185 * in a parallel query, will be attached automatically and its current record
2186 * types will be loaded into *registry. While attached, all calls to
2187 * assign_record_type_typmod will use the shared registry. Worker backends
2188 * will need to attach explicitly.
2189 *
2190 * Note that this function takes 'area' and 'segment' as arguments rather than
2191 * accessing them via CurrentSession, because they aren't installed there
2192 * until after this function runs.
2193 */
void
/* NOTE(review): first signature line (SharedRecordTypmodRegistry *registry,) elided.
 * Several declarations (record_table, typmod_table, per-entry key/entry vars,
 * shared_dp) and dshash/dsa calls are elided below; elision points are marked. */
					 dsm_segment *segment,
					 dsa_area *area)
{
	int32		typmod;

	/* NOTE(review): leader-process assertion line elided */

	/* We can't already be attached to a shared registry. */
	/* NOTE(review): Asserts on CurrentSession->shared_* being NULL elided */

	/* Create the hash table of tuple descriptors indexed by themselves. */
	/* NOTE(review): dshash_create(area, &srtr_record_table_params, ...) elided */

	/* Create the hash table of tuple descriptors indexed by typmod. */
	/* NOTE(review): dshash_create(area, &srtr_typmod_table_params, ...) elided */

	/* Initialize the SharedRecordTypmodRegistry. */
	registry->record_table_handle = dshash_get_hash_table_handle(record_table);
	registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
	/* NOTE(review): pg_atomic_init_u32 of next_typmod elided */

	/*
	 * Copy all entries from this backend's private registry into the shared
	 * registry.
	 */
	for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
	{
		TupleDesc	tupdesc;
		bool		found;

		tupdesc = RecordCacheArray[typmod].tupdesc;
		if (tupdesc == NULL)
			continue;

		/* Copy the TupleDesc into shared memory. */
		shared_dp = share_tupledesc(area, tupdesc, typmod);

		/* Insert into the typmod table. */
		/* NOTE(review): dshash_find_or_insert(typmod_table, ...) start elided */
			&tupdesc->tdtypmod,
			&found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
		typmod_table_entry->typmod = tupdesc->tdtypmod;
		typmod_table_entry->shared_tupdesc = shared_dp;
		/* NOTE(review): dshash_release_lock for the typmod entry elided */

		/* Insert into the record table. */
		record_table_key.shared = false;
		record_table_key.u.local_tupdesc = tupdesc;
		/* NOTE(review): dshash_find_or_insert(record_table, ...) start elided */
			&found);
		if (!found)
		{
			record_table_entry->key.shared = true;
			record_table_entry->key.u.shared_tupdesc = shared_dp;
		}
		/* NOTE(review): dshash_release_lock for the record entry elided */
	}

	/*
	 * Set up the global state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	/* NOTE(review): CurrentSession->shared_* assignments elided */

	/*
	 * We install a detach hook in the leader, but only to handle cleanup on
	 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
	 * the memory, the leader process will use a shared registry until it
	 * exits.
	 */
	/* NOTE(review): on_dsm_detach(segment, shared_record_typmod_registry_detach, ...) elided */
}
2286
2287/*
2288 * Attach to 'registry', which must have been initialized already by another
2289 * backend. Future calls to assign_record_type_typmod and
2290 * lookup_rowtype_tupdesc_internal will use the shared registry until the
2291 * current session is detached.
2292 */
/*
 * NOTE(review): the extraction dropped the parameter line, the local
 * variable declarations, the Assert()s, the MemoryContextSwitchTo() calls
 * around the two dshash_attach() call heads, the on_dsm_detach()
 * registration, and the final CurrentSession assignments -- the gaps in the
 * embedded line numbers mark where they were.  Confirm against the original
 * typcache.c.
 */
2293void
2295{
2299
2301
2302 /* We can't already be attached to a shared registry. */
2309
2310 /*
2311 * We can't already have typmods in our local cache, because they'd clash
2312 * with those imported by SharedRecordTypmodRegistryInit. This should be
2313 * a freshly started parallel worker. If we ever support worker
2314 * recycling, a worker would need to zap its local cache in between
2315 * servicing different queries, in order to be able to call this and
2316 * synchronize typmods with a new leader; but that's problematic because
2317 * we can't be very sure that record-typmod-related state hasn't escaped
2318 * to anywhere else in the process.
2319 */
2321
2323
2324 /* Attach to the two hash tables. */
2327 registry->record_table_handle,
2331 registry->typmod_table_handle,
2332 NULL);
2333
2335
2336 /*
2337 * Set up detach hook to run at worker exit. Currently this is the same
2338 * as the leader's detach hook, but in future they might need to be
2339 * different.
2340 */
2344
2345 /*
2346 * Set up the session state that will tell assign_record_type_typmod and
2347 * lookup_rowtype_tupdesc_internal about the shared registry.
2348 */
2352}
2353
2354/*
2355 * InvalidateCompositeTypeCacheEntry
2356 * Invalidate particular TypeCacheEntry on Relcache inval callback
2357 *
2358 * Delete the cached tuple descriptor (if any) for the given composite
2359 * type, and reset whatever info we have cached about the composite type's
2360 * comparability.
2361 */
/*
 * NOTE(review): the extraction dropped the signature line (parameter:
 * apparently a TypeCacheEntry *typentry, per the body), the declaration of
 * hadTupDescOrOpclass, and the trailing conditional call to
 * delete_rel_type_cache_if_needed() that the final comment refers to.
 * Confirm against the original typcache.c.
 */
2362static void
2364{
2366
2367 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2368 OidIsValid(typentry->typrelid));
2369
2370 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2371 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2372
2373 /* Delete tupdesc if we have it */
2374 if (typentry->tupDesc != NULL)
2375 {
2376 /*
2377 * Release our refcount and free the tupdesc if none remain. We can't
2378 * use DecrTupleDescRefCount here because this reference is not logged
2379 * by the current resource owner.
2380 */
2381 Assert(typentry->tupDesc->tdrefcount > 0);
2382 if (--typentry->tupDesc->tdrefcount == 0)
2383 FreeTupleDesc(typentry->tupDesc);
2384 typentry->tupDesc = NULL;
2385
2386 /*
2387 * Also clear tupDesc_identifier, so that anyone watching it will
2388 * realize that the tupdesc has changed.
2389 */
2390 typentry->tupDesc_identifier = 0;
2391 }
2392
2393 /* Reset equality/comparison/hashing validity information */
2394 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2395
2396 /*
2397 * Call delete_rel_type_cache_if_needed() if we actually cleared
2398 * something.  (NOTE(review): the call itself, guarded by
2399 * hadTupDescOrOpclass, was on the elided lines 2400-2401.)
2400 */
2402}
2403
2404/*
2405 * TypeCacheRelCallback
2406 * Relcache inval callback function
2407 *
2408 * Delete the cached tuple descriptor (if any) for the given rel's composite
2409 * type, or for all composite types if relid == InvalidOid. Also reset
2410 * whatever info we have cached about the composite type's comparability.
2411 *
2412 * This is called when a relcache invalidation event occurs for the given
2413 * relid. We can't use syscache to find a type corresponding to the given
2414 * relation because the code can be called outside of transaction. Thus, we
2415 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2416 */
/*
 * NOTE(review): the extraction dropped the signature line (a relcache
 * callback, so presumably (Datum arg, Oid relid) -- TODO confirm), the
 * relentry declaration, the hash_search() call heads on both lookups, the
 * InvalidateCompositeTypeCacheEntry() calls, and the domain
 * "base type is composite" test lines (2469, 2498).  Gaps in the embedded
 * line numbers mark the elisions.
 */
2417static void
2419{
2420 TypeCacheEntry *typentry;
2421
2422 /*
2423 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2424 * callback wouldn't be registered
2425 */
2426 if (OidIsValid(relid))
2427 {
2429
2430 /*
2431 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2432 * corresponding typcache entry has something to clean.
2433 */
2435 &relid,
2436 HASH_FIND, NULL);
2437
2438 if (relentry != NULL)
2439 {
2441 &relentry->composite_typid,
2442 HASH_FIND, NULL);
2443
2444 if (typentry != NULL)
2445 {
2446 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2447 Assert(relid == typentry->typrelid);
2448
2450 }
2451 }
2452
2453 /*
2454 * Visit all the domain types sequentially. Typically, this shouldn't
2455 * affect performance since domain types don't tend to bloat the cache.
2456 * Domain types are created manually, unlike composite types which are
2457 * automatically created for every temporary table.
2458 */
2459 for (typentry = firstDomainTypeEntry;
2460 typentry != NULL;
2461 typentry = typentry->nextDomain)
2462 {
2463 /*
2464 * If it's domain over composite, reset flags. (We don't bother
2465 * trying to determine whether the specific base type needs a
2466 * reset.) Note that if we haven't determined whether the base
2467 * type is composite, we don't need to reset anything.
2468 */
2470 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2471 }
2472 }
2473 else
2474 {
2475 HASH_SEQ_STATUS status;
2476
2477 /*
2478 * Relid is invalid. By convention, we need to reset all composite
2479 * types in cache. Also, we should reset flags for domain types, and
2480 * we loop over all entries in hash, so, do it in a single scan.
2481 */
2482 hash_seq_init(&status, TypeCacheHash);
2483 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2484 {
2485 if (typentry->typtype == TYPTYPE_COMPOSITE)
2486 {
2488 }
2489 else if (typentry->typtype == TYPTYPE_DOMAIN)
2490 {
2491 /*
2492 * If it's domain over composite, reset flags. (We don't
2493 * bother trying to determine whether the specific base type
2494 * needs a reset.) Note that if we haven't determined whether
2495 * the base type is composite, we don't need to reset
2496 * anything.
2497 */
2499 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2500 }
2501 }
2502 }
2503}
2504
2505/*
2506 * TypeCacheTypCallback
2507 * Syscache inval callback function
2508 *
2509 * This is called when a syscache invalidation event occurs for any
2510 * pg_type row. If we have information cached about that type, mark
2511 * it as needing to be reloaded.
2512 */
/*
 * NOTE(review): the extraction dropped the signature line, the Assert
 * that TypeCacheHash exists (line 2520), the second flag cleared on line
 * 2543 (continuation of the &= ~(...) expression), and the
 * delete_rel_type_cache_if_needed() call on line 2550.  Confirm against
 * the original typcache.c.
 */
2513static void
2515{
2516 HASH_SEQ_STATUS status;
2517 TypeCacheEntry *typentry;
2518
2519 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2520
2521 /*
2522 * By convention, zero hash value is passed to the callback as a sign that
2523 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2524 * InvalidateSystemCachesExtended().
2525 */
2526 if (hashvalue == 0)
2527 hash_seq_init(&status, TypeCacheHash);
2528 else
2529 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2530
2531 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2532 {
2533 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2534
2535 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2536
2537 /*
2538 * Mark the data obtained directly from pg_type as invalid. Also, if
2539 * it's a domain, typnotnull might've changed, so we'll need to
2540 * recalculate its constraints.
2541 */
2542 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2544
2545 /*
2546 * Call delete_rel_type_cache_if_needed() if we cleaned
2547 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2548 */
2549 if (hadPgTypeData)
2551 }
2552}
2553
2554/*
2555 * TypeCacheOpcCallback
2556 * Syscache inval callback function
2557 *
2558 * This is called when a syscache invalidation event occurs for any pg_opclass
2559 * row. In principle we could probably just invalidate data dependent on the
2560 * particular opclass, but since updates on pg_opclass are rare in production
2561 * it doesn't seem worth a lot of complication: we just mark all cached data
2562 * invalid.
2563 *
2564 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2565 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2566 * is not allowed to be used to add/drop the primary operators and functions
2567 * of an opclass, only cross-type members of a family; and the latter sorts
2568 * of members are not going to get cached here.
2569 */
/*
 * NOTE(review): the extraction dropped the signature line (2571) and the
 * delete_rel_type_cache_if_needed(typentry) call on line 2590 that the
 * final comment refers to.  Confirm against the original typcache.c.
 */
2570static void
2572{
2573 HASH_SEQ_STATUS status;
2574 TypeCacheEntry *typentry;
2575
2576 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2577 hash_seq_init(&status, TypeCacheHash);
2578 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2579 {
2580 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2581
2582 /* Reset equality/comparison/hashing validity information */
2583 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2584
2585 /*
2586 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2587 * of TCFLAGS_OPERATOR_FLAGS.
2588 */
2589 if (hadOpclass)
2591 }
2592}
2593
2594/*
2595 * TypeCacheConstrCallback
2596 * Syscache inval callback function
2597 *
2598 * This is called when a syscache invalidation event occurs for any
2599 * pg_constraint row. We flush information about domain constraints
2600 * when this happens.
2601 *
2602 * It's slightly annoying that we can't tell whether the inval event was for
2603 * a domain constraint record or not; there's usually more update traffic
2604 * for table constraints than domain constraints, so we'll do a lot of
2605 * useless flushes. Still, this is better than the old no-caching-at-all
2606 * approach to domain constraints.
2607 */
/*
 * NOTE(review): the extraction dropped the signature line (2609) and the
 * loop body's single statement (2624), which per the preceding comment
 * resets the entry's domain-constraint validity information.  Confirm
 * against the original typcache.c.
 */
2608static void
2610{
2611 TypeCacheEntry *typentry;
2612
2613 /*
2614 * Because this is called very frequently, and typically very few of the
2615 * typcache entries are for domains, we don't use hash_seq_search here.
2616 * Instead we thread all the domain-type entries together so that we can
2617 * visit them cheaply.
2618 */
2619 for (typentry = firstDomainTypeEntry;
2620 typentry != NULL;
2621 typentry = typentry->nextDomain)
2622 {
2623 /* Reset domain constraint validity information */
2625 }
2626}
2627
2628
2629/*
2630 * Check if given OID is part of the subset that's sortable by comparisons
2631 */
/*
 * NOTE(review): the extraction dropped the signature line (2633); from the
 * body, the parameters are apparently a TypeCacheEnumData *enumdata and an
 * Oid arg -- confirm against the original typcache.c.  Returns true iff
 * 'arg' lies within the cached bitmap of OIDs whose OID order matches sort
 * order, so plain OID comparison is valid for it.
 */
2632static inline bool
2634{
2635 Oid offset;
2636
2637 if (arg < enumdata->bitmap_base)
2638 return false;
2639 offset = arg - enumdata->bitmap_base;
2640 /* bitmap members are addressed as ints, so reject offsets beyond INT_MAX */
2641 if (offset > (Oid) INT_MAX)
2642 return false;
2643 return bms_is_member((int) offset, enumdata->sorted_values);
2643}
2644
2645
2646/*
2647 * compare_values_of_enum
2648 * Compare two members of an enum type.
2649 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2650 *
2651 * Note: currently, the enumData cache is refreshed only if we are asked
2652 * to compare an enum value that is not already in the cache. This is okay
2653 * because there is no support for re-ordering existing values, so comparisons
2654 * of previously cached values will return the right answer even if other
2655 * values have been added since we last loaded the cache.
2656 *
2657 * Note: the enum logic has a special-case rule about even-numbered versus
2658 * odd-numbered OIDs, but we take no account of that rule here; this
2659 * routine shouldn't even get called when that rule applies.
2660 */
/*
 * NOTE(review): the extraction dropped the signature line (2662; the body
 * uses a TypeCacheEntry *tcache and two Oids arg1/arg2), the known-sorted
 * fast-path condition (2683-2684), and the find_enumitem() lookups
 * (2695-2696 and 2708-2709).  Gaps in the embedded line numbers mark the
 * elisions; confirm against the original typcache.c.
 */
2661int
2663{
2665 EnumItem *item1;
2666 EnumItem *item2;
2667
2668 /*
2669 * Equal OIDs are certainly equal --- this case was probably handled by
2670 * our caller, but we may as well check.
2671 */
2672 if (arg1 == arg2)
2673 return 0;
2674
2675 /* Load up the cache if first time through */
2676 if (tcache->enumData == NULL)
2677 load_enum_cache_data(tcache);
2678 enumdata = tcache->enumData;
2679
2680 /*
2681 * If both OIDs are known-sorted, we can just compare them directly.
2682 */
2685 {
2686 if (arg1 < arg2)
2687 return -1;
2688 else
2689 return 1;
2690 }
2691
2692 /*
2693 * Slow path: we have to identify their actual sort-order positions.
2694 */
2697
2698 if (item1 == NULL || item2 == NULL)
2699 {
2700 /*
2701 * We couldn't find one or both values. That means the enum has
2702 * changed under us, so re-initialize the cache and try again. We
2703 * don't bother retrying the known-sorted case in this path.
2704 */
2705 load_enum_cache_data(tcache);
2706 enumdata = tcache->enumData;
2707
2710
2711 /*
2712 * If we still can't find the values, complain: we must have corrupt
2713 * data.
2714 */
2715 if (item1 == NULL)
2716 elog(ERROR, "enum value %u not found in cache for enum %s",
2717 arg1, format_type_be(tcache->type_id));
2718 if (item2 == NULL)
2719 elog(ERROR, "enum value %u not found in cache for enum %s",
2720 arg2, format_type_be(tcache->type_id));
2721 }
2722
2723 if (item1->sort_order < item2->sort_order)
2724 return -1;
2725 else if (item1->sort_order > item2->sort_order)
2726 return 1;
2727 else
2728 return 0;
2729}
2730
2731/*
2732 * Load (or re-load) the enumData member of the typcache entry.
2733 */
/*
 * NOTE(review): the extraction dropped the signature line, several local
 * declarations (enumdata, enum_rel, enum_scan, enum_tuple, skey,
 * this_bitmap, oldcxt per usage below), the ScanKeyInit()/relation_open()/
 * systable_beginscan() call heads, the scan loop head and GETSTRUCT line,
 * the systable_endscan()/relation_close() calls (2795-2796), the bms_free
 * of a rejected bitmap (2854), the MemoryContextSwitchTo() pair
 * (2867-2868, 2875), and the bm_size update (2851).  Gaps in the embedded
 * line numbers mark the elisions; confirm against the original typcache.c.
 */
2734static void
2736{
2742 EnumItem *items;
2743 int numitems;
2744 int maxitems;
2745 Oid bitmap_base;
2746 Bitmapset *bitmap;
2748 int bm_size,
2749 start_pos;
2750
2751 /* Check that this is actually an enum */
2752 if (tcache->typtype != TYPTYPE_ENUM)
2753 ereport(ERROR,
2755 errmsg("%s is not an enum",
2756 format_type_be(tcache->type_id))));
2757
2758 /*
2759 * Read all the information for members of the enum type. We collect the
2760 * info in working memory in the caller's context, and then transfer it to
2761 * permanent memory in CacheMemoryContext. This minimizes the risk of
2762 * leaking memory from CacheMemoryContext in the event of an error partway
2763 * through.
2764 */
2765 maxitems = 64;
2766 items = palloc_array(EnumItem, maxitems);
2767 numitems = 0;
2768
2769 /* Scan pg_enum for the members of the target enum type. */
2773 ObjectIdGetDatum(tcache->type_id));
2774
2778 true, NULL,
2779 1, &skey);
2780
2782 {
2784
2785 if (numitems >= maxitems)
2786 {
2787 maxitems *= 2; /* grow the working array geometrically */
2788 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2789 }
2790 items[numitems].enum_oid = en->oid;
2791 items[numitems].sort_order = en->enumsortorder;
2792 numitems++;
2793 }
2794
2797
2798 /* Sort the items into OID order */
2799 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2800
2801 /*
2802 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2803 * known to be in order and can thus be compared with just OID comparison.
2804 *
2805 * The point of this is that the enum's initial OIDs were certainly in
2806 * order, so there is some subset that can be compared via OID comparison;
2807 * and we'd rather not do binary searches unnecessarily.
2808 *
2809 * This is somewhat heuristic, and might identify a subset of OIDs that
2810 * isn't exactly what the type started with. That's okay as long as the
2811 * subset is correctly sorted.
2812 */
2813 bitmap_base = InvalidOid;
2814 bitmap = NULL;
2815 bm_size = 1; /* only save sets of at least 2 OIDs */
2816
2817 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2818 {
2819 /*
2820 * Identify longest sorted subsequence starting at start_pos
2821 */
2823 int this_bm_size = 1;
2824 Oid start_oid = items[start_pos].enum_oid;
2825 float4 prev_order = items[start_pos].sort_order;
2826 int i;
2827
2828 for (i = start_pos + 1; i < numitems; i++)
2829 {
2830 Oid offset;
2831
2832 offset = items[i].enum_oid - start_oid;
2833 /* quit if bitmap would be too large; cutoff is arbitrary */
2834 if (offset >= 8192)
2835 break;
2836 /* include the item if it's in-order */
2837 if (items[i].sort_order > prev_order)
2838 {
2839 prev_order = items[i].sort_order;
2840 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2841 this_bm_size++;
2842 }
2843 }
2844
2845 /* Remember it if larger than previous best */
2846 if (this_bm_size > bm_size)
2847 {
2848 bms_free(bitmap);
2849 bitmap_base = start_oid;
2850 bitmap = this_bitmap;
2852 }
2853 else
2855
2856 /*
2857 * Done if it's not possible to find a longer sequence in the rest of
2858 * the list. In typical cases this will happen on the first
2859 * iteration, which is why we create the bitmaps on the fly instead of
2860 * doing a second pass over the list.
2861 */
2862 if (bm_size >= (numitems - start_pos - 1))
2863 break;
2864 }
2865
2866 /* OK, copy the data into CacheMemoryContext */
2869 palloc(offsetof(TypeCacheEnumData, enum_values) +
2870 numitems * sizeof(EnumItem));
2871 enumdata->bitmap_base = bitmap_base;
2872 enumdata->sorted_values = bms_copy(bitmap);
2873 enumdata->num_values = numitems;
2874 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2876
2877 pfree(items);
2878 bms_free(bitmap);
2879
2880 /* And link the finished cache struct into the typcache */
2881 if (tcache->enumData != NULL)
2882 pfree(tcache->enumData);
2883 tcache->enumData = enumdata;
2884}
2885
2886/*
2887 * Locate the EnumItem with the given OID, if present
2888 */
/*
 * NOTE(review): the extraction dropped the signature line (2890); from the
 * body, the parameters are apparently a TypeCacheEnumData *enumdata and an
 * Oid arg -- confirm against the original typcache.c.  Binary-searches the
 * OID-sorted enum_values array; returns NULL if 'arg' is not present.
 */
2889static EnumItem *
2891{
2892 EnumItem srch;
2893
2894 /* On some versions of Solaris, bsearch of zero items dumps core */
2895 if (enumdata->num_values <= 0)
2896 return NULL;
2897
2898 srch.enum_oid = arg;
2899 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2900 sizeof(EnumItem), enum_oid_cmp);
2901}
2902
2903/*
2904 * qsort comparison function for OID-ordered EnumItems
2905 */
2906static int
2907enum_oid_cmp(const void *left, const void *right)
2908{
2909 const EnumItem *l = (const EnumItem *) left;
2910 const EnumItem *r = (const EnumItem *) right;
2911
2912 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2913}
2914
2915/*
2916 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2917 * to the given value and return a dsa_pointer.
2918 */
2919static dsa_pointer
2920share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2921{
2923 TupleDesc shared;
2924
2925 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2926 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2927 TupleDescCopy(shared, tupdesc);
2928 shared->tdtypmod = typmod;
2929
2930 return shared_dp;
2931}
2932
2933/*
2934 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2935 * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2936 * Tuple descriptors returned by this function are not reference counted, and
2938 * will exist at least as long as the current backend remains attached to the
2938 * current session.
2939 */
/*
 * NOTE(review): heavily elided by the extraction: the signature line, the
 * declarations of key/record_table_entry/typmod_table_entry/shared_dp, the
 * CurrentSession attachment test (2952), the dshash_find() call head
 * (2958-2960), the dsa_get_address() call heads, the
 * pg_atomic_fetch_add_u32() on the shared typmod counter (2975), the
 * dshash_find_or_insert()/dshash_release_lock()/dshash_delete_key()/
 * dsa_free() call heads, and the error-path dsa_free() in PG_CATCH (2995).
 * Gaps in the embedded line numbers mark the elisions; confirm against the
 * original typcache.c.
 */
2940static TupleDesc
2942{
2943 TupleDesc result;
2948 bool found;
2949 uint32 typmod;
2950
2951 /* If not even attached, nothing to do. */
2953 return NULL;
2954
2955 /* Try to find a matching tuple descriptor in the record table. */
2956 key.shared = false;
2957 key.u.local_tupdesc = tupdesc;
2961 {
2962 Assert(record_table_entry->key.shared);
2965 result = (TupleDesc)
2967 record_table_entry->key.u.shared_tupdesc);
2968 Assert(result->tdrefcount == -1);
2969
2970 return result;
2971 }
2972
2973 /* Allocate a new typmod number. This will be wasted if we error out. */
2974 typmod = (int)
2976 1);
2977
2978 /* Copy the TupleDesc into shared memory. */
2979 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2980
2981 /*
2982 * Create an entry in the typmod table so that others will understand this
2983 * typmod number.
2984 */
2985 PG_TRY();
2986 {
2989 &typmod, &found);
2990 if (found)
2991 elog(ERROR, "cannot create duplicate shared record typmod");
2992 }
2993 PG_CATCH();
2994 {
2996 PG_RE_THROW();
2997 }
2998 PG_END_TRY();
2999 typmod_table_entry->typmod = typmod;
3000 typmod_table_entry->shared_tupdesc = shared_dp;
3003
3004 /*
3005 * Finally create an entry in the record table so others with matching
3006 * tuple descriptors can reuse the typmod.
3007 */
3010 &found);
3011 if (found)
3012 {
3013 /*
3014 * Someone concurrently inserted a matching tuple descriptor since the
3015 * first time we checked. Use that one instead.
3016 */
3019
3020 /* Might as well free up the space used by the one we created. */
3022 &typmod);
3023 Assert(found);
3025
3026 /* Return the one we found. */
3027 Assert(record_table_entry->key.shared);
3028 result = (TupleDesc)
3030 record_table_entry->key.u.shared_tupdesc);
3031 Assert(result->tdrefcount == -1);
3032
3033 return result;
3034 }
3035
3036 /* Store it and return it. */
3037 record_table_entry->key.shared = true;
3038 record_table_entry->key.u.shared_tupdesc = shared_dp;
3041 result = (TupleDesc)
3043 Assert(result->tdrefcount == -1);
3044
3045 return result;
3046}
3047
3048/*
3049 * On-DSM-detach hook to forget about the current shared record typmod
3050 * infrastructure. This is currently used by both leader and workers.
3051 */
/*
 * NOTE(review): almost the entire body was dropped by the extraction: the
 * signature line, both if-conditions, and the statements inside the two
 * braced groups (per the header comment, these detach from the shared
 * record/typmod hash tables and clear the session state).  Only the brace
 * skeleton survives below; consult the original typcache.c.
 */
3052static void
3054{
3055 /* Be cautious here: maybe we didn't finish initializing. */
3057 {
3060 }
3062 {
3065 }
3067}
3068
3069/*
3070 * Insert RelIdToTypeIdCacheHash entry if needed.
3071 */
/*
 * NOTE(review): the extraction dropped the signature line (the body uses a
 * TypeCacheEntry *typentry), the relentry declaration (3090), and the
 * hash_search(RelIdToTypeIdCacheHash, ...) call head (3093).  Confirm
 * against the original typcache.c.
 */
3072static void
3074{
3075 /* Immediately quit for non-composite types */
3076 if (typentry->typtype != TYPTYPE_COMPOSITE)
3077 return;
3078
3079 /* typrelid should be given for composite types */
3080 Assert(OidIsValid(typentry->typrelid));
3081
3082 /*
3083 * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3084 * information indicating it should be here.
3085 */
3086 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3087 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3088 typentry->tupDesc != NULL)
3089 {
3091 bool found;
3092
3094 &typentry->typrelid,
3095 HASH_ENTER, &found);
3096 relentry->relid = typentry->typrelid;
3097 relentry->composite_typid = typentry->type_id;
3098 }
3099}
3100
3101/*
3102 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3103 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3104 * or tupDesc.
3105 */
/*
 * NOTE(review): the extraction dropped the signature line (the body uses a
 * TypeCacheEntry *typentry) and the hash_search(RelIdToTypeIdCacheHash,
 * ...) call heads (3140, 3156).  Confirm against the original typcache.c.
 */
3106static void
3108{
3109#ifdef USE_ASSERT_CHECKING
3110 int i;
3111 bool is_in_progress = false;
3112
3113 for (i = 0; i < in_progress_list_len; i++)
3114 {
3115 if (in_progress_list[i] == typentry->type_id)
3116 {
3117 is_in_progress = true;
3118 break;
3119 }
3120 }
3121#endif
3122
3123 /* Immediately quit for non-composite types */
3124 if (typentry->typtype != TYPTYPE_COMPOSITE)
3125 return;
3126
3127 /* typrelid should be given for composite types */
3128 Assert(OidIsValid(typentry->typrelid));
3129
3130 /*
3131 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3132 * information indicating entry should be still there.
3133 */
3134 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3135 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3136 typentry->tupDesc == NULL)
3137 {
3138 bool found;
3139
3141 &typentry->typrelid,
3142 HASH_REMOVE, &found);
3143 Assert(found || is_in_progress);
3144 }
3145 else
3146 {
3147#ifdef USE_ASSERT_CHECKING
3148 /*
3149 * Otherwise, in assert-enabled builds, verify that the
3150 * RelIdToTypeIdCacheHash entry exists when it should.
3151 */
3152 bool found;
3153
3154 if (!is_in_progress)
3155 {
3157 &typentry->typrelid,
3158 HASH_FIND, &found);
3159 Assert(found);
3160 }
3161#endif
3162 }
3163}
3164
3165/*
3166 * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3167 * entries, marked as in-progress by lookup_type_cache(). It may happen
3168 * in case of an error or interruption during the lookup_type_cache() call.
3169 */
/*
 * NOTE(review): the extraction dropped the signature line, the
 * hash_search(TypeCacheHash, &in_progress_list[i], ...) call head
 * (3179-3180), the insert_rel_type_cache_if_needed(typentry) call (3183),
 * and the in_progress_list_len reset (3186).  Confirm against the
 * original typcache.c.
 */
3170static void
3172{
3173 int i;
3174
3175 for (i = 0; i < in_progress_list_len; i++)
3176 {
3177 TypeCacheEntry *typentry;
3178
3181 HASH_FIND, NULL);
3182 if (typentry)
3184 }
3185
3187}
3188
/*
 * NOTE(review): the extraction dropped this function's name line (3190) and
 * its single body statement (3192); its identity cannot be determined from
 * this chunk alone -- consult the original typcache.c.
 */
3189void
3191{
3193}
3194
/*
 * NOTE(review): the extraction dropped this function's name line (3196) and
 * its single body statement (3198); its identity cannot be determined from
 * this chunk alone -- consult the original typcache.c.
 */
3195void
3197{
3199}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition atomics.h:366
Bitmapset * bms_make_singleton(int x)
Definition bitmapset.c:216
void bms_free(Bitmapset *a)
Definition bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition bitmapset.c:814
Bitmapset * bms_copy(const Bitmapset *a)
Definition bitmapset.c:122
#define TextDatumGetCString(d)
Definition builtins.h:98
#define NameStr(name)
Definition c.h:765
#define RegProcedureIsValid(p)
Definition c.h:792
#define Assert(condition)
Definition c.h:873
#define FLEXIBLE_ARRAY_MEMBER
Definition c.h:480
int32_t int32
Definition c.h:542
uint64_t uint64
Definition c.h:547
uint32_t uint32
Definition c.h:546
float float4
Definition c.h:643
#define MemSet(start, val, len)
Definition c.h:1013
#define OidIsValid(objectId)
Definition c.h:788
size_t Size
Definition c.h:619
void CreateCacheMemoryContext(void)
Definition catcache.c:715
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition dsa.c:957
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition dsa.c:841
uint64 dsa_pointer
Definition dsa.h:62
#define dsa_allocate(area, size)
Definition dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition dshash.c:505
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition dshash.c:592
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition dshash.c:560
void dshash_detach(dshash_table *hash_table)
Definition dshash.c:309
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition dshash.c:392
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition dshash.c:369
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition dshash.c:272
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition dshash.c:435
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition dshash.c:583
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition dshash.c:208
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition dshash.c:574
dsa_pointer dshash_table_handle
Definition dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition dynahash.c:1400
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:908
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define PG_RE_THROW()
Definition elog.h:405
#define PG_TRY(...)
Definition elog.h:372
#define PG_END_TRY(...)
Definition elog.h:397
#define ERROR
Definition elog.h:39
#define PG_CATCH(...)
Definition elog.h:382
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition execExpr.c:143
@ DOM_CONSTRAINT_CHECK
Definition execnodes.h:1052
@ DOM_CONSTRAINT_NOTNULL
Definition execnodes.h:1051
#define palloc_array(type, count)
Definition fe_memutils.h:76
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition fmgr.c:138
char * format_type_be(Oid type_oid)
void systable_endscan(SysScanDesc sysscan)
Definition genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition genam.c:388
#define HASHSTANDARD_PROC
Definition hash.h:355
#define HASHEXTENDED_PROC
Definition hash.h:356
@ HASH_FIND
Definition hsearch.h:113
@ HASH_REMOVE
Definition hsearch.h:115
@ HASH_ENTER
Definition hsearch.h:114
#define HASH_ELEM
Definition hsearch.h:95
#define HASH_COMPARE
Definition hsearch.h:99
#define HASH_FUNCTION
Definition hsearch.h:98
#define HASH_BLOBS
Definition hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
#define IsParallelWorker()
Definition parallel.h:60
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition indexcmds.c:2368
long val
Definition informix.c:689
#define INJECTION_POINT(name, arg)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition int.h:719
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition inval.c:1858
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition inval.c:1816
int b
Definition isn.c:74
int a
Definition isn.c:73
int i
Definition isn.c:77
List * lappend(List *list, void *datum)
Definition list.c:339
List * lcons(void *datum, List *list)
Definition list.c:495
#define AccessShareLock
Definition lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition lsyscache.c:1314
Oid get_opclass_family(Oid opclass)
Definition lsyscache.c:1292
Oid get_multirange_range(Oid multirangeOid)
Definition lsyscache.c:3633
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition lsyscache.c:872
RegProcedure get_opcode(Oid opno)
Definition lsyscache.c:1435
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition lsyscache.c:168
Oid get_base_element_type(Oid typid)
Definition lsyscache.c:2982
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition lsyscache.c:2688
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition mcxt.c:1266
char * pstrdup(const char *in)
Definition mcxt.c:1781
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition mcxt.c:582
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition mcxt.c:686
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
MemoryContext CacheMemoryContext
Definition mcxt.c:169
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition memutils.h:170
#define BTORDER_PROC
Definition nbtree.h:717
#define copyObject(obj)
Definition nodes.h:232
#define makeNode(_type_)
Definition nodes.h:161
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition palloc.h:109
FormData_pg_attribute * Form_pg_attribute
void * arg
static uint32 pg_nextpower2_32(uint32 num)
FormData_pg_constraint * Form_pg_constraint
const void * data
FormData_pg_enum * Form_pg_enum
Definition pg_enum.h:44
#define lfirst(lc)
Definition pg_list.h:172
#define NIL
Definition pg_list.h:68
FormData_pg_range * Form_pg_range
Definition pg_range.h:67
FormData_pg_type * Form_pg_type
Definition pg_type.h:261
Expr * expression_planner(Expr *expr)
Definition planner.c:6817
#define qsort(a, b, c, d)
Definition port.h:495
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352
static Datum ObjectIdGetDatum(Oid X)
Definition postgres.h:262
uint64_t Datum
Definition postgres.h:70
#define InvalidOid
unsigned int Oid
char * c
static int fb(int x)
tree ctl
Definition radixtree.h:1838
void * stringToNode(const char *str)
Definition read.c:90
#define RelationGetDescr(relation)
Definition rel.h:540
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition scankey.c:76
Session * CurrentSession
Definition session.c:48
void relation_close(Relation relation, LOCKMODE lockmode)
Definition relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition relation.c:47
#define BTGreaterStrategyNumber
Definition stratnum.h:33
#define HTEqualStrategyNumber
Definition stratnum.h:41
#define BTLessStrategyNumber
Definition stratnum.h:29
#define BTEqualStrategyNumber
Definition stratnum.h:31
MemoryContext dccContext
Definition typcache.c:142
DomainConstraintType constrainttype
Definition execnodes.h:1058
ExprState * check_exprstate
Definition execnodes.h:1061
Oid enum_oid
Definition typcache.c:149
Oid fn_oid
Definition fmgr.h:59
Size keysize
Definition hsearch.h:75
Definition pg_list.h:54
TupleDesc tupdesc
Definition typcache.c:174
Form_pg_class rd_rel
Definition rel.h:111
dsm_segment * segment
Definition session.h:27
dshash_table * shared_record_table
Definition session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition session.h:31
dsa_area * area
Definition session.h:28
dshash_table * shared_typmod_table
Definition session.h:33
SharedRecordTableKey key
Definition typcache.c:213
TupleDesc local_tupdesc
Definition typcache.c:201
union SharedRecordTableKey::@33 u
dsa_pointer shared_tupdesc
Definition typcache.c:202
dshash_table_handle typmod_table_handle
Definition typcache.c:186
pg_atomic_uint32 next_typmod
Definition typcache.c:188
dshash_table_handle record_table_handle
Definition typcache.c:184
dsa_pointer shared_tupdesc
Definition typcache.c:223
int32 tdtypmod
Definition tupdesc.h:139
uint32 type_id_hash
Definition typcache.h:36
uint64 tupDesc_identifier
Definition typcache.h:91
FmgrInfo hash_proc_finfo
Definition typcache.h:78
int32 domainBaseTypmod
Definition typcache.h:116
Oid hash_extended_proc
Definition typcache.h:67
FmgrInfo rng_cmp_proc_finfo
Definition typcache.h:102
FmgrInfo cmp_proc_finfo
Definition typcache.h:77
struct TypeCacheEntry * rngelemtype
Definition typcache.h:99
TupleDesc tupDesc
Definition typcache.h:90
FmgrInfo hash_extended_proc_finfo
Definition typcache.h:79
DomainConstraintCache * domainData
Definition typcache.h:122
struct TypeCacheEntry * rngtype
Definition typcache.h:109
FmgrInfo rng_subdiff_finfo
Definition typcache.h:104
FmgrInfo eq_opr_finfo
Definition typcache.h:76
Oid btree_opintype
Definition typcache.h:59
struct TypeCacheEnumData * enumData
Definition typcache.h:131
struct TypeCacheEntry * nextDomain
Definition typcache.h:134
FmgrInfo rng_canonical_finfo
Definition typcache.h:103
Oid hash_opintype
Definition typcache.h:61
char typstorage
Definition typcache.h:42
Bitmapset * sorted_values
Definition typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition typcache.c:158
void ReleaseSysCache(HeapTuple tuple)
Definition syscache.c:264
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition syscache.c:220
#define GetSysCacheHashValue1(cacheId, key1)
Definition syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition table.c:40
static ItemArray items
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition tupdesc.c:340
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition tupdesc.c:428
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:577
void FreeTupleDesc(TupleDesc tupdesc)
Definition tupdesc.c:502
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition tupdesc.c:559
uint32 hashRowType(TupleDesc desc)
Definition tupdesc.c:813
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition tupdesc.c:252
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition tupdesc.c:777
#define TupleDescSize(src)
Definition tupdesc.h:198
#define PinTupleDesc(tupdesc)
Definition tupdesc.h:213
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition tupdesc.h:160
struct TupleDescData * TupleDesc
Definition tupdesc.h:145
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1714
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3074
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition typcache.c:1401
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1827
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition typcache.c:1921
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition typcache.c:2295
#define TCFLAGS_OPERATOR_FLAGS
Definition typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1730
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition typcache.c:115
void AtEOXact_TypeCache(void)
Definition typcache.c:3191
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition typcache.c:2736
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1593
static HTAB * RelIdToTypeIdCacheHash
Definition typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2891
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1601
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition typcache.c:2942
static int in_progress_list_maxlen
Definition typcache.c:228
static int32 NextRecordTypmod
Definition typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1977
static Oid * in_progress_list
Definition typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition typcache.c:3108
#define TCFLAGS_CHECKED_GT_OPR
Definition typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1754
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition typcache.c:1363
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition typcache.c:1938
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition typcache.c:1577
#define TCFLAGS_CHECKED_LT_OPR
Definition typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition typcache.c:1342
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition typcache.c:2364
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition typcache.c:2196
static int dcs_cmp(const void *a, const void *b)
Definition typcache.c:1318
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1539
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1531
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1061
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition typcache.c:359
#define TCFLAGS_CHECKED_CMP_PROC
Definition typcache.c:105
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1762
static int in_progress_list_len
Definition typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition typcache.c:1515
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition typcache.c:2921
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition typcache.c:1003
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition typcache.c:2133
static RecordCacheArrayEntry * RecordCacheArray
Definition typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition typcache.c:1722
static HTAB * RecordCacheHash
Definition typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition typcache.c:2634
static TypeCacheEntry * firstDomainTypeEntry
Definition typcache.c:96
void AtEOSubXact_TypeCache(void)
Definition typcache.c:3197
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition typcache.c:3054
#define TCFLAGS_HAVE_ELEM_HASHING
Definition typcache.c:111
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition typcache.c:107
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition typcache.c:2515
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition typcache.c:2610
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition typcache.c:2572
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition typcache.c:1083
bool DomainHasConstraints(Oid type_id)
Definition typcache.c:1488
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition typcache.c:2419
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1547
size_t SharedRecordTypmodRegistryEstimate(void)
Definition typcache.c:2174
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition typcache.c:1770
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition typcache.c:108
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition typcache.c:1523
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition typcache.c:2663
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition typcache.c:117
static int32 RecordCacheArrayLen
Definition typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition typcache.c:2041
static HTAB * TypeCacheHash
Definition typcache.c:79
static uint64 tupledesc_id_counter
Definition typcache.c:313
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition typcache.c:1585
#define TCFLAGS_HAVE_FIELD_HASHING
Definition typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition typcache.c:2025
static const dshash_parameters srtr_record_table_params
Definition typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition typcache.c:1955
static int enum_oid_cmp(const void *left, const void *right)
Definition typcache.c:2908
static void finalize_in_progress_typentries(void)
Definition typcache.c:3172
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition typcache.c:1331
#define TCFLAGS_CHECKED_EQ_OPR
Definition typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition typcache.c:1439
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition typcache.c:386
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition typcache.c:1798
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition typcache.c:1609
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition typcache.c:2014
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition typcache.c:969
#define INVALID_TUPLEDESC_IDENTIFIER
Definition typcache.h:157
#define TYPECACHE_HASH_PROC_FINFO
Definition typcache.h:145
#define TYPECACHE_EQ_OPR
Definition typcache.h:138
#define TYPECACHE_HASH_OPFAMILY
Definition typcache.h:148
#define TYPECACHE_TUPDESC
Definition typcache.h:146
#define TYPECACHE_MULTIRANGE_INFO
Definition typcache.h:154
#define TYPECACHE_EQ_OPR_FINFO
Definition typcache.h:143
#define TYPECACHE_HASH_EXTENDED_PROC
Definition typcache.h:152
#define TYPECACHE_BTREE_OPFAMILY
Definition typcache.h:147
#define TYPECACHE_DOMAIN_BASE_INFO
Definition typcache.h:150
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition typcache.h:151
#define TYPECACHE_RANGE_INFO
Definition typcache.h:149
#define TYPECACHE_GT_OPR
Definition typcache.h:140
#define TYPECACHE_CMP_PROC
Definition typcache.h:141
#define TYPECACHE_LT_OPR
Definition typcache.h:139
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition typcache.h:153
#define TYPECACHE_CMP_PROC_FINFO
Definition typcache.h:144
#define TYPECACHE_HASH_PROC
Definition typcache.h:142

Typedef Documentation

◆ RecordCacheArrayEntry

◆ RecordCacheEntry

◆ RelIdToTypeIdCacheEntry

◆ SharedRecordTableEntry

◆ SharedRecordTableKey

◆ SharedTypmodTableEntry

◆ TypeCacheEnumData

Function Documentation

◆ array_element_has_compare()

static bool array_element_has_compare ( TypeCacheEntry *typentry)
static

Definition at line 1523 of file typcache.c.

1524{
1525 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1527 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1528}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_COMPARE.

Referenced by lookup_type_cache().

◆ array_element_has_equality()

static bool array_element_has_equality ( TypeCacheEntry *typentry)
static

Definition at line 1515 of file typcache.c.

1516{
1517 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1519 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1520}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_EQUALITY.

Referenced by lookup_type_cache().

◆ array_element_has_extended_hashing()

static bool array_element_has_extended_hashing ( TypeCacheEntry *typentry)
static

◆ array_element_has_hashing()

static bool array_element_has_hashing ( TypeCacheEntry *typentry)
static

Definition at line 1531 of file typcache.c.

1532{
1533 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1535 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1536}

References cache_array_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_HASHING.

Referenced by lookup_type_cache().

◆ assign_record_type_identifier()

uint64 assign_record_type_identifier ( Oid  type_id,
int32  typmod 
)

Definition at line 2133 of file typcache.c.

2134{
2135 if (type_id != RECORDOID)
2136 {
2137 /*
2138 * It's a named composite type, so use the regular typcache.
2139 */
2140 TypeCacheEntry *typentry;
2141
2142 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2143 if (typentry->tupDesc == NULL)
2144 ereport(ERROR,
2146 errmsg("type %s is not composite",
2147 format_type_be(type_id))));
2148 Assert(typentry->tupDesc_identifier != 0);
2149 return typentry->tupDesc_identifier;
2150 }
2151 else
2152 {
2153 /*
2154 * It's a transient record type, so look in our record-type table.
2155 */
2156 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2157 RecordCacheArray[typmod].tupdesc != NULL)
2158 {
2159 Assert(RecordCacheArray[typmod].id != 0);
2160 return RecordCacheArray[typmod].id;
2161 }
2162
2163 /* For anonymous or unrecognized record type, generate a new ID */
2164 return ++tupledesc_id_counter;
2165 }
2166}

References Assert, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), RecordCacheArrayEntry::id, lookup_type_cache(), RecordCacheArray, RecordCacheArrayLen, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, tupledesc_id_counter, and TYPECACHE_TUPDESC.

Referenced by expanded_record_fetch_tupdesc(), make_expanded_record_from_tupdesc(), and make_expanded_record_from_typeid().

◆ assign_record_type_typmod()

void assign_record_type_typmod ( TupleDesc  tupDesc)

Definition at line 2041 of file typcache.c.

2042{
2045 bool found;
2047
2048 Assert(tupDesc->tdtypeid == RECORDOID);
2049
2050 if (RecordCacheHash == NULL)
2051 {
2052 /* First time through: initialize the hash table */
2053 HASHCTL ctl;
2054
2055 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2056 ctl.entrysize = sizeof(RecordCacheEntry);
2059 RecordCacheHash = hash_create("Record information cache", 64,
2060 &ctl,
2062
2063 /* Also make sure CacheMemoryContext exists */
2064 if (!CacheMemoryContext)
2066 }
2067
2068 /*
2069 * Find a hashtable entry for this tuple descriptor. We don't use
2070 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2071 * the allocations succeed before we create the new entry.
2072 */
2074 &tupDesc,
2075 HASH_FIND, &found);
2076 if (found && recentry->tupdesc != NULL)
2077 {
2078 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2079 return;
2080 }
2081
2082 /* Not present, so need to manufacture an entry */
2084
2085 /* Look in the SharedRecordTypmodRegistry, if attached */
2087 if (entDesc == NULL)
2088 {
2089 /*
2090 * Make sure we have room before we CreateTupleDescCopy() or advance
2091 * NextRecordTypmod.
2092 */
2094
2095 /* Reference-counted local cache only. */
2096 entDesc = CreateTupleDescCopy(tupDesc);
2097 entDesc->tdrefcount = 1;
2098 entDesc->tdtypmod = NextRecordTypmod++;
2099 }
2100 else
2101 {
2103 }
2104
2106
2107 /* Assign a unique tupdesc identifier, too. */
2109
2110 /* Fully initialized; create the hash table entry */
2112 &tupDesc,
2113 HASH_ENTER, NULL);
2114 recentry->tupdesc = entDesc;
2115
2116 /* Update the caller's tuple descriptor. */
2117 tupDesc->tdtypmod = entDesc->tdtypmod;
2118
2120}

References Assert, CacheMemoryContext, CreateCacheMemoryContext(), CreateTupleDescCopy(), ctl, ensure_record_cache_typmod_slot_exists(), fb(), find_or_make_matching_shared_tupledesc(), HASH_COMPARE, hash_create(), HASH_ELEM, HASH_ENTER, HASH_FIND, HASH_FUNCTION, hash_search(), RecordCacheArrayEntry::id, HASHCTL::keysize, MemoryContextSwitchTo(), NextRecordTypmod, record_type_typmod_compare(), record_type_typmod_hash(), RecordCacheArray, RecordCacheHash, TupleDescData::tdtypeid, TupleDescData::tdtypmod, RecordCacheArrayEntry::tupdesc, and tupledesc_id_counter.

Referenced by BlessTupleDesc(), ER_get_flat_size(), internal_get_result_type(), and SPI_returntuple().

◆ AtEOSubXact_TypeCache()

void AtEOSubXact_TypeCache ( void  )

Definition at line 3197 of file typcache.c.

3198{
3200}

References finalize_in_progress_typentries().

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_TypeCache()

void AtEOXact_TypeCache ( void  )

◆ cache_array_element_properties()

◆ cache_multirange_element_properties()

static void cache_multirange_element_properties ( TypeCacheEntry *typentry)
static

Definition at line 1770 of file typcache.c.

1771{
1772 /* load up range link if we didn't already */
1773 if (typentry->rngtype == NULL &&
1774 typentry->typtype == TYPTYPE_MULTIRANGE)
1775 load_multirangetype_info(typentry);
1776
1777 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1778 {
1780
1781 /* might need to calculate subtype's hash function properties */
1785 if (OidIsValid(elementry->hash_proc))
1786 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1787 if (OidIsValid(elementry->hash_extended_proc))
1789 }
1791}

References fb(), TypeCacheEntry::flags, load_multirangetype_info(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngelemtype, TypeCacheEntry::rngtype, TCFLAGS_CHECKED_ELEM_PROPERTIES, TCFLAGS_HAVE_ELEM_EXTENDED_HASHING, TCFLAGS_HAVE_ELEM_HASHING, TypeCacheEntry::type_id, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by multirange_element_has_extended_hashing(), and multirange_element_has_hashing().

◆ cache_range_element_properties()

static void cache_range_element_properties ( TypeCacheEntry *typentry)
static

Definition at line 1730 of file typcache.c.

1731{
1732 /* load up subtype link if we didn't already */
1733 if (typentry->rngelemtype == NULL &&
1734 typentry->typtype == TYPTYPE_RANGE)
1735 load_rangetype_info(typentry);
1736
1737 if (typentry->rngelemtype != NULL)
1738 {
1740
1741 /* might need to calculate subtype's hash function properties */
1745 if (OidIsValid(elementry->hash_proc))
1746 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1747 if (OidIsValid(elementry->hash_extended_proc))
1749 }
1751}

References fb(), TypeCacheEntry::flags, load_rangetype_info(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngelemtype, TCFLAGS_CHECKED_ELEM_PROPERTIES, TCFLAGS_HAVE_ELEM_EXTENDED_HASHING, TCFLAGS_HAVE_ELEM_HASHING, TypeCacheEntry::type_id, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by range_element_has_extended_hashing(), and range_element_has_hashing().

◆ cache_record_field_properties()

static void cache_record_field_properties ( TypeCacheEntry *typentry)
static

Definition at line 1609 of file typcache.c.

1610{
1611 /*
1612 * For type RECORD, we can't really tell what will work, since we don't
1613 * have access here to the specific anonymous type. Just assume that
1614 * equality and comparison will (we may get a failure at runtime). We
1615 * could also claim that hashing works, but then if code that has the
1616 * option between a comparison-based (sort-based) and a hash-based plan
1617 * chooses hashing, stuff could fail that would otherwise work if it chose
1618 * a comparison-based plan. In practice more types support comparison
1619 * than hashing.
1620 */
1621 if (typentry->type_id == RECORDOID)
1622 {
1623 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1625 }
1626 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1627 {
1628 TupleDesc tupdesc;
1629 int newflags;
1630 int i;
1631
1632 /* Fetch composite type's tupdesc if we don't have it already */
1633 if (typentry->tupDesc == NULL)
1634 load_typcache_tupdesc(typentry);
1635 tupdesc = typentry->tupDesc;
1636
1637 /* Must bump the refcount while we do additional catalog lookups */
1638 IncrTupleDescRefCount(tupdesc);
1639
1640 /* Have each property if all non-dropped fields have the property */
1645 for (i = 0; i < tupdesc->natts; i++)
1646 {
1648 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1649
1650 if (attr->attisdropped)
1651 continue;
1652
1653 fieldentry = lookup_type_cache(attr->atttypid,
1658 if (!OidIsValid(fieldentry->eq_opr))
1660 if (!OidIsValid(fieldentry->cmp_proc))
1662 if (!OidIsValid(fieldentry->hash_proc))
1664 if (!OidIsValid(fieldentry->hash_extended_proc))
1666
1667 /* We can drop out of the loop once we disprove all bits */
1668 if (newflags == 0)
1669 break;
1670 }
1671 typentry->flags |= newflags;
1672
1673 DecrTupleDescRefCount(tupdesc);
1674 }
1675 else if (typentry->typtype == TYPTYPE_DOMAIN)
1676 {
1677 /* If it's domain over composite, copy base type's properties */
1679
1680 /* load up basetype info if we didn't already */
1681 if (typentry->domainBaseType == InvalidOid)
1682 {
1683 typentry->domainBaseTypmod = -1;
1684 typentry->domainBaseType =
1685 getBaseTypeAndTypmod(typentry->type_id,
1686 &typentry->domainBaseTypmod);
1687 }
1693 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1694 {
1696 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1700 }
1701 }
1703}

References DecrTupleDescRefCount(), TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, fb(), TypeCacheEntry::flags, getBaseTypeAndTypmod(), i, IncrTupleDescRefCount(), InvalidOid, load_typcache_tupdesc(), lookup_type_cache(), TupleDescData::natts, OidIsValid, TCFLAGS_CHECKED_FIELD_PROPERTIES, TCFLAGS_DOMAIN_BASE_IS_COMPOSITE, TCFLAGS_HAVE_FIELD_COMPARE, TCFLAGS_HAVE_FIELD_EQUALITY, TCFLAGS_HAVE_FIELD_EXTENDED_HASHING, TCFLAGS_HAVE_FIELD_HASHING, TypeCacheEntry::tupDesc, TupleDescAttr(), TypeCacheEntry::type_id, TYPECACHE_CMP_PROC, TYPECACHE_EQ_OPR, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_PROC, and TypeCacheEntry::typtype.

Referenced by record_fields_have_compare(), record_fields_have_equality(), record_fields_have_extended_hashing(), and record_fields_have_hashing().

◆ compare_values_of_enum()

int compare_values_of_enum ( TypeCacheEntry *tcache,
Oid  arg1,
Oid  arg2 
)

Definition at line 2663 of file typcache.c.

2664{
2666 EnumItem *item1;
2667 EnumItem *item2;
2668
2669 /*
2670 * Equal OIDs are certainly equal --- this case was probably handled by
2671 * our caller, but we may as well check.
2672 */
2673 if (arg1 == arg2)
2674 return 0;
2675
2676 /* Load up the cache if first time through */
2677 if (tcache->enumData == NULL)
2678 load_enum_cache_data(tcache);
2679 enumdata = tcache->enumData;
2680
2681 /*
2682 * If both OIDs are known-sorted, we can just compare them directly.
2683 */
2686 {
2687 if (arg1 < arg2)
2688 return -1;
2689 else
2690 return 1;
2691 }
2692
2693 /*
2694 * Slow path: we have to identify their actual sort-order positions.
2695 */
2698
2699 if (item1 == NULL || item2 == NULL)
2700 {
2701 /*
2702 * We couldn't find one or both values. That means the enum has
2703 * changed under us, so re-initialize the cache and try again. We
2704 * don't bother retrying the known-sorted case in this path.
2705 */
2706 load_enum_cache_data(tcache);
2707 enumdata = tcache->enumData;
2708
2711
2712 /*
2713 * If we still can't find the values, complain: we must have corrupt
2714 * data.
2715 */
2716 if (item1 == NULL)
2717 elog(ERROR, "enum value %u not found in cache for enum %s",
2718 arg1, format_type_be(tcache->type_id));
2719 if (item2 == NULL)
2720 elog(ERROR, "enum value %u not found in cache for enum %s",
2721 arg2, format_type_be(tcache->type_id));
2722 }
2723
2724 if (item1->sort_order < item2->sort_order)
2725 return -1;
2726 else if (item1->sort_order > item2->sort_order)
2727 return 1;
2728 else
2729 return 0;
2730}

References elog, enum_known_sorted(), TypeCacheEntry::enumData, ERROR, fb(), find_enumitem(), format_type_be(), load_enum_cache_data(), and TypeCacheEntry::type_id.

Referenced by enum_cmp_internal().

◆ dccref_deletion_callback()

static void dccref_deletion_callback ( void *arg)
static

Definition at line 1342 of file typcache.c.

1343{
1345 DomainConstraintCache *dcc = ref->dcc;
1346
1347 /* Paranoia --- be sure link is nulled before trying to release */
1348 if (dcc)
1349 {
1350 ref->constraints = NIL;
1351 ref->dcc = NULL;
1352 decr_dcc_refcount(dcc);
1353 }
1354}

References arg, DomainConstraintCache::constraints, decr_dcc_refcount(), fb(), and NIL.

Referenced by InitDomainConstraintRef().

◆ dcs_cmp()

static int dcs_cmp ( const void *a,
const void *b 
)
static

Definition at line 1318 of file typcache.c.

1319{
1320 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1321 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1322
1323 return strcmp((*ca)->name, (*cb)->name);
1324}

References a, b, and fb().

Referenced by load_domaintype_info().

◆ decr_dcc_refcount()

static void decr_dcc_refcount ( DomainConstraintCache *dcc)
static

◆ delete_rel_type_cache_if_needed()

static void delete_rel_type_cache_if_needed ( TypeCacheEntry *typentry)
static

Definition at line 3108 of file typcache.c.

3109{
3110#ifdef USE_ASSERT_CHECKING
3111 int i;
3112 bool is_in_progress = false;
3113
3114 for (i = 0; i < in_progress_list_len; i++)
3115 {
3116 if (in_progress_list[i] == typentry->type_id)
3117 {
3118 is_in_progress = true;
3119 break;
3120 }
3121 }
3122#endif
3123
3124 /* Immediately quit for non-composite types */
3125 if (typentry->typtype != TYPTYPE_COMPOSITE)
3126 return;
3127
3128 /* typrelid should be given for composite types */
3129 Assert(OidIsValid(typentry->typrelid));
3130
3131 /*
3132 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3133 * information indicating entry should be still there.
3134 */
3135 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3136 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3137 typentry->tupDesc == NULL)
3138 {
3139 bool found;
3140
3142 &typentry->typrelid,
3143 HASH_REMOVE, &found);
3144 Assert(found || is_in_progress);
3145 }
3146 else
3147 {
3148#ifdef USE_ASSERT_CHECKING
3149 /*
3150 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3151 * entry if it should exist.
3152 */
3153 bool found;
3154
3155 if (!is_in_progress)
3156 {
3158 &typentry->typrelid,
3159 HASH_FIND, &found);
3160 Assert(found);
3161 }
3162#endif
3163 }
3164}

References Assert, fb(), TypeCacheEntry::flags, HASH_FIND, HASH_REMOVE, hash_search(), i, in_progress_list, in_progress_list_len, OidIsValid, RelIdToTypeIdCacheHash, TCFLAGS_HAVE_PG_TYPE_DATA, TCFLAGS_OPERATOR_FLAGS, TypeCacheEntry::tupDesc, TypeCacheEntry::type_id, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by InvalidateCompositeTypeCacheEntry(), TypeCacheOpcCallback(), and TypeCacheTypCallback().

◆ DomainHasConstraints()

bool DomainHasConstraints ( Oid  type_id)

Definition at line 1488 of file typcache.c.

1489{
1490 TypeCacheEntry *typentry;
1491
1492 /*
1493 * Note: a side effect is to cause the typcache's domain data to become
1494 * valid. This is fine since we'll likely need it soon if there is any.
1495 */
1497
1498 return (typentry->domainData != NULL);
1499}

References TypeCacheEntry::domainData, fb(), lookup_type_cache(), and TYPECACHE_DOMAIN_CONSTR_INFO.

Referenced by ATColumnChangeRequiresRewrite(), ATExecAddColumn(), eval_const_expressions_mutator(), ExecInitJsonCoercion(), and transformJsonFuncExpr().

◆ ensure_record_cache_typmod_slot_exists()

static void ensure_record_cache_typmod_slot_exists ( int32  typmod)
static

◆ enum_known_sorted()

static bool enum_known_sorted ( TypeCacheEnumData *enumdata,
Oid  arg 
)
inlinestatic

Definition at line 2634 of file typcache.c.

2635{
2636 Oid offset;
2637
2638 if (arg < enumdata->bitmap_base)
2639 return false;
2640 offset = arg - enumdata->bitmap_base;
2641 if (offset > (Oid) INT_MAX)
2642 return false;
2643 return bms_is_member((int) offset, enumdata->sorted_values);
2644}

References arg, bms_is_member(), and fb().

Referenced by compare_values_of_enum().

◆ enum_oid_cmp()

static int enum_oid_cmp ( const void *left,
const void *right 
)
static

Definition at line 2908 of file typcache.c.

2909{
2910 const EnumItem *l = (const EnumItem *) left;
2911 const EnumItem *r = (const EnumItem *) right;
2912
2913 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2914}

References EnumItem::enum_oid, and pg_cmp_u32().

Referenced by find_enumitem(), and load_enum_cache_data().

◆ finalize_in_progress_typentries()

static void finalize_in_progress_typentries ( void  )
static

Definition at line 3172 of file typcache.c.

3173{
3174 int i;
3175
3176 for (i = 0; i < in_progress_list_len; i++)
3177 {
3178 TypeCacheEntry *typentry;
3179
3182 HASH_FIND, NULL);
3183 if (typentry)
3185 }
3186
3188}

References fb(), HASH_FIND, hash_search(), i, in_progress_list, in_progress_list_len, insert_rel_type_cache_if_needed(), and TypeCacheHash.

Referenced by AtEOSubXact_TypeCache(), and AtEOXact_TypeCache().

◆ find_enumitem()

static EnumItem * find_enumitem ( TypeCacheEnumData * enumdata,
Oid  arg 
)
static

Definition at line 2891 of file typcache.c.

2892{
2893 EnumItem srch;
2894
2895 /* On some versions of Solaris, bsearch of zero items dumps core */
2896 if (enumdata->num_values <= 0)
2897 return NULL;
2898
2899 srch.enum_oid = arg;
2900 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2901 sizeof(EnumItem), enum_oid_cmp);
2902}

References arg, EnumItem::enum_oid, enum_oid_cmp(), and fb().

Referenced by compare_values_of_enum().

◆ find_or_make_matching_shared_tupledesc()

static TupleDesc find_or_make_matching_shared_tupledesc ( TupleDesc  tupdesc)
static

Definition at line 2942 of file typcache.c.

2943{
2944 TupleDesc result;
2949 bool found;
2950 uint32 typmod;
2951
2952 /* If not even attached, nothing to do. */
2954 return NULL;
2955
2956 /* Try to find a matching tuple descriptor in the record table. */
2957 key.shared = false;
2958 key.u.local_tupdesc = tupdesc;
2962 {
2963 Assert(record_table_entry->key.shared);
2966 result = (TupleDesc)
2968 record_table_entry->key.u.shared_tupdesc);
2969 Assert(result->tdrefcount == -1);
2970
2971 return result;
2972 }
2973
2974 /* Allocate a new typmod number. This will be wasted if we error out. */
2975 typmod = (int)
2977 1);
2978
2979 /* Copy the TupleDesc into shared memory. */
2980 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2981
2982 /*
2983 * Create an entry in the typmod table so that others will understand this
2984 * typmod number.
2985 */
2986 PG_TRY();
2987 {
2990 &typmod, &found);
2991 if (found)
2992 elog(ERROR, "cannot create duplicate shared record typmod");
2993 }
2994 PG_CATCH();
2995 {
2997 PG_RE_THROW();
2998 }
2999 PG_END_TRY();
3000 typmod_table_entry->typmod = typmod;
3001 typmod_table_entry->shared_tupdesc = shared_dp;
3004
3005 /*
3006 * Finally create an entry in the record table so others with matching
3007 * tuple descriptors can reuse the typmod.
3008 */
3011 &found);
3012 if (found)
3013 {
3014 /*
3015 * Someone concurrently inserted a matching tuple descriptor since the
3016 * first time we checked. Use that one instead.
3017 */
3020
3021 /* Might as well free up the space used by the one we created. */
3023 &typmod);
3024 Assert(found);
3026
3027 /* Return the one we found. */
3028 Assert(record_table_entry->key.shared);
3029 result = (TupleDesc)
3031 record_table_entry->key.u.shared_tupdesc);
3032 Assert(result->tdrefcount == -1);
3033
3034 return result;
3035 }
3036
3037 /* Store it and return it. */
3038 record_table_entry->key.shared = true;
3039 record_table_entry->key.u.shared_tupdesc = shared_dp;
3042 result = (TupleDesc)
3044 Assert(result->tdrefcount == -1);
3045
3046 return result;
3047}

References Session::area, Assert, CurrentSession, dsa_free(), dsa_get_address(), dshash_delete_key(), dshash_find(), dshash_find_or_insert(), dshash_release_lock(), elog, ERROR, fb(), SharedRecordTypmodRegistry::next_typmod, pg_atomic_fetch_add_u32(), PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, share_tupledesc(), Session::shared_record_table, Session::shared_typmod_registry, Session::shared_typmod_table, and TupleDescData::tdrefcount.

Referenced by assign_record_type_typmod().

◆ InitDomainConstraintRef()

void InitDomainConstraintRef ( Oid  type_id,
DomainConstraintRef * ref,
MemoryContext  refctx,
bool  need_exprstate 
)

Definition at line 1401 of file typcache.c.

1403{
1404 /* Look up the typcache entry --- we assume it survives indefinitely */
1406 ref->need_exprstate = need_exprstate;
1407 /* For safety, establish the callback before acquiring a refcount */
1408 ref->refctx = refctx;
1409 ref->dcc = NULL;
1410 ref->callback.func = dccref_deletion_callback;
1411 ref->callback.arg = ref;
1412 MemoryContextRegisterResetCallback(refctx, &ref->callback);
1413 /* Acquire refcount if there are constraints, and set up exported list */
1414 if (ref->tcache->domainData)
1415 {
1416 ref->dcc = ref->tcache->domainData;
1417 ref->dcc->dccRefCount++;
1418 if (ref->need_exprstate)
1419 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1420 ref->refctx);
1421 else
1422 ref->constraints = ref->dcc->constraints;
1423 }
1424 else
1425 ref->constraints = NIL;
1426}

References dccref_deletion_callback(), fb(), lookup_type_cache(), MemoryContextRegisterResetCallback(), NIL, prep_domain_constraints(), and TYPECACHE_DOMAIN_CONSTR_INFO.

Referenced by domain_state_setup(), and ExecInitCoerceToDomain().

◆ insert_rel_type_cache_if_needed()

static void insert_rel_type_cache_if_needed ( TypeCacheEntry * typentry)
static

Definition at line 3074 of file typcache.c.

3075{
3076 /* Immediately quit for non-composite types */
3077 if (typentry->typtype != TYPTYPE_COMPOSITE)
3078 return;
3079
3080 /* typrelid should be given for composite types */
3081 Assert(OidIsValid(typentry->typrelid));
3082
3083 /*
3084 * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3085 * information indicating it should be here.
3086 */
3087 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3088 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3089 typentry->tupDesc != NULL)
3090 {
3092 bool found;
3093
3095 &typentry->typrelid,
3096 HASH_ENTER, &found);
3097 relentry->relid = typentry->typrelid;
3098 relentry->composite_typid = typentry->type_id;
3099 }
3100}

References Assert, fb(), TypeCacheEntry::flags, HASH_ENTER, hash_search(), OidIsValid, RelIdToTypeIdCacheEntry::relid, RelIdToTypeIdCacheHash, TCFLAGS_HAVE_PG_TYPE_DATA, TCFLAGS_OPERATOR_FLAGS, TypeCacheEntry::tupDesc, TypeCacheEntry::type_id, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by finalize_in_progress_typentries(), and lookup_type_cache().

◆ InvalidateCompositeTypeCacheEntry()

static void InvalidateCompositeTypeCacheEntry ( TypeCacheEntry * typentry)
static

Definition at line 2364 of file typcache.c.

2365{
2367
2368 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2369 OidIsValid(typentry->typrelid));
2370
2371 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2372 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2373
2374 /* Delete tupdesc if we have it */
2375 if (typentry->tupDesc != NULL)
2376 {
2377 /*
2378 * Release our refcount and free the tupdesc if none remain. We can't
2379 * use DecrTupleDescRefCount here because this reference is not logged
2380 * by the current resource owner.
2381 */
2382 Assert(typentry->tupDesc->tdrefcount > 0);
2383 if (--typentry->tupDesc->tdrefcount == 0)
2384 FreeTupleDesc(typentry->tupDesc);
2385 typentry->tupDesc = NULL;
2386
2387 /*
2388 * Also clear tupDesc_identifier, so that anyone watching it will
2389 * realize that the tupdesc has changed.
2390 */
2391 typentry->tupDesc_identifier = 0;
2392 }
2393
2394 /* Reset equality/comparison/hashing validity information */
2395 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2396
2397 /*
2398 * Call delete_rel_type_cache_if_needed() if we actually cleared
2399 * something.
2400 */
2403}

References Assert, delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, FreeTupleDesc(), OidIsValid, TCFLAGS_OPERATOR_FLAGS, TupleDescData::tdrefcount, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by TypeCacheRelCallback().

◆ load_domaintype_info()

static void load_domaintype_info ( TypeCacheEntry * typentry)
static

Definition at line 1083 of file typcache.c.

1084{
1085 Oid typeOid = typentry->type_id;
1087 bool notNull = false;
1089 int cconslen;
1092
1093 /*
1094 * If we're here, any existing constraint info is stale, so release it.
1095 * For safety, be sure to null the link before trying to delete the data.
1096 */
1097 if (typentry->domainData)
1098 {
1099 dcc = typentry->domainData;
1100 typentry->domainData = NULL;
1101 decr_dcc_refcount(dcc);
1102 }
1103
1104 /*
1105 * We try to optimize the common case of no domain constraints, so don't
1106 * create the dcc object and context until we find a constraint. Likewise
1107 * for the temp sorting array.
1108 */
1109 dcc = NULL;
1110 ccons = NULL;
1111 cconslen = 0;
1112
1113 /*
1114 * Scan pg_constraint for relevant constraints. We want to find
1115 * constraints for not just this domain, but any ancestor domains, so the
1116 * outer loop crawls up the domain stack.
1117 */
1119
1120 for (;;)
1121 {
1122 HeapTuple tup;
1125 int nccons = 0;
1126 ScanKeyData key[1];
1127 SysScanDesc scan;
1128
1130 if (!HeapTupleIsValid(tup))
1131 elog(ERROR, "cache lookup failed for type %u", typeOid);
1133
1134 if (typTup->typtype != TYPTYPE_DOMAIN)
1135 {
1136 /* Not a domain, so done */
1138 break;
1139 }
1140
1141 /* Test for NOT NULL Constraint */
1142 if (typTup->typnotnull)
1143 notNull = true;
1144
1145 /* Look for CHECK Constraints on this domain */
1146 ScanKeyInit(&key[0],
1149 ObjectIdGetDatum(typeOid));
1150
1152 NULL, 1, key);
1153
1155 {
1157 Datum val;
1158 bool isNull;
1159 char *constring;
1160 Expr *check_expr;
1162
1163 /* Ignore non-CHECK constraints */
1164 if (c->contype != CONSTRAINT_CHECK)
1165 continue;
1166
1167 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1169 conRel->rd_att, &isNull);
1170 if (isNull)
1171 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1172 NameStr(typTup->typname), NameStr(c->conname));
1173
1174 /* Create the DomainConstraintCache object and context if needed */
1175 if (dcc == NULL)
1176 {
1177 MemoryContext cxt;
1178
1180 "Domain constraints",
1182 dcc = (DomainConstraintCache *)
1184 dcc->constraints = NIL;
1185 dcc->dccContext = cxt;
1186 dcc->dccRefCount = 0;
1187 }
1188
1189 /* Convert conbin to a node tree, still in caller's context */
1191 check_expr = (Expr *) stringToNode(constring);
1192
1193 /*
1194 * Plan the expression, since ExecInitExpr will expect that.
1195 *
1196 * Note: caching the result of expression_planner() is not very
1197 * good practice. Ideally we'd use a CachedExpression here so
1198 * that we would react promptly to, eg, changes in inlined
1199 * functions. However, because we don't support mutable domain
1200 * CHECK constraints, it's not really clear that it's worth the
1201 * extra overhead to do that.
1202 */
1203 check_expr = expression_planner(check_expr);
1204
1205 /* Create only the minimally needed stuff in dccContext */
1207
1210 r->name = pstrdup(NameStr(c->conname));
1211 r->check_expr = copyObject(check_expr);
1212 r->check_exprstate = NULL;
1213
1215
1216 /* Accumulate constraints in an array, for sorting below */
1217 if (ccons == NULL)
1218 {
1219 cconslen = 8;
1222 }
1223 else if (nccons >= cconslen)
1224 {
1225 cconslen *= 2;
1228 }
1229 ccons[nccons++] = r;
1230 }
1231
1232 systable_endscan(scan);
1233
1234 if (nccons > 0)
1235 {
1236 /*
1237 * Sort the items for this domain, so that CHECKs are applied in a
1238 * deterministic order.
1239 */
1240 if (nccons > 1)
1242
1243 /*
1244 * Now attach them to the overall list. Use lcons() here because
1245 * constraints of parent domains should be applied earlier.
1246 */
1248 while (nccons > 0)
1249 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1251 }
1252
1253 /* loop to next domain in stack */
1254 typeOid = typTup->typbasetype;
1256 }
1257
1259
1260 /*
1261 * Only need to add one NOT NULL check regardless of how many domains in
1262 * the stack request it.
1263 */
1264 if (notNull)
1265 {
1267
1268 /* Create the DomainConstraintCache object and context if needed */
1269 if (dcc == NULL)
1270 {
1271 MemoryContext cxt;
1272
1274 "Domain constraints",
1276 dcc = (DomainConstraintCache *)
1278 dcc->constraints = NIL;
1279 dcc->dccContext = cxt;
1280 dcc->dccRefCount = 0;
1281 }
1282
1283 /* Create node trees in DomainConstraintCache's context */
1285
1287
1289 r->name = pstrdup("NOT NULL");
1290 r->check_expr = NULL;
1291 r->check_exprstate = NULL;
1292
1293 /* lcons to apply the nullness check FIRST */
1294 dcc->constraints = lcons(r, dcc->constraints);
1295
1297 }
1298
1299 /*
1300 * If we made a constraint object, move it into CacheMemoryContext and
1301 * attach it to the typcache entry.
1302 */
1303 if (dcc)
1304 {
1306 typentry->domainData = dcc;
1307 dcc->dccRefCount++; /* count the typcache's reference */
1308 }
1309
1310 /* Either way, the typcache entry's domain data is now valid. */
1312}

References AccessShareLock, ALLOCSET_SMALL_SIZES, AllocSetContextCreate, BTEqualStrategyNumber, CacheMemoryContext, DomainConstraintState::check_expr, DomainConstraintState::check_exprstate, DomainConstraintCache::constraints, DomainConstraintState::constrainttype, copyObject, CurrentMemoryContext, DomainConstraintCache::dccContext, DomainConstraintCache::dccRefCount, dcs_cmp(), decr_dcc_refcount(), DOM_CONSTRAINT_CHECK, DOM_CONSTRAINT_NOTNULL, TypeCacheEntry::domainData, elog, ERROR, expression_planner(), fastgetattr(), fb(), TypeCacheEntry::flags, GETSTRUCT(), HeapTupleIsValid, lcons(), makeNode, MemoryContextAlloc(), MemoryContextSetParent(), MemoryContextSwitchTo(), DomainConstraintState::name, NameStr, NIL, ObjectIdGetDatum(), palloc(), pstrdup(), qsort, ReleaseSysCache(), repalloc(), ScanKeyInit(), SearchSysCache1(), stringToNode(), systable_beginscan(), systable_endscan(), systable_getnext(), table_close(), table_open(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TextDatumGetCString, TypeCacheEntry::type_id, and val.

Referenced by lookup_type_cache(), and UpdateDomainConstraintRef().

◆ load_enum_cache_data()

static void load_enum_cache_data ( TypeCacheEntry * tcache)
static

Definition at line 2736 of file typcache.c.

2737{
2743 EnumItem *items;
2744 int numitems;
2745 int maxitems;
2746 Oid bitmap_base;
2747 Bitmapset *bitmap;
2749 int bm_size,
2750 start_pos;
2751
2752 /* Check that this is actually an enum */
2753 if (tcache->typtype != TYPTYPE_ENUM)
2754 ereport(ERROR,
2756 errmsg("%s is not an enum",
2757 format_type_be(tcache->type_id))));
2758
2759 /*
2760 * Read all the information for members of the enum type. We collect the
2761 * info in working memory in the caller's context, and then transfer it to
2762 * permanent memory in CacheMemoryContext. This minimizes the risk of
2763 * leaking memory from CacheMemoryContext in the event of an error partway
2764 * through.
2765 */
2766 maxitems = 64;
2767 items = palloc_array(EnumItem, maxitems);
2768 numitems = 0;
2769
2770 /* Scan pg_enum for the members of the target enum type. */
2774 ObjectIdGetDatum(tcache->type_id));
2775
2779 true, NULL,
2780 1, &skey);
2781
2783 {
2785
2786 if (numitems >= maxitems)
2787 {
2788 maxitems *= 2;
2789 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2790 }
2791 items[numitems].enum_oid = en->oid;
2792 items[numitems].sort_order = en->enumsortorder;
2793 numitems++;
2794 }
2795
2798
2799 /* Sort the items into OID order */
2800 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2801
2802 /*
2803 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2804 * known to be in order and can thus be compared with just OID comparison.
2805 *
2806 * The point of this is that the enum's initial OIDs were certainly in
2807 * order, so there is some subset that can be compared via OID comparison;
2808 * and we'd rather not do binary searches unnecessarily.
2809 *
2810 * This is somewhat heuristic, and might identify a subset of OIDs that
2811 * isn't exactly what the type started with. That's okay as long as the
2812 * subset is correctly sorted.
2813 */
2814 bitmap_base = InvalidOid;
2815 bitmap = NULL;
2816 bm_size = 1; /* only save sets of at least 2 OIDs */
2817
2818 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2819 {
2820 /*
2821 * Identify longest sorted subsequence starting at start_pos
2822 */
2824 int this_bm_size = 1;
2825 Oid start_oid = items[start_pos].enum_oid;
2826 float4 prev_order = items[start_pos].sort_order;
2827 int i;
2828
2829 for (i = start_pos + 1; i < numitems; i++)
2830 {
2831 Oid offset;
2832
2833 offset = items[i].enum_oid - start_oid;
2834 /* quit if bitmap would be too large; cutoff is arbitrary */
2835 if (offset >= 8192)
2836 break;
2837 /* include the item if it's in-order */
2838 if (items[i].sort_order > prev_order)
2839 {
2840 prev_order = items[i].sort_order;
2841 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2842 this_bm_size++;
2843 }
2844 }
2845
2846 /* Remember it if larger than previous best */
2847 if (this_bm_size > bm_size)
2848 {
2849 bms_free(bitmap);
2850 bitmap_base = start_oid;
2851 bitmap = this_bitmap;
2853 }
2854 else
2856
2857 /*
2858 * Done if it's not possible to find a longer sequence in the rest of
2859 * the list. In typical cases this will happen on the first
2860 * iteration, which is why we create the bitmaps on the fly instead of
2861 * doing a second pass over the list.
2862 */
2863 if (bm_size >= (numitems - start_pos - 1))
2864 break;
2865 }
2866
2867 /* OK, copy the data into CacheMemoryContext */
2870 palloc(offsetof(TypeCacheEnumData, enum_values) +
2871 numitems * sizeof(EnumItem));
2872 enumdata->bitmap_base = bitmap_base;
2873 enumdata->sorted_values = bms_copy(bitmap);
2874 enumdata->num_values = numitems;
2875 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2877
2878 pfree(items);
2879 bms_free(bitmap);
2880
2881 /* And link the finished cache struct into the typcache */
2882 if (tcache->enumData != NULL)
2883 pfree(tcache->enumData);
2884 tcache->enumData = enumdata;
2885}

References AccessShareLock, bms_add_member(), bms_copy(), bms_free(), bms_make_singleton(), BTEqualStrategyNumber, CacheMemoryContext, enum_oid_cmp(), TypeCacheEntry::enumData, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), GETSTRUCT(), HeapTupleIsValid, i, InvalidOid, items, MemoryContextSwitchTo(), ObjectIdGetDatum(), palloc(), palloc_array, pfree(), qsort, repalloc(), ScanKeyInit(), systable_beginscan(), systable_endscan(), systable_getnext(), table_close(), table_open(), TypeCacheEntry::type_id, and TypeCacheEntry::typtype.

Referenced by compare_values_of_enum().

◆ load_multirangetype_info()

static void load_multirangetype_info ( TypeCacheEntry * typentry)
static

Definition at line 1061 of file typcache.c.

1062{
1064
1067 elog(ERROR, "cache lookup failed for multirange type %u",
1068 typentry->type_id);
1069
1071}

References elog, ERROR, fb(), get_multirange_range(), lookup_type_cache(), OidIsValid, TypeCacheEntry::rngtype, TypeCacheEntry::type_id, and TYPECACHE_RANGE_INFO.

Referenced by cache_multirange_element_properties(), and lookup_type_cache().

◆ load_rangetype_info()

static void load_rangetype_info ( TypeCacheEntry * typentry)
static

Definition at line 1003 of file typcache.c.

1004{
1006 HeapTuple tup;
1012 Oid opcintype;
1013 Oid cmpFnOid;
1014
1015 /* get information from pg_range */
1017 /* should not fail, since we already checked typtype ... */
1018 if (!HeapTupleIsValid(tup))
1019 elog(ERROR, "cache lookup failed for range type %u",
1020 typentry->type_id);
1022
1023 subtypeOid = pg_range->rngsubtype;
1024 typentry->rng_collation = pg_range->rngcollation;
1025 opclassOid = pg_range->rngsubopc;
1026 canonicalOid = pg_range->rngcanonical;
1027 subdiffOid = pg_range->rngsubdiff;
1028
1030
1031 /* get opclass properties and look up the comparison function */
1034 typentry->rng_opfamily = opfamilyOid;
1035
1036 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1037 BTORDER_PROC);
1039 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1040 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1041
1042 /* set up cached fmgrinfo structs */
1051
1052 /* Lastly, set up link to the element type --- this marks data valid */
1054}

References BTORDER_PROC, CacheMemoryContext, elog, ERROR, fb(), fmgr_info_cxt(), get_opclass_family(), get_opclass_input_type(), get_opfamily_proc(), GETSTRUCT(), HeapTupleIsValid, lookup_type_cache(), ObjectIdGetDatum(), OidIsValid, RegProcedureIsValid, ReleaseSysCache(), TypeCacheEntry::rng_canonical_finfo, TypeCacheEntry::rng_cmp_proc_finfo, TypeCacheEntry::rng_collation, TypeCacheEntry::rng_opfamily, TypeCacheEntry::rng_subdiff_finfo, TypeCacheEntry::rngelemtype, SearchSysCache1(), and TypeCacheEntry::type_id.

Referenced by cache_range_element_properties(), and lookup_type_cache().

◆ load_typcache_tupdesc()

static void load_typcache_tupdesc ( TypeCacheEntry * typentry)
static

Definition at line 969 of file typcache.c.

970{
971 Relation rel;
972
973 if (!OidIsValid(typentry->typrelid)) /* should not happen */
974 elog(ERROR, "invalid typrelid for composite type %u",
975 typentry->type_id);
976 rel = relation_open(typentry->typrelid, AccessShareLock);
977 Assert(rel->rd_rel->reltype == typentry->type_id);
978
979 /*
980 * Link to the tupdesc and increment its refcount (we assert it's a
981 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
982 * because the reference mustn't be entered in the current resource owner;
983 * it can outlive the current query.
984 */
985 typentry->tupDesc = RelationGetDescr(rel);
986
987 Assert(typentry->tupDesc->tdrefcount > 0);
988 typentry->tupDesc->tdrefcount++;
989
990 /*
991 * In future, we could take some pains to not change tupDesc_identifier if
992 * the tupdesc didn't really change; but for now it's not worth it.
993 */
995
997}

References AccessShareLock, Assert, elog, ERROR, OidIsValid, RelationData::rd_rel, relation_close(), relation_open(), RelationGetDescr, TupleDescData::tdrefcount, TypeCacheEntry::tupDesc, TypeCacheEntry::tupDesc_identifier, tupledesc_id_counter, TypeCacheEntry::type_id, and TypeCacheEntry::typrelid.

Referenced by cache_record_field_properties(), and lookup_type_cache().

◆ lookup_rowtype_tupdesc()

◆ lookup_rowtype_tupdesc_copy()

TupleDesc lookup_rowtype_tupdesc_copy ( Oid  type_id,
int32  typmod 
)

◆ lookup_rowtype_tupdesc_domain()

TupleDesc lookup_rowtype_tupdesc_domain ( Oid  type_id,
int32  typmod,
bool  noError 
)

Definition at line 1977 of file typcache.c.

1978{
1979 TupleDesc tupDesc;
1980
1981 if (type_id != RECORDOID)
1982 {
1983 /*
1984 * Check for domain or named composite type. We might as well load
1985 * whichever data is needed.
1986 */
1987 TypeCacheEntry *typentry;
1988
1989 typentry = lookup_type_cache(type_id,
1992 if (typentry->typtype == TYPTYPE_DOMAIN)
1994 typentry->domainBaseTypmod,
1995 noError);
1996 if (typentry->tupDesc == NULL && !noError)
1997 ereport(ERROR,
1999 errmsg("type %s is not composite",
2000 format_type_be(type_id))));
2001 tupDesc = typentry->tupDesc;
2002 }
2003 else
2004 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2005 if (tupDesc != NULL)
2006 PinTupleDesc(tupDesc);
2007 return tupDesc;
2008}

References TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), lookup_rowtype_tupdesc_internal(), lookup_rowtype_tupdesc_noerror(), lookup_type_cache(), PinTupleDesc, TypeCacheEntry::tupDesc, TYPECACHE_DOMAIN_BASE_INFO, TYPECACHE_TUPDESC, and TypeCacheEntry::typtype.

Referenced by ExecEvalWholeRowVar(), hstore_from_record(), hstore_populate_record(), plperl_sv_to_datum(), and rowtype_field_matches().

◆ lookup_rowtype_tupdesc_internal()

static TupleDesc lookup_rowtype_tupdesc_internal ( Oid  type_id,
int32  typmod,
bool  noError 
)
static

Definition at line 1827 of file typcache.c.

1828{
1829 if (type_id != RECORDOID)
1830 {
1831 /*
1832 * It's a named composite type, so use the regular typcache.
1833 */
1834 TypeCacheEntry *typentry;
1835
1836 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1837 if (typentry->tupDesc == NULL && !noError)
1838 ereport(ERROR,
1840 errmsg("type %s is not composite",
1841 format_type_be(type_id))));
1842 return typentry->tupDesc;
1843 }
1844 else
1845 {
1846 /*
1847 * It's a transient record type, so look in our record-type table.
1848 */
1849 if (typmod >= 0)
1850 {
1851 /* It is already in our local cache? */
1852 if (typmod < RecordCacheArrayLen &&
1853 RecordCacheArray[typmod].tupdesc != NULL)
1854 return RecordCacheArray[typmod].tupdesc;
1855
1856 /* Are we attached to a shared record typmod registry? */
1858 {
1860
1861 /* Try to find it in the shared typmod index. */
1863 &typmod, false);
1864 if (entry != NULL)
1865 {
1866 TupleDesc tupdesc;
1867
1868 tupdesc = (TupleDesc)
1870 entry->shared_tupdesc);
1871 Assert(typmod == tupdesc->tdtypmod);
1872
1873 /* We may need to extend the local RecordCacheArray. */
1875
1876 /*
1877 * Our local array can now point directly to the TupleDesc
1878 * in shared memory, which is non-reference-counted.
1879 */
1880 RecordCacheArray[typmod].tupdesc = tupdesc;
1881 Assert(tupdesc->tdrefcount == -1);
1882
1883 /*
1884 * We don't share tupdesc identifiers across processes, so
1885 * assign one locally.
1886 */
1888
1890 entry);
1891
1892 return RecordCacheArray[typmod].tupdesc;
1893 }
1894 }
1895 }
1896
1897 if (!noError)
1898 ereport(ERROR,
1900 errmsg("record type has not been registered")));
1901 return NULL;
1902 }
1903}

References Session::area, Assert, CurrentSession, dsa_get_address(), dshash_find(), dshash_release_lock(), ensure_record_cache_typmod_slot_exists(), ereport, errcode(), errmsg(), ERROR, fb(), format_type_be(), RecordCacheArrayEntry::id, lookup_type_cache(), RecordCacheArray, RecordCacheArrayLen, SharedTypmodTableEntry::shared_tupdesc, Session::shared_typmod_registry, Session::shared_typmod_table, TupleDescData::tdrefcount, TupleDescData::tdtypmod, RecordCacheArrayEntry::tupdesc, TypeCacheEntry::tupDesc, tupledesc_id_counter, and TYPECACHE_TUPDESC.

Referenced by lookup_rowtype_tupdesc(), lookup_rowtype_tupdesc_copy(), lookup_rowtype_tupdesc_domain(), and lookup_rowtype_tupdesc_noerror().

◆ lookup_rowtype_tupdesc_noerror()

TupleDesc lookup_rowtype_tupdesc_noerror ( Oid  type_id,
int32  typmod,
bool  noError 
)

Definition at line 1938 of file typcache.c.

1939{
1940 TupleDesc tupDesc;
1941
1942 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1943 if (tupDesc != NULL)
1944 PinTupleDesc(tupDesc);
1945 return tupDesc;
1946}

References fb(), lookup_rowtype_tupdesc_internal(), and PinTupleDesc.

Referenced by lookup_rowtype_tupdesc_domain().

◆ lookup_type_cache()

TypeCacheEntry * lookup_type_cache ( Oid  type_id,
int  flags 
)

Definition at line 386 of file typcache.c.

387{
388 TypeCacheEntry *typentry;
389 bool found;
391
392 if (TypeCacheHash == NULL)
393 {
394 /* First time through: initialize the hash table */
395 HASHCTL ctl;
396 int allocsize;
397
398 ctl.keysize = sizeof(Oid);
399 ctl.entrysize = sizeof(TypeCacheEntry);
400
401 /*
402 * TypeCacheEntry takes hash value from the system cache. For
403 * TypeCacheHash we use the same hash in order to speedup search by
404 * hash value. This is used by hash_seq_init_with_hash_value().
405 */
406 ctl.hash = type_cache_syshash;
407
408 TypeCacheHash = hash_create("Type information cache", 64,
410
412
413 ctl.keysize = sizeof(Oid);
414 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
417
418 /* Also set up callbacks for SI invalidations */
423
424 /* Also make sure CacheMemoryContext exists */
427
428 /*
429 * reserve enough in_progress_list slots for many cases
430 */
431 allocsize = 4;
434 allocsize * sizeof(*in_progress_list));
435 in_progress_list_maxlen = allocsize;
436 }
437
439
440 /* Register to catch invalidation messages */
442 {
443 int allocsize;
444
445 allocsize = in_progress_list_maxlen * 2;
447 allocsize * sizeof(*in_progress_list));
448 in_progress_list_maxlen = allocsize;
449 }
452
453 /* Try to look up an existing entry */
455 &type_id,
456 HASH_FIND, NULL);
457 if (typentry == NULL)
458 {
459 /*
460 * If we didn't find one, we want to make one. But first look up the
461 * pg_type row, just to make sure we don't make a cache entry for an
462 * invalid type OID. If the type OID is not valid, present a
463 * user-facing error, since some code paths such as domain_in() allow
464 * this function to be reached with a user-supplied OID.
465 */
466 HeapTuple tp;
468
470 if (!HeapTupleIsValid(tp))
473 errmsg("type with OID %u does not exist", type_id)));
475 if (!typtup->typisdefined)
478 errmsg("type \"%s\" is only a shell",
479 NameStr(typtup->typname))));
480
481 /* Now make the typcache entry */
483 &type_id,
484 HASH_ENTER, &found);
485 Assert(!found); /* it wasn't there a moment ago */
486
487 MemSet(typentry, 0, sizeof(TypeCacheEntry));
488
489 /* These fields can never change, by definition */
490 typentry->type_id = type_id;
491 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492
493 /* Keep this part in sync with the code below */
494 typentry->typlen = typtup->typlen;
495 typentry->typbyval = typtup->typbyval;
496 typentry->typalign = typtup->typalign;
497 typentry->typstorage = typtup->typstorage;
498 typentry->typtype = typtup->typtype;
499 typentry->typrelid = typtup->typrelid;
500 typentry->typsubscript = typtup->typsubscript;
501 typentry->typelem = typtup->typelem;
502 typentry->typarray = typtup->typarray;
503 typentry->typcollation = typtup->typcollation;
504 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
505
506 /* If it's a domain, immediately thread it into the domain cache list */
507 if (typentry->typtype == TYPTYPE_DOMAIN)
508 {
510 firstDomainTypeEntry = typentry;
511 }
512
513 ReleaseSysCache(tp);
514 }
515 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
516 {
517 /*
518 * We have an entry, but its pg_type row got changed, so reload the
519 * data obtained directly from pg_type.
520 */
521 HeapTuple tp;
523
525 if (!HeapTupleIsValid(tp))
528 errmsg("type with OID %u does not exist", type_id)));
530 if (!typtup->typisdefined)
533 errmsg("type \"%s\" is only a shell",
534 NameStr(typtup->typname))));
535
536 /*
537 * Keep this part in sync with the code above. Many of these fields
538 * shouldn't ever change, particularly typtype, but copy 'em anyway.
539 */
540 typentry->typlen = typtup->typlen;
541 typentry->typbyval = typtup->typbyval;
542 typentry->typalign = typtup->typalign;
543 typentry->typstorage = typtup->typstorage;
544 typentry->typtype = typtup->typtype;
545 typentry->typrelid = typtup->typrelid;
546 typentry->typsubscript = typtup->typsubscript;
547 typentry->typelem = typtup->typelem;
548 typentry->typarray = typtup->typarray;
549 typentry->typcollation = typtup->typcollation;
550 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
551
552 ReleaseSysCache(tp);
553 }
554
555 /*
556 * Look up opclasses if we haven't already and any dependent info is
557 * requested.
558 */
564 {
565 Oid opclass;
566
567 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
568 if (OidIsValid(opclass))
569 {
570 typentry->btree_opf = get_opclass_family(opclass);
571 typentry->btree_opintype = get_opclass_input_type(opclass);
572 }
573 else
574 {
575 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
576 }
577
578 /*
579 * Reset information derived from btree opclass. Note in particular
580 * that we'll redetermine the eq_opr even if we previously found one;
581 * this matters in case a btree opclass has been added to a type that
582 * previously had only a hash opclass.
583 */
584 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
589 }
590
591 /*
592 * If we need to look up equality operator, and there's no btree opclass,
593 * force lookup of hash opclass.
594 */
595 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
596 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
597 typentry->btree_opf == InvalidOid)
599
604 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
605 {
606 Oid opclass;
607
608 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
609 if (OidIsValid(opclass))
610 {
611 typentry->hash_opf = get_opclass_family(opclass);
612 typentry->hash_opintype = get_opclass_input_type(opclass);
613 }
614 else
615 {
616 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
617 }
618
619 /*
620 * Reset information derived from hash opclass. We do *not* reset the
621 * eq_opr; if we already found one from the btree opclass, that
622 * decision is still good.
623 */
624 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
627 }
628
629 /*
630 * Look for requested operators and functions, if we haven't already.
631 */
632 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
633 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
634 {
635 Oid eq_opr = InvalidOid;
636
637 if (typentry->btree_opf != InvalidOid)
638 eq_opr = get_opfamily_member(typentry->btree_opf,
639 typentry->btree_opintype,
640 typentry->btree_opintype,
642 if (eq_opr == InvalidOid &&
643 typentry->hash_opf != InvalidOid)
644 eq_opr = get_opfamily_member(typentry->hash_opf,
645 typentry->hash_opintype,
646 typentry->hash_opintype,
648
649 /*
650 * If the proposed equality operator is array_eq or record_eq, check
651 * to see if the element type or column types support equality. If
652 * not, array_eq or record_eq would fail at runtime, so we don't want
653 * to report that the type has equality. (We can omit similar
654 * checking for ranges and multiranges because ranges can't be created
655 * in the first place unless their subtypes support equality.)
656 */
657 if (eq_opr == ARRAY_EQ_OP &&
659 eq_opr = InvalidOid;
660 else if (eq_opr == RECORD_EQ_OP &&
662 eq_opr = InvalidOid;
663
664 /* Force update of eq_opr_finfo only if we're changing state */
665 if (typentry->eq_opr != eq_opr)
666 typentry->eq_opr_finfo.fn_oid = InvalidOid;
667
668 typentry->eq_opr = eq_opr;
669
670 /*
671 * Reset info about hash functions whenever we pick up new info about
672 * equality operator. This is so we can ensure that the hash
673 * functions match the operator.
674 */
675 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
677 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
678 }
679 if ((flags & TYPECACHE_LT_OPR) &&
680 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
681 {
682 Oid lt_opr = InvalidOid;
683
684 if (typentry->btree_opf != InvalidOid)
685 lt_opr = get_opfamily_member(typentry->btree_opf,
686 typentry->btree_opintype,
687 typentry->btree_opintype,
689
690 /*
691 * As above, make sure array_cmp or record_cmp will succeed; but again
692 * we need no special check for ranges or multiranges.
693 */
694 if (lt_opr == ARRAY_LT_OP &&
695 !array_element_has_compare(typentry))
696 lt_opr = InvalidOid;
697 else if (lt_opr == RECORD_LT_OP &&
699 lt_opr = InvalidOid;
700
701 typentry->lt_opr = lt_opr;
702 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
703 }
704 if ((flags & TYPECACHE_GT_OPR) &&
705 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
706 {
707 Oid gt_opr = InvalidOid;
708
709 if (typentry->btree_opf != InvalidOid)
710 gt_opr = get_opfamily_member(typentry->btree_opf,
711 typentry->btree_opintype,
712 typentry->btree_opintype,
714
715 /*
716 * As above, make sure array_cmp or record_cmp will succeed; but again
717 * we need no special check for ranges or multiranges.
718 */
719 if (gt_opr == ARRAY_GT_OP &&
720 !array_element_has_compare(typentry))
721 gt_opr = InvalidOid;
722 else if (gt_opr == RECORD_GT_OP &&
724 gt_opr = InvalidOid;
725
726 typentry->gt_opr = gt_opr;
727 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
728 }
730 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
731 {
732 Oid cmp_proc = InvalidOid;
733
734 if (typentry->btree_opf != InvalidOid)
735 cmp_proc = get_opfamily_proc(typentry->btree_opf,
736 typentry->btree_opintype,
737 typentry->btree_opintype,
739
740 /*
741 * As above, make sure array_cmp or record_cmp will succeed; but again
742 * we need no special check for ranges or multiranges.
743 */
744 if (cmp_proc == F_BTARRAYCMP &&
745 !array_element_has_compare(typentry))
746 cmp_proc = InvalidOid;
747 else if (cmp_proc == F_BTRECORDCMP &&
749 cmp_proc = InvalidOid;
750
751 /* Force update of cmp_proc_finfo only if we're changing state */
752 if (typentry->cmp_proc != cmp_proc)
753 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
754
755 typentry->cmp_proc = cmp_proc;
756 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
757 }
759 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
760 {
761 Oid hash_proc = InvalidOid;
762
763 /*
764 * We insist that the eq_opr, if one has been determined, match the
765 * hash opclass; else report there is no hash function.
766 */
767 if (typentry->hash_opf != InvalidOid &&
768 (!OidIsValid(typentry->eq_opr) ||
769 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
770 typentry->hash_opintype,
771 typentry->hash_opintype,
773 hash_proc = get_opfamily_proc(typentry->hash_opf,
774 typentry->hash_opintype,
775 typentry->hash_opintype,
777
778 /*
779 * As above, make sure hash_array, hash_record, or hash_range will
780 * succeed.
781 */
782 if (hash_proc == F_HASH_ARRAY &&
783 !array_element_has_hashing(typentry))
784 hash_proc = InvalidOid;
785 else if (hash_proc == F_HASH_RECORD &&
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RANGE &&
789 !range_element_has_hashing(typentry))
790 hash_proc = InvalidOid;
791
792 /*
793 * Likewise for hash_multirange.
794 */
795 if (hash_proc == F_HASH_MULTIRANGE &&
797 hash_proc = InvalidOid;
798
799 /* Force update of hash_proc_finfo only if we're changing state */
800 if (typentry->hash_proc != hash_proc)
802
803 typentry->hash_proc = hash_proc;
804 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
805 }
806 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
809 {
810 Oid hash_extended_proc = InvalidOid;
811
812 /*
813 * We insist that the eq_opr, if one has been determined, match the
814 * hash opclass; else report there is no hash function.
815 */
816 if (typentry->hash_opf != InvalidOid &&
817 (!OidIsValid(typentry->eq_opr) ||
818 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
819 typentry->hash_opintype,
820 typentry->hash_opintype,
822 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
823 typentry->hash_opintype,
824 typentry->hash_opintype,
826
827 /*
828 * As above, make sure hash_array_extended, hash_record_extended, or
829 * hash_range_extended will succeed.
830 */
831 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
833 hash_extended_proc = InvalidOid;
834 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840
841 /*
842 * Likewise for hash_multirange_extended.
843 */
844 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
846 hash_extended_proc = InvalidOid;
847
848 /* Force update of proc finfo only if we're changing state */
849 if (typentry->hash_extended_proc != hash_extended_proc)
851
852 typentry->hash_extended_proc = hash_extended_proc;
854 }
855
856 /*
857 * Set up fmgr lookup info as requested
858 *
859 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
860 * which is not quite right (they're really in the hash table's private
861 * memory context) but this will do for our purposes.
862 *
863 * Note: the code above avoids invalidating the finfo structs unless the
864 * referenced operator/function OID actually changes. This is to prevent
865 * unnecessary leakage of any subsidiary data attached to an finfo, since
866 * that would cause session-lifespan memory leaks.
867 */
868 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
869 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
870 typentry->eq_opr != InvalidOid)
871 {
873
874 eq_opr_func = get_opcode(typentry->eq_opr);
875 if (eq_opr_func != InvalidOid)
878 }
879 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
880 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
881 typentry->cmp_proc != InvalidOid)
882 {
883 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
885 }
886 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
887 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
888 typentry->hash_proc != InvalidOid)
889 {
890 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
892 }
895 typentry->hash_extended_proc != InvalidOid)
896 {
898 &typentry->hash_extended_proc_finfo,
900 }
901
902 /*
903 * If it's a composite type (row type), get tupdesc if requested
904 */
905 if ((flags & TYPECACHE_TUPDESC) &&
906 typentry->tupDesc == NULL &&
907 typentry->typtype == TYPTYPE_COMPOSITE)
908 {
909 load_typcache_tupdesc(typentry);
910 }
911
912 /*
913 * If requested, get information about a range type
914 *
915 * This includes making sure that the basic info about the range element
916 * type is up-to-date.
917 */
918 if ((flags & TYPECACHE_RANGE_INFO) &&
919 typentry->typtype == TYPTYPE_RANGE)
920 {
921 if (typentry->rngelemtype == NULL)
922 load_rangetype_info(typentry);
923 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
924 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
925 }
926
927 /*
928 * If requested, get information about a multirange type
929 */
930 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
931 typentry->rngtype == NULL &&
932 typentry->typtype == TYPTYPE_MULTIRANGE)
933 {
934 load_multirangetype_info(typentry);
935 }
936
937 /*
938 * If requested, get information about a domain type
939 */
940 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
941 typentry->domainBaseType == InvalidOid &&
942 typentry->typtype == TYPTYPE_DOMAIN)
943 {
944 typentry->domainBaseTypmod = -1;
945 typentry->domainBaseType =
946 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
947 }
948 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
949 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
950 typentry->typtype == TYPTYPE_DOMAIN)
951 {
952 load_domaintype_info(typentry);
953 }
954
955 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
956
959
961
962 return typentry;
963}

References array_element_has_compare(), array_element_has_equality(), array_element_has_extended_hashing(), array_element_has_hashing(), Assert, BTEqualStrategyNumber, BTGreaterStrategyNumber, BTLessStrategyNumber, BTORDER_PROC, TypeCacheEntry::btree_opf, TypeCacheEntry::btree_opintype, CacheMemoryContext, CacheRegisterRelcacheCallback(), CacheRegisterSyscacheCallback(), TypeCacheEntry::cmp_proc, TypeCacheEntry::cmp_proc_finfo, CreateCacheMemoryContext(), ctl, TypeCacheEntry::domainBaseType, TypeCacheEntry::domainBaseTypmod, TypeCacheEntry::eq_opr, TypeCacheEntry::eq_opr_finfo, ereport, errcode(), errmsg(), ERROR, fb(), firstDomainTypeEntry, TypeCacheEntry::flags, fmgr_info_cxt(), FmgrInfo::fn_oid, get_hash_value(), get_opclass_family(), get_opclass_input_type(), get_opcode(), get_opfamily_member(), get_opfamily_proc(), getBaseTypeAndTypmod(), GetDefaultOpClass(), GETSTRUCT(), TypeCacheEntry::gt_opr, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, TypeCacheEntry::hash_extended_proc, TypeCacheEntry::hash_extended_proc_finfo, HASH_FIND, HASH_FUNCTION, TypeCacheEntry::hash_opf, TypeCacheEntry::hash_opintype, TypeCacheEntry::hash_proc, TypeCacheEntry::hash_proc_finfo, hash_search(), HASHEXTENDED_PROC, HASHSTANDARD_PROC, HeapTupleIsValid, HTEqualStrategyNumber, in_progress_list, in_progress_list_len, in_progress_list_maxlen, INJECTION_POINT, insert_rel_type_cache_if_needed(), InvalidOid, HASHCTL::keysize, load_domaintype_info(), load_multirangetype_info(), load_rangetype_info(), load_typcache_tupdesc(), lookup_type_cache(), TypeCacheEntry::lt_opr, MemoryContextAlloc(), MemSet, multirange_element_has_extended_hashing(), multirange_element_has_hashing(), NameStr, TypeCacheEntry::nextDomain, ObjectIdGetDatum(), OidIsValid, range_element_has_extended_hashing(), range_element_has_hashing(), record_fields_have_compare(), record_fields_have_equality(), record_fields_have_extended_hashing(), record_fields_have_hashing(), ReleaseSysCache(), RelIdToTypeIdCacheHash, repalloc(), 
TypeCacheEntry::rngelemtype, TypeCacheEntry::rngtype, SearchSysCache1(), TCFLAGS_CHECKED_BTREE_OPCLASS, TCFLAGS_CHECKED_CMP_PROC, TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TCFLAGS_CHECKED_EQ_OPR, TCFLAGS_CHECKED_GT_OPR, TCFLAGS_CHECKED_HASH_EXTENDED_PROC, TCFLAGS_CHECKED_HASH_OPCLASS, TCFLAGS_CHECKED_HASH_PROC, TCFLAGS_CHECKED_LT_OPR, TCFLAGS_HAVE_PG_TYPE_DATA, TypeCacheEntry::tupDesc, TypeCacheEntry::typalign, TypeCacheEntry::typarray, TypeCacheEntry::typbyval, TypeCacheEntry::typcollation, type_cache_syshash(), TypeCacheEntry::type_id, TypeCacheEntry::type_id_hash, TYPECACHE_BTREE_OPFAMILY, TYPECACHE_CMP_PROC, TYPECACHE_CMP_PROC_FINFO, TYPECACHE_DOMAIN_BASE_INFO, TYPECACHE_DOMAIN_CONSTR_INFO, TYPECACHE_EQ_OPR, TYPECACHE_EQ_OPR_FINFO, TYPECACHE_GT_OPR, TYPECACHE_HASH_EXTENDED_PROC, TYPECACHE_HASH_EXTENDED_PROC_FINFO, TYPECACHE_HASH_OPFAMILY, TYPECACHE_HASH_PROC, TYPECACHE_HASH_PROC_FINFO, TYPECACHE_LT_OPR, TYPECACHE_MULTIRANGE_INFO, TYPECACHE_RANGE_INFO, TYPECACHE_TUPDESC, TypeCacheConstrCallback(), TypeCacheHash, TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), TypeCacheEntry::typelem, TypeCacheEntry::typlen, TypeCacheEntry::typrelid, TypeCacheEntry::typstorage, TypeCacheEntry::typsubscript, and TypeCacheEntry::typtype.

Referenced by analyzeCTE(), appendOrderBySuffix(), array_cmp(), array_contain_compare(), array_eq(), array_position_common(), array_positions(), array_replace_internal(), array_reverse(), array_sample(), array_shuffle(), array_sort_internal(), array_typanalyze(), assign_record_type_identifier(), brin_bloom_opcinfo(), brin_inclusion_opcinfo(), brin_minmax_multi_opcinfo(), brin_minmax_opcinfo(), build_datatype(), build_mss(), cache_array_element_properties(), cache_multirange_element_properties(), cache_range_element_properties(), cache_record_field_properties(), calc_arraycontsel(), check_exclusion_or_unique_constraint(), check_memoizable(), contain_leaked_vars_walker(), create_grouping_expr_infos(), CreateStatistics(), dependency_degree(), domain_state_setup(), DomainHasConstraints(), enum_cmp_internal(), ExecInitExprRec(), find_simplified_clause(), foreign_expr_walker(), get_cached_rowtype(), get_multirange_io_data(), get_range_io_data(), get_rule_orderby(), get_sort_group_operators(), GinBufferInit(), hash_array(), hash_array_extended(), hash_multirange(), hash_multirange_extended(), hash_range(), hash_range_extended(), hash_record(), hash_record_extended(), init_grouping_targets(), InitDomainConstraintRef(), initGinState(), IsIndexUsableForReplicaIdentityFull(), load_multirangetype_info(), load_rangetype_info(), lookup_rowtype_tupdesc_domain(), lookup_rowtype_tupdesc_internal(), lookup_type_cache(), make_expanded_record_from_tupdesc(), make_expanded_record_from_typeid(), multirange_get_typcache(), multirange_minus_multi(), multirange_unnest(), ndistinct_for_combination(), op_hashjoinable(), op_mergejoinable(), paraminfo_get_equal_hashops(), PLy_input_setup_func(), PLy_output_setup_func(), range_fast_cmp(), range_get_typcache(), range_minus_multi(), record_cmp(), record_eq(), revalidate_rectypeid(), scalararraysel(), scalararraysel_containment(), show_sortorder_options(), statatt_get_elem_type(), statatt_get_type(), statext_mcv_serialize(), tuples_equal(), 
tuplesort_begin_index_gin(), and width_bucket_array().

◆ multirange_element_has_extended_hashing()

static bool multirange_element_has_extended_hashing ( TypeCacheEntry typentry)
static

◆ multirange_element_has_hashing()

static bool multirange_element_has_hashing ( TypeCacheEntry typentry)
static

◆ prep_domain_constraints()

static List * prep_domain_constraints ( List constraints,
MemoryContext  execctx 
)
static

Definition at line 1363 of file typcache.c.

1364{
1365 List *result = NIL;
1367 ListCell *lc;
1368
1370
1371 foreach(lc, constraints)
1372 {
1375
1377 newr->constrainttype = r->constrainttype;
1378 newr->name = r->name;
1379 newr->check_expr = r->check_expr;
1380 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1381
1382 result = lappend(result, newr);
1383 }
1384
1386
1387 return result;
1388}

References DomainConstraintState::check_expr, DomainConstraintState::constrainttype, ExecInitExpr(), fb(), lappend(), lfirst, makeNode, MemoryContextSwitchTo(), DomainConstraintState::name, and NIL.

Referenced by InitDomainConstraintRef(), and UpdateDomainConstraintRef().

◆ range_element_has_extended_hashing()

static bool range_element_has_extended_hashing ( TypeCacheEntry typentry)
static

◆ range_element_has_hashing()

static bool range_element_has_hashing ( TypeCacheEntry typentry)
static

Definition at line 1714 of file typcache.c.

1715{
1716 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1718 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1719}

References cache_range_element_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_ELEM_PROPERTIES, and TCFLAGS_HAVE_ELEM_HASHING.

Referenced by lookup_type_cache().

◆ record_fields_have_compare()

static bool record_fields_have_compare ( TypeCacheEntry typentry)
static

Definition at line 1585 of file typcache.c.

1586{
1587 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1589 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1590}

References cache_record_field_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_FIELD_PROPERTIES, and TCFLAGS_HAVE_FIELD_COMPARE.

Referenced by lookup_type_cache().

◆ record_fields_have_equality()

static bool record_fields_have_equality ( TypeCacheEntry typentry)
static

◆ record_fields_have_extended_hashing()

static bool record_fields_have_extended_hashing ( TypeCacheEntry typentry)
static

◆ record_fields_have_hashing()

static bool record_fields_have_hashing ( TypeCacheEntry typentry)
static

Definition at line 1593 of file typcache.c.

1594{
1595 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1597 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1598}

References cache_record_field_properties(), TypeCacheEntry::flags, TCFLAGS_CHECKED_FIELD_PROPERTIES, and TCFLAGS_HAVE_FIELD_HASHING.

Referenced by lookup_type_cache().

◆ record_type_typmod_compare()

static int record_type_typmod_compare ( const void a,
const void b,
size_t  size 
)
static

Definition at line 2025 of file typcache.c.

2026{
2027 const RecordCacheEntry *left = a;
2028 const RecordCacheEntry *right = b;
2029
2030 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2031}

References a, b, equalRowTypes(), and RecordCacheEntry::tupdesc.

Referenced by assign_record_type_typmod().

◆ record_type_typmod_hash()

static uint32 record_type_typmod_hash ( const void data,
size_t  size 
)
static

Definition at line 2014 of file typcache.c.

2015{
2016 const RecordCacheEntry *entry = data;
2017
2018 return hashRowType(entry->tupdesc);
2019}

References data, hashRowType(), and RecordCacheEntry::tupdesc.

Referenced by assign_record_type_typmod().

◆ share_tupledesc()

static dsa_pointer share_tupledesc ( dsa_area area,
TupleDesc  tupdesc,
uint32  typmod 
)
static

Definition at line 2921 of file typcache.c.

2922{
2924 TupleDesc shared;
2925
2926 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2927 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2928 TupleDescCopy(shared, tupdesc);
2929 shared->tdtypmod = typmod;
2930
2931 return shared_dp;
2932}

References dsa_allocate, dsa_get_address(), fb(), TupleDescData::tdtypmod, TupleDescCopy(), and TupleDescSize.

Referenced by find_or_make_matching_shared_tupledesc(), and SharedRecordTypmodRegistryInit().

◆ shared_record_table_compare()

static int shared_record_table_compare ( const void a,
const void b,
size_t  size,
void arg 
)
static

Definition at line 234 of file typcache.c.

236{
237 dsa_area *area = (dsa_area *) arg;
238 const SharedRecordTableKey *k1 = a;
239 const SharedRecordTableKey *k2 = b;
242
243 if (k1->shared)
244 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
249 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 else
251 t2 = k2->u.local_tupdesc;
252
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}

References a, arg, b, dsa_get_address(), equalRowTypes(), and fb().

◆ shared_record_table_hash()

static uint32 shared_record_table_hash ( const void a,
size_t  size,
void arg 
)
static

Definition at line 260 of file typcache.c.

261{
262 dsa_area *area = arg;
263 const SharedRecordTableKey *k = a;
264 TupleDesc t;
265
266 if (k->shared)
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}

References a, arg, dsa_get_address(), hashRowType(), SharedRecordTableKey::local_tupdesc, SharedRecordTableKey::shared, SharedRecordTableKey::shared_tupdesc, and SharedRecordTableKey::u.

◆ shared_record_typmod_registry_detach()

static void shared_record_typmod_registry_detach ( dsm_segment segment,
Datum  datum 
)
static

◆ SharedRecordTypmodRegistryAttach()

void SharedRecordTypmodRegistryAttach ( SharedRecordTypmodRegistry registry)

Definition at line 2295 of file typcache.c.

2296{
2300
2302
2303 /* We can't already be attached to a shared registry. */
2310
2311 /*
2312 * We can't already have typmods in our local cache, because they'd clash
2313 * with those imported by SharedRecordTypmodRegistryInit. This should be
2314 * a freshly started parallel worker. If we ever support worker
2315 * recycling, a worker would need to zap its local cache in between
2316 * servicing different queries, in order to be able to call this and
2317 * synchronize typmods with a new leader; but that's problematic because
2318 * we can't be very sure that record-typmod-related state hasn't escaped
2319 * to anywhere else in the process.
2320 */
2322
2324
2325 /* Attach to the two hash tables. */
2328 registry->record_table_handle,
2332 registry->typmod_table_handle,
2333 NULL);
2334
2336
2337 /*
2338 * Set up detach hook to run at worker exit. Currently this is the same
2339 * as the leader's detach hook, but in future they might need to be
2340 * different.
2341 */
2345
2346 /*
2347 * Set up the session state that will tell assign_record_type_typmod and
2348 * lookup_rowtype_tupdesc_internal about the shared registry.
2349 */
2353}

References Session::area, Assert, CurrentSession, dshash_attach(), fb(), IsParallelWorker, MemoryContextSwitchTo(), NextRecordTypmod, on_dsm_detach(), PointerGetDatum(), Session::segment, Session::shared_record_table, shared_record_typmod_registry_detach(), Session::shared_typmod_registry, Session::shared_typmod_table, srtr_record_table_params, srtr_typmod_table_params, and TopMemoryContext.

Referenced by AttachSession().

◆ SharedRecordTypmodRegistryEstimate()

size_t SharedRecordTypmodRegistryEstimate ( void  )

Definition at line 2174 of file typcache.c.

2175{
2176 return sizeof(SharedRecordTypmodRegistry);
2177}

Referenced by GetSessionDsmHandle().

◆ SharedRecordTypmodRegistryInit()

void SharedRecordTypmodRegistryInit ( SharedRecordTypmodRegistry registry,
dsm_segment segment,
dsa_area area 
)

Definition at line 2196 of file typcache.c.

2199{
2203 int32 typmod;
2204
2206
2207 /* We can't already be attached to a shared registry. */
2211
2213
2214 /* Create the hash table of tuple descriptors indexed by themselves. */
2216
2217 /* Create the hash table of tuple descriptors indexed by typmod. */
2219
2221
2222 /* Initialize the SharedRecordTypmodRegistry. */
2223 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2224 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2226
2227 /*
2228 * Copy all entries from this backend's private registry into the shared
2229 * registry.
2230 */
2231 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2232 {
2237 TupleDesc tupdesc;
2238 bool found;
2239
2240 tupdesc = RecordCacheArray[typmod].tupdesc;
2241 if (tupdesc == NULL)
2242 continue;
2243
2244 /* Copy the TupleDesc into shared memory. */
2245 shared_dp = share_tupledesc(area, tupdesc, typmod);
2246
2247 /* Insert into the typmod table. */
2249 &tupdesc->tdtypmod,
2250 &found);
2251 if (found)
2252 elog(ERROR, "cannot create duplicate shared record typmod");
2253 typmod_table_entry->typmod = tupdesc->tdtypmod;
2254 typmod_table_entry->shared_tupdesc = shared_dp;
2256
2257 /* Insert into the record table. */
2258 record_table_key.shared = false;
2259 record_table_key.u.local_tupdesc = tupdesc;
2262 &found);
2263 if (!found)
2264 {
2265 record_table_entry->key.shared = true;
2266 record_table_entry->key.u.shared_tupdesc = shared_dp;
2267 }
2269 }
2270
2271 /*
2272 * Set up the global state that will tell assign_record_type_typmod and
2273 * lookup_rowtype_tupdesc_internal about the shared registry.
2274 */
2278
2279 /*
2280 * We install a detach hook in the leader, but only to handle cleanup on
2281 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2282 * the memory, the leader process will use a shared registry until it
2283 * exits.
2284 */
2286}

References Assert, CurrentSession, dshash_create(), dshash_find_or_insert(), dshash_get_hash_table_handle(), dshash_release_lock(), elog, ERROR, fb(), IsParallelWorker, MemoryContextSwitchTo(), NextRecordTypmod, on_dsm_detach(), pg_atomic_init_u32(), RecordCacheArray, share_tupledesc(), Session::shared_record_table, shared_record_typmod_registry_detach(), Session::shared_typmod_registry, Session::shared_typmod_table, srtr_record_table_params, srtr_typmod_table_params, TupleDescData::tdtypmod, TopMemoryContext, and RecordCacheArrayEntry::tupdesc.

Referenced by GetSessionDsmHandle().

◆ type_cache_syshash()

static uint32 type_cache_syshash ( const void key,
Size  keysize 
)
static

Definition at line 359 of file typcache.c.

360{
361 Assert(keysize == sizeof(Oid));
362 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363}

References Assert, fb(), GetSysCacheHashValue1, and ObjectIdGetDatum().

Referenced by lookup_type_cache().

◆ TypeCacheConstrCallback()

static void TypeCacheConstrCallback ( Datum  arg,
int  cacheid,
uint32  hashvalue 
)
static

Definition at line 2610 of file typcache.c.

2611{
2612 TypeCacheEntry *typentry;
2613
2614 /*
2615 * Because this is called very frequently, and typically very few of the
2616 * typcache entries are for domains, we don't use hash_seq_search here.
2617 * Instead we thread all the domain-type entries together so that we can
2618 * visit them cheaply.
2619 */
2620 for (typentry = firstDomainTypeEntry;
2621 typentry != NULL;
2622 typentry = typentry->nextDomain)
2623 {
2624 /* Reset domain constraint validity information */
2626 }
2627}

References fb(), firstDomainTypeEntry, TypeCacheEntry::flags, and TypeCacheEntry::nextDomain.

Referenced by lookup_type_cache().

◆ TypeCacheOpcCallback()

static void TypeCacheOpcCallback ( Datum  arg,
int  cacheid,
uint32  hashvalue 
)
static

Definition at line 2572 of file typcache.c.

2573{
2574 HASH_SEQ_STATUS status;
2575 TypeCacheEntry *typentry;
2576
2577 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2578 hash_seq_init(&status, TypeCacheHash);
2579 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2580 {
2581 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2582
2583 /* Reset equality/comparison/hashing validity information */
2584 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2585
2586 /*
2587 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2588 * of TCFLAGS_OPERATOR_FLAGS.
2589 */
2590 if (hadOpclass)
2592 }
2593}

References delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, hash_seq_init(), hash_seq_search(), TCFLAGS_OPERATOR_FLAGS, and TypeCacheHash.

Referenced by lookup_type_cache().

◆ TypeCacheRelCallback()

static void TypeCacheRelCallback ( Datum  arg,
Oid  relid 
)
static

Definition at line 2419 of file typcache.c.

2420{
2421 TypeCacheEntry *typentry;
2422
2423 /*
2424 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2425 * callback wouldn't be registered
2426 */
2427 if (OidIsValid(relid))
2428 {
2430
2431 /*
2432 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2433 * corresponding typcache entry has something to clean.
2434 */
2436 &relid,
2437 HASH_FIND, NULL);
2438
2439 if (relentry != NULL)
2440 {
2442 &relentry->composite_typid,
2443 HASH_FIND, NULL);
2444
2445 if (typentry != NULL)
2446 {
2447 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2448 Assert(relid == typentry->typrelid);
2449
2451 }
2452 }
2453
2454 /*
2455 * Visit all the domain types sequentially. Typically, this shouldn't
2456 * affect performance since domain types are less tended to bloat.
2457 * Domain types are created manually, unlike composite types which are
2458 * automatically created for every temporary table.
2459 */
2460 for (typentry = firstDomainTypeEntry;
2461 typentry != NULL;
2462 typentry = typentry->nextDomain)
2463 {
2464 /*
2465 * If it's domain over composite, reset flags. (We don't bother
2466 * trying to determine whether the specific base type needs a
2467 * reset.) Note that if we haven't determined whether the base
2468 * type is composite, we don't need to reset anything.
2469 */
2471 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2472 }
2473 }
2474 else
2475 {
2476 HASH_SEQ_STATUS status;
2477
2478 /*
2479 * Relid is invalid. By convention, we need to reset all composite
2480 * types in cache. Also, we should reset flags for domain types, and
2481 * we loop over all entries in hash, so, do it in a single scan.
2482 */
2483 hash_seq_init(&status, TypeCacheHash);
2484 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2485 {
2486 if (typentry->typtype == TYPTYPE_COMPOSITE)
2487 {
2489 }
2490 else if (typentry->typtype == TYPTYPE_DOMAIN)
2491 {
2492 /*
2493 * If it's domain over composite, reset flags. (We don't
2494 * bother trying to determine whether the specific base type
2495 * needs a reset.) Note that if we haven't determined whether
2496 * the base type is composite, we don't need to reset
2497 * anything.
2498 */
2500 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2501 }
2502 }
2503 }
2504}

References Assert, fb(), firstDomainTypeEntry, TypeCacheEntry::flags, HASH_FIND, hash_search(), hash_seq_init(), hash_seq_search(), InvalidateCompositeTypeCacheEntry(), TypeCacheEntry::nextDomain, OidIsValid, RelIdToTypeIdCacheHash, TCFLAGS_DOMAIN_BASE_IS_COMPOSITE, TypeCacheHash, TypeCacheEntry::typrelid, and TypeCacheEntry::typtype.

Referenced by lookup_type_cache().

◆ TypeCacheTypCallback()

static void TypeCacheTypCallback ( Datum  arg,
int  cacheid,
uint32  hashvalue 
)
static

Definition at line 2515 of file typcache.c.

2516{
2517 HASH_SEQ_STATUS status;
2518 TypeCacheEntry *typentry;
2519
2520 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2521
2522 /*
2523 * By convention, zero hash value is passed to the callback as a sign that
2524 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2525 * InvalidateSystemCachesExtended().
2526 */
2527 if (hashvalue == 0)
2528 hash_seq_init(&status, TypeCacheHash);
2529 else
2530 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2531
2532 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2533 {
2534 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2535
2536 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2537
2538 /*
2539 * Mark the data obtained directly from pg_type as invalid. Also, if
2540 * it's a domain, typnotnull might've changed, so we'll need to
2541 * recalculate its constraints.
2542 */
2543 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2545
2546 /*
2547 * Call delete_rel_type_cache_if_needed() if we cleaned
2548 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2549 */
2550 if (hadPgTypeData)
2552 }
2553}

References Assert, delete_rel_type_cache_if_needed(), fb(), TypeCacheEntry::flags, hash_seq_init(), hash_seq_init_with_hash_value(), hash_seq_search(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, TCFLAGS_HAVE_PG_TYPE_DATA, TypeCacheEntry::type_id_hash, and TypeCacheHash.

Referenced by lookup_type_cache().

◆ UpdateDomainConstraintRef()

void UpdateDomainConstraintRef ( DomainConstraintRef ref)

Definition at line 1439 of file typcache.c.

1440{
1441 TypeCacheEntry *typentry = ref->tcache;
1442
1443 /* Make sure typcache entry's data is up to date */
1444 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1445 typentry->typtype == TYPTYPE_DOMAIN)
1446 load_domaintype_info(typentry);
1447
1448 /* Transfer to ref object if there's new info, adjusting refcounts */
1449 if (ref->dcc != typentry->domainData)
1450 {
1451 /* Paranoia --- be sure link is nulled before trying to release */
1452 DomainConstraintCache *dcc = ref->dcc;
1453
1454 if (dcc)
1455 {
1456 /*
1457 * Note: we just leak the previous list of executable domain
1458 * constraints. Alternatively, we could keep those in a child
1459 * context of ref->refctx and free that context at this point.
1460 * However, in practice this code path will be taken so seldom
1461 * that the extra bookkeeping for a child context doesn't seem
1462 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1463 */
1464 ref->constraints = NIL;
1465 ref->dcc = NULL;
1466 decr_dcc_refcount(dcc);
1467 }
1468 dcc = typentry->domainData;
1469 if (dcc)
1470 {
1471 ref->dcc = dcc;
1472 dcc->dccRefCount++;
1473 if (ref->need_exprstate)
1474 ref->constraints = prep_domain_constraints(dcc->constraints,
1475 ref->refctx);
1476 else
1477 ref->constraints = dcc->constraints;
1478 }
1479 }
1480}

References DomainConstraintCache::constraints, DomainConstraintCache::dccRefCount, decr_dcc_refcount(), TypeCacheEntry::domainData, fb(), TypeCacheEntry::flags, load_domaintype_info(), NIL, prep_domain_constraints(), TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS, and TypeCacheEntry::typtype.

Referenced by domain_check_input().

Variable Documentation

◆ firstDomainTypeEntry

TypeCacheEntry* firstDomainTypeEntry = NULL
static

Definition at line 96 of file typcache.c.

Referenced by lookup_type_cache(), TypeCacheConstrCallback(), and TypeCacheRelCallback().

◆ in_progress_list

Oid* in_progress_list
static

◆ in_progress_list_len

int in_progress_list_len
static

◆ in_progress_list_maxlen

int in_progress_list_maxlen
static

Definition at line 228 of file typcache.c.

Referenced by lookup_type_cache().

◆ NextRecordTypmod

int32 NextRecordTypmod = 0
static

◆ RecordCacheArray

◆ RecordCacheArrayLen

int32 RecordCacheArrayLen = 0
static

◆ RecordCacheHash

HTAB* RecordCacheHash = NULL
static

Definition at line 295 of file typcache.c.

Referenced by assign_record_type_typmod().

◆ RelIdToTypeIdCacheHash

HTAB* RelIdToTypeIdCacheHash = NULL
static

◆ srtr_record_table_params

◆ srtr_typmod_table_params

◆ tupledesc_id_counter

◆ TypeCacheHash