1 /*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Information dependent on opclasses is cleared if we
27 * detect updates to pg_opclass. We also support clearing the tuple
28 * descriptor and operator/function parts of a rowtype's cache entry,
29 * since those may need to change as a consequence of ALTER TABLE.
30 * Domain constraint changes are also tracked properly.
31 *
32 *
33 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
34 * Portions Copyright (c) 1994, Regents of the University of California
35 *
36 * IDENTIFICATION
37 * src/backend/utils/cache/typcache.c
38 *
39 *-------------------------------------------------------------------------
40 */
41 #include "postgres.h"
42
43 #include <limits.h>
44
45 #include "access/hash.h"
46 #include "access/heapam.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "access/parallel.h"
50 #include "access/session.h"
51 #include "catalog/indexing.h"
52 #include "catalog/pg_am.h"
53 #include "catalog/pg_constraint.h"
54 #include "catalog/pg_enum.h"
55 #include "catalog/pg_operator.h"
56 #include "catalog/pg_range.h"
57 #include "catalog/pg_type.h"
58 #include "commands/defrem.h"
59 #include "executor/executor.h"
60 #include "lib/dshash.h"
61 #include "optimizer/planner.h"
62 #include "storage/lwlock.h"
63 #include "utils/builtins.h"
64 #include "utils/catcache.h"
65 #include "utils/fmgroids.h"
66 #include "utils/inval.h"
67 #include "utils/lsyscache.h"
68 #include "utils/memutils.h"
69 #include "utils/rel.h"
70 #include "utils/snapmgr.h"
71 #include "utils/syscache.h"
72 #include "utils/typcache.h"
73
74
/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;

/* List of type cache entries for domain types */
static TypeCacheEntry *firstDomainTypeEntry = NULL;

/*
 * Private flag bits in the TypeCacheEntry.flags field
 *
 * The "CHECKED" bits record that we have already attempted the corresponding
 * lookup (whether or not it succeeded), so we needn't repeat it; the "HAVE"
 * bits record positive results of the element/field property probes.
 */
#define TCFLAGS_CHECKED_BTREE_OPCLASS		0x000001
#define TCFLAGS_CHECKED_HASH_OPCLASS		0x000002
#define TCFLAGS_CHECKED_EQ_OPR				0x000004
#define TCFLAGS_CHECKED_LT_OPR				0x000008
#define TCFLAGS_CHECKED_GT_OPR				0x000010
#define TCFLAGS_CHECKED_CMP_PROC			0x000020
#define TCFLAGS_CHECKED_HASH_PROC			0x000040
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC	0x000080
#define TCFLAGS_CHECKED_ELEM_PROPERTIES		0x000100
#define TCFLAGS_HAVE_ELEM_EQUALITY			0x000200
#define TCFLAGS_HAVE_ELEM_COMPARE			0x000400
#define TCFLAGS_HAVE_ELEM_HASHING			0x000800
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING	0x001000
#define TCFLAGS_CHECKED_FIELD_PROPERTIES	0x002000
#define TCFLAGS_HAVE_FIELD_EQUALITY			0x004000
#define TCFLAGS_HAVE_FIELD_COMPARE			0x008000
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS	0x010000
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE	0x020000
100
101 /*
102 * Data stored about a domain type's constraints. Note that we do not create
103 * this struct for the common case of a constraint-less domain; we just set
104 * domainData to NULL to indicate that.
105 *
106 * Within a DomainConstraintCache, we store expression plan trees, but the
107 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
108 * When needed, expression evaluation nodes are built by flat-copying the
109 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
110 * Such a node tree is not part of the DomainConstraintCache, but is
111 * considered to belong to a DomainConstraintRef.
112 */
113 struct DomainConstraintCache
114 {
115 List *constraints; /* list of DomainConstraintState nodes */
116 MemoryContext dccContext; /* memory context holding all associated data */
117 long dccRefCount; /* number of references to this struct */
118 };
119
/* Private information to support comparisons of enum values */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value */
	float4		sort_order;		/* its sort position */
} EnumItem;
126
/* Cached sort-order data for all values of one enum type */
typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* Set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER]; /* variable-length array */
} TypeCacheEnumData;
134
135 /*
136 * We use a separate table for storing the definitions of non-anonymous
137 * record types. Once defined, a record type will be remembered for the
138 * life of the backend. Subsequent uses of the "same" record type (where
139 * sameness means equalTupleDescs) will refer to the existing table entry.
140 *
141 * Stored record types are remembered in a linear array of TupleDescs,
142 * which can be indexed quickly with the assigned typmod. There is also
143 * a hash table to speed searches for matching TupleDescs.
144 */
145
146 typedef struct RecordCacheEntry
147 {
148 TupleDesc tupdesc;
149 } RecordCacheEntry;
150
151 /*
152 * To deal with non-anonymous record types that are exchanged by backends
153 * involved in a parallel query, we also need a shared version of the above.
154 */
155 struct SharedRecordTypmodRegistry
156 {
157 /* A hash table for finding a matching TupleDesc. */
158 dshash_table_handle record_table_handle;
159 /* A hash table for finding a TupleDesc by typmod. */
160 dshash_table_handle typmod_table_handle;
161 /* A source of new record typmod numbers. */
162 pg_atomic_uint32 next_typmod;
163 };
164
165 /*
166 * When using shared tuple descriptors as hash table keys we need a way to be
167 * able to search for an equal shared TupleDesc using a backend-local
168 * TupleDesc. So we use this type which can hold either, and hash and compare
169 * functions that know how to handle both.
170 */
171 typedef struct SharedRecordTableKey
172 {
173 union
174 {
175 TupleDesc local_tupdesc;
176 dsa_pointer shared_tupdesc;
177 } u;
178 bool shared;
179 } SharedRecordTableKey;
180
181 /*
182 * The shared version of RecordCacheEntry. This lets us look up a typmod
183 * using a TupleDesc which may be in local or shared memory.
184 */
185 typedef struct SharedRecordTableEntry
186 {
187 SharedRecordTableKey key;
188 } SharedRecordTableEntry;
189
190 /*
191 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
192 * up a TupleDesc in shared memory using a typmod.
193 */
194 typedef struct SharedTypmodTableEntry
195 {
196 uint32 typmod;
197 dsa_pointer shared_tupdesc;
198 } SharedTypmodTableEntry;
199
200 /*
201 * A comparator function for SharedRecordTableKey.
202 */
203 static int
shared_record_table_compare(const void * a,const void * b,size_t size,void * arg)204 shared_record_table_compare(const void *a, const void *b, size_t size,
205 void *arg)
206 {
207 dsa_area *area = (dsa_area *) arg;
208 SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
209 SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
210 TupleDesc t1;
211 TupleDesc t2;
212
213 if (k1->shared)
214 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
215 else
216 t1 = k1->u.local_tupdesc;
217
218 if (k2->shared)
219 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
220 else
221 t2 = k2->u.local_tupdesc;
222
223 return equalTupleDescs(t1, t2) ? 0 : 1;
224 }
225
226 /*
227 * A hash function for SharedRecordTableKey.
228 */
229 static uint32
shared_record_table_hash(const void * a,size_t size,void * arg)230 shared_record_table_hash(const void *a, size_t size, void *arg)
231 {
232 dsa_area *area = (dsa_area *) arg;
233 SharedRecordTableKey *k = (SharedRecordTableKey *) a;
234 TupleDesc t;
235
236 if (k->shared)
237 t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
238 else
239 t = k->u.local_tupdesc;
240
241 return hashTupleDesc(t);
242 }
243
/*
 * Parameters for SharedRecordTypmodRegistry's TupleDesc table.  Keys are
 * compared and hashed by the custom callbacks above, so the declared key
 * size is not actually consulted.
 */
static const dshash_parameters srtr_record_table_params = {
	sizeof(SharedRecordTableKey),	/* unused */
	sizeof(SharedRecordTableEntry),
	shared_record_table_compare,
	shared_record_table_hash,
	LWTRANCHE_SESSION_RECORD_TABLE
};
252
/*
 * Parameters for SharedRecordTypmodRegistry's typmod hash table.  Plain
 * memcmp/memhash suffice here since the key is just a uint32 typmod.
 */
static const dshash_parameters srtr_typmod_table_params = {
	sizeof(uint32),
	sizeof(SharedTypmodTableEntry),
	dshash_memcmp,
	dshash_memhash,
	LWTRANCHE_SESSION_TYPMOD_TABLE
};
261
/* hashtable for recognizing registered record types */
static HTAB *RecordCacheHash = NULL;

/* arrays of info about registered record types, indexed by assigned typmod */
static TupleDesc *RecordCacheArray = NULL;
static uint64 *RecordIdentifierArray = NULL;
static int32 RecordCacheArrayLen = 0;	/* allocated length of above arrays */
static int32 NextRecordTypmod = 0;	/* number of entries used */

/*
 * Process-wide counter for generating unique tupledesc identifiers.
 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
 */
static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
277
/* Forward declarations of local routines */
static void load_typcache_tupdesc(TypeCacheEntry *typentry);
static void load_rangetype_info(TypeCacheEntry *typentry);
static void load_domaintype_info(TypeCacheEntry *typentry);
static int	dcs_cmp(const void *a, const void *b);
static void decr_dcc_refcount(DomainConstraintCache *dcc);
static void dccref_deletion_callback(void *arg);
static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
static bool array_element_has_equality(TypeCacheEntry *typentry);
static bool array_element_has_compare(TypeCacheEntry *typentry);
static bool array_element_has_hashing(TypeCacheEntry *typentry);
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
static void cache_array_element_properties(TypeCacheEntry *typentry);
static bool record_fields_have_equality(TypeCacheEntry *typentry);
static bool record_fields_have_compare(TypeCacheEntry *typentry);
static void cache_record_field_properties(TypeCacheEntry *typentry);
static bool range_element_has_hashing(TypeCacheEntry *typentry);
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
static void cache_range_element_properties(TypeCacheEntry *typentry);
static void TypeCacheRelCallback(Datum arg, Oid relid);
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
static void load_enum_cache_data(TypeCacheEntry *tcache);
static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
static int	enum_oid_cmp(const void *left, const void *right);
static void shared_record_typmod_registry_detach(dsm_segment *segment,
									 Datum datum);
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
				uint32 typmod);
307
308
309 /*
310 * lookup_type_cache
311 *
312 * Fetch the type cache entry for the specified datatype, and make sure that
313 * all the fields requested by bits in 'flags' are valid.
314 *
315 * The result is never NULL --- we will ereport() if the passed type OID is
316 * invalid. Note however that we may fail to find one or more of the
317 * values requested by 'flags'; the caller needs to check whether the fields
318 * are InvalidOid or not.
319 */
320 TypeCacheEntry *
lookup_type_cache(Oid type_id,int flags)321 lookup_type_cache(Oid type_id, int flags)
322 {
323 TypeCacheEntry *typentry;
324 bool found;
325
326 if (TypeCacheHash == NULL)
327 {
328 /* First time through: initialize the hash table */
329 HASHCTL ctl;
330
331 MemSet(&ctl, 0, sizeof(ctl));
332 ctl.keysize = sizeof(Oid);
333 ctl.entrysize = sizeof(TypeCacheEntry);
334 TypeCacheHash = hash_create("Type information cache", 64,
335 &ctl, HASH_ELEM | HASH_BLOBS);
336
337 /* Also set up callbacks for SI invalidations */
338 CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
339 CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
340 CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
341 CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);
342
343 /* Also make sure CacheMemoryContext exists */
344 if (!CacheMemoryContext)
345 CreateCacheMemoryContext();
346 }
347
348 /* Try to look up an existing entry */
349 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
350 (void *) &type_id,
351 HASH_FIND, NULL);
352 if (typentry == NULL)
353 {
354 /*
355 * If we didn't find one, we want to make one. But first look up the
356 * pg_type row, just to make sure we don't make a cache entry for an
357 * invalid type OID. If the type OID is not valid, present a
358 * user-facing error, since some code paths such as domain_in() allow
359 * this function to be reached with a user-supplied OID.
360 */
361 HeapTuple tp;
362 Form_pg_type typtup;
363
364 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
365 if (!HeapTupleIsValid(tp))
366 ereport(ERROR,
367 (errcode(ERRCODE_UNDEFINED_OBJECT),
368 errmsg("type with OID %u does not exist", type_id)));
369 typtup = (Form_pg_type) GETSTRUCT(tp);
370 if (!typtup->typisdefined)
371 ereport(ERROR,
372 (errcode(ERRCODE_UNDEFINED_OBJECT),
373 errmsg("type \"%s\" is only a shell",
374 NameStr(typtup->typname))));
375
376 /* Now make the typcache entry */
377 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
378 (void *) &type_id,
379 HASH_ENTER, &found);
380 Assert(!found); /* it wasn't there a moment ago */
381
382 MemSet(typentry, 0, sizeof(TypeCacheEntry));
383 typentry->type_id = type_id;
384 typentry->typlen = typtup->typlen;
385 typentry->typbyval = typtup->typbyval;
386 typentry->typalign = typtup->typalign;
387 typentry->typstorage = typtup->typstorage;
388 typentry->typtype = typtup->typtype;
389 typentry->typrelid = typtup->typrelid;
390 typentry->typelem = typtup->typelem;
391
392 /* If it's a domain, immediately thread it into the domain cache list */
393 if (typentry->typtype == TYPTYPE_DOMAIN)
394 {
395 typentry->nextDomain = firstDomainTypeEntry;
396 firstDomainTypeEntry = typentry;
397 }
398
399 ReleaseSysCache(tp);
400 }
401
402 /*
403 * Look up opclasses if we haven't already and any dependent info is
404 * requested.
405 */
406 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
407 TYPECACHE_CMP_PROC |
408 TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
409 TYPECACHE_BTREE_OPFAMILY)) &&
410 !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
411 {
412 Oid opclass;
413
414 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
415 if (OidIsValid(opclass))
416 {
417 typentry->btree_opf = get_opclass_family(opclass);
418 typentry->btree_opintype = get_opclass_input_type(opclass);
419 }
420 else
421 {
422 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
423 }
424
425 /*
426 * Reset information derived from btree opclass. Note in particular
427 * that we'll redetermine the eq_opr even if we previously found one;
428 * this matters in case a btree opclass has been added to a type that
429 * previously had only a hash opclass.
430 */
431 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
432 TCFLAGS_CHECKED_LT_OPR |
433 TCFLAGS_CHECKED_GT_OPR |
434 TCFLAGS_CHECKED_CMP_PROC);
435 typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
436 }
437
438 /*
439 * If we need to look up equality operator, and there's no btree opclass,
440 * force lookup of hash opclass.
441 */
442 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
443 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
444 typentry->btree_opf == InvalidOid)
445 flags |= TYPECACHE_HASH_OPFAMILY;
446
447 if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
448 TYPECACHE_HASH_EXTENDED_PROC |
449 TYPECACHE_HASH_EXTENDED_PROC_FINFO |
450 TYPECACHE_HASH_OPFAMILY)) &&
451 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
452 {
453 Oid opclass;
454
455 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
456 if (OidIsValid(opclass))
457 {
458 typentry->hash_opf = get_opclass_family(opclass);
459 typentry->hash_opintype = get_opclass_input_type(opclass);
460 }
461 else
462 {
463 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
464 }
465
466 /*
467 * Reset information derived from hash opclass. We do *not* reset the
468 * eq_opr; if we already found one from the btree opclass, that
469 * decision is still good.
470 */
471 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
472 TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
473 typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
474 }
475
476 /*
477 * Look for requested operators and functions, if we haven't already.
478 */
479 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
480 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
481 {
482 Oid eq_opr = InvalidOid;
483
484 if (typentry->btree_opf != InvalidOid)
485 eq_opr = get_opfamily_member(typentry->btree_opf,
486 typentry->btree_opintype,
487 typentry->btree_opintype,
488 BTEqualStrategyNumber);
489 if (eq_opr == InvalidOid &&
490 typentry->hash_opf != InvalidOid)
491 eq_opr = get_opfamily_member(typentry->hash_opf,
492 typentry->hash_opintype,
493 typentry->hash_opintype,
494 HTEqualStrategyNumber);
495
496 /*
497 * If the proposed equality operator is array_eq or record_eq, check
498 * to see if the element type or column types support equality. If
499 * not, array_eq or record_eq would fail at runtime, so we don't want
500 * to report that the type has equality. (We can omit similar
501 * checking for ranges because ranges can't be created in the first
502 * place unless their subtypes support equality.)
503 */
504 if (eq_opr == ARRAY_EQ_OP &&
505 !array_element_has_equality(typentry))
506 eq_opr = InvalidOid;
507 else if (eq_opr == RECORD_EQ_OP &&
508 !record_fields_have_equality(typentry))
509 eq_opr = InvalidOid;
510
511 /* Force update of eq_opr_finfo only if we're changing state */
512 if (typentry->eq_opr != eq_opr)
513 typentry->eq_opr_finfo.fn_oid = InvalidOid;
514
515 typentry->eq_opr = eq_opr;
516
517 /*
518 * Reset info about hash functions whenever we pick up new info about
519 * equality operator. This is so we can ensure that the hash
520 * functions match the operator.
521 */
522 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
523 TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
524 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
525 }
526 if ((flags & TYPECACHE_LT_OPR) &&
527 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
528 {
529 Oid lt_opr = InvalidOid;
530
531 if (typentry->btree_opf != InvalidOid)
532 lt_opr = get_opfamily_member(typentry->btree_opf,
533 typentry->btree_opintype,
534 typentry->btree_opintype,
535 BTLessStrategyNumber);
536
537 /*
538 * As above, make sure array_cmp or record_cmp will succeed; but again
539 * we need no special check for ranges.
540 */
541 if (lt_opr == ARRAY_LT_OP &&
542 !array_element_has_compare(typentry))
543 lt_opr = InvalidOid;
544 else if (lt_opr == RECORD_LT_OP &&
545 !record_fields_have_compare(typentry))
546 lt_opr = InvalidOid;
547
548 typentry->lt_opr = lt_opr;
549 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
550 }
551 if ((flags & TYPECACHE_GT_OPR) &&
552 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
553 {
554 Oid gt_opr = InvalidOid;
555
556 if (typentry->btree_opf != InvalidOid)
557 gt_opr = get_opfamily_member(typentry->btree_opf,
558 typentry->btree_opintype,
559 typentry->btree_opintype,
560 BTGreaterStrategyNumber);
561
562 /*
563 * As above, make sure array_cmp or record_cmp will succeed; but again
564 * we need no special check for ranges.
565 */
566 if (gt_opr == ARRAY_GT_OP &&
567 !array_element_has_compare(typentry))
568 gt_opr = InvalidOid;
569 else if (gt_opr == RECORD_GT_OP &&
570 !record_fields_have_compare(typentry))
571 gt_opr = InvalidOid;
572
573 typentry->gt_opr = gt_opr;
574 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
575 }
576 if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
577 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
578 {
579 Oid cmp_proc = InvalidOid;
580
581 if (typentry->btree_opf != InvalidOid)
582 cmp_proc = get_opfamily_proc(typentry->btree_opf,
583 typentry->btree_opintype,
584 typentry->btree_opintype,
585 BTORDER_PROC);
586
587 /*
588 * As above, make sure array_cmp or record_cmp will succeed; but again
589 * we need no special check for ranges.
590 */
591 if (cmp_proc == F_BTARRAYCMP &&
592 !array_element_has_compare(typentry))
593 cmp_proc = InvalidOid;
594 else if (cmp_proc == F_BTRECORDCMP &&
595 !record_fields_have_compare(typentry))
596 cmp_proc = InvalidOid;
597
598 /* Force update of cmp_proc_finfo only if we're changing state */
599 if (typentry->cmp_proc != cmp_proc)
600 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
601
602 typentry->cmp_proc = cmp_proc;
603 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
604 }
605 if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
606 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
607 {
608 Oid hash_proc = InvalidOid;
609
610 /*
611 * We insist that the eq_opr, if one has been determined, match the
612 * hash opclass; else report there is no hash function.
613 */
614 if (typentry->hash_opf != InvalidOid &&
615 (!OidIsValid(typentry->eq_opr) ||
616 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
617 typentry->hash_opintype,
618 typentry->hash_opintype,
619 HTEqualStrategyNumber)))
620 hash_proc = get_opfamily_proc(typentry->hash_opf,
621 typentry->hash_opintype,
622 typentry->hash_opintype,
623 HASHSTANDARD_PROC);
624
625 /*
626 * As above, make sure hash_array will succeed. We don't currently
627 * support hashing for composite types, but when we do, we'll need
628 * more logic here to check that case too.
629 */
630 if (hash_proc == F_HASH_ARRAY &&
631 !array_element_has_hashing(typentry))
632 hash_proc = InvalidOid;
633
634 /*
635 * Likewise for hash_range.
636 */
637 if (hash_proc == F_HASH_RANGE &&
638 !range_element_has_hashing(typentry))
639 hash_proc = InvalidOid;
640
641 /* Force update of hash_proc_finfo only if we're changing state */
642 if (typentry->hash_proc != hash_proc)
643 typentry->hash_proc_finfo.fn_oid = InvalidOid;
644
645 typentry->hash_proc = hash_proc;
646 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
647 }
648 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
649 TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
650 !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
651 {
652 Oid hash_extended_proc = InvalidOid;
653
654 /*
655 * We insist that the eq_opr, if one has been determined, match the
656 * hash opclass; else report there is no hash function.
657 */
658 if (typentry->hash_opf != InvalidOid &&
659 (!OidIsValid(typentry->eq_opr) ||
660 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
661 typentry->hash_opintype,
662 typentry->hash_opintype,
663 HTEqualStrategyNumber)))
664 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
665 typentry->hash_opintype,
666 typentry->hash_opintype,
667 HASHEXTENDED_PROC);
668
669 /*
670 * As above, make sure hash_array_extended will succeed. We don't
671 * currently support hashing for composite types, but when we do,
672 * we'll need more logic here to check that case too.
673 */
674 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
675 !array_element_has_extended_hashing(typentry))
676 hash_extended_proc = InvalidOid;
677
678 /*
679 * Likewise for hash_range_extended.
680 */
681 if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
682 !range_element_has_extended_hashing(typentry))
683 hash_extended_proc = InvalidOid;
684
685 /* Force update of proc finfo only if we're changing state */
686 if (typentry->hash_extended_proc != hash_extended_proc)
687 typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
688
689 typentry->hash_extended_proc = hash_extended_proc;
690 typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
691 }
692
693 /*
694 * Set up fmgr lookup info as requested
695 *
696 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
697 * which is not quite right (they're really in the hash table's private
698 * memory context) but this will do for our purposes.
699 *
700 * Note: the code above avoids invalidating the finfo structs unless the
701 * referenced operator/function OID actually changes. This is to prevent
702 * unnecessary leakage of any subsidiary data attached to an finfo, since
703 * that would cause session-lifespan memory leaks.
704 */
705 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
706 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
707 typentry->eq_opr != InvalidOid)
708 {
709 Oid eq_opr_func;
710
711 eq_opr_func = get_opcode(typentry->eq_opr);
712 if (eq_opr_func != InvalidOid)
713 fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
714 CacheMemoryContext);
715 }
716 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
717 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
718 typentry->cmp_proc != InvalidOid)
719 {
720 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
721 CacheMemoryContext);
722 }
723 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
724 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
725 typentry->hash_proc != InvalidOid)
726 {
727 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
728 CacheMemoryContext);
729 }
730 if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
731 typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
732 typentry->hash_extended_proc != InvalidOid)
733 {
734 fmgr_info_cxt(typentry->hash_extended_proc,
735 &typentry->hash_extended_proc_finfo,
736 CacheMemoryContext);
737 }
738
739 /*
740 * If it's a composite type (row type), get tupdesc if requested
741 */
742 if ((flags & TYPECACHE_TUPDESC) &&
743 typentry->tupDesc == NULL &&
744 typentry->typtype == TYPTYPE_COMPOSITE)
745 {
746 load_typcache_tupdesc(typentry);
747 }
748
749 /*
750 * If requested, get information about a range type
751 */
752 if ((flags & TYPECACHE_RANGE_INFO) &&
753 typentry->rngelemtype == NULL &&
754 typentry->typtype == TYPTYPE_RANGE)
755 {
756 load_rangetype_info(typentry);
757 }
758
759 /*
760 * If requested, get information about a domain type
761 */
762 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
763 typentry->domainBaseType == InvalidOid &&
764 typentry->typtype == TYPTYPE_DOMAIN)
765 {
766 typentry->domainBaseTypmod = -1;
767 typentry->domainBaseType =
768 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
769 }
770 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
771 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
772 typentry->typtype == TYPTYPE_DOMAIN)
773 {
774 load_domaintype_info(typentry);
775 }
776
777 return typentry;
778 }
779
780 /*
781 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
782 */
783 static void
load_typcache_tupdesc(TypeCacheEntry * typentry)784 load_typcache_tupdesc(TypeCacheEntry *typentry)
785 {
786 Relation rel;
787
788 if (!OidIsValid(typentry->typrelid)) /* should not happen */
789 elog(ERROR, "invalid typrelid for composite type %u",
790 typentry->type_id);
791 rel = relation_open(typentry->typrelid, AccessShareLock);
792 Assert(rel->rd_rel->reltype == typentry->type_id);
793
794 /*
795 * Link to the tupdesc and increment its refcount (we assert it's a
796 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
797 * because the reference mustn't be entered in the current resource owner;
798 * it can outlive the current query.
799 */
800 typentry->tupDesc = RelationGetDescr(rel);
801
802 Assert(typentry->tupDesc->tdrefcount > 0);
803 typentry->tupDesc->tdrefcount++;
804
805 /*
806 * In future, we could take some pains to not change tupDesc_identifier if
807 * the tupdesc didn't really change; but for now it's not worth it.
808 */
809 typentry->tupDesc_identifier = ++tupledesc_id_counter;
810
811 relation_close(rel, AccessShareLock);
812 }
813
814 /*
815 * load_rangetype_info --- helper routine to set up range type information
816 */
817 static void
load_rangetype_info(TypeCacheEntry * typentry)818 load_rangetype_info(TypeCacheEntry *typentry)
819 {
820 Form_pg_range pg_range;
821 HeapTuple tup;
822 Oid subtypeOid;
823 Oid opclassOid;
824 Oid canonicalOid;
825 Oid subdiffOid;
826 Oid opfamilyOid;
827 Oid opcintype;
828 Oid cmpFnOid;
829
830 /* get information from pg_range */
831 tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
832 /* should not fail, since we already checked typtype ... */
833 if (!HeapTupleIsValid(tup))
834 elog(ERROR, "cache lookup failed for range type %u",
835 typentry->type_id);
836 pg_range = (Form_pg_range) GETSTRUCT(tup);
837
838 subtypeOid = pg_range->rngsubtype;
839 typentry->rng_collation = pg_range->rngcollation;
840 opclassOid = pg_range->rngsubopc;
841 canonicalOid = pg_range->rngcanonical;
842 subdiffOid = pg_range->rngsubdiff;
843
844 ReleaseSysCache(tup);
845
846 /* get opclass properties and look up the comparison function */
847 opfamilyOid = get_opclass_family(opclassOid);
848 opcintype = get_opclass_input_type(opclassOid);
849
850 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
851 BTORDER_PROC);
852 if (!RegProcedureIsValid(cmpFnOid))
853 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
854 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
855
856 /* set up cached fmgrinfo structs */
857 fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
858 CacheMemoryContext);
859 if (OidIsValid(canonicalOid))
860 fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
861 CacheMemoryContext);
862 if (OidIsValid(subdiffOid))
863 fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
864 CacheMemoryContext);
865
866 /* Lastly, set up link to the element type --- this marks data valid */
867 typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
868 }
869
870
871 /*
872 * load_domaintype_info --- helper routine to set up domain constraint info
873 *
874 * Note: we assume we're called in a relatively short-lived context, so it's
875 * okay to leak data into the current context while scanning pg_constraint.
876 * We build the new DomainConstraintCache data in a context underneath
877 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
878 * complete.
879 */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
	Oid			typeOid = typentry->type_id;
	DomainConstraintCache *dcc;
	bool		notNull = false;
	DomainConstraintState **ccons;	/* temp array for per-level sorting */
	int			cconslen;		/* allocated length of ccons */
	Relation	conRel;
	MemoryContext oldcxt;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint.  Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints.  We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	conRel = heap_open(ConstraintRelationId, AccessShareLock);

	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	conTup;
		Form_pg_type typTup;
		int			nccons = 0;	/* CHECK constraints found at this level */
		ScanKeyData key[1];
		SysScanDesc scan;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
		typTup = (Form_pg_type) GETSTRUCT(tup);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/* Test for NOT NULL Constraint */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(typeOid));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);

		while (HeapTupleIsValid(conTup = systable_getnext(scan)))
		{
			Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;
			DomainConstraintState *r;

			/* Ignore non-CHECK constraints (presently, shouldn't be any) */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			val = fastgetattr(conTup, Anum_pg_constraint_conbin,
							  conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Convert conbin to C string in caller context */
			constring = TextDatumGetCString(val);

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
				dcc = (DomainConstraintCache *)
					MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				/* refcount starts at zero; the typcache's own reference is
				 * only counted at the very end, once everything succeeded */
				dcc->dccRefCount = 0;
			}

			/* Create node trees in DomainConstraintCache's context */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);

			check_expr = (Expr *) stringToNode(constring);

			/* ExecInitExpr will assume we've planned the expression */
			check_expr = expression_planner(check_expr);

			r = makeNode(DomainConstraintState);
			r->constrainttype = DOM_CONSTRAINT_CHECK;
			r->name = pstrdup(NameStr(c->conname));
			r->check_expr = check_expr;
			r->check_exprstate = NULL;

			MemoryContextSwitchTo(oldcxt);

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				cconslen = 8;
				ccons = (DomainConstraintState **)
					palloc(cconslen * sizeof(DomainConstraintState *));
			}
			else if (nccons >= cconslen)
			{
				cconslen *= 2;
				ccons = (DomainConstraintState **)
					repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
				qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

			/*
			 * Now attach them to the overall list.  Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
			MemoryContextSwitchTo(oldcxt);
		}

		/* loop to next domain in stack */
		/* note: save typbasetype before releasing the syscache tuple */
		typeOid = typTup->typbasetype;
		ReleaseSysCache(tup);
	}

	heap_close(conRel, AccessShareLock);

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{
		DomainConstraintState *r;

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
			dcc = (DomainConstraintCache *)
				MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		oldcxt = MemoryContextSwitchTo(dcc->dccContext);

		r = makeNode(DomainConstraintState);

		r->constrainttype = DOM_CONSTRAINT_NOTNULL;
		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;
		r->check_exprstate = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
		typentry->domainData = dcc;
		dcc->dccRefCount++;		/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
	typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}
1103
1104 /*
1105 * qsort comparator to sort DomainConstraintState pointers by name
1106 */
1107 static int
dcs_cmp(const void * a,const void * b)1108 dcs_cmp(const void *a, const void *b)
1109 {
1110 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1111 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1112
1113 return strcmp((*ca)->name, (*cb)->name);
1114 }
1115
1116 /*
1117 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1118 * and free it if no references remain
1119 */
1120 static void
decr_dcc_refcount(DomainConstraintCache * dcc)1121 decr_dcc_refcount(DomainConstraintCache *dcc)
1122 {
1123 Assert(dcc->dccRefCount > 0);
1124 if (--(dcc->dccRefCount) <= 0)
1125 MemoryContextDelete(dcc->dccContext);
1126 }
1127
1128 /*
1129 * Context reset/delete callback for a DomainConstraintRef
1130 */
1131 static void
dccref_deletion_callback(void * arg)1132 dccref_deletion_callback(void *arg)
1133 {
1134 DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1135 DomainConstraintCache *dcc = ref->dcc;
1136
1137 /* Paranoia --- be sure link is nulled before trying to release */
1138 if (dcc)
1139 {
1140 ref->constraints = NIL;
1141 ref->dcc = NULL;
1142 decr_dcc_refcount(dcc);
1143 }
1144 }
1145
1146 /*
1147 * prep_domain_constraints --- prepare domain constraints for execution
1148 *
1149 * The expression trees stored in the DomainConstraintCache's list are
1150 * converted to executable expression state trees stored in execctx.
1151 */
1152 static List *
prep_domain_constraints(List * constraints,MemoryContext execctx)1153 prep_domain_constraints(List *constraints, MemoryContext execctx)
1154 {
1155 List *result = NIL;
1156 MemoryContext oldcxt;
1157 ListCell *lc;
1158
1159 oldcxt = MemoryContextSwitchTo(execctx);
1160
1161 foreach(lc, constraints)
1162 {
1163 DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1164 DomainConstraintState *newr;
1165
1166 newr = makeNode(DomainConstraintState);
1167 newr->constrainttype = r->constrainttype;
1168 newr->name = r->name;
1169 newr->check_expr = r->check_expr;
1170 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1171
1172 result = lappend(result, newr);
1173 }
1174
1175 MemoryContextSwitchTo(oldcxt);
1176
1177 return result;
1178 }
1179
1180 /*
1181 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1182 *
1183 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1184 * lives. The ref will be cleaned up when that context is reset/deleted.
1185 *
1186 * Caller must also tell us whether it wants check_exprstate fields to be
1187 * computed in the DomainConstraintState nodes attached to this ref.
1188 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1189 */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
	ref->need_exprstate = need_exprstate;
	/*
	 * For safety, establish the callback before acquiring a refcount: if we
	 * took the refcount first and then failed to register the callback, the
	 * count would never be released.
	 */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = (void *) ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			/* build executable copies of the constraints in refctx */
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			/* caller wants no exprstates, so just share the cached list */
			ref->constraints = ref->dcc->constraints;
	}
	else
		ref->constraints = NIL;
}
1217
1218 /*
1219 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1220 *
1221 * If the domain's constraint set changed, ref->constraints is updated to
1222 * point at a new list of cached constraints.
1223 *
1224 * In the normal case where nothing happened to the domain, this is cheap
1225 * enough that it's reasonable (and expected) to check before *each* use
1226 * of the constraint info.
1227 */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		/* Now take a reference on the typcache's current (possibly NULL) data */
		dcc = typentry->domainData;
		if (dcc)
		{
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				/* rebuild executable constraint copies in refctx */
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1271
1272 /*
1273 * DomainHasConstraints --- utility routine to check if a domain has constraints
1274 *
1275 * This is defined to return false, not fail, if type is not a domain.
1276 */
1277 bool
DomainHasConstraints(Oid type_id)1278 DomainHasConstraints(Oid type_id)
1279 {
1280 TypeCacheEntry *typentry;
1281
1282 /*
1283 * Note: a side effect is to cause the typcache's domain data to become
1284 * valid. This is fine since we'll likely need it soon if there is any.
1285 */
1286 typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1287
1288 return (typentry->domainData != NULL);
1289 }
1290
1291
1292 /*
1293 * array_element_has_equality and friends are helper routines to check
1294 * whether we should believe that array_eq and related functions will work
1295 * on the given array type or composite type.
1296 *
1297 * The logic above may call these repeatedly on the same type entry, so we
1298 * make use of the typentry->flags field to cache the results once known.
1299 * Also, we assume that we'll probably want all these facts about the type
1300 * if we want any, so we cache them all using only one lookup of the
1301 * component datatype(s).
1302 */
1303
1304 static bool
array_element_has_equality(TypeCacheEntry * typentry)1305 array_element_has_equality(TypeCacheEntry *typentry)
1306 {
1307 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1308 cache_array_element_properties(typentry);
1309 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1310 }
1311
1312 static bool
array_element_has_compare(TypeCacheEntry * typentry)1313 array_element_has_compare(TypeCacheEntry *typentry)
1314 {
1315 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1316 cache_array_element_properties(typentry);
1317 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1318 }
1319
1320 static bool
array_element_has_hashing(TypeCacheEntry * typentry)1321 array_element_has_hashing(TypeCacheEntry *typentry)
1322 {
1323 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1324 cache_array_element_properties(typentry);
1325 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1326 }
1327
1328 static bool
array_element_has_extended_hashing(TypeCacheEntry * typentry)1329 array_element_has_extended_hashing(TypeCacheEntry *typentry)
1330 {
1331 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1332 cache_array_element_properties(typentry);
1333 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1334 }
1335
1336 static void
cache_array_element_properties(TypeCacheEntry * typentry)1337 cache_array_element_properties(TypeCacheEntry *typentry)
1338 {
1339 Oid elem_type = get_base_element_type(typentry->type_id);
1340
1341 if (OidIsValid(elem_type))
1342 {
1343 TypeCacheEntry *elementry;
1344
1345 elementry = lookup_type_cache(elem_type,
1346 TYPECACHE_EQ_OPR |
1347 TYPECACHE_CMP_PROC |
1348 TYPECACHE_HASH_PROC |
1349 TYPECACHE_HASH_EXTENDED_PROC);
1350 if (OidIsValid(elementry->eq_opr))
1351 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1352 if (OidIsValid(elementry->cmp_proc))
1353 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1354 if (OidIsValid(elementry->hash_proc))
1355 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1356 if (OidIsValid(elementry->hash_extended_proc))
1357 typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1358 }
1359 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1360 }
1361
1362 /*
1363 * Likewise, some helper functions for composite types.
1364 */
1365
1366 static bool
record_fields_have_equality(TypeCacheEntry * typentry)1367 record_fields_have_equality(TypeCacheEntry *typentry)
1368 {
1369 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1370 cache_record_field_properties(typentry);
1371 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1372 }
1373
1374 static bool
record_fields_have_compare(TypeCacheEntry * typentry)1375 record_fields_have_compare(TypeCacheEntry *typentry)
1376 {
1377 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1378 cache_record_field_properties(typentry);
1379 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1380 }
1381
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * everything will (we may get a failure at runtime ...)
	 */
	if (typentry->type_id == RECORDOID)
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			/* dropped columns don't participate in comparisons */
			if (attr->attisdropped)
				continue;

			fieldentry = lookup_type_cache(attr->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC);
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */
		TypeCacheEntry *baseentry;

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		baseentry = lookup_type_cache(typentry->domainBaseType,
									  TYPECACHE_EQ_OPR |
									  TYPECACHE_CMP_PROC);
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
												   TCFLAGS_HAVE_FIELD_COMPARE);
		}
	}
	/* Mark the properties as computed, whatever the outcome above */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1459
1460 /*
1461 * Likewise, some helper functions for range types.
1462 *
1463 * We can borrow the flag bits for array element properties to use for range
1464 * element properties, since those flag bits otherwise have no use in a
1465 * range type's typcache entry.
1466 */
1467
1468 static bool
range_element_has_hashing(TypeCacheEntry * typentry)1469 range_element_has_hashing(TypeCacheEntry *typentry)
1470 {
1471 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1472 cache_range_element_properties(typentry);
1473 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1474 }
1475
1476 static bool
range_element_has_extended_hashing(TypeCacheEntry * typentry)1477 range_element_has_extended_hashing(TypeCacheEntry *typentry)
1478 {
1479 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1480 cache_range_element_properties(typentry);
1481 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1482 }
1483
1484 static void
cache_range_element_properties(TypeCacheEntry * typentry)1485 cache_range_element_properties(TypeCacheEntry *typentry)
1486 {
1487 /* load up subtype link if we didn't already */
1488 if (typentry->rngelemtype == NULL &&
1489 typentry->typtype == TYPTYPE_RANGE)
1490 load_rangetype_info(typentry);
1491
1492 if (typentry->rngelemtype != NULL)
1493 {
1494 TypeCacheEntry *elementry;
1495
1496 /* might need to calculate subtype's hash function properties */
1497 elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1498 TYPECACHE_HASH_PROC |
1499 TYPECACHE_HASH_EXTENDED_PROC);
1500 if (OidIsValid(elementry->hash_proc))
1501 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1502 if (OidIsValid(elementry->hash_extended_proc))
1503 typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1504 }
1505 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1506 }
1507
1508 /*
1509 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1510 * to store 'typmod'.
1511 */
1512 static void
ensure_record_cache_typmod_slot_exists(int32 typmod)1513 ensure_record_cache_typmod_slot_exists(int32 typmod)
1514 {
1515 if (RecordCacheArray == NULL)
1516 {
1517 RecordCacheArray = (TupleDesc *)
1518 MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc));
1519 RecordIdentifierArray = (uint64 *)
1520 MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
1521 RecordCacheArrayLen = 64;
1522 }
1523
1524 if (typmod >= RecordCacheArrayLen)
1525 {
1526 int32 newlen = RecordCacheArrayLen * 2;
1527
1528 while (typmod >= newlen)
1529 newlen *= 2;
1530
1531 RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
1532 newlen * sizeof(TupleDesc));
1533 memset(RecordCacheArray + RecordCacheArrayLen, 0,
1534 (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
1535 RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray,
1536 newlen * sizeof(uint64));
1537 memset(RecordIdentifierArray + RecordCacheArrayLen, 0,
1538 (newlen - RecordCacheArrayLen) * sizeof(uint64));
1539 RecordCacheArrayLen = newlen;
1540 }
1541 }
1542
1543 /*
1544 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1545 *
1546 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1547 * hasn't had its refcount bumped.
1548 */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		/* may be NULL when noError is true */
		return typentry->tupDesc;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0)
		{
			/* It is already in our local cache? */
			if (typmod < RecordCacheArrayLen &&
				RecordCacheArray[typmod] != NULL)
				return RecordCacheArray[typmod];

			/* Are we attached to a shared record typmod registry? */
			if (CurrentSession->shared_typmod_registry != NULL)
			{
				SharedTypmodTableEntry *entry;

				/* Try to find it in the shared typmod index. */
				entry = dshash_find(CurrentSession->shared_typmod_table,
									&typmod, false);
				if (entry != NULL)
				{
					TupleDesc	tupdesc;

					tupdesc = (TupleDesc)
						dsa_get_address(CurrentSession->area,
										entry->shared_tupdesc);
					Assert(typmod == tupdesc->tdtypmod);

					/* We may need to extend the local RecordCacheArray. */
					ensure_record_cache_typmod_slot_exists(typmod);

					/*
					 * Our local array can now point directly to the TupleDesc
					 * in shared memory, which is non-reference-counted.
					 */
					RecordCacheArray[typmod] = tupdesc;
					Assert(tupdesc->tdrefcount == -1);

					/*
					 * We don't share tupdesc identifiers across processes, so
					 * assign one locally.
					 */
					RecordIdentifierArray[typmod] = ++tupledesc_id_counter;

					/* done with the shared hash entry; release its lock */
					dshash_release_lock(CurrentSession->shared_typmod_table,
										entry);

					return RecordCacheArray[typmod];
				}
			}
		}

		/* Negative typmod, or typmod not found anywhere */
		if (!noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("record type has not been registered")));
		return NULL;
	}
}
1627
1628 /*
1629 * lookup_rowtype_tupdesc
1630 *
1631 * Given a typeid/typmod that should describe a known composite type,
1632 * return the tuple descriptor for the type. Will ereport on failure.
1633 * (Use ereport because this is reachable with user-specified OIDs,
1634 * for example from record_in().)
1635 *
1636 * Note: on success, we increment the refcount of the returned TupleDesc,
1637 * and log the reference in CurrentResourceOwner. Caller should call
1638 * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1639 */
1640 TupleDesc
lookup_rowtype_tupdesc(Oid type_id,int32 typmod)1641 lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1642 {
1643 TupleDesc tupDesc;
1644
1645 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1646 PinTupleDesc(tupDesc);
1647 return tupDesc;
1648 }
1649
1650 /*
1651 * lookup_rowtype_tupdesc_noerror
1652 *
1653 * As above, but if the type is not a known composite type and noError
1654 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1655 * type_id is passed, you'll get an ereport anyway.)
1656 */
1657 TupleDesc
lookup_rowtype_tupdesc_noerror(Oid type_id,int32 typmod,bool noError)1658 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1659 {
1660 TupleDesc tupDesc;
1661
1662 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1663 if (tupDesc != NULL)
1664 PinTupleDesc(tupDesc);
1665 return tupDesc;
1666 }
1667
1668 /*
1669 * lookup_rowtype_tupdesc_copy
1670 *
1671 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1672 * copied into the CurrentMemoryContext and is not reference-counted.
1673 */
1674 TupleDesc
lookup_rowtype_tupdesc_copy(Oid type_id,int32 typmod)1675 lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1676 {
1677 TupleDesc tmp;
1678
1679 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1680 return CreateTupleDescCopyConstr(tmp);
1681 }
1682
1683 /*
1684 * lookup_rowtype_tupdesc_domain
1685 *
1686 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1687 * a domain over a named composite type; so this is effectively equivalent to
1688 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1689 * except for being a tad faster.
1690 *
1691 * Note: the reason we don't fold the look-through-domain behavior into plain
1692 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1693 * dealing with a domain. Otherwise they might construct a tuple that should
1694 * be of the domain type, but not apply domain constraints.
1695 */
TupleDesc
lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	if (type_id != RECORDOID)
	{
		/*
		 * Check for domain or named composite type.  We might as well load
		 * whichever data is needed.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id,
									 TYPECACHE_TUPDESC |
									 TYPECACHE_DOMAIN_BASE_INFO);
		if (typentry->typtype == TYPTYPE_DOMAIN)
			/* recurse one level to the base type (which pins the tupdesc) */
			return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
												  typentry->domainBaseTypmod,
												  noError);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		tupDesc = typentry->tupDesc;
	}
	else
		tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* Pin before returning, same contract as lookup_rowtype_tupdesc_noerror */
	if (tupDesc != NULL)
		PinTupleDesc(tupDesc);
	return tupDesc;
}
1729
1730 /*
1731 * Hash function for the hash table of RecordCacheEntry.
1732 */
1733 static uint32
record_type_typmod_hash(const void * data,size_t size)1734 record_type_typmod_hash(const void *data, size_t size)
1735 {
1736 RecordCacheEntry *entry = (RecordCacheEntry *) data;
1737
1738 return hashTupleDesc(entry->tupdesc);
1739 }
1740
1741 /*
1742 * Match function for the hash table of RecordCacheEntry.
1743 */
1744 static int
record_type_typmod_compare(const void * a,const void * b,size_t size)1745 record_type_typmod_compare(const void *a, const void *b, size_t size)
1746 {
1747 RecordCacheEntry *left = (RecordCacheEntry *) a;
1748 RecordCacheEntry *right = (RecordCacheEntry *) b;
1749
1750 return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1751 }
1752
1753 /*
1754 * assign_record_type_typmod
1755 *
1756 * Given a tuple descriptor for a RECORD type, find or create a cache entry
1757 * for the type, and set the tupdesc's tdtypmod field to a value that will
1758 * identify this cache entry to lookup_rowtype_tupdesc.
1759 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
	RecordCacheEntry *recentry;
	TupleDesc	entDesc;
	bool		found;
	MemoryContext oldcxt;

	Assert(tupDesc->tdtypeid == RECORDOID);

	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(TupleDesc);	/* just the pointer */
		ctl.entrysize = sizeof(RecordCacheEntry);
		ctl.hash = record_type_typmod_hash;
		ctl.match = record_type_typmod_compare;
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl,
									  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/*
	 * Find a hashtable entry for this tuple descriptor.  We don't use
	 * HASH_ENTER yet, because if it's missing, we need to make sure that all
	 * the allocations succeed before we create the new entry.
	 */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												(void *) &tupDesc,
												HASH_FIND, &found);
	if (found && recentry->tupdesc != NULL)
	{
		/* Already registered: just copy the assigned typmod back */
		tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
		return;
	}

	/* Not present, so need to manufacture an entry */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/* Look in the SharedRecordTypmodRegistry, if attached */
	entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
	if (entDesc == NULL)
	{
		/*
		 * Make sure we have room before we CreateTupleDescCopy() or advance
		 * NextRecordTypmod.
		 */
		ensure_record_cache_typmod_slot_exists(NextRecordTypmod);

		/* Reference-counted local cache only. */
		entDesc = CreateTupleDescCopy(tupDesc);
		entDesc->tdrefcount = 1;
		entDesc->tdtypmod = NextRecordTypmod++;
	}
	else
	{
		/* shared registry already chose the typmod; just make room locally */
		ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
	}

	RecordCacheArray[entDesc->tdtypmod] = entDesc;

	/* Assign a unique tupdesc identifier, too. */
	RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter;

	/* Fully initialized; create the hash table entry */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												(void *) &tupDesc,
												HASH_ENTER, NULL);
	recentry->tupdesc = entDesc;

	/* Update the caller's tuple descriptor. */
	tupDesc->tdtypmod = entDesc->tdtypmod;

	MemoryContextSwitchTo(oldcxt);
}
1842
1843 /*
1844 * assign_record_type_identifier
1845 *
1846 * Get an identifier, which will be unique over the lifespan of this backend
1847 * process, for the current tuple descriptor of the specified composite type.
1848 * For named composite types, the value is guaranteed to change if the type's
1849 * definition does. For registered RECORD types, the value will not change
1850 * once assigned, since the registered type won't either. If an anonymous
1851 * RECORD type is specified, we return a new identifier on each call.
1852 */
1853 uint64
assign_record_type_identifier(Oid type_id,int32 typmod)1854 assign_record_type_identifier(Oid type_id, int32 typmod)
1855 {
1856 if (type_id != RECORDOID)
1857 {
1858 /*
1859 * It's a named composite type, so use the regular typcache.
1860 */
1861 TypeCacheEntry *typentry;
1862
1863 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1864 if (typentry->tupDesc == NULL)
1865 ereport(ERROR,
1866 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1867 errmsg("type %s is not composite",
1868 format_type_be(type_id))));
1869 Assert(typentry->tupDesc_identifier != 0);
1870 return typentry->tupDesc_identifier;
1871 }
1872 else
1873 {
1874 /*
1875 * It's a transient record type, so look in our record-type table.
1876 */
1877 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
1878 RecordCacheArray[typmod] != NULL)
1879 {
1880 Assert(RecordIdentifierArray[typmod] != 0);
1881 return RecordIdentifierArray[typmod];
1882 }
1883
1884 /* For anonymous or unrecognized record type, generate a new ID */
1885 return ++tupledesc_id_counter;
1886 }
1887 }
1888
/*
 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
 * This exists only to avoid exposing private innards of
 * SharedRecordTypmodRegistry in a header.
 */
size_t
SharedRecordTypmodRegistryEstimate(void)
{
	return sizeof(SharedRecordTypmodRegistry);
}
1899
/*
 * Initialize 'registry' in a pre-existing shared memory region, which must be
 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
 * bytes.
 *
 * 'area' will be used to allocate shared memory space as required for the
 * typemod registration.  The current process, expected to be a leader process
 * in a parallel query, will be attached automatically and its current record
 * types will be loaded into *registry.  While attached, all calls to
 * assign_record_type_typmod will use the shared registry.  Worker backends
 * will need to attach explicitly.
 *
 * Note that this function takes 'area' and 'segment' as arguments rather than
 * accessing them via CurrentSession, because they aren't installed there
 * until after this function runs.
 */
void
SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
							   dsm_segment *segment,
							   dsa_area *area)
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;
	int32		typmod;

	Assert(!IsParallelWorker());

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession->shared_typmod_registry == NULL);
	Assert(CurrentSession->shared_record_table == NULL);
	Assert(CurrentSession->shared_typmod_table == NULL);

	/* The dshash control structures must live in TopMemoryContext. */
	old_context = MemoryContextSwitchTo(TopMemoryContext);

	/*
	 * Create the hash table of tuple descriptors indexed by themselves.
	 * NOTE(review): 'area' is also passed as the table's callback argument,
	 * presumably so the key compare/hash functions can dereference shared
	 * tupledesc pointers — confirm against srtr_record_table_params.
	 */
	record_table = dshash_create(area, &srtr_record_table_params, area);

	/* Create the hash table of tuple descriptors indexed by typmod. */
	typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);

	MemoryContextSwitchTo(old_context);

	/* Initialize the SharedRecordTypmodRegistry. */
	registry->record_table_handle = dshash_get_hash_table_handle(record_table);
	registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
	pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);

	/*
	 * Copy all entries from this backend's private registry into the shared
	 * registry.
	 */
	for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
	{
		SharedTypmodTableEntry *typmod_table_entry;
		SharedRecordTableEntry *record_table_entry;
		SharedRecordTableKey record_table_key;
		dsa_pointer shared_dp;
		TupleDesc	tupdesc;
		bool		found;

		tupdesc = RecordCacheArray[typmod];
		if (tupdesc == NULL)
			continue;

		/* Copy the TupleDesc into shared memory. */
		shared_dp = share_tupledesc(area, tupdesc, typmod);

		/* Insert into the typmod table. */
		typmod_table_entry = dshash_find_or_insert(typmod_table,
												   &tupdesc->tdtypmod,
												   &found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
		typmod_table_entry->typmod = tupdesc->tdtypmod;
		typmod_table_entry->shared_tupdesc = shared_dp;
		dshash_release_lock(typmod_table, typmod_table_entry);

		/*
		 * Insert into the record table.  If an identical tuple descriptor
		 * was already registered under some other typmod, keep that entry;
		 * lookups by structure only need one representative.
		 */
		record_table_key.shared = false;
		record_table_key.u.local_tupdesc = tupdesc;
		record_table_entry = dshash_find_or_insert(record_table,
												   &record_table_key,
												   &found);
		if (!found)
		{
			record_table_entry->key.shared = true;
			record_table_entry->key.u.shared_tupdesc = shared_dp;
		}
		dshash_release_lock(record_table, record_table_entry);
	}

	/*
	 * Set up the global state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
	CurrentSession->shared_typmod_registry = registry;

	/*
	 * We install a detach hook in the leader, but only to handle cleanup on
	 * failure during GetSessionDsmHandle().  Once GetSessionDsmHandle() pins
	 * the memory, the leader process will use a shared registry until it
	 * exits.
	 */
	on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
}
2008
/*
 * Attach to 'registry', which must have been initialized already by another
 * backend.  Future calls to assign_record_type_typmod and
 * lookup_rowtype_tupdesc_internal will use the shared registry until the
 * current session is detached.
 */
void
SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;

	Assert(IsParallelWorker());

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession != NULL);
	Assert(CurrentSession->segment != NULL);
	Assert(CurrentSession->area != NULL);
	Assert(CurrentSession->shared_typmod_registry == NULL);
	Assert(CurrentSession->shared_record_table == NULL);
	Assert(CurrentSession->shared_typmod_table == NULL);

	/*
	 * We can't already have typmods in our local cache, because they'd clash
	 * with those imported by SharedRecordTypmodRegistryInit.  This should be
	 * a freshly started parallel worker.  If we ever support worker
	 * recycling, a worker would need to zap its local cache in between
	 * servicing different queries, in order to be able to call this and
	 * synchronize typmods with a new leader; but that's problematic because
	 * we can't be very sure that record-typmod-related state hasn't escaped
	 * to anywhere else in the process.
	 */
	Assert(NextRecordTypmod == 0);

	/* The dshash handles must live in TopMemoryContext. */
	old_context = MemoryContextSwitchTo(TopMemoryContext);

	/* Attach to the two hash tables. */
	record_table = dshash_attach(CurrentSession->area,
								 &srtr_record_table_params,
								 registry->record_table_handle,
								 CurrentSession->area);
	typmod_table = dshash_attach(CurrentSession->area,
								 &srtr_typmod_table_params,
								 registry->typmod_table_handle,
								 NULL);

	MemoryContextSwitchTo(old_context);

	/*
	 * Set up detach hook to run at worker exit.  Currently this is the same
	 * as the leader's detach hook, but in future they might need to be
	 * different.
	 */
	on_dsm_detach(CurrentSession->segment,
				  shared_record_typmod_registry_detach,
				  PointerGetDatum(registry));

	/*
	 * Set up the session state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_typmod_registry = registry;
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
}
2075
2076 /*
2077 * TypeCacheRelCallback
2078 * Relcache inval callback function
2079 *
2080 * Delete the cached tuple descriptor (if any) for the given rel's composite
2081 * type, or for all composite types if relid == InvalidOid. Also reset
2082 * whatever info we have cached about the composite type's comparability.
2083 *
2084 * This is called when a relcache invalidation event occurs for the given
2085 * relid. We must scan the whole typcache hash since we don't know the
2086 * type OID corresponding to the relid. We could do a direct search if this
2087 * were a syscache-flush callback on pg_type, but then we would need all
2088 * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
2089 * invals against the rel's pg_type OID. The extra SI signaling could very
2090 * well cost more than we'd save, since in most usages there are not very
2091 * many entries in a backend's typcache. The risk of bugs-of-omission seems
2092 * high, too.
2093 *
2094 * Another possibility, with only localized impact, is to maintain a second
2095 * hashtable that indexes composite-type typcache entries by their typrelid.
2096 * But it's still not clear it's worth the trouble.
2097 */
2098 static void
TypeCacheRelCallback(Datum arg,Oid relid)2099 TypeCacheRelCallback(Datum arg, Oid relid)
2100 {
2101 HASH_SEQ_STATUS status;
2102 TypeCacheEntry *typentry;
2103
2104 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2105 hash_seq_init(&status, TypeCacheHash);
2106 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2107 {
2108 if (typentry->typtype == TYPTYPE_COMPOSITE)
2109 {
2110 /* Skip if no match, unless we're zapping all composite types */
2111 if (relid != typentry->typrelid && relid != InvalidOid)
2112 continue;
2113
2114 /* Delete tupdesc if we have it */
2115 if (typentry->tupDesc != NULL)
2116 {
2117 /*
2118 * Release our refcount, and free the tupdesc if none remain.
2119 * (Can't use DecrTupleDescRefCount because this reference is
2120 * not logged in current resource owner.)
2121 */
2122 Assert(typentry->tupDesc->tdrefcount > 0);
2123 if (--typentry->tupDesc->tdrefcount == 0)
2124 FreeTupleDesc(typentry->tupDesc);
2125 typentry->tupDesc = NULL;
2126
2127 /*
2128 * Also clear tupDesc_identifier, so that anything watching
2129 * that will realize that the tupdesc has possibly changed.
2130 * (Alternatively, we could specify that to detect possible
2131 * tupdesc change, one must check for tupDesc != NULL as well
2132 * as tupDesc_identifier being the same as what was previously
2133 * seen. That seems error-prone.)
2134 */
2135 typentry->tupDesc_identifier = 0;
2136 }
2137
2138 /* Reset equality/comparison/hashing validity information */
2139 typentry->flags = 0;
2140 }
2141 else if (typentry->typtype == TYPTYPE_DOMAIN)
2142 {
2143 /*
2144 * If it's domain over composite, reset flags. (We don't bother
2145 * trying to determine whether the specific base type needs a
2146 * reset.) Note that if we haven't determined whether the base
2147 * type is composite, we don't need to reset anything.
2148 */
2149 if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2150 typentry->flags = 0;
2151 }
2152 }
2153 }
2154
2155 /*
2156 * TypeCacheOpcCallback
2157 * Syscache inval callback function
2158 *
2159 * This is called when a syscache invalidation event occurs for any pg_opclass
2160 * row. In principle we could probably just invalidate data dependent on the
2161 * particular opclass, but since updates on pg_opclass are rare in production
2162 * it doesn't seem worth a lot of complication: we just mark all cached data
2163 * invalid.
2164 *
2165 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2166 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2167 * is not allowed to be used to add/drop the primary operators and functions
2168 * of an opclass, only cross-type members of a family; and the latter sorts
2169 * of members are not going to get cached here.
2170 */
2171 static void
TypeCacheOpcCallback(Datum arg,int cacheid,uint32 hashvalue)2172 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2173 {
2174 HASH_SEQ_STATUS status;
2175 TypeCacheEntry *typentry;
2176
2177 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2178 hash_seq_init(&status, TypeCacheHash);
2179 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2180 {
2181 /* Reset equality/comparison/hashing validity information */
2182 typentry->flags = 0;
2183 }
2184 }
2185
2186 /*
2187 * TypeCacheConstrCallback
2188 * Syscache inval callback function
2189 *
2190 * This is called when a syscache invalidation event occurs for any
2191 * pg_constraint or pg_type row. We flush information about domain
2192 * constraints when this happens.
2193 *
2194 * It's slightly annoying that we can't tell whether the inval event was for a
2195 * domain constraint/type record or not; there's usually more update traffic
2196 * for table constraints/types than domain constraints, so we'll do a lot of
2197 * useless flushes. Still, this is better than the old no-caching-at-all
2198 * approach to domain constraints.
2199 */
2200 static void
TypeCacheConstrCallback(Datum arg,int cacheid,uint32 hashvalue)2201 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2202 {
2203 TypeCacheEntry *typentry;
2204
2205 /*
2206 * Because this is called very frequently, and typically very few of the
2207 * typcache entries are for domains, we don't use hash_seq_search here.
2208 * Instead we thread all the domain-type entries together so that we can
2209 * visit them cheaply.
2210 */
2211 for (typentry = firstDomainTypeEntry;
2212 typentry != NULL;
2213 typentry = typentry->nextDomain)
2214 {
2215 /* Reset domain constraint validity information */
2216 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2217 }
2218 }
2219
2220
2221 /*
2222 * Check if given OID is part of the subset that's sortable by comparisons
2223 */
2224 static inline bool
enum_known_sorted(TypeCacheEnumData * enumdata,Oid arg)2225 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2226 {
2227 Oid offset;
2228
2229 if (arg < enumdata->bitmap_base)
2230 return false;
2231 offset = arg - enumdata->bitmap_base;
2232 if (offset > (Oid) INT_MAX)
2233 return false;
2234 return bms_is_member((int) offset, enumdata->sorted_values);
2235 }
2236
2237
2238 /*
2239 * compare_values_of_enum
2240 * Compare two members of an enum type.
2241 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2242 *
2243 * Note: currently, the enumData cache is refreshed only if we are asked
2244 * to compare an enum value that is not already in the cache. This is okay
2245 * because there is no support for re-ordering existing values, so comparisons
2246 * of previously cached values will return the right answer even if other
2247 * values have been added since we last loaded the cache.
2248 *
2249 * Note: the enum logic has a special-case rule about even-numbered versus
2250 * odd-numbered OIDs, but we take no account of that rule here; this
2251 * routine shouldn't even get called when that rule applies.
2252 */
2253 int
compare_values_of_enum(TypeCacheEntry * tcache,Oid arg1,Oid arg2)2254 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2255 {
2256 TypeCacheEnumData *enumdata;
2257 EnumItem *item1;
2258 EnumItem *item2;
2259
2260 /*
2261 * Equal OIDs are certainly equal --- this case was probably handled by
2262 * our caller, but we may as well check.
2263 */
2264 if (arg1 == arg2)
2265 return 0;
2266
2267 /* Load up the cache if first time through */
2268 if (tcache->enumData == NULL)
2269 load_enum_cache_data(tcache);
2270 enumdata = tcache->enumData;
2271
2272 /*
2273 * If both OIDs are known-sorted, we can just compare them directly.
2274 */
2275 if (enum_known_sorted(enumdata, arg1) &&
2276 enum_known_sorted(enumdata, arg2))
2277 {
2278 if (arg1 < arg2)
2279 return -1;
2280 else
2281 return 1;
2282 }
2283
2284 /*
2285 * Slow path: we have to identify their actual sort-order positions.
2286 */
2287 item1 = find_enumitem(enumdata, arg1);
2288 item2 = find_enumitem(enumdata, arg2);
2289
2290 if (item1 == NULL || item2 == NULL)
2291 {
2292 /*
2293 * We couldn't find one or both values. That means the enum has
2294 * changed under us, so re-initialize the cache and try again. We
2295 * don't bother retrying the known-sorted case in this path.
2296 */
2297 load_enum_cache_data(tcache);
2298 enumdata = tcache->enumData;
2299
2300 item1 = find_enumitem(enumdata, arg1);
2301 item2 = find_enumitem(enumdata, arg2);
2302
2303 /*
2304 * If we still can't find the values, complain: we must have corrupt
2305 * data.
2306 */
2307 if (item1 == NULL)
2308 elog(ERROR, "enum value %u not found in cache for enum %s",
2309 arg1, format_type_be(tcache->type_id));
2310 if (item2 == NULL)
2311 elog(ERROR, "enum value %u not found in cache for enum %s",
2312 arg2, format_type_be(tcache->type_id));
2313 }
2314
2315 if (item1->sort_order < item2->sort_order)
2316 return -1;
2317 else if (item1->sort_order > item2->sort_order)
2318 return 1;
2319 else
2320 return 0;
2321 }
2322
/*
 * Load (or re-load) the enumData member of the typcache entry.
 */
static void
load_enum_cache_data(TypeCacheEntry *tcache)
{
	TypeCacheEnumData *enumdata;
	Relation	enum_rel;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	EnumItem   *items;
	int			numitems;
	int			maxitems;
	Oid			bitmap_base;
	Bitmapset  *bitmap;
	MemoryContext oldcxt;
	int			bm_size,
				start_pos;

	/* Check that this is actually an enum */
	if (tcache->typtype != TYPTYPE_ENUM)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("%s is not an enum",
						format_type_be(tcache->type_id))));

	/*
	 * Read all the information for members of the enum type.  We collect the
	 * info in working memory in the caller's context, and then transfer it to
	 * permanent memory in CacheMemoryContext.  This minimizes the risk of
	 * leaking memory from CacheMemoryContext in the event of an error partway
	 * through.
	 */
	maxitems = 64;
	items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
	numitems = 0;

	/* Scan pg_enum for the members of the target enum type. */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(tcache->type_id));

	enum_rel = heap_open(EnumRelationId, AccessShareLock);
	enum_scan = systable_beginscan(enum_rel,
								   EnumTypIdLabelIndexId,
								   true, NULL,
								   1, &skey);

	while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
	{
		Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

		/* Double the work array whenever it fills up. */
		if (numitems >= maxitems)
		{
			maxitems *= 2;
			items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
		}
		items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
		items[numitems].sort_order = en->enumsortorder;
		numitems++;
	}

	systable_endscan(enum_scan);
	heap_close(enum_rel, AccessShareLock);

	/* Sort the items into OID order (enables bsearch in find_enumitem) */
	qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

	/*
	 * Here, we create a bitmap listing a subset of the enum's OIDs that are
	 * known to be in order and can thus be compared with just OID comparison.
	 *
	 * The point of this is that the enum's initial OIDs were certainly in
	 * order, so there is some subset that can be compared via OID comparison;
	 * and we'd rather not do binary searches unnecessarily.
	 *
	 * This is somewhat heuristic, and might identify a subset of OIDs that
	 * isn't exactly what the type started with.  That's okay as long as the
	 * subset is correctly sorted.
	 */
	bitmap_base = InvalidOid;
	bitmap = NULL;
	bm_size = 1;				/* only save sets of at least 2 OIDs */

	for (start_pos = 0; start_pos < numitems - 1; start_pos++)
	{
		/*
		 * Identify longest sorted subsequence starting at start_pos
		 */
		Bitmapset  *this_bitmap = bms_make_singleton(0);
		int			this_bm_size = 1;
		Oid			start_oid = items[start_pos].enum_oid;
		float4		prev_order = items[start_pos].sort_order;
		int			i;

		for (i = start_pos + 1; i < numitems; i++)
		{
			Oid			offset;

			offset = items[i].enum_oid - start_oid;
			/* quit if bitmap would be too large; cutoff is arbitrary */
			if (offset >= 8192)
				break;
			/* include the item if it's in-order */
			if (items[i].sort_order > prev_order)
			{
				prev_order = items[i].sort_order;
				this_bitmap = bms_add_member(this_bitmap, (int) offset);
				this_bm_size++;
			}
		}

		/* Remember it if larger than previous best */
		if (this_bm_size > bm_size)
		{
			bms_free(bitmap);
			bitmap_base = start_oid;
			bitmap = this_bitmap;
			bm_size = this_bm_size;
		}
		else
			bms_free(this_bitmap);

		/*
		 * Done if it's not possible to find a longer sequence in the rest of
		 * the list.  In typical cases this will happen on the first
		 * iteration, which is why we create the bitmaps on the fly instead of
		 * doing a second pass over the list.
		 */
		if (bm_size >= (numitems - start_pos - 1))
			break;
	}

	/* OK, copy the data into CacheMemoryContext */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
	enumdata = (TypeCacheEnumData *)
		palloc(offsetof(TypeCacheEnumData, enum_values) +
			   numitems * sizeof(EnumItem));
	enumdata->bitmap_base = bitmap_base;
	enumdata->sorted_values = bms_copy(bitmap);
	enumdata->num_values = numitems;
	memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
	MemoryContextSwitchTo(oldcxt);

	/* Release the working copies kept in the caller's context. */
	pfree(items);
	bms_free(bitmap);

	/* And link the finished cache struct into the typcache */
	if (tcache->enumData != NULL)
		pfree(tcache->enumData);
	tcache->enumData = enumdata;
}
2477
2478 /*
2479 * Locate the EnumItem with the given OID, if present
2480 */
2481 static EnumItem *
find_enumitem(TypeCacheEnumData * enumdata,Oid arg)2482 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2483 {
2484 EnumItem srch;
2485
2486 /* On some versions of Solaris, bsearch of zero items dumps core */
2487 if (enumdata->num_values <= 0)
2488 return NULL;
2489
2490 srch.enum_oid = arg;
2491 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2492 sizeof(EnumItem), enum_oid_cmp);
2493 }
2494
2495 /*
2496 * qsort comparison function for OID-ordered EnumItems
2497 */
2498 static int
enum_oid_cmp(const void * left,const void * right)2499 enum_oid_cmp(const void *left, const void *right)
2500 {
2501 const EnumItem *l = (const EnumItem *) left;
2502 const EnumItem *r = (const EnumItem *) right;
2503
2504 if (l->enum_oid < r->enum_oid)
2505 return -1;
2506 else if (l->enum_oid > r->enum_oid)
2507 return 1;
2508 else
2509 return 0;
2510 }
2511
2512 /*
2513 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2514 * to the given value and return a dsa_pointer.
2515 */
2516 static dsa_pointer
share_tupledesc(dsa_area * area,TupleDesc tupdesc,uint32 typmod)2517 share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2518 {
2519 dsa_pointer shared_dp;
2520 TupleDesc shared;
2521
2522 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2523 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2524 TupleDescCopy(shared, tupdesc);
2525 shared->tdtypmod = typmod;
2526
2527 return shared_dp;
2528 }
2529
/*
 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
 * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
 * Tuple descriptors returned by this function are not reference counted, and
 * will exist at least as long as the current backend remained attached to the
 * current session.
 */
static TupleDesc
find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
{
	TupleDesc	result;
	SharedRecordTableKey key;
	SharedRecordTableEntry *record_table_entry;
	SharedTypmodTableEntry *typmod_table_entry;
	dsa_pointer shared_dp;
	bool		found;
	uint32		typmod;

	/* If not even attached, nothing to do. */
	if (CurrentSession->shared_typmod_registry == NULL)
		return NULL;

	/* Try to find a matching tuple descriptor in the record table. */
	key.shared = false;
	key.u.local_tupdesc = tupdesc;
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find(CurrentSession->shared_record_table, &key, false);
	if (record_table_entry)
	{
		/* Found one; return the shared descriptor it points to. */
		Assert(record_table_entry->key.shared);
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Allocate a new typmod number.  This will be wasted if we error out. */
	typmod = (int)
		pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
								1);

	/* Copy the TupleDesc into shared memory. */
	shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);

	/*
	 * Create an entry in the typmod table so that others will understand this
	 * typmod number.  On error, free the shared copy we just made before
	 * re-throwing, so the DSA space isn't leaked.
	 */
	PG_TRY();
	{
		typmod_table_entry = (SharedTypmodTableEntry *)
			dshash_find_or_insert(CurrentSession->shared_typmod_table,
								  &typmod, &found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
	}
	PG_CATCH();
	{
		dsa_free(CurrentSession->area, shared_dp);
		PG_RE_THROW();
	}
	PG_END_TRY();
	typmod_table_entry->typmod = typmod;
	typmod_table_entry->shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_typmod_table,
						typmod_table_entry);

	/*
	 * Finally create an entry in the record table so others with matching
	 * tuple descriptors can reuse the typmod.
	 */
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find_or_insert(CurrentSession->shared_record_table, &key,
							  &found);
	if (found)
	{
		/*
		 * Someone concurrently inserted a matching tuple descriptor since the
		 * first time we checked.  Use that one instead.
		 */
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);

		/* Might as well free up the space used by the one we created. */
		found = dshash_delete_key(CurrentSession->shared_typmod_table,
								  &typmod);
		Assert(found);
		dsa_free(CurrentSession->area, shared_dp);

		/* Return the one we found. */
		Assert(record_table_entry->key.shared);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Store it and return it. */
	record_table_entry->key.shared = true;
	record_table_entry->key.u.shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_record_table,
						record_table_entry);
	result = (TupleDesc)
		dsa_get_address(CurrentSession->area, shared_dp);
	Assert(result->tdrefcount == -1);

	return result;
}
2644
2645 /*
2646 * On-DSM-detach hook to forget about the current shared record typmod
2647 * infrastructure. This is currently used by both leader and workers.
2648 */
2649 static void
shared_record_typmod_registry_detach(dsm_segment * segment,Datum datum)2650 shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
2651 {
2652 /* Be cautious here: maybe we didn't finish initializing. */
2653 if (CurrentSession->shared_record_table != NULL)
2654 {
2655 dshash_detach(CurrentSession->shared_record_table);
2656 CurrentSession->shared_record_table = NULL;
2657 }
2658 if (CurrentSession->shared_typmod_table != NULL)
2659 {
2660 dshash_detach(CurrentSession->shared_typmod_table);
2661 CurrentSession->shared_typmod_table = NULL;
2662 }
2663 CurrentSession->shared_typmod_registry = NULL;
2664 }
2665