1 /*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Information dependent on opclasses is cleared if we
27 * detect updates to pg_opclass. We also support clearing the tuple
28 * descriptor and operator/function parts of a rowtype's cache entry,
29 * since those may need to change as a consequence of ALTER TABLE.
30 * Domain constraint changes are also tracked properly.
31 *
32 *
33 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
34 * Portions Copyright (c) 1994, Regents of the University of California
35 *
36 * IDENTIFICATION
37 * src/backend/utils/cache/typcache.c
38 *
39 *-------------------------------------------------------------------------
40 */
41 #include "postgres.h"
42
43 #include <limits.h>
44
45 #include "access/hash.h"
46 #include "access/heapam.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "catalog/indexing.h"
50 #include "catalog/pg_am.h"
51 #include "catalog/pg_constraint.h"
52 #include "catalog/pg_enum.h"
53 #include "catalog/pg_operator.h"
54 #include "catalog/pg_range.h"
55 #include "catalog/pg_type.h"
56 #include "commands/defrem.h"
57 #include "executor/executor.h"
58 #include "optimizer/planner.h"
59 #include "utils/builtins.h"
60 #include "utils/catcache.h"
61 #include "utils/fmgroids.h"
62 #include "utils/inval.h"
63 #include "utils/lsyscache.h"
64 #include "utils/memutils.h"
65 #include "utils/rel.h"
66 #include "utils/snapmgr.h"
67 #include "utils/syscache.h"
68 #include "utils/typcache.h"
69
70
71 /* The main type cache hashtable searched by lookup_type_cache */
72 static HTAB *TypeCacheHash = NULL;
73
74 /* List of type cache entries for domain types */
75 static TypeCacheEntry *firstDomainTypeEntry = NULL;
76
77 /* Private flag bits in the TypeCacheEntry.flags field */
78 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x0001
79 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x0002
80 #define TCFLAGS_CHECKED_EQ_OPR 0x0004
81 #define TCFLAGS_CHECKED_LT_OPR 0x0008
82 #define TCFLAGS_CHECKED_GT_OPR 0x0010
83 #define TCFLAGS_CHECKED_CMP_PROC 0x0020
84 #define TCFLAGS_CHECKED_HASH_PROC 0x0040
85 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x0080
86 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x0100
87 #define TCFLAGS_HAVE_ELEM_COMPARE 0x0200
88 #define TCFLAGS_HAVE_ELEM_HASHING 0x0400
89 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x0800
90 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x1000
91 #define TCFLAGS_HAVE_FIELD_COMPARE 0x2000
92 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x4000
93
94 /*
95 * Data stored about a domain type's constraints. Note that we do not create
96 * this struct for the common case of a constraint-less domain; we just set
97 * domainData to NULL to indicate that.
98 *
99 * Within a DomainConstraintCache, we store expression plan trees, but the
100 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
101 * When needed, expression evaluation nodes are built by flat-copying the
102 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
103 * Such a node tree is not part of the DomainConstraintCache, but is
104 * considered to belong to a DomainConstraintRef.
105 */
struct DomainConstraintCache
{
	List	   *constraints;	/* list of DomainConstraintState nodes */
	MemoryContext dccContext;	/* memory context holding all associated data */
	/* Shared by refcount: the typcache entry plus any live DomainConstraintRefs */
	long		dccRefCount;	/* number of references to this struct */
};
112
113 /* Private information to support comparisons of enum values */
/* One enum member: its OID paired with its pg_enum sort position */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value */
	float4		sort_order;		/* its sort position */
} EnumItem;
119
/* Cached comparison data for all values of one enum type */
typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* Set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER]; /* sorted by OID */
} TypeCacheEnumData;
127
128 /*
129 * We use a separate table for storing the definitions of non-anonymous
130 * record types. Once defined, a record type will be remembered for the
131 * life of the backend. Subsequent uses of the "same" record type (where
132 * sameness means equalTupleDescs) will refer to the existing table entry.
133 *
134 * Stored record types are remembered in a linear array of TupleDescs,
135 * which can be indexed quickly with the assigned typmod. There is also
136 * a hash table to speed searches for matching TupleDescs. The hash key
137 * uses just the first N columns' type OIDs, and so we may have multiple
138 * entries with the same hash key.
139 */
140 #define REC_HASH_KEYS 16 /* use this many columns in hash key */
141
/*
 * One hash bucket of known record types; multiple TupleDescs can share a
 * bucket because the key covers only the first REC_HASH_KEYS column types.
 */
typedef struct RecordCacheEntry
{
	/* the hash lookup key MUST BE FIRST */
	Oid			hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */

	/* list of TupleDescs for record types with this hashkey */
	List	   *tupdescs;
} RecordCacheEntry;
150
151 static HTAB *RecordCacheHash = NULL;
152
153 static TupleDesc *RecordCacheArray = NULL;
154 static int32 RecordCacheArrayLen = 0; /* allocated length of array */
155 static int32 NextRecordTypmod = 0; /* number of entries used */
156
157 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
158 static void load_rangetype_info(TypeCacheEntry *typentry);
159 static void load_domaintype_info(TypeCacheEntry *typentry);
160 static int dcs_cmp(const void *a, const void *b);
161 static void decr_dcc_refcount(DomainConstraintCache *dcc);
162 static void dccref_deletion_callback(void *arg);
163 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
164 static bool array_element_has_equality(TypeCacheEntry *typentry);
165 static bool array_element_has_compare(TypeCacheEntry *typentry);
166 static bool array_element_has_hashing(TypeCacheEntry *typentry);
167 static void cache_array_element_properties(TypeCacheEntry *typentry);
168 static bool record_fields_have_equality(TypeCacheEntry *typentry);
169 static bool record_fields_have_compare(TypeCacheEntry *typentry);
170 static void cache_record_field_properties(TypeCacheEntry *typentry);
171 static bool range_element_has_hashing(TypeCacheEntry *typentry);
172 static void cache_range_element_properties(TypeCacheEntry *typentry);
173 static void TypeCacheRelCallback(Datum arg, Oid relid);
174 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
175 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
176 static void load_enum_cache_data(TypeCacheEntry *tcache);
177 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
178 static int enum_oid_cmp(const void *left, const void *right);
179
180
181 /*
182 * lookup_type_cache
183 *
184 * Fetch the type cache entry for the specified datatype, and make sure that
185 * all the fields requested by bits in 'flags' are valid.
186 *
187 * The result is never NULL --- we will ereport() if the passed type OID is
188 * invalid. Note however that we may fail to find one or more of the
189 * values requested by 'flags'; the caller needs to check whether the fields
190 * are InvalidOid or not.
191 */
TypeCacheEntry *
lookup_type_cache(Oid type_id, int flags)
{
	TypeCacheEntry *typentry;
	bool		found;

	if (TypeCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(TypeCacheEntry);
		TypeCacheHash = hash_create("Type information cache", 64,
									&ctl, HASH_ELEM | HASH_BLOBS);

		/* Also set up callbacks for SI invalidations */
		CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/* Try to look up an existing entry */
	typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
											  (void *) &type_id,
											  HASH_FIND, NULL);
	if (typentry == NULL)
	{
		/*
		 * If we didn't find one, we want to make one.  But first look up the
		 * pg_type row, just to make sure we don't make a cache entry for an
		 * invalid type OID.  If the type OID is not valid, present a
		 * user-facing error, since some code paths such as domain_in() allow
		 * this function to be reached with a user-supplied OID.
		 */
		HeapTuple	tp;
		Form_pg_type typtup;

		tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
		if (!HeapTupleIsValid(tp))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type with OID %u does not exist", type_id)));
		typtup = (Form_pg_type) GETSTRUCT(tp);
		if (!typtup->typisdefined)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type \"%s\" is only a shell",
							NameStr(typtup->typname))));

		/* Now make the typcache entry */
		typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
												  (void *) &type_id,
												  HASH_ENTER, &found);
		Assert(!found);			/* it wasn't there a moment ago */

		/* Zero the whole entry; all "checked" flags start clear */
		MemSet(typentry, 0, sizeof(TypeCacheEntry));
		typentry->type_id = type_id;
		typentry->typlen = typtup->typlen;
		typentry->typbyval = typtup->typbyval;
		typentry->typalign = typtup->typalign;
		typentry->typstorage = typtup->typstorage;
		typentry->typtype = typtup->typtype;
		typentry->typrelid = typtup->typrelid;

		/* If it's a domain, immediately thread it into the domain cache list */
		if (typentry->typtype == TYPTYPE_DOMAIN)
		{
			typentry->nextDomain = firstDomainTypeEntry;
			firstDomainTypeEntry = typentry;
		}

		ReleaseSysCache(tp);
	}

	/*
	 * Look up opclasses if we haven't already and any dependent info is
	 * requested.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
				  TYPECACHE_CMP_PROC |
				  TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
				  TYPECACHE_BTREE_OPFAMILY)) &&
		!(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
	{
		Oid			opclass;

		opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
		if (OidIsValid(opclass))
		{
			typentry->btree_opf = get_opclass_family(opclass);
			typentry->btree_opintype = get_opclass_input_type(opclass);
		}
		else
		{
			typentry->btree_opf = typentry->btree_opintype = InvalidOid;
		}

		/*
		 * Reset information derived from btree opclass.  Note in particular
		 * that we'll redetermine the eq_opr even if we previously found one;
		 * this matters in case a btree opclass has been added to a type that
		 * previously had only a hash opclass.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
							 TCFLAGS_CHECKED_LT_OPR |
							 TCFLAGS_CHECKED_GT_OPR |
							 TCFLAGS_CHECKED_CMP_PROC);
		typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
	}

	/*
	 * If we need to look up equality operator, and there's no btree opclass,
	 * force lookup of hash opclass.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
		typentry->btree_opf == InvalidOid)
		flags |= TYPECACHE_HASH_OPFAMILY;

	if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
				  TYPECACHE_HASH_OPFAMILY)) &&
		!(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
	{
		Oid			opclass;

		opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
		if (OidIsValid(opclass))
		{
			typentry->hash_opf = get_opclass_family(opclass);
			typentry->hash_opintype = get_opclass_input_type(opclass);
		}
		else
		{
			typentry->hash_opf = typentry->hash_opintype = InvalidOid;
		}

		/*
		 * Reset information derived from hash opclass.  We do *not* reset the
		 * eq_opr; if we already found one from the btree opclass, that
		 * decision is still good.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
		typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
	}

	/*
	 * Look for requested operators and functions, if we haven't already.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
	{
		Oid			eq_opr = InvalidOid;

		/* Prefer the btree opfamily's notion of equality, if available */
		if (typentry->btree_opf != InvalidOid)
			eq_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTEqualStrategyNumber);
		if (eq_opr == InvalidOid &&
			typentry->hash_opf != InvalidOid)
			eq_opr = get_opfamily_member(typentry->hash_opf,
										 typentry->hash_opintype,
										 typentry->hash_opintype,
										 HTEqualStrategyNumber);

		/*
		 * If the proposed equality operator is array_eq or record_eq, check
		 * to see if the element type or column types support equality.  If
		 * not, array_eq or record_eq would fail at runtime, so we don't want
		 * to report that the type has equality.
		 */
		if (eq_opr == ARRAY_EQ_OP &&
			!array_element_has_equality(typentry))
			eq_opr = InvalidOid;
		else if (eq_opr == RECORD_EQ_OP &&
				 !record_fields_have_equality(typentry))
			eq_opr = InvalidOid;

		/* Force update of eq_opr_finfo only if we're changing state */
		if (typentry->eq_opr != eq_opr)
			typentry->eq_opr_finfo.fn_oid = InvalidOid;

		typentry->eq_opr = eq_opr;

		/*
		 * Reset info about hash function whenever we pick up new info about
		 * equality operator.  This is so we can ensure that the hash function
		 * matches the operator.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
		typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
	}
	if ((flags & TYPECACHE_LT_OPR) &&
		!(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
	{
		Oid			lt_opr = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			lt_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTLessStrategyNumber);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (lt_opr == ARRAY_LT_OP &&
			!array_element_has_compare(typentry))
			lt_opr = InvalidOid;
		else if (lt_opr == RECORD_LT_OP &&
				 !record_fields_have_compare(typentry))
			lt_opr = InvalidOid;

		typentry->lt_opr = lt_opr;
		typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
	}
	if ((flags & TYPECACHE_GT_OPR) &&
		!(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
	{
		Oid			gt_opr = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			gt_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTGreaterStrategyNumber);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (gt_opr == ARRAY_GT_OP &&
			!array_element_has_compare(typentry))
			gt_opr = InvalidOid;
		else if (gt_opr == RECORD_GT_OP &&
				 !record_fields_have_compare(typentry))
			gt_opr = InvalidOid;

		typentry->gt_opr = gt_opr;
		typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
	}
	if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
	{
		Oid			cmp_proc = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			cmp_proc = get_opfamily_proc(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTORDER_PROC);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (cmp_proc == F_BTARRAYCMP &&
			!array_element_has_compare(typentry))
			cmp_proc = InvalidOid;
		else if (cmp_proc == F_BTRECORDCMP &&
				 !record_fields_have_compare(typentry))
			cmp_proc = InvalidOid;

		/* Force update of cmp_proc_finfo only if we're changing state */
		if (typentry->cmp_proc != cmp_proc)
			typentry->cmp_proc_finfo.fn_oid = InvalidOid;

		typentry->cmp_proc = cmp_proc;
		typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
	}
	if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
	{
		Oid			hash_proc = InvalidOid;

		/*
		 * We insist that the eq_opr, if one has been determined, match the
		 * hash opclass; else report there is no hash function.
		 */
		if (typentry->hash_opf != InvalidOid &&
			(!OidIsValid(typentry->eq_opr) ||
			 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
													 typentry->hash_opintype,
													 typentry->hash_opintype,
													 HTEqualStrategyNumber)))
			hash_proc = get_opfamily_proc(typentry->hash_opf,
										  typentry->hash_opintype,
										  typentry->hash_opintype,
										  HASHPROC);

		/*
		 * As above, make sure hash_array will succeed.  We don't currently
		 * support hashing for composite types, but when we do, we'll need
		 * more logic here to check that case too.
		 */
		if (hash_proc == F_HASH_ARRAY &&
			!array_element_has_hashing(typentry))
			hash_proc = InvalidOid;

		/*
		 * Likewise for hash_range.
		 */
		if (hash_proc == F_HASH_RANGE &&
			!range_element_has_hashing(typentry))
			hash_proc = InvalidOid;

		/* Force update of hash_proc_finfo only if we're changing state */
		if (typentry->hash_proc != hash_proc)
			typentry->hash_proc_finfo.fn_oid = InvalidOid;

		typentry->hash_proc = hash_proc;
		typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
	}

	/*
	 * Set up fmgr lookup info as requested
	 *
	 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
	 * which is not quite right (they're really in the hash table's private
	 * memory context) but this will do for our purposes.
	 *
	 * Note: the code above avoids invalidating the finfo structs unless the
	 * referenced operator/function OID actually changes.  This is to prevent
	 * unnecessary leakage of any subsidiary data attached to an finfo, since
	 * that would cause session-lifespan memory leaks.
	 */
	if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
		typentry->eq_opr_finfo.fn_oid == InvalidOid &&
		typentry->eq_opr != InvalidOid)
	{
		Oid			eq_opr_func;

		eq_opr_func = get_opcode(typentry->eq_opr);
		if (eq_opr_func != InvalidOid)
			fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
						  CacheMemoryContext);
	}
	if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
		typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
		typentry->cmp_proc != InvalidOid)
	{
		fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
					  CacheMemoryContext);
	}
	if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
		typentry->hash_proc_finfo.fn_oid == InvalidOid &&
		typentry->hash_proc != InvalidOid)
	{
		fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
					  CacheMemoryContext);
	}

	/*
	 * If it's a composite type (row type), get tupdesc if requested
	 */
	if ((flags & TYPECACHE_TUPDESC) &&
		typentry->tupDesc == NULL &&
		typentry->typtype == TYPTYPE_COMPOSITE)
	{
		load_typcache_tupdesc(typentry);
	}

	/*
	 * If requested, get information about a range type
	 */
	if ((flags & TYPECACHE_RANGE_INFO) &&
		typentry->rngelemtype == NULL &&
		typentry->typtype == TYPTYPE_RANGE)
	{
		load_rangetype_info(typentry);
	}

	/*
	 * If requested, get information about a domain type
	 */
	if ((flags & TYPECACHE_DOMAIN_INFO) &&
		(typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
	{
		load_domaintype_info(typentry);
	}

	return typentry;
}
575
576 /*
577 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
578 */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
	Relation	rel;

	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/* The refcount we took keeps the tupdesc alive after the rel is closed */
	relation_close(rel, AccessShareLock);
}
603
604 /*
605 * load_rangetype_info --- helper routine to set up range type information
606 */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
	Form_pg_range pg_range;
	HeapTuple	tup;
	Oid			subtypeOid;
	Oid			opclassOid;
	Oid			canonicalOid;
	Oid			subdiffOid;
	Oid			opfamilyOid;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
	pg_range = (Form_pg_range) GETSTRUCT(tup);

	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	ReleaseSysCache(tup);

	/* get opclass properties and look up the comparison function */
	opfamilyOid = get_opclass_family(opclassOid);
	opcintype = get_opclass_input_type(opclassOid);

	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs (canonical/subdiff are optional) */
	fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
				  CacheMemoryContext);
	if (OidIsValid(canonicalOid))
		fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
					  CacheMemoryContext);
	if (OidIsValid(subdiffOid))
		fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
					  CacheMemoryContext);

	/* Lastly, set up link to the element type --- this marks data valid */
	typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
659
660
661 /*
662 * load_domaintype_info --- helper routine to set up domain constraint info
663 *
664 * Note: we assume we're called in a relatively short-lived context, so it's
665 * okay to leak data into the current context while scanning pg_constraint.
666 * We build the new DomainConstraintCache data in a context underneath
667 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
668 * complete.
669 */
670 static void
load_domaintype_info(TypeCacheEntry * typentry)671 load_domaintype_info(TypeCacheEntry *typentry)
672 {
673 Oid typeOid = typentry->type_id;
674 DomainConstraintCache *dcc;
675 bool notNull = false;
676 DomainConstraintState **ccons;
677 int cconslen;
678 Relation conRel;
679 MemoryContext oldcxt;
680
681 /*
682 * If we're here, any existing constraint info is stale, so release it.
683 * For safety, be sure to null the link before trying to delete the data.
684 */
685 if (typentry->domainData)
686 {
687 dcc = typentry->domainData;
688 typentry->domainData = NULL;
689 decr_dcc_refcount(dcc);
690 }
691
692 /*
693 * We try to optimize the common case of no domain constraints, so don't
694 * create the dcc object and context until we find a constraint. Likewise
695 * for the temp sorting array.
696 */
697 dcc = NULL;
698 ccons = NULL;
699 cconslen = 0;
700
701 /*
702 * Scan pg_constraint for relevant constraints. We want to find
703 * constraints for not just this domain, but any ancestor domains, so the
704 * outer loop crawls up the domain stack.
705 */
706 conRel = heap_open(ConstraintRelationId, AccessShareLock);
707
708 for (;;)
709 {
710 HeapTuple tup;
711 HeapTuple conTup;
712 Form_pg_type typTup;
713 int nccons = 0;
714 ScanKeyData key[1];
715 SysScanDesc scan;
716
717 tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
718 if (!HeapTupleIsValid(tup))
719 elog(ERROR, "cache lookup failed for type %u", typeOid);
720 typTup = (Form_pg_type) GETSTRUCT(tup);
721
722 if (typTup->typtype != TYPTYPE_DOMAIN)
723 {
724 /* Not a domain, so done */
725 ReleaseSysCache(tup);
726 break;
727 }
728
729 /* Test for NOT NULL Constraint */
730 if (typTup->typnotnull)
731 notNull = true;
732
733 /* Look for CHECK Constraints on this domain */
734 ScanKeyInit(&key[0],
735 Anum_pg_constraint_contypid,
736 BTEqualStrategyNumber, F_OIDEQ,
737 ObjectIdGetDatum(typeOid));
738
739 scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
740 NULL, 1, key);
741
742 while (HeapTupleIsValid(conTup = systable_getnext(scan)))
743 {
744 Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
745 Datum val;
746 bool isNull;
747 char *constring;
748 Expr *check_expr;
749 DomainConstraintState *r;
750
751 /* Ignore non-CHECK constraints (presently, shouldn't be any) */
752 if (c->contype != CONSTRAINT_CHECK)
753 continue;
754
755 /* Not expecting conbin to be NULL, but we'll test for it anyway */
756 val = fastgetattr(conTup, Anum_pg_constraint_conbin,
757 conRel->rd_att, &isNull);
758 if (isNull)
759 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
760 NameStr(typTup->typname), NameStr(c->conname));
761
762 /* Convert conbin to C string in caller context */
763 constring = TextDatumGetCString(val);
764
765 /* Create the DomainConstraintCache object and context if needed */
766 if (dcc == NULL)
767 {
768 MemoryContext cxt;
769
770 cxt = AllocSetContextCreate(CurrentMemoryContext,
771 "Domain constraints",
772 ALLOCSET_SMALL_SIZES);
773 dcc = (DomainConstraintCache *)
774 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
775 dcc->constraints = NIL;
776 dcc->dccContext = cxt;
777 dcc->dccRefCount = 0;
778 }
779
780 /* Create node trees in DomainConstraintCache's context */
781 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
782
783 check_expr = (Expr *) stringToNode(constring);
784
785 /* ExecInitExpr will assume we've planned the expression */
786 check_expr = expression_planner(check_expr);
787
788 r = makeNode(DomainConstraintState);
789 r->constrainttype = DOM_CONSTRAINT_CHECK;
790 r->name = pstrdup(NameStr(c->conname));
791 r->check_expr = check_expr;
792 r->check_exprstate = NULL;
793
794 MemoryContextSwitchTo(oldcxt);
795
796 /* Accumulate constraints in an array, for sorting below */
797 if (ccons == NULL)
798 {
799 cconslen = 8;
800 ccons = (DomainConstraintState **)
801 palloc(cconslen * sizeof(DomainConstraintState *));
802 }
803 else if (nccons >= cconslen)
804 {
805 cconslen *= 2;
806 ccons = (DomainConstraintState **)
807 repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
808 }
809 ccons[nccons++] = r;
810 }
811
812 systable_endscan(scan);
813
814 if (nccons > 0)
815 {
816 /*
817 * Sort the items for this domain, so that CHECKs are applied in a
818 * deterministic order.
819 */
820 if (nccons > 1)
821 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
822
823 /*
824 * Now attach them to the overall list. Use lcons() here because
825 * constraints of parent domains should be applied earlier.
826 */
827 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
828 while (nccons > 0)
829 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
830 MemoryContextSwitchTo(oldcxt);
831 }
832
833 /* loop to next domain in stack */
834 typeOid = typTup->typbasetype;
835 ReleaseSysCache(tup);
836 }
837
838 heap_close(conRel, AccessShareLock);
839
840 /*
841 * Only need to add one NOT NULL check regardless of how many domains in
842 * the stack request it.
843 */
844 if (notNull)
845 {
846 DomainConstraintState *r;
847
848 /* Create the DomainConstraintCache object and context if needed */
849 if (dcc == NULL)
850 {
851 MemoryContext cxt;
852
853 cxt = AllocSetContextCreate(CurrentMemoryContext,
854 "Domain constraints",
855 ALLOCSET_SMALL_SIZES);
856 dcc = (DomainConstraintCache *)
857 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
858 dcc->constraints = NIL;
859 dcc->dccContext = cxt;
860 dcc->dccRefCount = 0;
861 }
862
863 /* Create node trees in DomainConstraintCache's context */
864 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
865
866 r = makeNode(DomainConstraintState);
867
868 r->constrainttype = DOM_CONSTRAINT_NOTNULL;
869 r->name = pstrdup("NOT NULL");
870 r->check_expr = NULL;
871 r->check_exprstate = NULL;
872
873 /* lcons to apply the nullness check FIRST */
874 dcc->constraints = lcons(r, dcc->constraints);
875
876 MemoryContextSwitchTo(oldcxt);
877 }
878
879 /*
880 * If we made a constraint object, move it into CacheMemoryContext and
881 * attach it to the typcache entry.
882 */
883 if (dcc)
884 {
885 MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
886 typentry->domainData = dcc;
887 dcc->dccRefCount++; /* count the typcache's reference */
888 }
889
890 /* Either way, the typcache entry's domain data is now valid. */
891 typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
892 }
893
894 /*
895 * qsort comparator to sort DomainConstraintState pointers by name
896 */
897 static int
dcs_cmp(const void * a,const void * b)898 dcs_cmp(const void *a, const void *b)
899 {
900 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
901 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
902
903 return strcmp((*ca)->name, (*cb)->name);
904 }
905
906 /*
907 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
908 * and free it if no references remain
909 */
910 static void
decr_dcc_refcount(DomainConstraintCache * dcc)911 decr_dcc_refcount(DomainConstraintCache *dcc)
912 {
913 Assert(dcc->dccRefCount > 0);
914 if (--(dcc->dccRefCount) <= 0)
915 MemoryContextDelete(dcc->dccContext);
916 }
917
918 /*
919 * Context reset/delete callback for a DomainConstraintRef
920 */
921 static void
dccref_deletion_callback(void * arg)922 dccref_deletion_callback(void *arg)
923 {
924 DomainConstraintRef *ref = (DomainConstraintRef *) arg;
925 DomainConstraintCache *dcc = ref->dcc;
926
927 /* Paranoia --- be sure link is nulled before trying to release */
928 if (dcc)
929 {
930 ref->constraints = NIL;
931 ref->dcc = NULL;
932 decr_dcc_refcount(dcc);
933 }
934 }
935
936 /*
937 * prep_domain_constraints --- prepare domain constraints for execution
938 *
939 * The expression trees stored in the DomainConstraintCache's list are
940 * converted to executable expression state trees stored in execctx.
941 */
942 static List *
prep_domain_constraints(List * constraints,MemoryContext execctx)943 prep_domain_constraints(List *constraints, MemoryContext execctx)
944 {
945 List *result = NIL;
946 MemoryContext oldcxt;
947 ListCell *lc;
948
949 oldcxt = MemoryContextSwitchTo(execctx);
950
951 foreach(lc, constraints)
952 {
953 DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
954 DomainConstraintState *newr;
955
956 newr = makeNode(DomainConstraintState);
957 newr->constrainttype = r->constrainttype;
958 newr->name = r->name;
959 newr->check_expr = r->check_expr;
960 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
961
962 result = lappend(result, newr);
963 }
964
965 MemoryContextSwitchTo(oldcxt);
966
967 return result;
968 }
969
970 /*
971 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
972 *
973 * Caller must tell us the MemoryContext in which the DomainConstraintRef
974 * lives. The ref will be cleaned up when that context is reset/deleted.
975 *
976 * Caller must also tell us whether it wants check_exprstate fields to be
977 * computed in the DomainConstraintState nodes attached to this ref.
978 * If it doesn't, we need not make a copy of the DomainConstraintState list.
979 */
980 void
InitDomainConstraintRef(Oid type_id,DomainConstraintRef * ref,MemoryContext refctx,bool need_exprstate)981 InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
982 MemoryContext refctx, bool need_exprstate)
983 {
984 /* Look up the typcache entry --- we assume it survives indefinitely */
985 ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
986 ref->need_exprstate = need_exprstate;
987 /* For safety, establish the callback before acquiring a refcount */
988 ref->refctx = refctx;
989 ref->dcc = NULL;
990 ref->callback.func = dccref_deletion_callback;
991 ref->callback.arg = (void *) ref;
992 MemoryContextRegisterResetCallback(refctx, &ref->callback);
993 /* Acquire refcount if there are constraints, and set up exported list */
994 if (ref->tcache->domainData)
995 {
996 ref->dcc = ref->tcache->domainData;
997 ref->dcc->dccRefCount++;
998 if (ref->need_exprstate)
999 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1000 ref->refctx);
1001 else
1002 ref->constraints = ref->dcc->constraints;
1003 }
1004 else
1005 ref->constraints = NIL;
1006 }
1007
1008 /*
1009 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1010 *
1011 * If the domain's constraint set changed, ref->constraints is updated to
1012 * point at a new list of cached constraints.
1013 *
1014 * In the normal case where nothing happened to the domain, this is cheap
1015 * enough that it's reasonable (and expected) to check before *each* use
1016 * of the constraint info.
1017 */
1018 void
UpdateDomainConstraintRef(DomainConstraintRef * ref)1019 UpdateDomainConstraintRef(DomainConstraintRef *ref)
1020 {
1021 TypeCacheEntry *typentry = ref->tcache;
1022
1023 /* Make sure typcache entry's data is up to date */
1024 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1025 typentry->typtype == TYPTYPE_DOMAIN)
1026 load_domaintype_info(typentry);
1027
1028 /* Transfer to ref object if there's new info, adjusting refcounts */
1029 if (ref->dcc != typentry->domainData)
1030 {
1031 /* Paranoia --- be sure link is nulled before trying to release */
1032 DomainConstraintCache *dcc = ref->dcc;
1033
1034 if (dcc)
1035 {
1036 /*
1037 * Note: we just leak the previous list of executable domain
1038 * constraints. Alternatively, we could keep those in a child
1039 * context of ref->refctx and free that context at this point.
1040 * However, in practice this code path will be taken so seldom
1041 * that the extra bookkeeping for a child context doesn't seem
1042 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1043 */
1044 ref->constraints = NIL;
1045 ref->dcc = NULL;
1046 decr_dcc_refcount(dcc);
1047 }
1048 dcc = typentry->domainData;
1049 if (dcc)
1050 {
1051 ref->dcc = dcc;
1052 dcc->dccRefCount++;
1053 if (ref->need_exprstate)
1054 ref->constraints = prep_domain_constraints(dcc->constraints,
1055 ref->refctx);
1056 else
1057 ref->constraints = dcc->constraints;
1058 }
1059 }
1060 }
1061
1062 /*
1063 * DomainHasConstraints --- utility routine to check if a domain has constraints
1064 *
1065 * This is defined to return false, not fail, if type is not a domain.
1066 */
1067 bool
DomainHasConstraints(Oid type_id)1068 DomainHasConstraints(Oid type_id)
1069 {
1070 TypeCacheEntry *typentry;
1071
1072 /*
1073 * Note: a side effect is to cause the typcache's domain data to become
1074 * valid. This is fine since we'll likely need it soon if there is any.
1075 */
1076 typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
1077
1078 return (typentry->domainData != NULL);
1079 }
1080
1081
1082 /*
1083 * array_element_has_equality and friends are helper routines to check
1084 * whether we should believe that array_eq and related functions will work
1085 * on the given array type or composite type.
1086 *
1087 * The logic above may call these repeatedly on the same type entry, so we
1088 * make use of the typentry->flags field to cache the results once known.
1089 * Also, we assume that we'll probably want all these facts about the type
1090 * if we want any, so we cache them all using only one lookup of the
1091 * component datatype(s).
1092 */
1093
1094 static bool
array_element_has_equality(TypeCacheEntry * typentry)1095 array_element_has_equality(TypeCacheEntry *typentry)
1096 {
1097 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1098 cache_array_element_properties(typentry);
1099 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1100 }
1101
1102 static bool
array_element_has_compare(TypeCacheEntry * typentry)1103 array_element_has_compare(TypeCacheEntry *typentry)
1104 {
1105 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1106 cache_array_element_properties(typentry);
1107 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1108 }
1109
1110 static bool
array_element_has_hashing(TypeCacheEntry * typentry)1111 array_element_has_hashing(TypeCacheEntry *typentry)
1112 {
1113 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1114 cache_array_element_properties(typentry);
1115 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1116 }
1117
1118 static void
cache_array_element_properties(TypeCacheEntry * typentry)1119 cache_array_element_properties(TypeCacheEntry *typentry)
1120 {
1121 Oid elem_type = get_base_element_type(typentry->type_id);
1122
1123 if (OidIsValid(elem_type))
1124 {
1125 TypeCacheEntry *elementry;
1126
1127 elementry = lookup_type_cache(elem_type,
1128 TYPECACHE_EQ_OPR |
1129 TYPECACHE_CMP_PROC |
1130 TYPECACHE_HASH_PROC);
1131 if (OidIsValid(elementry->eq_opr))
1132 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1133 if (OidIsValid(elementry->cmp_proc))
1134 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1135 if (OidIsValid(elementry->hash_proc))
1136 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1137 }
1138 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1139 }
1140
1141 /*
1142 * Likewise, some helper functions for composite types.
1143 */
1144
1145 static bool
record_fields_have_equality(TypeCacheEntry * typentry)1146 record_fields_have_equality(TypeCacheEntry *typentry)
1147 {
1148 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1149 cache_record_field_properties(typentry);
1150 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1151 }
1152
1153 static bool
record_fields_have_compare(TypeCacheEntry * typentry)1154 record_fields_have_compare(TypeCacheEntry *typentry)
1155 {
1156 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1157 cache_record_field_properties(typentry);
1158 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1159 }
1160
1161 static void
cache_record_field_properties(TypeCacheEntry * typentry)1162 cache_record_field_properties(TypeCacheEntry *typentry)
1163 {
1164 /*
1165 * For type RECORD, we can't really tell what will work, since we don't
1166 * have access here to the specific anonymous type. Just assume that
1167 * everything will (we may get a failure at runtime ...)
1168 */
1169 if (typentry->type_id == RECORDOID)
1170 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1171 TCFLAGS_HAVE_FIELD_COMPARE);
1172 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1173 {
1174 TupleDesc tupdesc;
1175 int newflags;
1176 int i;
1177
1178 /* Fetch composite type's tupdesc if we don't have it already */
1179 if (typentry->tupDesc == NULL)
1180 load_typcache_tupdesc(typentry);
1181 tupdesc = typentry->tupDesc;
1182
1183 /* Must bump the refcount while we do additional catalog lookups */
1184 IncrTupleDescRefCount(tupdesc);
1185
1186 /* Have each property if all non-dropped fields have the property */
1187 newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1188 TCFLAGS_HAVE_FIELD_COMPARE);
1189 for (i = 0; i < tupdesc->natts; i++)
1190 {
1191 TypeCacheEntry *fieldentry;
1192
1193 if (tupdesc->attrs[i]->attisdropped)
1194 continue;
1195
1196 fieldentry = lookup_type_cache(tupdesc->attrs[i]->atttypid,
1197 TYPECACHE_EQ_OPR |
1198 TYPECACHE_CMP_PROC);
1199 if (!OidIsValid(fieldentry->eq_opr))
1200 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1201 if (!OidIsValid(fieldentry->cmp_proc))
1202 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1203
1204 /* We can drop out of the loop once we disprove all bits */
1205 if (newflags == 0)
1206 break;
1207 }
1208 typentry->flags |= newflags;
1209
1210 DecrTupleDescRefCount(tupdesc);
1211 }
1212 typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
1213 }
1214
1215 /*
1216 * Likewise, some helper functions for range types.
1217 *
1218 * We can borrow the flag bits for array element properties to use for range
1219 * element properties, since those flag bits otherwise have no use in a
1220 * range type's typcache entry.
1221 */
1222
1223 static bool
range_element_has_hashing(TypeCacheEntry * typentry)1224 range_element_has_hashing(TypeCacheEntry *typentry)
1225 {
1226 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1227 cache_range_element_properties(typentry);
1228 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1229 }
1230
1231 static void
cache_range_element_properties(TypeCacheEntry * typentry)1232 cache_range_element_properties(TypeCacheEntry *typentry)
1233 {
1234 /* load up subtype link if we didn't already */
1235 if (typentry->rngelemtype == NULL &&
1236 typentry->typtype == TYPTYPE_RANGE)
1237 load_rangetype_info(typentry);
1238
1239 if (typentry->rngelemtype != NULL)
1240 {
1241 TypeCacheEntry *elementry;
1242
1243 /* might need to calculate subtype's hash function properties */
1244 elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1245 TYPECACHE_HASH_PROC);
1246 if (OidIsValid(elementry->hash_proc))
1247 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1248 }
1249 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1250 }
1251
1252
1253 /*
1254 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1255 *
1256 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1257 * hasn't had its refcount bumped.
1258 */
1259 static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id,int32 typmod,bool noError)1260 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1261 {
1262 if (type_id != RECORDOID)
1263 {
1264 /*
1265 * It's a named composite type, so use the regular typcache.
1266 */
1267 TypeCacheEntry *typentry;
1268
1269 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1270 if (typentry->tupDesc == NULL && !noError)
1271 ereport(ERROR,
1272 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1273 errmsg("type %s is not composite",
1274 format_type_be(type_id))));
1275 return typentry->tupDesc;
1276 }
1277 else
1278 {
1279 /*
1280 * It's a transient record type, so look in our record-type table.
1281 */
1282 if (typmod < 0 || typmod >= NextRecordTypmod)
1283 {
1284 if (!noError)
1285 ereport(ERROR,
1286 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1287 errmsg("record type has not been registered")));
1288 return NULL;
1289 }
1290 return RecordCacheArray[typmod];
1291 }
1292 }
1293
1294 /*
1295 * lookup_rowtype_tupdesc
1296 *
1297 * Given a typeid/typmod that should describe a known composite type,
1298 * return the tuple descriptor for the type. Will ereport on failure.
1299 * (Use ereport because this is reachable with user-specified OIDs,
1300 * for example from record_in().)
1301 *
1302 * Note: on success, we increment the refcount of the returned TupleDesc,
1303 * and log the reference in CurrentResourceOwner. Caller should call
1304 * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1305 */
1306 TupleDesc
lookup_rowtype_tupdesc(Oid type_id,int32 typmod)1307 lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1308 {
1309 TupleDesc tupDesc;
1310
1311 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1312 IncrTupleDescRefCount(tupDesc);
1313 return tupDesc;
1314 }
1315
1316 /*
1317 * lookup_rowtype_tupdesc_noerror
1318 *
1319 * As above, but if the type is not a known composite type and noError
1320 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1321 * type_id is passed, you'll get an ereport anyway.)
1322 */
1323 TupleDesc
lookup_rowtype_tupdesc_noerror(Oid type_id,int32 typmod,bool noError)1324 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1325 {
1326 TupleDesc tupDesc;
1327
1328 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1329 if (tupDesc != NULL)
1330 IncrTupleDescRefCount(tupDesc);
1331 return tupDesc;
1332 }
1333
1334 /*
1335 * lookup_rowtype_tupdesc_copy
1336 *
1337 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1338 * copied into the CurrentMemoryContext and is not reference-counted.
1339 */
1340 TupleDesc
lookup_rowtype_tupdesc_copy(Oid type_id,int32 typmod)1341 lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1342 {
1343 TupleDesc tmp;
1344
1345 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1346 return CreateTupleDescCopyConstr(tmp);
1347 }
1348
1349
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 *
 * Side effects: on first call, creates RecordCacheHash (and ensures
 * CacheMemoryContext exists); may grow RecordCacheArray and advance
 * NextRecordTypmod when a new typmod is assigned.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
    RecordCacheEntry *recentry;
    TupleDesc   entDesc;
    Oid         hashkey[REC_HASH_KEYS];
    bool        found;
    int         i;
    ListCell   *l;
    int32       newtypmod;
    MemoryContext oldcxt;

    Assert(tupDesc->tdtypeid == RECORDOID);

    if (RecordCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL     ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
        ctl.entrysize = sizeof(RecordCacheEntry);
        RecordCacheHash = hash_create("Record information cache", 64,
                                      &ctl, HASH_ELEM | HASH_BLOBS);

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();
    }

    /*
     * Find or create a hashtable entry for this hash class.  Only the first
     * REC_HASH_KEYS column type OIDs participate in the hash key; tupdescs
     * that collide on the key are kept on the entry's list and distinguished
     * by equalTupleDescs below.
     */
    MemSet(hashkey, 0, sizeof(hashkey));
    for (i = 0; i < tupDesc->natts; i++)
    {
        if (i >= REC_HASH_KEYS)
            break;
        hashkey[i] = tupDesc->attrs[i]->atttypid;
    }
    recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
                                                (void *) hashkey,
                                                HASH_ENTER, &found);
    if (!found)
    {
        /* New entry ... hash_search initialized only the hash key */
        recentry->tupdescs = NIL;
    }

    /* Look for existing record cache entry */
    foreach(l, recentry->tupdescs)
    {
        entDesc = (TupleDesc) lfirst(l);
        if (equalTupleDescs(tupDesc, entDesc))
        {
            /* Already registered: just report the existing typmod */
            tupDesc->tdtypmod = entDesc->tdtypmod;
            return;
        }
    }

    /* Not present, so need to manufacture an entry */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /* Ensure RecordCacheArray has room for another typmod slot */
    if (RecordCacheArray == NULL)
    {
        RecordCacheArray = (TupleDesc *) palloc(64 * sizeof(TupleDesc));
        RecordCacheArrayLen = 64;
    }
    else if (NextRecordTypmod >= RecordCacheArrayLen)
    {
        int32       newlen = RecordCacheArrayLen * 2;

        RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
                                                  newlen * sizeof(TupleDesc));
        RecordCacheArrayLen = newlen;
    }

    /* if fail in subrs, no damage except possibly some wasted memory... */
    entDesc = CreateTupleDescCopy(tupDesc);
    recentry->tupdescs = lcons(entDesc, recentry->tupdescs);
    /* mark it as a reference-counted tupdesc */
    entDesc->tdrefcount = 1;
    /* now it's safe to advance NextRecordTypmod */
    newtypmod = NextRecordTypmod++;
    entDesc->tdtypmod = newtypmod;
    RecordCacheArray[newtypmod] = entDesc;

    /* report to caller as well */
    tupDesc->tdtypmod = newtypmod;

    MemoryContextSwitchTo(oldcxt);
}
1447
1448 /*
1449 * TypeCacheRelCallback
1450 * Relcache inval callback function
1451 *
1452 * Delete the cached tuple descriptor (if any) for the given rel's composite
1453 * type, or for all composite types if relid == InvalidOid. Also reset
1454 * whatever info we have cached about the composite type's comparability.
1455 *
1456 * This is called when a relcache invalidation event occurs for the given
1457 * relid. We must scan the whole typcache hash since we don't know the
1458 * type OID corresponding to the relid. We could do a direct search if this
1459 * were a syscache-flush callback on pg_type, but then we would need all
1460 * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
1461 * invals against the rel's pg_type OID. The extra SI signaling could very
1462 * well cost more than we'd save, since in most usages there are not very
1463 * many entries in a backend's typcache. The risk of bugs-of-omission seems
1464 * high, too.
1465 *
1466 * Another possibility, with only localized impact, is to maintain a second
1467 * hashtable that indexes composite-type typcache entries by their typrelid.
1468 * But it's still not clear it's worth the trouble.
1469 */
1470 static void
TypeCacheRelCallback(Datum arg,Oid relid)1471 TypeCacheRelCallback(Datum arg, Oid relid)
1472 {
1473 HASH_SEQ_STATUS status;
1474 TypeCacheEntry *typentry;
1475
1476 /* TypeCacheHash must exist, else this callback wouldn't be registered */
1477 hash_seq_init(&status, TypeCacheHash);
1478 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1479 {
1480 if (typentry->typtype != TYPTYPE_COMPOSITE)
1481 continue; /* skip non-composites */
1482
1483 /* Skip if no match, unless we're zapping all composite types */
1484 if (relid != typentry->typrelid && relid != InvalidOid)
1485 continue;
1486
1487 /* Delete tupdesc if we have it */
1488 if (typentry->tupDesc != NULL)
1489 {
1490 /*
1491 * Release our refcount, and free the tupdesc if none remain.
1492 * (Can't use DecrTupleDescRefCount because this reference is not
1493 * logged in current resource owner.)
1494 */
1495 Assert(typentry->tupDesc->tdrefcount > 0);
1496 if (--typentry->tupDesc->tdrefcount == 0)
1497 FreeTupleDesc(typentry->tupDesc);
1498 typentry->tupDesc = NULL;
1499 }
1500
1501 /* Reset equality/comparison/hashing validity information */
1502 typentry->flags = 0;
1503 }
1504 }
1505
1506 /*
1507 * TypeCacheOpcCallback
1508 * Syscache inval callback function
1509 *
1510 * This is called when a syscache invalidation event occurs for any pg_opclass
1511 * row. In principle we could probably just invalidate data dependent on the
1512 * particular opclass, but since updates on pg_opclass are rare in production
1513 * it doesn't seem worth a lot of complication: we just mark all cached data
1514 * invalid.
1515 *
1516 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
1517 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
1518 * is not allowed to be used to add/drop the primary operators and functions
1519 * of an opclass, only cross-type members of a family; and the latter sorts
1520 * of members are not going to get cached here.
1521 */
1522 static void
TypeCacheOpcCallback(Datum arg,int cacheid,uint32 hashvalue)1523 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
1524 {
1525 HASH_SEQ_STATUS status;
1526 TypeCacheEntry *typentry;
1527
1528 /* TypeCacheHash must exist, else this callback wouldn't be registered */
1529 hash_seq_init(&status, TypeCacheHash);
1530 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1531 {
1532 /* Reset equality/comparison/hashing validity information */
1533 typentry->flags = 0;
1534 }
1535 }
1536
1537 /*
1538 * TypeCacheConstrCallback
1539 * Syscache inval callback function
1540 *
1541 * This is called when a syscache invalidation event occurs for any
1542 * pg_constraint or pg_type row. We flush information about domain
1543 * constraints when this happens.
1544 *
1545 * It's slightly annoying that we can't tell whether the inval event was for a
1546 * domain constraint/type record or not; there's usually more update traffic
1547 * for table constraints/types than domain constraints, so we'll do a lot of
1548 * useless flushes. Still, this is better than the old no-caching-at-all
1549 * approach to domain constraints.
1550 */
1551 static void
TypeCacheConstrCallback(Datum arg,int cacheid,uint32 hashvalue)1552 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
1553 {
1554 TypeCacheEntry *typentry;
1555
1556 /*
1557 * Because this is called very frequently, and typically very few of the
1558 * typcache entries are for domains, we don't use hash_seq_search here.
1559 * Instead we thread all the domain-type entries together so that we can
1560 * visit them cheaply.
1561 */
1562 for (typentry = firstDomainTypeEntry;
1563 typentry != NULL;
1564 typentry = typentry->nextDomain)
1565 {
1566 /* Reset domain constraint validity information */
1567 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
1568 }
1569 }
1570
1571
1572 /*
1573 * Check if given OID is part of the subset that's sortable by comparisons
1574 */
1575 static inline bool
enum_known_sorted(TypeCacheEnumData * enumdata,Oid arg)1576 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
1577 {
1578 Oid offset;
1579
1580 if (arg < enumdata->bitmap_base)
1581 return false;
1582 offset = arg - enumdata->bitmap_base;
1583 if (offset > (Oid) INT_MAX)
1584 return false;
1585 return bms_is_member((int) offset, enumdata->sorted_values);
1586 }
1587
1588
/*
 * compare_values_of_enum
 *		Compare two members of an enum type.
 *		Return <0, 0, or >0 according as arg1 <, =, or > arg2.
 *
 * Note: currently, the enumData cache is refreshed only if we are asked
 * to compare an enum value that is not already in the cache.  This is okay
 * because there is no support for re-ordering existing values, so comparisons
 * of previously cached values will return the right answer even if other
 * values have been added since we last loaded the cache.
 *
 * Note: the enum logic has a special-case rule about even-numbered versus
 * odd-numbered OIDs, but we take no account of that rule here; this
 * routine shouldn't even get called when that rule applies.
 */
int
compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
{
    TypeCacheEnumData *enumdata;
    EnumItem   *item1;
    EnumItem   *item2;

    /*
     * Equal OIDs are certainly equal --- this case was probably handled by
     * our caller, but we may as well check.
     */
    if (arg1 == arg2)
        return 0;

    /* Load up the cache if first time through */
    if (tcache->enumData == NULL)
        load_enum_cache_data(tcache);
    enumdata = tcache->enumData;

    /*
     * Fast path: if both OIDs are in the known-sorted subset, their OID
     * order matches their sort order, so compare them directly.  (They
     * can't be equal here, since we checked arg1 == arg2 above.)
     */
    if (enum_known_sorted(enumdata, arg1) &&
        enum_known_sorted(enumdata, arg2))
    {
        if (arg1 < arg2)
            return -1;
        else
            return 1;
    }

    /*
     * Slow path: we have to identify their actual sort-order positions.
     */
    item1 = find_enumitem(enumdata, arg1);
    item2 = find_enumitem(enumdata, arg2);

    if (item1 == NULL || item2 == NULL)
    {
        /*
         * We couldn't find one or both values.  That means the enum has
         * changed under us, so re-initialize the cache and try again.  We
         * don't bother retrying the known-sorted case in this path.
         */
        load_enum_cache_data(tcache);
        enumdata = tcache->enumData;

        item1 = find_enumitem(enumdata, arg1);
        item2 = find_enumitem(enumdata, arg2);

        /*
         * If we still can't find the values, complain: we must have corrupt
         * data.
         */
        if (item1 == NULL)
            elog(ERROR, "enum value %u not found in cache for enum %s",
                 arg1, format_type_be(tcache->type_id));
        if (item2 == NULL)
            elog(ERROR, "enum value %u not found in cache for enum %s",
                 arg2, format_type_be(tcache->type_id));
    }

    /* Compare by cached sort order, not by OID */
    if (item1->sort_order < item2->sort_order)
        return -1;
    else if (item1->sort_order > item2->sort_order)
        return 1;
    else
        return 0;
}
1673
/*
 * Load (or re-load) the enumData member of the typcache entry.
 *
 * Reads all pg_enum rows for the type, sorts them by OID, and additionally
 * computes a bitmap identifying a subset of the OIDs whose OID order agrees
 * with their sort order, so that comparisons within that subset can skip
 * the binary search.  The finished structure is built in CacheMemoryContext
 * and installed in tcache->enumData (replacing any previous one).
 */
static void
load_enum_cache_data(TypeCacheEntry *tcache)
{
    TypeCacheEnumData *enumdata;
    Relation    enum_rel;
    SysScanDesc enum_scan;
    HeapTuple   enum_tuple;
    ScanKeyData skey;
    EnumItem   *items;
    int         numitems;
    int         maxitems;
    Oid         bitmap_base;
    Bitmapset  *bitmap;
    MemoryContext oldcxt;
    int         bm_size,
                start_pos;

    /* Check that this is actually an enum */
    if (tcache->typtype != TYPTYPE_ENUM)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("%s is not an enum",
                        format_type_be(tcache->type_id))));

    /*
     * Read all the information for members of the enum type.  We collect the
     * info in working memory in the caller's context, and then transfer it to
     * permanent memory in CacheMemoryContext.  This minimizes the risk of
     * leaking memory from CacheMemoryContext in the event of an error partway
     * through.
     */
    maxitems = 64;
    items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
    numitems = 0;

    /* Scan pg_enum for the members of the target enum type. */
    ScanKeyInit(&skey,
                Anum_pg_enum_enumtypid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(tcache->type_id));

    enum_rel = heap_open(EnumRelationId, AccessShareLock);
    enum_scan = systable_beginscan(enum_rel,
                                   EnumTypIdLabelIndexId,
                                   true, NULL,
                                   1, &skey);

    while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
    {
        Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

        /* Grow the working array as needed */
        if (numitems >= maxitems)
        {
            maxitems *= 2;
            items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
        }
        items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
        items[numitems].sort_order = en->enumsortorder;
        numitems++;
    }

    systable_endscan(enum_scan);
    heap_close(enum_rel, AccessShareLock);

    /* Sort the items into OID order, as find_enumitem's bsearch expects */
    qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

    /*
     * Here, we create a bitmap listing a subset of the enum's OIDs that are
     * known to be in order and can thus be compared with just OID comparison.
     *
     * The point of this is that the enum's initial OIDs were certainly in
     * order, so there is some subset that can be compared via OID comparison;
     * and we'd rather not do binary searches unnecessarily.
     *
     * This is somewhat heuristic, and might identify a subset of OIDs that
     * isn't exactly what the type started with.  That's okay as long as the
     * subset is correctly sorted.
     */
    bitmap_base = InvalidOid;
    bitmap = NULL;
    bm_size = 1;                /* only save sets of at least 2 OIDs */

    for (start_pos = 0; start_pos < numitems - 1; start_pos++)
    {
        /*
         * Identify longest sorted subsequence starting at start_pos
         */
        Bitmapset  *this_bitmap = bms_make_singleton(0);
        int         this_bm_size = 1;
        Oid         start_oid = items[start_pos].enum_oid;
        float4      prev_order = items[start_pos].sort_order;
        int         i;

        for (i = start_pos + 1; i < numitems; i++)
        {
            Oid         offset;

            offset = items[i].enum_oid - start_oid;
            /* quit if bitmap would be too large; cutoff is arbitrary */
            if (offset >= 8192)
                break;
            /* include the item if it's in-order */
            if (items[i].sort_order > prev_order)
            {
                prev_order = items[i].sort_order;
                this_bitmap = bms_add_member(this_bitmap, (int) offset);
                this_bm_size++;
            }
        }

        /* Remember it if larger than previous best */
        if (this_bm_size > bm_size)
        {
            bms_free(bitmap);
            bitmap_base = start_oid;
            bitmap = this_bitmap;
            bm_size = this_bm_size;
        }
        else
            bms_free(this_bitmap);

        /*
         * Done if it's not possible to find a longer sequence in the rest of
         * the list.  In typical cases this will happen on the first
         * iteration, which is why we create the bitmaps on the fly instead of
         * doing a second pass over the list.
         */
        if (bm_size >= (numitems - start_pos - 1))
            break;
    }

    /* OK, copy the data into CacheMemoryContext */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    enumdata = (TypeCacheEnumData *)
        palloc(offsetof(TypeCacheEnumData, enum_values) +
               numitems * sizeof(EnumItem));
    enumdata->bitmap_base = bitmap_base;
    enumdata->sorted_values = bms_copy(bitmap);
    enumdata->num_values = numitems;
    memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
    MemoryContextSwitchTo(oldcxt);

    /* Working copies are no longer needed */
    pfree(items);
    bms_free(bitmap);

    /* And link the finished cache struct into the typcache */
    if (tcache->enumData != NULL)
        pfree(tcache->enumData);
    tcache->enumData = enumdata;
}
1828
1829 /*
1830 * Locate the EnumItem with the given OID, if present
1831 */
1832 static EnumItem *
find_enumitem(TypeCacheEnumData * enumdata,Oid arg)1833 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
1834 {
1835 EnumItem srch;
1836
1837 /* On some versions of Solaris, bsearch of zero items dumps core */
1838 if (enumdata->num_values <= 0)
1839 return NULL;
1840
1841 srch.enum_oid = arg;
1842 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
1843 sizeof(EnumItem), enum_oid_cmp);
1844 }
1845
1846 /*
1847 * qsort comparison function for OID-ordered EnumItems
1848 */
1849 static int
enum_oid_cmp(const void * left,const void * right)1850 enum_oid_cmp(const void *left, const void *right)
1851 {
1852 const EnumItem *l = (const EnumItem *) left;
1853 const EnumItem *r = (const EnumItem *) right;
1854
1855 if (l->enum_oid < r->enum_oid)
1856 return -1;
1857 else if (l->enum_oid > r->enum_oid)
1858 return 1;
1859 else
1860 return 0;
1861 }
1862