1 /*-------------------------------------------------------------------------
2 *
3 * catcache.c
4 * System catalog cache for tuples matching a key.
5 *
6 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/utils/cache/catcache.c
12 *
13 *-------------------------------------------------------------------------
14 */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/heaptoast.h"
19 #include "access/relscan.h"
20 #include "access/sysattr.h"
21 #include "access/table.h"
22 #include "access/valid.h"
23 #include "access/xact.h"
24 #include "catalog/pg_collation.h"
25 #include "catalog/pg_operator.h"
26 #include "catalog/pg_type.h"
27 #include "common/hashfn.h"
28 #include "miscadmin.h"
29 #ifdef CATCACHE_STATS
30 #include "storage/ipc.h" /* for on_proc_exit */
31 #endif
32 #include "storage/lmgr.h"
33 #include "utils/builtins.h"
34 #include "utils/datum.h"
35 #include "utils/fmgroids.h"
36 #include "utils/inval.h"
37 #include "utils/memutils.h"
38 #include "utils/rel.h"
39 #include "utils/resowner_private.h"
40 #include "utils/syscache.h"
41
42
43 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
44
45 /*
46 * Given a hash value and the size of the hash table, find the bucket
47 * in which the hash value belongs. Since the hash table must contain
48 * a power-of-2 number of elements, this is a simple bitmask.
49 */
50 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
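/*
 * Illustrative example (added note, not part of the original file): because
 * the table size is a power of two, masking with (sz - 1) keeps just the
 * low-order bits of the hash.  For instance, with sz = 256 buckets:
 *
 *		HASH_INDEX(0xDEADBEEF, 256) == (0xDEADBEEF & 0xFF) == 0xEF == 239
 *
 * so that hash value lands in bucket 239.
 */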
51
52
53 /*
54 * variables, macros and other stuff
55 */
56
57 #ifdef CACHEDEBUG
58 #define CACHE_elog(...) elog(__VA_ARGS__)
59 #else
60 #define CACHE_elog(...)
61 #endif
62
63 /* Cache management header --- pointer is NULL until created */
64 static CatCacheHeader *CacheHdr = NULL;
65
66 static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
67 int nkeys,
68 Datum v1, Datum v2,
69 Datum v3, Datum v4);
70
71 static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
72 int nkeys,
73 uint32 hashValue,
74 Index hashIndex,
75 Datum v1, Datum v2,
76 Datum v3, Datum v4);
77
78 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
79 Datum v1, Datum v2, Datum v3, Datum v4);
80 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
81 HeapTuple tuple);
82 static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
83 const Datum *cachekeys,
84 const Datum *searchkeys);
85
86 #ifdef CATCACHE_STATS
87 static void CatCachePrintStats(int code, Datum arg);
88 #endif
89 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
90 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
91 static void CatalogCacheInitializeCache(CatCache *cache);
92 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
93 Datum *arguments,
94 uint32 hashValue, Index hashIndex,
95 bool negative);
96
97 static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
98 Datum *keys);
99 static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
100 Datum *srckeys, Datum *dstkeys);
101
102
103 /*
104 * internal support functions
105 */
106
107 /*
108 * Hash and equality functions for system types that are used as cache key
109 * fields. In some cases, we just call the regular SQL-callable functions for
110 * the appropriate data type, but that tends to be a little slow, and the
111 * speed of these functions is performance-critical. Therefore, for data
112 * types that frequently occur as catcache keys, we hard-code the logic here.
113 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
114 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
115 */
116
117 static bool
118 chareqfast(Datum a, Datum b)
119 {
120 return DatumGetChar(a) == DatumGetChar(b);
121 }
122
123 static uint32
124 charhashfast(Datum datum)
125 {
126 return murmurhash32((int32) DatumGetChar(datum));
127 }
128
129 static bool
130 nameeqfast(Datum a, Datum b)
131 {
132 char *ca = NameStr(*DatumGetName(a));
133 char *cb = NameStr(*DatumGetName(b));
134
135 return strncmp(ca, cb, NAMEDATALEN) == 0;
136 }
137
138 static uint32
139 namehashfast(Datum datum)
140 {
141 char *key = NameStr(*DatumGetName(datum));
142
143 return hash_any((unsigned char *) key, strlen(key));
144 }
145
146 static bool
147 int2eqfast(Datum a, Datum b)
148 {
149 return DatumGetInt16(a) == DatumGetInt16(b);
150 }
151
152 static uint32
153 int2hashfast(Datum datum)
154 {
155 return murmurhash32((int32) DatumGetInt16(datum));
156 }
157
158 static bool
159 int4eqfast(Datum a, Datum b)
160 {
161 return DatumGetInt32(a) == DatumGetInt32(b);
162 }
163
164 static uint32
165 int4hashfast(Datum datum)
166 {
167 return murmurhash32((int32) DatumGetInt32(datum));
168 }
169
170 static bool
171 texteqfast(Datum a, Datum b)
172 {
173 /*
174 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
175 * want to take the fast "deterministic" path in texteq().
176 */
177 return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
178 }
179
180 static uint32
181 texthashfast(Datum datum)
182 {
183 /* as in texteqfast(), the choice of collation here is arbitrary */
184 return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
185 }
186
187 static bool
188 oidvectoreqfast(Datum a, Datum b)
189 {
190 return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
191 }
192
193 static uint32
194 oidvectorhashfast(Datum datum)
195 {
196 return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
197 }
198
199 /* Lookup support functions for a type. */
200 static void
201 GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
202 {
203 switch (keytype)
204 {
205 case BOOLOID:
206 *hashfunc = charhashfast;
207 *fasteqfunc = chareqfast;
208 *eqfunc = F_BOOLEQ;
209 break;
210 case CHAROID:
211 *hashfunc = charhashfast;
212 *fasteqfunc = chareqfast;
213 *eqfunc = F_CHAREQ;
214 break;
215 case NAMEOID:
216 *hashfunc = namehashfast;
217 *fasteqfunc = nameeqfast;
218 *eqfunc = F_NAMEEQ;
219 break;
220 case INT2OID:
221 *hashfunc = int2hashfast;
222 *fasteqfunc = int2eqfast;
223 *eqfunc = F_INT2EQ;
224 break;
225 case INT4OID:
226 *hashfunc = int4hashfast;
227 *fasteqfunc = int4eqfast;
228 *eqfunc = F_INT4EQ;
229 break;
230 case TEXTOID:
231 *hashfunc = texthashfast;
232 *fasteqfunc = texteqfast;
233 *eqfunc = F_TEXTEQ;
234 break;
235 case OIDOID:
236 case REGPROCOID:
237 case REGPROCEDUREOID:
238 case REGOPEROID:
239 case REGOPERATOROID:
240 case REGCLASSOID:
241 case REGTYPEOID:
242 case REGCONFIGOID:
243 case REGDICTIONARYOID:
244 case REGROLEOID:
245 case REGNAMESPACEOID:
246 *hashfunc = int4hashfast;
247 *fasteqfunc = int4eqfast;
248 *eqfunc = F_OIDEQ;
249 break;
250 case OIDVECTOROID:
251 *hashfunc = oidvectorhashfast;
252 *fasteqfunc = oidvectoreqfast;
253 *eqfunc = F_OIDVECTOREQ;
254 break;
255 default:
256 elog(FATAL, "type %u not supported as catcache key", keytype);
257 *hashfunc = NULL; /* keep compiler quiet */
258
259 *eqfunc = InvalidOid;
260 break;
261 }
262 }
263
264 /*
265 * CatalogCacheComputeHashValue
266 *
267 * Compute the hash value associated with a given set of lookup keys
268 */
269 static uint32
270 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
271 Datum v1, Datum v2, Datum v3, Datum v4)
272 {
273 uint32 hashValue = 0;
274 uint32 oneHash;
275 CCHashFN *cc_hashfunc = cache->cc_hashfunc;
276
277 CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
278 cache->cc_relname, nkeys, cache);
279
280 switch (nkeys)
281 {
282 case 4:
283 oneHash = (cc_hashfunc[3]) (v4);
284
285 hashValue ^= oneHash << 24;
286 hashValue ^= oneHash >> 8;
287 /* FALLTHROUGH */
288 case 3:
289 oneHash = (cc_hashfunc[2]) (v3);
290
291 hashValue ^= oneHash << 16;
292 hashValue ^= oneHash >> 16;
293 /* FALLTHROUGH */
294 case 2:
295 oneHash = (cc_hashfunc[1]) (v2);
296
297 hashValue ^= oneHash << 8;
298 hashValue ^= oneHash >> 24;
299 /* FALLTHROUGH */
300 case 1:
301 oneHash = (cc_hashfunc[0]) (v1);
302
303 hashValue ^= oneHash;
304 break;
305 default:
306 elog(FATAL, "wrong number of hash keys: %d", nkeys);
307 break;
308 }
309
310 return hashValue;
311 }
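/*
 * Worked example (added note, hypothetical values): for a two-key lookup,
 * the per-key hashes are combined as in the switch above.  If the second
 * key hashes to 0x000000AB and the first to 0x00001234, then:
 *
 *		hashValue  = 0;
 *		hashValue ^= 0x000000AB << 8;		-> 0x0000AB00
 *		hashValue ^= 0x000000AB >> 24;		-> 0x0000AB00 (unchanged)
 *		hashValue ^= 0x00001234;			-> 0x0000B934
 *
 * The shifts spread each key's contribution across different bit positions,
 * so equal values appearing in different key columns don't simply cancel.
 */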
312
313 /*
314 * CatalogCacheComputeTupleHashValue
315 *
316 * Compute the hash value associated with a given tuple to be cached
317 */
318 static uint32
319 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
320 {
321 Datum v1 = 0,
322 v2 = 0,
323 v3 = 0,
324 v4 = 0;
325 bool isNull = false;
326 int *cc_keyno = cache->cc_keyno;
327 TupleDesc cc_tupdesc = cache->cc_tupdesc;
328
329 /* Now extract key fields from tuple, insert into scankey */
330 switch (nkeys)
331 {
332 case 4:
333 v4 = fastgetattr(tuple,
334 cc_keyno[3],
335 cc_tupdesc,
336 &isNull);
337 Assert(!isNull);
338 /* FALLTHROUGH */
339 case 3:
340 v3 = fastgetattr(tuple,
341 cc_keyno[2],
342 cc_tupdesc,
343 &isNull);
344 Assert(!isNull);
345 /* FALLTHROUGH */
346 case 2:
347 v2 = fastgetattr(tuple,
348 cc_keyno[1],
349 cc_tupdesc,
350 &isNull);
351 Assert(!isNull);
352 /* FALLTHROUGH */
353 case 1:
354 v1 = fastgetattr(tuple,
355 cc_keyno[0],
356 cc_tupdesc,
357 &isNull);
358 Assert(!isNull);
359 break;
360 default:
361 elog(FATAL, "wrong number of hash keys: %d", nkeys);
362 break;
363 }
364
365 return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
366 }
367
368 /*
369 * CatalogCacheCompareTuple
370 *
371 * Compare a tuple to the passed arguments.
372 */
373 static inline bool
374 CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
375 const Datum *cachekeys,
376 const Datum *searchkeys)
377 {
378 const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
379 int i;
380
381 for (i = 0; i < nkeys; i++)
382 {
383 if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
384 return false;
385 }
386 return true;
387 }
388
389
390 #ifdef CATCACHE_STATS
391
392 static void
393 CatCachePrintStats(int code, Datum arg)
394 {
395 slist_iter iter;
396 long cc_searches = 0;
397 long cc_hits = 0;
398 long cc_neg_hits = 0;
399 long cc_newloads = 0;
400 long cc_invals = 0;
401 long cc_lsearches = 0;
402 long cc_lhits = 0;
403
404 slist_foreach(iter, &CacheHdr->ch_caches)
405 {
406 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
407
408 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
409 continue; /* don't print unused caches */
410 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
411 cache->cc_relname,
412 cache->cc_indexoid,
413 cache->cc_ntup,
414 cache->cc_searches,
415 cache->cc_hits,
416 cache->cc_neg_hits,
417 cache->cc_hits + cache->cc_neg_hits,
418 cache->cc_newloads,
419 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
420 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
421 cache->cc_invals,
422 cache->cc_lsearches,
423 cache->cc_lhits);
424 cc_searches += cache->cc_searches;
425 cc_hits += cache->cc_hits;
426 cc_neg_hits += cache->cc_neg_hits;
427 cc_newloads += cache->cc_newloads;
428 cc_invals += cache->cc_invals;
429 cc_lsearches += cache->cc_lsearches;
430 cc_lhits += cache->cc_lhits;
431 }
432 elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
433 CacheHdr->ch_ntup,
434 cc_searches,
435 cc_hits,
436 cc_neg_hits,
437 cc_hits + cc_neg_hits,
438 cc_newloads,
439 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
440 cc_searches - cc_hits - cc_neg_hits,
441 cc_invals,
442 cc_lsearches,
443 cc_lhits);
444 }
445 #endif /* CATCACHE_STATS */
446
447
448 /*
449 * CatCacheRemoveCTup
450 *
451 * Unlink and delete the given cache entry
452 *
453 * NB: if it is a member of a CatCList, the CatCList is deleted too.
454 * Both the cache entry and the list had better have zero refcount.
455 */
456 static void
457 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
458 {
459 Assert(ct->refcount == 0);
460 Assert(ct->my_cache == cache);
461
462 if (ct->c_list)
463 {
464 /*
465 * The cleanest way to handle this is to call CatCacheRemoveCList,
466 * which will recurse back to me, and the recursive call will do the
467 * work. Set the "dead" flag to make sure it does recurse.
468 */
469 ct->dead = true;
470 CatCacheRemoveCList(cache, ct->c_list);
471 return; /* nothing left to do */
472 }
473
474 /* delink from linked list */
475 dlist_delete(&ct->cache_elem);
476
477 /*
478 * Free keys only when we're dealing with a negative entry; a normal entry's
479 * keys just point into its tuple, which is allocated together with the CatCTup.
480 */
481 if (ct->negative)
482 CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
483 cache->cc_keyno, ct->keys);
484
485 pfree(ct);
486
487 --cache->cc_ntup;
488 --CacheHdr->ch_ntup;
489 }
490
491 /*
492 * CatCacheRemoveCList
493 *
494 * Unlink and delete the given cache list entry
495 *
496 * NB: any dead member entries that become unreferenced are deleted too.
497 */
498 static void
499 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
500 {
501 int i;
502
503 Assert(cl->refcount == 0);
504 Assert(cl->my_cache == cache);
505
506 /* delink from member tuples */
507 for (i = cl->n_members; --i >= 0;)
508 {
509 CatCTup *ct = cl->members[i];
510
511 Assert(ct->c_list == cl);
512 ct->c_list = NULL;
513 /* if the member is dead and now has no references, remove it */
514 if (
515 #ifndef CATCACHE_FORCE_RELEASE
516 ct->dead &&
517 #endif
518 ct->refcount == 0)
519 CatCacheRemoveCTup(cache, ct);
520 }
521
522 /* delink from linked list */
523 dlist_delete(&cl->cache_elem);
524
525 /* free associated column data */
526 CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
527 cache->cc_keyno, cl->keys);
528
529 pfree(cl);
530 }
531
532
533 /*
534 * CatCacheInvalidate
535 *
536 * Invalidate entries in the specified cache, given a hash value.
537 *
538 * We delete cache entries that match the hash value, whether positive
539 * or negative. We don't care whether the invalidation is the result
540 * of a tuple insertion or a deletion.
541 *
542 * We used to try to match positive cache entries by TID, but that is
543 * unsafe after a VACUUM FULL on a system catalog: an inval event could
544 * be queued before VACUUM FULL, and then processed afterwards, when the
545 * target tuple that has to be invalidated has a different TID than it
546 * did when the event was created. So now we just compare hash values and
547 * accept the small risk of unnecessary invalidations due to false matches.
548 *
549 * This routine is only quasi-public: it should only be used by inval.c.
550 */
551 void
552 CatCacheInvalidate(CatCache *cache, uint32 hashValue)
553 {
554 Index hashIndex;
555 dlist_mutable_iter iter;
556
557 CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
558
559 /*
560 * We don't bother to check whether the cache has finished initialization
561 * yet; if not, there will be no entries in it so no problem.
562 */
563
564 /*
565 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
566 * searches might still be correct, so just zap 'em all.
567 */
568 dlist_foreach_modify(iter, &cache->cc_lists)
569 {
570 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
571
572 if (cl->refcount > 0)
573 cl->dead = true;
574 else
575 CatCacheRemoveCList(cache, cl);
576 }
577
578 /*
579 * inspect the proper hash bucket for tuple matches
580 */
581 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
582 dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
583 {
584 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
585
586 if (hashValue == ct->hash_value)
587 {
588 if (ct->refcount > 0 ||
589 (ct->c_list && ct->c_list->refcount > 0))
590 {
591 ct->dead = true;
592 /* list, if any, was marked dead above */
593 Assert(ct->c_list == NULL || ct->c_list->dead);
594 }
595 else
596 CatCacheRemoveCTup(cache, ct);
597 CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
598 #ifdef CATCACHE_STATS
599 cache->cc_invals++;
600 #endif
601 /* could be multiple matches, so keep looking! */
602 }
603 }
604 }
605
606 /* ----------------------------------------------------------------
607 * public functions
608 * ----------------------------------------------------------------
609 */
610
611
612 /*
613 * Standard routine for creating cache context if it doesn't exist yet
614 *
615 * There are a lot of places (probably far more than necessary) that check
616 * whether CacheMemoryContext exists yet and want to create it if not.
617 * We centralize knowledge of exactly how to create it here.
618 */
619 void
620 CreateCacheMemoryContext(void)
621 {
622 /*
623 * Purely for paranoia, check that context doesn't exist; caller probably
624 * did so already.
625 */
626 if (!CacheMemoryContext)
627 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
628 "CacheMemoryContext",
629 ALLOCSET_DEFAULT_SIZES);
630 }
631
632
633 /*
634 * ResetCatalogCache
635 *
636 * Reset one catalog cache to empty.
637 *
638 * This is not very efficient if the target cache is nearly empty.
639 * However, it shouldn't need to be efficient; we don't invoke it often.
640 */
641 static void
642 ResetCatalogCache(CatCache *cache)
643 {
644 dlist_mutable_iter iter;
645 int i;
646
647 /* Remove each list in this cache, or at least mark it dead */
648 dlist_foreach_modify(iter, &cache->cc_lists)
649 {
650 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
651
652 if (cl->refcount > 0)
653 cl->dead = true;
654 else
655 CatCacheRemoveCList(cache, cl);
656 }
657
658 /* Remove each tuple in this cache, or at least mark it dead */
659 for (i = 0; i < cache->cc_nbuckets; i++)
660 {
661 dlist_head *bucket = &cache->cc_bucket[i];
662
663 dlist_foreach_modify(iter, bucket)
664 {
665 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
666
667 if (ct->refcount > 0 ||
668 (ct->c_list && ct->c_list->refcount > 0))
669 {
670 ct->dead = true;
671 /* list, if any, was marked dead above */
672 Assert(ct->c_list == NULL || ct->c_list->dead);
673 }
674 else
675 CatCacheRemoveCTup(cache, ct);
676 #ifdef CATCACHE_STATS
677 cache->cc_invals++;
678 #endif
679 }
680 }
681 }
682
683 /*
684 * ResetCatalogCaches
685 *
686 * Reset all caches when a shared cache inval event forces it
687 */
688 void
689 ResetCatalogCaches(void)
690 {
691 slist_iter iter;
692
693 CACHE_elog(DEBUG2, "ResetCatalogCaches called");
694
695 slist_foreach(iter, &CacheHdr->ch_caches)
696 {
697 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
698
699 ResetCatalogCache(cache);
700 }
701
702 CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
703 }
704
705 /*
706 * CatalogCacheFlushCatalog
707 *
708 * Flush all catcache entries that came from the specified system catalog.
709 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
710 * tuples very likely now have different TIDs than before. (At one point
711 * we also tried to force re-execution of CatalogCacheInitializeCache for
712 * the cache(s) on that catalog. This is a bad idea since it leads to all
713 * kinds of trouble if a cache flush occurs while loading cache entries.
714 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
715 * rather than relying on the relcache to keep a tupdesc for us. Of course
716 * this assumes the tupdesc of a cachable system table will not change...)
717 */
718 void
719 CatalogCacheFlushCatalog(Oid catId)
720 {
721 slist_iter iter;
722
723 CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
724
725 slist_foreach(iter, &CacheHdr->ch_caches)
726 {
727 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
728
729 /* Does this cache store tuples of the target catalog? */
730 if (cache->cc_reloid == catId)
731 {
732 /* Yes, so flush all its contents */
733 ResetCatalogCache(cache);
734
735 /* Tell inval.c to call syscache callbacks for this cache */
736 CallSyscacheCallbacks(cache->id, 0);
737 }
738 }
739
740 CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
741 }
742
743 /*
744 * InitCatCache
745 *
746 * This allocates and initializes a cache for a system catalog relation.
747 * Actually, the cache is only partially initialized to avoid opening the
748 * relation. The relation will be opened and the rest of the cache
749 * structure initialized on the first access.
750 */
751 #ifdef CACHEDEBUG
752 #define InitCatCache_DEBUG2 \
753 do { \
754 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
755 cp->cc_reloid, cp->cc_indexoid, cp->id, \
756 cp->cc_nkeys, cp->cc_nbuckets); \
757 } while(0)
758 #else
759 #define InitCatCache_DEBUG2
760 #endif
761
762 CatCache *
763 InitCatCache(int id,
764 Oid reloid,
765 Oid indexoid,
766 int nkeys,
767 const int *key,
768 int nbuckets)
769 {
770 CatCache *cp;
771 MemoryContext oldcxt;
772 size_t sz;
773 int i;
774
775 /*
776 * nbuckets is the initial number of hash buckets to use in this catcache.
777 * It will be enlarged later if it becomes too full.
778 *
779 * nbuckets must be a power of two. We check this via Assert rather than
780 * a full runtime check because the values will be coming from constant
781 * tables.
782 *
783 * If you're confused by the power-of-two check, see comments in
784 * bitmapset.c for an explanation.
785 */
786 Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
787
788 /*
789 * first switch to the cache context so our allocations do not vanish at
790 * the end of a transaction
791 */
792 if (!CacheMemoryContext)
793 CreateCacheMemoryContext();
794
795 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
796
797 /*
798 * if first time through, initialize the cache group header
799 */
800 if (CacheHdr == NULL)
801 {
802 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
803 slist_init(&CacheHdr->ch_caches);
804 CacheHdr->ch_ntup = 0;
805 #ifdef CATCACHE_STATS
806 /* set up to dump stats at backend exit */
807 on_proc_exit(CatCachePrintStats, 0);
808 #endif
809 }
810
811 /*
812 * Allocate a new cache structure, aligning to a cacheline boundary
813 *
814 * Note: we rely on zeroing to initialize all the dlist headers correctly
815 */
816 sz = sizeof(CatCache) + PG_CACHE_LINE_SIZE;
817 cp = (CatCache *) CACHELINEALIGN(palloc0(sz));
818 cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
819
820 /*
821 * initialize the cache's relation information for the relation
822 * corresponding to this cache, and initialize some of the new cache's
823 * other internal fields. But don't open the relation yet.
824 */
825 cp->id = id;
826 cp->cc_relname = "(not known yet)";
827 cp->cc_reloid = reloid;
828 cp->cc_indexoid = indexoid;
829 cp->cc_relisshared = false; /* temporary */
830 cp->cc_tupdesc = (TupleDesc) NULL;
831 cp->cc_ntup = 0;
832 cp->cc_nbuckets = nbuckets;
833 cp->cc_nkeys = nkeys;
834 for (i = 0; i < nkeys; ++i)
835 cp->cc_keyno[i] = key[i];
836
837 /*
838 * new cache is initialized as far as we can go for now. print some
839 * debugging information, if appropriate.
840 */
841 InitCatCache_DEBUG2;
842
843 /*
844 * add completed cache to top of group header's list
845 */
846 slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
847
848 /*
849 * back to the old context before we return...
850 */
851 MemoryContextSwitchTo(oldcxt);
852
853 return cp;
854 }
855
856 /*
857 * Enlarge a catcache, doubling the number of buckets.
858 */
859 static void
860 RehashCatCache(CatCache *cp)
861 {
862 dlist_head *newbucket;
863 int newnbuckets;
864 int i;
865
866 elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
867 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
868
869 /* Allocate a new, larger, hash table. */
870 newnbuckets = cp->cc_nbuckets * 2;
871 newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
872
873 /* Move all entries from old hash table to new. */
874 for (i = 0; i < cp->cc_nbuckets; i++)
875 {
876 dlist_mutable_iter iter;
877
878 dlist_foreach_modify(iter, &cp->cc_bucket[i])
879 {
880 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
881 int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
882
883 dlist_delete(iter.cur);
884 dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
885 }
886 }
887
888 /* Switch to the new array. */
889 pfree(cp->cc_bucket);
890 cp->cc_nbuckets = newnbuckets;
891 cp->cc_bucket = newbucket;
892 }
893
894 /*
895 * CatalogCacheInitializeCache
896 *
897 * This function does final initialization of a catcache: obtain the tuple
898 * descriptor and set up the hash and equality function links. We assume
899 * that the relcache entry can be opened at this point!
900 */
901 #ifdef CACHEDEBUG
902 #define CatalogCacheInitializeCache_DEBUG1 \
903 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
904 cache->cc_reloid)
905
906 #define CatalogCacheInitializeCache_DEBUG2 \
907 do { \
908 if (cache->cc_keyno[i] > 0) { \
909 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
910 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
911 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
912 } else { \
913 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
914 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
915 } \
916 } while(0)
917 #else
918 #define CatalogCacheInitializeCache_DEBUG1
919 #define CatalogCacheInitializeCache_DEBUG2
920 #endif
921
922 static void
923 CatalogCacheInitializeCache(CatCache *cache)
924 {
925 Relation relation;
926 MemoryContext oldcxt;
927 TupleDesc tupdesc;
928 int i;
929
930 CatalogCacheInitializeCache_DEBUG1;
931
932 relation = table_open(cache->cc_reloid, AccessShareLock);
933
934 /*
935 * switch to the cache context so our allocations do not vanish at the end
936 * of a transaction
937 */
938 Assert(CacheMemoryContext != NULL);
939
940 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
941
942 /*
943 * copy the relcache's tuple descriptor to permanent cache storage
944 */
945 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
946
947 /*
948 * save the relation's name and relisshared flag, too (cc_relname is used
949 * only for debugging purposes)
950 */
951 cache->cc_relname = pstrdup(RelationGetRelationName(relation));
952 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
953
954 /*
955 * return to the caller's memory context and close the rel
956 */
957 MemoryContextSwitchTo(oldcxt);
958
959 table_close(relation, AccessShareLock);
960
961 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
962 cache->cc_relname, cache->cc_nkeys);
963
964 /*
965 * initialize cache's key information
966 */
967 for (i = 0; i < cache->cc_nkeys; ++i)
968 {
969 Oid keytype;
970 RegProcedure eqfunc;
971
972 CatalogCacheInitializeCache_DEBUG2;
973
974 if (cache->cc_keyno[i] > 0)
975 {
976 Form_pg_attribute attr = TupleDescAttr(tupdesc,
977 cache->cc_keyno[i] - 1);
978
979 keytype = attr->atttypid;
980 /* cache key columns should always be NOT NULL */
981 Assert(attr->attnotnull);
982 }
983 else
984 {
985 if (cache->cc_keyno[i] < 0)
986 elog(FATAL, "sys attributes are not supported in caches");
987 keytype = OIDOID;
988 }
989
990 GetCCHashEqFuncs(keytype,
991 &cache->cc_hashfunc[i],
992 &eqfunc,
993 &cache->cc_fastequal[i]);
994
995 /*
996 * Do equality-function lookup (we assume this won't need a catalog
997 * lookup for any supported type)
998 */
999 fmgr_info_cxt(eqfunc,
1000 &cache->cc_skey[i].sk_func,
1001 CacheMemoryContext);
1002
1003 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1004 cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1005
1006 /* Fill in sk_strategy as well --- always standard equality */
1007 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
1008 cache->cc_skey[i].sk_subtype = InvalidOid;
1009 /* If a catcache key requires a collation, it must be C collation */
1010 cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1011
1012 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1013 cache->cc_relname, i, cache);
1014 }
1015
1016 /*
1017 * mark this cache fully initialized
1018 */
1019 cache->cc_tupdesc = tupdesc;
1020 }
1021
1022 /*
1023 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1024 *
1025 * One reason to call this routine is to ensure that the relcache has
1026 * created entries for all the catalogs and indexes referenced by catcaches.
1027 * Therefore, provide an option to open the index as well as fixing the
1028 * cache itself. An exception is the indexes on pg_am, which we don't use
1029 * (cf. IndexScanOK).
1030 */
1031 void
1032 InitCatCachePhase2(CatCache *cache, bool touch_index)
1033 {
1034 if (cache->cc_tupdesc == NULL)
1035 CatalogCacheInitializeCache(cache);
1036
1037 if (touch_index &&
1038 cache->id != AMOID &&
1039 cache->id != AMNAME)
1040 {
1041 Relation idesc;
1042
1043 /*
1044 * We must lock the underlying catalog before opening the index to
1045 * avoid deadlock, since index_open could possibly result in reading
1046 * this same catalog, and if anyone else is exclusive-locking this
1047 * catalog and index they'll be doing it in that order.
1048 */
1049 LockRelationOid(cache->cc_reloid, AccessShareLock);
1050 idesc = index_open(cache->cc_indexoid, AccessShareLock);
1051
1052 /*
1053 * While we've got the index open, let's check that it's unique (and
1054 * not just deferrable-unique, thank you very much). This is just to
1055 * catch thinkos in definitions of new catcaches, so we don't worry
1056 * about the pg_am indexes not getting tested.
1057 */
1058 Assert(idesc->rd_index->indisunique &&
1059 idesc->rd_index->indimmediate);
1060
1061 index_close(idesc, AccessShareLock);
1062 UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1063 }
1064 }
1065
1066
1067 /*
1068 * IndexScanOK
1069 *
1070 * This function checks for tuples that will be fetched by
1071 * IndexSupportInitialize() during relcache initialization for
1072 * certain system indexes that support critical syscaches.
1073 * We can't use an indexscan to fetch these, else we'll get into
1074 * infinite recursion. A plain heap scan will work, however.
1075 * Once we have completed relcache initialization (signaled by
1076 * criticalRelcachesBuilt), we don't have to worry anymore.
1077 *
1078 * Similarly, during backend startup we have to be able to use the
1079 * pg_authid, pg_auth_members and pg_database syscaches for
1080 * authentication even if we don't yet have relcache entries for those
1081 * catalogs' indexes.
1082 */
1083 static bool
1084 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1085 {
1086 switch (cache->id)
1087 {
1088 case INDEXRELID:
1089
1090 /*
1091 * Rather than tracking exactly which indexes have to be loaded
1092 * before we can use indexscans (which changes from time to time),
1093 * just force all pg_index searches to be heap scans until we've
1094 * built the critical relcaches.
1095 */
1096 if (!criticalRelcachesBuilt)
1097 return false;
1098 break;
1099
1100 case AMOID:
1101 case AMNAME:
1102
1103 /*
1104 * Always do heap scans in pg_am, because it's so small there's
1105 * not much point in an indexscan anyway. We *must* do this when
1106 * initially building critical relcache entries, but we might as
1107 * well just always do it.
1108 */
1109 return false;
1110
1111 case AUTHNAME:
1112 case AUTHOID:
1113 case AUTHMEMMEMROLE:
1114 case DATABASEOID:
1115
1116 /*
1117 * Protect authentication lookups occurring before relcache has
1118 * collected entries for shared indexes.
1119 */
1120 if (!criticalSharedRelcachesBuilt)
1121 return false;
1122 break;
1123
1124 default:
1125 break;
1126 }
1127
1128 /* Normal case, allow index scan */
1129 return true;
1130 }
1131
1132 /*
1133 * SearchCatCache
1134 *
1135 * This call searches a system cache for a tuple, opening the relation
1136 * if necessary (on the first access to a particular cache).
1137 *
1138 * The result is NULL if not found, or a pointer to a HeapTuple in
1139 * the cache. The caller must not modify the tuple, and must call
1140 * ReleaseCatCache() when done with it.
1141 *
1142 * The search key values should be expressed as Datums of the key columns'
1143 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1144 * exception, the passed-in key for a NAME column can be just a C string;
1145 * the caller need not go to the trouble of converting it to a fully
1146 * null-padded NAME.
1147 */
1148 HeapTuple
1149 SearchCatCache(CatCache *cache,
1150 Datum v1,
1151 Datum v2,
1152 Datum v3,
1153 Datum v4)
1154 {
1155 return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1156 }
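/*
 * Usage sketch (added note, hypothetical caller): most code reaches the
 * catcache through syscache.c, but a direct single-key lookup follows this
 * pattern -- search, test for validity, and pair every hit with
 * ReleaseCatCache().  "cache" and "someoid" stand in for values supplied by
 * the caller.
 *
 *		HeapTuple	tup;
 *
 *		tup = SearchCatCache1(cache, ObjectIdGetDatum(someoid));
 *		if (HeapTupleIsValid(tup))
 *		{
 *			... inspect the tuple, but never modify it ...
 *			ReleaseCatCache(tup);
 *		}
 */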
1157
1158
1159 /*
1160 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1161 * arguments. The compiler can inline the body and unroll loops, making them a
1162 * bit faster than SearchCatCache().
1163 */
1164
1165 HeapTuple
1166 SearchCatCache1(CatCache *cache,
1167 Datum v1)
1168 {
1169 return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1170 }
1171
1172
1173 HeapTuple
1174 SearchCatCache2(CatCache *cache,
1175 Datum v1, Datum v2)
1176 {
1177 return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1178 }
1179
1180
1181 HeapTuple
1182 SearchCatCache3(CatCache *cache,
1183 Datum v1, Datum v2, Datum v3)
1184 {
1185 return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1186 }
1187
1188
1189 HeapTuple
1190 SearchCatCache4(CatCache *cache,
1191 Datum v1, Datum v2, Datum v3, Datum v4)
1192 {
1193 return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1194 }
1195
1196 /*
1197 * Work-horse for SearchCatCache/SearchCatCacheN.
1198 */
1199 static inline HeapTuple
1200 SearchCatCacheInternal(CatCache *cache,
1201 int nkeys,
1202 Datum v1,
1203 Datum v2,
1204 Datum v3,
1205 Datum v4)
1206 {
1207 Datum arguments[CATCACHE_MAXKEYS];
1208 uint32 hashValue;
1209 Index hashIndex;
1210 dlist_iter iter;
1211 dlist_head *bucket;
1212 CatCTup *ct;
1213
1214 /* Make sure we're in an xact, even if this ends up being a cache hit */
1215 Assert(IsTransactionState());
1216
1217 Assert(cache->cc_nkeys == nkeys);
1218
1219 /*
1220 * one-time startup overhead for each cache
1221 */
1222 if (unlikely(cache->cc_tupdesc == NULL))
1223 CatalogCacheInitializeCache(cache);
1224
1225 #ifdef CATCACHE_STATS
1226 cache->cc_searches++;
1227 #endif
1228
1229 /* Initialize local parameter array */
1230 arguments[0] = v1;
1231 arguments[1] = v2;
1232 arguments[2] = v3;
1233 arguments[3] = v4;
1234
1235 /*
1236 * find the hash bucket in which to look for the tuple
1237 */
1238 hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1239 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1240
1241 /*
1242 * scan the hash bucket until we find a match or exhaust our tuples
1243 *
1244 * Note: it's okay to use dlist_foreach here, even though we modify the
1245 * dlist within the loop, because we don't continue the loop afterwards.
1246 */
1247 bucket = &cache->cc_bucket[hashIndex];
1248 dlist_foreach(iter, bucket)
1249 {
1250 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1251
1252 if (ct->dead)
1253 continue; /* ignore dead entries */
1254
1255 if (ct->hash_value != hashValue)
1256 continue; /* quickly skip entry if wrong hash val */
1257
1258 if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1259 continue;
1260
1261 /*
1262 * We found a match in the cache. Move it to the front of the list
1263 * for its hashbucket, in order to speed subsequent searches. (The
1264 * most frequently accessed elements in any hashbucket will tend to be
1265 * near the front of the hashbucket's list.)
1266 */
1267 dlist_move_head(bucket, &ct->cache_elem);
1268
1269 /*
1270 * If it's a positive entry, bump its refcount and return it. If it's
1271 * negative, we can report failure to the caller.
1272 */
1273 if (!ct->negative)
1274 {
1275 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1276 ct->refcount++;
1277 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1278
1279 CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1280 cache->cc_relname, hashIndex);
1281
1282 #ifdef CATCACHE_STATS
1283 cache->cc_hits++;
1284 #endif
1285
1286 return &ct->tuple;
1287 }
1288 else
1289 {
1290 CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1291 cache->cc_relname, hashIndex);
1292
1293 #ifdef CATCACHE_STATS
1294 cache->cc_neg_hits++;
1295 #endif
1296
1297 return NULL;
1298 }
1299 }
1300
1301 return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1302 }
1303
1304 /*
1305 * Search the actual catalogs, rather than the cache.
1306 *
1307 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1308 * as small as possible. To avoid that effort being undone by a helpful
1309 * compiler, try to explicitly forbid inlining.
1310 */
1311 static pg_noinline HeapTuple
1312 SearchCatCacheMiss(CatCache *cache,
1313 int nkeys,
1314 uint32 hashValue,
1315 Index hashIndex,
1316 Datum v1,
1317 Datum v2,
1318 Datum v3,
1319 Datum v4)
1320 {
1321 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1322 Relation relation;
1323 SysScanDesc scandesc;
1324 HeapTuple ntp;
1325 CatCTup *ct;
1326 Datum arguments[CATCACHE_MAXKEYS];
1327
1328 /* Initialize local parameter array */
1329 arguments[0] = v1;
1330 arguments[1] = v2;
1331 arguments[2] = v3;
1332 arguments[3] = v4;
1333
1334 /*
1335 * Ok, need to make a lookup in the relation, copy the scankey and fill
1336 * out any per-call fields.
1337 */
1338 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1339 cur_skey[0].sk_argument = v1;
1340 cur_skey[1].sk_argument = v2;
1341 cur_skey[2].sk_argument = v3;
1342 cur_skey[3].sk_argument = v4;
1343
1344 /*
1345 * Tuple was not found in cache, so we have to try to retrieve it directly
1346 * from the relation. If found, we will add it to the cache; if not
1347 * found, we will add a negative cache entry instead.
1348 *
1349 * NOTE: it is possible for recursive cache lookups to occur while reading
1350 * the relation --- for example, due to shared-cache-inval messages being
1351 * processed during table_open(). This is OK. It's even possible for one
1352 * of those lookups to find and enter the very same tuple we are trying to
1353 * fetch here. If that happens, we will enter a second copy of the tuple
1354 * into the cache. The first copy will never be referenced again, and
1355 * will eventually age out of the cache, so there's no functional problem.
1356 * This case is rare enough that it's not worth expending extra cycles to
1357 * detect.
1358 */
1359 relation = table_open(cache->cc_reloid, AccessShareLock);
1360
1361 scandesc = systable_beginscan(relation,
1362 cache->cc_indexoid,
1363 IndexScanOK(cache, cur_skey),
1364 NULL,
1365 nkeys,
1366 cur_skey);
1367
1368 ct = NULL;
1369
1370 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1371 {
1372 ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1373 hashValue, hashIndex,
1374 false);
1375 /* immediately set the refcount to 1 */
1376 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1377 ct->refcount++;
1378 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1379 break; /* assume only one match */
1380 }
1381
1382 systable_endscan(scandesc);
1383
1384 table_close(relation, AccessShareLock);
1385
1386 /*
1387 * If tuple was not found, we need to build a negative cache entry
1388 * containing a fake tuple. The fake tuple has the correct key columns,
1389 * but nulls everywhere else.
1390 *
1391 * In bootstrap mode, we don't build negative entries, because the cache
1392 * invalidation mechanism isn't alive and can't clear them if the tuple
1393 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1394 * cache inval for that.)
1395 */
1396 if (ct == NULL)
1397 {
1398 if (IsBootstrapProcessingMode())
1399 return NULL;
1400
1401 ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1402 hashValue, hashIndex,
1403 true);
1404
1405 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1406 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1407 CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1408 cache->cc_relname, hashIndex);
1409
1410 /*
1411 * We are not returning the negative entry to the caller, so leave its
1412 * refcount zero.
1413 */
1414
1415 return NULL;
1416 }
1417
1418 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1419 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1420 CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1421 cache->cc_relname, hashIndex);
1422
1423 #ifdef CATCACHE_STATS
1424 cache->cc_newloads++;
1425 #endif
1426
1427 return &ct->tuple;
1428 }
1429
1430 /*
1431 * ReleaseCatCache
1432 *
1433 * Decrement the reference count of a catcache entry (releasing the
1434 * hold grabbed by a successful SearchCatCache).
1435 *
1436 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1437 * will be freed as soon as their refcount goes to zero. In combination
1438 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1439 * to catch references to already-released catcache entries.
1440 */
1441 void
1442 ReleaseCatCache(HeapTuple tuple)
1443 {
1444 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1445 offsetof(CatCTup, tuple));
1446
1447 /* Safety checks to ensure we were handed a cache entry */
1448 Assert(ct->ct_magic == CT_MAGIC);
1449 Assert(ct->refcount > 0);
1450
1451 ct->refcount--;
1452 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1453
1454 if (
1455 #ifndef CATCACHE_FORCE_RELEASE
1456 ct->dead &&
1457 #endif
1458 ct->refcount == 0 &&
1459 (ct->c_list == NULL || ct->c_list->refcount == 0))
1460 CatCacheRemoveCTup(ct->my_cache, ct);
1461 }
1462
1463
1464 /*
1465 * GetCatCacheHashValue
1466 *
1467 * Compute the hash value for a given set of search keys.
1468 *
1469 * The reason for exposing this as part of the API is that the hash value is
1470 * exposed in cache invalidation operations, so there are places outside the
1471 * catcache code that need to be able to compute the hash values.
1472 */
1473 uint32
1474 GetCatCacheHashValue(CatCache *cache,
1475 Datum v1,
1476 Datum v2,
1477 Datum v3,
1478 Datum v4)
1479 {
1480 /*
1481 * one-time startup overhead for each cache
1482 */
1483 if (cache->cc_tupdesc == NULL)
1484 CatalogCacheInitializeCache(cache);
1485
1486 /*
1487 * calculate the hash value
1488 */
1489 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1490 }
1491
1492
1493 /*
1494 * SearchCatCacheList
1495 *
1496 * Generate a list of all tuples matching a partial key (that is,
1497 * a key specifying just the first K of the cache's N key columns).
1498 *
1499 * It doesn't make any sense to specify all of the cache's key columns
1500 * here: since the key is unique, there could be at most one match, so
1501 * you ought to use SearchCatCache() instead. Hence this function takes
1502 * one fewer Datum argument than SearchCatCache() does.
1503 *
1504 * The caller must not modify the list object or the pointed-to tuples,
1505 * and must call ReleaseCatCacheList() when done with the list.
1506 */
1507 CatCList *
1508 SearchCatCacheList(CatCache *cache,
1509 int nkeys,
1510 Datum v1,
1511 Datum v2,
1512 Datum v3)
1513 {
1514 Datum v4 = 0; /* dummy last-column value */
1515 Datum arguments[CATCACHE_MAXKEYS];
1516 uint32 lHashValue;
1517 dlist_iter iter;
1518 CatCList *cl;
1519 CatCTup *ct;
1520 List *volatile ctlist;
1521 ListCell *ctlist_item;
1522 int nmembers;
1523 bool ordered;
1524 HeapTuple ntp;
1525 MemoryContext oldcxt;
1526 int i;
1527
1528 /*
1529 * one-time startup overhead for each cache
1530 */
1531 if (cache->cc_tupdesc == NULL)
1532 CatalogCacheInitializeCache(cache);
1533
1534 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1535
1536 #ifdef CATCACHE_STATS
1537 cache->cc_lsearches++;
1538 #endif
1539
1540 /* Initialize local parameter array */
1541 arguments[0] = v1;
1542 arguments[1] = v2;
1543 arguments[2] = v3;
1544 arguments[3] = v4;
1545
1546 /*
1547 * compute a hash value of the given keys for faster search. We don't
1548 * presently divide the CatCList items into buckets, but this still lets
1549 * us skip non-matching items quickly most of the time.
1550 */
1551 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1552
1553 /*
1554 * scan the items until we find a match or exhaust our list
1555 *
1556 * Note: it's okay to use dlist_foreach here, even though we modify the
1557 * dlist within the loop, because we don't continue the loop afterwards.
1558 */
1559 dlist_foreach(iter, &cache->cc_lists)
1560 {
1561 cl = dlist_container(CatCList, cache_elem, iter.cur);
1562
1563 if (cl->dead)
1564 continue; /* ignore dead entries */
1565
1566 if (cl->hash_value != lHashValue)
1567 continue; /* quickly skip entry if wrong hash val */
1568
1569 /*
1570 * see if the cached list matches our key.
1571 */
1572 if (cl->nkeys != nkeys)
1573 continue;
1574
1575 if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1576 continue;
1577
1578 /*
1579 * We found a matching list. Move the list to the front of the
1580 * cache's list-of-lists, to speed subsequent searches. (We do not
1581 * move the members to the fronts of their hashbucket lists, however,
1582 * since there's no point in that unless they are searched for
1583 * individually.)
1584 */
1585 dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1586
1587 /* Bump the list's refcount and return it */
1588 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1589 cl->refcount++;
1590 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1591
1592 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1593 cache->cc_relname);
1594
1595 #ifdef CATCACHE_STATS
1596 cache->cc_lhits++;
1597 #endif
1598
1599 return cl;
1600 }
1601
1602 /*
1603 * List was not found in cache, so we have to build it by reading the
1604 * relation. For each matching tuple found in the relation, use an
1605 * existing cache entry if possible, else build a new one.
1606 *
1607 * We have to bump the member refcounts temporarily to ensure they won't
1608 * get dropped from the cache while loading other members. We use a PG_TRY
1609 * block to ensure we can undo those refcounts if we get an error before
1610 * we finish constructing the CatCList.
1611 */
1612 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1613
1614 ctlist = NIL;
1615
1616 PG_TRY();
1617 {
1618 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1619 Relation relation;
1620 SysScanDesc scandesc;
1621
1622 /*
1623 * Ok, need to make a lookup in the relation, copy the scankey and
1624 * fill out any per-call fields.
1625 */
1626 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1627 cur_skey[0].sk_argument = v1;
1628 cur_skey[1].sk_argument = v2;
1629 cur_skey[2].sk_argument = v3;
1630 cur_skey[3].sk_argument = v4;
1631
1632 relation = table_open(cache->cc_reloid, AccessShareLock);
1633
1634 scandesc = systable_beginscan(relation,
1635 cache->cc_indexoid,
1636 IndexScanOK(cache, cur_skey),
1637 NULL,
1638 nkeys,
1639 cur_skey);
1640
1641 /* The list will be ordered iff we are doing an index scan */
1642 ordered = (scandesc->irel != NULL);
1643
1644 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1645 {
1646 uint32 hashValue;
1647 Index hashIndex;
1648 bool found = false;
1649 dlist_head *bucket;
1650
1651 /*
1652 * See if there's an entry for this tuple already.
1653 */
1654 ct = NULL;
1655 hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1656 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1657
1658 bucket = &cache->cc_bucket[hashIndex];
1659 dlist_foreach(iter, bucket)
1660 {
1661 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1662
1663 if (ct->dead || ct->negative)
1664 continue; /* ignore dead and negative entries */
1665
1666 if (ct->hash_value != hashValue)
1667 continue; /* quickly skip entry if wrong hash val */
1668
1669 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1670 continue; /* not same tuple */
1671
1672 /*
1673 * Found a match, but can't use it if it belongs to another
1674 * list already
1675 */
1676 if (ct->c_list)
1677 continue;
1678
1679 found = true;
1680 break; /* A-OK */
1681 }
1682
1683 if (!found)
1684 {
1685 /* We didn't find a usable entry, so make a new one */
1686 ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1687 hashValue, hashIndex,
1688 false);
1689 }
1690
1691 /* Careful here: add entry to ctlist, then bump its refcount */
1692 /* This way leaves state correct if lappend runs out of memory */
1693 ctlist = lappend(ctlist, ct);
1694 ct->refcount++;
1695 }
1696
1697 systable_endscan(scandesc);
1698
1699 table_close(relation, AccessShareLock);
1700
1701 /* Now we can build the CatCList entry. */
1702 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1703 nmembers = list_length(ctlist);
1704 cl = (CatCList *)
1705 palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1706
1707 /* Extract key values */
1708 CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
1709 arguments, cl->keys);
1710 MemoryContextSwitchTo(oldcxt);
1711
1712 /*
1713 * We are now past the last thing that could trigger an elog before we
1714 * have finished building the CatCList and remembering it in the
1715 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1716 * we'd better do so before we start marking the members as belonging
1717 * to the list.
1718 */
1719
1720 }
1721 PG_CATCH();
1722 {
1723 foreach(ctlist_item, ctlist)
1724 {
1725 ct = (CatCTup *) lfirst(ctlist_item);
1726 Assert(ct->c_list == NULL);
1727 Assert(ct->refcount > 0);
1728 ct->refcount--;
1729 if (
1730 #ifndef CATCACHE_FORCE_RELEASE
1731 ct->dead &&
1732 #endif
1733 ct->refcount == 0 &&
1734 (ct->c_list == NULL || ct->c_list->refcount == 0))
1735 CatCacheRemoveCTup(cache, ct);
1736 }
1737
1738 PG_RE_THROW();
1739 }
1740 PG_END_TRY();
1741
1742 cl->cl_magic = CL_MAGIC;
1743 cl->my_cache = cache;
1744 cl->refcount = 0; /* for the moment */
1745 cl->dead = false;
1746 cl->ordered = ordered;
1747 cl->nkeys = nkeys;
1748 cl->hash_value = lHashValue;
1749 cl->n_members = nmembers;
1750
1751 i = 0;
1752 foreach(ctlist_item, ctlist)
1753 {
1754 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1755 Assert(ct->c_list == NULL);
1756 ct->c_list = cl;
1757 /* release the temporary refcount on the member */
1758 Assert(ct->refcount > 0);
1759 ct->refcount--;
1760 /* mark list dead if any members already dead */
1761 if (ct->dead)
1762 cl->dead = true;
1763 }
1764 Assert(i == nmembers);
1765
1766 dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1767
1768 /* Finally, bump the list's refcount and return it */
1769 cl->refcount++;
1770 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1771
1772 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1773 cache->cc_relname, nmembers);
1774
1775 return cl;
1776 }
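/*
 * Usage sketch (added note, hypothetical caller): a partial-key list search
 * returns a CatCList whose member tuples remain valid while the list is
 * held; the caller iterates the members and then releases the list.
 * "cache" and "someoid" stand in for values supplied by the caller.
 *
 *		CatCList   *list;
 *		int			i;
 *
 *		list = SearchCatCacheList(cache, 1, ObjectIdGetDatum(someoid), 0, 0);
 *		for (i = 0; i < list->n_members; i++)
 *		{
 *			HeapTuple	tup = &list->members[i]->tuple;
 *
 *			... inspect tup, but never modify it ...
 *		}
 *		ReleaseCatCacheList(list);
 */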
1777
1778 /*
1779 * ReleaseCatCacheList
1780 *
1781 * Decrement the reference count of a catcache list.
1782 */
1783 void
1784 ReleaseCatCacheList(CatCList *list)
1785 {
1786 /* Safety checks to ensure we were handed a cache entry */
1787 Assert(list->cl_magic == CL_MAGIC);
1788 Assert(list->refcount > 0);
1789 list->refcount--;
1790 ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1791
1792 if (
1793 #ifndef CATCACHE_FORCE_RELEASE
1794 list->dead &&
1795 #endif
1796 list->refcount == 0)
1797 CatCacheRemoveCList(list->my_cache, list);
1798 }
1799
1800
1801 /*
1802 * CatalogCacheCreateEntry
1803 * Create a new CatCTup entry, copying the given HeapTuple and other
1804 * supplied data into it. The new entry initially has refcount 0.
1805 */
1806 static CatCTup *
1807 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
1808 uint32 hashValue, Index hashIndex,
1809 bool negative)
1810 {
1811 CatCTup *ct;
1812 HeapTuple dtp;
1813 MemoryContext oldcxt;
1814
1815 /* negative entries have no tuple associated */
1816 if (ntp)
1817 {
1818 int i;
1819
1820 Assert(!negative);
1821
1822 /*
1823 * If there are any out-of-line toasted fields in the tuple, expand
1824 * them in-line. This saves cycles during later use of the catcache
1825 * entry, and also protects us against the possibility of the toast
1826 * tuples being freed before we attempt to fetch them, in case of
1827 * something using a slightly stale catcache entry.
1828 */
1829 if (HeapTupleHasExternal(ntp))
1830 dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1831 else
1832 dtp = ntp;
1833
1834 /* Allocate memory for CatCTup and the cached tuple in one go */
1835 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1836
1837 ct = (CatCTup *) palloc(sizeof(CatCTup) +
1838 MAXIMUM_ALIGNOF + dtp->t_len);
1839 ct->tuple.t_len = dtp->t_len;
1840 ct->tuple.t_self = dtp->t_self;
1841 ct->tuple.t_tableOid = dtp->t_tableOid;
1842 ct->tuple.t_data = (HeapTupleHeader)
1843 MAXALIGN(((char *) ct) + sizeof(CatCTup));
1844 /* copy tuple contents */
1845 memcpy((char *) ct->tuple.t_data,
1846 (const char *) dtp->t_data,
1847 dtp->t_len);
1848 MemoryContextSwitchTo(oldcxt);
1849
1850 if (dtp != ntp)
1851 heap_freetuple(dtp);
1852
1853 /* extract keys - they'll point into the tuple if not by-value */
1854 for (i = 0; i < cache->cc_nkeys; i++)
1855 {
1856 Datum atp;
1857 bool isnull;
1858
1859 atp = heap_getattr(&ct->tuple,
1860 cache->cc_keyno[i],
1861 cache->cc_tupdesc,
1862 &isnull);
1863 Assert(!isnull);
1864 ct->keys[i] = atp;
1865 }
1866 }
1867 else
1868 {
1869 Assert(negative);
1870 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1871 ct = (CatCTup *) palloc(sizeof(CatCTup));
1872
1873 /*
1874 * Store keys - they'll point into separately allocated memory if not
1875 * by-value.
1876 */
1877 CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
1878 arguments, ct->keys);
1879 MemoryContextSwitchTo(oldcxt);
1880 }
1881
1882 /*
1883 * Finish initializing the CatCTup header, and add it to the cache's
1884 * linked list and counts.
1885 */
1886 ct->ct_magic = CT_MAGIC;
1887 ct->my_cache = cache;
1888 ct->c_list = NULL;
1889 ct->refcount = 0; /* for the moment */
1890 ct->dead = false;
1891 ct->negative = negative;
1892 ct->hash_value = hashValue;
1893
1894 dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1895
1896 cache->cc_ntup++;
1897 CacheHdr->ch_ntup++;
1898
1899 /*
1900 * If the hash table has become too full, enlarge the buckets array. Quite
1901 * arbitrarily, we enlarge when fill factor > 2.
1902 */
1903 if (cache->cc_ntup > cache->cc_nbuckets * 2)
1904 RehashCatCache(cache);
1905
1906 return ct;
1907 }
1908
1909 /*
1910 * Helper routine that frees keys stored in the keys array.
1911 */
1912 static void
1913 CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
1914 {
1915 int i;
1916
1917 for (i = 0; i < nkeys; i++)
1918 {
1919 int attnum = attnos[i];
1920 Form_pg_attribute att;
1921
1922 /* system attributes are not supported in caches */
1923 Assert(attnum > 0);
1924
1925 att = TupleDescAttr(tupdesc, attnum - 1);
1926
1927 if (!att->attbyval)
1928 pfree(DatumGetPointer(keys[i]));
1929 }
1930 }
1931
1932 /*
1933 * Helper routine that copies the keys in the srckeys array into the dstkeys
1934 * one, guaranteeing that the datums are fully allocated in the current memory
1935 * context.
1936 */
1937 static void
1938 CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
1939 Datum *srckeys, Datum *dstkeys)
1940 {
1941 int i;
1942
1943 /*
1944 * XXX: memory and lookup performance could possibly be improved by
1945 * storing all keys in one allocation.
1946 */
1947
1948 for (i = 0; i < nkeys; i++)
1949 {
1950 int attnum = attnos[i];
1951 Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1952 Datum src = srckeys[i];
1953 NameData srcname;
1954
1955 /*
1956 * Must be careful in case the caller passed a C string where a NAME
1957 * is wanted: convert the given argument to a correctly padded NAME.
1958 * Otherwise the memcpy() done by datumCopy() could fall off the end
1959 * of memory.
1960 */
1961 if (att->atttypid == NAMEOID)
1962 {
1963 namestrcpy(&srcname, DatumGetCString(src));
1964 src = NameGetDatum(&srcname);
1965 }
1966
1967 dstkeys[i] = datumCopy(src,
1968 att->attbyval,
1969 att->attlen);
1970 }
1971
1972 }
1973
1974 /*
1975 * PrepareToInvalidateCacheTuple()
1976 *
1977 * This is part of a rather subtle chain of events, so pay attention:
1978 *
1979 * When a tuple is inserted or deleted, it cannot be flushed from the
1980 * catcaches immediately, for reasons explained at the top of cache/inval.c.
1981 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1982 * invalidations that will be done at the end of the command or transaction.
1983 *
1984 * The lists of tuples that need to be flushed are kept by inval.c. This
1985 * routine is a helper routine for inval.c. Given a tuple belonging to
1986 * the specified relation, find all catcaches it could be in, compute the
1987 * correct hash value for each such catcache, and call the specified
1988 * function to record the cache id and hash value in inval.c's lists.
1989 * SysCacheInvalidate will be called later, if appropriate,
1990 * using the recorded information.
1991 *
1992 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1993 * For an update, we are called just once, with tuple being the old tuple
1994 * version and newtuple the new version. We should make two list entries
1995 * if the tuple's hash value changed, but only one if it didn't.
1996 *
1997 * Note that it is irrelevant whether the given tuple is actually loaded
1998 * into the catcache at the moment. Even if it's not there now, it might
1999 * be by the end of the command, or there might be a matching negative entry
2000 * to flush --- or other backends' caches might have such entries --- so
2001 * we have to make list entries to flush it later.
2002 *
2003 * Also note that it's not an error if there are no catcaches for the
2004 * specified relation. inval.c doesn't know exactly which rels have
2005 * catcaches --- it will call this routine for any tuple that's in a
2006 * system relation.
2007 */
2008 void
2009 PrepareToInvalidateCacheTuple(Relation relation,
2010 HeapTuple tuple,
2011 HeapTuple newtuple,
2012 void (*function) (int, uint32, Oid))
2013 {
2014 slist_iter iter;
2015 Oid reloid;
2016
2017 CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2018
2019 /*
2020 * sanity checks
2021 */
2022 Assert(RelationIsValid(relation));
2023 Assert(HeapTupleIsValid(tuple));
2024 Assert(PointerIsValid(function));
2025 Assert(CacheHdr != NULL);
2026
2027 reloid = RelationGetRelid(relation);
2028
2029 /* ----------------
2030 * for each cache
2031 * if the cache contains tuples from the specified relation
2032 * compute the tuple's hash value(s) in this cache,
2033 * and call the passed function to register the information.
2034 * ----------------
2035 */
2036
2037 slist_foreach(iter, &CacheHdr->ch_caches)
2038 {
2039 CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2040 uint32 hashvalue;
2041 Oid dbid;
2042
2043 if (ccp->cc_reloid != reloid)
2044 continue;
2045
2046 /* Just in case cache hasn't finished initialization yet... */
2047 if (ccp->cc_tupdesc == NULL)
2048 CatalogCacheInitializeCache(ccp);
2049
2050 hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2051 dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2052
2053 (*function) (ccp->id, hashvalue, dbid);
2054
2055 if (newtuple)
2056 {
2057 uint32 newhashvalue;
2058
2059 newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2060
2061 if (newhashvalue != hashvalue)
2062 (*function) (ccp->id, newhashvalue, dbid);
2063 }
2064 }
2065 }
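/*
 * Sketch of the callback shape (added note, hypothetical function name): the
 * "function" argument above receives the cache id, the computed hash value,
 * and the database OID (0 for shared catalogs), e.g.:
 *
 *		static void
 *		record_catcache_inval(int cacheId, uint32 hashValue, Oid dbId)
 *		{
 *			... queue (cacheId, hashValue, dbId) for flushing at end of command ...
 *		}
 *
 * In PostgreSQL itself this role is played by inval.c's registration
 * callback; record_catcache_inval above is made up purely for illustration.
 */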
2066
2067
2068 /*
2069 * Subroutines for warning about reference leaks. These are exported so
2070 * that resowner.c can call them.
2071 */
2072 void
2073 PrintCatCacheLeakWarning(HeapTuple tuple)
2074 {
2075 CatCTup *ct = (CatCTup *) (((char *) tuple) -
2076 offsetof(CatCTup, tuple));
2077
2078 /* Safety check to ensure we were handed a cache entry */
2079 Assert(ct->ct_magic == CT_MAGIC);
2080
2081 elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
2082 ct->my_cache->cc_relname, ct->my_cache->id,
2083 ItemPointerGetBlockNumber(&(tuple->t_self)),
2084 ItemPointerGetOffsetNumber(&(tuple->t_self)),
2085 ct->refcount);
2086 }
2087
2088 void
2089 PrintCatCacheListLeakWarning(CatCList *list)
2090 {
2091 elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
2092 list->my_cache->cc_relname, list->my_cache->id,
2093 list, list->refcount);
2094 }
2095