/*-------------------------------------------------------------------------
 *
 * catcache.c
 *	  System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heaptoast.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/valid.h"
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "miscadmin.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"		/* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"


 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
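
/*
 * For example, in a 64-bucket table HASH_INDEX(0x2A5, 64) masks with 0x3F
 * and yields bucket 0x25.  The bitmask is equivalent to (h % sz) but
 * cheaper, which is why the table size must be a power of two.
 */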


/*
 *		variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE_elog(...)				elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;

static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
											   int nkeys,
											   Datum v1, Datum v2,
											   Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
												int nkeys,
												uint32 hashValue,
												Index hashIndex,
												Datum v1, Datum v2,
												Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
										   Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
												HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
											const Datum *cachekeys,
											const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
										Datum *arguments,
										uint32 hashValue, Index hashIndex,
										bool negative);

static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *srckeys, Datum *dstkeys);


/*
 *					internal support functions
 */

/*
 * Hash and equality functions for system types that are used as cache key
 * fields.  In some cases, we just call the regular SQL-callable functions for
 * the appropriate data type, but that tends to be a little slow, and the
 * speed of these functions is performance-critical.  Therefore, for data
 * types that frequently occur as catcache keys, we hard-code the logic here.
 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
 */

static bool
chareqfast(Datum a, Datum b)
{
	return DatumGetChar(a) == DatumGetChar(b);
}

static uint32
charhashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetChar(datum));
}

static bool
nameeqfast(Datum a, Datum b)
{
	char	   *ca = NameStr(*DatumGetName(a));
	char	   *cb = NameStr(*DatumGetName(b));

	return strncmp(ca, cb, NAMEDATALEN) == 0;
}

static uint32
namehashfast(Datum datum)
{
	char	   *key = NameStr(*DatumGetName(datum));

	return hash_any((unsigned char *) key, strlen(key));
}

static bool
int2eqfast(Datum a, Datum b)
{
	return DatumGetInt16(a) == DatumGetInt16(b);
}

static uint32
int2hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt16(datum));
}

static bool
int4eqfast(Datum a, Datum b)
{
	return DatumGetInt32(a) == DatumGetInt32(b);
}

static uint32
int4hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt32(datum));
}

static bool
texteqfast(Datum a, Datum b)
{
	/*
	 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here.  We just
	 * want to take the fast "deterministic" path in texteq().
	 */
	return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
}

static uint32
texthashfast(Datum datum)
{
	/* analogously here as in texteqfast() */
	return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
}

static bool
oidvectoreqfast(Datum a, Datum b)
{
	return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
}

static uint32
oidvectorhashfast(Datum datum)
{
	return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
}

/* Lookup support functions for a type. */
static void
GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
{
	switch (keytype)
	{
		case BOOLOID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_BOOLEQ;
			break;
		case CHAROID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_CHAREQ;
			break;
		case NAMEOID:
			*hashfunc = namehashfast;
			*fasteqfunc = nameeqfast;
			*eqfunc = F_NAMEEQ;
			break;
		case INT2OID:
			*hashfunc = int2hashfast;
			*fasteqfunc = int2eqfast;
			*eqfunc = F_INT2EQ;
			break;
		case INT4OID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_INT4EQ;
			break;
		case TEXTOID:
			*hashfunc = texthashfast;
			*fasteqfunc = texteqfast;
			*eqfunc = F_TEXTEQ;
			break;
		case OIDOID:
		case REGPROCOID:
		case REGPROCEDUREOID:
		case REGOPEROID:
		case REGOPERATOROID:
		case REGCLASSOID:
		case REGTYPEOID:
		case REGCONFIGOID:
		case REGDICTIONARYOID:
		case REGROLEOID:
		case REGNAMESPACEOID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_OIDEQ;
			break;
		case OIDVECTOROID:
			*hashfunc = oidvectorhashfast;
			*fasteqfunc = oidvectoreqfast;
			*eqfunc = F_OIDVECTOREQ;
			break;
		default:
			elog(FATAL, "type %u not supported as catcache key", keytype);
			*hashfunc = NULL;	/* keep compiler quiet */

			*eqfunc = InvalidOid;
			break;
	}
}
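
/*
 * Supporting an additional key type would mean adding a case above with a
 * matched pair of fast hash/equality helpers plus the type's equality
 * procedure OID from fmgroids.h.  A sketch only; the helpers named here are
 * hypothetical and do not exist in this file:
 *
 *		case INT8OID:
 *			*hashfunc = int8hashfast;
 *			*fasteqfunc = int8eqfast;
 *			*eqfunc = F_INT8EQ;
 *			break;
 */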

/*
 *		CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
							 Datum v1, Datum v2, Datum v3, Datum v4)
{
	uint32		hashValue = 0;
	uint32		oneHash;
	CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

	CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
			   cache->cc_relname, nkeys, cache);

	switch (nkeys)
	{
		case 4:
			oneHash = (cc_hashfunc[3]) (v4);

			hashValue ^= oneHash << 24;
			hashValue ^= oneHash >> 8;
			/* FALLTHROUGH */
		case 3:
			oneHash = (cc_hashfunc[2]) (v3);

			hashValue ^= oneHash << 16;
			hashValue ^= oneHash >> 16;
			/* FALLTHROUGH */
		case 2:
			oneHash = (cc_hashfunc[1]) (v2);

			hashValue ^= oneHash << 8;
			hashValue ^= oneHash >> 24;
			/* FALLTHROUGH */
		case 1:
			oneHash = (cc_hashfunc[0]) (v1);

			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
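
/*
 * The shift pairs above amount to 32-bit left rotations: key i's hash is
 * rotated left by 8*i bits before being XORed in, so for two keys the result
 * is rotl32(h2, 8) ^ h1.  Rotating keeps identical per-key hash values from
 * cancelling each other out, as a plain XOR of equal hashes would.
 */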

/*
 *		CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
	Datum		v1 = 0,
				v2 = 0,
				v3 = 0,
				v4 = 0;
	bool		isNull = false;
	int		   *cc_keyno = cache->cc_keyno;
	TupleDesc	cc_tupdesc = cache->cc_tupdesc;

	/* Now extract key fields from tuple, insert into scankey */
	switch (nkeys)
	{
		case 4:
			v4 = fastgetattr(tuple,
							 cc_keyno[3],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			v3 = fastgetattr(tuple,
							 cc_keyno[2],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			v2 = fastgetattr(tuple,
							 cc_keyno[1],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			v1 = fastgetattr(tuple,
							 cc_keyno[0],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}

/*
 *		CatalogCacheCompareTuple
 *
 * Compare a tuple to the passed arguments.
 */
static inline bool
CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
						 const Datum *cachekeys,
						 const Datum *searchkeys)
{
	const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
			return false;
	}
	return true;
}


#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */


/*
 *		CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/*
	 * Free keys when we're dealing with a negative entry; normal entries'
	 * keys just point into the tuple, which is allocated together with the
	 * CatCTup.
	 */
	if (ct->negative)
		CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
						 cache->cc_keyno, ct->keys);

	pfree(ct);

	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}

/*
 *		CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated column data */
	CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
					 cache->cc_keyno, cl->keys);

	pfree(cl);
}


/*
 *	CatCacheInvalidate
 *
 *	Invalidate entries in the specified cache, given a hash value.
 *
 *	We delete cache entries that match the hash value, whether positive
 *	or negative.  We don't care whether the invalidation is the result
 *	of a tuple insertion or a deletion.
 *
 *	We used to try to match positive cache entries by TID, but that is
 *	unsafe after a VACUUM FULL on a system catalog: an inval event could
 *	be queued before VACUUM FULL, and then processed afterwards, when the
 *	target tuple that has to be invalidated has a different TID than it
 *	did when the event was created.  So now we just compare hash values and
 *	accept the small risk of unnecessary invalidations due to false matches.
 *
 *	This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}
}

/* ----------------------------------------------------------------
 *					   public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
	/*
	 * Purely for paranoia, check that context doesn't exist; caller probably
	 * did so already.
	 */
	if (!CacheMemoryContext)
		CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
												   "CacheMemoryContext",
												   ALLOCSET_DEFAULT_SIZES);
}


/*
 *		ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}
}

/*
 *		ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "ResetCatalogCaches called");

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		ResetCatalogCache(cache);
	}

	CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}

/*
 *		CatalogCacheFlushCatalog
 *
 *	Flush all catcache entries that came from the specified system catalog.
 *	This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *	tuples very likely now have different TIDs than before.  (At one point
 *	we also tried to force re-execution of CatalogCacheInitializeCache for
 *	the cache(s) on that catalog.  This is a bad idea since it leads to all
 *	kinds of trouble if a cache flush occurs while loading cache entries.
 *	We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *	rather than relying on the relcache to keep a tupdesc for us.  Of course
 *	this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}

/*
 *		InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	size_t		sz;
	int			i;

	/*
	 * nbuckets is the initial number of hash buckets to use in this catcache.
	 * It will be enlarged later if it becomes too full.
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * Allocate a new cache structure, aligning to a cacheline boundary
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 */
	sz = sizeof(CatCache) + PG_CACHE_LINE_SIZE;
	cp = (CatCache *) CACHELINEALIGN(palloc0(sz));
	cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
		cp->cc_keyno[i] = key[i];

	/*
	 * The new cache is now initialized as far as we can go for now.  Print
	 * some debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
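
/*
 * Usage sketch: syscache.c drives InitCatCache from a constant table of
 * cache descriptors.  A call for the pg_proc OID cache would look roughly
 * like this (illustrative values, not a quote of that table):
 *
 *	static const int key[1] = {Anum_pg_proc_oid};
 *
 *	cache = InitCatCache(PROCOID, ProcedureRelationId, ProcedureOidIndexId,
 *						 1, key, 128);
 */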

/*
 * Enlarge a catcache, doubling the number of buckets.
 */
static void
RehashCatCache(CatCache *cp)
{
	dlist_head *newbucket;
	int			newnbuckets;
	int			i;

	elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
		 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);

	/* Allocate a new, larger, hash table. */
	newnbuckets = cp->cc_nbuckets * 2;
	newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));

	/* Move all entries from old hash table to new. */
	for (i = 0; i < cp->cc_nbuckets; i++)
	{
		dlist_mutable_iter iter;

		dlist_foreach_modify(iter, &cp->cc_bucket[i])
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);
			int			hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

			dlist_delete(iter.cur);
			dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
		}
	}

	/* Switch to the new array. */
	pfree(cp->cc_bucket);
	cp->cc_nbuckets = newnbuckets;
	cp->cc_bucket = newbucket;
}
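
/*
 * Note that doubling preserves the power-of-two invariant HASH_INDEX relies
 * on: with the new mask, each old bucket i splits its entries between new
 * buckets i and i + old_nbuckets, depending on one additional hash bit.
 */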

/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_keyno[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_keyno[i], \
				 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
		} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized
	 */
	cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}


/*
 *		IndexScanOK
 *
 *		This function checks for tuples that will be fetched by
 *		IndexSupportInitialize() during relcache initialization for
 *		certain system indexes that support critical syscaches.
 *		We can't use an indexscan to fetch these, else we'll get into
 *		infinite recursion.  A plain heap scan will work, however.
 *		Once we have completed relcache initialization (signaled by
 *		criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *		Similarly, during backend startup we have to be able to use the
 *		pg_authid and pg_auth_members syscaches for authentication even if
 *		we don't yet have relcache entries for those catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
	switch (cache->id)
	{
		case INDEXRELID:

			/*
			 * Rather than tracking exactly which indexes have to be loaded
			 * before we can use indexscans (which changes from time to time),
			 * just force all pg_index searches to be heap scans until we've
			 * built the critical relcaches.
			 */
			if (!criticalRelcachesBuilt)
				return false;
			break;

		case AMOID:
		case AMNAME:

			/*
			 * Always do heap scans in pg_am, because it's so small there's
			 * not much point in an indexscan anyway.  We *must* do this when
			 * initially building critical relcache entries, but we might as
			 * well just always do it.
			 */
			return false;

		case AUTHNAME:
		case AUTHOID:
		case AUTHMEMMEMROLE:

			/*
			 * Protect authentication lookups occurring before relcache has
			 * collected entries for shared indexes.
			 */
			if (!criticalSharedRelcachesBuilt)
				return false;
			break;

		default:
			break;
	}

	/* Normal case, allow index scan */
	return true;
}

/*
 *	SearchCatCacheInternal
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.  The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 * The SearchCatCacheN() functions are SearchCatCache() variants for a
 * specific number of arguments.  The compiler can inline the body and unroll
 * loops, making them a bit faster than SearchCatCache().
 */

HeapTuple
SearchCatCache1(CatCache *cache,
				Datum v1)
{
	return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}


HeapTuple
SearchCatCache2(CatCache *cache,
				Datum v1, Datum v2)
{
	return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}


HeapTuple
SearchCatCache3(CatCache *cache,
				Datum v1, Datum v2, Datum v3)
{
	return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}


HeapTuple
SearchCatCache4(CatCache *cache,
				Datum v1, Datum v2, Datum v3, Datum v4)
{
	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
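
/*
 * Example usage (a sketch; real callers normally go through the
 * SearchSysCache* wrappers in syscache.c rather than calling these
 * directly):
 *
 *	HeapTuple	tup = SearchCatCache1(cache, ObjectIdGetDatum(relid));
 *
 *	if (HeapTupleIsValid(tup))
 *	{
 *		... inspect, but never modify, the tuple ...
 *		ReleaseCatCache(tup);
 *	}
 */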

/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* Make sure we're in an xact, even if this ends up being a cache hit */
	Assert(IsTransactionState());

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}

/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Ok, need to make a lookup in the relation, copy the scankey and fill
	 * out any per-call fields.
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  NULL,
								  nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp, arguments,
									 hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;					/* assume only one match */
	}

	systable_endscan(scandesc);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, arguments,
									 hashValue, hashIndex,
									 true);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}

/*
 *	ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *	will be freed as soon as their refcount goes to zero.  In combination
 *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *	to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}


/*
 *	GetCatCacheHashValue
 *
 *		Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
					 Datum v1,
					 Datum v2,
					 Datum v3,
					 Datum v4)
{
	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/*
	 * calculate the hash value
	 */
	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 *	SearchCatCacheList
 *
 *		Generate a list of all tuples matching a partial key (that is,
 *		a key specifying just the first K of the cache's N key columns).
 *
 *		It doesn't make any sense to specify all of the cache's key columns
 *		here: since the key is unique, there could be at most one match, so
 *		you ought to use SearchCatCache() instead.  Hence this function takes
 *		one less Datum argument than SearchCatCache() does.
 *
 *		The caller must not modify the list object or the pointed-to tuples,
 *		and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3)
{
	Datum		v4 = 0;			/* dummy last-column value */
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	dlist_iter	iter;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	dlist_foreach(iter, &cache->cc_lists)
	{
		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;

		if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		dlist_move_head(&cache->cc_lists, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
				   cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
		Relation	relation;
		SysScanDesc scandesc;

		/*
		 * Ok, need to make a lookup in the relation, copy the scankey and
		 * fill out any per-call fields.
		 */
		memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
		cur_skey[0].sk_argument = v1;
		cur_skey[1].sk_argument = v2;
		cur_skey[2].sk_argument = v3;
		cur_skey[3].sk_argument = v4;

		relation = table_open(cache->cc_reloid, AccessShareLock);

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  NULL,
									  nkeys,
									  cur_skey);

		/* The list will be ordered iff we are doing an index scan */
		ordered = (scandesc->irel != NULL);

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			uint32		hashValue;
			Index		hashIndex;
			bool		found = false;
			dlist_head *bucket;

			/*
			 * See if there's an entry for this tuple already.
			 */
			ct = NULL;
			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

			bucket = &cache->cc_bucket[hashIndex];
			dlist_foreach(iter, bucket)
			{
				ct = dlist_container(CatCTup, cache_elem, iter.cur);

				if (ct->dead || ct->negative)
					continue;	/* ignore dead and negative entries */

				if (ct->hash_value != hashValue)
					continue;	/* quickly skip entry if wrong hash val */

				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
					continue;	/* not same tuple */

				/*
				 * Found a match, but can't use it if it belongs to another
				 * list already
				 */
				if (ct->c_list)
					continue;

				found = true;
				break;			/* A-OK */
			}

			if (!found)
			{
				/* We didn't find a usable entry, so make a new one */
				ct = CatalogCacheCreateEntry(cache, ntp, arguments,
											 hashValue, hashIndex,
											 false);
			}

			/* Careful here: add entry to ctlist, then bump its refcount */
			/* This way leaves state correct if lappend runs out of memory */
			ctlist = lappend(ctlist, ct);
			ct->refcount++;
		}

		systable_endscan(scandesc);

		table_close(relation, AccessShareLock);

		/* Now we can build the CatCList entry. */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

		/* Extract key values */
		CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
						 arguments, cl->keys);
		MemoryContextSwitchTo(oldcxt);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */

	}
	PG_CATCH();
	{
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	dlist_push_head(&cache->cc_lists, &cl->cache_elem);

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
			   cache->cc_relname, nmembers);

	return cl;
}
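
/*
 * Example usage (a sketch; callers normally use the SearchSysCacheList
 * wrappers in syscache.h rather than calling this directly):
 *
 *	CatCList   *catlist = SearchCatCacheList(cache, 1,
 *											 CStringGetDatum(procname), 0, 0);
 *
 *	for (int i = 0; i < catlist->n_members; i++)
 *	{
 *		HeapTuple	tup = &catlist->members[i]->tuple;
 *
 *		... inspect tup ...
 *	}
 *	ReleaseCatCacheList(catlist);
 */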
1775 
1776 /*
1777  *	ReleaseCatCacheList
1778  *
1779  *	Decrement the reference count of a catcache list.
1780  */
1781 void
ReleaseCatCacheList(CatCList * list)1782 ReleaseCatCacheList(CatCList *list)
1783 {
1784 	/* Safety checks to ensure we were handed a cache entry */
1785 	Assert(list->cl_magic == CL_MAGIC);
1786 	Assert(list->refcount > 0);
1787 	list->refcount--;
1788 	ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1789 
1790 	if (
1791 #ifndef CATCACHE_FORCE_RELEASE
1792 		list->dead &&
1793 #endif
1794 		list->refcount == 0)
1795 		CatCacheRemoveCList(list->my_cache, list);
1796 }
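
/*
 * Typical usage, for illustration only (via the syscache wrappers declared
 * in syscache.h; the particular cache and key are just an example):
 *
 *		CatCList   *catlist = SearchSysCacheList1(AMOPSTRATEGY,
 *												  ObjectIdGetDatum(opfamily));
 *
 *		for (int i = 0; i < catlist->n_members; i++)
 *		{
 *			HeapTuple	tup = &catlist->members[i]->tuple;
 *
 *			... examine tup ...
 *		}
 *		ReleaseSysCacheList(catlist);
 *
 * ReleaseSysCacheList is a thin macro over ReleaseCatCacheList.
 */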


/*
 * CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.  The new entry initially has refcount 0.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
						uint32 hashValue, Index hashIndex,
						bool negative)
{
	CatCTup    *ct;
	HeapTuple	dtp;
	MemoryContext oldcxt;

	/* negative entries have no tuple associated */
	if (ntp)
	{
		int			i;

		Assert(!negative);

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line.  This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
			dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
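		/*
		 * The tuple body lives right after the CatCTup header.  The extra
		 * MAXIMUM_ALIGNOF bytes requested above leave room for the MAXALIGN
		 * rounding applied below, since sizeof(CatCTup) is not necessarily
		 * a multiple of the maximum alignment.
		 */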
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		Assert(negative);
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = negative;
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
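	 * For example, a cache sized at 256 buckets is rehashed once it holds
	 * more than 512 tuples; RehashCatCache doubles the bucket count, so
	 * the fill factor drops back to about 1.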
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}

/*
 * Helper routine that frees keys stored in the keys array.
 */
static void
CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
{
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att;

		/* system attributes are not supported in caches */
		Assert(attnum > 0);

		att = TupleDescAttr(tupdesc, attnum - 1);

		if (!att->attbyval)
			pfree(DatumGetPointer(keys[i]));
	}
}

/*
 * Helper routine that copies the keys in the srckeys array into the dstkeys
 * one, guaranteeing that the datums are fully allocated in the current memory
 * context.
 */
static void
CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
				 Datum *srckeys, Datum *dstkeys)
{
	int			i;

	/*
	 * XXX: memory and lookup performance could possibly be improved by
	 * storing all keys in one allocation.
	 */

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
		Datum		src = srckeys[i];
		NameData	srcname;

		/*
		 * Must be careful in case the caller passed a C string where a NAME
		 * is wanted: convert the given argument to a correctly padded NAME.
		 * Otherwise the memcpy() done by datumCopy() could fall off the end
		 * of memory.
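		 * (Syscache callers commonly pass name keys as CStringGetDatum("foo");
		 * without this conversion, datumCopy() would read a full NAMEDATALEN
		 * bytes starting at that string.)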
		 */
		if (att->atttypid == NAMEOID)
		{
			namestrcpy(&srcname, DatumGetCString(src));
			src = NameGetDatum(&srcname);
		}

		dstkeys[i] = datumCopy(src,
							   att->attbyval,
							   att->attlen);
	}
}

/*
 *	PrepareToInvalidateCacheTuple()
 *
 *	This is part of a rather subtle chain of events, so pay attention:
 *
 *	When a tuple is inserted or deleted, it cannot be flushed from the
 *	catcaches immediately, for reasons explained at the top of cache/inval.c.
 *	Instead we have to add entry(s) for the tuple to a list of pending tuple
 *	invalidations that will be done at the end of the command or transaction.
 *
 *	The lists of tuples that need to be flushed are kept by inval.c.  This
 *	routine is a helper routine for inval.c.  Given a tuple belonging to
 *	the specified relation, find all catcaches it could be in, compute the
 *	correct hash value for each such catcache, and call the specified
 *	function to record the cache id and hash value in inval.c's lists.
 *	SysCacheInvalidate will be called later, if appropriate,
 *	using the recorded information.
 *
 *	For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *	For an update, we are called just once, with tuple being the old tuple
 *	version and newtuple the new version.  We should make two list entries
 *	if the tuple's hash value changed, but only one if it didn't.
 *
 *	Note that it is irrelevant whether the given tuple is actually loaded
 *	into the catcache at the moment.  Even if it's not there now, it might
 *	be by the end of the command, or there might be a matching negative entry
 *	to flush --- or other backends' caches might have such entries --- so
 *	we have to make list entries to flush it later.
 *
 *	Also note that it's not an error if there are no catcaches for the
 *	specified relation.  inval.c doesn't know exactly which rels have
 *	catcaches --- it will call this routine for any tuple that's in a
 *	system relation.
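 *
 *	(Example: renaming a function updates its pg_proc row.  The old and new
 *	tuples hash differently in the name-keyed PROCNAMEARGSNSP cache, so two
 *	entries are recorded there, while the OID-keyed PROCOID cache computes
 *	the same hash value for both versions and gets just one.)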
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
							  HeapTuple tuple,
							  HeapTuple newtuple,
							  void (*function) (int, uint32, Oid))
{
	slist_iter	iter;
	Oid			reloid;

	CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

	/*
	 * sanity checks
	 */
	Assert(RelationIsValid(relation));
	Assert(HeapTupleIsValid(tuple));
	Assert(PointerIsValid(function));
	Assert(CacheHdr != NULL);

	reloid = RelationGetRelid(relation);

	/* ----------------
	 *	for each cache
	 *	   if the cache contains tuples from the specified relation
	 *		   compute the tuple's hash value(s) in this cache,
	 *		   and call the passed function to register the information.
	 * ----------------
	 */

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
		uint32		hashvalue;
		Oid			dbid;

		if (ccp->cc_reloid != reloid)
			continue;

		/* Just in case cache hasn't finished initialization yet... */
		if (ccp->cc_tupdesc == NULL)
			CatalogCacheInitializeCache(ccp);

		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
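		/*
		 * Invalidations for shared catalogs are tagged with database id 0,
		 * so that they are acted on in every database.
		 */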
		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

		(*function) (ccp->id, hashvalue, dbid);

		if (newtuple)
		{
			uint32		newhashvalue;

			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);

			if (newhashvalue != hashvalue)
				(*function) (ccp->id, newhashvalue, dbid);
		}
	}
}


/*
 * Subroutines for warning about reference leaks.  These are exported so
 * that resowner.c can call them.
 */
void
PrintCatCacheLeakWarning(HeapTuple tuple)
{
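	/*
	 * Catcache callers hold a pointer to ct->tuple, so back up by that
	 * field's offset to recover the enclosing CatCTup.
	 */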
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety check to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);

	elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
		 ct->my_cache->cc_relname, ct->my_cache->id,
		 ItemPointerGetBlockNumber(&(tuple->t_self)),
		 ItemPointerGetOffsetNumber(&(tuple->t_self)),
		 ct->refcount);
}

void
PrintCatCacheListLeakWarning(CatCList *list)
{
	elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
		 list->my_cache->cc_relname, list->my_cache->id,
		 list, list->refcount);
}