1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  *	  POSTGRES cache invalidation dispatcher code.
5  *
6  *	This is subtle stuff, so pay attention:
7  *
8  *	When a tuple is updated or deleted, our standard visibility rules
9  *	consider that it is *still valid* so long as we are in the same command,
10  *	ie, until the next CommandCounterIncrement() or transaction commit.
11  *	(See access/heap/heapam_visibility.c, and note that system catalogs are
12  *  generally scanned under the most current snapshot available, rather than
13  *  the transaction snapshot.)	At the command boundary, the old tuple stops
14  *	being valid and the new version, if any, becomes valid.  Therefore,
15  *	we cannot simply flush a tuple from the system caches during heap_update()
16  *	or heap_delete().  The tuple is still good at that point; what's more,
17  *	even if we did flush it, it might be reloaded into the caches by a later
18  *	request in the same command.  So the correct behavior is to keep a list
19  *	of outdated (updated/deleted) tuples and then do the required cache
20  *	flushes at the next command boundary.  We must also keep track of
21  *	inserted tuples so that we can flush "negative" cache entries that match
22  *	the new tuples; again, that mustn't happen until end of command.
23  *
24  *	Once we have finished the command, we still need to remember inserted
25  *	tuples (including new versions of updated tuples), so that we can flush
26  *	them from the caches if we abort the transaction.  Similarly, we'd better
27  *	be able to flush "negative" cache entries that may have been loaded in
28  *	place of deleted tuples, so we still need the deleted ones too.
29  *
30  *	If we successfully complete the transaction, we have to broadcast all
31  *	these invalidation events to other backends (via the SI message queue)
32  *	so that they can flush obsolete entries from their caches.  Note we have
33  *	to record the transaction commit before sending SI messages, otherwise
34  *	the other backends won't see our updated tuples as good.
35  *
36  *	When a subtransaction aborts, we can process and discard any events
37  *	it has queued.  When a subtransaction commits, we just add its events
38  *	to the pending lists of the parent transaction.
39  *
40  *	In short, we need to remember until xact end every insert or delete
41  *	of a tuple that might be in the system caches.  Updates are treated as
42  *	two events, delete + insert, for simplicity.  (If the update doesn't
43  *	change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  *	We do not need to register EVERY tuple operation in this way, just those
46  *	on tuples in relations that have associated catcaches.  We do, however,
47  *	have to register every operation on every tuple that *could* be in a
48  *	catcache, whether or not it currently is in our cache.  Also, if the
49  *	tuple is in a relation that has multiple catcaches, we need to register
50  *	an invalidation message for each such catcache.  catcache.c's
51  *	PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  *	catcaches may need invalidation for a given tuple.
53  *
54  *	Also, whenever we see an operation on a pg_class, pg_attribute, or
55  *	pg_index tuple, we register a relcache flush operation for the relation
56  *	described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  *	Likewise for pg_constraint tuples for foreign keys on relations.
58  *
59  *	We keep the relcache flush requests in lists separate from the catcache
60  *	tuple flush requests.  This allows us to issue all the pending catcache
61  *	flushes before we issue relcache flushes, which saves us from loading
62  *	a catcache tuple during relcache load only to flush it again right away.
63  *	Also, we avoid queuing multiple relcache flush requests for the same
64  *	relation, since a relcache flush is relatively expensive to do.
65  *	(XXX is it worth testing likewise for duplicate catcache flush entries?
66  *	Probably not.)
67  *
68  *	Many subsystems own higher-level caches that depend on relcache and/or
69  *	catcache, and they register callbacks here to invalidate their caches.
70  *	While building a higher-level cache entry, a backend may receive a
71  *	callback for the being-built entry or one of its dependencies.  This
72  *	implies the new higher-level entry would be born stale, and it might
73  *	remain stale for the life of the backend.  Many caches do not prevent
74  *	that.  They rely on DDL for can't-miss catalog changes taking
75  *	AccessExclusiveLock on suitable objects.  (For a change made with less
76  *	locking, backends might never read the change.)  The relation cache,
77  *	however, needs to reflect changes from CREATE INDEX CONCURRENTLY no later
78  *	than the beginning of the next transaction.  Hence, when a relevant
79  *	invalidation callback arrives during a build, relcache.c reattempts that
80  *	build.  Caches with similar needs could do likewise.
81  *
82  *	If a relcache flush is issued for a system relation that we preload
83  *	from the relcache init file, we must also delete the init file so that
84  *	it will be rebuilt during the next backend restart.  The actual work of
85  *	manipulating the init file is in relcache.c, but we keep track of the
86  *	need for it here.
87  *
88  *	The request lists proper are kept in CurTransactionContext of their
89  *	creating (sub)transaction, since they can be forgotten on abort of that
90  *	transaction but must be kept till top-level commit otherwise.  For
91  *	simplicity we keep the controlling list-of-lists in TopTransactionContext.
92  *
93  *	Currently, inval messages are sent without regard for the possibility
94  *	that the object described by the catalog tuple might be a session-local
95  *	object such as a temporary table.  This is because (1) this code has
96  *	no practical way to tell the difference, and (2) it is not certain that
97  *	other backends don't have catalog cache or even relcache entries for
98  *	such tables, anyway; there is nothing that prevents that.  It might be
99  *	worth trying to avoid sending such inval traffic in the future, if those
100  *	problems can be overcome cheaply.
101  *
102  *
103  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
104  * Portions Copyright (c) 1994, Regents of the University of California
105  *
106  * IDENTIFICATION
107  *	  src/backend/utils/cache/inval.c
108  *
109  *-------------------------------------------------------------------------
110  */
111 #include "postgres.h"
112 
113 #include <limits.h>
114 
115 #include "access/htup_details.h"
116 #include "access/xact.h"
117 #include "catalog/catalog.h"
118 #include "catalog/pg_constraint.h"
119 #include "miscadmin.h"
120 #include "storage/sinval.h"
121 #include "storage/smgr.h"
122 #include "utils/catcache.h"
123 #include "utils/inval.h"
124 #include "utils/memdebug.h"
125 #include "utils/memutils.h"
126 #include "utils/rel.h"
127 #include "utils/relmapper.h"
128 #include "utils/snapmgr.h"
129 #include "utils/syscache.h"
130 
131 
132 /*
133  * To minimize palloc traffic, we keep pending requests in successively-
134  * larger chunks (a slightly more sophisticated version of an expansible
135  * array).  All request types can be stored as SharedInvalidationMessage
136  * records.  The ordering of requests within a list is never significant.
137  */
138 typedef struct InvalidationChunk
139 {
140 	struct InvalidationChunk *next; /* list link */
141 	int			nitems;			/* # items currently stored in chunk */
142 	int			maxitems;		/* size of allocated array in this chunk */
143 	SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
144 } InvalidationChunk;
145 
146 typedef struct InvalidationListHeader
147 {
148 	InvalidationChunk *cclist;	/* list of chunks holding catcache msgs */
149 	InvalidationChunk *rclist;	/* list of chunks holding relcache msgs */
150 } InvalidationListHeader;
151 
152 /*----------------
153  * Invalidation info is divided into two lists:
154  *	1) events so far in current command, not yet reflected to caches.
155  *	2) events in previous commands of current transaction; these have
156  *	   been reflected to local caches, and must be either broadcast to
157  *	   other backends or rolled back from local cache when we commit
158  *	   or abort the transaction.
159  * Actually, we need two such lists for each level of nested transaction,
160  * so that we can discard events from an aborted subtransaction.  When
161  * a subtransaction commits, we append its lists to the parent's lists.
162  *
163  * The relcache-file-invalidated flag can just be a simple boolean,
164  * since we only act on it at transaction commit; we don't care which
165  * command of the transaction set it.
166  *----------------
167  */
168 
169 typedef struct TransInvalidationInfo
170 {
171 	/* Back link to parent transaction's info */
172 	struct TransInvalidationInfo *parent;
173 
174 	/* Subtransaction nesting depth */
175 	int			my_level;
176 
177 	/* head of current-command event list */
178 	InvalidationListHeader CurrentCmdInvalidMsgs;
179 
180 	/* head of previous-commands event list */
181 	InvalidationListHeader PriorCmdInvalidMsgs;
182 
183 	/* init file must be invalidated? */
184 	bool		RelcacheInitFileInval;
185 } TransInvalidationInfo;
186 
187 static TransInvalidationInfo *transInvalInfo = NULL;
188 
189 static SharedInvalidationMessage *SharedInvalidMessagesArray;
190 static int	numSharedInvalidMessagesArray;
191 static int	maxSharedInvalidMessagesArray;
192 
193 
194 /*
195  * Dynamically-registered callback functions.  Current implementation
196  * assumes there won't be enough of these to justify a dynamically resizable
197  * array; it'd be easy to improve that if needed.
198  *
199  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
200  * syscache are linked into a list pointed to by syscache_callback_links[id].
201  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
202  */
203 
204 #define MAX_SYSCACHE_CALLBACKS 64
205 #define MAX_RELCACHE_CALLBACKS 10
206 
207 static struct SYSCACHECALLBACK
208 {
209 	int16		id;				/* cache number */
210 	int16		link;			/* next callback index+1 for same cache */
211 	SyscacheCallbackFunction function;
212 	Datum		arg;
213 }			syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
214 
215 static int16 syscache_callback_links[SysCacheSize];
216 
217 static int	syscache_callback_count = 0;
218 
219 static struct RELCACHECALLBACK
220 {
221 	RelcacheCallbackFunction function;
222 	Datum		arg;
223 }			relcache_callback_list[MAX_RELCACHE_CALLBACKS];
224 
225 static int	relcache_callback_count = 0;
226 
227 /* ----------------------------------------------------------------
228  *				Invalidation list support functions
229  *
230  * These three routines encapsulate processing of the "chunked"
231  * representation of what is logically just a list of messages.
232  * ----------------------------------------------------------------
233  */
234 
235 /*
236  * AddInvalidationMessage
237  *		Add an invalidation message to a list (of chunks).
238  *
239  * Note that we do not pay any great attention to maintaining the original
240  * ordering of the messages.
241  */
242 static void
243 AddInvalidationMessage(InvalidationChunk **listHdr,
244 					   SharedInvalidationMessage *msg)
245 {
246 	InvalidationChunk *chunk = *listHdr;
247 
248 	if (chunk == NULL)
249 	{
250 		/* First time through; create initial chunk */
251 #define FIRSTCHUNKSIZE 32
252 		chunk = (InvalidationChunk *)
253 			MemoryContextAlloc(CurTransactionContext,
254 							   offsetof(InvalidationChunk, msgs) +
255 							   FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
256 		chunk->nitems = 0;
257 		chunk->maxitems = FIRSTCHUNKSIZE;
258 		chunk->next = *listHdr;
259 		*listHdr = chunk;
260 	}
261 	else if (chunk->nitems >= chunk->maxitems)
262 	{
263 		/* Need another chunk; double size of last chunk */
264 		int			chunksize = 2 * chunk->maxitems;
265 
266 		chunk = (InvalidationChunk *)
267 			MemoryContextAlloc(CurTransactionContext,
268 							   offsetof(InvalidationChunk, msgs) +
269 							   chunksize * sizeof(SharedInvalidationMessage));
270 		chunk->nitems = 0;
271 		chunk->maxitems = chunksize;
272 		chunk->next = *listHdr;
273 		*listHdr = chunk;
274 	}
275 	/* Okay, add message to current chunk */
276 	chunk->msgs[chunk->nitems] = *msg;
277 	chunk->nitems++;
278 }
279 
280 /*
281  * Append one list of invalidation message chunks to another, resetting
282  * the source chunk-list pointer to NULL.
283  */
284 static void
285 AppendInvalidationMessageList(InvalidationChunk **destHdr,
286 							  InvalidationChunk **srcHdr)
287 {
288 	InvalidationChunk *chunk = *srcHdr;
289 
290 	if (chunk == NULL)
291 		return;					/* nothing to do */
292 
293 	while (chunk->next != NULL)
294 		chunk = chunk->next;
295 
296 	chunk->next = *destHdr;
297 
298 	*destHdr = *srcHdr;
299 
300 	*srcHdr = NULL;
301 }
302 
303 /*
304  * Process a list of invalidation messages.
305  *
306  * This is a macro that executes the given code fragment for each message in
307  * a message chunk list.  The fragment should refer to the message as *msg.
308  */
309 #define ProcessMessageList(listHdr, codeFragment) \
310 	do { \
311 		InvalidationChunk *_chunk; \
312 		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
313 		{ \
314 			int		_cindex; \
315 			for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
316 			{ \
317 				SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
318 				codeFragment; \
319 			} \
320 		} \
321 	} while (0)
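
/*
 * A minimal usage sketch for ProcessMessageList (variable names here are
 * hypothetical): count the relcache messages in a list.  Note that the code
 * fragment may also "return" to leave the calling function early, as
 * AddRelcacheInvalidationMessage() below does when it detects a duplicate.
 *
 *		int			nrelmsgs = 0;
 *
 *		ProcessMessageList(hdr->rclist,
 *						   if (msg->rc.id == SHAREDINVALRELCACHE_ID)
 *							   nrelmsgs++);
 */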
322 
323 /*
324  * Process a list of invalidation messages group-wise.
325  *
326  * As above, but the code fragment can handle an array of messages.
327  * The fragment should refer to the messages as msgs[], with n entries.
328  */
329 #define ProcessMessageListMulti(listHdr, codeFragment) \
330 	do { \
331 		InvalidationChunk *_chunk; \
332 		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
333 		{ \
334 			SharedInvalidationMessage *msgs = _chunk->msgs; \
335 			int		n = _chunk->nitems; \
336 			codeFragment; \
337 		} \
338 	} while (0)
339 
340 
341 /* ----------------------------------------------------------------
342  *				Invalidation set support functions
343  *
344  * These routines understand about the division of a logical invalidation
345  * list into separate physical lists for catcache and relcache entries.
346  * ----------------------------------------------------------------
347  */
348 
349 /*
350  * Add a catcache inval entry
351  */
352 static void
353 AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
354 							   int id, uint32 hashValue, Oid dbId)
355 {
356 	SharedInvalidationMessage msg;
357 
358 	Assert(id < CHAR_MAX);
359 	msg.cc.id = (int8) id;
360 	msg.cc.dbId = dbId;
361 	msg.cc.hashValue = hashValue;
362 
363 	/*
364 	 * Mark the padding bytes in SharedInvalidationMessage structs as
365 	 * defined.  Otherwise the sinvaladt.c ringbuffer, which is accessed by
366 	 * multiple processes, will cause spurious valgrind warnings about
367 	 * undefined memory being used. That's because valgrind remembers the
368 	 * undefined bytes from the last local process's store, not realizing that
369 	 * another process has written since, filling the previously uninitialized
370 	 * bytes.
371 	 */
372 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
373 
374 	AddInvalidationMessage(&hdr->cclist, &msg);
375 }
376 
377 /*
378  * Add a whole-catalog inval entry
379  */
380 static void
381 AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
382 							  Oid dbId, Oid catId)
383 {
384 	SharedInvalidationMessage msg;
385 
386 	msg.cat.id = SHAREDINVALCATALOG_ID;
387 	msg.cat.dbId = dbId;
388 	msg.cat.catId = catId;
389 	/* check AddCatcacheInvalidationMessage() for an explanation */
390 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
391 
392 	AddInvalidationMessage(&hdr->cclist, &msg);
393 }
394 
395 /*
396  * Add a relcache inval entry
397  */
398 static void
399 AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
400 							   Oid dbId, Oid relId)
401 {
402 	SharedInvalidationMessage msg;
403 
404 	/*
405 	 * Don't add a duplicate item. We assume dbId need not be checked because
406 	 * it will never change. InvalidOid for relId means all relations so we
407 	 * don't need to add individual ones when it is present.
408 	 */
409 	ProcessMessageList(hdr->rclist,
410 					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
411 						   (msg->rc.relId == relId ||
412 							msg->rc.relId == InvalidOid))
413 					   return);
414 
415 	/* OK, add the item */
416 	msg.rc.id = SHAREDINVALRELCACHE_ID;
417 	msg.rc.dbId = dbId;
418 	msg.rc.relId = relId;
419 	/* check AddCatcacheInvalidationMessage() for an explanation */
420 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
421 
422 	AddInvalidationMessage(&hdr->rclist, &msg);
423 }
424 
425 /*
426  * Add a snapshot inval entry
427  */
428 static void
429 AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
430 							   Oid dbId, Oid relId)
431 {
432 	SharedInvalidationMessage msg;
433 
434 	/* Don't add a duplicate item */
435 	/* We assume dbId need not be checked because it will never change */
436 	ProcessMessageList(hdr->rclist,
437 					   if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
438 						   msg->sn.relId == relId)
439 					   return);
440 
441 	/* OK, add the item */
442 	msg.sn.id = SHAREDINVALSNAPSHOT_ID;
443 	msg.sn.dbId = dbId;
444 	msg.sn.relId = relId;
445 	/* check AddCatcacheInvalidationMessage() for an explanation */
446 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
447 
448 	AddInvalidationMessage(&hdr->rclist, &msg);
449 }
450 
451 /*
452  * Append one list of invalidation messages to another, resetting
453  * the source list to empty.
454  */
455 static void
456 AppendInvalidationMessages(InvalidationListHeader *dest,
457 						   InvalidationListHeader *src)
458 {
459 	AppendInvalidationMessageList(&dest->cclist, &src->cclist);
460 	AppendInvalidationMessageList(&dest->rclist, &src->rclist);
461 }
462 
463 /*
464  * Execute the given function for all the messages in an invalidation list.
465  * The list is not altered.
466  *
467  * catcache entries are processed first, for reasons mentioned above.
468  */
469 static void
470 ProcessInvalidationMessages(InvalidationListHeader *hdr,
471 							void (*func) (SharedInvalidationMessage *msg))
472 {
473 	ProcessMessageList(hdr->cclist, func(msg));
474 	ProcessMessageList(hdr->rclist, func(msg));
475 }
476 
477 /*
478  * As above, but the function is able to process an array of messages
479  * rather than just one at a time.
480  */
481 static void
482 ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
483 								 void (*func) (const SharedInvalidationMessage *msgs, int n))
484 {
485 	ProcessMessageListMulti(hdr->cclist, func(msgs, n));
486 	ProcessMessageListMulti(hdr->rclist, func(msgs, n));
487 }
488 
489 /* ----------------------------------------------------------------
490  *					  private support functions
491  * ----------------------------------------------------------------
492  */
493 
494 /*
495  * RegisterCatcacheInvalidation
496  *
497  * Register an invalidation event for a catcache tuple entry.
498  */
499 static void
500 RegisterCatcacheInvalidation(int cacheId,
501 							 uint32 hashValue,
502 							 Oid dbId)
503 {
504 	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
505 								   cacheId, hashValue, dbId);
506 }
507 
508 /*
509  * RegisterCatalogInvalidation
510  *
511  * Register an invalidation event for all catcache entries from a catalog.
512  */
513 static void
514 RegisterCatalogInvalidation(Oid dbId, Oid catId)
515 {
516 	AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
517 								  dbId, catId);
518 }
519 
520 /*
521  * RegisterRelcacheInvalidation
522  *
523  * As above, but register a relcache invalidation event.
524  */
525 static void
526 RegisterRelcacheInvalidation(Oid dbId, Oid relId)
527 {
528 	AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
529 								   dbId, relId);
530 
531 	/*
532 	 * Most of the time, relcache invalidation is associated with system
533 	 * catalog updates, but there are a few cases where it isn't.  Quick hack
534 	 * to ensure that the next CommandCounterIncrement() will think that we
535 	 * need to do CommandEndInvalidationMessages().
536 	 */
537 	(void) GetCurrentCommandId(true);
538 
539 	/*
540 	 * If the relation being invalidated is one of those cached in a relcache
541 	 * init file, mark that we need to zap that file at commit. For simplicity
542 	 * invalidations for a specific database always invalidate the shared file
543 	 * as well.  Also zap when we are invalidating whole relcache.
544 	 */
545 	if (relId == InvalidOid || RelationIdIsInInitFile(relId))
546 		transInvalInfo->RelcacheInitFileInval = true;
547 }
548 
549 /*
550  * RegisterSnapshotInvalidation
551  *
552  * Register an invalidation event for MVCC scans against a given catalog.
553  * Only needed for catalogs that don't have catcaches.
554  */
555 static void
556 RegisterSnapshotInvalidation(Oid dbId, Oid relId)
557 {
558 	AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
559 								   dbId, relId);
560 }
561 
562 /*
563  * LocalExecuteInvalidationMessage
564  *
565  * Process a single invalidation message (which could be of any type).
566  * Only the local caches are flushed; this does not transmit the message
567  * to other backends.
568  */
569 void
570 LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
571 {
572 	if (msg->id >= 0)
573 	{
574 		if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
575 		{
576 			InvalidateCatalogSnapshot();
577 
578 			SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
579 
580 			CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
581 		}
582 	}
583 	else if (msg->id == SHAREDINVALCATALOG_ID)
584 	{
585 		if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
586 		{
587 			InvalidateCatalogSnapshot();
588 
589 			CatalogCacheFlushCatalog(msg->cat.catId);
590 
591 			/* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
592 		}
593 	}
594 	else if (msg->id == SHAREDINVALRELCACHE_ID)
595 	{
596 		if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
597 		{
598 			int			i;
599 
600 			if (msg->rc.relId == InvalidOid)
601 				RelationCacheInvalidate(false);
602 			else
603 				RelationCacheInvalidateEntry(msg->rc.relId);
604 
605 			for (i = 0; i < relcache_callback_count; i++)
606 			{
607 				struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
608 
609 				ccitem->function(ccitem->arg, msg->rc.relId);
610 			}
611 		}
612 	}
613 	else if (msg->id == SHAREDINVALSMGR_ID)
614 	{
615 		/*
616 		 * We could have smgr entries for relations of other databases, so no
617 		 * short-circuit test is possible here.
618 		 */
619 		RelFileNodeBackend rnode;
620 
621 		rnode.node = msg->sm.rnode;
622 		rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
623 		smgrclosenode(rnode);
624 	}
625 	else if (msg->id == SHAREDINVALRELMAP_ID)
626 	{
627 		/* We only care about our own database and shared catalogs */
628 		if (msg->rm.dbId == InvalidOid)
629 			RelationMapInvalidate(true);
630 		else if (msg->rm.dbId == MyDatabaseId)
631 			RelationMapInvalidate(false);
632 	}
633 	else if (msg->id == SHAREDINVALSNAPSHOT_ID)
634 	{
635 		/* We only care about our own database and shared catalogs */
636 		if (msg->sn.dbId == InvalidOid)
637 			InvalidateCatalogSnapshot();
638 		else if (msg->sn.dbId == MyDatabaseId)
639 			InvalidateCatalogSnapshot();
640 	}
641 	else
642 		elog(FATAL, "unrecognized SI message ID: %d", msg->id);
643 }
644 
645 /*
646  *		InvalidateSystemCaches
647  *
648  *		This blows away all tuples in the system catalog caches and
649  *		all the cached relation descriptors and smgr cache entries.
650  *		Relation descriptors that have positive refcounts are then rebuilt.
651  *
652  *		We call this when we see a shared-inval-queue overflow signal,
653  *		since that tells us we've lost some shared-inval messages and hence
654  *		don't know what needs to be invalidated.
655  */
656 void
657 InvalidateSystemCaches(void)
658 {
659 	InvalidateSystemCachesExtended(false);
660 }
661 
662 void
663 InvalidateSystemCachesExtended(bool debug_discard)
664 {
665 	int			i;
666 
667 	InvalidateCatalogSnapshot();
668 	ResetCatalogCaches();
669 	RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
670 
671 	for (i = 0; i < syscache_callback_count; i++)
672 	{
673 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
674 
675 		ccitem->function(ccitem->arg, ccitem->id, 0);
676 	}
677 
678 	for (i = 0; i < relcache_callback_count; i++)
679 	{
680 		struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
681 
682 		ccitem->function(ccitem->arg, InvalidOid);
683 	}
684 }
685 
686 
687 /* ----------------------------------------------------------------
688  *					  public functions
689  * ----------------------------------------------------------------
690  */
691 
692 /*
693  * AcceptInvalidationMessages
694  *		Read and process invalidation messages from the shared invalidation
695  *		message queue.
696  *
697  * Note:
698  *		This should be called as the first step in processing a transaction.
699  */
700 void
701 AcceptInvalidationMessages(void)
702 {
703 	ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
704 								 InvalidateSystemCaches);
705 
706 	/*
707 	 * Test code to force cache flushes anytime a flush could happen.
708 	 *
709 	 * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
710 	 * fairly thorough test that the system contains no cache-flush hazards.
711 	 * However, it also makes the system unbelievably slow --- the regression
712 	 * tests take about 100 times longer than normal.
713 	 *
714 	 * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
715 	 * slows things by at least a factor of 10000, so I wouldn't suggest
716 	 * trying to run the entire regression tests that way.  It's useful to try
717 	 * a few simple tests, to make sure that cache reload isn't subject to
718 	 * internal cache-flush hazards, but after you've done a few thousand
719 	 * recursive reloads it's unlikely you'll learn more.
720 	 */
721 #if defined(CLOBBER_CACHE_ALWAYS)
722 	{
723 		static bool in_recursion = false;
724 
725 		if (!in_recursion)
726 		{
727 			in_recursion = true;
728 			InvalidateSystemCaches();
729 			in_recursion = false;
730 		}
731 	}
732 #elif defined(CLOBBER_CACHE_RECURSIVELY)
733 	{
734 		static int	recursion_depth = 0;
735 
736 		/* Maximum depth is arbitrary depending on your threshold of pain */
737 		if (recursion_depth < 3)
738 		{
739 			recursion_depth++;
740 			InvalidateSystemCachesExtended(true);
741 			recursion_depth--;
742 		}
743 	}
744 #endif
745 }
746 
747 /*
748  * PrepareInvalidationState
749  *		Initialize inval lists for the current (sub)transaction.
750  */
751 static void
752 PrepareInvalidationState(void)
753 {
754 	TransInvalidationInfo *myInfo;
755 
756 	if (transInvalInfo != NULL &&
757 		transInvalInfo->my_level == GetCurrentTransactionNestLevel())
758 		return;
759 
760 	myInfo = (TransInvalidationInfo *)
761 		MemoryContextAllocZero(TopTransactionContext,
762 							   sizeof(TransInvalidationInfo));
763 	myInfo->parent = transInvalInfo;
764 	myInfo->my_level = GetCurrentTransactionNestLevel();
765 
766 	/*
767 	 * If there's any previous entry, this one should be for a deeper nesting
768 	 * level.
769 	 */
770 	Assert(transInvalInfo == NULL ||
771 		   myInfo->my_level > transInvalInfo->my_level);
772 
773 	transInvalInfo = myInfo;
774 }
775 
776 /*
777  * PostPrepare_Inval
778  *		Clean up after successful PREPARE.
779  *
780  * Here, we want to act as though the transaction aborted, so that we will
781  * undo any syscache changes it made, thereby bringing us into sync with the
782  * outside world, which doesn't believe the transaction committed yet.
783  *
784  * If the prepared transaction is later aborted, there is nothing more to
785  * do; if it commits, we will receive the consequent inval messages just
786  * like everyone else.
787  */
788 void
789 PostPrepare_Inval(void)
790 {
791 	AtEOXact_Inval(false);
792 }
793 
794 /*
795  * Collect invalidation messages into SharedInvalidMessagesArray array.
796  */
797 static void
798 MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
799 {
800 	/*
801 	 * Initialise array first time through in each commit
802 	 */
803 	if (SharedInvalidMessagesArray == NULL)
804 	{
805 		maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
806 		numSharedInvalidMessagesArray = 0;
807 
808 		/*
809 		 * Although this is being palloc'd we don't actually free it directly.
810 		 * We're so close to EOXact that we know we're going to lose it anyhow.
811 		 */
812 		SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
813 											* sizeof(SharedInvalidationMessage));
814 	}
815 
816 	if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
817 	{
818 		while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
819 			maxSharedInvalidMessagesArray *= 2;
820 
821 		SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
822 											  maxSharedInvalidMessagesArray
823 											  * sizeof(SharedInvalidationMessage));
824 	}
825 
826 	/*
827 	 * Append the next chunk onto the array
828 	 */
829 	memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
830 		   msgs, n * sizeof(SharedInvalidationMessage));
831 	numSharedInvalidMessagesArray += n;
832 }
833 
834 /*
835  * xactGetCommittedInvalidationMessages() is executed by
836  * RecordTransactionCommit() to add invalidation messages onto the
837  * commit record. This applies only to commit message types, never to
838  * abort records. Must always run before AtEOXact_Inval(), since that
839  * removes the data we need to see.
840  *
841  * Remember that this runs before we have officially committed, so we
842  * must not do anything here to change what might occur *if* we should
843  * fail between here and the actual commit.
844  *
845  * see also xact_redo_commit() and xact_desc_commit()
846  */
847 int
848 xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
849 									 bool *RelcacheInitFileInval)
850 {
851 	MemoryContext oldcontext;
852 
853 	/* Quick exit if we haven't done anything with invalidation messages. */
854 	if (transInvalInfo == NULL)
855 	{
856 		*RelcacheInitFileInval = false;
857 		*msgs = NULL;
858 		return 0;
859 	}
860 
861 	/* Must be at top of stack */
862 	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
863 
864 	/*
865 	 * Relcache init file invalidation requires processing both before and
866 	 * after we send the SI messages.  However, we need not do anything unless
867 	 * we committed.
868 	 */
869 	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
870 
871 	/*
872 	 * Walk through TransInvalidationInfo to collect all the messages into a
873 	 * single contiguous array of invalidation messages. It must be contiguous
874 	 * so we can copy it directly into the WAL record.  Maintain the order in
875 	 * which they would be processed by AtEOXact_Inval(), so that the behaviour
876 	 * emulated in redo is as similar as possible to the original.  We want the
877 	 * same bugs, if any, not new ones.
878 	 */
879 	oldcontext = MemoryContextSwitchTo(CurTransactionContext);
880 
881 	ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
882 									 MakeSharedInvalidMessagesArray);
883 	ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
884 									 MakeSharedInvalidMessagesArray);
885 	MemoryContextSwitchTo(oldcontext);
886 
887 	Assert(!(numSharedInvalidMessagesArray > 0 &&
888 			 SharedInvalidMessagesArray == NULL));
889 
890 	*msgs = SharedInvalidMessagesArray;
891 
892 	return numSharedInvalidMessagesArray;
893 }
894 
895 /*
896  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
897  * standby_redo() to process invalidation messages. Currently that happens
898  * only at end-of-xact.
899  *
900  * Relcache init file invalidation requires processing both
901  * before and after we send the SI messages. See AtEOXact_Inval()
902  */
903 void
904 ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
905 									 int nmsgs, bool RelcacheInitFileInval,
906 									 Oid dbid, Oid tsid)
907 {
908 	if (nmsgs <= 0)
909 		return;
910 
911 	elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
912 		 (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
913 
914 	if (RelcacheInitFileInval)
915 	{
916 		elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
917 			 dbid);
918 
919 		/*
920 		 * RelationCacheInitFilePreInvalidate, when the invalidation message
921 		 * is for a specific database, requires DatabasePath to be set, but we
922 		 * should not use SetDatabasePath during recovery, since it is
923 		 * intended to be used only once by normal backends.  Hence, a quick
924 		 * hack: set DatabasePath directly then unset after use.
925 		 */
926 		if (OidIsValid(dbid))
927 			DatabasePath = GetDatabasePath(dbid, tsid);
928 
929 		RelationCacheInitFilePreInvalidate();
930 
931 		if (OidIsValid(dbid))
932 		{
933 			pfree(DatabasePath);
934 			DatabasePath = NULL;
935 		}
936 	}
937 
938 	SendSharedInvalidMessages(msgs, nmsgs);
939 
940 	if (RelcacheInitFileInval)
941 		RelationCacheInitFilePostInvalidate();
942 }
943 
944 /*
945  * AtEOXact_Inval
946  *		Process queued-up invalidation messages at end of main transaction.
947  *
948  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
949  * to the shared invalidation message queue.  Note that these will be read
950  * not only by other backends, but also by our own backend at the next
951  * transaction start (via AcceptInvalidationMessages).  This means that
952  * we can skip immediate local processing of anything that's still in
953  * CurrentCmdInvalidMsgs, and just send that list out too.
954  *
955  * If not isCommit, we are aborting, and must locally process the messages
956  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
957  * since they'll not have seen our changed tuples anyway.  We can forget
958  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
959  * the caches yet.
960  *
961  * In any case, reset the various lists to empty.  We need not physically
962  * free memory here, since TopTransactionContext is about to be emptied
963  * anyway.
964  *
965  * Note:
966  *		This should be called as the last step in processing a transaction.
967  */
968 void
969 AtEOXact_Inval(bool isCommit)
970 {
971 	/* Quick exit if no messages */
972 	if (transInvalInfo == NULL)
973 		return;
974 
975 	/* Must be at top of stack */
976 	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
977 
978 	if (isCommit)
979 	{
980 		/*
981 		 * Relcache init file invalidation requires processing both before and
982 		 * after we send the SI messages.  However, we need not do anything
983 		 * unless we committed.
984 		 */
985 		if (transInvalInfo->RelcacheInitFileInval)
986 			RelationCacheInitFilePreInvalidate();
987 
988 		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
989 								   &transInvalInfo->CurrentCmdInvalidMsgs);
990 
991 		ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
992 										 SendSharedInvalidMessages);
993 
994 		if (transInvalInfo->RelcacheInitFileInval)
995 			RelationCacheInitFilePostInvalidate();
996 	}
997 	else
998 	{
999 		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1000 									LocalExecuteInvalidationMessage);
1001 	}
1002 
1003 	/* Need not free anything explicitly */
1004 	transInvalInfo = NULL;
1005 	SharedInvalidMessagesArray = NULL;
1006 	numSharedInvalidMessagesArray = 0;
1007 }
1008 
1009 /*
1010  * AtEOSubXact_Inval
1011  *		Process queued-up invalidation messages at end of subtransaction.
1012  *
1013  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
1014  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
1015  * parent's PriorCmdInvalidMsgs list.
1016  *
1017  * If not isCommit, we are aborting, and must locally process the messages
1018  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
1019  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
1020  * touched the caches yet.
1021  *
1022  * In any case, pop the transaction stack.  We need not physically free memory
1023  * here, since CurTransactionContext is about to be emptied anyway
1024  * (if aborting).  Beware of the possibility of aborting the same nesting
1025  * level twice, though.
1026  */
1027 void
1028 AtEOSubXact_Inval(bool isCommit)
1029 {
1030 	int			my_level;
1031 	TransInvalidationInfo *myInfo = transInvalInfo;
1032 
1033 	/* Quick exit if no messages. */
1034 	if (myInfo == NULL)
1035 		return;
1036 
1037 	/* Also bail out quickly if messages are not for this level. */
1038 	my_level = GetCurrentTransactionNestLevel();
1039 	if (myInfo->my_level != my_level)
1040 	{
1041 		Assert(myInfo->my_level < my_level);
1042 		return;
1043 	}
1044 
1045 	if (isCommit)
1046 	{
1047 		/* If CurrentCmdInvalidMsgs still has anything, fix it */
1048 		CommandEndInvalidationMessages();
1049 
1050 		/*
1051 		 * We create invalidation stack entries lazily, so the parent might
1052 		 * not have one.  Instead of creating one, moving all the data over,
1053 		 * and then freeing our own, we can just adjust the level of our own
1054 		 * entry.
1055 		 */
1056 		if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1057 		{
1058 			myInfo->my_level--;
1059 			return;
1060 		}
1061 
1062 		/* Pass up my inval messages to parent */
1063 		AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
1064 								   &myInfo->PriorCmdInvalidMsgs);
1065 
1066 		/* Pending relcache inval becomes parent's problem too */
1067 		if (myInfo->RelcacheInitFileInval)
1068 			myInfo->parent->RelcacheInitFileInval = true;
1069 
1070 		/* Pop the transaction state stack */
1071 		transInvalInfo = myInfo->parent;
1072 
1073 		/* Need not free anything else explicitly */
1074 		pfree(myInfo);
1075 	}
1076 	else
1077 	{
1078 		ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
1079 									LocalExecuteInvalidationMessage);
1080 
1081 		/* Pop the transaction state stack */
1082 		transInvalInfo = myInfo->parent;
1083 
1084 		/* Need not free anything else explicitly */
1085 		pfree(myInfo);
1086 	}
1087 }
1088 
1089 /*
1090  * CommandEndInvalidationMessages
1091  *		Process queued-up invalidation messages at end of one command
1092  *		in a transaction.
1093  *
1094  * Here, we send no messages to the shared queue, since we don't know yet if
1095  * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
1096  * list, so as to flush our caches of any entries we have outdated in the
1097  * current command.  We then move the current-cmd list over to become part
1098  * of the prior-cmds list.
1099  *
1100  * Note:
1101  *		This should be called during CommandCounterIncrement(),
1102  *		after we have advanced the command ID.
1103  */
1104 void
1105 CommandEndInvalidationMessages(void)
1106 {
1107 	/*
1108 	 * You might think this shouldn't be called outside any transaction, but
1109 	 * bootstrap does it, and also ABORT issued when not in a transaction. So
1110 	 * just quietly return if no state to work on.
1111 	 */
1112 	if (transInvalInfo == NULL)
1113 		return;
1114 
1115 	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
1116 								LocalExecuteInvalidationMessage);
1117 	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1118 							   &transInvalInfo->CurrentCmdInvalidMsgs);
1119 }
1120 
1121 
1122 /*
1123  * CacheInvalidateHeapTuple
1124  *		Register the given tuple for invalidation at end of command
1125  *		(ie, current command is creating or outdating this tuple).
1126  *		Also, detect whether a relcache invalidation is implied.
1127  *
1128  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1129  * For an update, we are called just once, with tuple being the old tuple
1130  * version and newtuple the new version.  This allows avoidance of duplicate
1131  * effort during an update.
1132  */
1133 void
1134 CacheInvalidateHeapTuple(Relation relation,
1135 						 HeapTuple tuple,
1136 						 HeapTuple newtuple)
1137 {
1138 	Oid			tupleRelId;
1139 	Oid			databaseId;
1140 	Oid			relationId;
1141 
1142 	/* Do nothing during bootstrap */
1143 	if (IsBootstrapProcessingMode())
1144 		return;
1145 
1146 	/*
1147 	 * We only need to worry about invalidation for tuples that are in system
1148 	 * catalogs; user-relation tuples are never in catcaches and can't affect
1149 	 * the relcache either.
1150 	 */
1151 	if (!IsCatalogRelation(relation))
1152 		return;
1153 
1154 	/*
1155 	 * IsCatalogRelation() will return true for TOAST tables of system
1156 	 * catalogs, but we don't care about those, either.
1157 	 */
1158 	if (IsToastRelation(relation))
1159 		return;
1160 
1161 	/*
1162 	 * If we're not prepared to queue invalidation messages for this
1163 	 * subtransaction level, get ready now.
1164 	 */
1165 	PrepareInvalidationState();
1166 
1167 	/*
1168 	 * First let the catcache do its thing
1169 	 */
1170 	tupleRelId = RelationGetRelid(relation);
1171 	if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1172 	{
1173 		databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1174 		RegisterSnapshotInvalidation(databaseId, tupleRelId);
1175 	}
1176 	else
1177 		PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1178 									  RegisterCatcacheInvalidation);
1179 
1180 	/*
1181 	 * Now, is this tuple one of the primary definers of a relcache entry? See
1182 	 * comments in file header for deeper explanation.
1183 	 *
1184 	 * Note we ignore newtuple here; we assume an update cannot move a tuple
1185 	 * from being part of one relcache entry to being part of another.
1186 	 */
1187 	if (tupleRelId == RelationRelationId)
1188 	{
1189 		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1190 
1191 		relationId = classtup->oid;
1192 		if (classtup->relisshared)
1193 			databaseId = InvalidOid;
1194 		else
1195 			databaseId = MyDatabaseId;
1196 	}
1197 	else if (tupleRelId == AttributeRelationId)
1198 	{
1199 		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1200 
1201 		relationId = atttup->attrelid;
1202 
1203 		/*
1204 		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1205 		 * even if the rel in question is shared (which we can't easily tell).
1206 		 * This essentially means that only backends in this same database
1207 		 * will react to the relcache flush request.  This is in fact
1208 		 * appropriate, since only those backends could see our pg_attribute
1209 		 * change anyway.  It looks a bit ugly though.  (In practice, shared
1210 		 * relations can't have schema changes after bootstrap, so we should
1211 		 * never come here for a shared rel anyway.)
1212 		 */
1213 		databaseId = MyDatabaseId;
1214 	}
1215 	else if (tupleRelId == IndexRelationId)
1216 	{
1217 		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1218 
1219 		/*
1220 		 * When a pg_index row is updated, we should send out a relcache inval
1221 		 * for the index relation.  As above, we don't know the shared status
1222 		 * of the index, but in practice it doesn't matter since indexes of
1223 		 * shared catalogs can't have such updates.
1224 		 */
1225 		relationId = indextup->indexrelid;
1226 		databaseId = MyDatabaseId;
1227 	}
1228 	else if (tupleRelId == ConstraintRelationId)
1229 	{
1230 		Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
1231 
1232 		/*
1233 		 * Foreign keys are part of relcache entries, too, so send out an
1234 		 * inval for the table that the FK applies to.
1235 		 */
1236 		if (constrtup->contype == CONSTRAINT_FOREIGN &&
1237 			OidIsValid(constrtup->conrelid))
1238 		{
1239 			relationId = constrtup->conrelid;
1240 			databaseId = MyDatabaseId;
1241 		}
1242 		else
1243 			return;
1244 	}
1245 	else
1246 		return;
1247 
1248 	/*
1249 	 * Yes.  We need to register a relcache invalidation event.
1250 	 */
1251 	RegisterRelcacheInvalidation(databaseId, relationId);
1252 }
1253 
1254 /*
1255  * CacheInvalidateCatalog
1256  *		Register invalidation of the whole content of a system catalog.
1257  *
1258  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1259  * changed any tuples as moved them around.  Some uses of catcache entries
1260  * expect their TIDs to be correct, so we have to blow away the entries.
1261  *
1262  * Note: we expect caller to verify that the rel actually is a system
1263  * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
1264  */
1265 void
1266 CacheInvalidateCatalog(Oid catalogId)
1267 {
1268 	Oid			databaseId;
1269 
1270 	PrepareInvalidationState();
1271 
1272 	if (IsSharedRelation(catalogId))
1273 		databaseId = InvalidOid;
1274 	else
1275 		databaseId = MyDatabaseId;
1276 
1277 	RegisterCatalogInvalidation(databaseId, catalogId);
1278 }
1279 
1280 /*
1281  * CacheInvalidateRelcache
1282  *		Register invalidation of the specified relation's relcache entry
1283  *		at end of command.
1284  *
1285  * This is used in places that need to force relcache rebuild but aren't
1286  * changing any of the tuples recognized as contributors to the relcache
1287  * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
1288  */
1289 void
1290 CacheInvalidateRelcache(Relation relation)
1291 {
1292 	Oid			databaseId;
1293 	Oid			relationId;
1294 
1295 	PrepareInvalidationState();
1296 
1297 	relationId = RelationGetRelid(relation);
1298 	if (relation->rd_rel->relisshared)
1299 		databaseId = InvalidOid;
1300 	else
1301 		databaseId = MyDatabaseId;
1302 
1303 	RegisterRelcacheInvalidation(databaseId, relationId);
1304 }
1305 
1306 /*
1307  * CacheInvalidateRelcacheAll
1308  *		Register invalidation of the whole relcache at the end of command.
1309  *
1310  * This is used by ALTER PUBLICATION, since changes in publications may
1311  * affect a large number of tables.
1312  */
1313 void
1314 CacheInvalidateRelcacheAll(void)
1315 {
1316 	PrepareInvalidationState();
1317 
1318 	RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
1319 }
1320 
1321 /*
1322  * CacheInvalidateRelcacheByTuple
1323  *		As above, but relation is identified by passing its pg_class tuple.
1324  */
1325 void
1326 CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
1327 {
1328 	Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1329 	Oid			databaseId;
1330 	Oid			relationId;
1331 
1332 	PrepareInvalidationState();
1333 
1334 	relationId = classtup->oid;
1335 	if (classtup->relisshared)
1336 		databaseId = InvalidOid;
1337 	else
1338 		databaseId = MyDatabaseId;
1339 	RegisterRelcacheInvalidation(databaseId, relationId);
1340 }
1341 
1342 /*
1343  * CacheInvalidateRelcacheByRelid
1344  *		As above, but relation is identified by passing its OID.
1345  *		This is the least efficient of the three options; use one of
1346  *		the above routines if you have a Relation or pg_class tuple.
1347  */
1348 void
1349 CacheInvalidateRelcacheByRelid(Oid relid)
1350 {
1351 	HeapTuple	tup;
1352 
1353 	PrepareInvalidationState();
1354 
1355 	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1356 	if (!HeapTupleIsValid(tup))
1357 		elog(ERROR, "cache lookup failed for relation %u", relid);
1358 	CacheInvalidateRelcacheByTuple(tup);
1359 	ReleaseSysCache(tup);
1360 }
1361 
1362 
1363 /*
1364  * CacheInvalidateSmgr
1365  *		Register invalidation of smgr references to a physical relation.
1366  *
1367  * Sending this type of invalidation msg forces other backends to close open
1368  * smgr entries for the rel.  This should be done to flush dangling open-file
1369  * references when the physical rel is being dropped or truncated.  Because
1370  * these are nontransactional (i.e., not-rollback-able) operations, we just
1371  * send the inval message immediately without any queuing.
1372  *
1373  * Note: in most cases there will have been a relcache flush issued against
1374  * the rel at the logical level.  We need a separate smgr-level flush because
1375  * it is possible for backends to have open smgr entries for rels they don't
1376  * have a relcache entry for, e.g. because the only thing they ever did with
1377  * the rel is write out dirty shared buffers.
1378  *
1379  * Note: because these messages are nontransactional, they won't be captured
1380  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
1381  * should happen in low-level smgr.c routines, which are executed while
1382  * replaying WAL as well as when creating it.
1383  *
1384  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1385  * three bytes of the backend ID using what would otherwise be padding space.
1386  * Thus, the maximum possible backend ID is 2^23-1.
1387  */
1388 void
1389 CacheInvalidateSmgr(RelFileNodeBackend rnode)
1390 {
1391 	SharedInvalidationMessage msg;
1392 
1393 	msg.sm.id = SHAREDINVALSMGR_ID;
1394 	msg.sm.backend_hi = rnode.backend >> 16;
1395 	msg.sm.backend_lo = rnode.backend & 0xffff;
1396 	msg.sm.rnode = rnode.node;
1397 	/* check AddCatcacheInvalidationMessage() for an explanation */
1398 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1399 
1400 	SendSharedInvalidMessages(&msg, 1);
1401 }
1402 
1403 /*
1404  * CacheInvalidateRelmap
1405  *		Register invalidation of the relation mapping for a database,
1406  *		or for the shared catalogs if databaseId is zero.
1407  *
1408  * Sending this type of invalidation msg forces other backends to re-read
1409  * the indicated relation mapping file.  It is also necessary to send a
1410  * relcache inval for the specific relations whose mapping has been altered,
1411  * else the relcache won't get updated with the new filenode data.
1412  *
1413  * Note: because these messages are nontransactional, they won't be captured
1414  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
1415  * should happen in low-level relmapper.c routines, which are executed while
1416  * replaying WAL as well as when creating it.
1417  */
1418 void
1419 CacheInvalidateRelmap(Oid databaseId)
1420 {
1421 	SharedInvalidationMessage msg;
1422 
1423 	msg.rm.id = SHAREDINVALRELMAP_ID;
1424 	msg.rm.dbId = databaseId;
1425 	/* check AddCatcacheInvalidationMessage() for an explanation */
1426 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1427 
1428 	SendSharedInvalidMessages(&msg, 1);
1429 }
1430 
1431 
1432 /*
1433  * CacheRegisterSyscacheCallback
1434  *		Register the specified function to be called for all future
1435  *		invalidation events in the specified cache.  The cache ID and the
1436  *		hash value of the tuple being invalidated will be passed to the
1437  *		function.
1438  *
1439  * NOTE: Hash value zero will be passed if a cache reset request is received.
1440  * In this case the called routines should flush all cached state.
1441  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1442  * worth troubling over, especially since most of the current callees just
1443  * flush all cached state anyway.
1444  */
1445 void
1446 CacheRegisterSyscacheCallback(int cacheid,
1447 							  SyscacheCallbackFunction func,
1448 							  Datum arg)
1449 {
1450 	if (cacheid < 0 || cacheid >= SysCacheSize)
1451 		elog(FATAL, "invalid cache ID: %d", cacheid);
1452 	if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
1453 		elog(FATAL, "out of syscache_callback_list slots");
1454 
1455 	if (syscache_callback_links[cacheid] == 0)
1456 	{
1457 		/* first callback for this cache */
1458 		syscache_callback_links[cacheid] = syscache_callback_count + 1;
1459 	}
1460 	else
1461 	{
1462 		/* add to end of chain, so that older callbacks are called first */
1463 		int			i = syscache_callback_links[cacheid] - 1;
1464 
1465 		while (syscache_callback_list[i].link > 0)
1466 			i = syscache_callback_list[i].link - 1;
1467 		syscache_callback_list[i].link = syscache_callback_count + 1;
1468 	}
1469 
1470 	syscache_callback_list[syscache_callback_count].id = cacheid;
1471 	syscache_callback_list[syscache_callback_count].link = 0;
1472 	syscache_callback_list[syscache_callback_count].function = func;
1473 	syscache_callback_list[syscache_callback_count].arg = arg;
1474 
1475 	++syscache_callback_count;
1476 }
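
/*
 * Hypothetical usage sketch for the registration API above (the callback
 * name is illustrative, not a real caller): a module registers once, and its
 * callback must be prepared to flush all relevant cached state when it is
 * passed hash value zero (cache reset), not just a single matching entry.
 *
 *		CacheRegisterSyscacheCallback(PROCOID, my_syscache_callback, (Datum) 0);
 *
 *		static void
 *		my_syscache_callback(Datum arg, int cacheid, uint32 hashvalue)
 *		{
 *			if (hashvalue == 0)
 *				... flush all cached state derived from pg_proc ...
 *			else
 *				... flush entries whose catcache hash matches hashvalue ...
 *		}
 */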
1477 
1478 /*
1479  * CacheRegisterRelcacheCallback
1480  *		Register the specified function to be called for all future
1481  *		relcache invalidation events.  The OID of the relation being
1482  *		invalidated will be passed to the function.
1483  *
1484  * NOTE: InvalidOid will be passed if a cache reset request is received.
1485  * In this case the called routines should flush all cached state.
1486  */
1487 void
1488 CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
1489 							  Datum arg)
1490 {
1491 	if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
1492 		elog(FATAL, "out of relcache_callback_list slots");
1493 
1494 	relcache_callback_list[relcache_callback_count].function = func;
1495 	relcache_callback_list[relcache_callback_count].arg = arg;
1496 
1497 	++relcache_callback_count;
1498 }
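
/*
 * Likewise, a hypothetical relcache-callback registration (function name
 * illustrative only).  The callback receives the OID of the invalidated
 * relation, or InvalidOid on a cache reset, and should drop dependent
 * cached state in either case.
 *
 *		CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
 *
 *		static void
 *		my_relcache_callback(Datum arg, Oid relid)
 *		{
 *			... forget state that depends on relid, or on any relation
 *			... when relid is InvalidOid
 *		}
 */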
1499 
1500 /*
1501  * CallSyscacheCallbacks
1502  *
1503  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1504  * this module from knowing which catcache IDs correspond to which catalogs.
1505  */
1506 void
1507 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1508 {
1509 	int			i;
1510 
1511 	if (cacheid < 0 || cacheid >= SysCacheSize)
1512 		elog(ERROR, "invalid cache ID: %d", cacheid);
1513 
1514 	i = syscache_callback_links[cacheid] - 1;
1515 	while (i >= 0)
1516 	{
1517 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1518 
1519 		Assert(ccitem->id == cacheid);
1520 		ccitem->function(ccitem->arg, cacheid, hashvalue);
1521 		i = ccitem->link - 1;
1522 	}
1523 }
1524