1 /*-------------------------------------------------------------------------
2  *
3  * inval.c
4  *	  POSTGRES cache invalidation dispatcher code.
5  *
6  *	This is subtle stuff, so pay attention:
7  *
8  *	When a tuple is updated or deleted, our standard visibility rules
9  *	consider that it is *still valid* so long as we are in the same command,
10  *	ie, until the next CommandCounterIncrement() or transaction commit.
11  *	(See access/heap/heapam_visibility.c, and note that system catalogs are
12  *  generally scanned under the most current snapshot available, rather than
13  *  the transaction snapshot.)	At the command boundary, the old tuple stops
14  *	being valid and the new version, if any, becomes valid.  Therefore,
15  *	we cannot simply flush a tuple from the system caches during heap_update()
16  *	or heap_delete().  The tuple is still good at that point; what's more,
17  *	even if we did flush it, it might be reloaded into the caches by a later
18  *	request in the same command.  So the correct behavior is to keep a list
19  *	of outdated (updated/deleted) tuples and then do the required cache
20  *	flushes at the next command boundary.  We must also keep track of
21  *	inserted tuples so that we can flush "negative" cache entries that match
22  *	the new tuples; again, that mustn't happen until end of command.
23  *
24  *	Once we have finished the command, we still need to remember inserted
25  *	tuples (including new versions of updated tuples), so that we can flush
26  *	them from the caches if we abort the transaction.  Similarly, we'd better
27  *	be able to flush "negative" cache entries that may have been loaded in
28  *	place of deleted tuples, so we still need the deleted ones too.
29  *
30  *	If we successfully complete the transaction, we have to broadcast all
31  *	these invalidation events to other backends (via the SI message queue)
32  *	so that they can flush obsolete entries from their caches.  Note we have
33  *	to record the transaction commit before sending SI messages, otherwise
34  *	the other backends won't see our updated tuples as good.
35  *
36  *	When a subtransaction aborts, we can process and discard any events
37  *	it has queued.  When a subtransaction commits, we just add its events
38  *	to the pending lists of the parent transaction.
39  *
40  *	In short, we need to remember until xact end every insert or delete
41  *	of a tuple that might be in the system caches.  Updates are treated as
42  *	two events, delete + insert, for simplicity.  (If the update doesn't
43  *	change the tuple hash value, catcache.c optimizes this into one event.)
44  *
45  *	We do not need to register EVERY tuple operation in this way, just those
46  *	on tuples in relations that have associated catcaches.  We do, however,
47  *	have to register every operation on every tuple that *could* be in a
48  *	catcache, whether or not it currently is in our cache.  Also, if the
49  *	tuple is in a relation that has multiple catcaches, we need to register
50  *	an invalidation message for each such catcache.  catcache.c's
51  *	PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52  *	catcaches may need invalidation for a given tuple.
53  *
54  *	Also, whenever we see an operation on a pg_class, pg_attribute, or
55  *	pg_index tuple, we register a relcache flush operation for the relation
56  *	described by that tuple (as specified in CacheInvalidateHeapTuple()).
57  *	Likewise for pg_constraint tuples for foreign keys on relations.
58  *
59  *	We keep the relcache flush requests in lists separate from the catcache
60  *	tuple flush requests.  This allows us to issue all the pending catcache
61  *	flushes before we issue relcache flushes, which saves us from loading
62  *	a catcache tuple during relcache load only to flush it again right away.
63  *	Also, we avoid queuing multiple relcache flush requests for the same
64  *	relation, since a relcache flush is relatively expensive to do.
65  *	(XXX is it worth testing likewise for duplicate catcache flush entries?
66  *	Probably not.)
67  *
68  *	Many subsystems own higher-level caches that depend on relcache and/or
69  *	catcache, and they register callbacks here to invalidate their caches.
70  *	While building a higher-level cache entry, a backend may receive a
71  *	callback for the being-built entry or one of its dependencies.  This
72  *	implies the new higher-level entry would be born stale, and it might
73  *	remain stale for the life of the backend.  Many caches do not prevent
74  *	that.  They rely on DDL for can't-miss catalog changes taking
75  *	AccessExclusiveLock on suitable objects.  (For a change made with less
76  *	locking, backends might never read the change.)  The relation cache,
77  *	however, needs to reflect changes from CREATE INDEX CONCURRENTLY no later
78  *	than the beginning of the next transaction.  Hence, when a relevant
79  *	invalidation callback arrives during a build, relcache.c reattempts that
80  *	build.  Caches with similar needs could do likewise.
81  *
82  *	If a relcache flush is issued for a system relation that we preload
83  *	from the relcache init file, we must also delete the init file so that
84  *	it will be rebuilt during the next backend restart.  The actual work of
85  *	manipulating the init file is in relcache.c, but we keep track of the
86  *	need for it here.
87  *
88  *	The request lists proper are kept in CurTransactionContext of their
89  *	creating (sub)transaction, since they can be forgotten on abort of that
90  *	transaction but must be kept till top-level commit otherwise.  For
91  *	simplicity we keep the controlling list-of-lists in TopTransactionContext.
92  *
93  *	Currently, inval messages are sent without regard for the possibility
94  *	that the object described by the catalog tuple might be a session-local
95  *	object such as a temporary table.  This is because (1) this code has
96  *	no practical way to tell the difference, and (2) it is not certain that
97  *	other backends don't have catalog cache or even relcache entries for
98  *	such tables, anyway; there is nothing that prevents that.  It might be
99  *	worth trying to avoid sending such inval traffic in the future, if those
100  *	problems can be overcome cheaply.
101  *
102  *	When wal_level=logical, write invalidations into WAL at each command end to
103  *	support decoding of in-progress transactions.  See
104  *	CommandEndInvalidationMessages.
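 *
 *	As a rough sketch (all names refer to routines in this file):
 *	CacheInvalidateHeapTuple() and the other transactional CacheInvalidate*
 *	entry points queue messages into the current command's list; at each
 *	CommandCounterIncrement(), CommandEndInvalidationMessages() applies them
 *	locally and moves them to the prior-commands list; finally
 *	AtEOSubXact_Inval() and AtEOXact_Inval() propagate or discard them at
 *	(sub)transaction end.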
105  *
106  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
107  * Portions Copyright (c) 1994, Regents of the University of California
108  *
109  * IDENTIFICATION
110  *	  src/backend/utils/cache/inval.c
111  *
112  *-------------------------------------------------------------------------
113  */
114 #include "postgres.h"
115 
116 #include <limits.h>
117 
118 #include "access/htup_details.h"
119 #include "access/xact.h"
120 #include "catalog/catalog.h"
121 #include "catalog/pg_constraint.h"
122 #include "miscadmin.h"
123 #include "storage/sinval.h"
124 #include "storage/smgr.h"
125 #include "utils/catcache.h"
126 #include "utils/guc.h"
127 #include "utils/inval.h"
128 #include "utils/memdebug.h"
129 #include "utils/memutils.h"
130 #include "utils/rel.h"
131 #include "utils/relmapper.h"
132 #include "utils/snapmgr.h"
133 #include "utils/syscache.h"
134 
135 
136 /*
137  * To minimize palloc traffic, we keep pending requests in successively-
138  * larger chunks (a slightly more sophisticated version of an expansible
139  * array).  All request types can be stored as SharedInvalidationMessage
140  * records.  The ordering of requests within a list is never significant.
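 * (With FIRSTCHUNKSIZE = 32 and doubling, successive chunks hold 32, 64,
 * 128, ... messages; see AddInvalidationMessage below.)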
141  */
142 typedef struct InvalidationChunk
143 {
144 	struct InvalidationChunk *next; /* list link */
145 	int			nitems;			/* # items currently stored in chunk */
146 	int			maxitems;		/* size of allocated array in this chunk */
147 	SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
148 } InvalidationChunk;
149 
150 typedef struct InvalidationListHeader
151 {
152 	InvalidationChunk *cclist;	/* list of chunks holding catcache msgs */
153 	InvalidationChunk *rclist;	/* list of chunks holding relcache msgs */
154 } InvalidationListHeader;
155 
156 /*----------------
157  * Invalidation info is divided into two lists:
158  *	1) events so far in current command, not yet reflected to caches.
159  *	2) events in previous commands of current transaction; these have
160  *	   been reflected to local caches, and must be either broadcast to
161  *	   other backends or rolled back from local cache when we commit
162  *	   or abort the transaction.
163  * Actually, we need two such lists for each level of nested transaction,
164  * so that we can discard events from an aborted subtransaction.  When
165  * a subtransaction commits, we append its lists to the parent's lists.
166  *
167  * The relcache-file-invalidated flag can just be a simple boolean,
168  * since we only act on it at transaction commit; we don't care which
169  * command of the transaction set it.
170  *----------------
171  */
172 
173 typedef struct TransInvalidationInfo
174 {
175 	/* Back link to parent transaction's info */
176 	struct TransInvalidationInfo *parent;
177 
178 	/* Subtransaction nesting depth */
179 	int			my_level;
180 
181 	/* head of current-command event list */
182 	InvalidationListHeader CurrentCmdInvalidMsgs;
183 
184 	/* head of previous-commands event list */
185 	InvalidationListHeader PriorCmdInvalidMsgs;
186 
187 	/* init file must be invalidated? */
188 	bool		RelcacheInitFileInval;
189 } TransInvalidationInfo;
190 
191 static TransInvalidationInfo *transInvalInfo = NULL;
192 
193 static SharedInvalidationMessage *SharedInvalidMessagesArray;
194 static int	numSharedInvalidMessagesArray;
195 static int	maxSharedInvalidMessagesArray;
196 
197 /* GUC storage */
198 int			debug_discard_caches = 0;
199 
200 /*
201  * Dynamically-registered callback functions.  Current implementation
202  * assumes there won't be enough of these to justify a dynamically resizable
203  * array; it'd be easy to improve that if needed.
204  *
205  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
206  * syscache are linked into a list pointed to by syscache_callback_links[id].
207  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
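 *
 * For example, if the callbacks at syscache_callback_list indexes 0 and 3
 * are both for cache id 7 (registered in that order), then
 * syscache_callback_links[7] == 1, syscache_callback_list[0].link == 4,
 * and syscache_callback_list[3].link == 0.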
208  */
209 
210 #define MAX_SYSCACHE_CALLBACKS 64
211 #define MAX_RELCACHE_CALLBACKS 10
212 
213 static struct SYSCACHECALLBACK
214 {
215 	int16		id;				/* cache number */
216 	int16		link;			/* next callback index+1 for same cache */
217 	SyscacheCallbackFunction function;
218 	Datum		arg;
219 }			syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
220 
221 static int16 syscache_callback_links[SysCacheSize];
222 
223 static int	syscache_callback_count = 0;
224 
225 static struct RELCACHECALLBACK
226 {
227 	RelcacheCallbackFunction function;
228 	Datum		arg;
229 }			relcache_callback_list[MAX_RELCACHE_CALLBACKS];
230 
231 static int	relcache_callback_count = 0;
232 
233 /* ----------------------------------------------------------------
234  *				Invalidation list support functions
235  *
236  * These three routines encapsulate processing of the "chunked"
237  * representation of what is logically just a list of messages.
238  * ----------------------------------------------------------------
239  */
240 
241 /*
242  * AddInvalidationMessage
243  *		Add an invalidation message to a list (of chunks).
244  *
245  * Note that we do not pay any great attention to maintaining the original
246  * ordering of the messages.
247  */
248 static void
249 AddInvalidationMessage(InvalidationChunk **listHdr,
250 					   SharedInvalidationMessage *msg)
251 {
252 	InvalidationChunk *chunk = *listHdr;
253 
254 	if (chunk == NULL)
255 	{
256 		/* First time through; create initial chunk */
257 #define FIRSTCHUNKSIZE 32
258 		chunk = (InvalidationChunk *)
259 			MemoryContextAlloc(CurTransactionContext,
260 							   offsetof(InvalidationChunk, msgs) +
261 							   FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
262 		chunk->nitems = 0;
263 		chunk->maxitems = FIRSTCHUNKSIZE;
264 		chunk->next = *listHdr;
265 		*listHdr = chunk;
266 	}
267 	else if (chunk->nitems >= chunk->maxitems)
268 	{
269 		/* Need another chunk; double size of last chunk */
270 		int			chunksize = 2 * chunk->maxitems;
271 
272 		chunk = (InvalidationChunk *)
273 			MemoryContextAlloc(CurTransactionContext,
274 							   offsetof(InvalidationChunk, msgs) +
275 							   chunksize * sizeof(SharedInvalidationMessage));
276 		chunk->nitems = 0;
277 		chunk->maxitems = chunksize;
278 		chunk->next = *listHdr;
279 		*listHdr = chunk;
280 	}
281 	/* Okay, add message to current chunk */
282 	chunk->msgs[chunk->nitems] = *msg;
283 	chunk->nitems++;
284 }
285 
286 /*
287  * Append one list of invalidation message chunks to another, resetting
288  * the source chunk-list pointer to NULL.
289  */
290 static void
291 AppendInvalidationMessageList(InvalidationChunk **destHdr,
292 							  InvalidationChunk **srcHdr)
293 {
294 	InvalidationChunk *chunk = *srcHdr;
295 
296 	if (chunk == NULL)
297 		return;					/* nothing to do */
298 
299 	while (chunk->next != NULL)
300 		chunk = chunk->next;
301 
302 	chunk->next = *destHdr;
303 
304 	*destHdr = *srcHdr;
305 
306 	*srcHdr = NULL;
307 }
308 
309 /*
310  * Process a list of invalidation messages.
311  *
312  * This is a macro that executes the given code fragment for each message in
313  * a message chunk list.  The fragment should refer to the message as *msg.
314  */
315 #define ProcessMessageList(listHdr, codeFragment) \
316 	do { \
317 		InvalidationChunk *_chunk; \
318 		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
319 		{ \
320 			int		_cindex; \
321 			for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
322 			{ \
323 				SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
324 				codeFragment; \
325 			} \
326 		} \
327 	} while (0)
328 
329 /*
330  * Process a list of invalidation messages group-wise.
331  *
332  * As above, but the code fragment can handle an array of messages.
333  * The fragment should refer to the messages as msgs[], with n entries.
334  */
335 #define ProcessMessageListMulti(listHdr, codeFragment) \
336 	do { \
337 		InvalidationChunk *_chunk; \
338 		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
339 		{ \
340 			SharedInvalidationMessage *msgs = _chunk->msgs; \
341 			int		n = _chunk->nitems; \
342 			codeFragment; \
343 		} \
344 	} while (0)
345 
346 
347 /* ----------------------------------------------------------------
348  *				Invalidation set support functions
349  *
350  * These routines understand about the division of a logical invalidation
351  * list into separate physical lists for catcache and relcache entries.
352  * ----------------------------------------------------------------
353  */
354 
355 /*
356  * Add a catcache inval entry
357  */
358 static void
359 AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
360 							   int id, uint32 hashValue, Oid dbId)
361 {
362 	SharedInvalidationMessage msg;
363 
364 	Assert(id < CHAR_MAX);
365 	msg.cc.id = (int8) id;
366 	msg.cc.dbId = dbId;
367 	msg.cc.hashValue = hashValue;
368 
369 	/*
370 	 * Mark the padding bytes in SharedInvalidationMessage structs as
371 	 * defined.  Otherwise the sinvaladt.c ring buffer, which is accessed by
372 	 * multiple processes, will cause spurious valgrind warnings about
373 	 * undefined memory being used.  That's because valgrind remembers the
374 	 * undefined bytes from the last local process's store, not realizing that
375 	 * another process has written since, filling the previously uninitialized
376 	 * bytes.
377 	 */
378 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
379 
380 	AddInvalidationMessage(&hdr->cclist, &msg);
381 }
382 
383 /*
384  * Add a whole-catalog inval entry
385  */
386 static void
387 AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
388 							  Oid dbId, Oid catId)
389 {
390 	SharedInvalidationMessage msg;
391 
392 	msg.cat.id = SHAREDINVALCATALOG_ID;
393 	msg.cat.dbId = dbId;
394 	msg.cat.catId = catId;
395 	/* check AddCatcacheInvalidationMessage() for an explanation */
396 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
397 
398 	AddInvalidationMessage(&hdr->cclist, &msg);
399 }
400 
401 /*
402  * Add a relcache inval entry
403  */
404 static void
405 AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
406 							   Oid dbId, Oid relId)
407 {
408 	SharedInvalidationMessage msg;
409 
410 	/*
411 	 * Don't add a duplicate item. We assume dbId need not be checked because
412 	 * it will never change. InvalidOid for relId means all relations so we
413 	 * don't need to add individual ones when it is present.
414 	 */
415 	ProcessMessageList(hdr->rclist,
416 					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
417 						   (msg->rc.relId == relId ||
418 							msg->rc.relId == InvalidOid))
419 					   return);
420 
421 	/* OK, add the item */
422 	msg.rc.id = SHAREDINVALRELCACHE_ID;
423 	msg.rc.dbId = dbId;
424 	msg.rc.relId = relId;
425 	/* check AddCatcacheInvalidationMessage() for an explanation */
426 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
427 
428 	AddInvalidationMessage(&hdr->rclist, &msg);
429 }
430 
431 /*
432  * Add a snapshot inval entry
433  */
434 static void
435 AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
436 							   Oid dbId, Oid relId)
437 {
438 	SharedInvalidationMessage msg;
439 
440 	/* Don't add a duplicate item */
441 	/* We assume dbId need not be checked because it will never change */
442 	ProcessMessageList(hdr->rclist,
443 					   if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
444 						   msg->sn.relId == relId)
445 					   return);
446 
447 	/* OK, add the item */
448 	msg.sn.id = SHAREDINVALSNAPSHOT_ID;
449 	msg.sn.dbId = dbId;
450 	msg.sn.relId = relId;
451 	/* check AddCatcacheInvalidationMessage() for an explanation */
452 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
453 
454 	AddInvalidationMessage(&hdr->rclist, &msg);
455 }
456 
457 /*
458  * Append one list of invalidation messages to another, resetting
459  * the source list to empty.
460  */
461 static void
462 AppendInvalidationMessages(InvalidationListHeader *dest,
463 						   InvalidationListHeader *src)
464 {
465 	AppendInvalidationMessageList(&dest->cclist, &src->cclist);
466 	AppendInvalidationMessageList(&dest->rclist, &src->rclist);
467 }
468 
469 /*
470  * Execute the given function for all the messages in an invalidation list.
471  * The list is not altered.
472  *
473  * catcache entries are processed first, for reasons mentioned above.
474  */
475 static void
476 ProcessInvalidationMessages(InvalidationListHeader *hdr,
477 							void (*func) (SharedInvalidationMessage *msg))
478 {
479 	ProcessMessageList(hdr->cclist, func(msg));
480 	ProcessMessageList(hdr->rclist, func(msg));
481 }
482 
483 /*
484  * As above, but the function is able to process an array of messages
485  * rather than just one at a time.
486  */
487 static void
488 ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
489 								 void (*func) (const SharedInvalidationMessage *msgs, int n))
490 {
491 	ProcessMessageListMulti(hdr->cclist, func(msgs, n));
492 	ProcessMessageListMulti(hdr->rclist, func(msgs, n));
493 }
494 
495 /* ----------------------------------------------------------------
496  *					  private support functions
497  * ----------------------------------------------------------------
498  */
499 
500 /*
501  * RegisterCatcacheInvalidation
502  *
503  * Register an invalidation event for a catcache tuple entry.
504  */
505 static void
506 RegisterCatcacheInvalidation(int cacheId,
507 							 uint32 hashValue,
508 							 Oid dbId)
509 {
510 	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
511 								   cacheId, hashValue, dbId);
512 }
513 
514 /*
515  * RegisterCatalogInvalidation
516  *
517  * Register an invalidation event for all catcache entries from a catalog.
518  */
519 static void
520 RegisterCatalogInvalidation(Oid dbId, Oid catId)
521 {
522 	AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
523 								  dbId, catId);
524 }
525 
526 /*
527  * RegisterRelcacheInvalidation
528  *
529  * As above, but register a relcache invalidation event.
530  */
531 static void
532 RegisterRelcacheInvalidation(Oid dbId, Oid relId)
533 {
534 	AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
535 								   dbId, relId);
536 
537 	/*
538 	 * Most of the time, relcache invalidation is associated with system
539 	 * catalog updates, but there are a few cases where it isn't.  Quick hack
540 	 * to ensure that the next CommandCounterIncrement() will think that we
541 	 * need to do CommandEndInvalidationMessages().
542 	 */
543 	(void) GetCurrentCommandId(true);
544 
545 	/*
546 	 * If the relation being invalidated is one of those cached in a relcache
547 	 * init file, mark that we need to zap that file at commit. For simplicity
548 	 * invalidations for a specific database always invalidate the shared file
549 	 * as well.  Also zap when we are invalidating the whole relcache.
550 	 */
551 	if (relId == InvalidOid || RelationIdIsInInitFile(relId))
552 		transInvalInfo->RelcacheInitFileInval = true;
553 }
554 
555 /*
556  * RegisterSnapshotInvalidation
557  *
558  * Register an invalidation event for MVCC scans against a given catalog.
559  * Only needed for catalogs that don't have catcaches.
560  */
561 static void
562 RegisterSnapshotInvalidation(Oid dbId, Oid relId)
563 {
564 	AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
565 								   dbId, relId);
566 }
567 
568 /*
569  * LocalExecuteInvalidationMessage
570  *
571  * Process a single invalidation message (which could be of any type).
572  * Only the local caches are flushed; this does not transmit the message
573  * to other backends.
574  */
575 void
576 LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
577 {
578 	if (msg->id >= 0)
579 	{
580 		if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
581 		{
582 			InvalidateCatalogSnapshot();
583 
584 			SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
585 
586 			CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
587 		}
588 	}
589 	else if (msg->id == SHAREDINVALCATALOG_ID)
590 	{
591 		if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
592 		{
593 			InvalidateCatalogSnapshot();
594 
595 			CatalogCacheFlushCatalog(msg->cat.catId);
596 
597 			/* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
598 		}
599 	}
600 	else if (msg->id == SHAREDINVALRELCACHE_ID)
601 	{
602 		if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
603 		{
604 			int			i;
605 
606 			if (msg->rc.relId == InvalidOid)
607 				RelationCacheInvalidate(false);
608 			else
609 				RelationCacheInvalidateEntry(msg->rc.relId);
610 
611 			for (i = 0; i < relcache_callback_count; i++)
612 			{
613 				struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
614 
615 				ccitem->function(ccitem->arg, msg->rc.relId);
616 			}
617 		}
618 	}
619 	else if (msg->id == SHAREDINVALSMGR_ID)
620 	{
621 		/*
622 		 * We could have smgr entries for relations of other databases, so no
623 		 * short-circuit test is possible here.
624 		 */
625 		RelFileNodeBackend rnode;
626 
627 		rnode.node = msg->sm.rnode;
628 		rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
629 		smgrclosenode(rnode);
630 	}
631 	else if (msg->id == SHAREDINVALRELMAP_ID)
632 	{
633 		/* We only care about our own database and shared catalogs */
634 		if (msg->rm.dbId == InvalidOid)
635 			RelationMapInvalidate(true);
636 		else if (msg->rm.dbId == MyDatabaseId)
637 			RelationMapInvalidate(false);
638 	}
639 	else if (msg->id == SHAREDINVALSNAPSHOT_ID)
640 	{
641 		/* We only care about our own database and shared catalogs */
642 		if (msg->sn.dbId == InvalidOid)
643 			InvalidateCatalogSnapshot();
644 		else if (msg->sn.dbId == MyDatabaseId)
645 			InvalidateCatalogSnapshot();
646 	}
647 	else
648 		elog(FATAL, "unrecognized SI message ID: %d", msg->id);
649 }
650 
651 /*
652  *		InvalidateSystemCaches
653  *
654  *		This blows away all tuples in the system catalog caches and
655  *		all the cached relation descriptors and smgr cache entries.
656  *		Relation descriptors that have positive refcounts are then rebuilt.
657  *
658  *		We call this when we see a shared-inval-queue overflow signal,
659  *		since that tells us we've lost some shared-inval messages and hence
660  *		don't know what needs to be invalidated.
661  */
662 void
663 InvalidateSystemCaches(void)
664 {
665 	InvalidateSystemCachesExtended(false);
666 }
667 
668 void
669 InvalidateSystemCachesExtended(bool debug_discard)
670 {
671 	int			i;
672 
673 	InvalidateCatalogSnapshot();
674 	ResetCatalogCaches();
675 	RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
676 
677 	for (i = 0; i < syscache_callback_count; i++)
678 	{
679 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
680 
681 		ccitem->function(ccitem->arg, ccitem->id, 0);
682 	}
683 
684 	for (i = 0; i < relcache_callback_count; i++)
685 	{
686 		struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
687 
688 		ccitem->function(ccitem->arg, InvalidOid);
689 	}
690 }
691 
692 
693 /* ----------------------------------------------------------------
694  *					  public functions
695  * ----------------------------------------------------------------
696  */
697 
698 /*
699  * AcceptInvalidationMessages
700  *		Read and process invalidation messages from the shared invalidation
701  *		message queue.
702  *
703  * Note:
704  *		This should be called as the first step in processing a transaction.
705  */
706 void
707 AcceptInvalidationMessages(void)
708 {
709 	ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
710 								 InvalidateSystemCaches);
711 
712 	/*----------
713 	 * Test code to force cache flushes anytime a flush could happen.
714 	 *
715 	 * This helps detect intermittent faults caused by code that reads a cache
716 	 * entry and then performs an action that could invalidate the entry, but
717 	 * rarely actually does so.  This can spot issues that would otherwise
718 	 * only arise with badly timed concurrent DDL, for example.
719 	 *
720 	 * The default debug_discard_caches = 0 does no forced cache flushes.
721 	 *
722 	 * If used with CLOBBER_FREED_MEMORY,
723 	 * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
724 	 * provides a fairly thorough test that the system contains no cache-flush
725 	 * hazards.  However, it also makes the system unbelievably slow --- the
726 	 * regression tests take about 100 times longer than normal.
727 	 *
728 	 * If you're a glutton for punishment, try
729 	 * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
730 	 * This slows things by at least a factor of 10000, so I wouldn't suggest
731 	 * trying to run the entire regression tests that way.  It's useful to try
732 	 * a few simple tests, to make sure that cache reload isn't subject to
733 	 * internal cache-flush hazards, but after you've done a few thousand
734 	 * recursive reloads it's unlikely you'll learn more.
735 	 *----------
736 	 */
737 #ifdef DISCARD_CACHES_ENABLED
738 	{
739 		static int	recursion_depth = 0;
740 
741 		if (recursion_depth < debug_discard_caches)
742 		{
743 			recursion_depth++;
744 			InvalidateSystemCachesExtended(true);
745 			recursion_depth--;
746 		}
747 	}
748 #endif
749 }
750 
751 /*
752  * PrepareInvalidationState
753  *		Initialize inval lists for the current (sub)transaction.
754  */
755 static void
756 PrepareInvalidationState(void)
757 {
758 	TransInvalidationInfo *myInfo;
759 
760 	if (transInvalInfo != NULL &&
761 		transInvalInfo->my_level == GetCurrentTransactionNestLevel())
762 		return;
763 
764 	myInfo = (TransInvalidationInfo *)
765 		MemoryContextAllocZero(TopTransactionContext,
766 							   sizeof(TransInvalidationInfo));
767 	myInfo->parent = transInvalInfo;
768 	myInfo->my_level = GetCurrentTransactionNestLevel();
769 
770 	/*
771 	 * If there's any previous entry, this one should be for a deeper nesting
772 	 * level.
773 	 */
774 	Assert(transInvalInfo == NULL ||
775 		   myInfo->my_level > transInvalInfo->my_level);
776 
777 	transInvalInfo = myInfo;
778 }
779 
780 /*
781  * PostPrepare_Inval
782  *		Clean up after successful PREPARE.
783  *
784  * Here, we want to act as though the transaction aborted, so that we will
785  * undo any syscache changes it made, thereby bringing us into sync with the
786  * outside world, which doesn't believe the transaction committed yet.
787  *
788  * If the prepared transaction is later aborted, there is nothing more to
789  * do; if it commits, we will receive the consequent inval messages just
790  * like everyone else.
791  */
792 void
793 PostPrepare_Inval(void)
794 {
795 	AtEOXact_Inval(false);
796 }
797 
798 /*
799  * Collect invalidation messages into SharedInvalidMessagesArray array.
800  */
801 static void
802 MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
803 {
804 	/*
805 	 * Initialise array first time through in each commit
806 	 */
807 	if (SharedInvalidMessagesArray == NULL)
808 	{
809 		maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
810 		numSharedInvalidMessagesArray = 0;
811 
812 		/*
813 		 * Although this is being palloc'd we don't actually free it directly.
814 		 * We're so close to EOXact that we know we're going to lose it anyhow.
815 		 */
816 		SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
817 											* sizeof(SharedInvalidationMessage));
818 	}
819 
820 	if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
821 	{
822 		while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
823 			maxSharedInvalidMessagesArray *= 2;
824 
825 		SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
826 											  maxSharedInvalidMessagesArray
827 											  * sizeof(SharedInvalidationMessage));
828 	}
829 
830 	/*
831 	 * Append the next chunk onto the array
832 	 */
833 	memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
834 		   msgs, n * sizeof(SharedInvalidationMessage));
835 	numSharedInvalidMessagesArray += n;
836 }
837 
838 /*
839  * xactGetCommittedInvalidationMessages() is executed by
840  * RecordTransactionCommit() to add invalidation messages onto the
841  * commit record. This applies only to commit message types, never to
842  * abort records. Must always run before AtEOXact_Inval(), since that
843  * removes the data we need to see.
844  *
845  * Remember that this runs before we have officially committed, so we
846  * must not do anything here to change what might occur *if* we should
847  * fail between here and the actual commit.
848  *
849  * see also xact_redo_commit() and xact_desc_commit()
850  */
851 int
852 xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
853 									 bool *RelcacheInitFileInval)
854 {
855 	MemoryContext oldcontext;
856 
857 	/* Quick exit if we haven't done anything with invalidation messages. */
858 	if (transInvalInfo == NULL)
859 	{
860 		*RelcacheInitFileInval = false;
861 		*msgs = NULL;
862 		return 0;
863 	}
864 
865 	/* Must be at top of stack */
866 	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
867 
868 	/*
869 	 * Relcache init file invalidation requires processing both before and
870 	 * after we send the SI messages.  However, we need not do anything unless
871 	 * we committed.
872 	 */
873 	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
874 
875 	/*
876 	 * Walk through TransInvalidationInfo to collect all the messages into a
877 	 * single contiguous array of invalidation messages.  It must be
878 	 * contiguous so we can copy it directly into the WAL record.  Maintain
879 	 * the order in which they would be processed by AtEOXact_Inval(), so that
880 	 * the behaviour emulated in redo is as similar as possible to the
881 	 * original.  We want the same bugs, if any, not new ones.
882 	 */
883 	oldcontext = MemoryContextSwitchTo(CurTransactionContext);
884 
885 	ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
886 									 MakeSharedInvalidMessagesArray);
887 	ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
888 									 MakeSharedInvalidMessagesArray);
889 	MemoryContextSwitchTo(oldcontext);
890 
891 	Assert(!(numSharedInvalidMessagesArray > 0 &&
892 			 SharedInvalidMessagesArray == NULL));
893 
894 	*msgs = SharedInvalidMessagesArray;
895 
896 	return numSharedInvalidMessagesArray;
897 }
898 
899 /*
900  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
901  * standby_redo() to process invalidation messages. Currently that happens
902  * only at end-of-xact.
903  *
904  * Relcache init file invalidation requires processing both
905  * before and after we send the SI messages. See AtEOXact_Inval()
906  */
907 void
908 ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
909 									 int nmsgs, bool RelcacheInitFileInval,
910 									 Oid dbid, Oid tsid)
911 {
912 	if (nmsgs <= 0)
913 		return;
914 
915 	elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
916 		 (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
917 
918 	if (RelcacheInitFileInval)
919 	{
920 		elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
921 			 dbid);
922 
923 		/*
924 		 * RelationCacheInitFilePreInvalidate, when the invalidation message
925 		 * is for a specific database, requires DatabasePath to be set, but we
926 		 * should not use SetDatabasePath during recovery, since it is
927 		 * intended to be used only once by normal backends.  Hence, a quick
928 		 * hack: set DatabasePath directly then unset after use.
929 		 */
930 		if (OidIsValid(dbid))
931 			DatabasePath = GetDatabasePath(dbid, tsid);
932 
933 		RelationCacheInitFilePreInvalidate();
934 
935 		if (OidIsValid(dbid))
936 		{
937 			pfree(DatabasePath);
938 			DatabasePath = NULL;
939 		}
940 	}
941 
942 	SendSharedInvalidMessages(msgs, nmsgs);
943 
944 	if (RelcacheInitFileInval)
945 		RelationCacheInitFilePostInvalidate();
946 }
947 
948 /*
949  * AtEOXact_Inval
950  *		Process queued-up invalidation messages at end of main transaction.
951  *
952  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
953  * to the shared invalidation message queue.  Note that these will be read
954  * not only by other backends, but also by our own backend at the next
955  * transaction start (via AcceptInvalidationMessages).  This means that
956  * we can skip immediate local processing of anything that's still in
957  * CurrentCmdInvalidMsgs, and just send that list out too.
958  *
959  * If not isCommit, we are aborting, and must locally process the messages
960  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
961  * since they'll not have seen our changed tuples anyway.  We can forget
962  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
963  * the caches yet.
964  *
965  * In any case, reset the various lists to empty.  We need not physically
966  * free memory here, since TopTransactionContext is about to be emptied
967  * anyway.
968  *
969  * Note:
970  *		This should be called as the last step in processing a transaction.
971  */
972 void
973 AtEOXact_Inval(bool isCommit)
974 {
975 	/* Quick exit if no messages */
976 	if (transInvalInfo == NULL)
977 		return;
978 
979 	/* Must be at top of stack */
980 	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
981 
982 	if (isCommit)
983 	{
984 		/*
985 		 * Relcache init file invalidation requires processing both before and
986 		 * after we send the SI messages.  However, we need not do anything
987 		 * unless we committed.
988 		 */
989 		if (transInvalInfo->RelcacheInitFileInval)
990 			RelationCacheInitFilePreInvalidate();
991 
992 		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
993 								   &transInvalInfo->CurrentCmdInvalidMsgs);
994 
995 		ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
996 										 SendSharedInvalidMessages);
997 
998 		if (transInvalInfo->RelcacheInitFileInval)
999 			RelationCacheInitFilePostInvalidate();
1000 	}
1001 	else
1002 	{
1003 		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1004 									LocalExecuteInvalidationMessage);
1005 	}
1006 
1007 	/* Need not free anything explicitly */
1008 	transInvalInfo = NULL;
1009 	SharedInvalidMessagesArray = NULL;
1010 	numSharedInvalidMessagesArray = 0;
1011 }
1012 
1013 /*
1014  * AtEOSubXact_Inval
1015  *		Process queued-up invalidation messages at end of subtransaction.
1016  *
1017  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
1018  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
1019  * parent's PriorCmdInvalidMsgs list.
1020  *
1021  * If not isCommit, we are aborting, and must locally process the messages
1022  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
1023  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
1024  * touched the caches yet.
1025  *
1026  * In any case, pop the transaction stack.  We need not physically free memory
1027  * here, since CurTransactionContext is about to be emptied anyway
1028  * (if aborting).  Beware of the possibility of aborting the same nesting
1029  * level twice, though.
1030  */
1031 void
1032 AtEOSubXact_Inval(bool isCommit)
1033 {
1034 	int			my_level;
1035 	TransInvalidationInfo *myInfo = transInvalInfo;
1036 
1037 	/* Quick exit if no messages. */
1038 	if (myInfo == NULL)
1039 		return;
1040 
1041 	/* Also bail out quickly if messages are not for this level. */
1042 	my_level = GetCurrentTransactionNestLevel();
1043 	if (myInfo->my_level != my_level)
1044 	{
1045 		Assert(myInfo->my_level < my_level);
1046 		return;
1047 	}
1048 
1049 	if (isCommit)
1050 	{
1051 		/* If CurrentCmdInvalidMsgs still has anything, fix it */
1052 		CommandEndInvalidationMessages();
1053 
1054 		/*
1055 		 * We create invalidation stack entries lazily, so the parent might
1056 		 * not have one.  Instead of creating one, moving all the data over,
1057 		 * and then freeing our own, we can just adjust the level of our own
1058 		 * entry.
1059 		 */
1060 		if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1061 		{
1062 			myInfo->my_level--;
1063 			return;
1064 		}
1065 
1066 		/* Pass up my inval messages to parent */
1067 		AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
1068 								   &myInfo->PriorCmdInvalidMsgs);
1069 
1070 		/* Pending relcache inval becomes parent's problem too */
1071 		if (myInfo->RelcacheInitFileInval)
1072 			myInfo->parent->RelcacheInitFileInval = true;
1073 
1074 		/* Pop the transaction state stack */
1075 		transInvalInfo = myInfo->parent;
1076 
1077 		/* Need not free anything else explicitly */
1078 		pfree(myInfo);
1079 	}
1080 	else
1081 	{
1082 		ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
1083 									LocalExecuteInvalidationMessage);
1084 
1085 		/* Pop the transaction state stack */
1086 		transInvalInfo = myInfo->parent;
1087 
1088 		/* Need not free anything else explicitly */
1089 		pfree(myInfo);
1090 	}
1091 }
1092 
1093 /*
1094  * CommandEndInvalidationMessages
1095  *		Process queued-up invalidation messages at end of one command
1096  *		in a transaction.
1097  *
1098  * Here, we send no messages to the shared queue, since we don't know yet if
1099  * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
1100  * list, so as to flush our caches of any entries we have outdated in the
1101  * current command.  We then move the current-cmd list over to become part
1102  * of the prior-cmds list.
1103  *
1104  * Note:
1105  *		This should be called during CommandCounterIncrement(),
1106  *		after we have advanced the command ID.
1107  */
1108 void
1109 CommandEndInvalidationMessages(void)
1110 {
1111 	/*
1112 	 * You might think this shouldn't be called outside any transaction, but
1113 	 * bootstrap does it, and also ABORT issued when not in a transaction. So
1114 	 * just quietly return if no state to work on.
1115 	 */
1116 	if (transInvalInfo == NULL)
1117 		return;
1118 
1119 	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
1120 								LocalExecuteInvalidationMessage);
1121 
1122 	/* WAL Log per-command invalidation messages for wal_level=logical */
1123 	if (XLogLogicalInfoActive())
1124 		LogLogicalInvalidations();
1125 
1126 	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1127 							   &transInvalInfo->CurrentCmdInvalidMsgs);
1128 }
1129 
1130 
1131 /*
1132  * CacheInvalidateHeapTuple
1133  *		Register the given tuple for invalidation at end of command
1134  *		(ie, current command is creating or outdating this tuple).
1135  *		Also, detect whether a relcache invalidation is implied.
1136  *
1137  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1138  * For an update, we are called just once, with tuple being the old tuple
1139  * version and newtuple the new version.  This allows avoidance of duplicate
1140  * effort during an update.
1141  */
1142 void
1143 CacheInvalidateHeapTuple(Relation relation,
1144 						 HeapTuple tuple,
1145 						 HeapTuple newtuple)
1146 {
1147 	Oid			tupleRelId;
1148 	Oid			databaseId;
1149 	Oid			relationId;
1150 
1151 	/* Do nothing during bootstrap */
1152 	if (IsBootstrapProcessingMode())
1153 		return;
1154 
1155 	/*
1156 	 * We only need to worry about invalidation for tuples that are in system
1157 	 * catalogs; user-relation tuples are never in catcaches and can't affect
1158 	 * the relcache either.
1159 	 */
1160 	if (!IsCatalogRelation(relation))
1161 		return;
1162 
1163 	/*
1164 	 * IsCatalogRelation() will return true for TOAST tables of system
1165 	 * catalogs, but we don't care about those, either.
1166 	 */
1167 	if (IsToastRelation(relation))
1168 		return;
1169 
1170 	/*
1171 	 * If we're not prepared to queue invalidation messages for this
1172 	 * subtransaction level, get ready now.
1173 	 */
1174 	PrepareInvalidationState();
1175 
1176 	/*
1177 	 * First let the catcache do its thing
1178 	 */
1179 	tupleRelId = RelationGetRelid(relation);
1180 	if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1181 	{
1182 		databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1183 		RegisterSnapshotInvalidation(databaseId, tupleRelId);
1184 	}
1185 	else
1186 		PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1187 									  RegisterCatcacheInvalidation);
1188 
1189 	/*
1190 	 * Now, is this tuple one of the primary definers of a relcache entry? See
1191 	 * comments in file header for deeper explanation.
1192 	 *
1193 	 * Note we ignore newtuple here; we assume an update cannot move a tuple
1194 	 * from being part of one relcache entry to being part of another.
1195 	 */
1196 	if (tupleRelId == RelationRelationId)
1197 	{
1198 		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1199 
1200 		relationId = classtup->oid;
1201 		if (classtup->relisshared)
1202 			databaseId = InvalidOid;
1203 		else
1204 			databaseId = MyDatabaseId;
1205 	}
1206 	else if (tupleRelId == AttributeRelationId)
1207 	{
1208 		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1209 
1210 		relationId = atttup->attrelid;
1211 
1212 		/*
1213 		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1214 		 * even if the rel in question is shared (which we can't easily tell).
1215 		 * This essentially means that only backends in this same database
1216 		 * will react to the relcache flush request.  This is in fact
1217 		 * appropriate, since only those backends could see our pg_attribute
1218 		 * change anyway.  It looks a bit ugly though.  (In practice, shared
1219 		 * relations can't have schema changes after bootstrap, so we should
1220 		 * never come here for a shared rel anyway.)
1221 		 */
1222 		databaseId = MyDatabaseId;
1223 	}
1224 	else if (tupleRelId == IndexRelationId)
1225 	{
1226 		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1227 
1228 		/*
1229 		 * When a pg_index row is updated, we should send out a relcache inval
1230 		 * for the index relation.  As above, we don't know the shared status
1231 		 * of the index, but in practice it doesn't matter since indexes of
1232 		 * shared catalogs can't have such updates.
1233 		 */
1234 		relationId = indextup->indexrelid;
1235 		databaseId = MyDatabaseId;
1236 	}
1237 	else if (tupleRelId == ConstraintRelationId)
1238 	{
1239 		Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
1240 
1241 		/*
1242 		 * Foreign keys are part of relcache entries, too, so send out an
1243 		 * inval for the table that the FK applies to.
1244 		 */
1245 		if (constrtup->contype == CONSTRAINT_FOREIGN &&
1246 			OidIsValid(constrtup->conrelid))
1247 		{
1248 			relationId = constrtup->conrelid;
1249 			databaseId = MyDatabaseId;
1250 		}
1251 		else
1252 			return;
1253 	}
1254 	else
1255 		return;
1256 
1257 	/*
1258 	 * Yes.  We need to register a relcache invalidation event.
1259 	 */
1260 	RegisterRelcacheInvalidation(databaseId, relationId);
1261 }
1262 
1263 /*
1264  * CacheInvalidateCatalog
1265  *		Register invalidation of the whole content of a system catalog.
1266  *
1267  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1268  * changed any tuples as moved them around.  Some uses of catcache entries
1269  * expect their TIDs to be correct, so we have to blow away the entries.
1270  *
1271  * Note: we expect caller to verify that the rel actually is a system
1272  * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
1273  */
1274 void
1275 CacheInvalidateCatalog(Oid catalogId)
1276 {
1277 	Oid			databaseId;
1278 
1279 	PrepareInvalidationState();
1280 
1281 	if (IsSharedRelation(catalogId))
1282 		databaseId = InvalidOid;
1283 	else
1284 		databaseId = MyDatabaseId;
1285 
1286 	RegisterCatalogInvalidation(databaseId, catalogId);
1287 }
1288 
1289 /*
1290  * CacheInvalidateRelcache
1291  *		Register invalidation of the specified relation's relcache entry
1292  *		at end of command.
1293  *
1294  * This is used in places that need to force relcache rebuild but aren't
1295  * changing any of the tuples recognized as contributors to the relcache
1296  * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
1297  */
1298 void
1299 CacheInvalidateRelcache(Relation relation)
1300 {
1301 	Oid			databaseId;
1302 	Oid			relationId;
1303 
1304 	PrepareInvalidationState();
1305 
1306 	relationId = RelationGetRelid(relation);
1307 	if (relation->rd_rel->relisshared)
1308 		databaseId = InvalidOid;
1309 	else
1310 		databaseId = MyDatabaseId;
1311 
1312 	RegisterRelcacheInvalidation(databaseId, relationId);
1313 }
1314 
1315 /*
1316  * CacheInvalidateRelcacheAll
1317  *		Register invalidation of the whole relcache at the end of command.
1318  *
1319  * This is used by ALTER PUBLICATION, since changes in publications may
1320  * affect a large number of tables.
1321  */
1322 void
1323 CacheInvalidateRelcacheAll(void)
1324 {
1325 	PrepareInvalidationState();
1326 
1327 	RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
1328 }
1329 
1330 /*
1331  * CacheInvalidateRelcacheByTuple
1332  *		As above, but relation is identified by passing its pg_class tuple.
1333  */
1334 void
1335 CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
1336 {
1337 	Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1338 	Oid			databaseId;
1339 	Oid			relationId;
1340 
1341 	PrepareInvalidationState();
1342 
1343 	relationId = classtup->oid;
1344 	if (classtup->relisshared)
1345 		databaseId = InvalidOid;
1346 	else
1347 		databaseId = MyDatabaseId;
1348 	RegisterRelcacheInvalidation(databaseId, relationId);
1349 }
1350 
1351 /*
1352  * CacheInvalidateRelcacheByRelid
1353  *		As above, but relation is identified by passing its OID.
1354  *		This is the least efficient of the three options; use one of
1355  *		the above routines if you have a Relation or pg_class tuple.
1356  */
1357 void
1358 CacheInvalidateRelcacheByRelid(Oid relid)
1359 {
1360 	HeapTuple	tup;
1361 
1362 	PrepareInvalidationState();
1363 
1364 	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1365 	if (!HeapTupleIsValid(tup))
1366 		elog(ERROR, "cache lookup failed for relation %u", relid);
1367 	CacheInvalidateRelcacheByTuple(tup);
1368 	ReleaseSysCache(tup);
1369 }
1370 
1371 
1372 /*
1373  * CacheInvalidateSmgr
1374  *		Register invalidation of smgr references to a physical relation.
1375  *
1376  * Sending this type of invalidation msg forces other backends to close open
1377  * smgr entries for the rel.  This should be done to flush dangling open-file
1378  * references when the physical rel is being dropped or truncated.  Because
1379  * these are nontransactional (i.e., not-rollback-able) operations, we just
1380  * send the inval message immediately without any queuing.
1381  *
1382  * Note: in most cases there will have been a relcache flush issued against
1383  * the rel at the logical level.  We need a separate smgr-level flush because
1384  * it is possible for backends to have open smgr entries for rels they don't
1385  * have a relcache entry for, e.g. because the only thing they ever did with
1386  * the rel is write out dirty shared buffers.
1387  *
1388  * Note: because these messages are nontransactional, they won't be captured
1389  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
1390  * should happen in low-level smgr.c routines, which are executed while
1391  * replaying WAL as well as when creating it.
1392  *
1393  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1394  * three bytes of the backend ID using what would otherwise be padding space.
1395  * Thus, the maximum possible backend ID is 2^23-1.
1396  */
1397 void
1398 CacheInvalidateSmgr(RelFileNodeBackend rnode)
1399 {
1400 	SharedInvalidationMessage msg;
1401 
1402 	msg.sm.id = SHAREDINVALSMGR_ID;
1403 	msg.sm.backend_hi = rnode.backend >> 16;
1404 	msg.sm.backend_lo = rnode.backend & 0xffff;
1405 	msg.sm.rnode = rnode.node;
1406 	/* check AddCatcacheInvalidationMessage() for an explanation */
1407 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1408 
1409 	SendSharedInvalidMessages(&msg, 1);
1410 }
1411 
1412 /*
1413  * CacheInvalidateRelmap
1414  *		Register invalidation of the relation mapping for a database,
1415  *		or for the shared catalogs if databaseId is zero.
1416  *
1417  * Sending this type of invalidation msg forces other backends to re-read
1418  * the indicated relation mapping file.  It is also necessary to send a
1419  * relcache inval for the specific relations whose mapping has been altered,
1420  * else the relcache won't get updated with the new filenode data.
1421  *
1422  * Note: because these messages are nontransactional, they won't be captured
1423  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
1424  * should happen in low-level relmapper.c routines, which are executed while
1425  * replaying WAL as well as when creating it.
1426  */
1427 void
1428 CacheInvalidateRelmap(Oid databaseId)
1429 {
1430 	SharedInvalidationMessage msg;
1431 
1432 	msg.rm.id = SHAREDINVALRELMAP_ID;
1433 	msg.rm.dbId = databaseId;
1434 	/* check AddCatcacheInvalidationMessage() for an explanation */
1435 	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1436 
1437 	SendSharedInvalidMessages(&msg, 1);
1438 }
1439 
1440 
1441 /*
1442  * CacheRegisterSyscacheCallback
1443  *		Register the specified function to be called for all future
1444  *		invalidation events in the specified cache.  The cache ID and the
1445  *		hash value of the tuple being invalidated will be passed to the
1446  *		function.
1447  *
1448  * NOTE: Hash value zero will be passed if a cache reset request is received.
1449  * In this case the called routines should flush all cached state.
1450  * Yes, there's a possibility of a false match to zero, but it doesn't seem
1451  * worth troubling over, especially since most of the current callees just
1452  * flush all cached state anyway.
1453  */
1454 void
1455 CacheRegisterSyscacheCallback(int cacheid,
1456 							  SyscacheCallbackFunction func,
1457 							  Datum arg)
1458 {
1459 	if (cacheid < 0 || cacheid >= SysCacheSize)
1460 		elog(FATAL, "invalid cache ID: %d", cacheid);
1461 	if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
1462 		elog(FATAL, "out of syscache_callback_list slots");
1463 
1464 	if (syscache_callback_links[cacheid] == 0)
1465 	{
1466 		/* first callback for this cache */
1467 		syscache_callback_links[cacheid] = syscache_callback_count + 1;
1468 	}
1469 	else
1470 	{
1471 		/* add to end of chain, so that older callbacks are called first */
1472 		int			i = syscache_callback_links[cacheid] - 1;
1473 
1474 		while (syscache_callback_list[i].link > 0)
1475 			i = syscache_callback_list[i].link - 1;
1476 		syscache_callback_list[i].link = syscache_callback_count + 1;
1477 	}
1478 
1479 	syscache_callback_list[syscache_callback_count].id = cacheid;
1480 	syscache_callback_list[syscache_callback_count].link = 0;
1481 	syscache_callback_list[syscache_callback_count].function = func;
1482 	syscache_callback_list[syscache_callback_count].arg = arg;
1483 
1484 	++syscache_callback_count;
1485 }
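
/*
 * Illustrative sketch, not part of this module: a subsystem that keeps a
 * local cache keyed by function OID might register itself during its
 * initialization with something like
 *
 *		CacheRegisterSyscacheCallback(PROCOID, my_func_cache_callback,
 *									  (Datum) 0);
 *
 * where the hypothetical my_func_cache_callback(Datum arg, int cacheid,
 * uint32 hashvalue) drops any locally cached entries whose syscache hash
 * value matches hashvalue, or all entries when hashvalue is zero (reset).
 */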
1486 
1487 /*
1488  * CacheRegisterRelcacheCallback
1489  *		Register the specified function to be called for all future
1490  *		relcache invalidation events.  The OID of the relation being
1491  *		invalidated will be passed to the function.
1492  *
1493  * NOTE: InvalidOid will be passed if a cache reset request is received.
1494  * In this case the called routines should flush all cached state.
1495  */
1496 void
1497 CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
1498 							  Datum arg)
1499 {
1500 	if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
1501 		elog(FATAL, "out of relcache_callback_list slots");
1502 
1503 	relcache_callback_list[relcache_callback_count].function = func;
1504 	relcache_callback_list[relcache_callback_count].arg = arg;
1505 
1506 	++relcache_callback_count;
1507 }
1508 
1509 /*
1510  * CallSyscacheCallbacks
1511  *
1512  * This is exported so that CatalogCacheFlushCatalog can call it, saving
1513  * this module from knowing which catcache IDs correspond to which catalogs.
1514  */
1515 void
1516 CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1517 {
1518 	int			i;
1519 
1520 	if (cacheid < 0 || cacheid >= SysCacheSize)
1521 		elog(ERROR, "invalid cache ID: %d", cacheid);
1522 
1523 	i = syscache_callback_links[cacheid] - 1;
1524 	while (i >= 0)
1525 	{
1526 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1527 
1528 		Assert(ccitem->id == cacheid);
1529 		ccitem->function(ccitem->arg, cacheid, hashvalue);
1530 		i = ccitem->link - 1;
1531 	}
1532 }
1533 
1534 /*
1535  * LogLogicalInvalidations
1536  *
1537  * Emit WAL for invalidations.  This is currently only used for logging
1538  * invalidations at the command end or at commit time if any invalidations
1539  * are pending.
1540  */
1541 void
1542 LogLogicalInvalidations()
1543 {
1544 	xl_xact_invals xlrec;
1545 	SharedInvalidationMessage *invalMessages;
1546 	int			nmsgs = 0;
1547 
1548 	/* Quick exit if we haven't done anything with invalidation messages. */
1549 	if (transInvalInfo == NULL)
1550 		return;
1551 
1552 	ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
1553 									 MakeSharedInvalidMessagesArray);
1554 
1555 	Assert(!(numSharedInvalidMessagesArray > 0 &&
1556 			 SharedInvalidMessagesArray == NULL));
1557 
1558 	invalMessages = SharedInvalidMessagesArray;
1559 	nmsgs = numSharedInvalidMessagesArray;
1560 	SharedInvalidMessagesArray = NULL;
1561 	numSharedInvalidMessagesArray = 0;
1562 
1563 	if (nmsgs > 0)
1564 	{
1565 		/* prepare record */
1566 		memset(&xlrec, 0, MinSizeOfXactInvals);
1567 		xlrec.nmsgs = nmsgs;
1568 
1569 		/* perform insertion */
1570 		XLogBeginInsert();
1571 		XLogRegisterData((char *) (&xlrec), MinSizeOfXactInvals);
1572 		XLogRegisterData((char *) invalMessages,
1573 						 nmsgs * sizeof(SharedInvalidationMessage));
1574 		XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
1575 
1576 		pfree(invalMessages);
1577 	}
1578 }
1579