1 /*-------------------------------------------------------------------------
2  *
3  * predicate.c
4  *	  POSTGRES predicate locking
5  *	  to support full serializable transaction isolation
6  *
7  *
8  * The approach taken is to implement Serializable Snapshot Isolation (SSI)
9  * as initially described in this paper:
10  *
11  *	Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
12  *	Serializable isolation for snapshot databases.
13  *	In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
14  *	international conference on Management of data,
15  *	pages 729-738, New York, NY, USA. ACM.
16  *	http://doi.acm.org/10.1145/1376616.1376690
17  *
18  * and further elaborated in Cahill's doctoral thesis:
19  *
20  *	Michael James Cahill. 2009.
21  *	Serializable Isolation for Snapshot Databases.
22  *	Sydney Digital Theses.
23  *	University of Sydney, School of Information Technologies.
24  *	http://hdl.handle.net/2123/5353
25  *
26  *
27  * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
28  * locks, which are so different from normal locks that a distinct set of
29  * structures is required to handle them.  They are needed to detect
30  * rw-conflicts when the read happens before the write.  (When the write
31  * occurs first, the reading transaction can check for a conflict by
32  * examining the MVCC data.)
33  *
34  * (1)	Besides tuples actually read, they must cover ranges of tuples
35  *		which would have been read based on the predicate.  This will
36  *		require modelling the predicates through locks against database
37  *		objects such as pages, index ranges, or entire tables.
38  *
39  * (2)	They must be kept in RAM for quick access.  Because of this, it
40  *		isn't possible to always maintain tuple-level granularity -- when
41  *		the space allocated to store these locks approaches exhaustion, a
42  *		request for a lock may need to scan for situations where a single
43  *		transaction holds many fine-grained locks which can be coalesced
44  *		into a single coarser-grained lock.
45  *
46  * (3)	They never block anything; they are more like flags than locks
47  *		in that regard, although they refer to database objects and are
48  *		used to identify rw-conflicts with normal write locks.
49  *
50  * (4)	While they are associated with a transaction, they must survive
51  *		a successful COMMIT of that transaction, and remain until all
52  *		overlapping transactions complete.  This even means that they
53  *		must survive termination of the transaction's process.  If a
54  *		top level transaction is rolled back, however, it is immediately
55  *		flagged so that it can be ignored, and its SIREAD locks can be
56  *		released any time after that.
57  *
58  * (5)	The only transactions which create SIREAD locks or check for
59  *		conflicts with them are serializable transactions.
60  *
61  * (6)	When a write lock for a top level transaction is found to cover
62  *		an existing SIREAD lock for the same transaction, the SIREAD lock
63  *		can be deleted.
64  *
65  * (7)	A write from a serializable transaction must ensure that an xact
66  *		record exists for the transaction, with the same lifespan (until
67  *		all concurrent transactions complete or the transaction is rolled
68  *		back) so that rw-dependencies to that transaction can be
69  *		detected.
70  *
71  * We use an optimization for read-only transactions. Under certain
72  * circumstances, a read-only transaction's snapshot can be shown to
73  * never have conflicts with other transactions.  This is referred to
74  * as a "safe" snapshot (and one known not to be is "unsafe").
75  * However, it can't be determined whether a snapshot is safe until
76  * all concurrent read/write transactions complete.
77  *
78  * Once a read-only transaction is known to have a safe snapshot, it
79  * can release its predicate locks and exempt itself from further
80  * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
81  * on safe snapshots, waiting as necessary for one to be available.
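 *
 * At the SQL level, the deferrable behavior described above is requested
 * with, for example:
 *
 *	BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;
 *
 * which blocks until a safe snapshot is available, after which the
 * transaction runs with no risk of serialization failure and no SIREAD
 * lock tracking.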
82  *
83  *
84  * Lightweight locks to manage access to the predicate locking shared
85  * memory objects must be taken in this order, and should be released in
86  * reverse order (a sketch of the nesting follows this header comment):
87  *
88  *	SerializableFinishedListLock
89  *		- Protects the list of transactions which have completed but which
90  *			may yet matter because they overlap still-active transactions.
91  *
92  *	SerializablePredicateLockListLock
93  *		- Protects the linked list of locks held by a transaction.  Note
94  *			that the locks themselves are also covered by the partition
95  *			locks of their respective lock targets; this lock only affects
96  *			the linked list connecting the locks related to a transaction.
97  *		- All transactions share this single lock (with no partitioning).
98  *		- There is never a need for a process other than the one running
99  *			an active transaction to walk the list of locks held by that
100  *			transaction.
101  *		- It is relatively infrequent that another process needs to
102  *			modify the list for a transaction, but it does happen for such
103  *			things as index page splits for pages with predicate locks and
104  *			freeing of predicate locked pages by a vacuum process.  When
105  *			removing a lock in such cases, the lock itself contains the
106  *			pointers needed to remove it from the list.  When adding a
107  *			lock in such cases, the lock can be added using the anchor in
108  *			the transaction structure.  Neither requires walking the list.
109  *		- Cleaning up the list for a terminated transaction is sometimes
110  *			not done on a retail basis, in which case no lock is required.
111  *		- Due to the above, a process accessing its active transaction's
112  *			list always uses a shared lock, regardless of whether it is
113  *			walking or maintaining the list.  This improves concurrency
114  *			for the common access patterns.
115  *		- A process which needs to alter the list of a transaction other
116  *			than its own active transaction must acquire an exclusive
117  *			lock.
118  *
119  *	PredicateLockHashPartitionLock(hashcode)
120  *		- The same lock protects a target, all locks on that target, and
121  *			the linked list of locks on the target.
122  *		- When more than one is needed, acquire in ascending address order.
123  *		- When all are needed (rare), acquire in ascending index order with
124  *			PredicateLockHashPartitionLockByIndex(index).
125  *
126  *	SerializableXactHashLock
127  *		- Protects both PredXact and SerializableXidHash.
128  *
129  *
130  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
131  * Portions Copyright (c) 1994, Regents of the University of California
132  *
133  *
134  * IDENTIFICATION
135  *	  src/backend/storage/lmgr/predicate.c
136  *
137  *-------------------------------------------------------------------------
138  */
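
/*
 * Illustrative sketch (excluded from the build): taking locks from more
 * than one of the levels above must follow the order documented in the
 * header comment, releasing in reverse.  Here "hashcode" stands for a
 * target tag hash computed earlier at the call site.
 */
#if 0
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	LWLockAcquire(PredicateLockHashPartitionLock(hashcode), LW_EXCLUSIVE);
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	/* ... examine or modify the shared predicate locking structures ... */
	LWLockRelease(SerializableXactHashLock);
	LWLockRelease(PredicateLockHashPartitionLock(hashcode));
	LWLockRelease(SerializablePredicateLockListLock);
#endif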
139 /*
140  * INTERFACE ROUTINES
141  *
142  * housekeeping for setting up shared memory predicate lock structures
143  *		InitPredicateLocks(void)
144  *		PredicateLockShmemSize(void)
145  *
146  * predicate lock reporting
147  *		GetPredicateLockStatusData(void)
148  *		PageIsPredicateLocked(Relation relation, BlockNumber blkno)
149  *
150  * predicate lock maintenance
151  *		GetSerializableTransactionSnapshot(Snapshot snapshot)
152  *		SetSerializableTransactionSnapshot(Snapshot snapshot,
153  *										   VirtualTransactionId *sourcevxid)
154  *		RegisterPredicateLockingXid(void)
155  *		PredicateLockRelation(Relation relation, Snapshot snapshot)
156  *		PredicateLockPage(Relation relation, BlockNumber blkno,
157  *						Snapshot snapshot)
158  *		PredicateLockTuple(Relation relation, HeapTuple tuple,
159  *						Snapshot snapshot)
160  *		PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
161  *							   BlockNumber newblkno)
162  *		PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
163  *								 BlockNumber newblkno)
164  *		TransferPredicateLocksToHeapRelation(Relation relation)
165  *		ReleasePredicateLocks(bool isCommit)
166  *
167  * conflict detection (may also trigger rollback)
168  *		CheckForSerializableConflictOut(bool visible, Relation relation,
169  *										HeapTupleData *tup, Buffer buffer,
170  *										Snapshot snapshot)
171  *		CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
172  *									   Buffer buffer)
173  *		CheckTableForSerializableConflictIn(Relation relation)
174  *
175  * final rollback checking
176  *		PreCommit_CheckForSerializationFailure(void)
177  *
178  * two-phase commit support
179  *		AtPrepare_PredicateLocks(void);
180  *		PostPrepare_PredicateLocks(TransactionId xid);
181  *		PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
182  *		predicatelock_twophase_recover(TransactionId xid, uint16 info,
183  *									   void *recdata, uint32 len);
184  */
185 
186 #include "postgres.h"
187 
188 #include "access/htup_details.h"
189 #include "access/slru.h"
190 #include "access/subtrans.h"
191 #include "access/transam.h"
192 #include "access/twophase.h"
193 #include "access/twophase_rmgr.h"
194 #include "access/xact.h"
195 #include "access/xlog.h"
196 #include "miscadmin.h"
197 #include "pgstat.h"
198 #include "storage/bufmgr.h"
199 #include "storage/predicate.h"
200 #include "storage/predicate_internals.h"
201 #include "storage/proc.h"
202 #include "storage/procarray.h"
203 #include "utils/rel.h"
204 #include "utils/snapmgr.h"
205 #include "utils/tqual.h"
206 
207 /* Uncomment the next line to test the graceful degradation code. */
208 /* #define TEST_OLDSERXID */
209 
210 /*
211  * Test the most selective fields first, for performance.
212  *
213  * a is covered by b if all of the following hold:
214  *	1) a.database = b.database
215  *	2) a.relation = b.relation
216  *	3) b.offset is invalid (b is page-granularity or higher)
217  *	4) either of the following:
218  *		4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
219  *	 or 4b) b.page is invalid (b is relation-granularity) and a.page is
220  *			valid (a is page- or tuple-granularity)
221  */
222 #define TargetTagIsCoveredBy(covered_target, covering_target)			\
223 	((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */	\
224 	  GET_PREDICATELOCKTARGETTAG_RELATION(covering_target))				\
225 	 && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) ==			\
226 		 InvalidOffsetNumber)								 /* (3) */	\
227 	 && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) !=			\
228 		   InvalidOffsetNumber)								 /* (4a) */ \
229 		  && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==		\
230 			  GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)))			\
231 		 || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==		\
232 			  InvalidBlockNumber)							 /* (4b) */ \
233 			 && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)		\
234 				 != InvalidBlockNumber)))								\
235 	 && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) ==	 /* (1) */	\
236 		 GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
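
/*
 * Worked example (sketch, excluded from the build; dbOid and relOid are
 * hypothetical placeholders): a tuple tag is covered by a page tag for its
 * page via rule 4a, while both page and tuple tags are covered by the
 * relation tag via rule 4b.  The relation is irreflexive: no tag covers
 * itself.
 */
#if 0
{
	PREDICATELOCKTARGETTAG tuple_tag,
				page_tag,
				rel_tag;

	SET_PREDICATELOCKTARGETTAG_TUPLE(tuple_tag, dbOid, relOid, 7, 42);
	SET_PREDICATELOCKTARGETTAG_PAGE(page_tag, dbOid, relOid, 7);
	SET_PREDICATELOCKTARGETTAG_RELATION(rel_tag, dbOid, relOid);

	Assert(TargetTagIsCoveredBy(tuple_tag, page_tag));	/* rule 4a */
	Assert(TargetTagIsCoveredBy(page_tag, rel_tag));	/* rule 4b */
	Assert(TargetTagIsCoveredBy(tuple_tag, rel_tag));	/* rule 4b */
	Assert(!TargetTagIsCoveredBy(page_tag, tuple_tag)); /* rule 3 fails */
	Assert(!TargetTagIsCoveredBy(page_tag, page_tag));	/* irreflexive */
}
#endif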
237 
238 /*
239  * The predicate locking target and lock shared hash tables are partitioned to
240  * reduce contention.  To determine which partition a given target belongs to,
241  * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
242  * apply one of these macros.
243  * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
244  */
245 #define PredicateLockHashPartition(hashcode) \
246 	((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
247 #define PredicateLockHashPartitionLock(hashcode) \
248 	(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
249 		PredicateLockHashPartition(hashcode)].lock)
250 #define PredicateLockHashPartitionLockByIndex(i) \
251 	(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
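
/*
 * Typical usage (sketch; targettag, targettaghash, partitionLock and
 * target are locals at the call site): compute the tag's hash once, map it
 * to the partition lock, and probe the target table under that lock.
 */
#if 0
	targettaghash = PredicateLockTargetTagHashCode(&targettag);
	partitionLock = PredicateLockHashPartitionLock(targettaghash);
	LWLockAcquire(partitionLock, LW_SHARED);
	target = (PREDICATELOCKTARGET *)
		hash_search_with_hash_value(PredicateLockTargetHash,
									&targettag, targettaghash,
									HASH_FIND, NULL);
	LWLockRelease(partitionLock);
#endif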
252 
253 #define NPREDICATELOCKTARGETENTS() \
254 	mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
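
/*
 * For example, with the default max_predicate_locks_per_xact of 64 and a
 * hypothetical MaxBackends of 100 plus 2 prepared transactions, this
 * reserves 64 * (100 + 2) = 6528 target entries.
 */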
255 
256 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
257 
258 /*
259  * Note that a sxact is marked "prepared" once it has passed
260  * PreCommit_CheckForSerializationFailure, even if it isn't using
261  * 2PC. This is the point at which it can no longer be aborted.
262  *
263  * The PREPARED flag remains set after commit, so SxactIsCommitted
264  * implies SxactIsPrepared.
265  */
266 #define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
267 #define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
268 #define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
269 #define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
270 #define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
271 #define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
272 #define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
273 /*
274  * The following macro actually means that the specified transaction has a
275  * conflict out *to a transaction which committed ahead of it*.  It's hard
276  * to get that into a name of a reasonable length.
277  */
278 #define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
279 #define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
280 #define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
281 #define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
282 
283 /*
284  * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
285  *
286  * To avoid unnecessary recomputations of the hash code, we try to do this
287  * just once per function, and then pass it around as needed.  Aside from
288  * passing the hashcode to hash_search_with_hash_value(), we can extract
289  * the lock partition number from the hashcode.
290  */
291 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
292 	get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
293 
294 /*
295  * Given a predicate lock tag, and the hash for its target,
296  * compute the lock hash.
297  *
298  * To make the hash code also depend on the transaction, we xor the sxid
299  * struct's address into the hash code, left-shifted so that the
300  * partition-number bits don't change.  Since this is only a hash, we
301  * don't care if we lose high-order bits of the address; use an
302  * intermediate variable to suppress cast-pointer-to-int warnings.
303  */
304 #define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
305 	((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
306 	 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
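
/*
 * Typical usage (sketch; predlocktag, target, sxact and the hash variables
 * are locals at the call site): derive the lock hash from the target hash
 * already in hand, so both tables are probed under the same partition
 * lock.
 */
#if 0
	targettaghash = PredicateLockTargetTagHashCode(&targettag);
	predlocktag.myTarget = target;
	predlocktag.myXact = sxact;
	predlockhashcode = PredicateLockHashCodeFromTargetHashCode(&predlocktag,
															   targettaghash);
	predlock = (PREDICATELOCK *)
		hash_search_with_hash_value(PredicateLockHash, &predlocktag,
									predlockhashcode, HASH_FIND, NULL);
#endif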
307 
308 
309 /*
310  * The SLRU buffer area through which we access the old xids.
311  */
312 static SlruCtlData OldSerXidSlruCtlData;
313 
314 #define OldSerXidSlruCtl			(&OldSerXidSlruCtlData)
315 
316 #define OLDSERXID_PAGESIZE			BLCKSZ
317 #define OLDSERXID_ENTRYSIZE			sizeof(SerCommitSeqNo)
318 #define OLDSERXID_ENTRIESPERPAGE	(OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)
319 
320 /*
321  * Set maximum pages based on the number needed to track all transactions.
322  */
323 #define OLDSERXID_MAX_PAGE			(MaxTransactionId / OLDSERXID_ENTRIESPERPAGE)
324 
325 #define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)
326 
327 #define OldSerXidValue(slotno, xid) (*((SerCommitSeqNo *) \
328 	(OldSerXidSlruCtl->shared->page_buffer[slotno] + \
329 	((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))
330 
331 #define OldSerXidPage(xid)	(((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE)
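
/*
 * Worked example, assuming a default build (BLCKSZ = 8192, 8-byte
 * SerCommitSeqNo): OLDSERXID_ENTRIESPERPAGE is 1024, so xid 123456 maps to
 * page 123456 / 1024 = 120, entry 123456 % 1024 = 576 within that page.
 * OLDSERXID_MAX_PAGE is 0xFFFFFFFF / 1024 = 4194303, past which
 * OldSerXidNextPage() wraps back to page 0.
 */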
332 
333 typedef struct OldSerXidControlData
334 {
335 	int			headPage;		/* newest initialized page */
336 	TransactionId headXid;		/* newest valid Xid in the SLRU */
337 	TransactionId tailXid;		/* oldest xmin we might be interested in */
338 }			OldSerXidControlData;
339 
340 typedef struct OldSerXidControlData *OldSerXidControl;
341 
342 static OldSerXidControl oldSerXidControl;
343 
344 /*
345  * When the oldest committed transaction on the "finished" list is moved to
346  * SLRU, its predicate locks will be moved to this "dummy" transaction,
347  * collapsing duplicate targets.  When a duplicate is found, the later
348  * commitSeqNo is used.
349  */
350 static SERIALIZABLEXACT *OldCommittedSxact;
351 
352 
353 /*
354  * These configuration variables are used to set the predicate lock table size
355  * and to control promotion of predicate locks to coarser granularity in an
356  * attempt to degrade performance (mostly as false positive serialization
357  * failure) gracefully in the face of memory pressure.
358  */
359 int			max_predicate_locks_per_xact;	/* set by guc.c */
360 int			max_predicate_locks_per_relation;	/* set by guc.c */
361 int			max_predicate_locks_per_page;	/* set by guc.c */
362 
363 /*
364  * This provides a list of objects in order to track transactions
365  * participating in predicate locking.  Entries in the list are fixed size,
366  * and reside in shared memory.  The memory address of an entry must remain
367  * fixed during its lifetime.  The list will be protected from concurrent
368  * update externally; no provision is made in this code to manage that.  The
369  * number of entries in the list, and the size allowed for each entry is
370  * fixed upon creation.
371  */
372 static PredXactList PredXact;
373 
374 /*
375  * This provides a pool of RWConflict data elements to use in conflict lists
376  * between transactions.
377  */
378 static RWConflictPoolHeader RWConflictPool;
379 
380 /*
381  * The predicate locking hash tables are in shared memory.
382  * Each backend keeps pointers to them.
383  */
384 static HTAB *SerializableXidHash;
385 static HTAB *PredicateLockTargetHash;
386 static HTAB *PredicateLockHash;
387 static SHM_QUEUE *FinishedSerializableTransactions;
388 
389 /*
390  * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
391  * this entry, you can ensure that there's enough scratch space available for
392  * inserting one entry in the hash table. This is an otherwise-invalid tag.
393  */
394 static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
395 static uint32 ScratchTargetTagHash;
396 static LWLock *ScratchPartitionLock;
397 
398 /*
399  * The local hash table used to determine when to combine multiple fine-
400  * grained locks into a single coarser-grained lock.
401  */
402 static HTAB *LocalPredicateLockHash = NULL;
403 
404 /*
405  * Keep a pointer to the currently-running serializable transaction (if any)
406  * for quick reference. Also, remember if we have written anything that could
407  * cause a rw-conflict.
408  */
409 static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
410 static bool MyXactDidWrite = false;
411 
412 /* local functions */
413 
414 static SERIALIZABLEXACT *CreatePredXact(void);
415 static void ReleasePredXact(SERIALIZABLEXACT *sxact);
416 static SERIALIZABLEXACT *FirstPredXact(void);
417 static SERIALIZABLEXACT *NextPredXact(SERIALIZABLEXACT *sxact);
418 
419 static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
420 static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
421 static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
422 static void ReleaseRWConflict(RWConflict conflict);
423 static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);
424 
425 static bool OldSerXidPagePrecedesLogically(int page1, int page2);
426 static void OldSerXidInit(void);
427 static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
428 static SerCommitSeqNo OldSerXidGetMinConflictCommitSeqNo(TransactionId xid);
429 static void OldSerXidSetActiveSerXmin(TransactionId xid);
430 
431 static uint32 predicatelock_hash(const void *key, Size keysize);
432 static void SummarizeOldestCommittedSxact(void);
433 static Snapshot GetSafeSnapshot(Snapshot snapshot);
434 static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
435 									  VirtualTransactionId *sourcevxid,
436 									  int sourcepid);
437 static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
438 static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
439 						  PREDICATELOCKTARGETTAG *parent);
440 static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
441 static void RemoveScratchTarget(bool lockheld);
442 static void RestoreScratchTarget(bool lockheld);
443 static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
444 						   uint32 targettaghash);
445 static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
446 static int	MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag);
447 static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag);
448 static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
449 static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
450 					uint32 targettaghash,
451 					SERIALIZABLEXACT *sxact);
452 static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
453 static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
454 								  PREDICATELOCKTARGETTAG newtargettag,
455 								  bool removeOld);
456 static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
457 static void DropAllPredicateLocksFromTable(Relation relation,
458 							   bool transfer);
459 static void SetNewSxactGlobalXmin(void);
460 static void ClearOldPredicateLocks(void);
461 static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
462 						   bool summarize);
463 static bool XidIsConcurrent(TransactionId xid);
464 static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
465 static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
466 static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
467 										SERIALIZABLEXACT *writer);
468 
469 
470 /*------------------------------------------------------------------------*/
471 
472 /*
473  * Does this relation participate in predicate locking? Temporary and system
474  * relations are exempt, as are materialized views.
475  */
476 static inline bool
477 PredicateLockingNeededForRelation(Relation relation)
478 {
479 	return !(relation->rd_id < FirstBootstrapObjectId ||
480 			 RelationUsesLocalBuffers(relation) ||
481 			 relation->rd_rel->relkind == RELKIND_MATVIEW);
482 }
483 
484 /*
485  * When a public interface method is called for a read, this is the test to
486  * see if we should do a quick return.
487  *
488  * Note: this function has side-effects! If this transaction has been flagged
489  * as RO-safe since the last call, we release all predicate locks and reset
490  * MySerializableXact. That makes subsequent calls return quickly.
491  *
492  * This is marked as 'inline' to eliminate the function call overhead in the
493  * common case that serialization is not needed.
494  */
495 static inline bool
496 SerializationNeededForRead(Relation relation, Snapshot snapshot)
497 {
498 	/* Nothing to do if this is not a serializable transaction */
499 	if (MySerializableXact == InvalidSerializableXact)
500 		return false;
501 
502 	/*
503 	 * Don't acquire locks or conflict when scanning with a special snapshot.
504 	 * This excludes things like CLUSTER and REINDEX. They use the wholesale
505 	 * functions TransferPredicateLocksToHeapRelation() and
506 	 * CheckTableForSerializableConflictIn() to participate in serialization,
507 	 * but the scans involved don't need serialization.
508 	 */
509 	if (!IsMVCCSnapshot(snapshot))
510 		return false;
511 
512 	/*
513 	 * Check if we have just become "RO-safe". If we have, immediately release
514 	 * all locks as they're not needed anymore. This also resets
515 	 * MySerializableXact, so that subsequent calls to this function can exit
516 	 * quickly.
517 	 *
518 	 * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
519 	 * commit without having conflicts out to an earlier snapshot, thus
520 	 * ensuring that no conflicts are possible for this transaction.
521 	 */
522 	if (SxactIsROSafe(MySerializableXact))
523 	{
524 		ReleasePredicateLocks(false);
525 		return false;
526 	}
527 
528 	/* Check if the relation doesn't participate in predicate locking */
529 	if (!PredicateLockingNeededForRelation(relation))
530 		return false;
531 
532 	return true;				/* no excuse to skip predicate locking */
533 }
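
/*
 * Sketch of how the public read entry points apply this guard; this is
 * essentially the shape of PredicateLockRelation(), which appears later in
 * this file.
 */
#if 0
void
PredicateLockRelation(Relation relation, Snapshot snapshot)
{
	PREDICATELOCKTARGETTAG tag;

	if (!SerializationNeededForRead(relation, snapshot))
		return;

	SET_PREDICATELOCKTARGETTAG_RELATION(tag,
										relation->rd_node.dbNode,
										relation->rd_id);
	PredicateLockAcquire(&tag);
}
#endif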
534 
535 /*
536  * Like SerializationNeededForRead(), but called on writes.
537  * The logic is the same, but there is no snapshot and we can't be RO-safe.
538  */
539 static inline bool
540 SerializationNeededForWrite(Relation relation)
541 {
542 	/* Nothing to do if this is not a serializable transaction */
543 	if (MySerializableXact == InvalidSerializableXact)
544 		return false;
545 
546 	/* Check if the relation doesn't participate in predicate locking */
547 	if (!PredicateLockingNeededForRelation(relation))
548 		return false;
549 
550 	return true;				/* no excuse to skip predicate locking */
551 }
552 
553 
554 /*------------------------------------------------------------------------*/
555 
556 /*
557  * These functions are a simple implementation of a list for this specific
558  * type of struct.  If there is ever a generalized shared memory list, we
559  * should probably switch to that.
560  */
561 static SERIALIZABLEXACT *
562 CreatePredXact(void)
563 {
564 	PredXactListElement ptle;
565 
566 	ptle = (PredXactListElement)
567 		SHMQueueNext(&PredXact->availableList,
568 					 &PredXact->availableList,
569 					 offsetof(PredXactListElementData, link));
570 	if (!ptle)
571 		return NULL;
572 
573 	SHMQueueDelete(&ptle->link);
574 	SHMQueueInsertBefore(&PredXact->activeList, &ptle->link);
575 	return &ptle->sxact;
576 }
577 
578 static void
579 ReleasePredXact(SERIALIZABLEXACT *sxact)
580 {
581 	PredXactListElement ptle;
582 
583 	Assert(ShmemAddrIsValid(sxact));
584 
585 	ptle = (PredXactListElement)
586 		(((char *) sxact)
587 		 - offsetof(PredXactListElementData, sxact)
588 		 + offsetof(PredXactListElementData, link));
589 	SHMQueueDelete(&ptle->link);
590 	SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
591 }
592 
593 static SERIALIZABLEXACT *
594 FirstPredXact(void)
595 {
596 	PredXactListElement ptle;
597 
598 	ptle = (PredXactListElement)
599 		SHMQueueNext(&PredXact->activeList,
600 					 &PredXact->activeList,
601 					 offsetof(PredXactListElementData, link));
602 	if (!ptle)
603 		return NULL;
604 
605 	return &ptle->sxact;
606 }
607 
608 static SERIALIZABLEXACT *
609 NextPredXact(SERIALIZABLEXACT *sxact)
610 {
611 	PredXactListElement ptle;
612 
613 	Assert(ShmemAddrIsValid(sxact));
614 
615 	ptle = (PredXactListElement)
616 		(((char *) sxact)
617 		 - offsetof(PredXactListElementData, sxact)
618 		 + offsetof(PredXactListElementData, link));
619 	ptle = (PredXactListElement)
620 		SHMQueueNext(&PredXact->activeList,
621 					 &ptle->link,
622 					 offsetof(PredXactListElementData, link));
623 	if (!ptle)
624 		return NULL;
625 
626 	return &ptle->sxact;
627 }
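
/*
 * Call sites iterate over the active list with the usual pattern (sketch;
 * SerializableXactHashLock must be held, per the ordering rules in the
 * file header):
 */
#if 0
	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
	{
		/* ... examine or update *sxact ... */
	}
#endif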
628 
629 /*------------------------------------------------------------------------*/
630 
631 /*
632  * These functions manage primitive access to the RWConflict pool and lists.
633  */
634 static bool
635 RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
636 {
637 	RWConflict	conflict;
638 
639 	Assert(reader != writer);
640 
641 	/* Check the ends of the purported conflict first. */
642 	if (SxactIsDoomed(reader)
643 		|| SxactIsDoomed(writer)
644 		|| SHMQueueEmpty(&reader->outConflicts)
645 		|| SHMQueueEmpty(&writer->inConflicts))
646 		return false;
647 
648 	/* A conflict is possible; walk the list to find out. */
649 	conflict = (RWConflict)
650 		SHMQueueNext(&reader->outConflicts,
651 					 &reader->outConflicts,
652 					 offsetof(RWConflictData, outLink));
653 	while (conflict)
654 	{
655 		if (conflict->sxactIn == writer)
656 			return true;
657 		conflict = (RWConflict)
658 			SHMQueueNext(&reader->outConflicts,
659 						 &conflict->outLink,
660 						 offsetof(RWConflictData, outLink));
661 	}
662 
663 	/* No conflict found. */
664 	return false;
665 }
666 
667 static void
668 SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
669 {
670 	RWConflict	conflict;
671 
672 	Assert(reader != writer);
673 	Assert(!RWConflictExists(reader, writer));
674 
675 	conflict = (RWConflict)
676 		SHMQueueNext(&RWConflictPool->availableList,
677 					 &RWConflictPool->availableList,
678 					 offsetof(RWConflictData, outLink));
679 	if (!conflict)
680 		ereport(ERROR,
681 				(errcode(ERRCODE_OUT_OF_MEMORY),
682 				 errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
683 				 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
684 
685 	SHMQueueDelete(&conflict->outLink);
686 
687 	conflict->sxactOut = reader;
688 	conflict->sxactIn = writer;
689 	SHMQueueInsertBefore(&reader->outConflicts, &conflict->outLink);
690 	SHMQueueInsertBefore(&writer->inConflicts, &conflict->inLink);
691 }
692 
693 static void
694 SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
695 						  SERIALIZABLEXACT *activeXact)
696 {
697 	RWConflict	conflict;
698 
699 	Assert(roXact != activeXact);
700 	Assert(SxactIsReadOnly(roXact));
701 	Assert(!SxactIsReadOnly(activeXact));
702 
703 	conflict = (RWConflict)
704 		SHMQueueNext(&RWConflictPool->availableList,
705 					 &RWConflictPool->availableList,
706 					 offsetof(RWConflictData, outLink));
707 	if (!conflict)
708 		ereport(ERROR,
709 				(errcode(ERRCODE_OUT_OF_MEMORY),
710 				 errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
711 				 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
712 
713 	SHMQueueDelete(&conflict->outLink);
714 
715 	conflict->sxactOut = activeXact;
716 	conflict->sxactIn = roXact;
717 	SHMQueueInsertBefore(&activeXact->possibleUnsafeConflicts,
718 						 &conflict->outLink);
719 	SHMQueueInsertBefore(&roXact->possibleUnsafeConflicts,
720 						 &conflict->inLink);
721 }
722 
723 static void
724 ReleaseRWConflict(RWConflict conflict)
725 {
726 	SHMQueueDelete(&conflict->inLink);
727 	SHMQueueDelete(&conflict->outLink);
728 	SHMQueueInsertBefore(&RWConflictPool->availableList, &conflict->outLink);
729 }
730 
731 static void
732 FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
733 {
734 	RWConflict	conflict,
735 				nextConflict;
736 
737 	Assert(SxactIsReadOnly(sxact));
738 	Assert(!SxactIsROSafe(sxact));
739 
740 	sxact->flags |= SXACT_FLAG_RO_UNSAFE;
741 
742 	/*
743 	 * We know this isn't a safe snapshot, so we can stop looking for other
744 	 * potential conflicts.
745 	 */
746 	conflict = (RWConflict)
747 		SHMQueueNext(&sxact->possibleUnsafeConflicts,
748 					 &sxact->possibleUnsafeConflicts,
749 					 offsetof(RWConflictData, inLink));
750 	while (conflict)
751 	{
752 		nextConflict = (RWConflict)
753 			SHMQueueNext(&sxact->possibleUnsafeConflicts,
754 						 &conflict->inLink,
755 						 offsetof(RWConflictData, inLink));
756 
757 		Assert(!SxactIsReadOnly(conflict->sxactOut));
758 		Assert(sxact == conflict->sxactIn);
759 
760 		ReleaseRWConflict(conflict);
761 
762 		conflict = nextConflict;
763 	}
764 }
765 
766 /*------------------------------------------------------------------------*/
767 
768 /*
769  * Decide whether an OldSerXid page number is "older" for truncation purposes.
770  * Analogous to CLOGPagePrecedes().
771  */
772 static bool
773 OldSerXidPagePrecedesLogically(int page1, int page2)
774 {
775 	TransactionId xid1;
776 	TransactionId xid2;
777 
778 	xid1 = ((TransactionId) page1) * OLDSERXID_ENTRIESPERPAGE;
779 	xid1 += FirstNormalTransactionId + 1;
780 	xid2 = ((TransactionId) page2) * OLDSERXID_ENTRIESPERPAGE;
781 	xid2 += FirstNormalTransactionId + 1;
782 
783 	return (TransactionIdPrecedes(xid1, xid2) &&
784 			TransactionIdPrecedes(xid1, xid2 + OLDSERXID_ENTRIESPERPAGE - 1));
785 }
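
/*
 * Both comparisons above are needed: page1 logically precedes page2 only
 * if page1's first xid precedes both the first and the last xid of page2.
 * With the first test alone, two pages lying exactly 2^31 XIDs apart would
 * each appear to precede the other, since TransactionIdPrecedes() works
 * modulo 2^32; the second test breaks such ties, keeping the relation
 * antisymmetric for truncation decisions.
 */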
786 
787 #ifdef USE_ASSERT_CHECKING
788 static void
789 OldSerXidPagePrecedesLogicallyUnitTests(void)
790 {
791 	int			per_page = OLDSERXID_ENTRIESPERPAGE,
792 				offset = per_page / 2;
793 	int			newestPage,
794 				oldestPage,
795 				headPage,
796 				targetPage;
797 	TransactionId newestXact,
798 				oldestXact;
799 
800 	/* GetNewTransactionId() has assigned the last XID it can safely use. */
801 	newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;	/* nothing special */
802 	newestXact = newestPage * per_page + offset;
803 	Assert(newestXact / per_page == newestPage);
804 	oldestXact = newestXact + 1;
805 	oldestXact -= 1U << 31;
806 	oldestPage = oldestXact / per_page;
807 
808 	/*
809 	 * In this scenario, the SLRU headPage pertains to the last ~1000 XIDs
810 	 * assigned.  oldestXact finishes, ~2B XIDs having elapsed since it
811 	 * started.  Further transactions cause us to summarize oldestXact to
812 	 * tailPage.  Function must return false so OldSerXidAdd() doesn't zero
813 	 * tailPage (which may contain entries for other old, recently-finished
814 	 * XIDs) and half the SLRU.  Reaching this requires burning ~2B XIDs in
815 	 * single-user mode, a negligible possibility.
816 	 */
817 	headPage = newestPage;
818 	targetPage = oldestPage;
819 	Assert(!OldSerXidPagePrecedesLogically(headPage, targetPage));
820 
821 	/*
822 	 * In this scenario, the SLRU headPage pertains to oldestXact.  We're
823 	 * summarizing an XID near newestXact.  (Assume few other XIDs used
824 	 * SERIALIZABLE, hence the minimal headPage advancement.  Assume
825 	 * oldestXact was long-running and only recently reached the SLRU.)
826 	 * Function must return true to make OldSerXidAdd() create targetPage.
827 	 *
828 	 * Today's implementation mishandles this case, but it doesn't matter
829 	 * enough to fix.  Verify that the defect affects just one page by
830 	 * asserting correct treatment of its prior page.  Reaching this case
831 	 * requires burning ~2B XIDs in single-user mode, a negligible
832 	 * possibility.  Moreover, if it does happen, the consequence would be
833 	 * mild, namely a new transaction failing in SimpleLruReadPage().
834 	 */
835 	headPage = oldestPage;
836 	targetPage = newestPage;
837 	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage - 1));
838 #if 0
839 	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage));
840 #endif
841 }
842 #endif
843 
844 /*
845  * Initialize for the tracking of old serializable committed xids.
846  */
847 static void
848 OldSerXidInit(void)
849 {
850 	bool		found;
851 
852 	/*
853 	 * Set up SLRU management of the pg_serial data.
854 	 */
855 	OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
856 	SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
857 				  NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial",
858 				  LWTRANCHE_OLDSERXID_BUFFERS);
859 	/* Override default assumption that writes should be fsync'd */
860 	OldSerXidSlruCtl->do_fsync = false;
861 #ifdef USE_ASSERT_CHECKING
862 	OldSerXidPagePrecedesLogicallyUnitTests();
863 #endif
864 	SlruPagePrecedesUnitTests(OldSerXidSlruCtl, OLDSERXID_ENTRIESPERPAGE);
865 
866 	/*
867 	 * Create or attach to the OldSerXidControl structure.
868 	 */
869 	oldSerXidControl = (OldSerXidControl)
870 		ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);
871 
872 	Assert(found == IsUnderPostmaster);
873 	if (!found)
874 	{
875 		/*
876 		 * Set control information to reflect empty SLRU.
877 		 */
878 		oldSerXidControl->headPage = -1;
879 		oldSerXidControl->headXid = InvalidTransactionId;
880 		oldSerXidControl->tailXid = InvalidTransactionId;
881 	}
882 }
883 
884 /*
885  * Record a committed read-write serializable xid and the minimum
886  * commitSeqNo of any transactions to which this xid had a rw-conflict out.
887  * An invalid seqNo means that there were no conflicts out from xid.
888  */
889 static void
890 OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
891 {
892 	TransactionId tailXid;
893 	int			targetPage;
894 	int			slotno;
895 	int			firstZeroPage;
896 	bool		isNewPage;
897 
898 	Assert(TransactionIdIsValid(xid));
899 
900 	targetPage = OldSerXidPage(xid);
901 
902 	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
903 
904 	/*
905 	 * If no serializable transactions are active, there shouldn't be anything
906 	 * to push out to the SLRU.  Hitting this assert would mean there's
907 	 * something wrong with the earlier cleanup logic.
908 	 */
909 	tailXid = oldSerXidControl->tailXid;
910 	Assert(TransactionIdIsValid(tailXid));
911 
912 	/*
913 	 * If the SLRU is currently unused, zero out the whole active region from
914 	 * tailXid to headXid before taking it into use. Otherwise zero out only
915 	 * any new pages that enter the tailXid-headXid range as we advance
916 	 * headXid.
917 	 */
918 	if (oldSerXidControl->headPage < 0)
919 	{
920 		firstZeroPage = OldSerXidPage(tailXid);
921 		isNewPage = true;
922 	}
923 	else
924 	{
925 		firstZeroPage = OldSerXidNextPage(oldSerXidControl->headPage);
926 		isNewPage = OldSerXidPagePrecedesLogically(oldSerXidControl->headPage,
927 												   targetPage);
928 	}
929 
930 	if (!TransactionIdIsValid(oldSerXidControl->headXid)
931 		|| TransactionIdFollows(xid, oldSerXidControl->headXid))
932 		oldSerXidControl->headXid = xid;
933 	if (isNewPage)
934 		oldSerXidControl->headPage = targetPage;
935 
936 	if (isNewPage)
937 	{
938 		/* Initialize intervening pages. */
939 		while (firstZeroPage != targetPage)
940 		{
941 			(void) SimpleLruZeroPage(OldSerXidSlruCtl, firstZeroPage);
942 			firstZeroPage = OldSerXidNextPage(firstZeroPage);
943 		}
944 		slotno = SimpleLruZeroPage(OldSerXidSlruCtl, targetPage);
945 	}
946 	else
947 		slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);
948 
949 	OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
950 	OldSerXidSlruCtl->shared->page_dirty[slotno] = true;
951 
952 	LWLockRelease(OldSerXidLock);
953 }
954 
955 /*
956  * Get the minimum commitSeqNo for any conflict out for the given xid.  For
957  * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
958  * will be returned.
959  */
960 static SerCommitSeqNo
961 OldSerXidGetMinConflictCommitSeqNo(TransactionId xid)
962 {
963 	TransactionId headXid;
964 	TransactionId tailXid;
965 	SerCommitSeqNo val;
966 	int			slotno;
967 
968 	Assert(TransactionIdIsValid(xid));
969 
970 	LWLockAcquire(OldSerXidLock, LW_SHARED);
971 	headXid = oldSerXidControl->headXid;
972 	tailXid = oldSerXidControl->tailXid;
973 	LWLockRelease(OldSerXidLock);
974 
975 	if (!TransactionIdIsValid(headXid))
976 		return 0;
977 
978 	Assert(TransactionIdIsValid(tailXid));
979 
980 	if (TransactionIdPrecedes(xid, tailXid)
981 		|| TransactionIdFollows(xid, headXid))
982 		return 0;
983 
984 	/*
985 	 * The following function must be called without holding OldSerXidLock,
986 	 * but will return with that lock held, which must then be released.
987 	 */
988 	slotno = SimpleLruReadPage_ReadOnly(OldSerXidSlruCtl,
989 										OldSerXidPage(xid), xid);
990 	val = OldSerXidValue(slotno, xid);
991 	LWLockRelease(OldSerXidLock);
992 	return val;
993 }
994 
995 /*
996  * Call this whenever there is a new xmin for active serializable
997  * transactions.  We don't need to keep information on transactions which
998  * precede that.  InvalidTransactionId means none active, so everything in
999  * the SLRU can be discarded.
1000  */
1001 static void
1002 OldSerXidSetActiveSerXmin(TransactionId xid)
1003 {
1004 	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
1005 
1006 	/*
1007 	 * When no sxacts are active, nothing overlaps, so set the xid values to
1008 	 * invalid to show that there are no valid entries.  Don't clear headPage,
1009 	 * though.  A new xmin might still land on that page, and we don't want to
1010 	 * repeatedly zero out the same page.
1011 	 */
1012 	if (!TransactionIdIsValid(xid))
1013 	{
1014 		oldSerXidControl->tailXid = InvalidTransactionId;
1015 		oldSerXidControl->headXid = InvalidTransactionId;
1016 		LWLockRelease(OldSerXidLock);
1017 		return;
1018 	}
1019 
1020 	/*
1021 	 * When we're recovering prepared transactions, the global xmin might move
1022 	 * backwards depending on the order they're recovered. Normally that's not
1023 	 * OK, but during recovery no serializable transactions will commit, so
1024 	 * the SLRU is empty and we can get away with it.
1025 	 */
1026 	if (RecoveryInProgress())
1027 	{
1028 		Assert(oldSerXidControl->headPage < 0);
1029 		if (!TransactionIdIsValid(oldSerXidControl->tailXid)
1030 			|| TransactionIdPrecedes(xid, oldSerXidControl->tailXid))
1031 		{
1032 			oldSerXidControl->tailXid = xid;
1033 		}
1034 		LWLockRelease(OldSerXidLock);
1035 		return;
1036 	}
1037 
1038 	Assert(!TransactionIdIsValid(oldSerXidControl->tailXid)
1039 		   || TransactionIdFollows(xid, oldSerXidControl->tailXid));
1040 
1041 	oldSerXidControl->tailXid = xid;
1042 
1043 	LWLockRelease(OldSerXidLock);
1044 }
1045 
1046 /*
1047  * Perform a checkpoint --- either during shutdown, or on-the-fly
1048  *
1049  * We don't have any data that needs to survive a restart, but this is a
1050  * convenient place to truncate the SLRU.
1051  */
1052 void
1053 CheckPointPredicate(void)
1054 {
1055 	int			tailPage;
1056 
1057 	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);
1058 
1059 	/* Exit quickly if the SLRU is currently not in use. */
1060 	if (oldSerXidControl->headPage < 0)
1061 	{
1062 		LWLockRelease(OldSerXidLock);
1063 		return;
1064 	}
1065 
1066 	if (TransactionIdIsValid(oldSerXidControl->tailXid))
1067 	{
1068 		/* We can truncate the SLRU up to the page containing tailXid */
1069 		tailPage = OldSerXidPage(oldSerXidControl->tailXid);
1070 	}
1071 	else
1072 	{
1073 		/*----------
1074 		 * The SLRU is no longer needed. Truncate to head before we set head
1075 		 * invalid.
1076 		 *
1077 		 * XXX: It's possible that the SLRU is not needed again until XID
1078 		 * wrap-around has happened, so that the segment containing headPage
1079 		 * that we leave behind will appear to be new again. In that case it
1080 		 * won't be removed until XID horizon advances enough to make it
1081 		 * current again.
1082 		 *
1083 		 * XXX: This should happen in vac_truncate_clog(), not in checkpoints.
1084 		 * Consider this scenario, starting from a system with no in-progress
1085 		 * transactions and VACUUM FREEZE having maximized oldestXact:
1086 		 * - Start a SERIALIZABLE transaction.
1087 		 * - Start, finish, and summarize a SERIALIZABLE transaction, creating
1088 		 *   one SLRU page.
1089 		 * - Consume XIDs to reach xidStopLimit.
1090 		 * - Finish all transactions.  Due to the long-running SERIALIZABLE
1091 		 *   transaction, earlier checkpoints did not touch headPage.  The
1092 		 *   next checkpoint will change it, but that checkpoint happens after
1093 		 *   the end of the scenario.
1094 		 * - VACUUM to advance XID limits.
1095 		 * - Consume ~2M XIDs, crossing the former xidWrapLimit.
1096 		 * - Start, finish, and summarize a SERIALIZABLE transaction.
1097 		 *   OldSerXidAdd() declines to create the targetPage, because
1098 		 *   headPage is not regarded as in the past relative to that
1099 		 *   targetPage.  The transaction instigating the summarize fails in
1100 		 *   SimpleLruReadPage().
1101 		 */
1102 		tailPage = oldSerXidControl->headPage;
1103 		oldSerXidControl->headPage = -1;
1104 	}
1105 
1106 	LWLockRelease(OldSerXidLock);
1107 
1108 	/* Truncate away pages that are no longer required */
1109 	SimpleLruTruncate(OldSerXidSlruCtl, tailPage);
1110 
1111 	/*
1112 	 * Flush dirty SLRU pages to disk
1113 	 *
1114 	 * This is not actually necessary from a correctness point of view. We do
1115 	 * it merely as a debugging aid.
1116 	 *
1117 	 * We're doing this after the truncation to avoid writing pages right
1118 	 * before deleting the file in which they sit, which would be completely
1119 	 * pointless.
1120 	 */
1121 	SimpleLruFlush(OldSerXidSlruCtl, true);
1122 }
1123 
1124 /*------------------------------------------------------------------------*/
1125 
1126 /*
1127  * InitPredicateLocks -- Initialize the predicate locking data structures.
1128  *
1129  * This is called from CreateSharedMemoryAndSemaphores(), which see for
1130  * more comments.  In the normal postmaster case, the shared hash tables
1131  * are created here.  Backends inherit the pointers
1132  * to the shared tables via fork().  In the EXEC_BACKEND case, each
1133  * backend re-executes this code to obtain pointers to the already existing
1134  * shared hash tables.
1135  */
1136 void
1137 InitPredicateLocks(void)
1138 {
1139 	HASHCTL		info;
1140 	long		max_table_size;
1141 	Size		requestSize;
1142 	bool		found;
1143 
1144 #ifndef EXEC_BACKEND
1145 	Assert(!IsUnderPostmaster);
1146 #endif
1147 
1148 	/*
1149 	 * Compute size of predicate lock target hashtable. Note these
1150 	 * calculations must agree with PredicateLockShmemSize!
1151 	 */
1152 	max_table_size = NPREDICATELOCKTARGETENTS();
1153 
1154 	/*
1155 	 * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
1156 	 * per-predicate-lock-target information.
1157 	 */
1158 	MemSet(&info, 0, sizeof(info));
1159 	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
1160 	info.entrysize = sizeof(PREDICATELOCKTARGET);
1161 	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
1162 
1163 	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
1164 											max_table_size,
1165 											max_table_size,
1166 											&info,
1167 											HASH_ELEM | HASH_BLOBS |
1168 											HASH_PARTITION | HASH_FIXED_SIZE);
1169 
1170 	/*
1171 	 * Reserve a dummy entry in the hash table; we use it to make sure there's
1172 	 * always one entry available when we need to split or combine a page,
1173 	 * because running out of space there could mean aborting a
1174 	 * non-serializable transaction.
1175 	 */
1176 	if (!IsUnderPostmaster)
1177 	{
1178 		(void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
1179 						   HASH_ENTER, &found);
1180 		Assert(!found);
1181 	}
1182 
1183 	/* Pre-calculate the hash and partition lock of the scratch entry */
1184 	ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
1185 	ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
1186 
1187 	/*
1188 	 * Allocate hash table for PREDICATELOCK structs.  This stores per
1189 	 * xact-lock-of-a-target information.
1190 	 */
1191 	MemSet(&info, 0, sizeof(info));
1192 	info.keysize = sizeof(PREDICATELOCKTAG);
1193 	info.entrysize = sizeof(PREDICATELOCK);
1194 	info.hash = predicatelock_hash;
1195 	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
1196 
1197 	/* Assume an average of 2 xacts per target */
1198 	max_table_size *= 2;
1199 
1200 	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
1201 									  max_table_size,
1202 									  max_table_size,
1203 									  &info,
1204 									  HASH_ELEM | HASH_FUNCTION |
1205 									  HASH_PARTITION | HASH_FIXED_SIZE);
1206 
1207 	/*
1208 	 * Compute size for serializable transaction hashtable. Note these
1209 	 * calculations must agree with PredicateLockShmemSize!
1210 	 */
1211 	max_table_size = (MaxBackends + max_prepared_xacts);
1212 
1213 	/*
1214 	 * Allocate a list to hold information on transactions participating in
1215 	 * predicate locking.
1216 	 *
1217 	 * Assume an average of 10 predicate locking transactions per backend.
1218 	 * This allows aggressive cleanup while detail is present before data must
1219 	 * be summarized for storage in SLRU and the "dummy" transaction.
1220 	 */
1221 	max_table_size *= 10;
1222 
1223 	PredXact = ShmemInitStruct("PredXactList",
1224 							   PredXactListDataSize,
1225 							   &found);
1226 	Assert(found == IsUnderPostmaster);
1227 	if (!found)
1228 	{
1229 		int			i;
1230 
1231 		SHMQueueInit(&PredXact->availableList);
1232 		SHMQueueInit(&PredXact->activeList);
1233 		PredXact->SxactGlobalXmin = InvalidTransactionId;
1234 		PredXact->SxactGlobalXminCount = 0;
1235 		PredXact->WritableSxactCount = 0;
1236 		PredXact->LastSxactCommitSeqNo = FirstNormalSerCommitSeqNo - 1;
1237 		PredXact->CanPartialClearThrough = 0;
1238 		PredXact->HavePartialClearedThrough = 0;
1239 		requestSize = mul_size((Size) max_table_size,
1240 							   PredXactListElementDataSize);
1241 		PredXact->element = ShmemAlloc(requestSize);
1242 		/* Add all elements to available list, clean. */
1243 		memset(PredXact->element, 0, requestSize);
1244 		for (i = 0; i < max_table_size; i++)
1245 		{
1246 			SHMQueueInsertBefore(&(PredXact->availableList),
1247 								 &(PredXact->element[i].link));
1248 		}
1249 		PredXact->OldCommittedSxact = CreatePredXact();
1250 		SetInvalidVirtualTransactionId(PredXact->OldCommittedSxact->vxid);
1251 		PredXact->OldCommittedSxact->prepareSeqNo = 0;
1252 		PredXact->OldCommittedSxact->commitSeqNo = 0;
1253 		PredXact->OldCommittedSxact->SeqNo.lastCommitBeforeSnapshot = 0;
1254 		SHMQueueInit(&PredXact->OldCommittedSxact->outConflicts);
1255 		SHMQueueInit(&PredXact->OldCommittedSxact->inConflicts);
1256 		SHMQueueInit(&PredXact->OldCommittedSxact->predicateLocks);
1257 		SHMQueueInit(&PredXact->OldCommittedSxact->finishedLink);
1258 		SHMQueueInit(&PredXact->OldCommittedSxact->possibleUnsafeConflicts);
1259 		PredXact->OldCommittedSxact->topXid = InvalidTransactionId;
1260 		PredXact->OldCommittedSxact->finishedBefore = InvalidTransactionId;
1261 		PredXact->OldCommittedSxact->xmin = InvalidTransactionId;
1262 		PredXact->OldCommittedSxact->flags = SXACT_FLAG_COMMITTED;
1263 		PredXact->OldCommittedSxact->pid = 0;
1264 	}
1265 	/* This never changes, so let's keep a local copy. */
1266 	OldCommittedSxact = PredXact->OldCommittedSxact;
1267 
1268 	/*
1269 	 * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
1270 	 * information for serializable transactions which have accessed data.
1271 	 */
1272 	MemSet(&info, 0, sizeof(info));
1273 	info.keysize = sizeof(SERIALIZABLEXIDTAG);
1274 	info.entrysize = sizeof(SERIALIZABLEXID);
1275 
1276 	SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
1277 										max_table_size,
1278 										max_table_size,
1279 										&info,
1280 										HASH_ELEM | HASH_BLOBS |
1281 										HASH_FIXED_SIZE);
1282 
1283 	/*
1284 	 * Allocate space for tracking rw-conflicts in lists attached to the
1285 	 * transactions.
1286 	 *
1287 	 * Assume an average of 5 conflicts per transaction.  Calculations suggest
1288 	 * that this will prevent resource exhaustion in even the most pessimal
1289 	 * loads up to max_connections = 200 with all 200 connections pounding the
1290 	 * database with serializable transactions.  Beyond that, there may be
1291 	 * occasional transactions canceled when trying to flag conflicts. That's
1292 	 * probably OK.
1293 	 */
1294 	max_table_size *= 5;
1295 
1296 	RWConflictPool = ShmemInitStruct("RWConflictPool",
1297 									 RWConflictPoolHeaderDataSize,
1298 									 &found);
1299 	Assert(found == IsUnderPostmaster);
1300 	if (!found)
1301 	{
1302 		int			i;
1303 
1304 		SHMQueueInit(&RWConflictPool->availableList);
1305 		requestSize = mul_size((Size) max_table_size,
1306 							   RWConflictDataSize);
1307 		RWConflictPool->element = ShmemAlloc(requestSize);
1308 		/* Add all elements to available list, clean. */
1309 		memset(RWConflictPool->element, 0, requestSize);
1310 		for (i = 0; i < max_table_size; i++)
1311 		{
1312 			SHMQueueInsertBefore(&(RWConflictPool->availableList),
1313 								 &(RWConflictPool->element[i].outLink));
1314 		}
1315 	}
1316 
1317 	/*
1318 	 * Create or attach to the header for the list of finished serializable
1319 	 * transactions.
1320 	 */
1321 	FinishedSerializableTransactions = (SHM_QUEUE *)
1322 		ShmemInitStruct("FinishedSerializableTransactions",
1323 						sizeof(SHM_QUEUE),
1324 						&found);
1325 	Assert(found == IsUnderPostmaster);
1326 	if (!found)
1327 		SHMQueueInit(FinishedSerializableTransactions);
1328 
1329 	/*
1330 	 * Initialize the SLRU storage for old committed serializable
1331 	 * transactions.
1332 	 */
1333 	OldSerXidInit();
1334 }
1335 
1336 /*
1337  * Estimate shared-memory space used for predicate lock table
1338  */
1339 Size
1340 PredicateLockShmemSize(void)
1341 {
1342 	Size		size = 0;
1343 	long		max_table_size;
1344 
1345 	/* predicate lock target hash table */
1346 	max_table_size = NPREDICATELOCKTARGETENTS();
1347 	size = add_size(size, hash_estimate_size(max_table_size,
1348 											 sizeof(PREDICATELOCKTARGET)));
1349 
1350 	/* predicate lock hash table */
1351 	max_table_size *= 2;
1352 	size = add_size(size, hash_estimate_size(max_table_size,
1353 											 sizeof(PREDICATELOCK)));
1354 
1355 	/*
1356 	 * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1357 	 * margin.
1358 	 */
1359 	size = add_size(size, size / 10);
1360 
1361 	/* transaction list */
1362 	max_table_size = MaxBackends + max_prepared_xacts;
1363 	max_table_size *= 10;
1364 	size = add_size(size, PredXactListDataSize);
1365 	size = add_size(size, mul_size((Size) max_table_size,
1366 								   PredXactListElementDataSize));
1367 
1368 	/* transaction xid table */
1369 	size = add_size(size, hash_estimate_size(max_table_size,
1370 											 sizeof(SERIALIZABLEXID)));
1371 
1372 	/* rw-conflict pool */
1373 	max_table_size *= 5;
1374 	size = add_size(size, RWConflictPoolHeaderDataSize);
1375 	size = add_size(size, mul_size((Size) max_table_size,
1376 								   RWConflictDataSize));
1377 
1378 	/* Head for list of finished serializable transactions. */
1379 	size = add_size(size, sizeof(SHM_QUEUE));
1380 
1381 	/* Shared memory structures for SLRU tracking of old committed xids. */
1382 	size = add_size(size, sizeof(OldSerXidControlData));
1383 	size = add_size(size, SimpleLruShmemSize(NUM_OLDSERXID_BUFFERS, 0));
1384 
1385 	return size;
1386 }
1387 
1388 
1389 /*
1390  * Compute the hash code associated with a PREDICATELOCKTAG.
1391  *
1392  * Because we want to use just one set of partition locks for both the
1393  * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1394  * that PREDICATELOCKs fall into the same partition number as their
1395  * associated PREDICATELOCKTARGETs.  dynahash.c expects the partition number
1396  * to be the low-order bits of the hash code, and therefore a
1397  * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1398  * associated PREDICATELOCKTARGETTAG's hash code.  We achieve this with this
1399  * specialized hash function.
1400  */
1401 static uint32
1402 predicatelock_hash(const void *key, Size keysize)
1403 {
1404 	const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1405 	uint32		targethash;
1406 
1407 	Assert(keysize == sizeof(PREDICATELOCKTAG));
1408 
1409 	/* Look into the associated target object, and compute its hash code */
1410 	targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1411 
1412 	return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1413 }
1414 
1415 
1416 /*
1417  * GetPredicateLockStatusData
1418  *		Return a table containing the internal state of the predicate
1419  *		lock manager for use in pg_lock_status.
1420  *
1421  * Like GetLockStatusData, this function tries to hold the partition LWLocks
1422  * for as short a time as possible by returning two arrays that simply
1423  * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1424  * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1425  * SERIALIZABLEXACT will likely appear.
1426  */
1427 PredicateLockData *
1428 GetPredicateLockStatusData(void)
1429 {
1430 	PredicateLockData *data;
1431 	int			i;
1432 	int			els,
1433 				el;
1434 	HASH_SEQ_STATUS seqstat;
1435 	PREDICATELOCK *predlock;
1436 
1437 	data = (PredicateLockData *) palloc(sizeof(PredicateLockData));
1438 
1439 	/*
1440 	 * To ensure consistency, take simultaneous locks on all partition locks
1441 	 * in ascending order, then SerializableXactHashLock.
1442 	 */
1443 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
1444 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
1445 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1446 
1447 	/* Get number of locks and allocate appropriately-sized arrays. */
1448 	els = hash_get_num_entries(PredicateLockHash);
1449 	data->nelements = els;
1450 	data->locktags = (PREDICATELOCKTARGETTAG *)
1451 		palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
1452 	data->xacts = (SERIALIZABLEXACT *)
1453 		palloc(sizeof(SERIALIZABLEXACT) * els);
1454 
1455 
1456 	/* Scan through PredicateLockHash and copy contents */
1457 	hash_seq_init(&seqstat, PredicateLockHash);
1458 
1459 	el = 0;
1460 
1461 	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
1462 	{
1463 		data->locktags[el] = predlock->tag.myTarget->tag;
1464 		data->xacts[el] = *predlock->tag.myXact;
1465 		el++;
1466 	}
1467 
1468 	Assert(el == els);
1469 
1470 	/* Release locks in reverse order */
1471 	LWLockRelease(SerializableXactHashLock);
1472 	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
1473 		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
1474 
1475 	return data;
1476 }
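/*
 * Illustrative sketch (hypothetical caller, not part of the build): the
 * two parallel arrays are consumed by indexing them together, in the
 * style of pg_lock_status:
 *
 *	PredicateLockData *data = GetPredicateLockStatusData();
 *	int			i;
 *
 *	for (i = 0; i < data->nelements; i++)
 *	{
 *		PREDICATELOCKTARGETTAG *tag = &data->locktags[i];
 *		SERIALIZABLEXACT *sxact = &data->xacts[i];
 *
 *		... report one SIREAD lock held by sxact on tag ...
 *	}
 */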
1477 
1478 /*
1479  * Free up shared memory structures by pushing the oldest sxact (the one at
1480  * the front of the FinishedSerializableTransactions list) into summary form.
1481  * Each call will free exactly one SERIALIZABLEXACT structure and may also
1482  * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1483  * PREDICATELOCKTARGET, RWConflictData.
1484  */
1485 static void
1486 SummarizeOldestCommittedSxact(void)
1487 {
1488 	SERIALIZABLEXACT *sxact;
1489 
1490 	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
1491 
1492 	/*
1493 	 * This function is only called if there are no sxact slots available.
1494 	 * Some of them must belong to old, already-finished transactions, so
1495 	 * there should be something in the FinishedSerializableTransactions list that
1496 	 * we can summarize. However, there's a race condition: while we were not
1497 	 * holding any locks, a transaction might have ended and cleaned up all
1498 	 * the finished sxact entries already, freeing up their sxact slots. In
1499 	 * that case, we have nothing to do here. The caller will find one of the
1500 	 * slots released by the other backend when it retries.
1501 	 */
1502 	if (SHMQueueEmpty(FinishedSerializableTransactions))
1503 	{
1504 		LWLockRelease(SerializableFinishedListLock);
1505 		return;
1506 	}
1507 
1508 	/*
1509 	 * Grab the first sxact off the finished list -- this will be the earliest
1510 	 * commit.  Remove it from the list.
1511 	 */
1512 	sxact = (SERIALIZABLEXACT *)
1513 		SHMQueueNext(FinishedSerializableTransactions,
1514 					 FinishedSerializableTransactions,
1515 					 offsetof(SERIALIZABLEXACT, finishedLink));
1516 	SHMQueueDelete(&(sxact->finishedLink));
1517 
1518 	/* Add to SLRU summary information. */
1519 	if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
1520 		OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
1521 					 ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
1522 
1523 	/* Summarize and release the detail. */
1524 	ReleaseOneSerializableXact(sxact, false, true);
1525 
1526 	LWLockRelease(SerializableFinishedListLock);
1527 }
1528 
1529 /*
1530  * GetSafeSnapshot
1531  *		Obtain and register a snapshot for a READ ONLY DEFERRABLE
1532  *		transaction. Ensures that the snapshot is "safe", i.e. a
1533  *		read-only transaction running on it can execute serializably
1534  *		without further checks. This requires waiting for concurrent
1535  *		transactions to complete, and retrying with a new snapshot if
1536  *		one of them could possibly create a conflict.
1537  *
1538  *		As with GetSerializableTransactionSnapshot (which this is a subroutine
1539  *		for), the passed-in Snapshot pointer should reference a static data
1540  *		area that can safely be passed to GetSnapshotData.
1541  */
1542 static Snapshot
1543 GetSafeSnapshot(Snapshot origSnapshot)
1544 {
1545 	Snapshot	snapshot;
1546 
1547 	Assert(XactReadOnly && XactDeferrable);
1548 
1549 	while (true)
1550 	{
1551 		/*
1552 		 * GetSerializableTransactionSnapshotInt is going to call
1553 		 * GetSnapshotData, so we need to provide it the static snapshot area
1554 		 * our caller passed to us.  The pointer returned is actually the same
1555 		 * one passed to it, but we avoid assuming that here.
1556 		 */
1557 		snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
1558 														 NULL, InvalidPid);
1559 
1560 		if (MySerializableXact == InvalidSerializableXact)
1561 			return snapshot;	/* no concurrent r/w xacts; it's safe */
1562 
1563 		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1564 
1565 		/*
1566 		 * Wait for concurrent transactions to finish. Stop early if one of
1567 		 * them marked us as conflicted.
1568 		 */
1569 		MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
1570 		while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
1571 				 SxactIsROUnsafe(MySerializableXact)))
1572 		{
1573 			LWLockRelease(SerializableXactHashLock);
1574 			ProcWaitForSignal(WAIT_EVENT_SAFE_SNAPSHOT);
1575 			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1576 		}
1577 		MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
1578 
1579 		if (!SxactIsROUnsafe(MySerializableXact))
1580 		{
1581 			LWLockRelease(SerializableXactHashLock);
1582 			break;				/* success */
1583 		}
1584 
1585 		LWLockRelease(SerializableXactHashLock);
1586 
1587 		/* else, need to retry... */
1588 		ereport(DEBUG2,
1589 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1590 				 errmsg("deferrable snapshot was unsafe; trying a new one")));
1591 		ReleasePredicateLocks(false);
1592 	}
1593 
1594 	/*
1595 	 * Now we have a safe snapshot, so we don't need to do any further checks.
1596 	 */
1597 	Assert(SxactIsROSafe(MySerializableXact));
1598 	ReleasePredicateLocks(false);
1599 
1600 	return snapshot;
1601 }
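/*
 * Usage note (SQL level): this code path is reached by starting a
 * transaction such as
 *
 *	BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;
 *
 * which may block here until a safe snapshot is obtained, but thereafter
 * runs with no predicate locking overhead and no risk of serialization
 * failure.
 */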
1602 
1603 /*
1604  * GetSafeSnapshotBlockingPids
1605  *		If the specified process is currently blocked in GetSafeSnapshot,
1606  *		write the process IDs of all processes that it is blocked by
1607  *		into the caller-supplied buffer output[].  The list is truncated at
1608  *		output_size, and the number of PIDs written into the buffer is
1609  *		returned.  Returns zero if the given PID is not currently blocked
1610  *		in GetSafeSnapshot.
1611  */
1612 int
1613 GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
1614 {
1615 	int			num_written = 0;
1616 	SERIALIZABLEXACT *sxact;
1617 
1618 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1619 
1620 	/* Find blocked_pid's SERIALIZABLEXACT by linear search. */
1621 	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
1622 	{
1623 		if (sxact->pid == blocked_pid)
1624 			break;
1625 	}
1626 
1627 	/* Did we find it, and is it currently waiting in GetSafeSnapshot? */
1628 	if (sxact != NULL && SxactIsDeferrableWaiting(sxact))
1629 	{
1630 		RWConflict	possibleUnsafeConflict;
1631 
1632 		/* Traverse the list of possible unsafe conflicts collecting PIDs. */
1633 		possibleUnsafeConflict = (RWConflict)
1634 			SHMQueueNext(&sxact->possibleUnsafeConflicts,
1635 						 &sxact->possibleUnsafeConflicts,
1636 						 offsetof(RWConflictData, inLink));
1637 
1638 		while (possibleUnsafeConflict != NULL && num_written < output_size)
1639 		{
1640 			output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
1641 			possibleUnsafeConflict = (RWConflict)
1642 				SHMQueueNext(&sxact->possibleUnsafeConflicts,
1643 							 &possibleUnsafeConflict->inLink,
1644 							 offsetof(RWConflictData, inLink));
1645 		}
1646 	}
1647 
1648 	LWLockRelease(SerializableXactHashLock);
1649 
1650 	return num_written;
1651 }
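/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 *
 *	int		blockers[100];
 *	int		n;
 *
 *	n = GetSafeSnapshotBlockingPids(pid, blockers, lengthof(blockers));
 *
 * A zero result means the given PID is not currently blocked in
 * GetSafeSnapshot; otherwise blockers[0 .. n-1] hold the blocking PIDs.
 */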
1652 
1653 /*
1654  * Acquire a snapshot that can be used for the current transaction.
1655  *
1656  * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1657  * It should be current for this process and be contained in PredXact.
1658  *
1659  * The passed-in Snapshot pointer should reference a static data area that
1660  * can safely be passed to GetSnapshotData.  The return value is actually
1661  * always this same pointer; no new snapshot data structure is allocated
1662  * within this function.
1663  */
1664 Snapshot
1665 GetSerializableTransactionSnapshot(Snapshot snapshot)
1666 {
1667 	Assert(IsolationIsSerializable());
1668 
1669 	/*
1670 	 * Can't use serializable mode while recovery is still active, as it is,
1671 	 * for example, on a hot standby.  We could get here despite the check in
1672 	 * check_XactIsoLevel() if default_transaction_isolation is set to
1673 	 * serializable, so phrase the hint accordingly.
1674 	 */
1675 	if (RecoveryInProgress())
1676 		ereport(ERROR,
1677 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1678 				 errmsg("cannot use serializable mode in a hot standby"),
1679 				 errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1680 				 errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1681 
1682 	/*
1683 	 * A special optimization is available for SERIALIZABLE READ ONLY
1684 	 * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1685 	 * thereby avoid all SSI overhead once it's running.
1686 	 */
1687 	if (XactReadOnly && XactDeferrable)
1688 		return GetSafeSnapshot(snapshot);
1689 
1690 	return GetSerializableTransactionSnapshotInt(snapshot,
1691 												 NULL, InvalidPid);
1692 }
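/*
 * Usage note: ordinary SERIALIZABLE transactions arrive here via
 * GetTransactionSnapshot() in snapmgr.c, which routes the first snapshot
 * request of a serializable transaction through this function; no
 * explicit call from the executor is needed.
 */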
1693 
1694 /*
1695  * Import a snapshot to be used for the current transaction.
1696  *
1697  * This is nearly the same as GetSerializableTransactionSnapshot, except that
1698  * we don't take a new snapshot, but rather use the data we're handed.
1699  *
1700  * The caller must have verified that the snapshot came from a serializable
1701  * transaction; and if we're read-write, the source transaction must not be
1702  * read-only.
1703  */
1704 void
1705 SetSerializableTransactionSnapshot(Snapshot snapshot,
1706 								   VirtualTransactionId *sourcevxid,
1707 								   int sourcepid)
1708 {
1709 	Assert(IsolationIsSerializable());
1710 
1711 	/*
1712 	 * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1713 	 * import snapshots, since there's no way to wait for a safe snapshot when
1714 	 * we're using the snapshot we're told to.  (XXX instead of throwing an error,
1715 	 * we could just ignore the XactDeferrable flag?)
1716 	 */
1717 	if (XactReadOnly && XactDeferrable)
1718 		ereport(ERROR,
1719 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1720 				 errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1721 
1722 	(void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
1723 												 sourcepid);
1724 }
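/*
 * Usage note (SQL level): this path is taken when a serializable
 * transaction imports a snapshot exported by pg_export_snapshot() in
 * another session, e.g. (the identifier shown is illustrative)
 *
 *	BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
 *	SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
 */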
1725 
1726 /*
1727  * Guts of GetSerializableTransactionSnapshot
1728  *
1729  * If sourcevxid is valid, this is actually an import operation and we should
1730  * skip calling GetSnapshotData, because the snapshot contents are already
1731  * loaded up.  HOWEVER: to avoid race conditions, we must check that the
1732  * source xact is still running after we acquire SerializableXactHashLock.
1733  * We do that by calling ProcArrayInstallImportedXmin.
1734  */
1735 static Snapshot
1736 GetSerializableTransactionSnapshotInt(Snapshot snapshot,
1737 									  VirtualTransactionId *sourcevxid,
1738 									  int sourcepid)
1739 {
1740 	PGPROC	   *proc;
1741 	VirtualTransactionId vxid;
1742 	SERIALIZABLEXACT *sxact,
1743 			   *othersxact;
1744 	HASHCTL		hash_ctl;
1745 
1746 	/* We only do this for serializable transactions.  Once. */
1747 	Assert(MySerializableXact == InvalidSerializableXact);
1748 
1749 	Assert(!RecoveryInProgress());
1750 
1751 	/*
1752 	 * Since all parts of a serializable transaction must use the same
1753 	 * snapshot, it is too late to establish one after a parallel operation
1754 	 * has begun.
1755 	 */
1756 	if (IsInParallelMode())
1757 		elog(ERROR, "cannot establish serializable snapshot during a parallel operation");
1758 
1759 	proc = MyProc;
1760 	Assert(proc != NULL);
1761 	GET_VXID_FROM_PGPROC(vxid, *proc);
1762 
1763 	/*
1764 	 * First we get the sxact structure, which may involve looping and access
1765 	 * to the "finished" list to free a structure for use.
1766 	 *
1767 	 * We must hold SerializableXactHashLock when taking/checking the snapshot
1768 	 * to avoid race conditions, for much the same reasons that
1769 	 * GetSnapshotData takes the ProcArrayLock.  Since we might have to
1770 	 * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
1771 	 * this means we have to create the sxact first, which is a bit annoying
1772 	 * (in particular, an elog(ERROR) in procarray.c would cause us to leak
1773 	 * the sxact).  Consider refactoring to avoid this.
1774 	 */
1775 #ifdef TEST_OLDSERXID
1776 	SummarizeOldestCommittedSxact();
1777 #endif
1778 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1779 	do
1780 	{
1781 		sxact = CreatePredXact();
1782 		/* If null, push out committed sxact to SLRU summary & retry. */
1783 		if (!sxact)
1784 		{
1785 			LWLockRelease(SerializableXactHashLock);
1786 			SummarizeOldestCommittedSxact();
1787 			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1788 		}
1789 	} while (!sxact);
1790 
1791 	/* Get the snapshot, or check that it's safe to use */
1792 	if (!sourcevxid)
1793 		snapshot = GetSnapshotData(snapshot);
1794 	else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
1795 	{
1796 		ReleasePredXact(sxact);
1797 		LWLockRelease(SerializableXactHashLock);
1798 		ereport(ERROR,
1799 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1800 				 errmsg("could not import the requested snapshot"),
1801 				 errdetail("The source process with PID %d is not running anymore.",
1802 						   sourcepid)));
1803 	}
1804 
1805 	/*
1806 	 * If there are no serializable transactions which are not read-only, we
1807 	 * can "opt out" of predicate locking and conflict checking for a
1808 	 * read-only transaction.
1809 	 *
1810 	 * The reason this is safe is that a read-only transaction can only become
1811 	 * part of a dangerous structure if it overlaps a writable transaction
1812 	 * which in turn overlaps a writable transaction which committed before
1813 	 * the read-only transaction started.  A new writable transaction can
1814 	 * overlap this one, but it can't meet the other condition of overlapping
1815 	 * a transaction which committed before this one started.
1816 	 */
1817 	if (XactReadOnly && PredXact->WritableSxactCount == 0)
1818 	{
1819 		ReleasePredXact(sxact);
1820 		LWLockRelease(SerializableXactHashLock);
1821 		return snapshot;
1822 	}
1823 
1824 	/* Maintain serializable global xmin info. */
1825 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
1826 	{
1827 		Assert(PredXact->SxactGlobalXminCount == 0);
1828 		PredXact->SxactGlobalXmin = snapshot->xmin;
1829 		PredXact->SxactGlobalXminCount = 1;
1830 		OldSerXidSetActiveSerXmin(snapshot->xmin);
1831 	}
1832 	else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
1833 	{
1834 		Assert(PredXact->SxactGlobalXminCount > 0);
1835 		PredXact->SxactGlobalXminCount++;
1836 	}
1837 	else
1838 	{
1839 		Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
1840 	}
1841 
1842 	/* Initialize the structure. */
1843 	sxact->vxid = vxid;
1844 	sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
1845 	sxact->prepareSeqNo = InvalidSerCommitSeqNo;
1846 	sxact->commitSeqNo = InvalidSerCommitSeqNo;
1847 	SHMQueueInit(&(sxact->outConflicts));
1848 	SHMQueueInit(&(sxact->inConflicts));
1849 	SHMQueueInit(&(sxact->possibleUnsafeConflicts));
1850 	sxact->topXid = GetTopTransactionIdIfAny();
1851 	sxact->finishedBefore = InvalidTransactionId;
1852 	sxact->xmin = snapshot->xmin;
1853 	sxact->pid = MyProcPid;
1854 	SHMQueueInit(&(sxact->predicateLocks));
1855 	SHMQueueElemInit(&(sxact->finishedLink));
1856 	sxact->flags = 0;
1857 	if (XactReadOnly)
1858 	{
1859 		sxact->flags |= SXACT_FLAG_READ_ONLY;
1860 
1861 		/*
1862 		 * Register all concurrent r/w transactions as possible conflicts; if
1863 		 * all of them commit without any outgoing conflicts to earlier
1864 		 * transactions then this snapshot can be deemed safe (and we can run
1865 		 * without tracking predicate locks).
1866 		 */
1867 		for (othersxact = FirstPredXact();
1868 			 othersxact != NULL;
1869 			 othersxact = NextPredXact(othersxact))
1870 		{
1871 			if (!SxactIsCommitted(othersxact)
1872 				&& !SxactIsDoomed(othersxact)
1873 				&& !SxactIsReadOnly(othersxact))
1874 			{
1875 				SetPossibleUnsafeConflict(sxact, othersxact);
1876 			}
1877 		}
1878 	}
1879 	else
1880 	{
1881 		++(PredXact->WritableSxactCount);
1882 		Assert(PredXact->WritableSxactCount <=
1883 			   (MaxBackends + max_prepared_xacts));
1884 	}
1885 
1886 	MySerializableXact = sxact;
1887 	MyXactDidWrite = false;		/* haven't written anything yet */
1888 
1889 	LWLockRelease(SerializableXactHashLock);
1890 
1891 	/* Initialize the backend-local hash table of parent locks */
1892 	Assert(LocalPredicateLockHash == NULL);
1893 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
1894 	hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1895 	hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1896 	LocalPredicateLockHash = hash_create("Local predicate lock",
1897 										 max_predicate_locks_per_xact,
1898 										 &hash_ctl,
1899 										 HASH_ELEM | HASH_BLOBS);
1900 
1901 	return snapshot;
1902 }
1903 
1904 /*
1905  * Register the top level XID in SerializableXidHash.
1906  * Also store it for easy reference in MySerializableXact.
1907  */
1908 void
1909 RegisterPredicateLockingXid(TransactionId xid)
1910 {
1911 	SERIALIZABLEXIDTAG sxidtag;
1912 	SERIALIZABLEXID *sxid;
1913 	bool		found;
1914 
1915 	/*
1916 	 * If we're not tracking predicate lock data for this transaction, we
1917 	 * should ignore the request and return quickly.
1918 	 */
1919 	if (MySerializableXact == InvalidSerializableXact)
1920 		return;
1921 
1922 	/* We should have a valid XID and be at the top level. */
1923 	Assert(TransactionIdIsValid(xid));
1924 
1925 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1926 
1927 	/* This should only be done once per transaction. */
1928 	Assert(MySerializableXact->topXid == InvalidTransactionId);
1929 
1930 	MySerializableXact->topXid = xid;
1931 
1932 	sxidtag.xid = xid;
1933 	sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
1934 										   &sxidtag,
1935 										   HASH_ENTER, &found);
1936 	Assert(!found);
1937 
1938 	/* Initialize the structure. */
1939 	sxid->myXact = MySerializableXact;
1940 	LWLockRelease(SerializableXactHashLock);
1941 }
1942 
1943 
1944 /*
1945  * Check whether there are any predicate locks held by any transaction
1946  * for the page at the given block number.
1947  *
1948  * Note that the transaction may be completed but not yet subject to
1949  * cleanup due to overlapping serializable transactions.  This must
1950  * return valid information regardless of transaction isolation level.
1951  *
1952  * Also note that this doesn't check for a conflicting relation lock,
1953  * just a lock specifically on the given page.
1954  *
1955  * One use is to support proper behavior during GiST index vacuum.
1956  */
1957 bool
1958 PageIsPredicateLocked(Relation relation, BlockNumber blkno)
1959 {
1960 	PREDICATELOCKTARGETTAG targettag;
1961 	uint32		targettaghash;
1962 	LWLock	   *partitionLock;
1963 	PREDICATELOCKTARGET *target;
1964 
1965 	SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
1966 									relation->rd_node.dbNode,
1967 									relation->rd_id,
1968 									blkno);
1969 
1970 	targettaghash = PredicateLockTargetTagHashCode(&targettag);
1971 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
1972 	LWLockAcquire(partitionLock, LW_SHARED);
1973 	target = (PREDICATELOCKTARGET *)
1974 		hash_search_with_hash_value(PredicateLockTargetHash,
1975 									&targettag, targettaghash,
1976 									HASH_FIND, NULL);
1977 	LWLockRelease(partitionLock);
1978 
1979 	return (target != NULL);
1980 }
1981 
1982 
1983 /*
1984  * Check whether a particular lock is held by this transaction.
1985  *
1986  * Important note: this function may return false even if the lock is
1987  * being held, because it uses the local lock table which is not
1988  * updated if another transaction modifies our lock list (e.g. to
1989  * split an index page). It can also return true when a coarser
1990  * granularity lock that covers this target is being held. Be careful
1991  * to only use this function in circumstances where such errors are
1992  * acceptable!
1993  */
1994 static bool
1995 PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag)
1996 {
1997 	LOCALPREDICATELOCK *lock;
1998 
1999 	/* check local hash table */
2000 	lock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2001 											  targettag,
2002 											  HASH_FIND, NULL);
2003 
2004 	if (!lock)
2005 		return false;
2006 
2007 	/*
2008 	 * Found entry in the table, but still need to check whether it's actually
2009 	 * held -- it could just be a parent of some held lock.
2010 	 */
2011 	return lock->held;
2012 }
2013 
2014 /*
2015  * Return the parent lock tag in the lock hierarchy: the next coarser
2016  * lock that covers the provided tag.
2017  *
2018  * Returns true and sets *parent to the parent tag if one exists,
2019  * returns false if none exists.
2020  */
2021 static bool
2022 GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
2023 						  PREDICATELOCKTARGETTAG *parent)
2024 {
2025 	switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2026 	{
2027 		case PREDLOCKTAG_RELATION:
2028 			/* relation locks have no parent lock */
2029 			return false;
2030 
2031 		case PREDLOCKTAG_PAGE:
2032 			/* parent lock is relation lock */
2033 			SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
2034 												GET_PREDICATELOCKTARGETTAG_DB(*tag),
2035 												GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
2036 
2037 			return true;
2038 
2039 		case PREDLOCKTAG_TUPLE:
2040 			/* parent lock is page lock */
2041 			SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
2042 											GET_PREDICATELOCKTARGETTAG_DB(*tag),
2043 											GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
2044 											GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
2045 			return true;
2046 	}
2047 
2048 	/* not reachable */
2049 	Assert(false);
2050 	return false;
2051 }
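/*
 * Worked example (OIDs and numbers are arbitrary illustrations): starting
 * from a tuple tag (db 16384, rel 16385, page 7, tuple 42), successive
 * calls yield the page tag (db 16384, rel 16385, page 7), then the
 * relation tag (db 16384, rel 16385), then return false.
 */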
2052 
2053 /*
2054  * Check whether the lock we are considering is already covered by a
2055  * coarser lock for our transaction.
2056  *
2057  * Like PredicateLockExists, this function might return a false
2058  * negative, but it will never return a false positive.
2059  */
2060 static bool
2061 CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
2062 {
2063 	PREDICATELOCKTARGETTAG targettag,
2064 				parenttag;
2065 
2066 	targettag = *newtargettag;
2067 
2068 	/* check parents iteratively until no more */
2069 	while (GetParentPredicateLockTag(&targettag, &parenttag))
2070 	{
2071 		targettag = parenttag;
2072 		if (PredicateLockExists(&targettag))
2073 			return true;
2074 	}
2075 
2076 	/* no more parents to check; lock is not covered */
2077 	return false;
2078 }
2079 
2080 /*
2081  * Remove the dummy entry from the predicate lock target hash, to free up some
2082  * scratch space. The caller must be holding SerializablePredicateLockListLock,
2083  * and must restore the entry with RestoreScratchTarget() before releasing the
2084  * lock.
2085  *
2086  * If lockheld is true, the caller is already holding the partition lock
2087  * of the partition containing the scratch entry.
2088  */
2089 static void
2090 RemoveScratchTarget(bool lockheld)
2091 {
2092 	bool		found;
2093 
2094 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2095 
2096 	if (!lockheld)
2097 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2098 	hash_search_with_hash_value(PredicateLockTargetHash,
2099 								&ScratchTargetTag,
2100 								ScratchTargetTagHash,
2101 								HASH_REMOVE, &found);
2102 	Assert(found);
2103 	if (!lockheld)
2104 		LWLockRelease(ScratchPartitionLock);
2105 }
2106 
2107 /*
2108  * Re-insert the dummy entry in predicate lock target hash.
2109  */
2110 static void
2111 RestoreScratchTarget(bool lockheld)
2112 {
2113 	bool		found;
2114 
2115 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2116 
2117 	if (!lockheld)
2118 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2119 	hash_search_with_hash_value(PredicateLockTargetHash,
2120 								&ScratchTargetTag,
2121 								ScratchTargetTagHash,
2122 								HASH_ENTER, &found);
2123 	Assert(!found);
2124 	if (!lockheld)
2125 		LWLockRelease(ScratchPartitionLock);
2126 }
2127 
2128 /*
2129  * Check whether the list of related predicate locks is empty for a
2130  * predicate lock target, and remove the target if it is.
2131  */
2132 static void
2133 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
2134 {
2135 	PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
2136 
2137 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2138 
2139 	/* Can't remove it until no locks at this target. */
2140 	if (!SHMQueueEmpty(&target->predicateLocks))
2141 		return;
2142 
2143 	/* Actually remove the target. */
2144 	rmtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2145 										   &target->tag,
2146 										   targettaghash,
2147 										   HASH_REMOVE, NULL);
2148 	Assert(rmtarget == target);
2149 }
2150 
2151 /*
2152  * Delete child target locks owned by this process.
2153  * This implementation assumes that the usage of each target tag field
2154  * is uniform.  No need to make this hard if we don't have to.
2155  *
2156  * We aren't acquiring lightweight locks for the predicate lock or lock
2157  * target structures associated with this transaction unless we're going
2158  * to modify them, because no other process is permitted to modify our
2159  * locks.
2160  */
2161 static void
2162 DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
2163 {
2164 	SERIALIZABLEXACT *sxact;
2165 	PREDICATELOCK *predlock;
2166 
2167 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2168 	sxact = MySerializableXact;
2169 	predlock = (PREDICATELOCK *)
2170 		SHMQueueNext(&(sxact->predicateLocks),
2171 					 &(sxact->predicateLocks),
2172 					 offsetof(PREDICATELOCK, xactLink));
2173 	while (predlock)
2174 	{
2175 		SHM_QUEUE  *predlocksxactlink;
2176 		PREDICATELOCK *nextpredlock;
2177 		PREDICATELOCKTAG oldlocktag;
2178 		PREDICATELOCKTARGET *oldtarget;
2179 		PREDICATELOCKTARGETTAG oldtargettag;
2180 
2181 		predlocksxactlink = &(predlock->xactLink);
2182 		nextpredlock = (PREDICATELOCK *)
2183 			SHMQueueNext(&(sxact->predicateLocks),
2184 						 predlocksxactlink,
2185 						 offsetof(PREDICATELOCK, xactLink));
2186 
2187 		oldlocktag = predlock->tag;
2188 		Assert(oldlocktag.myXact == sxact);
2189 		oldtarget = oldlocktag.myTarget;
2190 		oldtargettag = oldtarget->tag;
2191 
2192 		if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
2193 		{
2194 			uint32		oldtargettaghash;
2195 			LWLock	   *partitionLock;
2196 			PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
2197 
2198 			oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2199 			partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2200 
2201 			LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2202 
2203 			SHMQueueDelete(predlocksxactlink);
2204 			SHMQueueDelete(&(predlock->targetLink));
2205 			rmpredlock = hash_search_with_hash_value
2206 				(PredicateLockHash,
2207 				 &oldlocktag,
2208 				 PredicateLockHashCodeFromTargetHashCode(&oldlocktag,
2209 														 oldtargettaghash),
2210 				 HASH_REMOVE, NULL);
2211 			Assert(rmpredlock == predlock);
2212 
2213 			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2214 
2215 			LWLockRelease(partitionLock);
2216 
2217 			DecrementParentLocks(&oldtargettag);
2218 		}
2219 
2220 		predlock = nextpredlock;
2221 	}
2222 	LWLockRelease(SerializablePredicateLockListLock);
2223 }
2224 
2225 /*
2226  * Returns the promotion limit for a given predicate lock target.  This is the
2227  * max number of descendant locks allowed before promoting to the specified
2228  * tag. Note that the limit includes non-direct descendants (e.g., both tuples
2229  * and pages for a relation lock).
2230  *
2231  * Currently the default limit is 2 for a page lock, and half of the value of
2232  * max_pred_locks_per_transaction - 1 for a relation lock, to match behavior
2233  * of earlier releases when upgrading.
2234  *
2235  * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
2236  * of page and tuple locks based on the pages in a relation, and the maximum
2237  * ratio of tuple locks to tuples in a page.  This would provide more
2238  * generally "balanced" allocation of locks to where they are most useful,
2239  * while still allowing the absolute numbers to prevent one relation from
2240  * tying up all predicate lock resources.
2241  */
2242 static int
2243 MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
2244 {
2245 	switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2246 	{
2247 		case PREDLOCKTAG_RELATION:
2248 			return max_predicate_locks_per_relation < 0
2249 				? (max_predicate_locks_per_xact
2250 				   / (-max_predicate_locks_per_relation)) - 1
2251 				: max_predicate_locks_per_relation;
2252 
2253 		case PREDLOCKTAG_PAGE:
2254 			return max_predicate_locks_per_page;
2255 
2256 		case PREDLOCKTAG_TUPLE:
2257 
2258 			/*
2259 			 * not reachable: nothing is finer-granularity than a tuple, so we
2260 			 * should never try to promote to it.
2261 			 */
2262 			Assert(false);
2263 			return 0;
2264 	}
2265 
2266 	/* not reachable */
2267 	Assert(false);
2268 	return 0;
2269 }
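/*
 * Worked example, assuming the default settings (max_pred_locks_per_page
 * = 2, max_pred_locks_per_relation = -2, max_pred_locks_per_transaction
 * = 64): a page target allows 2 child (tuple) locks, so the third tuple
 * lock requested on a page triggers promotion to the page lock; a
 * relation target allows 64/2 - 1 = 31 descendant locks, so the 32nd
 * page or tuple lock on a relation triggers promotion to the relation
 * lock.
 */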
2270 
2271 /*
2272  * For all ancestors of a newly-acquired predicate lock, increment
2273  * their child count in the parent hash table. If any of them have
2274  * more descendants than their promotion threshold, acquire the
2275  * coarsest such lock.
2276  *
2277  * Returns true if a parent lock was acquired and false otherwise.
2278  */
2279 static bool
2280 CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
2281 {
2282 	PREDICATELOCKTARGETTAG targettag,
2283 				nexttag,
2284 				promotiontag;
2285 	LOCALPREDICATELOCK *parentlock;
2286 	bool		found,
2287 				promote;
2288 
2289 	promote = false;
2290 
2291 	targettag = *reqtag;
2292 
2293 	/* check parents iteratively */
2294 	while (GetParentPredicateLockTag(&targettag, &nexttag))
2295 	{
2296 		targettag = nexttag;
2297 		parentlock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2298 														&targettag,
2299 														HASH_ENTER,
2300 														&found);
2301 		if (!found)
2302 		{
2303 			parentlock->held = false;
2304 			parentlock->childLocks = 1;
2305 		}
2306 		else
2307 			parentlock->childLocks++;
2308 
2309 		if (parentlock->childLocks >
2310 			MaxPredicateChildLocks(&targettag))
2311 		{
2312 			/*
2313 			 * We should promote to this parent lock. Continue to check its
2314 			 * ancestors, however, both to get their child counts right and to
2315 			 * check whether we should just go ahead and promote to one of
2316 			 * them.
2317 			 */
2318 			promotiontag = targettag;
2319 			promote = true;
2320 		}
2321 	}
2322 
2323 	if (promote)
2324 	{
2325 		/* acquire coarsest ancestor eligible for promotion */
2326 		PredicateLockAcquire(&promotiontag);
2327 		return true;
2328 	}
2329 	else
2330 		return false;
2331 }
2332 
2333 /*
2334  * When releasing a lock, decrement the child count on all ancestor
2335  * locks.
2336  *
2337  * This is called only when releasing a lock via
2338  * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
2339  * we've acquired its parent, possibly due to promotion) or when a new
2340  * MVCC write lock makes the predicate lock unnecessary. There's no
2341  * point in calling it when locks are released at transaction end, as
2342  * this information is no longer needed.
2343  */
2344 static void
2345 DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
2346 {
2347 	PREDICATELOCKTARGETTAG parenttag,
2348 				nexttag;
2349 
2350 	parenttag = *targettag;
2351 
2352 	while (GetParentPredicateLockTag(&parenttag, &nexttag))
2353 	{
2354 		uint32		targettaghash;
2355 		LOCALPREDICATELOCK *parentlock,
2356 				   *rmlock PG_USED_FOR_ASSERTS_ONLY;
2357 
2358 		parenttag = nexttag;
2359 		targettaghash = PredicateLockTargetTagHashCode(&parenttag);
2360 		parentlock = (LOCALPREDICATELOCK *)
2361 			hash_search_with_hash_value(LocalPredicateLockHash,
2362 										&parenttag, targettaghash,
2363 										HASH_FIND, NULL);
2364 
2365 		/*
2366 		 * There's a small chance the parent lock doesn't exist in the lock
2367 		 * table. This can happen if we prematurely removed it because an
2368 		 * index split caused the child refcount to be off.
2369 		 */
2370 		if (parentlock == NULL)
2371 			continue;
2372 
2373 		parentlock->childLocks--;
2374 
2375 		/*
2376 		 * Under similar circumstances the parent lock's refcount might be
2377 		 * zero. This only happens if we're holding that lock (otherwise we
2378 		 * would have removed the entry).
2379 		 */
2380 		if (parentlock->childLocks < 0)
2381 		{
2382 			Assert(parentlock->held);
2383 			parentlock->childLocks = 0;
2384 		}
2385 
2386 		if ((parentlock->childLocks == 0) && (!parentlock->held))
2387 		{
2388 			rmlock = (LOCALPREDICATELOCK *)
2389 				hash_search_with_hash_value(LocalPredicateLockHash,
2390 											&parenttag, targettaghash,
2391 											HASH_REMOVE, NULL);
2392 			Assert(rmlock == parentlock);
2393 		}
2394 	}
2395 }
2396 
2397 /*
2398  * Indicate that a predicate lock on the given target is held by the
2399  * specified transaction. Has no effect if the lock is already held.
2400  *
2401  * This updates the lock table and the sxact's lock list, and creates
2402  * the lock target if necessary, but does *not* do anything related to
2403  * granularity promotion or the local lock table. See
2404  * PredicateLockAcquire for that.
2405  */
2406 static void
2407 CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
2408 					uint32 targettaghash,
2409 					SERIALIZABLEXACT *sxact)
2410 {
2411 	PREDICATELOCKTARGET *target;
2412 	PREDICATELOCKTAG locktag;
2413 	PREDICATELOCK *lock;
2414 	LWLock	   *partitionLock;
2415 	bool		found;
2416 
2417 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
2418 
2419 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2420 	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2421 
2422 	/* Make sure that the target is represented. */
2423 	target = (PREDICATELOCKTARGET *)
2424 		hash_search_with_hash_value(PredicateLockTargetHash,
2425 									targettag, targettaghash,
2426 									HASH_ENTER_NULL, &found);
2427 	if (!target)
2428 		ereport(ERROR,
2429 				(errcode(ERRCODE_OUT_OF_MEMORY),
2430 				 errmsg("out of shared memory"),
2431 				 errhint("You might need to increase max_pred_locks_per_transaction.")));
2432 	if (!found)
2433 		SHMQueueInit(&(target->predicateLocks));
2434 
2435 	/* We've got the sxact and target, make sure they're joined. */
2436 	locktag.myTarget = target;
2437 	locktag.myXact = sxact;
2438 	lock = (PREDICATELOCK *)
2439 		hash_search_with_hash_value(PredicateLockHash, &locktag,
2440 									PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
2441 									HASH_ENTER_NULL, &found);
2442 	if (!lock)
2443 		ereport(ERROR,
2444 				(errcode(ERRCODE_OUT_OF_MEMORY),
2445 				 errmsg("out of shared memory"),
2446 				 errhint("You might need to increase max_pred_locks_per_transaction.")));
2447 
2448 	if (!found)
2449 	{
2450 		SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
2451 		SHMQueueInsertBefore(&(sxact->predicateLocks),
2452 							 &(lock->xactLink));
2453 		lock->commitSeqNo = InvalidSerCommitSeqNo;
2454 	}
2455 
2456 	LWLockRelease(partitionLock);
2457 	LWLockRelease(SerializablePredicateLockListLock);
2458 }
2459 
2460 /*
2461  * Acquire a predicate lock on the specified target for the current
2462  * connection if not already held. This updates the local lock table
2463  * and uses it to implement granularity promotion. It will consolidate
2464  * multiple locks into a coarser lock if warranted, and will release
2465  * any finer-grained locks covered by the new one.
2466  */
2467 static void
2468 PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
2469 {
2470 	uint32		targettaghash;
2471 	bool		found;
2472 	LOCALPREDICATELOCK *locallock;
2473 
2474 	/* Do we have the lock already, or a covering lock? */
2475 	if (PredicateLockExists(targettag))
2476 		return;
2477 
2478 	if (CoarserLockCovers(targettag))
2479 		return;
2480 
2481 	/* the same hash and LW lock apply to the lock target and the local lock. */
2482 	targettaghash = PredicateLockTargetTagHashCode(targettag);
2483 
2484 	/* Acquire lock in local table */
2485 	locallock = (LOCALPREDICATELOCK *)
2486 		hash_search_with_hash_value(LocalPredicateLockHash,
2487 									targettag, targettaghash,
2488 									HASH_ENTER, &found);
2489 	locallock->held = true;
2490 	if (!found)
2491 		locallock->childLocks = 0;
2492 
2493 	/* Actually create the lock */
2494 	CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2495 
2496 	/*
2497 	 * Lock has been acquired. Check whether it should be promoted to a
2498 	 * coarser granularity, or whether there are finer-granularity locks to
2499 	 * clean up.
2500 	 */
2501 	if (CheckAndPromotePredicateLockRequest(targettag))
2502 	{
2503 		/*
2504 		 * Lock request was promoted to a coarser-granularity lock, and that
2505 		 * lock was acquired. It will delete this lock and any of its
2506 		 * children, so we're done.
2507 		 */
2508 	}
2509 	else
2510 	{
2511 		/* Clean up any finer-granularity locks */
2512 		if (GET_PREDICATELOCKTARGETTAG_TYPE(*targettag) != PREDLOCKTAG_TUPLE)
2513 			DeleteChildTargetLocks(targettag);
2514 	}
2515 }
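/*
 * Worked example (default settings as above): a scan that calls
 * PredicateLockTuple for a third tuple on the same page causes
 * CheckAndPromotePredicateLockRequest to acquire the page lock via a
 * recursive PredicateLockAcquire, whose DeleteChildTargetLocks call then
 * releases the now-redundant tuple locks.
 */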
2516 
2517 
2518 /*
2519  *		PredicateLockRelation
2520  *
2521  * Gets a predicate lock at the relation level.
2522  * Skip if not in full serializable transaction isolation level.
2523  * Skip if this is a temporary table.
2524  * Clear any finer-grained predicate locks this session has on the relation.
2525  */
2526 void
2527 PredicateLockRelation(Relation relation, Snapshot snapshot)
2528 {
2529 	PREDICATELOCKTARGETTAG tag;
2530 
2531 	if (!SerializationNeededForRead(relation, snapshot))
2532 		return;
2533 
2534 	SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2535 										relation->rd_node.dbNode,
2536 										relation->rd_id);
2537 	PredicateLockAcquire(&tag);
2538 }
2539 
2540 /*
2541  *		PredicateLockPage
2542  *
2543  * Gets a predicate lock at the page level.
2544  * Skip if not in full serializable transaction isolation level.
2545  * Skip if this is a temporary table.
2546  * Skip if a coarser predicate lock already covers this page.
2547  * Clear any finer-grained predicate locks this session has on the relation.
2548  */
2549 void
2550 PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
2551 {
2552 	PREDICATELOCKTARGETTAG tag;
2553 
2554 	if (!SerializationNeededForRead(relation, snapshot))
2555 		return;
2556 
2557 	SET_PREDICATELOCKTARGETTAG_PAGE(tag,
2558 									relation->rd_node.dbNode,
2559 									relation->rd_id,
2560 									blkno);
2561 	PredicateLockAcquire(&tag);
2562 }
2563 
2564 /*
2565  *		PredicateLockTuple
2566  *
2567  * Gets a predicate lock at the tuple level.
2568  * Skip if not in full serializable transaction isolation level.
2569  * Skip if this is a temporary table.
2570  */
2571 void
2572 PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
2573 {
2574 	PREDICATELOCKTARGETTAG tag;
2575 	ItemPointer tid;
2576 	TransactionId targetxmin;
2577 
2578 	if (!SerializationNeededForRead(relation, snapshot))
2579 		return;
2580 
2581 	/*
2582 	 * If it's a heap tuple, return if this xact wrote it.
2583 	 */
2584 	if (relation->rd_index == NULL)
2585 	{
2586 		TransactionId myxid;
2587 
2588 		targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
2589 
2590 		myxid = GetTopTransactionIdIfAny();
2591 		if (TransactionIdIsValid(myxid))
2592 		{
2593 			if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
2594 			{
2595 				TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
2596 
2597 				if (TransactionIdEquals(xid, myxid))
2598 				{
2599 					/* We wrote it; we already have a write lock. */
2600 					return;
2601 				}
2602 			}
2603 		}
2604 	}
2605 
2606 	/*
2607 	 * Do quick-but-not-definitive test for a relation lock first.  This will
2608 	 * never cause a return when the relation is *not* locked, but will
2609 	 * occasionally let the check continue when there really *is* a relation
2610 	 * level lock.
2611 	 */
2612 	SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2613 										relation->rd_node.dbNode,
2614 										relation->rd_id);
2615 	if (PredicateLockExists(&tag))
2616 		return;
2617 
2618 	tid = &(tuple->t_self);
2619 	SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
2620 									 relation->rd_node.dbNode,
2621 									 relation->rd_id,
2622 									 ItemPointerGetBlockNumber(tid),
2623 									 ItemPointerGetOffsetNumber(tid));
2624 	PredicateLockAcquire(&tag);
2625 }
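/*
 * Usage note (SQL level): SIREAD locks created here are visible in the
 * pg_locks view, e.g.
 *
 *	SELECT locktype, relation::regclass, page, tuple, pid
 *	  FROM pg_locks WHERE mode = 'SIREAD';
 */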
2626 
2627 
2628 /*
2629  *		DeleteLockTarget
2630  *
2631  * Remove a predicate lock target along with any locks held for it.
2632  *
2633  * Caller must hold SerializablePredicateLockListLock and the
2634  * appropriate hash partition lock for the target.
2635  */
2636 static void
2637 DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
2638 {
2639 	PREDICATELOCK *predlock;
2640 	SHM_QUEUE  *predlocktargetlink;
2641 	PREDICATELOCK *nextpredlock;
2642 	bool		found;
2643 
2644 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2645 	Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
2646 
2647 	predlock = (PREDICATELOCK *)
2648 		SHMQueueNext(&(target->predicateLocks),
2649 					 &(target->predicateLocks),
2650 					 offsetof(PREDICATELOCK, targetLink));
2651 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2652 	while (predlock)
2653 	{
2654 		predlocktargetlink = &(predlock->targetLink);
2655 		nextpredlock = (PREDICATELOCK *)
2656 			SHMQueueNext(&(target->predicateLocks),
2657 						 predlocktargetlink,
2658 						 offsetof(PREDICATELOCK, targetLink));
2659 
2660 		SHMQueueDelete(&(predlock->xactLink));
2661 		SHMQueueDelete(&(predlock->targetLink));
2662 
2663 		hash_search_with_hash_value
2664 			(PredicateLockHash,
2665 			 &predlock->tag,
2666 			 PredicateLockHashCodeFromTargetHashCode(&predlock->tag,
2667 													 targettaghash),
2668 			 HASH_REMOVE, &found);
2669 		Assert(found);
2670 
2671 		predlock = nextpredlock;
2672 	}
2673 	LWLockRelease(SerializableXactHashLock);
2674 
2675 	/* Remove the target itself, if possible. */
2676 	RemoveTargetIfNoLongerUsed(target, targettaghash);
2677 }
2678 
2679 
2680 /*
2681  *		TransferPredicateLocksToNewTarget
2682  *
2683  * Move or copy all the predicate locks for a lock target, for use by
2684  * index page splits/combines and other things that create or replace
2685  * lock targets. If 'removeOld' is true, the old locks and the target
2686  * will be removed.
2687  *
2688  * Returns true on success, or false if we ran out of shared memory to
2689  * allocate the new target or locks. Guaranteed to always succeed if
2690  * removeOld is set (by using the scratch entry in PredicateLockTargetHash
2691  * for scratch space).
2692  *
2693  * Warning: the "removeOld" option should be used only with care,
2694  * because this function does not (indeed, can not) update other
2695  * backends' LocalPredicateLockHash. If we are only adding new
2696  * entries, this is not a problem: the local lock table is used only
2697  * as a hint, so missing entries for locks that are held are
2698  * OK. Having entries for locks that are no longer held, as can happen
2699  * when using "removeOld", is not in general OK. We can only use it
2700  * safely when replacing a lock with a coarser-granularity lock that
2701  * covers it, or if we are absolutely certain that no one will need to
2702  * refer to that lock in the future.
2703  *
2704  * Caller must hold SerializablePredicateLockListLock.
2705  */
2706 static bool
2707 TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
2708 								  PREDICATELOCKTARGETTAG newtargettag,
2709 								  bool removeOld)
2710 {
2711 	uint32		oldtargettaghash;
2712 	LWLock	   *oldpartitionLock;
2713 	PREDICATELOCKTARGET *oldtarget;
2714 	uint32		newtargettaghash;
2715 	LWLock	   *newpartitionLock;
2716 	bool		found;
2717 	bool		outOfShmem = false;
2718 
2719 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2720 
2721 	oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2722 	newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
2723 	oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2724 	newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
2725 
2726 	if (removeOld)
2727 	{
2728 		/*
2729 		 * Remove the dummy entry to give us scratch space, so we know we'll
2730 		 * be able to create the new lock target.
2731 		 */
2732 		RemoveScratchTarget(false);
2733 	}
2734 
2735 	/*
2736 	 * We must get the partition locks in ascending sequence to avoid
2737 	 * deadlocks. If old and new partitions are the same, we must request the
2738 	 * lock only once.
2739 	 */
2740 	if (oldpartitionLock < newpartitionLock)
2741 	{
2742 		LWLockAcquire(oldpartitionLock,
2743 					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2744 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2745 	}
2746 	else if (oldpartitionLock > newpartitionLock)
2747 	{
2748 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2749 		LWLockAcquire(oldpartitionLock,
2750 					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2751 	}
2752 	else
2753 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2754 
2755 	/*
2756 	 * Look for the old target.  If not found, that's OK; no predicate locks
2757 	 * are affected, so we can just clean up and return. If it does exist,
2758 	 * walk its list of predicate locks and move or copy them to the new
2759 	 * target.
2760 	 */
2761 	oldtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2762 											&oldtargettag,
2763 											oldtargettaghash,
2764 											HASH_FIND, NULL);
2765 
2766 	if (oldtarget)
2767 	{
2768 		PREDICATELOCKTARGET *newtarget;
2769 		PREDICATELOCK *oldpredlock;
2770 		PREDICATELOCKTAG newpredlocktag;
2771 
2772 		newtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2773 												&newtargettag,
2774 												newtargettaghash,
2775 												HASH_ENTER_NULL, &found);
2776 
2777 		if (!newtarget)
2778 		{
2779 			/* Failed to allocate due to insufficient shmem */
2780 			outOfShmem = true;
2781 			goto exit;
2782 		}
2783 
2784 		/* If we created a new entry, initialize it */
2785 		if (!found)
2786 			SHMQueueInit(&(newtarget->predicateLocks));
2787 
2788 		newpredlocktag.myTarget = newtarget;
2789 
2790 		/*
2791 		 * Loop through all the locks on the old target, replacing them with
2792 		 * locks on the new target.
2793 		 */
2794 		oldpredlock = (PREDICATELOCK *)
2795 			SHMQueueNext(&(oldtarget->predicateLocks),
2796 						 &(oldtarget->predicateLocks),
2797 						 offsetof(PREDICATELOCK, targetLink));
2798 		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2799 		while (oldpredlock)
2800 		{
2801 			SHM_QUEUE  *predlocktargetlink;
2802 			PREDICATELOCK *nextpredlock;
2803 			PREDICATELOCK *newpredlock;
2804 			SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
2805 
2806 			predlocktargetlink = &(oldpredlock->targetLink);
2807 			nextpredlock = (PREDICATELOCK *)
2808 				SHMQueueNext(&(oldtarget->predicateLocks),
2809 							 predlocktargetlink,
2810 							 offsetof(PREDICATELOCK, targetLink));
2811 			newpredlocktag.myXact = oldpredlock->tag.myXact;
2812 
2813 			if (removeOld)
2814 			{
2815 				SHMQueueDelete(&(oldpredlock->xactLink));
2816 				SHMQueueDelete(&(oldpredlock->targetLink));
2817 
2818 				hash_search_with_hash_value
2819 					(PredicateLockHash,
2820 					 &oldpredlock->tag,
2821 					 PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
2822 															 oldtargettaghash),
2823 					 HASH_REMOVE, &found);
2824 				Assert(found);
2825 			}
2826 
2827 			newpredlock = (PREDICATELOCK *)
2828 				hash_search_with_hash_value(PredicateLockHash,
2829 											&newpredlocktag,
2830 											PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2831 																					newtargettaghash),
2832 											HASH_ENTER_NULL,
2833 											&found);
2834 			if (!newpredlock)
2835 			{
2836 				/* Out of shared memory. Undo what we've done so far. */
2837 				LWLockRelease(SerializableXactHashLock);
2838 				DeleteLockTarget(newtarget, newtargettaghash);
2839 				outOfShmem = true;
2840 				goto exit;
2841 			}
2842 			if (!found)
2843 			{
2844 				SHMQueueInsertBefore(&(newtarget->predicateLocks),
2845 									 &(newpredlock->targetLink));
2846 				SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
2847 									 &(newpredlock->xactLink));
2848 				newpredlock->commitSeqNo = oldCommitSeqNo;
2849 			}
2850 			else
2851 			{
2852 				if (newpredlock->commitSeqNo < oldCommitSeqNo)
2853 					newpredlock->commitSeqNo = oldCommitSeqNo;
2854 			}
2855 
2856 			Assert(newpredlock->commitSeqNo != 0);
2857 			Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2858 				   || (newpredlock->tag.myXact == OldCommittedSxact));
2859 
2860 			oldpredlock = nextpredlock;
2861 		}
2862 		LWLockRelease(SerializableXactHashLock);
2863 
2864 		if (removeOld)
2865 		{
2866 			Assert(SHMQueueEmpty(&oldtarget->predicateLocks));
2867 			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2868 		}
2869 	}
2870 
2871 
2872 exit:
2873 	/* Release partition locks in reverse order of acquisition. */
2874 	if (oldpartitionLock < newpartitionLock)
2875 	{
2876 		LWLockRelease(newpartitionLock);
2877 		LWLockRelease(oldpartitionLock);
2878 	}
2879 	else if (oldpartitionLock > newpartitionLock)
2880 	{
2881 		LWLockRelease(oldpartitionLock);
2882 		LWLockRelease(newpartitionLock);
2883 	}
2884 	else
2885 		LWLockRelease(newpartitionLock);
2886 
2887 	if (removeOld)
2888 	{
2889 		/* We shouldn't run out of memory if we're moving locks */
2890 		Assert(!outOfShmem);
2891 
2892 		/* Put the scratch entry back */
2893 		RestoreScratchTarget(false);
2894 	}
2895 
2896 	return !outOfShmem;
2897 }
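/*
 * Illustrative sketch (simplified from the page split/combine callers
 * later in this file): callers hold SerializablePredicateLockListLock for
 * the duration, and must handle a false return when removeOld is false:
 *
 *	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
 *	success = TransferPredicateLocksToNewTarget(oldtargettag,
 *												newtargettag,
 *												false);
 *	LWLockRelease(SerializablePredicateLockListLock);
 *	if (!success)
 *		ereport(ERROR, ...);	out of shared memory
 */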
2898 
2899 /*
2900  * Drop all predicate locks of any granularity from the specified relation,
2901  * which can be a heap relation or an index relation.  If 'transfer' is true,
2902  * acquire a relation lock on the heap for any transactions with any lock(s)
2903  * on the specified relation.
2904  *
2905  * This requires grabbing a lot of LW locks and scanning the entire lock
2906  * target table for matches.  That makes this more expensive than most
2907  * predicate lock management functions, but it will only be called for DDL
2908  * type commands that are expensive anyway, and there are fast returns when
2909  * no serializable transactions are active or the relation is temporary.
2910  *
2911  * We don't use the TransferPredicateLocksToNewTarget function because it
2912  * acquires its own locks on the partitions of the two targets involved,
2913  * and we'll already be holding all partition locks.
2914  *
2915  * We can't throw an error from here, because the call could be from a
2916  * transaction which is not serializable.
2917  *
2918  * NOTE: This is currently only called with transfer set to true, but that may
2919  * change.  If we decide to clean up the locks from a table on commit of a
2920  * transaction which executed DROP TABLE, the false condition will be useful.
2921  */
2922 static void
2923 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
2924 {
2925 	HASH_SEQ_STATUS seqstat;
2926 	PREDICATELOCKTARGET *oldtarget;
2927 	PREDICATELOCKTARGET *heaptarget;
2928 	Oid			dbId;
2929 	Oid			relId;
2930 	Oid			heapId;
2931 	int			i;
2932 	bool		isIndex;
2933 	bool		found;
2934 	uint32		heaptargettaghash;
2935 
2936 	/*
2937 	 * Bail out quickly if there are no serializable transactions running.
2938 	 * It's safe to check this without taking locks because the caller is
2939 	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
2940 	 * would matter here can be acquired while that is held.
2941 	 */
2942 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
2943 		return;
2944 
2945 	if (!PredicateLockingNeededForRelation(relation))
2946 		return;
2947 
2948 	dbId = relation->rd_node.dbNode;
2949 	relId = relation->rd_id;
2950 	if (relation->rd_index == NULL)
2951 	{
2952 		isIndex = false;
2953 		heapId = relId;
2954 	}
2955 	else
2956 	{
2957 		isIndex = true;
2958 		heapId = relation->rd_index->indrelid;
2959 	}
2960 	Assert(heapId != InvalidOid);
2961 	Assert(transfer || !isIndex);	/* index OID only makes sense with
2962 									 * transfer */
2963 
2964 	/* Retrieve first time needed, then keep. */
2965 	heaptargettaghash = 0;
2966 	heaptarget = NULL;
2967 
2968 	/* Acquire locks on all lock partitions */
2969 	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
2970 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
2971 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
2972 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2973 
2974 	/*
2975 	 * Remove the dummy entry to give us scratch space, so we know we'll be
2976 	 * able to create the new lock target.
2977 	 */
2978 	if (transfer)
2979 		RemoveScratchTarget(true);
2980 
2981 	/* Scan through target map */
2982 	hash_seq_init(&seqstat, PredicateLockTargetHash);
2983 
2984 	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
2985 	{
2986 		PREDICATELOCK *oldpredlock;
2987 
2988 		/*
2989 		 * Check whether this is a target which needs attention.
2990 		 */
2991 		if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
2992 			continue;			/* wrong relation id */
2993 		if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
2994 			continue;			/* wrong database id */
2995 		if (transfer && !isIndex
2996 			&& GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
2997 			continue;			/* already the right lock */
2998 
2999 		/*
3000 		 * If we made it here, we have work to do.  We make sure the heap
3001 		 * relation lock exists, then we walk the list of predicate locks for
3002 		 * the old target we found, moving all locks to the heap relation lock
3003 		 * -- unless they already hold that.
3004 		 */
3005 
3006 		/*
3007 		 * First make sure we have the heap relation target.  We only need to
3008 		 * do this once.
3009 		 */
3010 		if (transfer && heaptarget == NULL)
3011 		{
3012 			PREDICATELOCKTARGETTAG heaptargettag;
3013 
3014 			SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
3015 			heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
3016 			heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
3017 													 &heaptargettag,
3018 													 heaptargettaghash,
3019 													 HASH_ENTER, &found);
3020 			if (!found)
3021 				SHMQueueInit(&heaptarget->predicateLocks);
3022 		}
3023 
3024 		/*
3025 		 * Loop through all the locks on the old target, replacing them with
3026 		 * locks on the new target.
3027 		 */
3028 		oldpredlock = (PREDICATELOCK *)
3029 			SHMQueueNext(&(oldtarget->predicateLocks),
3030 						 &(oldtarget->predicateLocks),
3031 						 offsetof(PREDICATELOCK, targetLink));
3032 		while (oldpredlock)
3033 		{
3034 			PREDICATELOCK *nextpredlock;
3035 			PREDICATELOCK *newpredlock;
3036 			SerCommitSeqNo oldCommitSeqNo;
3037 			SERIALIZABLEXACT *oldXact;
3038 
3039 			nextpredlock = (PREDICATELOCK *)
3040 				SHMQueueNext(&(oldtarget->predicateLocks),
3041 							 &(oldpredlock->targetLink),
3042 							 offsetof(PREDICATELOCK, targetLink));
3043 
3044 			/*
3045 			 * Remove the old lock first. This avoids the chance of running
3046 			 * out of lock structure entries for the hash table.
3047 			 */
3048 			oldCommitSeqNo = oldpredlock->commitSeqNo;
3049 			oldXact = oldpredlock->tag.myXact;
3050 
3051 			SHMQueueDelete(&(oldpredlock->xactLink));
3052 
3053 			/*
3054 			 * No need for retail delete from oldtarget list, we're removing
3055 			 * the whole target anyway.
3056 			 */
3057 			hash_search(PredicateLockHash,
3058 						&oldpredlock->tag,
3059 						HASH_REMOVE, &found);
3060 			Assert(found);
3061 
3062 			if (transfer)
3063 			{
3064 				PREDICATELOCKTAG newpredlocktag;
3065 
3066 				newpredlocktag.myTarget = heaptarget;
3067 				newpredlocktag.myXact = oldXact;
3068 				newpredlock = (PREDICATELOCK *)
3069 					hash_search_with_hash_value(PredicateLockHash,
3070 												&newpredlocktag,
3071 												PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
3072 																						heaptargettaghash),
3073 												HASH_ENTER,
3074 												&found);
3075 				if (!found)
3076 				{
3077 					SHMQueueInsertBefore(&(heaptarget->predicateLocks),
3078 										 &(newpredlock->targetLink));
3079 					SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
3080 										 &(newpredlock->xactLink));
3081 					newpredlock->commitSeqNo = oldCommitSeqNo;
3082 				}
3083 				else
3084 				{
3085 					if (newpredlock->commitSeqNo < oldCommitSeqNo)
3086 						newpredlock->commitSeqNo = oldCommitSeqNo;
3087 				}
3088 
3089 				Assert(newpredlock->commitSeqNo != 0);
3090 				Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
3091 					   || (newpredlock->tag.myXact == OldCommittedSxact));
3092 			}
3093 
3094 			oldpredlock = nextpredlock;
3095 		}
3096 
3097 		hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
3098 					&found);
3099 		Assert(found);
3100 	}
3101 
3102 	/* Put the scratch entry back */
3103 	if (transfer)
3104 		RestoreScratchTarget(true);
3105 
3106 	/* Release locks in reverse order */
3107 	LWLockRelease(SerializableXactHashLock);
3108 	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
3109 		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
3110 	LWLockRelease(SerializablePredicateLockListLock);
3111 }
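
/*
 * Illustrative sketch, not part of the original source: when the loop
 * above collapses several fine-grained locks onto the single heap
 * relation target, a lock that already exists there must keep the
 * highest commitSeqNo seen, or later cleanup could release it too
 * early.  The names below are hypothetical stand-ins, compiled only
 * when SSI_ILLUSTRATION is defined.
 */
#ifdef SSI_ILLUSTRATION
typedef unsigned long long DemoSerCommitSeqNo;

static void
demo_merge_commit_seqno(DemoSerCommitSeqNo *survivor,
						DemoSerCommitSeqNo incoming)
{
	/* Keep the "latest" (largest) commit sequence number of the two. */
	if (*survivor < incoming)
		*survivor = incoming;
}
#endif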
3112 
3113 /*
3114  * TransferPredicateLocksToHeapRelation
3115  *		For all transactions, transfer all predicate locks for the given
3116  *		relation to a single relation lock on the heap.
3117  */
3118 void
3119 TransferPredicateLocksToHeapRelation(Relation relation)
3120 {
3121 	DropAllPredicateLocksFromTable(relation, true);
3122 }
3123 
3124 
3125 /*
3126  *		PredicateLockPageSplit
3127  *
3128  * Copies any predicate locks for the old page to the new page.
3129  * Skip if this is a temporary table or toast table.
3130  *
3131  * NOTE: A page split (or overflow) affects all serializable transactions,
3132  * even if it occurs in the context of another transaction isolation level.
3133  *
3134  * NOTE: This currently leaves the local copy of the locks without
3135  * information on the new lock which is in shared memory.  This could cause
3136  * problems if enough page splits occur on locked pages without the processes
3137  * which hold the locks getting in and noticing.
3138  */
3139 void
3140 PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
3141 					   BlockNumber newblkno)
3142 {
3143 	PREDICATELOCKTARGETTAG oldtargettag;
3144 	PREDICATELOCKTARGETTAG newtargettag;
3145 	bool		success;
3146 
3147 	/*
3148 	 * Bail out quickly if there are no serializable transactions running.
3149 	 *
3150 	 * It's safe to do this check without taking any additional locks. Even if
3151 	 * a serializable transaction starts concurrently, we know it can't take
3152 	 * any SIREAD locks on the page being split because the caller is holding
3153 	 * the associated buffer page lock. Memory reordering isn't an issue; the
3154 	 * memory barrier in the LWLock acquisition guarantees that this read
3155 	 * occurs while the buffer page lock is held.
3156 	 */
3157 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
3158 		return;
3159 
3160 	if (!PredicateLockingNeededForRelation(relation))
3161 		return;
3162 
3163 	Assert(oldblkno != newblkno);
3164 	Assert(BlockNumberIsValid(oldblkno));
3165 	Assert(BlockNumberIsValid(newblkno));
3166 
3167 	SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
3168 									relation->rd_node.dbNode,
3169 									relation->rd_id,
3170 									oldblkno);
3171 	SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
3172 									relation->rd_node.dbNode,
3173 									relation->rd_id,
3174 									newblkno);
3175 
3176 	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
3177 
3178 	/*
3179 	 * Try copying the locks over to the new page's tag, creating it if
3180 	 * necessary.
3181 	 */
3182 	success = TransferPredicateLocksToNewTarget(oldtargettag,
3183 												newtargettag,
3184 												false);
3185 
3186 	if (!success)
3187 	{
3188 		/*
3189 		 * No more predicate lock entries are available. Failure isn't an
3190 		 * option here, so promote the page lock to a relation lock.
3191 		 */
3192 
3193 		/* Get the parent relation lock's lock tag */
3194 		success = GetParentPredicateLockTag(&oldtargettag,
3195 											&newtargettag);
3196 		Assert(success);
3197 
3198 		/*
3199 		 * Move the locks to the parent. This shouldn't fail.
3200 		 *
3201 		 * Note that here we are removing locks held by other backends,
3202 		 * leading to a possible inconsistency in their local lock hash table.
3203 		 * This is OK because we're replacing it with a lock that covers the
3204 		 * old one.
3205 		 */
3206 		success = TransferPredicateLocksToNewTarget(oldtargettag,
3207 													newtargettag,
3208 													true);
3209 		Assert(success);
3210 	}
3211 
3212 	LWLockRelease(SerializablePredicateLockListLock);
3213 }
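
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): the error handling above is a "promote on
 * exhaustion" pattern -- attempt the page-level transfer first and, if
 * shared memory is exhausted, fall back to the coarser relation-level
 * target, whose creation the scratch entry guarantees.
 */
#ifdef SSI_ILLUSTRATION
/* Toy model: targets <= 0 are coarse and always have room. */
static int
demo_try_transfer(int target)
{
	return (target <= 0);
}

static void
demo_transfer_with_promotion(int page_target)
{
	if (!demo_try_transfer(page_target))
	{
		/* Failure isn't an option here, so promote to the relation. */
		int			ok = demo_try_transfer(0);

		(void) ok;				/* the real code Asserts success */
	}
}
#endif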
3214 
3215 /*
3216  *		PredicateLockPageCombine
3217  *
3218  * Combines predicate locks for two existing pages.
3219  * Skip if this is a temporary table or toast table.
3220  *
3221  * NOTE: A page combine affects all serializable transactions, even if it
3222  * occurs in the context of another transaction isolation level.
3223  */
3224 void
3225 PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
3226 						 BlockNumber newblkno)
3227 {
3228 	/*
3229 	 * Page combines differ from page splits in that we ought to be able to
3230 	 * remove the locks on the old page after transferring them to the new
3231 	 * page, instead of duplicating them. However, because we can't edit other
3232 	 * backends' local lock tables, removing the old lock would leave them
3233 	 * with an entry in their LocalPredicateLockHash for a lock they're not
3234 	 * holding, which isn't acceptable. So we wind up having to do the same
3235 	 * work as a page split, acquiring a lock on the new page and keeping the
3236 	 * old page locked too. That can lead to some false positives, but should
3237 	 * be rare in practice.
3238 	 */
3239 	PredicateLockPageSplit(relation, oldblkno, newblkno);
3240 }
3241 
3242 /*
3243  * Walk the list of in-progress serializable transactions and find the new
3244  * xmin.
3245  */
3246 static void
3247 SetNewSxactGlobalXmin(void)
3248 {
3249 	SERIALIZABLEXACT *sxact;
3250 
3251 	Assert(LWLockHeldByMe(SerializableXactHashLock));
3252 
3253 	PredXact->SxactGlobalXmin = InvalidTransactionId;
3254 	PredXact->SxactGlobalXminCount = 0;
3255 
3256 	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
3257 	{
3258 		if (!SxactIsRolledBack(sxact)
3259 			&& !SxactIsCommitted(sxact)
3260 			&& sxact != OldCommittedSxact)
3261 		{
3262 			Assert(sxact->xmin != InvalidTransactionId);
3263 			if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3264 				|| TransactionIdPrecedes(sxact->xmin,
3265 										 PredXact->SxactGlobalXmin))
3266 			{
3267 				PredXact->SxactGlobalXmin = sxact->xmin;
3268 				PredXact->SxactGlobalXminCount = 1;
3269 			}
3270 			else if (TransactionIdEquals(sxact->xmin,
3271 										 PredXact->SxactGlobalXmin))
3272 				PredXact->SxactGlobalXminCount++;
3273 		}
3274 	}
3275 
3276 	OldSerXidSetActiveSerXmin(PredXact->SxactGlobalXmin);
3277 }
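
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): the loop above tracks both the minimum xmin and a
 * count of transactions sharing it, so ReleasePredicateLocks can detect
 * cheaply when the last holder of the oldest xmin finishes.  This toy
 * model uses plain unsigned ints and ignores xid wraparound, which the
 * real code handles with TransactionIdPrecedes.
 */
#ifdef SSI_ILLUSTRATION
static void
demo_min_xmin_and_count(const unsigned *xmin, int n,
						unsigned *min_out, int *count_out)
{
	int			i;

	*min_out = 0;				/* stands in for InvalidTransactionId */
	*count_out = 0;
	for (i = 0; i < n; i++)
	{
		if (*count_out == 0 || xmin[i] < *min_out)
		{
			*min_out = xmin[i];
			*count_out = 1;
		}
		else if (xmin[i] == *min_out)
			(*count_out)++;
	}
}
#endif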
3278 
3279 /*
3280  *		ReleasePredicateLocks
3281  *
3282  * Releases predicate locks based on completion of the current transaction,
3283  * whether committed or rolled back.  It can also be called for a read only
3284  * transaction when it becomes impossible for the transaction to become
3285  * part of a dangerous structure.
3286  *
3287  * We do nothing unless this is a serializable transaction.
3288  *
3289  * This method must ensure that shared memory hash tables are cleaned
3290  * up in some relatively timely fashion.
3291  *
3292  * If this transaction is committing and is holding any predicate locks,
3293  * it must be added to a list of completed serializable transactions still
3294  * holding locks.
3295  */
3296 void
3297 ReleasePredicateLocks(bool isCommit)
3298 {
3299 	bool		needToClear;
3300 	RWConflict	conflict,
3301 				nextConflict,
3302 				possibleUnsafeConflict;
3303 	SERIALIZABLEXACT *roXact;
3304 
3305 	/*
3306 	 * We can't trust XactReadOnly here, because a transaction which started
3307 	 * as READ WRITE can show as READ ONLY later, e.g., within
3308 	 * subtransactions.  We want to flag a transaction as READ ONLY if it
3309 	 * commits without writing, so that de facto READ ONLY transactions get
3310 	 * the benefit of some RO optimizations.  We therefore use this local
3311 	 * variable for cleanup logic that depends on whether the transaction was
3312 	 * declared READ ONLY at the top level.
3313 	 */
3314 	bool		topLevelIsDeclaredReadOnly;
3315 
3316 	if (MySerializableXact == InvalidSerializableXact)
3317 	{
3318 		Assert(LocalPredicateLockHash == NULL);
3319 		return;
3320 	}
3321 
3322 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3323 
3324 	Assert(!isCommit || SxactIsPrepared(MySerializableXact));
3325 	Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
3326 	Assert(!SxactIsCommitted(MySerializableXact));
3327 	Assert(!SxactIsRolledBack(MySerializableXact));
3328 
3329 	/* may not be serializable during COMMIT/ROLLBACK PREPARED */
3330 	Assert(MySerializableXact->pid == 0 || IsolationIsSerializable());
3331 
3332 	/* We'd better not already be on the cleanup list. */
3333 	Assert(!SxactIsOnFinishedList(MySerializableXact));
3334 
3335 	topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
3336 
3337 	/*
3338 	 * We don't hold XidGenLock lock here, assuming that TransactionId is
3339 	 * atomic!
3340 	 *
3341 	 * If this value is changing, we don't care that much whether we get the
3342 	 * old or new value -- it is just used to determine how far
3343 	 * GlobalSerializableXmin must advance before this transaction can be
3344 	 * fully cleaned up.  The worst that could happen is we wait for one more
3345 	 * transaction to complete before freeing some RAM; correctness of visible
3346 	 * behavior is not affected.
3347 	 */
3348 	MySerializableXact->finishedBefore = ShmemVariableCache->nextXid;
3349 
3350 	/*
3351 	 * If it's not a commit it's a rollback, and we can clear our locks
3352 	 * immediately.
3353 	 */
3354 	if (isCommit)
3355 	{
3356 		MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
3357 		MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
3358 		/* Recognize implicit read-only transaction (commit without write). */
3359 		if (!MyXactDidWrite)
3360 			MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
3361 	}
3362 	else
3363 	{
3364 		/*
3365 		 * The DOOMED flag indicates that we intend to roll back this
3366 		 * transaction and so it should not cause serialization failures for
3367 		 * other transactions that conflict with it. Note that this flag might
3368 		 * already be set, if another backend marked this transaction for
3369 		 * abort.
3370 		 *
3371 		 * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
3372 		 * has been called, and so the SerializableXact is eligible for
3373 		 * cleanup. This means it should not be considered when calculating
3374 		 * SxactGlobalXmin.
3375 		 */
3376 		MySerializableXact->flags |= SXACT_FLAG_DOOMED;
3377 		MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
3378 
3379 		/*
3380 		 * If the transaction was previously prepared, but is now failing due
3381 		 * to a ROLLBACK PREPARED or (hopefully very rare) error after the
3382 		 * prepare, clear the prepared flag.  This simplifies conflict
3383 		 * checking.
3384 		 */
3385 		MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
3386 	}
3387 
3388 	if (!topLevelIsDeclaredReadOnly)
3389 	{
3390 		Assert(PredXact->WritableSxactCount > 0);
3391 		if (--(PredXact->WritableSxactCount) == 0)
3392 		{
3393 			/*
3394 			 * Release predicate locks and rw-conflicts in for all committed
3395 			 * transactions.  There are no longer any transactions which might
3396 			 * conflict with the locks and no chance for new transactions to
3397 			 * overlap.  Similarly, existing conflicts in can't cause pivots,
3398 			 * and any conflicts in which could have completed a dangerous
3399 			 * structure would already have caused a rollback, so any
3400 			 * remaining ones must be benign.
3401 			 */
3402 			PredXact->CanPartialClearThrough = PredXact->LastSxactCommitSeqNo;
3403 		}
3404 	}
3405 	else
3406 	{
3407 		/*
3408 		 * Read-only transactions: clear the list of transactions that might
3409 		 * make us unsafe. Note that we use 'inLink' for the iteration as
3410 		 * opposed to 'outLink' for the r/w xacts.
3411 		 */
3412 		possibleUnsafeConflict = (RWConflict)
3413 			SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3414 						 &MySerializableXact->possibleUnsafeConflicts,
3415 						 offsetof(RWConflictData, inLink));
3416 		while (possibleUnsafeConflict)
3417 		{
3418 			nextConflict = (RWConflict)
3419 				SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3420 							 &possibleUnsafeConflict->inLink,
3421 							 offsetof(RWConflictData, inLink));
3422 
3423 			Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
3424 			Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
3425 
3426 			ReleaseRWConflict(possibleUnsafeConflict);
3427 
3428 			possibleUnsafeConflict = nextConflict;
3429 		}
3430 	}
3431 
3432 	/* Check for conflict out to old committed transactions. */
3433 	if (isCommit
3434 		&& !SxactIsReadOnly(MySerializableXact)
3435 		&& SxactHasSummaryConflictOut(MySerializableXact))
3436 	{
3437 		/*
3438 		 * we don't know which old committed transaction we conflicted with,
3439 		 * so be conservative and use FirstNormalSerCommitSeqNo here
3440 		 */
3441 		MySerializableXact->SeqNo.earliestOutConflictCommit =
3442 			FirstNormalSerCommitSeqNo;
3443 		MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3444 	}
3445 
3446 	/*
3447 	 * Release all outConflicts to committed transactions.  If we're rolling
3448 	 * back, clear them all.  Set SXACT_FLAG_CONFLICT_OUT if any point to
3449 	 * previously committed transactions.
3450 	 */
3451 	conflict = (RWConflict)
3452 		SHMQueueNext(&MySerializableXact->outConflicts,
3453 					 &MySerializableXact->outConflicts,
3454 					 offsetof(RWConflictData, outLink));
3455 	while (conflict)
3456 	{
3457 		nextConflict = (RWConflict)
3458 			SHMQueueNext(&MySerializableXact->outConflicts,
3459 						 &conflict->outLink,
3460 						 offsetof(RWConflictData, outLink));
3461 
3462 		if (isCommit
3463 			&& !SxactIsReadOnly(MySerializableXact)
3464 			&& SxactIsCommitted(conflict->sxactIn))
3465 		{
3466 			if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
3467 				|| conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
3468 				MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
3469 			MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3470 		}
3471 
3472 		if (!isCommit
3473 			|| SxactIsCommitted(conflict->sxactIn)
3474 			|| (conflict->sxactIn->SeqNo.lastCommitBeforeSnapshot >= PredXact->LastSxactCommitSeqNo))
3475 			ReleaseRWConflict(conflict);
3476 
3477 		conflict = nextConflict;
3478 	}
3479 
3480 	/*
3481 	 * Release all inConflicts from committed and read-only transactions. If
3482 	 * we're rolling back, clear them all.
3483 	 */
3484 	conflict = (RWConflict)
3485 		SHMQueueNext(&MySerializableXact->inConflicts,
3486 					 &MySerializableXact->inConflicts,
3487 					 offsetof(RWConflictData, inLink));
3488 	while (conflict)
3489 	{
3490 		nextConflict = (RWConflict)
3491 			SHMQueueNext(&MySerializableXact->inConflicts,
3492 						 &conflict->inLink,
3493 						 offsetof(RWConflictData, inLink));
3494 
3495 		if (!isCommit
3496 			|| SxactIsCommitted(conflict->sxactOut)
3497 			|| SxactIsReadOnly(conflict->sxactOut))
3498 			ReleaseRWConflict(conflict);
3499 
3500 		conflict = nextConflict;
3501 	}
3502 
3503 	if (!topLevelIsDeclaredReadOnly)
3504 	{
3505 		/*
3506 		 * Remove ourselves from the list of possible conflicts for concurrent
3507 		 * READ ONLY transactions, flagging them as unsafe if we have a
3508 		 * conflict out. If any are waiting DEFERRABLE transactions, wake them
3509 		 * up if they are known safe or known unsafe.
3510 		 */
3511 		possibleUnsafeConflict = (RWConflict)
3512 			SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3513 						 &MySerializableXact->possibleUnsafeConflicts,
3514 						 offsetof(RWConflictData, outLink));
3515 		while (possibleUnsafeConflict)
3516 		{
3517 			nextConflict = (RWConflict)
3518 				SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3519 							 &possibleUnsafeConflict->outLink,
3520 							 offsetof(RWConflictData, outLink));
3521 
3522 			roXact = possibleUnsafeConflict->sxactIn;
3523 			Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
3524 			Assert(SxactIsReadOnly(roXact));
3525 
3526 			/* Mark conflicted if necessary. */
3527 			if (isCommit
3528 				&& MyXactDidWrite
3529 				&& SxactHasConflictOut(MySerializableXact)
3530 				&& (MySerializableXact->SeqNo.earliestOutConflictCommit
3531 					<= roXact->SeqNo.lastCommitBeforeSnapshot))
3532 			{
3533 				/*
3534 				 * This releases possibleUnsafeConflict (as well as all other
3535 				 * possible conflicts for roXact)
3536 				 */
3537 				FlagSxactUnsafe(roXact);
3538 			}
3539 			else
3540 			{
3541 				ReleaseRWConflict(possibleUnsafeConflict);
3542 
3543 				/*
3544 				 * If we were the last possible conflict, flag it safe. The
3545 				 * transaction can now safely release its predicate locks (but
3546 				 * that transaction's backend has to do that itself).
3547 				 */
3548 				if (SHMQueueEmpty(&roXact->possibleUnsafeConflicts))
3549 					roXact->flags |= SXACT_FLAG_RO_SAFE;
3550 			}
3551 
3552 			/*
3553 			 * Wake up the process for a waiting DEFERRABLE transaction if we
3554 			 * now know it's either safe or conflicted.
3555 			 */
3556 			if (SxactIsDeferrableWaiting(roXact) &&
3557 				(SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
3558 				ProcSendSignal(roXact->pid);
3559 
3560 			possibleUnsafeConflict = nextConflict;
3561 		}
3562 	}
3563 
3564 	/*
3565 	 * Check whether it's time to clean up old transactions. This can only be
3566 	 * done when the last serializable transaction with the oldest xmin among
3567 	 * serializable transactions completes.  We then find the "new oldest"
3568 	 * xmin and purge any transactions which finished before this transaction
3569 	 * was launched.
3570 	 */
3571 	needToClear = false;
3572 	if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
3573 	{
3574 		Assert(PredXact->SxactGlobalXminCount > 0);
3575 		if (--(PredXact->SxactGlobalXminCount) == 0)
3576 		{
3577 			SetNewSxactGlobalXmin();
3578 			needToClear = true;
3579 		}
3580 	}
3581 
3582 	LWLockRelease(SerializableXactHashLock);
3583 
3584 	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3585 
3586 	/* Add this to the list of transactions to check for later cleanup. */
3587 	if (isCommit)
3588 		SHMQueueInsertBefore(FinishedSerializableTransactions,
3589 							 &MySerializableXact->finishedLink);
3590 
3591 	if (!isCommit)
3592 		ReleaseOneSerializableXact(MySerializableXact, false, false);
3593 
3594 	LWLockRelease(SerializableFinishedListLock);
3595 
3596 	if (needToClear)
3597 		ClearOldPredicateLocks();
3598 
3599 	MySerializableXact = InvalidSerializableXact;
3600 	MyXactDidWrite = false;
3601 
3602 	/* Delete per-transaction lock table */
3603 	if (LocalPredicateLockHash != NULL)
3604 	{
3605 		hash_destroy(LocalPredicateLockHash);
3606 		LocalPredicateLockHash = NULL;
3607 	}
3608 }
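
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): the test above that flags a concurrent READ ONLY
 * transaction unsafe reduces to a comparison of commit sequence
 * numbers -- the writer must be committing after having written, with a
 * conflict out whose earliest commit falls within the reader's
 * snapshot.
 */
#ifdef SSI_ILLUSTRATION
static int
demo_ro_becomes_unsafe(int writer_commits,
					   int writer_did_write,
					   int writer_has_conflict_out,
					   unsigned long long earliest_out_conflict_commit,
					   unsigned long long ro_last_commit_before_snapshot)
{
	return writer_commits
		&& writer_did_write
		&& writer_has_conflict_out
		&& earliest_out_conflict_commit <= ro_last_commit_before_snapshot;
}
#endif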
3609 
3610 /*
3611  * Clear old predicate locks, belonging to committed transactions that are no
3612  * longer interesting to any in-progress transaction.
3613  */
3614 static void
3615 ClearOldPredicateLocks(void)
3616 {
3617 	SERIALIZABLEXACT *finishedSxact;
3618 	PREDICATELOCK *predlock;
3619 
3620 	/*
3621 	 * Loop through finished transactions. They are in commit order, so we can
3622 	 * stop as soon as we find one that's still interesting.
3623 	 */
3624 	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3625 	finishedSxact = (SERIALIZABLEXACT *)
3626 		SHMQueueNext(FinishedSerializableTransactions,
3627 					 FinishedSerializableTransactions,
3628 					 offsetof(SERIALIZABLEXACT, finishedLink));
3629 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3630 	while (finishedSxact)
3631 	{
3632 		SERIALIZABLEXACT *nextSxact;
3633 
3634 		nextSxact = (SERIALIZABLEXACT *)
3635 			SHMQueueNext(FinishedSerializableTransactions,
3636 						 &(finishedSxact->finishedLink),
3637 						 offsetof(SERIALIZABLEXACT, finishedLink));
3638 		if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3639 			|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
3640 											 PredXact->SxactGlobalXmin))
3641 		{
3642 			/*
3643 			 * This transaction committed before any in-progress transaction
3644 			 * took its snapshot. It's no longer interesting.
3645 			 */
3646 			LWLockRelease(SerializableXactHashLock);
3647 			SHMQueueDelete(&(finishedSxact->finishedLink));
3648 			ReleaseOneSerializableXact(finishedSxact, false, false);
3649 			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3650 		}
3651 		else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
3652 				 && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
3653 		{
3654 			/*
3655 			 * Any active transactions that took their snapshot before this
3656 			 * transaction committed are read-only, so we can clear part of
3657 			 * its state.
3658 			 */
3659 			LWLockRelease(SerializableXactHashLock);
3660 
3661 			if (SxactIsReadOnly(finishedSxact))
3662 			{
3663 				/* A read-only transaction can be removed entirely */
3664 				SHMQueueDelete(&(finishedSxact->finishedLink));
3665 				ReleaseOneSerializableXact(finishedSxact, false, false);
3666 			}
3667 			else
3668 			{
3669 				/*
3670 				 * A read-write transaction can only be partially cleared. We
3671 				 * need to keep the SERIALIZABLEXACT but can release the
3672 				 * SIREAD locks and conflicts in.
3673 				 */
3674 				ReleaseOneSerializableXact(finishedSxact, true, false);
3675 			}
3676 
3677 			PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
3678 			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3679 		}
3680 		else
3681 		{
3682 			/* Still interesting. */
3683 			break;
3684 		}
3685 		finishedSxact = nextSxact;
3686 	}
3687 	LWLockRelease(SerializableXactHashLock);
3688 
3689 	/*
3690 	 * Loop through predicate locks on dummy transaction for summarized data.
3691 	 */
3692 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3693 	predlock = (PREDICATELOCK *)
3694 		SHMQueueNext(&OldCommittedSxact->predicateLocks,
3695 					 &OldCommittedSxact->predicateLocks,
3696 					 offsetof(PREDICATELOCK, xactLink));
3697 	while (predlock)
3698 	{
3699 		PREDICATELOCK *nextpredlock;
3700 		bool		canDoPartialCleanup;
3701 
3702 		nextpredlock = (PREDICATELOCK *)
3703 			SHMQueueNext(&OldCommittedSxact->predicateLocks,
3704 						 &predlock->xactLink,
3705 						 offsetof(PREDICATELOCK, xactLink));
3706 
3707 		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
3708 		Assert(predlock->commitSeqNo != 0);
3709 		Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3710 		canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
3711 		LWLockRelease(SerializableXactHashLock);
3712 
3713 		/*
3714 		 * If this lock originally belonged to an old enough transaction, we
3715 		 * can release it.
3716 		 */
3717 		if (canDoPartialCleanup)
3718 		{
3719 			PREDICATELOCKTAG tag;
3720 			PREDICATELOCKTARGET *target;
3721 			PREDICATELOCKTARGETTAG targettag;
3722 			uint32		targettaghash;
3723 			LWLock	   *partitionLock;
3724 
3725 			tag = predlock->tag;
3726 			target = tag.myTarget;
3727 			targettag = target->tag;
3728 			targettaghash = PredicateLockTargetTagHashCode(&targettag);
3729 			partitionLock = PredicateLockHashPartitionLock(targettaghash);
3730 
3731 			LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3732 
3733 			SHMQueueDelete(&(predlock->targetLink));
3734 			SHMQueueDelete(&(predlock->xactLink));
3735 
3736 			hash_search_with_hash_value(PredicateLockHash, &tag,
3737 										PredicateLockHashCodeFromTargetHashCode(&tag,
3738 																				targettaghash),
3739 										HASH_REMOVE, NULL);
3740 			RemoveTargetIfNoLongerUsed(target, targettaghash);
3741 
3742 			LWLockRelease(partitionLock);
3743 		}
3744 
3745 		predlock = nextpredlock;
3746 	}
3747 
3748 	LWLockRelease(SerializablePredicateLockListLock);
3749 	LWLockRelease(SerializableFinishedListLock);
3750 }
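
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): because the finished list above is kept in commit
 * order, cleanup can stop at the first entry that is still interesting
 * instead of scanning the whole list.
 */
#ifdef SSI_ILLUSTRATION
static int
demo_cleanup_prefix(const unsigned long long *finished_before, int n,
					unsigned long long global_xmin)
{
	int			i;

	for (i = 0; i < n; i++)
	{
		if (finished_before[i] > global_xmin)
			break;				/* still interesting; so are later ones */
		/* entry i would be released here */
	}
	return i;					/* number of entries cleaned up */
}
#endif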
3751 
3752 /*
3753  * This is the normal way to delete anything from any of the predicate
3754  * locking hash tables.  Given a transaction which we know can be deleted:
3755  * delete all predicate locks held by that transaction and any predicate
3756  * lock targets which are now unreferenced by a lock; delete all conflicts
3757  * for the transaction; delete all xid values for the transaction; then
3758  * delete the transaction.
3759  *
3760  * When the partial flag is set, we can release all predicate locks and
3761  * in-conflict information -- we've established that there are no longer
3762  * any overlapping read write transactions for which this transaction could
3763  * matter -- but keep the transaction entry itself and any outConflicts.
3764  *
3765  * When the summarize flag is set, we've run short of room for sxact data
3766  * and must summarize to the SLRU.  Predicate locks are transferred to a
3767  * dummy "old" transaction, with duplicate locks on a single target
3768  * collapsing to a single lock with the "latest" commitSeqNo from among
3769  * the conflicting locks.
3770  */
3771 static void
3772 ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
3773 						   bool summarize)
3774 {
3775 	PREDICATELOCK *predlock;
3776 	SERIALIZABLEXIDTAG sxidtag;
3777 	RWConflict	conflict,
3778 				nextConflict;
3779 
3780 	Assert(sxact != NULL);
3781 	Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
3782 	Assert(partial || !SxactIsOnFinishedList(sxact));
3783 	Assert(LWLockHeldByMe(SerializableFinishedListLock));
3784 
3785 	/*
3786 	 * First release all the predicate locks held by this xact (or transfer
3787 	 * them to OldCommittedSxact if summarize is true)
3788 	 */
3789 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
3790 	predlock = (PREDICATELOCK *)
3791 		SHMQueueNext(&(sxact->predicateLocks),
3792 					 &(sxact->predicateLocks),
3793 					 offsetof(PREDICATELOCK, xactLink));
3794 	while (predlock)
3795 	{
3796 		PREDICATELOCK *nextpredlock;
3797 		PREDICATELOCKTAG tag;
3798 		SHM_QUEUE  *targetLink;
3799 		PREDICATELOCKTARGET *target;
3800 		PREDICATELOCKTARGETTAG targettag;
3801 		uint32		targettaghash;
3802 		LWLock	   *partitionLock;
3803 
3804 		nextpredlock = (PREDICATELOCK *)
3805 			SHMQueueNext(&(sxact->predicateLocks),
3806 						 &(predlock->xactLink),
3807 						 offsetof(PREDICATELOCK, xactLink));
3808 
3809 		tag = predlock->tag;
3810 		targetLink = &(predlock->targetLink);
3811 		target = tag.myTarget;
3812 		targettag = target->tag;
3813 		targettaghash = PredicateLockTargetTagHashCode(&targettag);
3814 		partitionLock = PredicateLockHashPartitionLock(targettaghash);
3815 
3816 		LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3817 
3818 		SHMQueueDelete(targetLink);
3819 
3820 		hash_search_with_hash_value(PredicateLockHash, &tag,
3821 									PredicateLockHashCodeFromTargetHashCode(&tag,
3822 																			targettaghash),
3823 									HASH_REMOVE, NULL);
3824 		if (summarize)
3825 		{
3826 			bool		found;
3827 
3828 			/* Fold into dummy transaction list. */
3829 			tag.myXact = OldCommittedSxact;
3830 			predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
3831 												   PredicateLockHashCodeFromTargetHashCode(&tag,
3832 																						   targettaghash),
3833 												   HASH_ENTER_NULL, &found);
3834 			if (!predlock)
3835 				ereport(ERROR,
3836 						(errcode(ERRCODE_OUT_OF_MEMORY),
3837 						 errmsg("out of shared memory"),
3838 						 errhint("You might need to increase max_pred_locks_per_transaction.")));
3839 			if (found)
3840 			{
3841 				Assert(predlock->commitSeqNo != 0);
3842 				Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
3843 				if (predlock->commitSeqNo < sxact->commitSeqNo)
3844 					predlock->commitSeqNo = sxact->commitSeqNo;
3845 			}
3846 			else
3847 			{
3848 				SHMQueueInsertBefore(&(target->predicateLocks),
3849 									 &(predlock->targetLink));
3850 				SHMQueueInsertBefore(&(OldCommittedSxact->predicateLocks),
3851 									 &(predlock->xactLink));
3852 				predlock->commitSeqNo = sxact->commitSeqNo;
3853 			}
3854 		}
3855 		else
3856 			RemoveTargetIfNoLongerUsed(target, targettaghash);
3857 
3858 		LWLockRelease(partitionLock);
3859 
3860 		predlock = nextpredlock;
3861 	}
3862 
3863 	/*
3864 	 * Rather than retail removal, just re-init the head after we've run
3865 	 * through the list.
3866 	 */
3867 	SHMQueueInit(&sxact->predicateLocks);
3868 
3869 	LWLockRelease(SerializablePredicateLockListLock);
3870 
3871 	sxidtag.xid = sxact->topXid;
3872 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3873 
3874 	/* Release all outConflicts (unless 'partial' is true) */
3875 	if (!partial)
3876 	{
3877 		conflict = (RWConflict)
3878 			SHMQueueNext(&sxact->outConflicts,
3879 						 &sxact->outConflicts,
3880 						 offsetof(RWConflictData, outLink));
3881 		while (conflict)
3882 		{
3883 			nextConflict = (RWConflict)
3884 				SHMQueueNext(&sxact->outConflicts,
3885 							 &conflict->outLink,
3886 							 offsetof(RWConflictData, outLink));
3887 			if (summarize)
3888 				conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
3889 			ReleaseRWConflict(conflict);
3890 			conflict = nextConflict;
3891 		}
3892 	}
3893 
3894 	/* Release all inConflicts. */
3895 	conflict = (RWConflict)
3896 		SHMQueueNext(&sxact->inConflicts,
3897 					 &sxact->inConflicts,
3898 					 offsetof(RWConflictData, inLink));
3899 	while (conflict)
3900 	{
3901 		nextConflict = (RWConflict)
3902 			SHMQueueNext(&sxact->inConflicts,
3903 						 &conflict->inLink,
3904 						 offsetof(RWConflictData, inLink));
3905 		if (summarize)
3906 			conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
3907 		ReleaseRWConflict(conflict);
3908 		conflict = nextConflict;
3909 	}
3910 
3911 	/* Finally, get rid of the xid and the record of the transaction itself. */
3912 	if (!partial)
3913 	{
3914 		if (sxidtag.xid != InvalidTransactionId)
3915 			hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
3916 		ReleasePredXact(sxact);
3917 	}
3918 
3919 	LWLockRelease(SerializableXactHashLock);
3920 }
3921 
3922 /*
3923  * Tests whether the given top level transaction is concurrent with
3924  * (overlaps) our current transaction.
3925  *
3926  * We need to identify the top level transaction for SSI, anyway, so pass
3927  * that to this function to save the overhead of checking the snapshot's
3928  * subxip array.
3929  */
3930 static bool
3931 XidIsConcurrent(TransactionId xid)
3932 {
3933 	Snapshot	snap;
3934 	uint32		i;
3935 
3936 	Assert(TransactionIdIsValid(xid));
3937 	Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));
3938 
3939 	snap = GetTransactionSnapshot();
3940 
3941 	if (TransactionIdPrecedes(xid, snap->xmin))
3942 		return false;
3943 
3944 	if (TransactionIdFollowsOrEquals(xid, snap->xmax))
3945 		return true;
3946 
3947 	for (i = 0; i < snap->xcnt; i++)
3948 	{
3949 		if (xid == snap->xip[i])
3950 			return true;
3951 	}
3952 
3953 	return false;
3954 }
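
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): the test above is the usual snapshot question
 * inverted -- an xid overlaps our transaction exactly when our snapshot
 * does not show it as already completed.  This model uses plain
 * unsigned ints and ignores xid wraparound.
 */
#ifdef SSI_ILLUSTRATION
typedef struct DemoSnapshot
{
	unsigned	xmin;			/* all xids < xmin were finished */
	unsigned	xmax;			/* all xids >= xmax had not started */
	const unsigned *xip;		/* xids in progress at snapshot time */
	int			xcnt;
} DemoSnapshot;

static int
demo_xid_is_concurrent(const DemoSnapshot *snap, unsigned xid)
{
	int			i;

	if (xid < snap->xmin)
		return 0;				/* finished before our snapshot */
	if (xid >= snap->xmax)
		return 1;				/* started after our snapshot */
	for (i = 0; i < snap->xcnt; i++)
	{
		if (xid == snap->xip[i])
			return 1;			/* in progress at snapshot time */
	}
	return 0;
}
#endif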
3955 
3956 /*
3957  * CheckForSerializableConflictOut
3958  *		We are reading a tuple which has been modified.  If it is visible to
3959  *		us but has been deleted, that indicates a rw-conflict out.  If it's
3960  *		not visible and was created by a concurrent (overlapping)
3961  *		serializable transaction, that is also a rw-conflict out.
3962  *
3963  * We will determine the top level xid of the writing transaction with which
3964  * we may be in conflict, and check for overlap with our own transaction.
3965  * If the transactions overlap (i.e., they cannot see each other's writes),
3966  * then we have a conflict out.
3967  *
3968  * This function should be called just about anywhere in heapam.c where a
3969  * tuple has been read. The caller must hold at least a shared lock on the
3970  * buffer, because this function might set hint bits on the tuple. There is
3971  * currently no known reason to call this function from an index AM.
3972  */
3973 void
3974 CheckForSerializableConflictOut(bool visible, Relation relation,
3975 								HeapTuple tuple, Buffer buffer,
3976 								Snapshot snapshot)
3977 {
3978 	TransactionId xid;
3979 	SERIALIZABLEXIDTAG sxidtag;
3980 	SERIALIZABLEXID *sxid;
3981 	SERIALIZABLEXACT *sxact;
3982 	HTSV_Result htsvResult;
3983 
3984 	if (!SerializationNeededForRead(relation, snapshot))
3985 		return;
3986 
3987 	/* Check if someone else has already decided that we need to die */
3988 	if (SxactIsDoomed(MySerializableXact))
3989 	{
3990 		ereport(ERROR,
3991 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3992 				 errmsg("could not serialize access due to read/write dependencies among transactions"),
3993 				 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
3994 				 errhint("The transaction might succeed if retried.")));
3995 	}
3996 
3997 	/*
3998 	 * Check to see whether the tuple has been written to by a concurrent
3999 	 * transaction, either to create it not visible to us, or to delete it
4000 	 * while it is visible to us.  The "visible" bool indicates whether the
4001 	 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
4002 	 * is going on with it.
4003 	 *
4004 	 * In the event of a concurrently inserted tuple that also happens to have
4005 	 * been concurrently updated (by a separate transaction), the xmin of the
4006 	 * tuple will be used -- not the updater's xid.
4007 	 */
4008 	htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
4009 	switch (htsvResult)
4010 	{
4011 		case HEAPTUPLE_LIVE:
4012 			if (visible)
4013 				return;
4014 			xid = HeapTupleHeaderGetXmin(tuple->t_data);
4015 			break;
4016 		case HEAPTUPLE_RECENTLY_DEAD:
4017 		case HEAPTUPLE_DELETE_IN_PROGRESS:
4018 			if (visible)
4019 				xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
4020 			else
4021 				xid = HeapTupleHeaderGetXmin(tuple->t_data);
4022 
4023 			if (TransactionIdPrecedes(xid, TransactionXmin))
4024 			{
4025 				/* This is like the HEAPTUPLE_DEAD case */
4026 				Assert(!visible);
4027 				return;
4028 			}
4029 			break;
4030 		case HEAPTUPLE_INSERT_IN_PROGRESS:
4031 			xid = HeapTupleHeaderGetXmin(tuple->t_data);
4032 			break;
4033 		case HEAPTUPLE_DEAD:
4034 			Assert(!visible);
4035 			return;
4036 		default:
4037 
4038 			/*
4039 			 * The only way to get to this default clause is if a new value is
4040 			 * added to the enum type without adding it to this switch
4041 			 * statement.  That's a bug, so elog.
4042 			 */
4043 			elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
4044 
4045 			/*
4046 			 * In spite of having all enum values covered and calling elog on
4047 			 * this default, some compilers think this is a code path which
4048 			 * allows xid to be used below without initialization. Silence
4049 			 * that warning.
4050 			 */
4051 			xid = InvalidTransactionId;
4052 	}
4053 	Assert(TransactionIdIsValid(xid));
4054 	Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
4055 
4056 	/*
4057 	 * Find top level xid.  Bail out if xid is too early to be a conflict, or
4058 	 * if it's our own xid.
4059 	 */
4060 	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
4061 		return;
4062 	xid = SubTransGetTopmostTransaction(xid);
4063 	if (TransactionIdPrecedes(xid, TransactionXmin))
4064 		return;
4065 	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
4066 		return;
4067 
4068 	/*
4069 	 * Find sxact or summarized info for the top level xid.
4070 	 */
4071 	sxidtag.xid = xid;
4072 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4073 	sxid = (SERIALIZABLEXID *)
4074 		hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
4075 	if (!sxid)
4076 	{
4077 		/*
4078 		 * Transaction not found in "normal" SSI structures.  Check whether it
4079 		 * got pushed out to SLRU storage for "old committed" transactions.
4080 		 */
4081 		SerCommitSeqNo conflictCommitSeqNo;
4082 
4083 		conflictCommitSeqNo = OldSerXidGetMinConflictCommitSeqNo(xid);
4084 		if (conflictCommitSeqNo != 0)
4085 		{
4086 			if (conflictCommitSeqNo != InvalidSerCommitSeqNo
4087 				&& (!SxactIsReadOnly(MySerializableXact)
4088 					|| conflictCommitSeqNo
4089 					<= MySerializableXact->SeqNo.lastCommitBeforeSnapshot))
4090 				ereport(ERROR,
4091 						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4092 						 errmsg("could not serialize access due to read/write dependencies among transactions"),
4093 						 errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
4094 						 errhint("The transaction might succeed if retried.")));
4095 
4096 			if (SxactHasSummaryConflictIn(MySerializableXact)
4097 				|| !SHMQueueEmpty(&MySerializableXact->inConflicts))
4098 				ereport(ERROR,
4099 						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4100 						 errmsg("could not serialize access due to read/write dependencies among transactions"),
4101 						 errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
4102 						 errhint("The transaction might succeed if retried.")));
4103 
4104 			MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4105 		}
4106 
4107 		/* It's not serializable or otherwise not important. */
4108 		LWLockRelease(SerializableXactHashLock);
4109 		return;
4110 	}
4111 	sxact = sxid->myXact;
4112 	Assert(TransactionIdEquals(sxact->topXid, xid));
4113 	if (sxact == MySerializableXact || SxactIsDoomed(sxact))
4114 	{
4115 		/* Can't conflict with ourself or a transaction that will roll back. */
4116 		LWLockRelease(SerializableXactHashLock);
4117 		return;
4118 	}
4119 
4120 	/*
4121 	 * We have a conflict out to a transaction which has a conflict out to a
4122 	 * summarized transaction.  That summarized transaction must have
4123 	 * committed first, and we can't tell when it committed in relation to our
4124 	 * snapshot acquisition, so something needs to be canceled.
4125 	 */
4126 	if (SxactHasSummaryConflictOut(sxact))
4127 	{
4128 		if (!SxactIsPrepared(sxact))
4129 		{
4130 			sxact->flags |= SXACT_FLAG_DOOMED;
4131 			LWLockRelease(SerializableXactHashLock);
4132 			return;
4133 		}
4134 		else
4135 		{
4136 			LWLockRelease(SerializableXactHashLock);
4137 			ereport(ERROR,
4138 					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4139 					 errmsg("could not serialize access due to read/write dependencies among transactions"),
4140 					 errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
4141 					 errhint("The transaction might succeed if retried.")));
4142 		}
4143 	}
4144 
4145 	/*
4146 	 * If this is a read-only transaction and the writing transaction has
4147 	 * committed, and it doesn't have a rw-conflict to a transaction which
4148 	 * committed before it, no conflict.
4149 	 */
4150 	if (SxactIsReadOnly(MySerializableXact)
4151 		&& SxactIsCommitted(sxact)
4152 		&& !SxactHasSummaryConflictOut(sxact)
4153 		&& (!SxactHasConflictOut(sxact)
4154 			|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
4155 	{
4156 		/* Read-only transaction will appear to run first.  No conflict. */
4157 		LWLockRelease(SerializableXactHashLock);
4158 		return;
4159 	}
4160 
4161 	if (!XidIsConcurrent(xid))
4162 	{
4163 		/* This write was already in our snapshot; no conflict. */
4164 		LWLockRelease(SerializableXactHashLock);
4165 		return;
4166 	}
4167 
4168 	if (RWConflictExists(MySerializableXact, sxact))
4169 	{
4170 		/* We don't want duplicate conflict records in the list. */
4171 		LWLockRelease(SerializableXactHashLock);
4172 		return;
4173 	}
4174 
4175 	/*
4176 	 * Flag the conflict.  But first, if this conflict creates a dangerous
4177 	 * structure, ereport an error.
4178 	 */
4179 	FlagRWConflict(MySerializableXact, sxact);
4180 	LWLockRelease(SerializableXactHashLock);
4181 }
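
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): the switch above amounts to choosing which xid to
 * test for concurrency -- the deleter's xid when we can see a tuple
 * that is being removed, the creator's xid when we cannot see a tuple
 * that exists.  Zero stands in for "no conflict possible".
 */
#ifdef SSI_ILLUSTRATION
typedef enum
{
	DEMO_TUPLE_LIVE,
	DEMO_TUPLE_BEING_DELETED,	/* RECENTLY_DEAD or DELETE_IN_PROGRESS */
	DEMO_TUPLE_BEING_INSERTED,	/* INSERT_IN_PROGRESS */
	DEMO_TUPLE_DEAD
} DemoTupleState;

static unsigned
demo_conflict_xid(DemoTupleState state, int visible,
				  unsigned creator_xid, unsigned deleter_xid)
{
	switch (state)
	{
		case DEMO_TUPLE_LIVE:
			return visible ? 0 : creator_xid;
		case DEMO_TUPLE_BEING_DELETED:
			return visible ? deleter_xid : creator_xid;
		case DEMO_TUPLE_BEING_INSERTED:
			return creator_xid;
		case DEMO_TUPLE_DEAD:
		default:
			return 0;
	}
}
#endif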
4182 
4183 /*
4184  * Check a particular target for rw-dependency conflict in. A subroutine of
4185  * CheckForSerializableConflictIn().
4186  */
4187 static void
4188 CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
4189 {
4190 	uint32		targettaghash;
4191 	LWLock	   *partitionLock;
4192 	PREDICATELOCKTARGET *target;
4193 	PREDICATELOCK *predlock;
4194 	PREDICATELOCK *mypredlock = NULL;
4195 	PREDICATELOCKTAG mypredlocktag;
4196 
4197 	Assert(MySerializableXact != InvalidSerializableXact);
4198 
4199 	/*
4200 	 * The same hash and LW lock apply to the lock target and the lock itself.
4201 	 */
4202 	targettaghash = PredicateLockTargetTagHashCode(targettag);
4203 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
4204 	LWLockAcquire(partitionLock, LW_SHARED);
4205 	target = (PREDICATELOCKTARGET *)
4206 		hash_search_with_hash_value(PredicateLockTargetHash,
4207 									targettag, targettaghash,
4208 									HASH_FIND, NULL);
4209 	if (!target)
4210 	{
4211 		/* Nothing has this target locked; we're done here. */
4212 		LWLockRelease(partitionLock);
4213 		return;
4214 	}
4215 
4216 	/*
4217 	 * Each lock for an overlapping transaction represents a conflict: a
4218 	 * rw-dependency in to this transaction.
4219 	 */
4220 	predlock = (PREDICATELOCK *)
4221 		SHMQueueNext(&(target->predicateLocks),
4222 					 &(target->predicateLocks),
4223 					 offsetof(PREDICATELOCK, targetLink));
4224 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4225 	while (predlock)
4226 	{
4227 		SHM_QUEUE  *predlocktargetlink;
4228 		PREDICATELOCK *nextpredlock;
4229 		SERIALIZABLEXACT *sxact;
4230 
4231 		predlocktargetlink = &(predlock->targetLink);
4232 		nextpredlock = (PREDICATELOCK *)
4233 			SHMQueueNext(&(target->predicateLocks),
4234 						 predlocktargetlink,
4235 						 offsetof(PREDICATELOCK, targetLink));
4236 
4237 		sxact = predlock->tag.myXact;
4238 		if (sxact == MySerializableXact)
4239 		{
4240 			/*
4241 			 * If we're getting a write lock on a tuple, we don't need a
4242 			 * predicate (SIREAD) lock on the same tuple. We can safely remove
4243 			 * our SIREAD lock, but we'll defer doing so until after the loop
4244 			 * because that requires upgrading to an exclusive partition lock.
4245 			 *
4246 			 * We can't use this optimization within a subtransaction because
4247 			 * the subtransaction could roll back, and we would be left
4248 			 * without any lock at the top level.
4249 			 */
4250 			if (!IsSubTransaction()
4251 				&& GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
4252 			{
4253 				mypredlock = predlock;
4254 				mypredlocktag = predlock->tag;
4255 			}
4256 		}
4257 		else if (!SxactIsDoomed(sxact)
4258 				 && (!SxactIsCommitted(sxact)
4259 					 || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4260 											  sxact->finishedBefore))
4261 				 && !RWConflictExists(sxact, MySerializableXact))
4262 		{
4263 			LWLockRelease(SerializableXactHashLock);
4264 			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4265 
4266 			/*
4267 			 * Re-check after getting exclusive lock because the other
4268 			 * transaction may have flagged a conflict.
4269 			 */
4270 			if (!SxactIsDoomed(sxact)
4271 				&& (!SxactIsCommitted(sxact)
4272 					|| TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
4273 											 sxact->finishedBefore))
4274 				&& !RWConflictExists(sxact, MySerializableXact))
4275 			{
4276 				FlagRWConflict(sxact, MySerializableXact);
4277 			}
4278 
4279 			LWLockRelease(SerializableXactHashLock);
4280 			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
4281 		}
4282 
4283 		predlock = nextpredlock;
4284 	}
4285 	LWLockRelease(SerializableXactHashLock);
4286 	LWLockRelease(partitionLock);
4287 
4288 	/*
4289 	 * If we found one of our own SIREAD locks to remove, remove it now.
4290 	 *
4291 	 * At this point our transaction already has a RowExclusiveLock on the
4292 	 * relation, so we are OK to drop the predicate lock on the tuple, if
4293 	 * found, without fearing that another write against the tuple will occur
4294 	 * before the MVCC information makes it to the buffer.
4295 	 */
4296 	if (mypredlock != NULL)
4297 	{
4298 		uint32		predlockhashcode;
4299 		PREDICATELOCK *rmpredlock;
4300 
4301 		LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
4302 		LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4303 		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4304 
4305 		/*
4306 		 * Remove the predicate lock from shared memory, if it wasn't removed
4307 		 * while the locks were released.  One way that could happen is from
4308 		 * autovacuum cleaning up an index.
4309 		 */
4310 		predlockhashcode = PredicateLockHashCodeFromTargetHashCode
4311 			(&mypredlocktag, targettaghash);
4312 		rmpredlock = (PREDICATELOCK *)
4313 			hash_search_with_hash_value(PredicateLockHash,
4314 										&mypredlocktag,
4315 										predlockhashcode,
4316 										HASH_FIND, NULL);
4317 		if (rmpredlock != NULL)
4318 		{
4319 			Assert(rmpredlock == mypredlock);
4320 
4321 			SHMQueueDelete(&(mypredlock->targetLink));
4322 			SHMQueueDelete(&(mypredlock->xactLink));
4323 
4324 			rmpredlock = (PREDICATELOCK *)
4325 				hash_search_with_hash_value(PredicateLockHash,
4326 											&mypredlocktag,
4327 											predlockhashcode,
4328 											HASH_REMOVE, NULL);
4329 			Assert(rmpredlock == mypredlock);
4330 
4331 			RemoveTargetIfNoLongerUsed(target, targettaghash);
4332 		}
4333 
4334 		LWLockRelease(SerializableXactHashLock);
4335 		LWLockRelease(partitionLock);
4336 		LWLockRelease(SerializablePredicateLockListLock);
4337 
4338 		if (rmpredlock != NULL)
4339 		{
4340 			/*
4341 			 * Remove entry in local lock table if it exists. It's OK if it
4342 			 * doesn't exist; that means the lock was transferred to a new
4343 			 * target by a different backend.
4344 			 */
4345 			hash_search_with_hash_value(LocalPredicateLockHash,
4346 										targettag, targettaghash,
4347 										HASH_REMOVE, NULL);
4348 
4349 			DecrementParentLocks(targettag);
4350 		}
4351 	}
4352 }
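
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): dropping our own SIREAD lock above follows the
 * classic "note it under the shared lock, reacquire exclusively,
 * re-check, then remove" pattern, since the entry can disappear in the
 * unlocked window (e.g., via a lock transfer by another backend).
 */
#ifdef SSI_ILLUSTRATION
/* Toy hash slot: zero means "absent". */
static int
demo_remove_after_relock(int *slot, int remembered)
{
	if (*slot != remembered)
		return 0;				/* already gone -- nothing to do */
	*slot = 0;					/* still there: remove it */
	return 1;
}
#endif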
4353 
4354 /*
4355  * CheckForSerializableConflictIn
4356  *		We are writing the given tuple.  If that indicates a rw-conflict
4357  *		in from another serializable transaction, take appropriate action.
4358  *
4359  * Skip checking for any granularity for which a parameter is missing.
4360  *
4361  * A tuple update or delete is in conflict if we have a predicate lock
4362  * against the relation or page in which the tuple exists, or against the
4363  * tuple itself.
4364  */
4365 void
4366 CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
4367 							   Buffer buffer)
4368 {
4369 	PREDICATELOCKTARGETTAG targettag;
4370 
4371 	if (!SerializationNeededForWrite(relation))
4372 		return;
4373 
4374 	/* Check if someone else has already decided that we need to die */
4375 	if (SxactIsDoomed(MySerializableXact))
4376 		ereport(ERROR,
4377 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4378 				 errmsg("could not serialize access due to read/write dependencies among transactions"),
4379 				 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
4380 				 errhint("The transaction might succeed if retried.")));
4381 
4382 	/*
4383 	 * We're doing a write which might cause rw-conflicts now or later.
4384 	 * Memorize that fact.
4385 	 */
4386 	MyXactDidWrite = true;
4387 
4388 	/*
4389 	 * It is important that we check for locks from the finest granularity to
4390 	 * the coarsest granularity, so that granularity promotion doesn't cause
4391 	 * us to miss a lock.  The new (coarser) lock will be acquired before the
4392 	 * old (finer) locks are released.
4393 	 *
4394 	 * It is not possible to take and hold a lock across the checks for all
4395 	 * granularities because each target could be in a separate partition.
4396 	 */
4397 	if (tuple != NULL)
4398 	{
4399 		SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
4400 										 relation->rd_node.dbNode,
4401 										 relation->rd_id,
4402 										 ItemPointerGetBlockNumber(&(tuple->t_self)),
4403 										 ItemPointerGetOffsetNumber(&(tuple->t_self)));
4404 		CheckTargetForConflictsIn(&targettag);
4405 	}
4406 
4407 	if (BufferIsValid(buffer))
4408 	{
4409 		SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
4410 										relation->rd_node.dbNode,
4411 										relation->rd_id,
4412 										BufferGetBlockNumber(buffer));
4413 		CheckTargetForConflictsIn(&targettag);
4414 	}
4415 
4416 	SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
4417 										relation->rd_node.dbNode,
4418 										relation->rd_id);
4419 	CheckTargetForConflictsIn(&targettag);
4420 }
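
/*
 * Illustrative sketch (hypothetical names, compiled only under
 * SSI_ILLUSTRATION): checking finest-to-coarsest, as above, is what
 * makes granularity promotion safe -- the promoting backend takes the
 * coarse lock before dropping the fine ones, so a scan in the same
 * direction is guaranteed to see a lock at some level.
 */
#ifdef SSI_ILLUSTRATION
enum
{
	DEMO_GRAN_TUPLE,			/* finest */
	DEMO_GRAN_PAGE,
	DEMO_GRAN_RELATION,			/* coarsest */
	DEMO_GRAN_COUNT
};

static int
demo_lock_seen_fine_to_coarse(const int locked[DEMO_GRAN_COUNT])
{
	int			g;

	for (g = DEMO_GRAN_TUPLE; g < DEMO_GRAN_COUNT; g++)
	{
		if (locked[g])
			return 1;
	}
	return 0;
}
#endif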
4421 
4422 /*
4423  * CheckTableForSerializableConflictIn
4424  *		The entire table is going through a DDL-style logical mass delete
4425  *		like TRUNCATE or DROP TABLE.  If that causes a rw-conflict in from
4426  *		another serializable transaction, take appropriate action.
4427  *
4428  * While these operations do not operate entirely within the bounds of
4429  * snapshot isolation, they can occur inside a serializable transaction, and
4430  * will logically occur after any reads which saw rows which were destroyed
4431  * by these operations, so we do what we can to serialize properly under
4432  * SSI.
4433  *
4434  * The relation passed in must be a heap relation. Any predicate lock of any
4435  * granularity on the heap will cause a rw-conflict in to this transaction.
4436  * Predicate locks on indexes do not matter because they only exist to guard
4437  * against conflicting inserts into the index, and this is a mass *delete*.
4438  * When a table is truncated or dropped, the index will also be truncated
4439  * or dropped, and we'll deal with locks on the index when that happens.
4440  *
4441  * Dropping or truncating a table also needs to drop any existing predicate
4442  * locks on heap tuples or pages, because they're about to go away. This
4443  * should be done before altering the predicate locks because the transaction
4444  * could be rolled back because of a conflict, in which case the lock changes
4445  * are not needed. (At the moment, we don't actually bother to drop the
4446  * existing locks on a dropped or truncated table. That might lead to some
4447  * false positives, but it doesn't seem worth the trouble.)
4448  */
4449 void
4450 CheckTableForSerializableConflictIn(Relation relation)
4451 {
4452 	HASH_SEQ_STATUS seqstat;
4453 	PREDICATELOCKTARGET *target;
4454 	Oid			dbId;
4455 	Oid			heapId;
4456 	int			i;
4457 
4458 	/*
4459 	 * Bail out quickly if there are no serializable transactions running.
4460 	 * It's safe to check this without taking locks because the caller is
4461 	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
4462 	 * would matter here can be acquired while that is held.
4463 	 */
4464 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
4465 		return;
4466 
4467 	if (!SerializationNeededForWrite(relation))
4468 		return;
4469 
4470 	/*
4471 	 * We're doing a write which might cause rw-conflicts now or later.
4472 	 * Memorize that fact.
4473 	 */
4474 	MyXactDidWrite = true;
4475 
4476 	Assert(relation->rd_index == NULL); /* not an index relation */
4477 
4478 	dbId = relation->rd_node.dbNode;
4479 	heapId = relation->rd_id;
4480 
4481 	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
4482 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
4483 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
4484 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
4485 
4486 	/* Scan through target list */
4487 	hash_seq_init(&seqstat, PredicateLockTargetHash);
4488 
4489 	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
4490 	{
4491 		PREDICATELOCK *predlock;
4492 
4493 		/*
4494 		 * Check whether this is a target which needs attention.
4495 		 */
4496 		if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
4497 			continue;			/* wrong relation id */
4498 		if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
4499 			continue;			/* wrong database id */
4500 
4501 		/*
4502 		 * Loop through locks for this target and flag conflicts.
4503 		 */
4504 		predlock = (PREDICATELOCK *)
4505 			SHMQueueNext(&(target->predicateLocks),
4506 						 &(target->predicateLocks),
4507 						 offsetof(PREDICATELOCK, targetLink));
4508 		while (predlock)
4509 		{
4510 			PREDICATELOCK *nextpredlock;
4511 
4512 			nextpredlock = (PREDICATELOCK *)
4513 				SHMQueueNext(&(target->predicateLocks),
4514 							 &(predlock->targetLink),
4515 							 offsetof(PREDICATELOCK, targetLink));
4516 
4517 			if (predlock->tag.myXact != MySerializableXact
4518 				&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
4519 			{
4520 				FlagRWConflict(predlock->tag.myXact, MySerializableXact);
4521 			}
4522 
4523 			predlock = nextpredlock;
4524 		}
4525 	}
4526 
4527 	/* Release locks in reverse order */
4528 	LWLockRelease(SerializableXactHashLock);
4529 	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
4530 		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
4531 	LWLockRelease(SerializablePredicateLockListLock);
4532 }
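
/*
 * Illustrative sketch only, not code from any actual caller: a DDL path
 * that is about to destroy all heap rows would be expected to invoke the
 * function above while holding ACCESS EXCLUSIVE lock on the relation,
 * roughly like this (the surrounding calls are simplified):
 *
 *		rel = relation_open(relid, AccessExclusiveLock);
 *		CheckTableForSerializableConflictIn(rel);
 *		... physically truncate or drop the heap ...
 *
 * If the conflict check raises a serialization failure, the statement is
 * rolled back before any rows are destroyed.
 */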


/*
 * Flag a rw-dependency between two serializable transactions.
 *
 * The caller is responsible for ensuring that we have a LW lock on
 * the transaction hash table.
 */
static void
FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
{
	Assert(reader != writer);

	/* First, see if this conflict causes failure. */
	OnConflict_CheckForSerializationFailure(reader, writer);

	/* Actually do the conflict flagging. */
	if (reader == OldCommittedSxact)
		writer->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
	else if (writer == OldCommittedSxact)
		reader->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
	else
		SetRWConflict(reader, writer);
}
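
/*
 * A note on the special case above: OldCommittedSxact is the single dummy
 * SERIALIZABLEXACT used to stand in for old committed transactions whose
 * per-transaction conflict data has been summarized out of shared memory,
 * so a conflict with it cannot be recorded as an explicit edge and must
 * instead be folded into the surviving transaction's summary flag.
 */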

/*----------------------------------------------------------------------------
 * We are about to add a RW-edge to the dependency graph - check that we don't
 * introduce a dangerous structure by doing so, and abort one of the
 * transactions if so.
 *
 * A serialization failure can only occur if there is a dangerous structure
 * in the dependency graph:
 *
 *		Tin ------> Tpivot ------> Tout
 *			  rw			 rw
 *
 * Furthermore, Tout must commit first.
 *
 * One more optimization is that if Tin is declared READ ONLY (or commits
 * without writing), we can only have a problem if Tout committed before Tin
 * acquired its snapshot.
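 *
 * As a purely illustrative example: if Tin reads a row that Tpivot later
 * updates, and Tpivot reads a row that Tout later updates, the two rw-edges
 * shown above arise.  Should Tout commit first, SSI treats the structure as
 * dangerous and aborts one of the transactions, even though not every such
 * history is truly non-serializable.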
 *----------------------------------------------------------------------------
 */
static void
OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
										SERIALIZABLEXACT *writer)
{
	bool		failure;
	RWConflict	conflict;

	Assert(LWLockHeldByMe(SerializableXactHashLock));

	failure = false;

	/*------------------------------------------------------------------------
	 * Check for already-committed writer with rw-conflict out flagged
	 * (conflict-flag on W means that T2 committed before W):
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * That is a dangerous structure, so we must abort. (Since the writer
	 * has already committed, we must be the reader.)
	 *------------------------------------------------------------------------
	 */
	if (SxactIsCommitted(writer)
		&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
		failure = true;

	/*------------------------------------------------------------------------
	 * Check whether the writer has become a pivot with an out-conflict
	 * committed transaction (T2), and T2 committed first:
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * Because T2 must've committed first, there is no anomaly if:
	 * - the reader committed before T2
	 * - the writer committed before T2
	 * - the reader is a READ ONLY transaction and the reader was concurrent
	 *	 with T2 (= reader acquired its snapshot before T2 committed)
	 *
	 * We also handle the case that T2 is prepared but not yet committed
	 * here. In that case T2 has already checked for conflicts, so if it
	 * commits first, making the above conflict real, it's too late for it
	 * to abort.
	 *------------------------------------------------------------------------
	 */
	if (!failure)
	{
		if (SxactHasSummaryConflictOut(writer))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &writer->outConflicts,
							 offsetof(RWConflictData, outLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t2 = conflict->sxactIn;

			if (SxactIsPrepared(t2)
				&& (!SxactIsCommitted(reader)
					|| t2->prepareSeqNo <= reader->commitSeqNo)
				&& (!SxactIsCommitted(writer)
					|| t2->prepareSeqNo <= writer->commitSeqNo)
				&& (!SxactIsReadOnly(reader)
					|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &conflict->outLink,
							 offsetof(RWConflictData, outLink));
		}
	}

	/*------------------------------------------------------------------------
	 * Check whether the reader has become a pivot with a writer
	 * that's committed (or prepared):
	 *
	 *		T0 ------> R ------> W
	 *			 rw		   rw
	 *
	 * Because W must've committed first for an anomaly to occur, there is no
	 * anomaly if:
	 * - T0 committed before the writer
	 * - T0 is READ ONLY, and overlaps the writer
	 *------------------------------------------------------------------------
	 */
	if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
	{
		if (SxactHasSummaryConflictIn(reader))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &reader->inConflicts,
							 offsetof(RWConflictData, inLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t0 = conflict->sxactOut;

			if (!SxactIsDoomed(t0)
				&& (!SxactIsCommitted(t0)
					|| t0->commitSeqNo >= writer->prepareSeqNo)
				&& (!SxactIsReadOnly(t0)
					|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &conflict->inLink,
							 offsetof(RWConflictData, inLink));
		}
	}
	if (failure)
	{
		/*
		 * We have to kill a transaction to avoid a possible anomaly from
		 * occurring. If the writer is us, we can just ereport() to cause a
		 * transaction abort. Otherwise we flag the writer for termination,
		 * causing it to abort when it tries to commit. However, if the
		 * writer has already prepared, we can't abort it anymore, so we
		 * have to kill the reader instead.
		 */
		if (MySerializableXact == writer)
		{
			LWLockRelease(SerializableXactHashLock);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
					 errhint("The transaction might succeed if retried.")));
		}
		else if (SxactIsPrepared(writer))
		{
			LWLockRelease(SerializableXactHashLock);

			/* if we're not the writer, we have to be the reader */
			Assert(MySerializableXact == reader);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
					 errhint("The transaction might succeed if retried.")));
		}
		writer->flags |= SXACT_FLAG_DOOMED;
	}
}
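
/*
 * Purely illustrative example of a schedule that ends in one of the errors
 * raised above (table and column names are invented):
 *
 *		-- Both sessions: BEGIN ISOLATION LEVEL SERIALIZABLE;
 *		s1: SELECT count(*) FROM oncall WHERE active;
 *		s2: SELECT count(*) FROM oncall WHERE active;
 *		s1: UPDATE oncall SET active = false WHERE staff = 'alice';
 *		s2: UPDATE oncall SET active = false WHERE staff = 'bob';
 *		s1: COMMIT;
 *		s2: COMMIT;		-- one of the two transactions fails with
 *						-- ERRCODE_T_R_SERIALIZATION_FAILURE
 *
 * Each session's UPDATE writes a row the other read, forming rw-edges in
 * both directions; exactly which statement reports the failure depends on
 * the timing of the conflict checks.
 */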

/*
 * PreCommit_CheckForSerializationFailure
 *		Check for dangerous structures in a serializable transaction
 *		at commit.
 *
 * We're checking for a dangerous structure as each conflict is recorded.
 * The only way we could have a problem at commit is if this is the "out"
 * side of a pivot, and neither the "in" side nor the pivot has yet
 * committed.
 *
 * If a dangerous structure is found, the pivot (the near conflict) is
 * marked for death, because rolling back another transaction might mean
 * that we flail without ever making progress.  This transaction is
 * committing writes, so letting it commit ensures progress.  If we
 * canceled the far conflict, it might immediately fail again on retry.
 */
void
PreCommit_CheckForSerializationFailure(void)
{
	RWConflict	nearConflict;

	if (MySerializableXact == InvalidSerializableXact)
		return;

	Assert(IsolationIsSerializable());

	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
	{
		LWLockRelease(SerializableXactHashLock);
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
				 errhint("The transaction might succeed if retried.")));
	}

	nearConflict = (RWConflict)
		SHMQueueNext(&MySerializableXact->inConflicts,
					 &MySerializableXact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (nearConflict)
	{
		if (!SxactIsCommitted(nearConflict->sxactOut)
			&& !SxactIsDoomed(nearConflict->sxactOut))
		{
			RWConflict	farConflict;

			farConflict = (RWConflict)
				SHMQueueNext(&nearConflict->sxactOut->inConflicts,
							 &nearConflict->sxactOut->inConflicts,
							 offsetof(RWConflictData, inLink));
			while (farConflict)
			{
				if (farConflict->sxactOut == MySerializableXact
					|| (!SxactIsCommitted(farConflict->sxactOut)
						&& !SxactIsReadOnly(farConflict->sxactOut)
						&& !SxactIsDoomed(farConflict->sxactOut)))
				{
					/*
					 * Normally, we kill the pivot transaction to make sure we
					 * make progress if the failing transaction is retried.
					 * However, we can't kill it if it's already prepared, so
					 * in that case we commit suicide instead.
					 */
					if (SxactIsPrepared(nearConflict->sxactOut))
					{
						LWLockRelease(SerializableXactHashLock);
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to read/write dependencies among transactions"),
								 errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
								 errhint("The transaction might succeed if retried.")));
					}
					nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
					break;
				}
				farConflict = (RWConflict)
					SHMQueueNext(&nearConflict->sxactOut->inConflicts,
								 &farConflict->inLink,
								 offsetof(RWConflictData, inLink));
			}
		}

		nearConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->inConflicts,
						 &nearConflict->inLink,
						 offsetof(RWConflictData, inLink));
	}

	MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
	MySerializableXact->flags |= SXACT_FLAG_PREPARED;

	LWLockRelease(SerializableXactHashLock);
}
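
/*
 * Note that an ordinary COMMIT of a serializable transaction passes through
 * the function above as well, so every committing transaction briefly
 * carries SXACT_FLAG_PREPARED and a prepareSeqNo whether or not two-phase
 * commit is involved; the SxactIsPrepared() tests in the conflict checks
 * earlier in this file therefore cover both cases.
 */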


/*------------------------------------------------------------------------*/

/*
 * Two-phase commit support
 */

/*
 * AtPrepare_PredicateLocks
 *		Do the preparatory work for a PREPARE: make 2PC state file
 *		records for all predicate locks currently held.
 */
void
AtPrepare_PredicateLocks(void)
{
	PREDICATELOCK *predlock;
	SERIALIZABLEXACT *sxact;
	TwoPhasePredicateRecord record;
	TwoPhasePredicateXactRecord *xactRecord;
	TwoPhasePredicateLockRecord *lockRecord;

	sxact = MySerializableXact;
	xactRecord = &(record.data.xactRecord);
	lockRecord = &(record.data.lockRecord);

	if (MySerializableXact == InvalidSerializableXact)
		return;

	/* Generate an xact record for our SERIALIZABLEXACT */
	record.type = TWOPHASEPREDICATERECORD_XACT;
	xactRecord->xmin = MySerializableXact->xmin;
	xactRecord->flags = MySerializableXact->flags;

	/*
	 * Note that we don't include our conflict lists (in or out) in the
	 * statefile, because new conflicts can be added even after the
	 * transaction prepares. We'll just make a conservative assumption during
	 * recovery instead.
	 */

	RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
						   &record, sizeof(record));

	/*
	 * Generate a lock record for each lock.
	 *
	 * To do this, we need to walk the predicate lock list in our sxact rather
	 * than using the local predicate lock table because the latter is not
	 * guaranteed to be accurate.
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);

	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(sxact->predicateLocks),
					 &(sxact->predicateLocks),
					 offsetof(PREDICATELOCK, xactLink));

	while (predlock != NULL)
	{
		record.type = TWOPHASEPREDICATERECORD_LOCK;
		lockRecord->target = predlock->tag.myTarget->tag;

		RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
							   &record, sizeof(record));

		predlock = (PREDICATELOCK *)
			SHMQueueNext(&(sxact->predicateLocks),
						 &(predlock->xactLink),
						 offsetof(PREDICATELOCK, xactLink));
	}

	LWLockRelease(SerializablePredicateLockListLock);
}
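
/*
 * The net effect of the function above: a transaction holding N predicate
 * locks emits N + 1 fixed-size TwoPhasePredicateRecords into the 2PC state
 * file, one TWOPHASEPREDICATERECORD_XACT record followed by one
 * TWOPHASEPREDICATERECORD_LOCK record per lock.
 * predicatelock_twophase_recover(), below, replays them in the same order.
 */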

/*
 * PostPrepare_PredicateLocks
 *		Clean up after successful PREPARE. Unlike the non-predicate
 *		lock manager, we do not need to transfer locks to a dummy
 *		PGPROC because our SERIALIZABLEXACT will stay around
 *		anyway. We only need to clean up our local state.
 */
void
PostPrepare_PredicateLocks(TransactionId xid)
{
	if (MySerializableXact == InvalidSerializableXact)
		return;

	Assert(SxactIsPrepared(MySerializableXact));

	MySerializableXact->pid = 0;

	hash_destroy(LocalPredicateLockHash);
	LocalPredicateLockHash = NULL;

	MySerializableXact = InvalidSerializableXact;
	MyXactDidWrite = false;
}

/*
 * PredicateLockTwoPhaseFinish
 *		Release a prepared transaction's predicate locks once it
 *		commits or aborts.
 */
void
PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
{
	SERIALIZABLEXID *sxid;
	SERIALIZABLEXIDTAG sxidtag;

	sxidtag.xid = xid;

	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	sxid = (SERIALIZABLEXID *)
		hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
	LWLockRelease(SerializableXactHashLock);

	/* xid will not be found if it wasn't a serializable transaction */
	if (sxid == NULL)
		return;

	/* Release its locks */
	MySerializableXact = sxid->myXact;
	MyXactDidWrite = true;		/* conservatively assume that we wrote
								 * something */
	ReleasePredicateLocks(isCommit);
}

/*
 * Re-acquire a predicate lock belonging to a transaction that was prepared.
 */
void
predicatelock_twophase_recover(TransactionId xid, uint16 info,
							   void *recdata, uint32 len)
{
	TwoPhasePredicateRecord *record;

	Assert(len == sizeof(TwoPhasePredicateRecord));

	record = (TwoPhasePredicateRecord *) recdata;

	Assert((record->type == TWOPHASEPREDICATERECORD_XACT) ||
		   (record->type == TWOPHASEPREDICATERECORD_LOCK));

	if (record->type == TWOPHASEPREDICATERECORD_XACT)
	{
		/* Per-transaction record. Set up a SERIALIZABLEXACT. */
		TwoPhasePredicateXactRecord *xactRecord;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXIDTAG sxidtag;
		bool		found;

		xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;

		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		sxact = CreatePredXact();
		if (!sxact)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory")));

		/* vxid for a prepared xact is InvalidBackendId/xid; no pid */
		sxact->vxid.backendId = InvalidBackendId;
		sxact->vxid.localTransactionId = (LocalTransactionId) xid;
		sxact->pid = 0;

		/* a prepared xact hasn't committed yet */
		sxact->prepareSeqNo = RecoverySerCommitSeqNo;
		sxact->commitSeqNo = InvalidSerCommitSeqNo;
		sxact->finishedBefore = InvalidTransactionId;

		sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;

		/*
		 * Don't need to track this; no transactions running at the time the
		 * recovered xact started are still active, except possibly other
		 * prepared xacts, and we don't care whether those are RO_SAFE or
		 * not.
		 */
		SHMQueueInit(&(sxact->possibleUnsafeConflicts));

		SHMQueueInit(&(sxact->predicateLocks));
		SHMQueueElemInit(&(sxact->finishedLink));

		sxact->topXid = xid;
		sxact->xmin = xactRecord->xmin;
		sxact->flags = xactRecord->flags;
		Assert(SxactIsPrepared(sxact));
		if (!SxactIsReadOnly(sxact))
		{
			++(PredXact->WritableSxactCount);
			Assert(PredXact->WritableSxactCount <=
				   (MaxBackends + max_prepared_xacts));
		}

		/*
		 * We don't know whether the transaction had any conflicts or not, so
		 * we'll conservatively assume that it had both a conflict in and a
		 * conflict out, and represent that with the summary conflict flags.
		 */
		SHMQueueInit(&(sxact->outConflicts));
		SHMQueueInit(&(sxact->inConflicts));
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;

		/* Register the transaction's xid */
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
											   &sxidtag,
											   HASH_ENTER, &found);
		Assert(sxid != NULL);
		Assert(!found);
		sxid->myXact = (SERIALIZABLEXACT *) sxact;

		/*
		 * Update global xmin. Note that this is a special case compared to
		 * registering a normal transaction, because the global xmin might go
		 * backwards. That's OK, because until recovery is over we're not
		 * going to complete any transactions or create any non-prepared
		 * transactions, so there's no danger of throwing away state that is
		 * still needed.
		 */
		if ((!TransactionIdIsValid(PredXact->SxactGlobalXmin)) ||
			(TransactionIdFollows(PredXact->SxactGlobalXmin, sxact->xmin)))
		{
			PredXact->SxactGlobalXmin = sxact->xmin;
			PredXact->SxactGlobalXminCount = 1;
			OldSerXidSetActiveSerXmin(sxact->xmin);
		}
		else if (TransactionIdEquals(sxact->xmin, PredXact->SxactGlobalXmin))
		{
			Assert(PredXact->SxactGlobalXminCount > 0);
			PredXact->SxactGlobalXminCount++;
		}

		LWLockRelease(SerializableXactHashLock);
	}
	else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
	{
		/* Lock record. Recreate the PREDICATELOCK */
		TwoPhasePredicateLockRecord *lockRecord;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXIDTAG sxidtag;
		uint32		targettaghash;

		lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
		targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);

		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *)
			hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
		LWLockRelease(SerializableXactHashLock);

		Assert(sxid != NULL);
		sxact = sxid->myXact;
		Assert(sxact != InvalidSerializableXact);

		CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
	}
}