/*-------------------------------------------------------------------------
 *
 * predicate.c
 *	  POSTGRES predicate locking
 *	  to support full serializable transaction isolation
 *
 *
 * The approach taken is to implement Serializable Snapshot Isolation (SSI)
 * as initially described in this paper:
 *
 *	Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
 *	Serializable isolation for snapshot databases.
 *	In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
 *	international conference on Management of data,
 *	pages 729-738, New York, NY, USA. ACM.
 *	http://doi.acm.org/10.1145/1376616.1376690
 *
 * and further elaborated in Cahill's doctoral thesis:
 *
 *	Michael James Cahill. 2009.
 *	Serializable Isolation for Snapshot Databases.
 *	Sydney Digital Theses.
 *	University of Sydney, School of Information Technologies.
 *	http://hdl.handle.net/2123/5353
 *
 *
 * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
 * locks, which are so different from normal locks that a distinct set of
 * structures is required to handle them.  They are needed to detect
 * rw-conflicts when the read happens before the write.  (When the write
 * occurs first, the reading transaction can check for a conflict by
 * examining the MVCC data.)
 *
 * (1)	Besides tuples actually read, they must cover ranges of tuples
 *		which would have been read based on the predicate.  This will
 *		require modelling the predicates through locks against database
 *		objects such as pages, index ranges, or entire tables.
 *
 * (2)	They must be kept in RAM for quick access.  Because of this, it
 *		isn't possible to always maintain tuple-level granularity -- when
 *		the space allocated to store these approaches exhaustion, a
 *		request for a lock may need to scan for situations where a single
 *		transaction holds many fine-grained locks which can be coalesced
 *		into a single coarser-grained lock.
 *
 * (3)	They never block anything; they are more like flags than locks
 *		in that regard, although they refer to database objects and are
 *		used to identify rw-conflicts with normal write locks.
 *
 * (4)	While they are associated with a transaction, they must survive
 *		a successful COMMIT of that transaction, and remain until all
 *		overlapping transactions complete.  This even means that they
 *		must survive termination of the transaction's process.  If a
 *		top level transaction is rolled back, however, it is immediately
 *		flagged so that it can be ignored, and its SIREAD locks can be
 *		released any time after that.
 *
 * (5)	The only transactions which create SIREAD locks or check for
 *		conflicts with them are serializable transactions.
 *
 * (6)	When a write lock for a top level transaction is found to cover
 *		an existing SIREAD lock for the same transaction, the SIREAD lock
 *		can be deleted.
 *
 * (7)	A write from a serializable transaction must ensure that an xact
 *		record exists for the transaction, with the same lifespan (until
 *		all concurrent transactions complete or the transaction is rolled
 *		back) so that rw-dependencies to that transaction can be
 *		detected.
 *
 * We use an optimization for read-only transactions. Under certain
 * circumstances, a read-only transaction's snapshot can be shown to
 * never have conflicts with other transactions.  This is referred to
 * as a "safe" snapshot (and one known not to be is "unsafe").
 * However, it can't be determined whether a snapshot is safe until
 * all concurrent read/write transactions complete.
 *
 * Once a read-only transaction is known to have a safe snapshot, it
 * can release its predicate locks and exempt itself from further
 * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
 * on safe snapshots, waiting as necessary for one to be available.
 *
 *
 * Lightweight locks to manage access to the predicate locking shared
 * memory objects must be taken in this order, and should be released in
 * reverse order:
 *
 *	SerializableFinishedListLock
 *		- Protects the list of transactions which have completed but which
 *			may yet matter because they overlap still-active transactions.
 *
 *	SerializablePredicateLockListLock
 *		- Protects the linked list of locks held by a transaction.  Note
 *			that the locks themselves are also covered by the partition
 *			locks of their respective lock targets; this lock only affects
 *			the linked list connecting the locks related to a transaction.
 *		- All transactions share this single lock (with no partitioning).
 *		- There is never a need for a process other than the one running
 *			an active transaction to walk the list of locks held by that
 *			transaction.
 *		- It is relatively infrequent that another process needs to
 *			modify the list for a transaction, but it does happen for such
 *			things as index page splits for pages with predicate locks and
 *			freeing of predicate locked pages by a vacuum process.  When
 *			removing a lock in such cases, the lock itself contains the
 *			pointers needed to remove it from the list.  When adding a
 *			lock in such cases, the lock can be added using the anchor in
 *			the transaction structure.  Neither requires walking the list.
 *		- Cleaning up the list for a terminated transaction is sometimes
 *			not done on a retail basis, in which case no lock is required.
 *		- Due to the above, a process accessing its active transaction's
 *			list always uses a shared lock, regardless of whether it is
 *			walking or maintaining the list.  This improves concurrency
 *			for the common access patterns.
 *		- A process which needs to alter the list of a transaction other
 *			than its own active transaction must acquire an exclusive
 *			lock.
 *
 *	PredicateLockHashPartitionLock(hashcode)
 *		- The same lock protects a target, all locks on that target, and
 *			the linked list of locks on the target.
 *		- When more than one is needed, acquire in ascending address order.
 *		- When all are needed (rare), acquire in ascending index order with
 *			PredicateLockHashPartitionLockByIndex(index).
 *
 *	SerializableXactHashLock
 *		- Protects both PredXact and SerializableXidHash.
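 *		- For example (a sketch of the pattern used in this file), code
 *			which needs a partition lock together with this lock acquires
 *			them in the order given above and releases them in reverse:
 *
 *				LWLockAcquire(PredicateLockHashPartitionLock(hash), LW_SHARED);
 *				LWLockAcquire(SerializableXactHashLock, LW_SHARED);
 *				...
 *				LWLockRelease(SerializableXactHashLock);
 *				LWLockRelease(PredicateLockHashPartitionLock(hash));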
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/lmgr/predicate.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *
 * housekeeping for setting up shared memory predicate lock structures
 *		InitPredicateLocks(void)
 *		PredicateLockShmemSize(void)
 *
 * predicate lock reporting
 *		GetPredicateLockStatusData(void)
 *		PageIsPredicateLocked(Relation relation, BlockNumber blkno)
 *
 * predicate lock maintenance
 *		GetSerializableTransactionSnapshot(Snapshot snapshot)
 *		SetSerializableTransactionSnapshot(Snapshot snapshot,
 *										   VirtualTransactionId *sourcevxid)
 *		RegisterPredicateLockingXid(void)
 *		PredicateLockRelation(Relation relation, Snapshot snapshot)
 *		PredicateLockPage(Relation relation, BlockNumber blkno,
 *						Snapshot snapshot)
 *		PredicateLockTuple(Relation relation, HeapTuple tuple,
 *						Snapshot snapshot)
 *		PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
 *							   BlockNumber newblkno)
 *		PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
 *								 BlockNumber newblkno)
 *		TransferPredicateLocksToHeapRelation(Relation relation)
 *		ReleasePredicateLocks(bool isCommit)
 *
 * conflict detection (may also trigger rollback)
 *		CheckForSerializableConflictOut(bool visible, Relation relation,
 *										HeapTupleData *tup, Buffer buffer,
 *										Snapshot snapshot)
 *		CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
 *									   Buffer buffer)
 *		CheckTableForSerializableConflictIn(Relation relation)
 *
 * final rollback checking
 *		PreCommit_CheckForSerializationFailure(void)
 *
 * two-phase commit support
 *		AtPrepare_PredicateLocks(void);
 *		PostPrepare_PredicateLocks(TransactionId xid);
 *		PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
 *		predicatelock_twophase_recover(TransactionId xid, uint16 info,
 *									   void *recdata, uint32 len);
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "access/slru.h"
#include "access/subtrans.h"
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/predicate.h"
#include "storage/predicate_internals.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"

/* Uncomment the next line to test the graceful degradation code. */
/* #define TEST_OLDSERXID */
/*
 * Test the most selective fields first, for performance.
 *
 * a is covered by b if all of the following hold:
 *	1) a.database = b.database
 *	2) a.relation = b.relation
 *	3) b.offset is invalid (b is page-granularity or higher)
 *	4) either of the following:
 *		4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
 *	 or 4b) a.offset is invalid and b.page is invalid (a is
 *			page-granularity and b is relation-granularity)
 */
#define TargetTagIsCoveredBy(covered_target, covering_target)			\
	((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */	\
	  GET_PREDICATELOCKTARGETTAG_RELATION(covering_target))				\
	 && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) ==			\
		 InvalidOffsetNumber)								 /* (3) */	\
	 && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) !=			\
		   InvalidOffsetNumber)								 /* (4a) */ \
		  && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==		\
			  GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)))			\
		 || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) ==		\
			  InvalidBlockNumber)							 /* (4b) */ \
			 && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target)		\
				 != InvalidBlockNumber)))								\
	 && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) ==	 /* (1) */	\
		 GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
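
/*
 * For example (a sketch using the tag-setting macros from
 * predicate_internals.h): a tuple-granularity tag is covered by a
 * page-granularity tag for the same page, but not vice versa.
 *
 *	PREDICATELOCKTARGETTAG tupletag;
 *	PREDICATELOCKTARGETTAG pagetag;
 *
 *	SET_PREDICATELOCKTARGETTAG_TUPLE(tupletag, dbId, relId, blkno, offnum);
 *	SET_PREDICATELOCKTARGETTAG_PAGE(pagetag, dbId, relId, blkno);
 *	Assert(TargetTagIsCoveredBy(tupletag, pagetag));
 *	Assert(!TargetTagIsCoveredBy(pagetag, tupletag));
 */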

/*
 * The predicate locking target and lock shared hash tables are partitioned to
 * reduce contention.  To determine which partition a given target belongs to,
 * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
 * apply one of these macros.
 * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
 */
#define PredicateLockHashPartition(hashcode) \
	((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
#define PredicateLockHashPartitionLock(hashcode) \
	(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
		PredicateLockHashPartition(hashcode)].lock)
#define PredicateLockHashPartitionLockByIndex(i) \
	(&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
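
/*
 * Typical usage (a sketch of the pattern used throughout this file): hash
 * the target tag once, then use that hash both to choose the partition
 * lock and to probe the hash table.
 *
 *	uint32	targettaghash = PredicateLockTargetTagHashCode(&targettag);
 *	LWLock *partitionLock = PredicateLockHashPartitionLock(targettaghash);
 *
 *	LWLockAcquire(partitionLock, LW_SHARED);
 *	target = (PREDICATELOCKTARGET *)
 *		hash_search_with_hash_value(PredicateLockTargetHash, &targettag,
 *									targettaghash, HASH_FIND, NULL);
 *	...
 *	LWLockRelease(partitionLock);
 */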

#define NPREDICATELOCKTARGETENTS() \
	mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))

#define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))

/*
 * Note that a sxact is marked "prepared" once it has passed
 * PreCommit_CheckForSerializationFailure, even if it isn't using
 * 2PC. This is the point at which it can no longer be aborted.
 *
 * The PREPARED flag remains set after commit, so SxactIsCommitted
 * implies SxactIsPrepared.
 */
#define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
#define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
#define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
#define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
#define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
#define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
#define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
/*
 * The following macro actually means that the specified transaction has a
 * conflict out *to a transaction which committed ahead of it*.  It's hard
 * to get that into a name of a reasonable length.
 */
#define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
#define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
#define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
#define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)

/*
 * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
 *
 * To avoid unnecessary recomputations of the hash code, we try to do this
 * just once per function, and then pass it around as needed.  Aside from
 * passing the hashcode to hash_search_with_hash_value(), we can extract
 * the lock partition number from the hashcode.
 */
#define PredicateLockTargetTagHashCode(predicatelocktargettag) \
	get_hash_value(PredicateLockTargetHash, predicatelocktargettag)

/*
 * Given a predicate lock tag, and the hash for its target,
 * compute the lock hash.
 *
 * To make the hash code also depend on the transaction, we xor the sxid
 * struct's address into the hash code, left-shifted so that the
 * partition-number bits don't change.  Since this is only a hash, we
 * don't care if we lose high-order bits of the address; use an
 * intermediate variable to suppress cast-pointer-to-int warnings.
 */
#define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
	((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
	 << LOG2_NUM_PREDICATELOCK_PARTITIONS)


/*
 * The SLRU buffer area through which we access the old xids.
 */
static SlruCtlData OldSerXidSlruCtlData;

#define OldSerXidSlruCtl			(&OldSerXidSlruCtlData)

#define OLDSERXID_PAGESIZE			BLCKSZ
#define OLDSERXID_ENTRYSIZE			sizeof(SerCommitSeqNo)
#define OLDSERXID_ENTRIESPERPAGE	(OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)

/*
 * Set maximum pages based on the lesser of the number needed to track all
 * transactions and the maximum that SLRU supports.
 */
#define OLDSERXID_MAX_PAGE			Min(SLRU_PAGES_PER_SEGMENT * 0x10000 - 1, \
										(MaxTransactionId) / OLDSERXID_ENTRIESPERPAGE)

#define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)

#define OldSerXidValue(slotno, xid) (*((SerCommitSeqNo *) \
	(OldSerXidSlruCtl->shared->page_buffer[slotno] + \
	((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))

#define OldSerXidPage(xid)	((((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE) % (OLDSERXID_MAX_PAGE + 1))
#define OldSerXidSegment(page)	((page) / SLRU_PAGES_PER_SEGMENT)
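
/*
 * For instance, with the default BLCKSZ of 8192 each entry is an 8-byte
 * SerCommitSeqNo, so OLDSERXID_ENTRIESPERPAGE is 1024: xid 10000 maps to
 * entry 10000 % 1024 = 784 on page 10000 / 1024 = 9 (modulo
 * OLDSERXID_MAX_PAGE + 1).
 */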

typedef struct OldSerXidControlData
{
	int			headPage;		/* newest initialized page */
	TransactionId headXid;		/* newest valid Xid in the SLRU */
	TransactionId tailXid;		/* oldest xmin we might be interested in */
	bool		warningIssued;	/* have we issued SLRU wrap-around warning? */
}			OldSerXidControlData;

typedef struct OldSerXidControlData *OldSerXidControl;

static OldSerXidControl oldSerXidControl;

/*
 * When the oldest committed transaction on the "finished" list is moved to
 * SLRU, its predicate locks will be moved to this "dummy" transaction,
 * collapsing duplicate targets.  When a duplicate is found, the later
 * commitSeqNo is used.
 */
static SERIALIZABLEXACT *OldCommittedSxact;


/*
 * These configuration variables are used to set the predicate lock table size
 * and to control promotion of predicate locks to coarser granularity in an
 * attempt to degrade performance gracefully (mostly as false positive
 * serialization failures) in the face of memory pressure.
 */
int			max_predicate_locks_per_xact;	/* set by guc.c */
int			max_predicate_locks_per_relation;	/* set by guc.c */
int			max_predicate_locks_per_page;	/* set by guc.c */

/*
 * This provides a list of objects used to track transactions participating
 * in predicate locking.  Entries in the list are fixed size, and reside in
 * shared memory.  The memory address of an entry must remain fixed during
 * its lifetime.  The list will be protected from concurrent update
 * externally; no provision is made in this code to manage that.  The number
 * of entries in the list, and the size allowed for each entry is fixed upon
 * creation.
 */
static PredXactList PredXact;

/*
 * This provides a pool of RWConflict data elements to use in conflict lists
 * between transactions.
 */
static RWConflictPoolHeader RWConflictPool;

/*
 * The predicate locking hash tables are in shared memory.
 * Each backend keeps pointers to them.
 */
static HTAB *SerializableXidHash;
static HTAB *PredicateLockTargetHash;
static HTAB *PredicateLockHash;
static SHM_QUEUE *FinishedSerializableTransactions;

/*
 * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
 * this entry, you can ensure that there's enough scratch space available for
 * inserting one entry in the hash table. This is an otherwise-invalid tag.
 */
static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
static uint32 ScratchTargetTagHash;
static LWLock *ScratchPartitionLock;
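
/*
 * Typical pattern (a sketch): callers which must not fail partway through
 * remove the scratch entry up front so that a subsequent HASH_ENTER is
 * guaranteed to find room, then restore it when done.
 *
 *	RemoveScratchTarget(false);
 *	... insert into PredicateLockTargetHash; cannot run out of space ...
 *	RestoreScratchTarget(false);
 */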

/*
 * The local hash table used to determine when to combine multiple fine-
 * grained locks into a single coarser-grained lock.
 */
static HTAB *LocalPredicateLockHash = NULL;

/*
 * Keep a pointer to the currently-running serializable transaction (if any)
 * for quick reference. Also, remember if we have written anything that could
 * cause a rw-conflict.
 */
static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
static bool MyXactDidWrite = false;

/* local functions */

static SERIALIZABLEXACT *CreatePredXact(void);
static void ReleasePredXact(SERIALIZABLEXACT *sxact);
static SERIALIZABLEXACT *FirstPredXact(void);
static SERIALIZABLEXACT *NextPredXact(SERIALIZABLEXACT *sxact);

static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
static void ReleaseRWConflict(RWConflict conflict);
static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);

static bool OldSerXidPagePrecedesLogically(int page1, int page2);
static void OldSerXidInit(void);
static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
static SerCommitSeqNo OldSerXidGetMinConflictCommitSeqNo(TransactionId xid);
static void OldSerXidSetActiveSerXmin(TransactionId xid);

static uint32 predicatelock_hash(const void *key, Size keysize);
static void SummarizeOldestCommittedSxact(void);
static Snapshot GetSafeSnapshot(Snapshot snapshot);
static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
									  VirtualTransactionId *sourcevxid,
									  int sourcepid);
static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
						  PREDICATELOCKTARGETTAG *parent);
static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
static void RemoveScratchTarget(bool lockheld);
static void RestoreScratchTarget(bool lockheld);
static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
						   uint32 targettaghash);
static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
static int	MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag);
static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag);
static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
					uint32 targettaghash,
					SERIALIZABLEXACT *sxact);
static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
								  PREDICATELOCKTARGETTAG newtargettag,
								  bool removeOld);
static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
static void DropAllPredicateLocksFromTable(Relation relation,
							   bool transfer);
static void SetNewSxactGlobalXmin(void);
static void ClearOldPredicateLocks(void);
static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
						   bool summarize);
static bool XidIsConcurrent(TransactionId xid);
static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
										SERIALIZABLEXACT *writer);


/*------------------------------------------------------------------------*/

/*
 * Does this relation participate in predicate locking? Temporary and system
 * relations are exempt, as are materialized views.
 */
static inline bool
PredicateLockingNeededForRelation(Relation relation)
{
	return !(relation->rd_id < FirstBootstrapObjectId ||
			 RelationUsesLocalBuffers(relation) ||
			 relation->rd_rel->relkind == RELKIND_MATVIEW);
}

/*
 * When a public interface method is called for a read, this is the test to
 * see if we should do a quick return.
 *
 * Note: this function has side-effects! If this transaction has been flagged
 * as RO-safe since the last call, we release all predicate locks and reset
 * MySerializableXact. That allows subsequent calls to return quickly.
 *
 * This is marked as 'inline' to eliminate the function call overhead in the
 * common case that serialization is not needed.
 */
static inline bool
SerializationNeededForRead(Relation relation, Snapshot snapshot)
{
	/* Nothing to do if this is not a serializable transaction */
	if (MySerializableXact == InvalidSerializableXact)
		return false;

	/*
	 * Don't acquire locks or conflict when scanning with a special snapshot.
	 * This excludes things like CLUSTER and REINDEX. They use the wholesale
	 * functions TransferPredicateLocksToHeapRelation() and
	 * CheckTableForSerializableConflictIn() to participate in serialization,
	 * but the scans involved don't need serialization.
	 */
	if (!IsMVCCSnapshot(snapshot))
		return false;

	/*
	 * Check if we have just become "RO-safe". If we have, immediately release
	 * all locks as they're not needed anymore. This also resets
	 * MySerializableXact, so that subsequent calls to this function can exit
	 * quickly.
	 *
	 * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
	 * commit without having conflicts out to an earlier snapshot, thus
	 * ensuring that no conflicts are possible for this transaction.
	 */
	if (SxactIsROSafe(MySerializableXact))
	{
		ReleasePredicateLocks(false);
		return false;
	}

	/* Check if the relation doesn't participate in predicate locking */
	if (!PredicateLockingNeededForRelation(relation))
		return false;

	return true;				/* no excuse to skip predicate locking */
}
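
/*
 * A sketch of the typical read-side caller (modeled on the
 * PredicateLockPage() interface routine): make the cheap applicability
 * test first, and only then build a target tag and acquire the lock.
 *
 *	void
 *	PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
 *	{
 *		PREDICATELOCKTARGETTAG tag;
 *
 *		if (!SerializationNeededForRead(relation, snapshot))
 *			return;
 *
 *		SET_PREDICATELOCKTARGETTAG_PAGE(tag,
 *										relation->rd_node.dbNode,
 *										relation->rd_id,
 *										blkno);
 *		PredicateLockAcquire(&tag);
 *	}
 */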

/*
 * Like SerializationNeededForRead(), but called on writes.
 * The logic is the same, but there is no snapshot and we can't be RO-safe.
 */
static inline bool
SerializationNeededForWrite(Relation relation)
{
	/* Nothing to do if this is not a serializable transaction */
	if (MySerializableXact == InvalidSerializableXact)
		return false;

	/* Check if the relation doesn't participate in predicate locking */
	if (!PredicateLockingNeededForRelation(relation))
		return false;

	return true;				/* no excuse to skip predicate locking */
}


/*------------------------------------------------------------------------*/

/*
 * These functions are a simple implementation of a list for this specific
 * type of struct.  If there is ever a generalized shared memory list, we
 * should probably switch to that.
 */
static SERIALIZABLEXACT *
CreatePredXact(void)
{
	PredXactListElement ptle;

	ptle = (PredXactListElement)
		SHMQueueNext(&PredXact->availableList,
					 &PredXact->availableList,
					 offsetof(PredXactListElementData, link));
	if (!ptle)
		return NULL;

	SHMQueueDelete(&ptle->link);
	SHMQueueInsertBefore(&PredXact->activeList, &ptle->link);
	return &ptle->sxact;
}

static void
ReleasePredXact(SERIALIZABLEXACT *sxact)
{
	PredXactListElement ptle;

	Assert(ShmemAddrIsValid(sxact));

	ptle = (PredXactListElement)
		(((char *) sxact)
		 - offsetof(PredXactListElementData, sxact)
		 + offsetof(PredXactListElementData, link));
	SHMQueueDelete(&ptle->link);
	SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
}

static SERIALIZABLEXACT *
FirstPredXact(void)
{
	PredXactListElement ptle;

	ptle = (PredXactListElement)
		SHMQueueNext(&PredXact->activeList,
					 &PredXact->activeList,
					 offsetof(PredXactListElementData, link));
	if (!ptle)
		return NULL;

	return &ptle->sxact;
}

static SERIALIZABLEXACT *
NextPredXact(SERIALIZABLEXACT *sxact)
{
	PredXactListElement ptle;

	Assert(ShmemAddrIsValid(sxact));

	ptle = (PredXactListElement)
		(((char *) sxact)
		 - offsetof(PredXactListElementData, sxact)
		 + offsetof(PredXactListElementData, link));
	ptle = (PredXactListElement)
		SHMQueueNext(&PredXact->activeList,
					 &ptle->link,
					 offsetof(PredXactListElementData, link));
	if (!ptle)
		return NULL;

	return &ptle->sxact;
}
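
/*
 * Walking the active list then follows the usual pattern (a sketch):
 *
 *	SERIALIZABLEXACT *sxact;
 *
 *	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
 *		... examine *sxact ...
 */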

/*------------------------------------------------------------------------*/

/*
 * These functions manage primitive access to the RWConflict pool and lists.
 */
static bool
RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
{
	RWConflict	conflict;

	Assert(reader != writer);

	/* Check the ends of the purported conflict first. */
	if (SxactIsDoomed(reader)
		|| SxactIsDoomed(writer)
		|| SHMQueueEmpty(&reader->outConflicts)
		|| SHMQueueEmpty(&writer->inConflicts))
		return false;

	/* A conflict is possible; walk the list to find out. */
	conflict = (RWConflict)
		SHMQueueNext(&reader->outConflicts,
					 &reader->outConflicts,
					 offsetof(RWConflictData, outLink));
	while (conflict)
	{
		if (conflict->sxactIn == writer)
			return true;
		conflict = (RWConflict)
			SHMQueueNext(&reader->outConflicts,
						 &conflict->outLink,
						 offsetof(RWConflictData, outLink));
	}

	/* No conflict found. */
	return false;
}

static void
SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
{
	RWConflict	conflict;

	Assert(reader != writer);
	Assert(!RWConflictExists(reader, writer));

	conflict = (RWConflict)
		SHMQueueNext(&RWConflictPool->availableList,
					 &RWConflictPool->availableList,
					 offsetof(RWConflictData, outLink));
	if (!conflict)
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
				 errhint("You might need to run fewer transactions at a time or increase max_connections.")));

	SHMQueueDelete(&conflict->outLink);

	conflict->sxactOut = reader;
	conflict->sxactIn = writer;
	SHMQueueInsertBefore(&reader->outConflicts, &conflict->outLink);
	SHMQueueInsertBefore(&writer->inConflicts, &conflict->inLink);
}

static void
SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
						  SERIALIZABLEXACT *activeXact)
{
	RWConflict	conflict;

	Assert(roXact != activeXact);
	Assert(SxactIsReadOnly(roXact));
	Assert(!SxactIsReadOnly(activeXact));

	conflict = (RWConflict)
		SHMQueueNext(&RWConflictPool->availableList,
					 &RWConflictPool->availableList,
					 offsetof(RWConflictData, outLink));
	if (!conflict)
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
				 errhint("You might need to run fewer transactions at a time or increase max_connections.")));

	SHMQueueDelete(&conflict->outLink);

	conflict->sxactOut = activeXact;
	conflict->sxactIn = roXact;
	SHMQueueInsertBefore(&activeXact->possibleUnsafeConflicts,
						 &conflict->outLink);
	SHMQueueInsertBefore(&roXact->possibleUnsafeConflicts,
						 &conflict->inLink);
}

static void
ReleaseRWConflict(RWConflict conflict)
{
	SHMQueueDelete(&conflict->inLink);
	SHMQueueDelete(&conflict->outLink);
	SHMQueueInsertBefore(&RWConflictPool->availableList, &conflict->outLink);
}

static void
FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
{
	RWConflict	conflict,
				nextConflict;

	Assert(SxactIsReadOnly(sxact));
	Assert(!SxactIsROSafe(sxact));

	sxact->flags |= SXACT_FLAG_RO_UNSAFE;

	/*
	 * We know this isn't a safe snapshot, so we can stop looking for other
	 * potential conflicts.
	 */
	conflict = (RWConflict)
		SHMQueueNext(&sxact->possibleUnsafeConflicts,
					 &sxact->possibleUnsafeConflicts,
					 offsetof(RWConflictData, inLink));
	while (conflict)
	{
		nextConflict = (RWConflict)
			SHMQueueNext(&sxact->possibleUnsafeConflicts,
						 &conflict->inLink,
						 offsetof(RWConflictData, inLink));

		Assert(!SxactIsReadOnly(conflict->sxactOut));
		Assert(sxact == conflict->sxactIn);

		ReleaseRWConflict(conflict);

		conflict = nextConflict;
	}
}

/*------------------------------------------------------------------------*/

/*
 * Decide whether an OldSerXid page number is "older" for truncation purposes.
 * Analogous to CLOGPagePrecedes().
 */
static bool
OldSerXidPagePrecedesLogically(int page1, int page2)
{
	TransactionId xid1;
	TransactionId xid2;

	xid1 = ((TransactionId) page1) * OLDSERXID_ENTRIESPERPAGE;
	xid1 += FirstNormalTransactionId + 1;
	xid2 = ((TransactionId) page2) * OLDSERXID_ENTRIESPERPAGE;
	xid2 += FirstNormalTransactionId + 1;

	return (TransactionIdPrecedes(xid1, xid2) &&
			TransactionIdPrecedes(xid1, xid2 + OLDSERXID_ENTRIESPERPAGE - 1));
}
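
/*
 * For example, with 1024 entries per page, comparing page 0 against page 1
 * tests the representative xids 4 and 1028: page 0 "precedes" only because
 * 4 precedes both 1028 and 1028 + 1023 in modulo-2^32 xid arithmetic.
 * Near wraparound one of the two tests can fail, and the page is then not
 * treated as older.
 */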

#ifdef USE_ASSERT_CHECKING
static void
OldSerXidPagePrecedesLogicallyUnitTests(void)
{
	int			per_page = OLDSERXID_ENTRIESPERPAGE,
				offset = per_page / 2;
	int			newestPage,
				oldestPage,
				headPage,
				targetPage;
	TransactionId newestXact,
				oldestXact;

	/* GetNewTransactionId() has assigned the last XID it can safely use. */
	newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;	/* nothing special */
	newestXact = newestPage * per_page + offset;
	Assert(newestXact / per_page == newestPage);
	oldestXact = newestXact + 1;
	oldestXact -= 1U << 31;
	oldestPage = oldestXact / per_page;

	/*
	 * In this scenario, the SLRU headPage pertains to the last ~1000 XIDs
	 * assigned.  oldestXact finishes, ~2B XIDs having elapsed since it
	 * started.  Further transactions cause us to summarize oldestXact to
	 * tailPage.  Function must return false so OldSerXidAdd() doesn't zero
	 * tailPage (which may contain entries for other old, recently-finished
	 * XIDs) and half the SLRU.  Reaching this requires burning ~2B XIDs in
	 * single-user mode, a negligible possibility.
	 */
	headPage = newestPage;
	targetPage = oldestPage;
	Assert(!OldSerXidPagePrecedesLogically(headPage, targetPage));

	/*
	 * In this scenario, the SLRU headPage pertains to oldestXact.  We're
	 * summarizing an XID near newestXact.  (Assume few other XIDs used
	 * SERIALIZABLE, hence the minimal headPage advancement.  Assume
	 * oldestXact was long-running and only recently reached the SLRU.)
	 * Function must return true to make OldSerXidAdd() create targetPage.
	 *
	 * Today's implementation mishandles this case, but it doesn't matter
	 * enough to fix.  Verify that the defect affects just one page by
	 * asserting correct treatment of its prior page.  Reaching this case
	 * requires burning ~2B XIDs in single-user mode, a negligible
	 * possibility.  Moreover, if it does happen, the consequence would be
	 * mild, namely a new transaction failing in SimpleLruReadPage().
	 */
	headPage = oldestPage;
	targetPage = newestPage;
	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage - 1));
#if 0
	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage));
#endif
}
#endif

/*
 * Initialize for the tracking of old serializable committed xids.
 */
static void
OldSerXidInit(void)
{
	bool		found;

	/*
	 * Set up SLRU management of the pg_serial data.
	 */
	OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
	SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
				  NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial",
				  LWTRANCHE_OLDSERXID_BUFFERS);
	/* Override default assumption that writes should be fsync'd */
	OldSerXidSlruCtl->do_fsync = false;
#ifdef USE_ASSERT_CHECKING
	OldSerXidPagePrecedesLogicallyUnitTests();
#endif
	SlruPagePrecedesUnitTests(OldSerXidSlruCtl, OLDSERXID_ENTRIESPERPAGE);

	/*
	 * Create or attach to the OldSerXidControl structure.
	 */
	oldSerXidControl = (OldSerXidControl)
		ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);

	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		/*
		 * Set control information to reflect empty SLRU.
		 */
		oldSerXidControl->headPage = -1;
		oldSerXidControl->headXid = InvalidTransactionId;
		oldSerXidControl->tailXid = InvalidTransactionId;
		oldSerXidControl->warningIssued = false;
	}
}

/*
 * Record a committed read-write serializable xid and the minimum
 * commitSeqNo of any transactions to which this xid had a rw-conflict out.
 * An invalid seqNo means that there were no conflicts out from xid.
 */
static void
OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
{
	TransactionId tailXid;
	int			targetPage;
	int			slotno;
	int			firstZeroPage;
	bool		isNewPage;

	Assert(TransactionIdIsValid(xid));

	targetPage = OldSerXidPage(xid);

	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/*
	 * If no serializable transactions are active, there shouldn't be anything
	 * to push out to the SLRU.  Hitting this assert would mean there's
	 * something wrong with the earlier cleanup logic.
	 */
	tailXid = oldSerXidControl->tailXid;
	Assert(TransactionIdIsValid(tailXid));

	/*
	 * If the SLRU is currently unused, zero out the whole active region from
	 * tailXid to headXid before taking it into use. Otherwise zero out only
	 * any new pages that enter the tailXid-headXid range as we advance
	 * headXid.
	 */
	if (oldSerXidControl->headPage < 0)
	{
		firstZeroPage = OldSerXidPage(tailXid);
		isNewPage = true;
	}
	else
	{
		firstZeroPage = OldSerXidNextPage(oldSerXidControl->headPage);
		isNewPage = OldSerXidPagePrecedesLogically(oldSerXidControl->headPage,
												   targetPage);
	}

	if (!TransactionIdIsValid(oldSerXidControl->headXid)
		|| TransactionIdFollows(xid, oldSerXidControl->headXid))
		oldSerXidControl->headXid = xid;
	if (isNewPage)
		oldSerXidControl->headPage = targetPage;

	/*
	 * Give a warning if we're about to run out of SLRU pages.
	 *
	 * slru.c has a maximum of 64k segments, with 32 (SLRU_PAGES_PER_SEGMENT)
	 * pages each. We need to store a 64-bit integer for each Xid, and with
	 * default 8k block size, 65536*32 pages is only enough to cover 2^30
	 * XIDs. If we're about to hit that limit and wrap around, warn the user.
	 *
	 * To avoid spamming the user, we only give one warning when we've used 1
	 * billion XIDs, and stay silent until the situation is fixed and the
	 * number of XIDs used falls below 800 million again.
	 *
	 * XXX: We have no safeguard to actually *prevent* the wrap-around,
	 * though. All you get is a warning.
	 */
	if (oldSerXidControl->warningIssued)
	{
		TransactionId lowWatermark;

		lowWatermark = tailXid + 800000000;
		if (lowWatermark < FirstNormalTransactionId)
			lowWatermark = FirstNormalTransactionId;
		if (TransactionIdPrecedes(xid, lowWatermark))
			oldSerXidControl->warningIssued = false;
	}
	else
	{
		TransactionId highWatermark;

		highWatermark = tailXid + 1000000000;
		if (highWatermark < FirstNormalTransactionId)
			highWatermark = FirstNormalTransactionId;
		if (TransactionIdFollows(xid, highWatermark))
		{
			oldSerXidControl->warningIssued = true;
			ereport(WARNING,
					(errmsg("memory for serializable conflict tracking is nearly exhausted"),
					 errhint("There might be an idle transaction or a forgotten prepared transaction causing this.")));
		}
	}

	if (isNewPage)
	{
		/* Initialize intervening pages. */
		while (firstZeroPage != targetPage)
		{
			(void) SimpleLruZeroPage(OldSerXidSlruCtl, firstZeroPage);
			firstZeroPage = OldSerXidNextPage(firstZeroPage);
		}
		slotno = SimpleLruZeroPage(OldSerXidSlruCtl, targetPage);
	}
	else
		slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);

	OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
	OldSerXidSlruCtl->shared->page_dirty[slotno] = true;

	LWLockRelease(OldSerXidLock);
}

/*
 * Get the minimum commitSeqNo for any conflict out for the given xid.  For
 * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
 * will be returned.
 */
static SerCommitSeqNo
OldSerXidGetMinConflictCommitSeqNo(TransactionId xid)
{
	TransactionId headXid;
	TransactionId tailXid;
	SerCommitSeqNo val;
	int			slotno;

	Assert(TransactionIdIsValid(xid));

	LWLockAcquire(OldSerXidLock, LW_SHARED);
	headXid = oldSerXidControl->headXid;
	tailXid = oldSerXidControl->tailXid;
	LWLockRelease(OldSerXidLock);

	if (!TransactionIdIsValid(headXid))
		return 0;

	Assert(TransactionIdIsValid(tailXid));

	if (TransactionIdPrecedes(xid, tailXid)
		|| TransactionIdFollows(xid, headXid))
		return 0;

	/*
	 * The following function must be called without holding OldSerXidLock,
	 * but will return with that lock held, which must then be released.
	 */
	slotno = SimpleLruReadPage_ReadOnly(OldSerXidSlruCtl,
										OldSerXidPage(xid), xid);
	val = OldSerXidValue(slotno, xid);
	LWLockRelease(OldSerXidLock);
	return val;
}

/*
 * Call this whenever there is a new xmin for active serializable
 * transactions.  We don't need to keep information on transactions which
 * precede that.  InvalidTransactionId means none active, so everything in
 * the SLRU can be discarded.
 */
static void
OldSerXidSetActiveSerXmin(TransactionId xid)
{
	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/*
	 * When no sxacts are active, nothing overlaps; set the xid values to
	 * invalid to show that there are no valid entries.  Don't clear headPage,
	 * though.  A new xmin might still land on that page, and we don't want to
	 * repeatedly zero out the same page.
	 */
	if (!TransactionIdIsValid(xid))
	{
		oldSerXidControl->tailXid = InvalidTransactionId;
		oldSerXidControl->headXid = InvalidTransactionId;
		LWLockRelease(OldSerXidLock);
		return;
	}

	/*
	 * When we're recovering prepared transactions, the global xmin might move
	 * backwards depending on the order they're recovered. Normally that's not
	 * OK, but during recovery no serializable transactions will commit, so
	 * the SLRU is empty and we can get away with it.
	 */
	if (RecoveryInProgress())
	{
		Assert(oldSerXidControl->headPage < 0);
		if (!TransactionIdIsValid(oldSerXidControl->tailXid)
			|| TransactionIdPrecedes(xid, oldSerXidControl->tailXid))
		{
			oldSerXidControl->tailXid = xid;
		}
		LWLockRelease(OldSerXidLock);
		return;
	}

	Assert(!TransactionIdIsValid(oldSerXidControl->tailXid)
		   || TransactionIdFollows(xid, oldSerXidControl->tailXid));

	oldSerXidControl->tailXid = xid;

	LWLockRelease(OldSerXidLock);
}

/*
 * Perform a checkpoint --- either during shutdown, or on-the-fly
 *
 * We don't have any data that needs to survive a restart, but this is a
 * convenient place to truncate the SLRU.
 */
void
CheckPointPredicate(void)
{
	int			tailPage;

	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/* Exit quickly if the SLRU is currently not in use. */
	if (oldSerXidControl->headPage < 0)
	{
		LWLockRelease(OldSerXidLock);
		return;
	}

	if (TransactionIdIsValid(oldSerXidControl->tailXid))
	{
		/* We can truncate the SLRU up to the page containing tailXid */
		tailPage = OldSerXidPage(oldSerXidControl->tailXid);
	}
	else
	{
		/*----------
		 * The SLRU is no longer needed. Truncate to head before we set head
		 * invalid.
		 *
		 * XXX: It's possible that the SLRU is not needed again until XID
		 * wrap-around has happened, so that the segment containing headPage
		 * that we leave behind will appear to be new again. In that case it
		 * won't be removed until XID horizon advances enough to make it
		 * current again.
		 *
		 * XXX: This should happen in vac_truncate_clog(), not in checkpoints.
		 * Consider this scenario, starting from a system with no in-progress
		 * transactions and VACUUM FREEZE having maximized oldestXact:
		 * - Start a SERIALIZABLE transaction.
		 * - Start, finish, and summarize a SERIALIZABLE transaction, creating
		 *   one SLRU page.
		 * - Consume XIDs to reach xidStopLimit.
		 * - Finish all transactions.  Due to the long-running SERIALIZABLE
		 *   transaction, earlier checkpoints did not touch headPage.  The
		 *   next checkpoint will change it, but that checkpoint happens after
		 *   the end of the scenario.
		 * - VACUUM to advance XID limits.
		 * - Consume ~2M XIDs, crossing the former xidWrapLimit.
		 * - Start, finish, and summarize a SERIALIZABLE transaction.
		 *   OldSerXidAdd() declines to create the targetPage, because
		 *   headPage is not regarded as in the past relative to that
		 *   targetPage.  The transaction instigating the summarize fails in
		 *   SimpleLruReadPage().
		 */
		tailPage = oldSerXidControl->headPage;
		oldSerXidControl->headPage = -1;
	}

	LWLockRelease(OldSerXidLock);

	/* Truncate away pages that are no longer required */
	SimpleLruTruncate(OldSerXidSlruCtl, tailPage);

	/*
	 * Flush dirty SLRU pages to disk
	 *
	 * This is not actually necessary from a correctness point of view. We do
	 * it merely as a debugging aid.
	 *
	 * We're doing this after the truncation to avoid writing pages right
	 * before deleting the file in which they sit, which would be completely
	 * pointless.
	 */
	SimpleLruFlush(OldSerXidSlruCtl, true);
}

/*------------------------------------------------------------------------*/

/*
 * InitPredicateLocks -- Initialize the predicate locking data structures.
 *
 * This is called from CreateSharedMemoryAndSemaphores(), which see for
 * more comments.  In the normal postmaster case, the shared hash tables
 * are created here.  Backends inherit the pointers
 * to the shared tables via fork().  In the EXEC_BACKEND case, each
 * backend re-executes this code to obtain pointers to the already existing
 * shared hash tables.
 */
void
InitPredicateLocks(void)
{
	HASHCTL		info;
	long		max_table_size;
	Size		requestSize;
	bool		found;

#ifndef EXEC_BACKEND
	Assert(!IsUnderPostmaster);
#endif

	/*
	 * Compute size of predicate lock target hashtable. Note these
	 * calculations must agree with PredicateLockShmemSize!
	 */
	max_table_size = NPREDICATELOCKTARGETENTS();

	/*
	 * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
	 * per-predicate-lock-target information.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
	info.entrysize = sizeof(PREDICATELOCKTARGET);
	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;

	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
											max_table_size,
											max_table_size,
											&info,
											HASH_ELEM | HASH_BLOBS |
											HASH_PARTITION | HASH_FIXED_SIZE);

	/*
	 * Reserve a dummy entry in the hash table; we use it to make sure there's
	 * always one entry available when we need to split or combine a page,
	 * because running out of space there could mean aborting a
	 * non-serializable transaction.
	 */
	if (!IsUnderPostmaster)
	{
		(void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
						   HASH_ENTER, &found);
		Assert(!found);
	}

	/* Pre-calculate the hash and partition lock of the scratch entry */
	ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
	ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);

	/*
	 * Allocate hash table for PREDICATELOCK structs.  This stores
	 * per-xact-lock-of-a-target information.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(PREDICATELOCKTAG);
	info.entrysize = sizeof(PREDICATELOCK);
	info.hash = predicatelock_hash;
	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;

	/* Assume an average of 2 xacts per target */
	max_table_size *= 2;

	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
									  max_table_size,
									  max_table_size,
									  &info,
									  HASH_ELEM | HASH_FUNCTION |
									  HASH_PARTITION | HASH_FIXED_SIZE);

	/*
	 * Compute size for serializable transaction hashtable. Note these
	 * calculations must agree with PredicateLockShmemSize!
	 */
	max_table_size = (MaxBackends + max_prepared_xacts);

	/*
	 * Allocate a list to hold information on transactions participating in
	 * predicate locking.
	 *
	 * Assume an average of 10 predicate locking transactions per backend.
	 * This allows aggressive cleanup while detail is present before data must
	 * be summarized for storage in SLRU and the "dummy" transaction.
	 */
	max_table_size *= 10;

	PredXact = ShmemInitStruct("PredXactList",
							   PredXactListDataSize,
							   &found);
	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		int			i;

		SHMQueueInit(&PredXact->availableList);
		SHMQueueInit(&PredXact->activeList);
		PredXact->SxactGlobalXmin = InvalidTransactionId;
		PredXact->SxactGlobalXminCount = 0;
		PredXact->WritableSxactCount = 0;
		PredXact->LastSxactCommitSeqNo = FirstNormalSerCommitSeqNo - 1;
		PredXact->CanPartialClearThrough = 0;
		PredXact->HavePartialClearedThrough = 0;
		requestSize = mul_size((Size) max_table_size,
							   PredXactListElementDataSize);
		PredXact->element = ShmemAlloc(requestSize);
		/* Add all elements to available list, clean. */
		memset(PredXact->element, 0, requestSize);
		for (i = 0; i < max_table_size; i++)
		{
			SHMQueueInsertBefore(&(PredXact->availableList),
								 &(PredXact->element[i].link));
		}
		PredXact->OldCommittedSxact = CreatePredXact();
		SetInvalidVirtualTransactionId(PredXact->OldCommittedSxact->vxid);
		PredXact->OldCommittedSxact->prepareSeqNo = 0;
		PredXact->OldCommittedSxact->commitSeqNo = 0;
		PredXact->OldCommittedSxact->SeqNo.lastCommitBeforeSnapshot = 0;
		SHMQueueInit(&PredXact->OldCommittedSxact->outConflicts);
		SHMQueueInit(&PredXact->OldCommittedSxact->inConflicts);
		SHMQueueInit(&PredXact->OldCommittedSxact->predicateLocks);
		SHMQueueInit(&PredXact->OldCommittedSxact->finishedLink);
		SHMQueueInit(&PredXact->OldCommittedSxact->possibleUnsafeConflicts);
		PredXact->OldCommittedSxact->topXid = InvalidTransactionId;
		PredXact->OldCommittedSxact->finishedBefore = InvalidTransactionId;
		PredXact->OldCommittedSxact->xmin = InvalidTransactionId;
		PredXact->OldCommittedSxact->flags = SXACT_FLAG_COMMITTED;
		PredXact->OldCommittedSxact->pid = 0;
	}
	/* This never changes, so let's keep a local copy. */
	OldCommittedSxact = PredXact->OldCommittedSxact;

	/*
	 * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
	 * information for serializable transactions which have accessed data.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(SERIALIZABLEXIDTAG);
	info.entrysize = sizeof(SERIALIZABLEXID);

	SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
										max_table_size,
										max_table_size,
										&info,
										HASH_ELEM | HASH_BLOBS |
										HASH_FIXED_SIZE);

	/*
	 * Allocate space for tracking rw-conflicts in lists attached to the
	 * transactions.
	 *
	 * Assume an average of 5 conflicts per transaction.  Calculations suggest
	 * that this will prevent resource exhaustion in even the most pessimal
	 * loads up to max_connections = 200 with all 200 connections pounding the
	 * database with serializable transactions.  Beyond that, there may be
	 * occasional transactions canceled when trying to flag conflicts. That's
	 * probably OK.
	 */
	max_table_size *= 5;

	RWConflictPool = ShmemInitStruct("RWConflictPool",
									 RWConflictPoolHeaderDataSize,
									 &found);
	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		int			i;

		SHMQueueInit(&RWConflictPool->availableList);
		requestSize = mul_size((Size) max_table_size,
							   RWConflictDataSize);
		RWConflictPool->element = ShmemAlloc(requestSize);
		/* Add all elements to available list, clean. */
		memset(RWConflictPool->element, 0, requestSize);
		for (i = 0; i < max_table_size; i++)
		{
			SHMQueueInsertBefore(&(RWConflictPool->availableList),
								 &(RWConflictPool->element[i].outLink));
		}
	}

	/*
	 * Create or attach to the header for the list of finished serializable
	 * transactions.
	 */
	FinishedSerializableTransactions = (SHM_QUEUE *)
		ShmemInitStruct("FinishedSerializableTransactions",
						sizeof(SHM_QUEUE),
						&found);
	Assert(found == IsUnderPostmaster);
	if (!found)
		SHMQueueInit(FinishedSerializableTransactions);

	/*
	 * Initialize the SLRU storage for old committed serializable
	 * transactions.
	 */
	OldSerXidInit();
}
1381 
1382 /*
1383  * Estimate shared-memory space used for predicate lock table
1384  */
1385 Size
PredicateLockShmemSize(void)1386 PredicateLockShmemSize(void)
1387 {
1388 	Size		size = 0;
1389 	long		max_table_size;
1390 
1391 	/* predicate lock target hash table */
1392 	max_table_size = NPREDICATELOCKTARGETENTS();
1393 	size = add_size(size, hash_estimate_size(max_table_size,
1394 											 sizeof(PREDICATELOCKTARGET)));
1395 
1396 	/* predicate lock hash table */
1397 	max_table_size *= 2;
1398 	size = add_size(size, hash_estimate_size(max_table_size,
1399 											 sizeof(PREDICATELOCK)));
1400 
1401 	/*
1402 	 * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1403 	 * margin.
1404 	 */
1405 	size = add_size(size, size / 10);
1406 
1407 	/* transaction list */
1408 	max_table_size = MaxBackends + max_prepared_xacts;
1409 	max_table_size *= 10;
1410 	size = add_size(size, PredXactListDataSize);
1411 	size = add_size(size, mul_size((Size) max_table_size,
1412 								   PredXactListElementDataSize));
1413 
1414 	/* transaction xid table */
1415 	size = add_size(size, hash_estimate_size(max_table_size,
1416 											 sizeof(SERIALIZABLEXID)));
1417 
1418 	/* rw-conflict pool */
1419 	max_table_size *= 5;
1420 	size = add_size(size, RWConflictPoolHeaderDataSize);
1421 	size = add_size(size, mul_size((Size) max_table_size,
1422 								   RWConflictDataSize));
1423 
1424 	/* Head for list of finished serializable transactions. */
1425 	size = add_size(size, sizeof(SHM_QUEUE));
1426 
1427 	/* Shared memory structures for SLRU tracking of old committed xids. */
1428 	size = add_size(size, sizeof(OldSerXidControlData));
1429 	size = add_size(size, SimpleLruShmemSize(NUM_OLDSERXID_BUFFERS, 0));
1430 
1431 	return size;
1432 }
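/*
 * Illustrative arithmetic only (exact byte counts depend on struct sizes and
 * dynahash overhead): assuming NPREDICATELOCKTARGETENTS() works out to
 * max_predicate_locks_per_xact * (MaxBackends + max_prepared_xacts), then
 * with MaxBackends = 100, max_prepared_xacts = 0, and
 * max_predicate_locks_per_xact = 64, the target hash is estimated for 6400
 * entries and the lock hash for 12800 (both padded by the 10% margin), while
 * the transaction list gets 100 * 10 = 1000 elements and the rw-conflict
 * pool 1000 * 5 = 5000.
 */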
1433 
1434 
1435 /*
1436  * Compute the hash code associated with a PREDICATELOCKTAG.
1437  *
1438  * Because we want to use just one set of partition locks for both the
1439  * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1440  * that PREDICATELOCKs fall into the same partition number as their
1441  * associated PREDICATELOCKTARGETs.  dynahash.c expects the partition number
1442  * to be the low-order bits of the hash code, and therefore a
1443  * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1444  * associated PREDICATELOCKTARGETTAG's hash code.  We achieve this with this
1445  * specialized hash function.
1446  */
1447 static uint32
1448 predicatelock_hash(const void *key, Size keysize)
1449 {
1450 	const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1451 	uint32		targethash;
1452 
1453 	Assert(keysize == sizeof(PREDICATELOCKTAG));
1454 
1455 	/* Look into the associated target object, and compute its hash code */
1456 	targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1457 
1458 	return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1459 }
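/*
 * A sketch of the idea (the PredicateLockHashCodeFromTargetHashCode macro is
 * the authoritative definition): the xact portion of the tag is folded in
 * only above the partition-selection bits, roughly
 *
 *	lockhash = targethash ^ (xacthash << LOG2_NUM_PREDICATELOCK_PARTITIONS);
 *
 * so the low-order bits -- and therefore the partition number -- always
 * match those of the associated target's hash code.
 */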
1460 
1461 
1462 /*
1463  * GetPredicateLockStatusData
1464  *		Return a table containing the internal state of the predicate
1465  *		lock manager for use in pg_lock_status.
1466  *
1467  * Like GetLockStatusData, this function tries to hold the partition LWLocks
1468  * for as short a time as possible by returning two arrays that simply
1469  * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1470  * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1471  * SERIALIZABLEXACT will likely appear.
1472  */
1473 PredicateLockData *
1474 GetPredicateLockStatusData(void)
1475 {
1476 	PredicateLockData *data;
1477 	int			i;
1478 	int			els,
1479 				el;
1480 	HASH_SEQ_STATUS seqstat;
1481 	PREDICATELOCK *predlock;
1482 
1483 	data = (PredicateLockData *) palloc(sizeof(PredicateLockData));
1484 
1485 	/*
1486 	 * To ensure consistency, take simultaneous locks on all partition locks
1487 	 * in ascending order, then SerializableXactHashLock.
1488 	 */
1489 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
1490 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
1491 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1492 
1493 	/* Get number of locks and allocate appropriately-sized arrays. */
1494 	els = hash_get_num_entries(PredicateLockHash);
1495 	data->nelements = els;
1496 	data->locktags = (PREDICATELOCKTARGETTAG *)
1497 		palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
1498 	data->xacts = (SERIALIZABLEXACT *)
1499 		palloc(sizeof(SERIALIZABLEXACT) * els);
1500 
1501 
1502 	/* Scan through PredicateLockHash and copy contents */
1503 	hash_seq_init(&seqstat, PredicateLockHash);
1504 
1505 	el = 0;
1506 
1507 	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
1508 	{
1509 		data->locktags[el] = predlock->tag.myTarget->tag;
1510 		data->xacts[el] = *predlock->tag.myXact;
1511 		el++;
1512 	}
1513 
1514 	Assert(el == els);
1515 
1516 	/* Release locks in reverse order */
1517 	LWLockRelease(SerializableXactHashLock);
1518 	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
1519 		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
1520 
1521 	return data;
1522 }
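/*
 * Note that the arrays returned above hold copies of the SERIALIZABLEXACT
 * structures rather than pointers into shared memory, so the caller may
 * inspect them after the partition locks and SerializableXactHashLock have
 * been released.
 */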
1523 
1524 /*
1525  * Free up shared memory structures by pushing the oldest sxact (the one at
1526  * the front of the FinishedSerializableTransactions list) into summary form.
1527  * Each call will free exactly one SERIALIZABLEXACT structure and may also
1528  * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1529  * PREDICATELOCKTARGET, RWConflictData.
1530  */
1531 static void
1532 SummarizeOldestCommittedSxact(void)
1533 {
1534 	SERIALIZABLEXACT *sxact;
1535 
1536 	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
1537 
1538 	/*
1539 	 * This function is only called if there are no sxact slots available.
1540 	 * Some of them must belong to old, already-finished transactions, so
1541 	 * there should be something in the FinishedSerializableTransactions list that
1542 	 * we can summarize. However, there's a race condition: while we were not
1543 	 * holding any locks, a transaction might have ended and cleaned up all
1544 	 * the finished sxact entries already, freeing up their sxact slots. In
1545 	 * that case, we have nothing to do here. The caller will find one of the
1546 	 * slots released by the other backend when it retries.
1547 	 */
1548 	if (SHMQueueEmpty(FinishedSerializableTransactions))
1549 	{
1550 		LWLockRelease(SerializableFinishedListLock);
1551 		return;
1552 	}
1553 
1554 	/*
1555 	 * Grab the first sxact off the finished list -- this will be the earliest
1556 	 * commit.  Remove it from the list.
1557 	 */
1558 	sxact = (SERIALIZABLEXACT *)
1559 		SHMQueueNext(FinishedSerializableTransactions,
1560 					 FinishedSerializableTransactions,
1561 					 offsetof(SERIALIZABLEXACT, finishedLink));
1562 	SHMQueueDelete(&(sxact->finishedLink));
1563 
1564 	/* Add to SLRU summary information. */
1565 	if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
1566 		OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
1567 					 ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);
1568 
1569 	/* Summarize and release the detail. */
1570 	ReleaseOneSerializableXact(sxact, false, true);
1571 
1572 	LWLockRelease(SerializableFinishedListLock);
1573 }
1574 
1575 /*
1576  * GetSafeSnapshot
1577  *		Obtain and register a snapshot for a READ ONLY DEFERRABLE
1578  *		transaction. Ensures that the snapshot is "safe", i.e. a
1579  *		read-only transaction running on it can execute serializably
1580  *		without further checks. This requires waiting for concurrent
1581  *		transactions to complete, and retrying with a new snapshot if
1582  *		one of them could possibly create a conflict.
1583  *
1584  *		As with GetSerializableTransactionSnapshot (which this is a subroutine
1585  *		for), the passed-in Snapshot pointer should reference a static data
1586  *		area that can safely be passed to GetSnapshotData.
1587  */
1588 static Snapshot
1589 GetSafeSnapshot(Snapshot origSnapshot)
1590 {
1591 	Snapshot	snapshot;
1592 
1593 	Assert(XactReadOnly && XactDeferrable);
1594 
1595 	while (true)
1596 	{
1597 		/*
1598 		 * GetSerializableTransactionSnapshotInt is going to call
1599 		 * GetSnapshotData, so we need to provide it the static snapshot area
1600 		 * our caller passed to us.  The pointer returned is actually the same
1601 		 * one passed to it, but we avoid assuming that here.
1602 		 */
1603 		snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
1604 														 NULL, InvalidPid);
1605 
1606 		if (MySerializableXact == InvalidSerializableXact)
1607 			return snapshot;	/* no concurrent r/w xacts; it's safe */
1608 
1609 		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1610 
1611 		/*
1612 		 * Wait for concurrent transactions to finish. Stop early if one of
1613 		 * them marked us as conflicted.
1614 		 */
1615 		MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
1616 		while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
1617 				 SxactIsROUnsafe(MySerializableXact)))
1618 		{
1619 			LWLockRelease(SerializableXactHashLock);
1620 			ProcWaitForSignal(WAIT_EVENT_SAFE_SNAPSHOT);
1621 			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1622 		}
1623 		MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;
1624 
1625 		if (!SxactIsROUnsafe(MySerializableXact))
1626 		{
1627 			LWLockRelease(SerializableXactHashLock);
1628 			break;				/* success */
1629 		}
1630 
1631 		LWLockRelease(SerializableXactHashLock);
1632 
1633 		/* else, need to retry... */
1634 		ereport(DEBUG2,
1635 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1636 				 errmsg("deferrable snapshot was unsafe; trying a new one")));
1637 		ReleasePredicateLocks(false);
1638 	}
1639 
1640 	/*
1641 	 * Now we have a safe snapshot, so we don't need to do any further checks.
1642 	 */
1643 	Assert(SxactIsROSafe(MySerializableXact));
1644 	ReleasePredicateLocks(false);
1645 
1646 	return snapshot;
1647 }
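/*
 * For reference, a client reaches this code path with, e.g.:
 *
 *	BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;
 *
 * Such a transaction may block here while waiting for a safe snapshot, but
 * thereafter runs with no SSI overhead and no risk of serialization failure.
 */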
1648 
1649 /*
1650  * GetSafeSnapshotBlockingPids
1651  *		If the specified process is currently blocked in GetSafeSnapshot,
1652  *		write the process IDs of all processes that it is blocked by
1653  *		into the caller-supplied buffer output[].  The list is truncated at
1654  *		output_size, and the number of PIDs written into the buffer is
1655  *		returned.  Returns zero if the given PID is not currently blocked
1656  *		in GetSafeSnapshot.
1657  */
1658 int
1659 GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
1660 {
1661 	int			num_written = 0;
1662 	SERIALIZABLEXACT *sxact;
1663 
1664 	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1665 
1666 	/* Find blocked_pid's SERIALIZABLEXACT by linear search. */
1667 	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
1668 	{
1669 		if (sxact->pid == blocked_pid)
1670 			break;
1671 	}
1672 
1673 	/* Did we find it, and is it currently waiting in GetSafeSnapshot? */
1674 	if (sxact != NULL && SxactIsDeferrableWaiting(sxact))
1675 	{
1676 		RWConflict	possibleUnsafeConflict;
1677 
1678 		/* Traverse the list of possible unsafe conflicts collecting PIDs. */
1679 		possibleUnsafeConflict = (RWConflict)
1680 			SHMQueueNext(&sxact->possibleUnsafeConflicts,
1681 						 &sxact->possibleUnsafeConflicts,
1682 						 offsetof(RWConflictData, inLink));
1683 
1684 		while (possibleUnsafeConflict != NULL && num_written < output_size)
1685 		{
1686 			output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
1687 			possibleUnsafeConflict = (RWConflict)
1688 				SHMQueueNext(&sxact->possibleUnsafeConflicts,
1689 							 &possibleUnsafeConflict->inLink,
1690 							 offsetof(RWConflictData, inLink));
1691 		}
1692 	}
1693 
1694 	LWLockRelease(SerializableXactHashLock);
1695 
1696 	return num_written;
1697 }
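/*
 * This is the workhorse behind the pg_safe_snapshot_blocking_pids() SQL
 * function, e.g. (the PID is purely illustrative):
 *
 *	SELECT pg_safe_snapshot_blocking_pids(12345);
 */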
1698 
1699 /*
1700  * Acquire a snapshot that can be used for the current transaction.
1701  *
1702  * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1703  * It should be current for this process and be contained in PredXact.
1704  *
1705  * The passed-in Snapshot pointer should reference a static data area that
1706  * can safely be passed to GetSnapshotData.  The return value is actually
1707  * always this same pointer; no new snapshot data structure is allocated
1708  * within this function.
1709  */
1710 Snapshot
1711 GetSerializableTransactionSnapshot(Snapshot snapshot)
1712 {
1713 	Assert(IsolationIsSerializable());
1714 
1715 	/*
1716 	 * Can't use serializable mode while recovery is still active, as it is,
1717 	 * for example, on a hot standby.  We could get here despite the check in
1718 	 * check_XactIsoLevel() if default_transaction_isolation is set to
1719 	 * serializable, so phrase the hint accordingly.
1720 	 */
1721 	if (RecoveryInProgress())
1722 		ereport(ERROR,
1723 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1724 				 errmsg("cannot use serializable mode in a hot standby"),
1725 				 errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1726 				 errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1727 
1728 	/*
1729 	 * A special optimization is available for SERIALIZABLE READ ONLY
1730 	 * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1731 	 * thereby avoid all SSI overhead once it's running.
1732 	 */
1733 	if (XactReadOnly && XactDeferrable)
1734 		return GetSafeSnapshot(snapshot);
1735 
1736 	return GetSerializableTransactionSnapshotInt(snapshot,
1737 												 NULL, InvalidPid);
1738 }
1739 
1740 /*
1741  * Import a snapshot to be used for the current transaction.
1742  *
1743  * This is nearly the same as GetSerializableTransactionSnapshot, except that
1744  * we don't take a new snapshot, but rather use the data we're handed.
1745  *
1746  * The caller must have verified that the snapshot came from a serializable
1747  * transaction; and if we're read-write, the source transaction must not be
1748  * read-only.
1749  */
1750 void
1751 SetSerializableTransactionSnapshot(Snapshot snapshot,
1752 								   VirtualTransactionId *sourcevxid,
1753 								   int sourcepid)
1754 {
1755 	Assert(IsolationIsSerializable());
1756 
1757 	/*
1758 	 * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1759 	 * import snapshots, since there's no way to wait for a safe snapshot when
1760 	 * we're using the snap we're told to.  (XXX instead of throwing an error,
1761 	 * we could just ignore the XactDeferrable flag?)
1762 	 */
1763 	if (XactReadOnly && XactDeferrable)
1764 		ereport(ERROR,
1765 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1766 				 errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1767 
1768 	(void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
1769 												 sourcepid);
1770 }
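/*
 * This path is reached when importing a snapshot exported by another
 * session, e.g. (the snapshot identifier is purely illustrative):
 *
 *	BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
 *	SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
 */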
1771 
1772 /*
1773  * Guts of GetSerializableTransactionSnapshot
1774  *
1775  * If sourcevxid is valid, this is actually an import operation and we should
1776  * skip calling GetSnapshotData, because the snapshot contents are already
1777  * loaded up.  HOWEVER: to avoid race conditions, we must check that the
1778  * source xact is still running after we acquire SerializableXactHashLock.
1779  * We do that by calling ProcArrayInstallImportedXmin.
1780  */
1781 static Snapshot
1782 GetSerializableTransactionSnapshotInt(Snapshot snapshot,
1783 									  VirtualTransactionId *sourcevxid,
1784 									  int sourcepid)
1785 {
1786 	PGPROC	   *proc;
1787 	VirtualTransactionId vxid;
1788 	SERIALIZABLEXACT *sxact,
1789 			   *othersxact;
1790 	HASHCTL		hash_ctl;
1791 
1792 	/* We only do this for serializable transactions.  Once. */
1793 	Assert(MySerializableXact == InvalidSerializableXact);
1794 
1795 	Assert(!RecoveryInProgress());
1796 
1797 	/*
1798 	 * Since all parts of a serializable transaction must use the same
1799 	 * snapshot, it is too late to establish one after a parallel operation
1800 	 * has begun.
1801 	 */
1802 	if (IsInParallelMode())
1803 		elog(ERROR, "cannot establish serializable snapshot during a parallel operation");
1804 
1805 	proc = MyProc;
1806 	Assert(proc != NULL);
1807 	GET_VXID_FROM_PGPROC(vxid, *proc);
1808 
1809 	/*
1810 	 * First we get the sxact structure, which may involve looping and access
1811 	 * to the "finished" list to free a structure for use.
1812 	 *
1813 	 * We must hold SerializableXactHashLock when taking/checking the snapshot
1814 	 * to avoid race conditions, for much the same reasons that
1815 	 * GetSnapshotData takes the ProcArrayLock.  Since we might have to
1816 	 * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
1817 	 * this means we have to create the sxact first, which is a bit annoying
1818 	 * (in particular, an elog(ERROR) in procarray.c would cause us to leak
1819 	 * the sxact).  Consider refactoring to avoid this.
1820 	 */
1821 #ifdef TEST_OLDSERXID
1822 	SummarizeOldestCommittedSxact();
1823 #endif
1824 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1825 	do
1826 	{
1827 		sxact = CreatePredXact();
1828 		/* If null, push out committed sxact to SLRU summary & retry. */
1829 		if (!sxact)
1830 		{
1831 			LWLockRelease(SerializableXactHashLock);
1832 			SummarizeOldestCommittedSxact();
1833 			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1834 		}
1835 	} while (!sxact);
1836 
1837 	/* Get the snapshot, or check that it's safe to use */
1838 	if (!sourcevxid)
1839 		snapshot = GetSnapshotData(snapshot);
1840 	else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
1841 	{
1842 		ReleasePredXact(sxact);
1843 		LWLockRelease(SerializableXactHashLock);
1844 		ereport(ERROR,
1845 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1846 				 errmsg("could not import the requested snapshot"),
1847 				 errdetail("The source process with PID %d is not running anymore.",
1848 						   sourcepid)));
1849 	}
1850 
1851 	/*
1852 	 * If there are no serializable transactions which are not read-only, we
1853 	 * can "opt out" of predicate locking and conflict checking for a
1854 	 * read-only transaction.
1855 	 *
1856 	 * The reason this is safe is that a read-only transaction can only become
1857 	 * part of a dangerous structure if it overlaps a writable transaction
1858 	 * which in turn overlaps a writable transaction which committed before
1859 	 * the read-only transaction started.  A new writable transaction can
1860 	 * overlap this one, but it can't meet the other condition of overlapping
1861 	 * a transaction which committed before this one started.
1862 	 */
1863 	if (XactReadOnly && PredXact->WritableSxactCount == 0)
1864 	{
1865 		ReleasePredXact(sxact);
1866 		LWLockRelease(SerializableXactHashLock);
1867 		return snapshot;
1868 	}
1869 
1870 	/* Maintain serializable global xmin info. */
1871 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
1872 	{
1873 		Assert(PredXact->SxactGlobalXminCount == 0);
1874 		PredXact->SxactGlobalXmin = snapshot->xmin;
1875 		PredXact->SxactGlobalXminCount = 1;
1876 		OldSerXidSetActiveSerXmin(snapshot->xmin);
1877 	}
1878 	else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
1879 	{
1880 		Assert(PredXact->SxactGlobalXminCount > 0);
1881 		PredXact->SxactGlobalXminCount++;
1882 	}
1883 	else
1884 	{
1885 		Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
1886 	}
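	/*
	 * In effect, SxactGlobalXminCount is a reference count of the active
	 * serializable transactions whose snapshots share the oldest xmin; the
	 * serializable global xmin can only advance once that count reaches
	 * zero.
	 */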
1887 
1888 	/* Initialize the structure. */
1889 	sxact->vxid = vxid;
1890 	sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
1891 	sxact->prepareSeqNo = InvalidSerCommitSeqNo;
1892 	sxact->commitSeqNo = InvalidSerCommitSeqNo;
1893 	SHMQueueInit(&(sxact->outConflicts));
1894 	SHMQueueInit(&(sxact->inConflicts));
1895 	SHMQueueInit(&(sxact->possibleUnsafeConflicts));
1896 	sxact->topXid = GetTopTransactionIdIfAny();
1897 	sxact->finishedBefore = InvalidTransactionId;
1898 	sxact->xmin = snapshot->xmin;
1899 	sxact->pid = MyProcPid;
1900 	SHMQueueInit(&(sxact->predicateLocks));
1901 	SHMQueueElemInit(&(sxact->finishedLink));
1902 	sxact->flags = 0;
1903 	if (XactReadOnly)
1904 	{
1905 		sxact->flags |= SXACT_FLAG_READ_ONLY;
1906 
1907 		/*
1908 		 * Register all concurrent r/w transactions as possible conflicts; if
1909 		 * all of them commit without any outgoing conflicts to earlier
1910 		 * transactions then this snapshot can be deemed safe (and we can run
1911 		 * without tracking predicate locks).
1912 		 */
1913 		for (othersxact = FirstPredXact();
1914 			 othersxact != NULL;
1915 			 othersxact = NextPredXact(othersxact))
1916 		{
1917 			if (!SxactIsCommitted(othersxact)
1918 				&& !SxactIsDoomed(othersxact)
1919 				&& !SxactIsReadOnly(othersxact))
1920 			{
1921 				SetPossibleUnsafeConflict(sxact, othersxact);
1922 			}
1923 		}
1924 	}
1925 	else
1926 	{
1927 		++(PredXact->WritableSxactCount);
1928 		Assert(PredXact->WritableSxactCount <=
1929 			   (MaxBackends + max_prepared_xacts));
1930 	}
1931 
1932 	MySerializableXact = sxact;
1933 	MyXactDidWrite = false;		/* haven't written anything yet */
1934 
1935 	LWLockRelease(SerializableXactHashLock);
1936 
1937 	/* Initialize the backend-local hash table of parent locks */
1938 	Assert(LocalPredicateLockHash == NULL);
1939 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
1940 	hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1941 	hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1942 	LocalPredicateLockHash = hash_create("Local predicate lock",
1943 										 max_predicate_locks_per_xact,
1944 										 &hash_ctl,
1945 										 HASH_ELEM | HASH_BLOBS);
1946 
1947 	return snapshot;
1948 }
1949 
1950 /*
1951  * Register the top level XID in SerializableXidHash.
1952  * Also store it for easy reference in MySerializableXact.
1953  */
1954 void
1955 RegisterPredicateLockingXid(TransactionId xid)
1956 {
1957 	SERIALIZABLEXIDTAG sxidtag;
1958 	SERIALIZABLEXID *sxid;
1959 	bool		found;
1960 
1961 	/*
1962 	 * If we're not tracking predicate lock data for this transaction, we
1963 	 * should ignore the request and return quickly.
1964 	 */
1965 	if (MySerializableXact == InvalidSerializableXact)
1966 		return;
1967 
1968 	/* We should have a valid XID and be at the top level. */
1969 	Assert(TransactionIdIsValid(xid));
1970 
1971 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1972 
1973 	/* This should only be done once per transaction. */
1974 	Assert(MySerializableXact->topXid == InvalidTransactionId);
1975 
1976 	MySerializableXact->topXid = xid;
1977 
1978 	sxidtag.xid = xid;
1979 	sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
1980 										   &sxidtag,
1981 										   HASH_ENTER, &found);
1982 	Assert(!found);
1983 
1984 	/* Initialize the structure. */
1985 	sxid->myXact = MySerializableXact;
1986 	LWLockRelease(SerializableXactHashLock);
1987 }
1988 
1989 
1990 /*
1991  * Check whether there are any predicate locks held by any transaction
1992  * for the page at the given block number.
1993  *
1994  * Note that the transaction may be completed but not yet subject to
1995  * cleanup due to overlapping serializable transactions.  This must
1996  * return valid information regardless of transaction isolation level.
1997  *
1998  * Also note that this doesn't check for a conflicting relation lock,
1999  * just a lock specifically on the given page.
2000  *
2001  * One use is to support proper behavior during GiST index vacuum.
2002  */
2003 bool
2004 PageIsPredicateLocked(Relation relation, BlockNumber blkno)
2005 {
2006 	PREDICATELOCKTARGETTAG targettag;
2007 	uint32		targettaghash;
2008 	LWLock	   *partitionLock;
2009 	PREDICATELOCKTARGET *target;
2010 
2011 	SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
2012 									relation->rd_node.dbNode,
2013 									relation->rd_id,
2014 									blkno);
2015 
2016 	targettaghash = PredicateLockTargetTagHashCode(&targettag);
2017 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
2018 	LWLockAcquire(partitionLock, LW_SHARED);
2019 	target = (PREDICATELOCKTARGET *)
2020 		hash_search_with_hash_value(PredicateLockTargetHash,
2021 									&targettag, targettaghash,
2022 									HASH_FIND, NULL);
2023 	LWLockRelease(partitionLock);
2024 
2025 	return (target != NULL);
2026 }
2027 
2028 
2029 /*
2030  * Check whether a particular lock is held by this transaction.
2031  *
2032  * Important note: this function may return false even if the lock is
2033  * being held, because it uses the local lock table which is not
2034  * updated if another transaction modifies our lock list (e.g. to
2035  * split an index page). It can also return true when a coarser
2036  * granularity lock that covers this target is being held. Be careful
2037  * to only use this function in circumstances where such errors are
2038  * acceptable!
2039  */
2040 static bool
2041 PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag)
2042 {
2043 	LOCALPREDICATELOCK *lock;
2044 
2045 	/* check local hash table */
2046 	lock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2047 											  targettag,
2048 											  HASH_FIND, NULL);
2049 
2050 	if (!lock)
2051 		return false;
2052 
2053 	/*
2054 	 * Found entry in the table, but still need to check whether it's actually
2055 	 * held -- it could just be a parent of some held lock.
2056 	 */
2057 	return lock->held;
2058 }
2059 
2060 /*
2061  * Return the parent lock tag in the lock hierarchy: the next coarser
2062  * lock that covers the provided tag.
2063  *
2064  * Returns true and sets *parent to the parent tag if one exists,
2065  * returns false if none exists.
2066  */
2067 static bool
2068 GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
2069 						  PREDICATELOCKTARGETTAG *parent)
2070 {
2071 	switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2072 	{
2073 		case PREDLOCKTAG_RELATION:
2074 			/* relation locks have no parent lock */
2075 			return false;
2076 
2077 		case PREDLOCKTAG_PAGE:
2078 			/* parent lock is relation lock */
2079 			SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
2080 												GET_PREDICATELOCKTARGETTAG_DB(*tag),
2081 												GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
2082 
2083 			return true;
2084 
2085 		case PREDLOCKTAG_TUPLE:
2086 			/* parent lock is page lock */
2087 			SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
2088 											GET_PREDICATELOCKTARGETTAG_DB(*tag),
2089 											GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
2090 											GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
2091 			return true;
2092 	}
2093 
2094 	/* not reachable */
2095 	Assert(false);
2096 	return false;
2097 }
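/*
 * For example (OIDs purely illustrative): walking up from the tuple tag
 * (db 16384, rel 16385, page 7, tuple 42) yields the page tag
 * (db 16384, rel 16385, page 7), then the relation tag (db 16384,
 * rel 16385), and then nothing, since relation locks have no parent.
 */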
2098 
2099 /*
2100  * Check whether the lock we are considering is already covered by a
2101  * coarser lock for our transaction.
2102  *
2103  * Like PredicateLockExists, this function might return a false
2104  * negative, but it will never return a false positive.
2105  */
2106 static bool
2107 CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
2108 {
2109 	PREDICATELOCKTARGETTAG targettag,
2110 				parenttag;
2111 
2112 	targettag = *newtargettag;
2113 
2114 	/* check parents iteratively until no more */
2115 	while (GetParentPredicateLockTag(&targettag, &parenttag))
2116 	{
2117 		targettag = parenttag;
2118 		if (PredicateLockExists(&targettag))
2119 			return true;
2120 	}
2121 
2122 	/* no more parents to check; lock is not covered */
2123 	return false;
2124 }
2125 
2126 /*
2127  * Remove the dummy entry from the predicate lock target hash, to free up some
2128  * scratch space. The caller must be holding SerializablePredicateLockListLock,
2129  * and must restore the entry with RestoreScratchTarget() before releasing the
2130  * lock.
2131  *
2132  * If lockheld is true, the caller is already holding the partition lock
2133  * of the partition containing the scratch entry.
2134  */
2135 static void
2136 RemoveScratchTarget(bool lockheld)
2137 {
2138 	bool		found;
2139 
2140 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2141 
2142 	if (!lockheld)
2143 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2144 	hash_search_with_hash_value(PredicateLockTargetHash,
2145 								&ScratchTargetTag,
2146 								ScratchTargetTagHash,
2147 								HASH_REMOVE, &found);
2148 	Assert(found);
2149 	if (!lockheld)
2150 		LWLockRelease(ScratchPartitionLock);
2151 }
2152 
2153 /*
2154  * Re-insert the dummy entry in predicate lock target hash.
2155  */
2156 static void
2157 RestoreScratchTarget(bool lockheld)
2158 {
2159 	bool		found;
2160 
2161 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2162 
2163 	if (!lockheld)
2164 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2165 	hash_search_with_hash_value(PredicateLockTargetHash,
2166 								&ScratchTargetTag,
2167 								ScratchTargetTagHash,
2168 								HASH_ENTER, &found);
2169 	Assert(!found);
2170 	if (!lockheld)
2171 		LWLockRelease(ScratchPartitionLock);
2172 }
2173 
2174 /*
2175  * Check whether the list of related predicate locks is empty for a
2176  * predicate lock target, and remove the target if it is.
2177  */
2178 static void
2179 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
2180 {
2181 	PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
2182 
2183 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2184 
2185 	/* Can't remove it until no locks at this target. */
2186 	if (!SHMQueueEmpty(&target->predicateLocks))
2187 		return;
2188 
2189 	/* Actually remove the target. */
2190 	rmtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2191 										   &target->tag,
2192 										   targettaghash,
2193 										   HASH_REMOVE, NULL);
2194 	Assert(rmtarget == target);
2195 }
2196 
2197 /*
2198  * Delete child target locks owned by this process.
2199  * This implementation assumes that the usage of each target tag field is
2200  * uniform.  No need to make this hard if we don't have to.
2201  *
2202  * We aren't acquiring lightweight locks for the predicate lock or lock
2203  * target structures associated with this transaction unless we're going
2204  * to modify them, because no other process is permitted to modify our
2205  * locks.
2206  */
2207 static void
2208 DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
2209 {
2210 	SERIALIZABLEXACT *sxact;
2211 	PREDICATELOCK *predlock;
2212 
2213 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2214 	sxact = MySerializableXact;
2215 	predlock = (PREDICATELOCK *)
2216 		SHMQueueNext(&(sxact->predicateLocks),
2217 					 &(sxact->predicateLocks),
2218 					 offsetof(PREDICATELOCK, xactLink));
2219 	while (predlock)
2220 	{
2221 		SHM_QUEUE  *predlocksxactlink;
2222 		PREDICATELOCK *nextpredlock;
2223 		PREDICATELOCKTAG oldlocktag;
2224 		PREDICATELOCKTARGET *oldtarget;
2225 		PREDICATELOCKTARGETTAG oldtargettag;
2226 
2227 		predlocksxactlink = &(predlock->xactLink);
2228 		nextpredlock = (PREDICATELOCK *)
2229 			SHMQueueNext(&(sxact->predicateLocks),
2230 						 predlocksxactlink,
2231 						 offsetof(PREDICATELOCK, xactLink));
2232 
2233 		oldlocktag = predlock->tag;
2234 		Assert(oldlocktag.myXact == sxact);
2235 		oldtarget = oldlocktag.myTarget;
2236 		oldtargettag = oldtarget->tag;
2237 
2238 		if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
2239 		{
2240 			uint32		oldtargettaghash;
2241 			LWLock	   *partitionLock;
2242 			PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
2243 
2244 			oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2245 			partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2246 
2247 			LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2248 
2249 			SHMQueueDelete(predlocksxactlink);
2250 			SHMQueueDelete(&(predlock->targetLink));
2251 			rmpredlock = hash_search_with_hash_value
2252 				(PredicateLockHash,
2253 				 &oldlocktag,
2254 				 PredicateLockHashCodeFromTargetHashCode(&oldlocktag,
2255 														 oldtargettaghash),
2256 				 HASH_REMOVE, NULL);
2257 			Assert(rmpredlock == predlock);
2258 
2259 			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2260 
2261 			LWLockRelease(partitionLock);
2262 
2263 			DecrementParentLocks(&oldtargettag);
2264 		}
2265 
2266 		predlock = nextpredlock;
2267 	}
2268 	LWLockRelease(SerializablePredicateLockListLock);
2269 }
2270 
2271 /*
2272  * Returns the promotion limit for a given predicate lock target.  This is the
2273  * max number of descendant locks allowed before promoting to the specified
2274  * tag. Note that the limit includes non-direct descendants (e.g., both tuples
2275  * and pages for a relation lock).
2276  *
2277  * Currently the default limit is 2 for a page lock, and half of the value of
2278  * max_pred_locks_per_transaction - 1 for a relation lock, to match behavior
2279  * of earlier releases when upgrading.
2280  *
2281  * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
2282  * of page and tuple locks based on the pages in a relation, and the maximum
2283  * ratio of tuple locks to tuples in a page.  This would provide more
2284  * generally "balanced" allocation of locks to where they are most useful,
2285  * while still allowing the absolute numbers to prevent one relation from
2286  * tying up all predicate lock resources.
2287  */
2288 static int
2289 MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
2290 {
2291 	switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2292 	{
2293 		case PREDLOCKTAG_RELATION:
2294 			return max_predicate_locks_per_relation < 0
2295 				? (max_predicate_locks_per_xact
2296 				   / (-max_predicate_locks_per_relation)) - 1
2297 				: max_predicate_locks_per_relation;
2298 
2299 		case PREDLOCKTAG_PAGE:
2300 			return max_predicate_locks_per_page;
2301 
2302 		case PREDLOCKTAG_TUPLE:
2303 
2304 			/*
2305 			 * not reachable: nothing is finer-granularity than a tuple, so we
2306 			 * should never try to promote to it.
2307 			 */
2308 			Assert(false);
2309 			return 0;
2310 	}
2311 
2312 	/* not reachable */
2313 	Assert(false);
2314 	return 0;
2315 }
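/*
 * Worked example, assuming the stock settings (max_pred_locks_per_page = 2,
 * max_pred_locks_per_relation = -2, max_pred_locks_per_transaction = 64): a
 * page target allows 2 child locks before promotion, and a relation target
 * allows (64 / 2) - 1 = 31.
 */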
2316 
2317 /*
2318  * For all ancestors of a newly-acquired predicate lock, increment
2319  * their child count in the parent hash table. If any of them have
2320  * more descendants than their promotion threshold, acquire the
2321  * coarsest such lock.
2322  *
2323  * Returns true if a parent lock was acquired and false otherwise.
2324  */
2325 static bool
2326 CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
2327 {
2328 	PREDICATELOCKTARGETTAG targettag,
2329 				nexttag,
2330 				promotiontag;
2331 	LOCALPREDICATELOCK *parentlock;
2332 	bool		found,
2333 				promote;
2334 
2335 	promote = false;
2336 
2337 	targettag = *reqtag;
2338 
2339 	/* check parents iteratively */
2340 	while (GetParentPredicateLockTag(&targettag, &nexttag))
2341 	{
2342 		targettag = nexttag;
2343 		parentlock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2344 														&targettag,
2345 														HASH_ENTER,
2346 														&found);
2347 		if (!found)
2348 		{
2349 			parentlock->held = false;
2350 			parentlock->childLocks = 1;
2351 		}
2352 		else
2353 			parentlock->childLocks++;
2354 
2355 		if (parentlock->childLocks >
2356 			MaxPredicateChildLocks(&targettag))
2357 		{
2358 			/*
2359 			 * We should promote to this parent lock. Continue to check its
2360 			 * ancestors, however, both to get their child counts right and to
2361 			 * check whether we should just go ahead and promote to one of
2362 			 * them.
2363 			 */
2364 			promotiontag = targettag;
2365 			promote = true;
2366 		}
2367 	}
2368 
2369 	if (promote)
2370 	{
2371 		/* acquire coarsest ancestor eligible for promotion */
2372 		PredicateLockAcquire(&promotiontag);
2373 		return true;
2374 	}
2375 	else
2376 		return false;
2377 }
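/*
 * For example, with max_pred_locks_per_page = 2, acquiring a predicate lock
 * on a third tuple of the same page raises that page's child count to 3,
 * exceeding the threshold; the request is then promoted to a single page
 * lock, and the now-redundant tuple locks are cleaned up when
 * PredicateLockAcquire calls DeleteChildTargetLocks.
 */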
2378 
2379 /*
2380  * When releasing a lock, decrement the child count on all ancestor
2381  * locks.
2382  *
2383  * This is called only when releasing a lock via
2384  * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
2385  * we've acquired its parent, possibly due to promotion) or when a new
2386  * MVCC write lock makes the predicate lock unnecessary. There's no
2387  * point in calling it when locks are released at transaction end, as
2388  * this information is no longer needed.
2389  */
2390 static void
2391 DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
2392 {
2393 	PREDICATELOCKTARGETTAG parenttag,
2394 				nexttag;
2395 
2396 	parenttag = *targettag;
2397 
2398 	while (GetParentPredicateLockTag(&parenttag, &nexttag))
2399 	{
2400 		uint32		targettaghash;
2401 		LOCALPREDICATELOCK *parentlock,
2402 				   *rmlock PG_USED_FOR_ASSERTS_ONLY;
2403 
2404 		parenttag = nexttag;
2405 		targettaghash = PredicateLockTargetTagHashCode(&parenttag);
2406 		parentlock = (LOCALPREDICATELOCK *)
2407 			hash_search_with_hash_value(LocalPredicateLockHash,
2408 										&parenttag, targettaghash,
2409 										HASH_FIND, NULL);
2410 
2411 		/*
2412 		 * There's a small chance the parent lock doesn't exist in the lock
2413 		 * table. This can happen if we prematurely removed it because an
2414 		 * index split caused the child refcount to be off.
2415 		 */
2416 		if (parentlock == NULL)
2417 			continue;
2418 
2419 		parentlock->childLocks--;
2420 
2421 		/*
2422 		 * Under similar circumstances the parent lock's refcount might be
2423 		 * zero. This only happens if we're holding that lock (otherwise we
2424 		 * would have removed the entry).
2425 		 */
2426 		if (parentlock->childLocks < 0)
2427 		{
2428 			Assert(parentlock->held);
2429 			parentlock->childLocks = 0;
2430 		}
2431 
2432 		if ((parentlock->childLocks == 0) && (!parentlock->held))
2433 		{
2434 			rmlock = (LOCALPREDICATELOCK *)
2435 				hash_search_with_hash_value(LocalPredicateLockHash,
2436 											&parenttag, targettaghash,
2437 											HASH_REMOVE, NULL);
2438 			Assert(rmlock == parentlock);
2439 		}
2440 	}
2441 }
2442 
2443 /*
2444  * Indicate that a predicate lock on the given target is held by the
2445  * specified transaction. Has no effect if the lock is already held.
2446  *
2447  * This updates the lock table and the sxact's lock list, and creates
2448  * the lock target if necessary, but does *not* do anything related to
2449  * granularity promotion or the local lock table. See
2450  * PredicateLockAcquire for that.
2451  */
2452 static void
2453 CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
2454 					uint32 targettaghash,
2455 					SERIALIZABLEXACT *sxact)
2456 {
2457 	PREDICATELOCKTARGET *target;
2458 	PREDICATELOCKTAG locktag;
2459 	PREDICATELOCK *lock;
2460 	LWLock	   *partitionLock;
2461 	bool		found;
2462 
2463 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
2464 
2465 	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
2466 	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2467 
2468 	/* Make sure that the target is represented. */
2469 	target = (PREDICATELOCKTARGET *)
2470 		hash_search_with_hash_value(PredicateLockTargetHash,
2471 									targettag, targettaghash,
2472 									HASH_ENTER_NULL, &found);
2473 	if (!target)
2474 		ereport(ERROR,
2475 				(errcode(ERRCODE_OUT_OF_MEMORY),
2476 				 errmsg("out of shared memory"),
2477 				 errhint("You might need to increase max_pred_locks_per_transaction.")));
2478 	if (!found)
2479 		SHMQueueInit(&(target->predicateLocks));
2480 
2481 	/* We've got the sxact and target, make sure they're joined. */
2482 	locktag.myTarget = target;
2483 	locktag.myXact = sxact;
2484 	lock = (PREDICATELOCK *)
2485 		hash_search_with_hash_value(PredicateLockHash, &locktag,
2486 									PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
2487 									HASH_ENTER_NULL, &found);
2488 	if (!lock)
2489 		ereport(ERROR,
2490 				(errcode(ERRCODE_OUT_OF_MEMORY),
2491 				 errmsg("out of shared memory"),
2492 				 errhint("You might need to increase max_pred_locks_per_transaction.")));
2493 
2494 	if (!found)
2495 	{
2496 		SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
2497 		SHMQueueInsertBefore(&(sxact->predicateLocks),
2498 							 &(lock->xactLink));
2499 		lock->commitSeqNo = InvalidSerCommitSeqNo;
2500 	}
2501 
2502 	LWLockRelease(partitionLock);
2503 	LWLockRelease(SerializablePredicateLockListLock);
2504 }
2505 
2506 /*
2507  * Acquire a predicate lock on the specified target for the current
2508  * connection if not already held. This updates the local lock table
2509  * and uses it to implement granularity promotion. It will consolidate
2510  * multiple locks into a coarser lock if warranted, and will release
2511  * any finer-grained locks covered by the new one.
2512  */
2513 static void
2514 PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
2515 {
2516 	uint32		targettaghash;
2517 	bool		found;
2518 	LOCALPREDICATELOCK *locallock;
2519 
2520 	/* Do we have the lock already, or a covering lock? */
2521 	if (PredicateLockExists(targettag))
2522 		return;
2523 
2524 	if (CoarserLockCovers(targettag))
2525 		return;
2526 
2527 	/* The same hash and LW lock apply to the lock target and the local lock. */
2528 	targettaghash = PredicateLockTargetTagHashCode(targettag);
2529 
2530 	/* Acquire lock in local table */
2531 	locallock = (LOCALPREDICATELOCK *)
2532 		hash_search_with_hash_value(LocalPredicateLockHash,
2533 									targettag, targettaghash,
2534 									HASH_ENTER, &found);
2535 	locallock->held = true;
2536 	if (!found)
2537 		locallock->childLocks = 0;
2538 
2539 	/* Actually create the lock */
2540 	CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2541 
2542 	/*
2543 	 * Lock has been acquired. Check whether it should be promoted to a
2544 	 * coarser granularity, or whether there are finer-granularity locks to
2545 	 * clean up.
2546 	 */
2547 	if (CheckAndPromotePredicateLockRequest(targettag))
2548 	{
2549 		/*
2550 		 * Lock request was promoted to a coarser-granularity lock, and that
2551 		 * lock was acquired. It will delete this lock and any of its
2552 		 * children, so we're done.
2553 		 */
2554 	}
2555 	else
2556 	{
2557 		/* Clean up any finer-granularity locks */
2558 		if (GET_PREDICATELOCKTARGETTAG_TYPE(*targettag) != PREDLOCKTAG_TUPLE)
2559 			DeleteChildTargetLocks(targettag);
2560 	}
2561 }
2562 
2563 
2564 /*
2565  *		PredicateLockRelation
2566  *
2567  * Gets a predicate lock at the relation level.
2568  * Skip if not in full serializable transaction isolation level.
2569  * Skip if this is a temporary table.
2570  * Clear any finer-grained predicate locks this session has on the relation.
2571  */
2572 void
2573 PredicateLockRelation(Relation relation, Snapshot snapshot)
2574 {
2575 	PREDICATELOCKTARGETTAG tag;
2576 
2577 	if (!SerializationNeededForRead(relation, snapshot))
2578 		return;
2579 
2580 	SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2581 										relation->rd_node.dbNode,
2582 										relation->rd_id);
2583 	PredicateLockAcquire(&tag);
2584 }
2585 
2586 /*
2587  *		PredicateLockPage
2588  *
2589  * Gets a predicate lock at the page level.
2590  * Skip if not in full serializable transaction isolation level.
2591  * Skip if this is a temporary table.
2592  * Skip if a coarser predicate lock already covers this page.
2593  * Clear any finer-grained predicate locks this session has on the relation.
2594  */
2595 void
2596 PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
2597 {
2598 	PREDICATELOCKTARGETTAG tag;
2599 
2600 	if (!SerializationNeededForRead(relation, snapshot))
2601 		return;
2602 
2603 	SET_PREDICATELOCKTARGETTAG_PAGE(tag,
2604 									relation->rd_node.dbNode,
2605 									relation->rd_id,
2606 									blkno);
2607 	PredicateLockAcquire(&tag);
2608 }
2609 
2610 /*
2611  *		PredicateLockTuple
2612  *
2613  * Gets a predicate lock at the tuple level.
2614  * Skip if not in full serializable transaction isolation level.
2615  * Skip if this is a temporary table.
2616  */
2617 void
2618 PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
2619 {
2620 	PREDICATELOCKTARGETTAG tag;
2621 	ItemPointer tid;
2622 	TransactionId targetxmin;
2623 
2624 	if (!SerializationNeededForRead(relation, snapshot))
2625 		return;
2626 
2627 	/*
2628 	 * If it's a heap tuple, return if this xact wrote it.
2629 	 */
2630 	if (relation->rd_index == NULL)
2631 	{
2632 		TransactionId myxid;
2633 
2634 		targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
2635 
2636 		myxid = GetTopTransactionIdIfAny();
2637 		if (TransactionIdIsValid(myxid))
2638 		{
2639 			if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
2640 			{
2641 				TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
2642 
2643 				if (TransactionIdEquals(xid, myxid))
2644 				{
2645 					/* We wrote it; we already have a write lock. */
2646 					return;
2647 				}
2648 			}
2649 		}
2650 	}
2651 
2652 	/*
2653 	 * Do a quick-but-not-definitive test for a relation lock first.  This will
2654 	 * never cause a return when the relation is *not* locked, but will
2655 	 * occasionally let the check continue when there really *is* a relation
2656 	 * level lock.
2657 	 */
2658 	SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2659 										relation->rd_node.dbNode,
2660 										relation->rd_id);
2661 	if (PredicateLockExists(&tag))
2662 		return;
2663 
2664 	tid = &(tuple->t_self);
2665 	SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
2666 									 relation->rd_node.dbNode,
2667 									 relation->rd_id,
2668 									 ItemPointerGetBlockNumber(tid),
2669 									 ItemPointerGetOffsetNumber(tid));
2670 	PredicateLockAcquire(&tag);
2671 }
2672 
2673 
2674 /*
2675  *		DeleteLockTarget
2676  *
2677  * Remove a predicate lock target along with any locks held for it.
2678  *
2679  * Caller must hold SerializablePredicateLockListLock and the
2680  * appropriate hash partition lock for the target.
2681  */
2682 static void
2683 DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
2684 {
2685 	PREDICATELOCK *predlock;
2686 	SHM_QUEUE  *predlocktargetlink;
2687 	PREDICATELOCK *nextpredlock;
2688 	bool		found;
2689 
2690 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2691 	Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
2692 
2693 	predlock = (PREDICATELOCK *)
2694 		SHMQueueNext(&(target->predicateLocks),
2695 					 &(target->predicateLocks),
2696 					 offsetof(PREDICATELOCK, targetLink));
2697 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2698 	while (predlock)
2699 	{
2700 		predlocktargetlink = &(predlock->targetLink);
2701 		nextpredlock = (PREDICATELOCK *)
2702 			SHMQueueNext(&(target->predicateLocks),
2703 						 predlocktargetlink,
2704 						 offsetof(PREDICATELOCK, targetLink));
2705 
2706 		SHMQueueDelete(&(predlock->xactLink));
2707 		SHMQueueDelete(&(predlock->targetLink));
2708 
2709 		hash_search_with_hash_value
2710 			(PredicateLockHash,
2711 			 &predlock->tag,
2712 			 PredicateLockHashCodeFromTargetHashCode(&predlock->tag,
2713 													 targettaghash),
2714 			 HASH_REMOVE, &found);
2715 		Assert(found);
2716 
2717 		predlock = nextpredlock;
2718 	}
2719 	LWLockRelease(SerializableXactHashLock);
2720 
2721 	/* Remove the target itself, if possible. */
2722 	RemoveTargetIfNoLongerUsed(target, targettaghash);
2723 }
2724 
2725 
2726 /*
2727  *		TransferPredicateLocksToNewTarget
2728  *
2729  * Move or copy all the predicate locks for a lock target, for use by
2730  * index page splits/combines and other things that create or replace
2731  * lock targets. If 'removeOld' is true, the old locks and the target
2732  * will be removed.
2733  *
2734  * Returns true on success, or false if we ran out of shared memory to
2735  * allocate the new target or locks. Guaranteed to always succeed if
2736  * removeOld is set (by using the scratch entry in PredicateLockTargetHash
2737  * for scratch space).
2738  *
2739  * Warning: the "removeOld" option should be used only with care,
2740  * because this function does not (indeed, can not) update other
2741  * backends' LocalPredicateLockHash. If we are only adding new
2742  * entries, this is not a problem: the local lock table is used only
2743  * as a hint, so missing entries for locks that are held are
2744  * OK. Having entries for locks that are no longer held, as can happen
2745  * when using "removeOld", is not in general OK. We can only use it
2746  * safely when replacing a lock with a coarser-granularity lock that
2747  * covers it, or if we are absolutely certain that no one will need to
2748  * refer to that lock in the future.
2749  *
2750  * Caller must hold SerializablePredicateLockListLock.
2751  */
2752 static bool
2753 TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
2754 								  PREDICATELOCKTARGETTAG newtargettag,
2755 								  bool removeOld)
2756 {
2757 	uint32		oldtargettaghash;
2758 	LWLock	   *oldpartitionLock;
2759 	PREDICATELOCKTARGET *oldtarget;
2760 	uint32		newtargettaghash;
2761 	LWLock	   *newpartitionLock;
2762 	bool		found;
2763 	bool		outOfShmem = false;
2764 
2765 	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2766 
2767 	oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
2768 	newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
2769 	oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
2770 	newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
2771 
2772 	if (removeOld)
2773 	{
2774 		/*
2775 		 * Remove the dummy entry to give us scratch space, so we know we'll
2776 		 * be able to create the new lock target.
2777 		 */
2778 		RemoveScratchTarget(false);
2779 	}
2780 
2781 	/*
2782 	 * We must get the partition locks in ascending sequence to avoid
2783 	 * deadlocks. If old and new partitions are the same, we must request the
2784 	 * lock only once.
2785 	 */
2786 	if (oldpartitionLock < newpartitionLock)
2787 	{
2788 		LWLockAcquire(oldpartitionLock,
2789 					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2790 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2791 	}
2792 	else if (oldpartitionLock > newpartitionLock)
2793 	{
2794 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2795 		LWLockAcquire(oldpartitionLock,
2796 					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
2797 	}
2798 	else
2799 		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
2800 
2801 	/*
2802 	 * Look for the old target.  If not found, that's OK; no predicate locks
2803 	 * are affected, so we can just clean up and return. If it does exist,
2804 	 * walk its list of predicate locks and move or copy them to the new
2805 	 * target.
2806 	 */
2807 	oldtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2808 											&oldtargettag,
2809 											oldtargettaghash,
2810 											HASH_FIND, NULL);
2811 
2812 	if (oldtarget)
2813 	{
2814 		PREDICATELOCKTARGET *newtarget;
2815 		PREDICATELOCK *oldpredlock;
2816 		PREDICATELOCKTAG newpredlocktag;
2817 
2818 		newtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2819 												&newtargettag,
2820 												newtargettaghash,
2821 												HASH_ENTER_NULL, &found);
2822 
2823 		if (!newtarget)
2824 		{
2825 			/* Failed to allocate due to insufficient shmem */
2826 			outOfShmem = true;
2827 			goto exit;
2828 		}
2829 
2830 		/* If we created a new entry, initialize it */
2831 		if (!found)
2832 			SHMQueueInit(&(newtarget->predicateLocks));
2833 
2834 		newpredlocktag.myTarget = newtarget;
2835 
2836 		/*
2837 		 * Loop through all the locks on the old target, replacing them with
2838 		 * locks on the new target.
2839 		 */
2840 		oldpredlock = (PREDICATELOCK *)
2841 			SHMQueueNext(&(oldtarget->predicateLocks),
2842 						 &(oldtarget->predicateLocks),
2843 						 offsetof(PREDICATELOCK, targetLink));
2844 		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
2845 		while (oldpredlock)
2846 		{
2847 			SHM_QUEUE  *predlocktargetlink;
2848 			PREDICATELOCK *nextpredlock;
2849 			PREDICATELOCK *newpredlock;
2850 			SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
2851 
2852 			predlocktargetlink = &(oldpredlock->targetLink);
2853 			nextpredlock = (PREDICATELOCK *)
2854 				SHMQueueNext(&(oldtarget->predicateLocks),
2855 							 predlocktargetlink,
2856 							 offsetof(PREDICATELOCK, targetLink));
2857 			newpredlocktag.myXact = oldpredlock->tag.myXact;
2858 
2859 			if (removeOld)
2860 			{
2861 				SHMQueueDelete(&(oldpredlock->xactLink));
2862 				SHMQueueDelete(&(oldpredlock->targetLink));
2863 
2864 				hash_search_with_hash_value
2865 					(PredicateLockHash,
2866 					 &oldpredlock->tag,
2867 					 PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
2868 															 oldtargettaghash),
2869 					 HASH_REMOVE, &found);
2870 				Assert(found);
2871 			}
2872 
2873 			newpredlock = (PREDICATELOCK *)
2874 				hash_search_with_hash_value(PredicateLockHash,
2875 											&newpredlocktag,
2876 											PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
2877 																					newtargettaghash),
2878 											HASH_ENTER_NULL,
2879 											&found);
2880 			if (!newpredlock)
2881 			{
2882 				/* Out of shared memory. Undo what we've done so far. */
2883 				LWLockRelease(SerializableXactHashLock);
2884 				DeleteLockTarget(newtarget, newtargettaghash);
2885 				outOfShmem = true;
2886 				goto exit;
2887 			}
2888 			if (!found)
2889 			{
2890 				SHMQueueInsertBefore(&(newtarget->predicateLocks),
2891 									 &(newpredlock->targetLink));
2892 				SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
2893 									 &(newpredlock->xactLink));
2894 				newpredlock->commitSeqNo = oldCommitSeqNo;
2895 			}
2896 			else
2897 			{
2898 				if (newpredlock->commitSeqNo < oldCommitSeqNo)
2899 					newpredlock->commitSeqNo = oldCommitSeqNo;
2900 			}
2901 
2902 			Assert(newpredlock->commitSeqNo != 0);
2903 			Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
2904 				   || (newpredlock->tag.myXact == OldCommittedSxact));
2905 
2906 			oldpredlock = nextpredlock;
2907 		}
2908 		LWLockRelease(SerializableXactHashLock);
2909 
2910 		if (removeOld)
2911 		{
2912 			Assert(SHMQueueEmpty(&oldtarget->predicateLocks));
2913 			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
2914 		}
2915 	}
2916 
2917 
2918 exit:
2919 	/* Release partition locks in reverse order of acquisition. */
2920 	if (oldpartitionLock < newpartitionLock)
2921 	{
2922 		LWLockRelease(newpartitionLock);
2923 		LWLockRelease(oldpartitionLock);
2924 	}
2925 	else if (oldpartitionLock > newpartitionLock)
2926 	{
2927 		LWLockRelease(oldpartitionLock);
2928 		LWLockRelease(newpartitionLock);
2929 	}
2930 	else
2931 		LWLockRelease(newpartitionLock);
2932 
2933 	if (removeOld)
2934 	{
2935 		/* We shouldn't run out of memory if we're moving locks */
2936 		Assert(!outOfShmem);
2937 
2938 		/* Put the scratch entry back */
2939 		RestoreScratchTarget(false);
2940 	}
2941 
2942 	return !outOfShmem;
2943 }
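/*
 * Usage sketch: an index page split copies the locks from the old page's
 * target to the new page's target with removeOld = false, so running out of
 * shared memory here is tolerable (the old locks still cover the data).
 * Per the warning above, removeOld = true is safe only when the new lock
 * covers the old one, or when the old target can never be referenced again.
 */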
2944 
2945 /*
2946  * Drop all predicate locks of any granularity from the specified relation,
2947  * which can be a heap relation or an index relation.  If 'transfer' is true,
2948  * acquire a relation lock on the heap for any transactions with any lock(s)
2949  * on the specified relation.
2950  *
2951  * This requires grabbing a lot of LW locks and scanning the entire lock
2952  * target table for matches.  That makes this more expensive than most
2953  * predicate lock management functions, but it will only be called for DDL
2954  * type commands that are expensive anyway, and there are fast returns when
2955  * no serializable transactions are active or the relation is temporary.
2956  *
2957  * We don't use the TransferPredicateLocksToNewTarget function because it
2958  * acquires its own locks on the partitions of the two targets involved,
2959  * and we'll already be holding all partition locks.
2960  *
2961  * We can't throw an error from here, because the call could be from a
2962  * transaction which is not serializable.
2963  *
2964  * NOTE: This is currently only called with transfer set to true, but that may
2965  * change.  If we decide to clean up the locks from a table on commit of a
2966  * transaction which executed DROP TABLE, the false condition will be useful.
2967  */
2968 static void
2969 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
2970 {
2971 	HASH_SEQ_STATUS seqstat;
2972 	PREDICATELOCKTARGET *oldtarget;
2973 	PREDICATELOCKTARGET *heaptarget;
2974 	Oid			dbId;
2975 	Oid			relId;
2976 	Oid			heapId;
2977 	int			i;
2978 	bool		isIndex;
2979 	bool		found;
2980 	uint32		heaptargettaghash;
2981 
2982 	/*
2983 	 * Bail out quickly if there are no serializable transactions running.
2984 	 * It's safe to check this without taking locks because the caller is
2985 	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
2986 	 * would matter here can be acquired while that is held.
2987 	 */
2988 	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
2989 		return;
2990 
2991 	if (!PredicateLockingNeededForRelation(relation))
2992 		return;
2993 
2994 	dbId = relation->rd_node.dbNode;
2995 	relId = relation->rd_id;
2996 	if (relation->rd_index == NULL)
2997 	{
2998 		isIndex = false;
2999 		heapId = relId;
3000 	}
3001 	else
3002 	{
3003 		isIndex = true;
3004 		heapId = relation->rd_index->indrelid;
3005 	}
3006 	Assert(heapId != InvalidOid);
3007 	Assert(transfer || !isIndex);	/* index OID only makes sense with
3008 									 * transfer */
3009 
3010 	/* Retrieve first time needed, then keep. */
3011 	heaptargettaghash = 0;
3012 	heaptarget = NULL;
3013 
3014 	/* Acquire locks on all lock partitions */
3015 	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
3016 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
3017 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
3018 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3019 
3020 	/*
3021 	 * Remove the dummy entry to give us scratch space, so we know we'll be
3022 	 * able to create the new lock target.
3023 	 */
3024 	if (transfer)
3025 		RemoveScratchTarget(true);
3026 
3027 	/* Scan through target map */
3028 	hash_seq_init(&seqstat, PredicateLockTargetHash);
3029 
3030 	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
3031 	{
3032 		PREDICATELOCK *oldpredlock;
3033 
3034 		/*
3035 		 * Check whether this is a target which needs attention.
3036 		 */
3037 		if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
3038 			continue;			/* wrong relation id */
3039 		if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
3040 			continue;			/* wrong database id */
3041 		if (transfer && !isIndex
3042 			&& GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
3043 			continue;			/* already the right lock */
3044 
3045 		/*
3046 		 * If we made it here, we have work to do.  We make sure the heap
3047 		 * relation lock exists, then we walk the list of predicate locks for
3048 		 * the old target we found, moving all locks to the heap relation lock
3049 		 * -- unless they already hold that.
3050 		 */
3051 
3052 		/*
3053 		 * First make sure we have the heap relation target.  We only need to
3054 		 * do this once.
3055 		 */
3056 		if (transfer && heaptarget == NULL)
3057 		{
3058 			PREDICATELOCKTARGETTAG heaptargettag;
3059 
3060 			SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
3061 			heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
3062 			heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
3063 													 &heaptargettag,
3064 													 heaptargettaghash,
3065 													 HASH_ENTER, &found);
3066 			if (!found)
3067 				SHMQueueInit(&heaptarget->predicateLocks);
3068 		}
3069 
3070 		/*
3071 		 * Loop through all the locks on the old target, replacing them with
3072 		 * locks on the new target.
3073 		 */
3074 		oldpredlock = (PREDICATELOCK *)
3075 			SHMQueueNext(&(oldtarget->predicateLocks),
3076 						 &(oldtarget->predicateLocks),
3077 						 offsetof(PREDICATELOCK, targetLink));
3078 		while (oldpredlock)
3079 		{
3080 			PREDICATELOCK *nextpredlock;
3081 			PREDICATELOCK *newpredlock;
3082 			SerCommitSeqNo oldCommitSeqNo;
3083 			SERIALIZABLEXACT *oldXact;
3084 
3085 			nextpredlock = (PREDICATELOCK *)
3086 				SHMQueueNext(&(oldtarget->predicateLocks),
3087 							 &(oldpredlock->targetLink),
3088 							 offsetof(PREDICATELOCK, targetLink));
3089 
3090 			/*
3091 			 * Remove the old lock first. This avoids the chance of running
3092 			 * out of lock structure entries for the hash table.
3093 			 */
3094 			oldCommitSeqNo = oldpredlock->commitSeqNo;
3095 			oldXact = oldpredlock->tag.myXact;
3096 
3097 			SHMQueueDelete(&(oldpredlock->xactLink));
3098 
3099 			/*
3100 			 * No need for retail delete from oldtarget list, we're removing
3101 			 * the whole target anyway.
3102 			 */
3103 			hash_search(PredicateLockHash,
3104 						&oldpredlock->tag,
3105 						HASH_REMOVE, &found);
3106 			Assert(found);
3107 
3108 			if (transfer)
3109 			{
3110 				PREDICATELOCKTAG newpredlocktag;
3111 
3112 				newpredlocktag.myTarget = heaptarget;
3113 				newpredlocktag.myXact = oldXact;
3114 				newpredlock = (PREDICATELOCK *)
3115 					hash_search_with_hash_value(PredicateLockHash,
3116 												&newpredlocktag,
3117 												PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
3118 																						heaptargettaghash),
3119 												HASH_ENTER,
3120 												&found);
3121 				if (!found)
3122 				{
3123 					SHMQueueInsertBefore(&(heaptarget->predicateLocks),
3124 										 &(newpredlock->targetLink));
3125 					SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
3126 										 &(newpredlock->xactLink));
3127 					newpredlock->commitSeqNo = oldCommitSeqNo;
3128 				}
3129 				else
3130 				{
3131 					if (newpredlock->commitSeqNo < oldCommitSeqNo)
3132 						newpredlock->commitSeqNo = oldCommitSeqNo;
3133 				}
3134 
3135 				Assert(newpredlock->commitSeqNo != 0);
3136 				Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
3137 					   || (newpredlock->tag.myXact == OldCommittedSxact));
3138 			}
3139 
3140 			oldpredlock = nextpredlock;
3141 		}
3142 
3143 		hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
3144 					&found);
3145 		Assert(found);
3146 	}
3147 
3148 	/* Put the scratch entry back */
3149 	if (transfer)
3150 		RestoreScratchTarget(true);
3151 
3152 	/* Release locks in reverse order */
3153 	LWLockRelease(SerializableXactHashLock);
3154 	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
3155 		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
3156 	LWLockRelease(SerializablePredicateLockListLock);
3157 }

/*
 * TransferPredicateLocksToHeapRelation
 *		For all transactions, transfer all predicate locks for the given
 *		relation to a single relation lock on the heap.
 */
void
TransferPredicateLocksToHeapRelation(Relation relation)
{
	DropAllPredicateLocksFromTable(relation, true);
}


/*
 *		PredicateLockPageSplit
 *
 * Copies any predicate locks for the old page to the new page.
 * Skip if this is a temporary table or toast table.
 *
 * NOTE: A page split (or overflow) affects all serializable transactions,
 * even if it occurs in the context of another transaction isolation level.
 *
 * NOTE: This currently leaves the local copy of the locks without
 * information on the new lock which is in shared memory.  This could cause
 * problems if enough page splits occur on locked pages without the processes
 * which hold the locks getting in and noticing.
 */
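/*
 * A hedged sketch of the expected caller pattern (the precise call sites in
 * the index AMs are assumptions here): an AM that has just split a page,
 * while still holding the buffer locks on both halves, reports the split so
 * SIREAD locks follow the relocated tuples:
 *
 *		PredicateLockPageSplit(rel,
 *							   BufferGetBlockNumber(origbuf),
 *							   BufferGetBlockNumber(newbuf));
 *
 * Holding the buffer locks is what makes the quick-exit check below safe.
 */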
void
PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
					   BlockNumber newblkno)
{
	PREDICATELOCKTARGETTAG oldtargettag;
	PREDICATELOCKTARGETTAG newtargettag;
	bool		success;

	/*
	 * Bail out quickly if there are no serializable transactions running.
	 *
	 * It's safe to do this check without taking any additional locks. Even if
	 * a serializable transaction starts concurrently, we know it can't take
	 * any SIREAD locks on the page being split because the caller is holding
	 * the associated buffer page lock. Memory reordering isn't an issue; the
	 * memory barrier in the LWLock acquisition guarantees that this read
	 * occurs while the buffer page lock is held.
	 */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
		return;

	if (!PredicateLockingNeededForRelation(relation))
		return;

	Assert(oldblkno != newblkno);
	Assert(BlockNumberIsValid(oldblkno));
	Assert(BlockNumberIsValid(newblkno));

	SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
									relation->rd_node.dbNode,
									relation->rd_id,
									oldblkno);
	SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
									relation->rd_node.dbNode,
									relation->rd_id,
									newblkno);

	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);

	/*
	 * Try copying the locks over to the new page's tag, creating it if
	 * necessary.
	 */
	success = TransferPredicateLocksToNewTarget(oldtargettag,
												newtargettag,
												false);

	if (!success)
	{
		/*
		 * No more predicate lock entries are available. Failure isn't an
		 * option here, so promote the page lock to a relation lock.
		 */

		/* Get the parent relation lock's lock tag */
		success = GetParentPredicateLockTag(&oldtargettag,
											&newtargettag);
		Assert(success);

		/*
		 * Move the locks to the parent. This shouldn't fail.
		 *
		 * Note that here we are removing locks held by other backends,
		 * leading to a possible inconsistency in their local lock hash table.
		 * This is OK because we're replacing it with a lock that covers the
		 * old one.
		 */
		success = TransferPredicateLocksToNewTarget(oldtargettag,
													newtargettag,
													true);
		Assert(success);
	}

	LWLockRelease(SerializablePredicateLockListLock);
}

/*
 *		PredicateLockPageCombine
 *
 * Combines predicate locks for two existing pages.
 * Skip if this is a temporary table or toast table.
 *
 * NOTE: A page combine affects all serializable transactions, even if it
 * occurs in the context of another transaction isolation level.
 */
void
PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
						 BlockNumber newblkno)
{
	/*
	 * Page combines differ from page splits in that we ought to be able to
	 * remove the locks on the old page after transferring them to the new
	 * page, instead of duplicating them. However, because we can't edit other
	 * backends' local lock tables, removing the old lock would leave them
	 * with an entry in their LocalPredicateLockHash for a lock they're not
	 * holding, which isn't acceptable. So we wind up having to do the same
	 * work as a page split, acquiring a lock on the new page and keeping the
	 * old page locked too. That can lead to some false positives, but should
	 * be rare in practice.
	 */
	PredicateLockPageSplit(relation, oldblkno, newblkno);
}

/*
 * Walk the list of in-progress serializable transactions and find the new
 * xmin.
 */
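/*
 * Worked example (illustrative numbers only): with in-progress sxacts whose
 * xmins are 100, 100, and 105, the loop below leaves SxactGlobalXmin = 100
 * and SxactGlobalXminCount = 2.  Once both transactions with xmin 100
 * complete, a later call recomputes the values as 105 and 1.  Rolled-back,
 * committed, and OldCommittedSxact entries are skipped entirely.
 */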
static void
SetNewSxactGlobalXmin(void)
{
	SERIALIZABLEXACT *sxact;

	Assert(LWLockHeldByMe(SerializableXactHashLock));

	PredXact->SxactGlobalXmin = InvalidTransactionId;
	PredXact->SxactGlobalXminCount = 0;

	for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
	{
		if (!SxactIsRolledBack(sxact)
			&& !SxactIsCommitted(sxact)
			&& sxact != OldCommittedSxact)
		{
			Assert(sxact->xmin != InvalidTransactionId);
			if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
				|| TransactionIdPrecedes(sxact->xmin,
										 PredXact->SxactGlobalXmin))
			{
				PredXact->SxactGlobalXmin = sxact->xmin;
				PredXact->SxactGlobalXminCount = 1;
			}
			else if (TransactionIdEquals(sxact->xmin,
										 PredXact->SxactGlobalXmin))
				PredXact->SxactGlobalXminCount++;
		}
	}

	OldSerXidSetActiveSerXmin(PredXact->SxactGlobalXmin);
}

/*
 *		ReleasePredicateLocks
 *
 * Releases predicate locks based on completion of the current transaction,
 * whether committed or rolled back.  It can also be called for a read only
 * transaction when it becomes impossible for the transaction to become
 * part of a dangerous structure.
 *
 * We do nothing unless this is a serializable transaction.
 *
 * This method must ensure that shared memory hash tables are cleaned
 * up in some relatively timely fashion.
 *
 * If this transaction is committing and is holding any predicate locks,
 * it must be added to a list of completed serializable transactions still
 * holding locks.
 */
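/*
 * For orientation, and as an assumption about callers rather than anything
 * enforced here: the expected pattern is a single call at top-level
 * transaction completion, along the lines of
 *
 *		ReleasePredicateLocks(true);	(during commit processing)
 *		ReleasePredicateLocks(false);	(during abort processing)
 *
 * plus the early call mentioned above once a read-only transaction is
 * known to be safe.
 */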
void
ReleasePredicateLocks(bool isCommit)
{
	bool		needToClear;
	RWConflict	conflict,
				nextConflict,
				possibleUnsafeConflict;
	SERIALIZABLEXACT *roXact;

	/*
	 * We can't trust XactReadOnly here, because a transaction which started
	 * as READ WRITE can show as READ ONLY later, e.g., within
	 * subtransactions.  We want to flag a transaction as READ ONLY if it
	 * commits without writing so that de facto READ ONLY transactions get the
	 * benefit of some RO optimizations, so we will use this local variable to
	 * get some cleanup logic right which is based on whether the transaction
	 * was declared READ ONLY at the top level.
	 */
	bool		topLevelIsDeclaredReadOnly;

	if (MySerializableXact == InvalidSerializableXact)
	{
		Assert(LocalPredicateLockHash == NULL);
		return;
	}

	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	Assert(!isCommit || SxactIsPrepared(MySerializableXact));
	Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
	Assert(!SxactIsCommitted(MySerializableXact));
	Assert(!SxactIsRolledBack(MySerializableXact));

	/* may not be serializable during COMMIT/ROLLBACK PREPARED */
	Assert(MySerializableXact->pid == 0 || IsolationIsSerializable());

	/* We'd better not already be on the cleanup list. */
	Assert(!SxactIsOnFinishedList(MySerializableXact));

	topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);

	/*
	 * We don't hold XidGenLock lock here, assuming that TransactionId is
	 * atomic!
	 *
	 * If this value is changing, we don't care that much whether we get the
	 * old or new value -- it is just used to determine how far
	 * GlobalSerializableXmin must advance before this transaction can be
	 * fully cleaned up.  The worst that could happen is we wait for one more
	 * transaction to complete before freeing some RAM; correctness of visible
	 * behavior is not affected.
	 */
	MySerializableXact->finishedBefore = ShmemVariableCache->nextXid;

	/*
	 * If it's not a commit it's a rollback, and we can clear our locks
	 * immediately.
	 */
	if (isCommit)
	{
		MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
		MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
		/* Recognize implicit read-only transaction (commit without write). */
		if (!MyXactDidWrite)
			MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
	}
	else
	{
		/*
		 * The DOOMED flag indicates that we intend to roll back this
		 * transaction and so it should not cause serialization failures for
		 * other transactions that conflict with it. Note that this flag might
		 * already be set, if another backend marked this transaction for
		 * abort.
		 *
		 * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
		 * has been called, and so the SerializableXact is eligible for
		 * cleanup. This means it should not be considered when calculating
		 * SxactGlobalXmin.
		 */
		MySerializableXact->flags |= SXACT_FLAG_DOOMED;
		MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;

		/*
		 * If the transaction was previously prepared, but is now failing due
		 * to a ROLLBACK PREPARED or (hopefully very rare) error after the
		 * prepare, clear the prepared flag.  This simplifies conflict
		 * checking.
		 */
		MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
	}

	if (!topLevelIsDeclaredReadOnly)
	{
		Assert(PredXact->WritableSxactCount > 0);
		if (--(PredXact->WritableSxactCount) == 0)
		{
			/*
			 * Release predicate locks and rw-conflicts in for all committed
			 * transactions.  There are no longer any transactions which might
			 * conflict with the locks and no chance for new transactions to
			 * overlap.  Similarly, existing conflicts in can't cause pivots,
			 * and any conflicts in which could have completed a dangerous
			 * structure would already have caused a rollback, so any
			 * remaining ones must be benign.
			 */
			PredXact->CanPartialClearThrough = PredXact->LastSxactCommitSeqNo;
		}
	}
	else
	{
		/*
		 * Read-only transactions: clear the list of transactions that might
		 * make us unsafe. Note that we use 'inLink' for the iteration as
		 * opposed to 'outLink' for the r/w xacts.
		 */
		possibleUnsafeConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
						 &MySerializableXact->possibleUnsafeConflicts,
						 offsetof(RWConflictData, inLink));
		while (possibleUnsafeConflict)
		{
			nextConflict = (RWConflict)
				SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
							 &possibleUnsafeConflict->inLink,
							 offsetof(RWConflictData, inLink));

			Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
			Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);

			ReleaseRWConflict(possibleUnsafeConflict);

			possibleUnsafeConflict = nextConflict;
		}
	}

	/* Check for conflict out to old committed transactions. */
	if (isCommit
		&& !SxactIsReadOnly(MySerializableXact)
		&& SxactHasSummaryConflictOut(MySerializableXact))
	{
		/*
		 * we don't know which old committed transaction we conflicted with,
		 * so be conservative and use FirstNormalSerCommitSeqNo here
		 */
		MySerializableXact->SeqNo.earliestOutConflictCommit =
			FirstNormalSerCommitSeqNo;
		MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
	}

	/*
	 * Release all outConflicts to committed transactions.  If we're rolling
	 * back clear them all.  Set SXACT_FLAG_CONFLICT_OUT if any point to
	 * previously committed transactions.
	 */
	conflict = (RWConflict)
		SHMQueueNext(&MySerializableXact->outConflicts,
					 &MySerializableXact->outConflicts,
					 offsetof(RWConflictData, outLink));
	while (conflict)
	{
		nextConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->outConflicts,
						 &conflict->outLink,
						 offsetof(RWConflictData, outLink));

		if (isCommit
			&& !SxactIsReadOnly(MySerializableXact)
			&& SxactIsCommitted(conflict->sxactIn))
		{
			if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
				|| conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
				MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
			MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
		}

		if (!isCommit
			|| SxactIsCommitted(conflict->sxactIn)
			|| (conflict->sxactIn->SeqNo.lastCommitBeforeSnapshot >= PredXact->LastSxactCommitSeqNo))
			ReleaseRWConflict(conflict);

		conflict = nextConflict;
	}

	/*
	 * Release all inConflicts from committed and read-only transactions. If
	 * we're rolling back, clear them all.
	 */
	conflict = (RWConflict)
		SHMQueueNext(&MySerializableXact->inConflicts,
					 &MySerializableXact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (conflict)
	{
		nextConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->inConflicts,
						 &conflict->inLink,
						 offsetof(RWConflictData, inLink));

		if (!isCommit
			|| SxactIsCommitted(conflict->sxactOut)
			|| SxactIsReadOnly(conflict->sxactOut))
			ReleaseRWConflict(conflict);

		conflict = nextConflict;
	}

	if (!topLevelIsDeclaredReadOnly)
	{
		/*
		 * Remove ourselves from the list of possible conflicts for concurrent
		 * READ ONLY transactions, flagging them as unsafe if we have a
		 * conflict out. If any are waiting DEFERRABLE transactions, wake them
		 * up if they are known safe or known unsafe.
		 */
		possibleUnsafeConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
						 &MySerializableXact->possibleUnsafeConflicts,
						 offsetof(RWConflictData, outLink));
		while (possibleUnsafeConflict)
		{
			nextConflict = (RWConflict)
				SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
							 &possibleUnsafeConflict->outLink,
							 offsetof(RWConflictData, outLink));

			roXact = possibleUnsafeConflict->sxactIn;
			Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
			Assert(SxactIsReadOnly(roXact));

			/* Mark conflicted if necessary. */
			if (isCommit
				&& MyXactDidWrite
				&& SxactHasConflictOut(MySerializableXact)
				&& (MySerializableXact->SeqNo.earliestOutConflictCommit
					<= roXact->SeqNo.lastCommitBeforeSnapshot))
			{
				/*
				 * This releases possibleUnsafeConflict (as well as all other
				 * possible conflicts for roXact)
				 */
				FlagSxactUnsafe(roXact);
			}
			else
			{
				ReleaseRWConflict(possibleUnsafeConflict);

				/*
				 * If we were the last possible conflict, flag it safe. The
				 * transaction can now safely release its predicate locks (but
				 * that transaction's backend has to do that itself).
				 */
				if (SHMQueueEmpty(&roXact->possibleUnsafeConflicts))
					roXact->flags |= SXACT_FLAG_RO_SAFE;
			}

			/*
			 * Wake up the process for a waiting DEFERRABLE transaction if we
			 * now know it's either safe or conflicted.
			 */
			if (SxactIsDeferrableWaiting(roXact) &&
				(SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
				ProcSendSignal(roXact->pid);

			possibleUnsafeConflict = nextConflict;
		}
	}

	/*
	 * Check whether it's time to clean up old transactions. This can only be
	 * done when the last serializable transaction with the oldest xmin among
	 * serializable transactions completes.  We then find the "new oldest"
	 * xmin and purge any transactions which finished before this transaction
	 * was launched.
	 */
	needToClear = false;
	if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
	{
		Assert(PredXact->SxactGlobalXminCount > 0);
		if (--(PredXact->SxactGlobalXminCount) == 0)
		{
			SetNewSxactGlobalXmin();
			needToClear = true;
		}
	}

	LWLockRelease(SerializableXactHashLock);

	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);

	/* Add this to the list of transactions to check for later cleanup. */
	if (isCommit)
		SHMQueueInsertBefore(FinishedSerializableTransactions,
							 &MySerializableXact->finishedLink);

	if (!isCommit)
		ReleaseOneSerializableXact(MySerializableXact, false, false);

	LWLockRelease(SerializableFinishedListLock);

	if (needToClear)
		ClearOldPredicateLocks();

	MySerializableXact = InvalidSerializableXact;
	MyXactDidWrite = false;

	/* Delete per-transaction lock table */
	if (LocalPredicateLockHash != NULL)
	{
		hash_destroy(LocalPredicateLockHash);
		LocalPredicateLockHash = NULL;
	}
}

/*
 * Clear old predicate locks, belonging to committed transactions that are no
 * longer interesting to any in-progress transaction.
 */
static void
ClearOldPredicateLocks(void)
{
	SERIALIZABLEXACT *finishedSxact;
	PREDICATELOCK *predlock;

	/*
	 * Loop through finished transactions. They are in commit order, so we can
	 * stop as soon as we find one that's still interesting.
	 */
	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
	finishedSxact = (SERIALIZABLEXACT *)
		SHMQueueNext(FinishedSerializableTransactions,
					 FinishedSerializableTransactions,
					 offsetof(SERIALIZABLEXACT, finishedLink));
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	while (finishedSxact)
	{
		SERIALIZABLEXACT *nextSxact;

		nextSxact = (SERIALIZABLEXACT *)
			SHMQueueNext(FinishedSerializableTransactions,
						 &(finishedSxact->finishedLink),
						 offsetof(SERIALIZABLEXACT, finishedLink));
		if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
			|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
											 PredXact->SxactGlobalXmin))
		{
			/*
			 * This transaction committed before any in-progress transaction
			 * took its snapshot. It's no longer interesting.
			 */
			LWLockRelease(SerializableXactHashLock);
			SHMQueueDelete(&(finishedSxact->finishedLink));
			ReleaseOneSerializableXact(finishedSxact, false, false);
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}
		else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
				 && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
		{
			/*
			 * Any active transactions that took their snapshot before this
			 * transaction committed are read-only, so we can clear part of
			 * its state.
			 */
			LWLockRelease(SerializableXactHashLock);

			if (SxactIsReadOnly(finishedSxact))
			{
				/* A read-only transaction can be removed entirely */
				SHMQueueDelete(&(finishedSxact->finishedLink));
				ReleaseOneSerializableXact(finishedSxact, false, false);
			}
			else
			{
				/*
				 * A read-write transaction can only be partially cleared. We
				 * need to keep the SERIALIZABLEXACT but can release the
				 * SIREAD locks and conflicts in.
				 */
				ReleaseOneSerializableXact(finishedSxact, true, false);
			}

			PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}
		else
		{
			/* Still interesting. */
			break;
		}
		finishedSxact = nextSxact;
	}
	LWLockRelease(SerializableXactHashLock);

	/*
	 * Loop through predicate locks on dummy transaction for summarized data.
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&OldCommittedSxact->predicateLocks,
					 &OldCommittedSxact->predicateLocks,
					 offsetof(PREDICATELOCK, xactLink));
	while (predlock)
	{
		PREDICATELOCK *nextpredlock;
		bool		canDoPartialCleanup;

		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&OldCommittedSxact->predicateLocks,
						 &predlock->xactLink,
						 offsetof(PREDICATELOCK, xactLink));

		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		Assert(predlock->commitSeqNo != 0);
		Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
		canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
		LWLockRelease(SerializableXactHashLock);

		/*
		 * If this lock originally belonged to an old enough transaction, we
		 * can release it.
		 */
		if (canDoPartialCleanup)
		{
			PREDICATELOCKTAG tag;
			PREDICATELOCKTARGET *target;
			PREDICATELOCKTARGETTAG targettag;
			uint32		targettaghash;
			LWLock	   *partitionLock;

			tag = predlock->tag;
			target = tag.myTarget;
			targettag = target->tag;
			targettaghash = PredicateLockTargetTagHashCode(&targettag);
			partitionLock = PredicateLockHashPartitionLock(targettaghash);

			LWLockAcquire(partitionLock, LW_EXCLUSIVE);

			SHMQueueDelete(&(predlock->targetLink));
			SHMQueueDelete(&(predlock->xactLink));

			hash_search_with_hash_value(PredicateLockHash, &tag,
										PredicateLockHashCodeFromTargetHashCode(&tag,
																				targettaghash),
										HASH_REMOVE, NULL);
			RemoveTargetIfNoLongerUsed(target, targettaghash);

			LWLockRelease(partitionLock);
		}

		predlock = nextpredlock;
	}

	LWLockRelease(SerializablePredicateLockListLock);
	LWLockRelease(SerializableFinishedListLock);
}

/*
 * This is the normal way to delete anything from any of the predicate
 * locking hash tables.  Given a transaction which we know can be deleted:
 * delete all predicate locks held by that transaction and any predicate
 * lock targets which are now unreferenced by a lock; delete all conflicts
 * for the transaction; delete all xid values for the transaction; then
 * delete the transaction.
 *
 * When the partial flag is set, we can release all predicate locks and
 * in-conflict information -- we've established that there are no longer
 * any overlapping read write transactions for which this transaction could
 * matter -- but keep the transaction entry itself and any outConflicts.
 *
 * When the summarize flag is set, we've run short of room for sxact data
 * and must summarize to the SLRU.  Predicate locks are transferred to a
 * dummy "old" transaction, with duplicate locks on a single target
 * collapsing to a single lock with the "latest" commitSeqNo from among
 * the conflicting locks.
 */
static void
ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
						   bool summarize)
{
	PREDICATELOCK *predlock;
	SERIALIZABLEXIDTAG sxidtag;
	RWConflict	conflict,
				nextConflict;

	Assert(sxact != NULL);
	Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
	Assert(partial || !SxactIsOnFinishedList(sxact));
	Assert(LWLockHeldByMe(SerializableFinishedListLock));

	/*
	 * First release all the predicate locks held by this xact (or transfer
	 * them to OldCommittedSxact if summarize is true)
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(sxact->predicateLocks),
					 &(sxact->predicateLocks),
					 offsetof(PREDICATELOCK, xactLink));
	while (predlock)
	{
		PREDICATELOCK *nextpredlock;
		PREDICATELOCKTAG tag;
		SHM_QUEUE  *targetLink;
		PREDICATELOCKTARGET *target;
		PREDICATELOCKTARGETTAG targettag;
		uint32		targettaghash;
		LWLock	   *partitionLock;

		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(sxact->predicateLocks),
						 &(predlock->xactLink),
						 offsetof(PREDICATELOCK, xactLink));

		tag = predlock->tag;
		targetLink = &(predlock->targetLink);
		target = tag.myTarget;
		targettag = target->tag;
		targettaghash = PredicateLockTargetTagHashCode(&targettag);
		partitionLock = PredicateLockHashPartitionLock(targettaghash);

		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		SHMQueueDelete(targetLink);

		hash_search_with_hash_value(PredicateLockHash, &tag,
									PredicateLockHashCodeFromTargetHashCode(&tag,
																			targettaghash),
									HASH_REMOVE, NULL);
		if (summarize)
		{
			bool		found;

			/* Fold into dummy transaction list. */
			tag.myXact = OldCommittedSxact;
			predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
												   PredicateLockHashCodeFromTargetHashCode(&tag,
																						   targettaghash),
												   HASH_ENTER_NULL, &found);
			if (!predlock)
				ereport(ERROR,
						(errcode(ERRCODE_OUT_OF_MEMORY),
						 errmsg("out of shared memory"),
						 errhint("You might need to increase max_pred_locks_per_transaction.")));
			if (found)
			{
				Assert(predlock->commitSeqNo != 0);
				Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
				if (predlock->commitSeqNo < sxact->commitSeqNo)
					predlock->commitSeqNo = sxact->commitSeqNo;
			}
			else
			{
				SHMQueueInsertBefore(&(target->predicateLocks),
									 &(predlock->targetLink));
				SHMQueueInsertBefore(&(OldCommittedSxact->predicateLocks),
									 &(predlock->xactLink));
				predlock->commitSeqNo = sxact->commitSeqNo;
			}
		}
		else
			RemoveTargetIfNoLongerUsed(target, targettaghash);

		LWLockRelease(partitionLock);

		predlock = nextpredlock;
	}

	/*
	 * Rather than retail removal, just re-init the head after we've run
	 * through the list.
	 */
	SHMQueueInit(&sxact->predicateLocks);

	LWLockRelease(SerializablePredicateLockListLock);

	sxidtag.xid = sxact->topXid;
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Release all outConflicts (unless 'partial' is true) */
	if (!partial)
	{
		conflict = (RWConflict)
			SHMQueueNext(&sxact->outConflicts,
						 &sxact->outConflicts,
						 offsetof(RWConflictData, outLink));
		while (conflict)
		{
			nextConflict = (RWConflict)
				SHMQueueNext(&sxact->outConflicts,
							 &conflict->outLink,
							 offsetof(RWConflictData, outLink));
			if (summarize)
				conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
			ReleaseRWConflict(conflict);
			conflict = nextConflict;
		}
	}

	/* Release all inConflicts. */
	conflict = (RWConflict)
		SHMQueueNext(&sxact->inConflicts,
					 &sxact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (conflict)
	{
		nextConflict = (RWConflict)
			SHMQueueNext(&sxact->inConflicts,
						 &conflict->inLink,
						 offsetof(RWConflictData, inLink));
		if (summarize)
			conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
		ReleaseRWConflict(conflict);
		conflict = nextConflict;
	}

	/* Finally, get rid of the xid and the record of the transaction itself. */
	if (!partial)
	{
		if (sxidtag.xid != InvalidTransactionId)
			hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
		ReleasePredXact(sxact);
	}

	LWLockRelease(SerializableXactHashLock);
}

/*
 * Tests whether the given top level transaction is concurrent with
 * (overlaps) our current transaction.
 *
 * We need to identify the top level transaction for SSI, anyway, so pass
 * that to this function to save the overhead of checking the snapshot's
 * subxip array.
 */
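/*
 * Worked example (illustrative values): with a snapshot of xmin = 100,
 * xmax = 110, xip = {103, 107}, this returns
 *
 *		xid  95 -> false	(committed before the snapshot's xmin)
 *		xid 112 -> true		(at or beyond xmax, so not yet visible)
 *		xid 103 -> true		(listed as in progress at snapshot time)
 *		xid 105 -> false	(between xmin and xmax but not in xip,
 *							 so it committed before the snapshot)
 */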
static bool
XidIsConcurrent(TransactionId xid)
{
	Snapshot	snap;
	uint32		i;

	Assert(TransactionIdIsValid(xid));
	Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));

	snap = GetTransactionSnapshot();

	if (TransactionIdPrecedes(xid, snap->xmin))
		return false;

	if (TransactionIdFollowsOrEquals(xid, snap->xmax))
		return true;

	for (i = 0; i < snap->xcnt; i++)
	{
		if (xid == snap->xip[i])
			return true;
	}

	return false;
}

/*
 * CheckForSerializableConflictOut
 *		We are reading a tuple which has been modified.  If it is visible to
 *		us but has been deleted, that indicates a rw-conflict out.  If it's
 *		not visible and was created by a concurrent (overlapping)
 *		serializable transaction, that is also a rw-conflict out.
 *
 * We will determine the top level xid of the writing transaction with which
 * we may be in conflict, and check for overlap with our own transaction.
 * If the transactions overlap (i.e., they cannot see each other's writes),
 * then we have a conflict out.
 *
 * This function should be called just about anywhere in heapam.c where a
 * tuple has been read. The caller must hold at least a shared lock on the
 * buffer, because this function might set hint bits on the tuple. There is
 * currently no known reason to call this function from an index AM.
 */
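/*
 * A hedged sketch of the expected call pattern (the surrounding names are
 * assumptions, not a prescription): a heap access routine, after deciding
 * visibility for a fetched tuple and while still holding the buffer pin and
 * content lock, does
 *
 *		valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
 *		CheckForSerializableConflictOut(valid, relation, tuple,
 *										buffer, snapshot);
 *
 * so that both the visible-but-deleted and the invisible-but-concurrent
 * cases described above are examined for a rw-conflict out.
 */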
void
CheckForSerializableConflictOut(bool visible, Relation relation,
								HeapTuple tuple, Buffer buffer,
								Snapshot snapshot)
{
	TransactionId xid;
	SERIALIZABLEXIDTAG sxidtag;
	SERIALIZABLEXID *sxid;
	SERIALIZABLEXACT *sxact;
	HTSV_Result htsvResult;

	if (!SerializationNeededForRead(relation, snapshot))
		return;

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
	{
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
				 errhint("The transaction might succeed if retried.")));
	}

	/*
	 * Check to see whether the tuple has been written to by a concurrent
	 * transaction, either to create it not visible to us, or to delete it
	 * while it is visible to us.  The "visible" bool indicates whether the
	 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
	 * is going on with it.
	 *
	 * In the event of a concurrently inserted tuple that also happens to have
	 * been concurrently updated (by a separate transaction), the xmin of the
	 * tuple will be used -- not the updater's xid.
	 */
	htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
	switch (htsvResult)
	{
		case HEAPTUPLE_LIVE:
			if (visible)
				return;
			xid = HeapTupleHeaderGetXmin(tuple->t_data);
			break;
		case HEAPTUPLE_RECENTLY_DEAD:
		case HEAPTUPLE_DELETE_IN_PROGRESS:
			if (visible)
				xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
			else
				xid = HeapTupleHeaderGetXmin(tuple->t_data);

			if (TransactionIdPrecedes(xid, TransactionXmin))
			{
				/* This is like the HEAPTUPLE_DEAD case */
				Assert(!visible);
				return;
			}
			break;
		case HEAPTUPLE_INSERT_IN_PROGRESS:
			xid = HeapTupleHeaderGetXmin(tuple->t_data);
			break;
		case HEAPTUPLE_DEAD:
			Assert(!visible);
			return;
		default:

			/*
			 * The only way to get to this default clause is if a new value is
			 * added to the enum type without adding it to this switch
			 * statement.  That's a bug, so elog.
			 */
			elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);

			/*
			 * In spite of having all enum values covered and calling elog on
			 * this default, some compilers think this is a code path which
			 * allows xid to be used below without initialization. Silence
			 * that warning.
			 */
			xid = InvalidTransactionId;
	}
	Assert(TransactionIdIsValid(xid));
	Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));

	/*
	 * Find top level xid.  Bail out if xid is too early to be a conflict, or
	 * if it's our own xid.
	 */
	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
		return;
	xid = SubTransGetTopmostTransaction(xid);
	if (TransactionIdPrecedes(xid, TransactionXmin))
		return;
	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
		return;

	/*
	 * Find sxact or summarized info for the top level xid.
	 */
	sxidtag.xid = xid;
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
	sxid = (SERIALIZABLEXID *)
		hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
	if (!sxid)
	{
		/*
		 * Transaction not found in "normal" SSI structures.  Check whether it
		 * got pushed out to SLRU storage for "old committed" transactions.
		 */
		SerCommitSeqNo conflictCommitSeqNo;

		conflictCommitSeqNo = OldSerXidGetMinConflictCommitSeqNo(xid);
		if (conflictCommitSeqNo != 0)
		{
			if (conflictCommitSeqNo != InvalidSerCommitSeqNo
				&& (!SxactIsReadOnly(MySerializableXact)
					|| conflictCommitSeqNo
					<= MySerializableXact->SeqNo.lastCommitBeforeSnapshot))
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to read/write dependencies among transactions"),
						 errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
						 errhint("The transaction might succeed if retried.")));

			if (SxactHasSummaryConflictIn(MySerializableXact)
				|| !SHMQueueEmpty(&MySerializableXact->inConflicts))
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to read/write dependencies among transactions"),
						 errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
						 errhint("The transaction might succeed if retried.")));

			MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
		}

		/* It's not serializable or otherwise not important. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}
	sxact = sxid->myXact;
	Assert(TransactionIdEquals(sxact->topXid, xid));
	if (sxact == MySerializableXact || SxactIsDoomed(sxact))
	{
		/* Can't conflict with ourself or a transaction that will roll back. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	/*
	 * We have a conflict out to a transaction which has a conflict out to a
	 * summarized transaction.  That summarized transaction must have
	 * committed first, and we can't tell when it committed in relation to our
	 * snapshot acquisition, so something needs to be canceled.
	 */
	if (SxactHasSummaryConflictOut(sxact))
	{
		if (!SxactIsPrepared(sxact))
		{
			sxact->flags |= SXACT_FLAG_DOOMED;
			LWLockRelease(SerializableXactHashLock);
			return;
		}
		else
		{
			LWLockRelease(SerializableXactHashLock);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
					 errhint("The transaction might succeed if retried.")));
		}
	}

	/*
	 * If this is a read-only transaction and the writing transaction has
	 * committed, and it doesn't have a rw-conflict to a transaction which
	 * committed before it, no conflict.
	 */
	if (SxactIsReadOnly(MySerializableXact)
		&& SxactIsCommitted(sxact)
		&& !SxactHasSummaryConflictOut(sxact)
		&& (!SxactHasConflictOut(sxact)
			|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
	{
		/* Read-only transaction will appear to run first.  No conflict. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	if (!XidIsConcurrent(xid))
	{
		/* This write was already in our snapshot; no conflict. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	if (RWConflictExists(MySerializableXact, sxact))
	{
		/* We don't want duplicate conflict records in the list. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	/*
	 * Flag the conflict.  But first, if this conflict creates a dangerous
	 * structure, ereport an error.
	 */
	FlagRWConflict(MySerializableXact, sxact);
	LWLockRelease(SerializableXactHashLock);
}

/*
 * Check a particular target for rw-dependency conflict in. A subroutine of
 * CheckForSerializableConflictIn().
 */
static void
CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
{
	uint32		targettaghash;
	LWLock	   *partitionLock;
	PREDICATELOCKTARGET *target;
	PREDICATELOCK *predlock;
	PREDICATELOCK *mypredlock = NULL;
	PREDICATELOCKTAG mypredlocktag;

	Assert(MySerializableXact != InvalidSerializableXact);

	/*
	 * The same hash and LW lock apply to the lock target and the lock itself.
	 */
	targettaghash = PredicateLockTargetTagHashCode(targettag);
	partitionLock = PredicateLockHashPartitionLock(targettaghash);
	LWLockAcquire(partitionLock, LW_SHARED);
	target = (PREDICATELOCKTARGET *)
		hash_search_with_hash_value(PredicateLockTargetHash,
									targettag, targettaghash,
									HASH_FIND, NULL);
	if (!target)
	{
		/* Nothing has this target locked; we're done here. */
		LWLockRelease(partitionLock);
		return;
	}

	/*
	 * Each lock for an overlapping transaction represents a conflict: a
	 * rw-dependency in to this transaction.
	 */
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(target->predicateLocks),
					 &(target->predicateLocks),
					 offsetof(PREDICATELOCK, targetLink));
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	while (predlock)
	{
		SHM_QUEUE  *predlocktargetlink;
		PREDICATELOCK *nextpredlock;
		SERIALIZABLEXACT *sxact;

		predlocktargetlink = &(predlock->targetLink);
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(target->predicateLocks),
						 predlocktargetlink,
						 offsetof(PREDICATELOCK, targetLink));

		sxact = predlock->tag.myXact;
		if (sxact == MySerializableXact)
		{
			/*
			 * If we're getting a write lock on a tuple, we don't need a
			 * predicate (SIREAD) lock on the same tuple. We can safely remove
			 * our SIREAD lock, but we'll defer doing so until after the loop
			 * because that requires upgrading to an exclusive partition lock.
			 *
			 * We can't use this optimization within a subtransaction because
			 * the subtransaction could roll back, and we would be left
			 * without any lock at the top level.
			 */
			if (!IsSubTransaction()
				&& GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
			{
				mypredlock = predlock;
				mypredlocktag = predlock->tag;
			}
		}
		else if (!SxactIsDoomed(sxact)
				 && (!SxactIsCommitted(sxact)
					 || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
											  sxact->finishedBefore))
				 && !RWConflictExists(sxact, MySerializableXact))
		{
			LWLockRelease(SerializableXactHashLock);
			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

			/*
			 * Re-check after getting exclusive lock because the other
			 * transaction may have flagged a conflict.
			 */
			if (!SxactIsDoomed(sxact)
				&& (!SxactIsCommitted(sxact)
					|| TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
											 sxact->finishedBefore))
				&& !RWConflictExists(sxact, MySerializableXact))
			{
				FlagRWConflict(sxact, MySerializableXact);
			}

			LWLockRelease(SerializableXactHashLock);
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}

		predlock = nextpredlock;
	}
	LWLockRelease(SerializableXactHashLock);
	LWLockRelease(partitionLock);

	/*
	 * If we found one of our own SIREAD locks to remove, remove it now.
	 *
	 * At this point our transaction already has an ExclusiveRowLock on the
	 * relation, so we are OK to drop the predicate lock on the tuple, if
	 * found, without fearing that another write against the tuple will occur
	 * before the MVCC information makes it to the buffer.
	 */
	if (mypredlock != NULL)
	{
		uint32		predlockhashcode;
		PREDICATELOCK *rmpredlock;

		LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
		LWLockAcquire(partitionLock, LW_EXCLUSIVE);
		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

		/*
		 * Remove the predicate lock from shared memory, if it wasn't removed
		 * while the locks were released.  One way that could happen is from
		 * autovacuum cleaning up an index.
		 */
		predlockhashcode = PredicateLockHashCodeFromTargetHashCode
			(&mypredlocktag, targettaghash);
		rmpredlock = (PREDICATELOCK *)
			hash_search_with_hash_value(PredicateLockHash,
										&mypredlocktag,
										predlockhashcode,
										HASH_FIND, NULL);
		if (rmpredlock != NULL)
		{
			Assert(rmpredlock == mypredlock);

			SHMQueueDelete(&(mypredlock->targetLink));
			SHMQueueDelete(&(mypredlock->xactLink));

			rmpredlock = (PREDICATELOCK *)
				hash_search_with_hash_value(PredicateLockHash,
											&mypredlocktag,
											predlockhashcode,
											HASH_REMOVE, NULL);
			Assert(rmpredlock == mypredlock);

			RemoveTargetIfNoLongerUsed(target, targettaghash);
		}

		LWLockRelease(SerializableXactHashLock);
		LWLockRelease(partitionLock);
		LWLockRelease(SerializablePredicateLockListLock);

		if (rmpredlock != NULL)
		{
			/*
			 * Remove entry in local lock table if it exists. It's OK if it
			 * doesn't exist; that means the lock was transferred to a new
			 * target by a different backend.
			 */
			hash_search_with_hash_value(LocalPredicateLockHash,
										targettag, targettaghash,
										HASH_REMOVE, NULL);

			DecrementParentLocks(targettag);
		}
	}
}

/*
 * CheckForSerializableConflictIn
 *		We are writing the given tuple.  If that indicates a rw-conflict
 *		in from another serializable transaction, take appropriate action.
 *
 * Skip checking for any granularity for which a parameter is missing.
 *
 * A tuple update or delete is in conflict if we have a predicate lock
 * against the relation or page in which the tuple exists, or against the
 * tuple itself.
 */
void
CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
							   Buffer buffer)
{
	PREDICATELOCKTARGETTAG targettag;

	if (!SerializationNeededForWrite(relation))
		return;

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
				 errhint("The transaction might succeed if retried.")));

	/*
	 * We're doing a write which might cause rw-conflicts now or later.
	 * Memorize that fact.
	 */
	MyXactDidWrite = true;

	/*
	 * It is important that we check for locks from the finest granularity to
	 * the coarsest granularity, so that granularity promotion doesn't cause
	 * us to miss a lock.  The new (coarser) lock will be acquired before the
	 * old (finer) locks are released.
	 *
	 * It is not possible to take and hold a lock across the checks for all
	 * granularities because each target could be in a separate partition.
	 */
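	/*
	 * Concretely: suppose another backend is promoting its tuple locks on
	 * this page to a relation lock.  Promotion installs the relation lock
	 * before deleting the tuple locks, so however our three checks
	 * interleave with it, at least one granularity still has the lock when
	 * we look.  If we scanned coarse-to-fine instead, the promotion could
	 * slip between our relation check (too early to see the new lock) and
	 * our tuple check (too late, the fine lock already gone), and the
	 * rw-conflict would be missed entirely.
	 */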
	if (tuple != NULL)
	{
		SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
										 relation->rd_node.dbNode,
										 relation->rd_id,
										 ItemPointerGetBlockNumber(&(tuple->t_self)),
										 ItemPointerGetOffsetNumber(&(tuple->t_self)));
		CheckTargetForConflictsIn(&targettag);
	}

	if (BufferIsValid(buffer))
	{
		SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
										relation->rd_node.dbNode,
										relation->rd_id,
										BufferGetBlockNumber(buffer));
		CheckTargetForConflictsIn(&targettag);
	}

	SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
										relation->rd_node.dbNode,
										relation->rd_id);
	CheckTargetForConflictsIn(&targettag);
}

/*
 * CheckTableForSerializableConflictIn
 *		The entire table is going through a DDL-style logical mass delete
 *		like TRUNCATE or DROP TABLE.  If that causes a rw-conflict in from
 *		another serializable transaction, take appropriate action.
 *
 * While these operations do not operate entirely within the bounds of
 * snapshot isolation, they can occur inside a serializable transaction, and
 * will logically occur after any reads which saw rows which were destroyed
 * by these operations, so we do what we can to serialize properly under
 * SSI.
 *
 * The relation passed in must be a heap relation. Any predicate lock of any
 * granularity on the heap will cause a rw-conflict in to this transaction.
 * Predicate locks on indexes do not matter because they only exist to guard
 * against conflicting inserts into the index, and this is a mass *delete*.
 * When a table is truncated or dropped, the index will also be truncated
 * or dropped, and we'll deal with locks on the index when that happens.
 *
 * Dropping or truncating a table also needs to drop any existing predicate
 * locks on heap tuples or pages, because they're about to go away.  That
 * should be done before altering the predicate locks, because the
 * transaction could be rolled back because of a conflict, in which case the
 * lock changes are not needed.  (At the moment, we don't actually bother to
 * drop the existing locks on a dropped or truncated table.  That might lead
 * to some false positives, but it doesn't seem worth the trouble.)
 */
void
CheckTableForSerializableConflictIn(Relation relation)
{
	HASH_SEQ_STATUS seqstat;
	PREDICATELOCKTARGET *target;
	Oid			dbId;
	Oid			heapId;
	int			i;

	/*
	 * Bail out quickly if there are no serializable transactions running.
	 * It's safe to check this without taking locks because the caller is
	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
	 * would matter here can be acquired while that is held.
	 */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
		return;

	if (!SerializationNeededForWrite(relation))
		return;

	/*
	 * We're doing a write which might cause rw-conflicts now or later.
	 * Memorize that fact.
	 */
	MyXactDidWrite = true;

	Assert(relation->rd_index == NULL); /* not an index relation */

	dbId = relation->rd_node.dbNode;
	heapId = relation->rd_id;

	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Scan through target list */
	hash_seq_init(&seqstat, PredicateLockTargetHash);

	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
	{
		PREDICATELOCK *predlock;

		/*
		 * Check whether this is a target which needs attention.
		 */
		if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
			continue;			/* wrong relation id */
		if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
			continue;			/* wrong database id */

		/*
		 * Loop through locks for this target and flag conflicts.
		 */
		predlock = (PREDICATELOCK *)
			SHMQueueNext(&(target->predicateLocks),
						 &(target->predicateLocks),
						 offsetof(PREDICATELOCK, targetLink));
		while (predlock)
		{
			PREDICATELOCK *nextpredlock;

			nextpredlock = (PREDICATELOCK *)
				SHMQueueNext(&(target->predicateLocks),
							 &(predlock->targetLink),
							 offsetof(PREDICATELOCK, targetLink));

			if (predlock->tag.myXact != MySerializableXact
				&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
			{
				FlagRWConflict(predlock->tag.myXact, MySerializableXact);
			}

			predlock = nextpredlock;
		}
	}

	/* Release locks in reverse order */
	LWLockRelease(SerializableXactHashLock);
	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
	LWLockRelease(SerializablePredicateLockListLock);
}


/*
 * Flag a rw-dependency between two serializable transactions.
 *
 * The caller is responsible for ensuring that we have a LW lock on
 * the transaction hash table.
 */
static void
FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
{
	Assert(reader != writer);

	/* First, see if this conflict causes failure. */
	OnConflict_CheckForSerializationFailure(reader, writer);

	/* Actually do the conflict flagging. */
	if (reader == OldCommittedSxact)
		writer->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
	else if (writer == OldCommittedSxact)
		reader->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
	else
		SetRWConflict(reader, writer);
}

/*----------------------------------------------------------------------------
 * We are about to add a RW-edge to the dependency graph - check that we don't
 * introduce a dangerous structure by doing so, and abort one of the
 * transactions if so.
 *
 * A serialization failure can only occur if there is a dangerous structure
 * in the dependency graph:
 *
 *		Tin ------> Tpivot ------> Tout
 *			  rw			 rw
 *
 * Furthermore, Tout must commit first.
 *
 * One more optimization is that if Tin is declared READ ONLY (or commits
 * without writing), we can only have a problem if Tout committed before Tin
 * acquired its snapshot.
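 *
 * As a concrete (hypothetical) illustration, consider simple write skew
 * between two serializable transactions over rows r1 and r2:
 *
 *		T1: reads r2, then updates r1
 *		T2: reads r1, then updates r2, and commits first
 *
 * Each read paired with the other transaction's concurrent write forms a
 * rw-edge, so T1 rw-> T2 and T2 rw-> T1.  T1 is then a pivot whose
 * out-conflict (T2) committed first; since T2 is already committed, T1
 * must be canceled with a serialization failure.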
 *----------------------------------------------------------------------------
 */
static void
OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
										SERIALIZABLEXACT *writer)
{
	bool		failure;
	RWConflict	conflict;

	Assert(LWLockHeldByMe(SerializableXactHashLock));

	failure = false;

	/*------------------------------------------------------------------------
	 * Check for already-committed writer with rw-conflict out flagged
	 * (conflict-flag on W means that T2 committed before W):
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * That is a dangerous structure, so we must abort.  (Since the writer
	 * has already committed, we must be the reader.)
	 *------------------------------------------------------------------------
	 */
	if (SxactIsCommitted(writer)
		&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
		failure = true;

	/*------------------------------------------------------------------------
	 * Check whether the writer has become a pivot with an out-conflict
	 * committed transaction (T2), and T2 committed first:
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * Because T2 must've committed first, there is no anomaly if:
	 * - the reader committed before T2
	 * - the writer committed before T2
	 * - the reader is a READ ONLY transaction and the reader was concurrent
	 *	 with T2 (= reader acquired its snapshot before T2 committed)
	 *
	 * We also handle the case that T2 is prepared but not yet committed
	 * here.  In that case T2 has already checked for conflicts, so if it
	 * commits first, making the above conflict real, it's too late for it
	 * to abort.
	 *------------------------------------------------------------------------
	 */
	if (!failure)
	{
		if (SxactHasSummaryConflictOut(writer))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &writer->outConflicts,
							 offsetof(RWConflictData, outLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t2 = conflict->sxactIn;

			if (SxactIsPrepared(t2)
				&& (!SxactIsCommitted(reader)
					|| t2->prepareSeqNo <= reader->commitSeqNo)
				&& (!SxactIsCommitted(writer)
					|| t2->prepareSeqNo <= writer->commitSeqNo)
				&& (!SxactIsReadOnly(reader)
					|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &conflict->outLink,
							 offsetof(RWConflictData, outLink));
		}
	}

	/*------------------------------------------------------------------------
	 * Check whether the reader has become a pivot with a writer
	 * that's committed (or prepared):
	 *
	 *		T0 ------> R ------> W
	 *			 rw		   rw
	 *
	 * Because W must've committed first for an anomaly to occur, there is no
	 * anomaly if:
	 * - T0 committed before the writer
	 * - T0 is READ ONLY, and overlaps the writer
	 *------------------------------------------------------------------------
	 */
	if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
	{
		if (SxactHasSummaryConflictIn(reader))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &reader->inConflicts,
							 offsetof(RWConflictData, inLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t0 = conflict->sxactOut;

			if (!SxactIsDoomed(t0)
				&& (!SxactIsCommitted(t0)
					|| t0->commitSeqNo >= writer->prepareSeqNo)
				&& (!SxactIsReadOnly(t0)
					|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &conflict->inLink,
							 offsetof(RWConflictData, inLink));
		}
	}

	if (failure)
	{
		/*
		 * We have to kill a transaction to avoid a possible anomaly from
		 * occurring.  If the writer is us, we can just ereport() to cause a
		 * transaction abort.  Otherwise we flag the writer for termination,
		 * causing it to abort when it tries to commit.  However, if the
		 * writer has already prepared, we can't abort it anymore, so we
		 * have to kill the reader instead.
		 */
		if (MySerializableXact == writer)
		{
			LWLockRelease(SerializableXactHashLock);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
					 errhint("The transaction might succeed if retried.")));
		}
		else if (SxactIsPrepared(writer))
		{
			LWLockRelease(SerializableXactHashLock);

			/* if we're not the writer, we have to be the reader */
			Assert(MySerializableXact == reader);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
					 errhint("The transaction might succeed if retried.")));
		}
		writer->flags |= SXACT_FLAG_DOOMED;
	}
}

/*
 * PreCommit_CheckForSerializationFailure
 *		Check for dangerous structures in a serializable transaction
 *		at commit.
 *
 * We're checking for a dangerous structure as each conflict is recorded.
 * The only way we could have a problem at commit is if this is the "out"
 * side of a pivot, and neither the "in" side nor the pivot has yet
 * committed.
 *
 * If a dangerous structure is found, the pivot (the near conflict) is
 * marked for death, because rolling back another transaction might mean
 * that we flail without ever making progress.  This transaction is
 * committing writes, so letting it commit ensures progress.  If we
 * canceled the far conflict, it might immediately fail again on retry.
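 *
 * For example (an illustrative sketch): given T0 --rw--> P --rw--> us,
 * with us committing first and neither P nor T0 committed yet, P is the
 * pivot; we flag P with SXACT_FLAG_DOOMED and go on to commit, and P then
 * fails its own commit attempt instead of us failing ours.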
 */
void
PreCommit_CheckForSerializationFailure(void)
{
	RWConflict	nearConflict;

	if (MySerializableXact == InvalidSerializableXact)
		return;

	Assert(IsolationIsSerializable());

	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
	{
		LWLockRelease(SerializableXactHashLock);
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
				 errhint("The transaction might succeed if retried.")));
	}

	nearConflict = (RWConflict)
		SHMQueueNext(&MySerializableXact->inConflicts,
					 &MySerializableXact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (nearConflict)
	{
		if (!SxactIsCommitted(nearConflict->sxactOut)
			&& !SxactIsDoomed(nearConflict->sxactOut))
		{
			RWConflict	farConflict;

			farConflict = (RWConflict)
				SHMQueueNext(&nearConflict->sxactOut->inConflicts,
							 &nearConflict->sxactOut->inConflicts,
							 offsetof(RWConflictData, inLink));
			while (farConflict)
			{
				if (farConflict->sxactOut == MySerializableXact
					|| (!SxactIsCommitted(farConflict->sxactOut)
						&& !SxactIsReadOnly(farConflict->sxactOut)
						&& !SxactIsDoomed(farConflict->sxactOut)))
				{
					/*
					 * Normally, we kill the pivot transaction to make sure we
					 * make progress if the failing transaction is retried.
					 * However, we can't kill it if it's already prepared, so
					 * in that case we cancel ourselves instead.
					 */
					if (SxactIsPrepared(nearConflict->sxactOut))
					{
						LWLockRelease(SerializableXactHashLock);
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to read/write dependencies among transactions"),
								 errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
								 errhint("The transaction might succeed if retried.")));
					}
					nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
					break;
				}
				farConflict = (RWConflict)
					SHMQueueNext(&nearConflict->sxactOut->inConflicts,
								 &farConflict->inLink,
								 offsetof(RWConflictData, inLink));
			}
		}

		nearConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->inConflicts,
						 &nearConflict->inLink,
						 offsetof(RWConflictData, inLink));
	}

	MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
	MySerializableXact->flags |= SXACT_FLAG_PREPARED;

	LWLockRelease(SerializableXactHashLock);
}

/*------------------------------------------------------------------------*/

/*
 * Two-phase commit support
 */
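
/*
 * A rough sketch of how these entry points fit together (they are reached
 * via the TWOPHASE_RM_PREDICATELOCK_ID record callbacks and the
 * transaction machinery): AtPrepare_PredicateLocks writes the 2PC state
 * file records during PREPARE TRANSACTION; PostPrepare_PredicateLocks
 * then clears this backend's local state; PredicateLockTwoPhaseFinish
 * runs at COMMIT/ROLLBACK PREPARED to release the prepared transaction's
 * predicate locks; and predicatelock_twophase_recover rebuilds shared
 * memory state from the saved records when prepared transactions are
 * reloaded during recovery.
 */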

/*
 * AtPrepare_PredicateLocks
 *		Do the preparatory work for a PREPARE: make 2PC state file
 *		records for all predicate locks currently held.
 */
void
AtPrepare_PredicateLocks(void)
{
	PREDICATELOCK *predlock;
	SERIALIZABLEXACT *sxact;
	TwoPhasePredicateRecord record;
	TwoPhasePredicateXactRecord *xactRecord;
	TwoPhasePredicateLockRecord *lockRecord;

	sxact = MySerializableXact;
	xactRecord = &(record.data.xactRecord);
	lockRecord = &(record.data.lockRecord);

	if (MySerializableXact == InvalidSerializableXact)
		return;

	/* Generate an xact record for our SERIALIZABLEXACT */
	record.type = TWOPHASEPREDICATERECORD_XACT;
	xactRecord->xmin = MySerializableXact->xmin;
	xactRecord->flags = MySerializableXact->flags;

	/*
	 * Note that we don't include the in- and out-conflict lists in the
	 * statefile, because new conflicts can be added even after the
	 * transaction prepares.  We'll just make a conservative assumption
	 * during recovery instead.
	 */

	RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
						   &record, sizeof(record));

	/*
	 * Generate a lock record for each lock.
	 *
	 * To do this, we need to walk the predicate lock list in our sxact rather
	 * than using the local predicate lock table because the latter is not
	 * guaranteed to be accurate.
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);

	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(sxact->predicateLocks),
					 &(sxact->predicateLocks),
					 offsetof(PREDICATELOCK, xactLink));

	while (predlock != NULL)
	{
		record.type = TWOPHASEPREDICATERECORD_LOCK;
		lockRecord->target = predlock->tag.myTarget->tag;

		RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
							   &record, sizeof(record));

		predlock = (PREDICATELOCK *)
			SHMQueueNext(&(sxact->predicateLocks),
						 &(predlock->xactLink),
						 offsetof(PREDICATELOCK, xactLink));
	}

	LWLockRelease(SerializablePredicateLockListLock);
}

/*
 * PostPrepare_PredicateLocks
 *		Clean up after successful PREPARE.  Unlike the non-predicate
 *		lock manager, we do not need to transfer locks to a dummy
 *		PGPROC because our SERIALIZABLEXACT will stay around
 *		anyway.  We only need to clean up our local state.
 */
void
PostPrepare_PredicateLocks(TransactionId xid)
{
	if (MySerializableXact == InvalidSerializableXact)
		return;

	Assert(SxactIsPrepared(MySerializableXact));

	MySerializableXact->pid = 0;

	hash_destroy(LocalPredicateLockHash);
	LocalPredicateLockHash = NULL;

	MySerializableXact = InvalidSerializableXact;
	MyXactDidWrite = false;
}

/*
 * PredicateLockTwoPhaseFinish
 *		Release a prepared transaction's predicate locks once it
 *		commits or aborts.
 */
void
PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
{
	SERIALIZABLEXID *sxid;
	SERIALIZABLEXIDTAG sxidtag;

	sxidtag.xid = xid;

	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	sxid = (SERIALIZABLEXID *)
		hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
	LWLockRelease(SerializableXactHashLock);

	/* xid will not be found if it wasn't a serializable transaction */
	if (sxid == NULL)
		return;

	/* Release its locks */
	MySerializableXact = sxid->myXact;
	MyXactDidWrite = true;		/* conservatively assume that we wrote
								 * something */
	ReleasePredicateLocks(isCommit);
}

/*
 * Re-acquire a predicate lock belonging to a transaction that was prepared.
 */
void
predicatelock_twophase_recover(TransactionId xid, uint16 info,
							   void *recdata, uint32 len)
{
	TwoPhasePredicateRecord *record;

	Assert(len == sizeof(TwoPhasePredicateRecord));

	record = (TwoPhasePredicateRecord *) recdata;

	Assert((record->type == TWOPHASEPREDICATERECORD_XACT) ||
		   (record->type == TWOPHASEPREDICATERECORD_LOCK));

	if (record->type == TWOPHASEPREDICATERECORD_XACT)
	{
		/* Per-transaction record. Set up a SERIALIZABLEXACT. */
		TwoPhasePredicateXactRecord *xactRecord;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXIDTAG sxidtag;
		bool		found;

		xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;

		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		sxact = CreatePredXact();
		if (!sxact)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory")));

		/* vxid for a prepared xact is InvalidBackendId/xid; no pid */
		sxact->vxid.backendId = InvalidBackendId;
		sxact->vxid.localTransactionId = (LocalTransactionId) xid;
		sxact->pid = 0;

		/* a prepared xact hasn't committed yet */
		sxact->prepareSeqNo = RecoverySerCommitSeqNo;
		sxact->commitSeqNo = InvalidSerCommitSeqNo;
		sxact->finishedBefore = InvalidTransactionId;

		sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;

		/*
		 * Don't need to track this; no transactions running at the time the
		 * recovered xact started are still active, except possibly other
		 * prepared xacts, and we don't care whether those are RO_SAFE or
		 * not.
		 */
		SHMQueueInit(&(sxact->possibleUnsafeConflicts));

		SHMQueueInit(&(sxact->predicateLocks));
		SHMQueueElemInit(&(sxact->finishedLink));

		sxact->topXid = xid;
		sxact->xmin = xactRecord->xmin;
		sxact->flags = xactRecord->flags;
		Assert(SxactIsPrepared(sxact));
		if (!SxactIsReadOnly(sxact))
		{
			++(PredXact->WritableSxactCount);
			Assert(PredXact->WritableSxactCount <=
				   (MaxBackends + max_prepared_xacts));
		}

		/*
		 * We don't know whether the transaction had any conflicts or not, so
		 * we'll conservatively assume that it had both a conflict in and a
		 * conflict out, and represent that with the summary conflict flags.
		 */
		SHMQueueInit(&(sxact->outConflicts));
		SHMQueueInit(&(sxact->inConflicts));
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;

		/* Register the transaction's xid */
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
											   &sxidtag,
											   HASH_ENTER, &found);
		Assert(sxid != NULL);
		Assert(!found);
		sxid->myXact = (SERIALIZABLEXACT *) sxact;

		/*
		 * Update global xmin.  Note that this is a special case compared to
		 * registering a normal transaction, because the global xmin might go
		 * backwards.  That's OK, because until recovery is over we're not
		 * going to complete any transactions or create any non-prepared
		 * transactions, so there's no danger of throwing away state that is
		 * still needed.
		 */
		if ((!TransactionIdIsValid(PredXact->SxactGlobalXmin)) ||
			(TransactionIdFollows(PredXact->SxactGlobalXmin, sxact->xmin)))
		{
			PredXact->SxactGlobalXmin = sxact->xmin;
			PredXact->SxactGlobalXminCount = 1;
			OldSerXidSetActiveSerXmin(sxact->xmin);
		}
		else if (TransactionIdEquals(sxact->xmin, PredXact->SxactGlobalXmin))
		{
			Assert(PredXact->SxactGlobalXminCount > 0);
			PredXact->SxactGlobalXminCount++;
		}

		LWLockRelease(SerializableXactHashLock);
	}
	else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
	{
		/* Lock record. Recreate the PREDICATELOCK */
		TwoPhasePredicateLockRecord *lockRecord;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXIDTAG sxidtag;
		uint32		targettaghash;

		lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
		targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);

		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *)
			hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
		LWLockRelease(SerializableXactHashLock);

		Assert(sxid != NULL);
		sxact = sxid->myXact;
		Assert(sxact != InvalidSerializableXact);

		CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
	}
}