1 /*-------------------------------------------------------------------------
2 *
3 * predicate.c
4 * POSTGRES predicate locking
5 * to support full serializable transaction isolation
6 *
7 *
8 * The approach taken is to implement Serializable Snapshot Isolation (SSI)
9 * as initially described in this paper:
10 *
11 * Michael J. Cahill, Uwe Röhm, and Alan D. Fekete. 2008.
12 * Serializable isolation for snapshot databases.
13 * In SIGMOD '08: Proceedings of the 2008 ACM SIGMOD
14 * international conference on Management of data,
15 * pages 729-738, New York, NY, USA. ACM.
16 * http://doi.acm.org/10.1145/1376616.1376690
17 *
18 * and further elaborated in Cahill's doctoral thesis:
19 *
20 * Michael James Cahill. 2009.
21 * Serializable Isolation for Snapshot Databases.
22 * Sydney Digital Theses.
23 * University of Sydney, School of Information Technologies.
24 * http://hdl.handle.net/2123/5353
25 *
26 *
27 * Predicate locks for Serializable Snapshot Isolation (SSI) are SIREAD
28 * locks, which are so different from normal locks that a distinct set of
29 * structures is required to handle them. They are needed to detect
30 * rw-conflicts when the read happens before the write. (When the write
31 * occurs first, the reading transaction can check for a conflict by
32 * examining the MVCC data.)
33 *
34 * (1) Besides tuples actually read, they must cover ranges of tuples
35 * which would have been read based on the predicate. This will
36 * require modelling the predicates through locks against database
37 * objects such as pages, index ranges, or entire tables.
38 *
39 * (2) They must be kept in RAM for quick access. Because of this, it
40 * isn't possible to always maintain tuple-level granularity -- when
41 * the space allocated to store these approaches exhaustion, a
42 * request for a lock may need to scan for situations where a single
43 * transaction holds many fine-grained locks which can be coalesced
44 * into a single coarser-grained lock.
45 *
46 * (3) They never block anything; they are more like flags than locks
47 * in that regard; although they refer to database objects and are
48 * used to identify rw-conflicts with normal write locks.
49 *
50 * (4) While they are associated with a transaction, they must survive
51 * a successful COMMIT of that transaction, and remain until all
52 * overlapping transactions complete. This even means that they
53 * must survive termination of the transaction's process. If a
54 * top level transaction is rolled back, however, it is immediately
55 * flagged so that it can be ignored, and its SIREAD locks can be
56 * released any time after that.
57 *
58 * (5) The only transactions which create SIREAD locks or check for
59 * conflicts with them are serializable transactions.
60 *
61 * (6) When a write lock for a top level transaction is found to cover
62 * an existing SIREAD lock for the same transaction, the SIREAD lock
63 * can be deleted.
64 *
65 * (7) A write from a serializable transaction must ensure that an xact
66 * record exists for the transaction, with the same lifespan (until
 *		all concurrent transactions complete or the transaction is rolled
68 * back) so that rw-dependencies to that transaction can be
69 * detected.
70 *
71 * We use an optimization for read-only transactions. Under certain
72 * circumstances, a read-only transaction's snapshot can be shown to
73 * never have conflicts with other transactions. This is referred to
74 * as a "safe" snapshot (and one known not to be is "unsafe").
75 * However, it can't be determined whether a snapshot is safe until
76 * all concurrent read/write transactions complete.
77 *
78 * Once a read-only transaction is known to have a safe snapshot, it
79 * can release its predicate locks and exempt itself from further
80 * predicate lock tracking. READ ONLY DEFERRABLE transactions run only
81 * on safe snapshots, waiting as necessary for one to be available.
82 *
83 *
84 * Lightweight locks to manage access to the predicate locking shared
85 * memory objects must be taken in this order, and should be released in
86 * reverse order:
87 *
88 * SerializableFinishedListLock
89 * - Protects the list of transactions which have completed but which
90 * may yet matter because they overlap still-active transactions.
91 *
92 * SerializablePredicateLockListLock
93 * - Protects the linked list of locks held by a transaction. Note
94 * that the locks themselves are also covered by the partition
95 * locks of their respective lock targets; this lock only affects
96 * the linked list connecting the locks related to a transaction.
97 * - All transactions share this single lock (with no partitioning).
98 * - There is never a need for a process other than the one running
99 * an active transaction to walk the list of locks held by that
100 * transaction, except parallel query workers sharing the leader's
101 * transaction. In the parallel case, an extra per-sxact lock is
102 * taken; see below.
103 * - It is relatively infrequent that another process needs to
104 * modify the list for a transaction, but it does happen for such
105 * things as index page splits for pages with predicate locks and
106 * freeing of predicate locked pages by a vacuum process. When
107 * removing a lock in such cases, the lock itself contains the
108 * pointers needed to remove it from the list. When adding a
109 * lock in such cases, the lock can be added using the anchor in
110 * the transaction structure. Neither requires walking the list.
111 * - Cleaning up the list for a terminated transaction is sometimes
112 * not done on a retail basis, in which case no lock is required.
113 * - Due to the above, a process accessing its active transaction's
114 * list always uses a shared lock, regardless of whether it is
115 * walking or maintaining the list. This improves concurrency
116 * for the common access patterns.
117 * - A process which needs to alter the list of a transaction other
118 * than its own active transaction must acquire an exclusive
119 * lock.
120 *
121 * SERIALIZABLEXACT's member 'predicateLockListLock'
122 * - Protects the linked list of locks held by a transaction. Only
123 * needed for parallel mode, where multiple backends share the
124 * same SERIALIZABLEXACT object. Not needed if
125 * SerializablePredicateLockListLock is held exclusively.
126 *
127 * PredicateLockHashPartitionLock(hashcode)
128 * - The same lock protects a target, all locks on that target, and
129 * the linked list of locks on the target.
130 * - When more than one is needed, acquire in ascending address order.
131 * - When all are needed (rare), acquire in ascending index order with
132 * PredicateLockHashPartitionLockByIndex(index).
133 *
134 * SerializableXactHashLock
135 * - Protects both PredXact and SerializableXidHash.
136 *
137 *
138 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
139 * Portions Copyright (c) 1994, Regents of the University of California
140 *
141 *
142 * IDENTIFICATION
143 * src/backend/storage/lmgr/predicate.c
144 *
145 *-------------------------------------------------------------------------
146 */
147 /*
148 * INTERFACE ROUTINES
149 *
150 * housekeeping for setting up shared memory predicate lock structures
151 * InitPredicateLocks(void)
152 * PredicateLockShmemSize(void)
153 *
154 * predicate lock reporting
155 * GetPredicateLockStatusData(void)
156 * PageIsPredicateLocked(Relation relation, BlockNumber blkno)
157 *
158 * predicate lock maintenance
159 * GetSerializableTransactionSnapshot(Snapshot snapshot)
160 * SetSerializableTransactionSnapshot(Snapshot snapshot,
161 * VirtualTransactionId *sourcevxid)
162 * RegisterPredicateLockingXid(void)
163 * PredicateLockRelation(Relation relation, Snapshot snapshot)
164 * PredicateLockPage(Relation relation, BlockNumber blkno,
165 * Snapshot snapshot)
166 * PredicateLockTuple(Relation relation, HeapTuple tuple,
167 * Snapshot snapshot)
168 * PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
169 * BlockNumber newblkno)
170 * PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
171 * BlockNumber newblkno)
172 * TransferPredicateLocksToHeapRelation(Relation relation)
173 * ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
174 *
175 * conflict detection (may also trigger rollback)
176 * CheckForSerializableConflictOut(bool visible, Relation relation,
177 * HeapTupleData *tup, Buffer buffer,
178 * Snapshot snapshot)
179 * CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
180 * Buffer buffer)
181 * CheckTableForSerializableConflictIn(Relation relation)
182 *
183 * final rollback checking
184 * PreCommit_CheckForSerializationFailure(void)
185 *
186 * two-phase commit support
187 * AtPrepare_PredicateLocks(void);
188 * PostPrepare_PredicateLocks(TransactionId xid);
189 * PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
190 * predicatelock_twophase_recover(TransactionId xid, uint16 info,
191 * void *recdata, uint32 len);
192 */
193
194 #include "postgres.h"
195
196 #include "access/heapam.h"
197 #include "access/htup_details.h"
198 #include "access/parallel.h"
199 #include "access/slru.h"
200 #include "access/subtrans.h"
201 #include "access/transam.h"
202 #include "access/twophase.h"
203 #include "access/twophase_rmgr.h"
204 #include "access/xact.h"
205 #include "access/xlog.h"
206 #include "miscadmin.h"
207 #include "pgstat.h"
208 #include "storage/bufmgr.h"
209 #include "storage/predicate.h"
210 #include "storage/predicate_internals.h"
211 #include "storage/proc.h"
212 #include "storage/procarray.h"
213 #include "utils/rel.h"
214 #include "utils/snapmgr.h"
215
216 /* Uncomment the next line to test the graceful degradation code. */
217 /* #define TEST_OLDSERXID */
218
219 /*
220 * Test the most selective fields first, for performance.
221 *
222 * a is covered by b if all of the following hold:
223 * 1) a.database = b.database
224 * 2) a.relation = b.relation
225 * 3) b.offset is invalid (b is page-granularity or higher)
226 * 4) either of the following:
227 * 4a) a.offset is valid (a is tuple-granularity) and a.page = b.page
228 * or 4b) a.offset is invalid and b.page is invalid (a is
 *			page-granularity and b is relation-granularity)
230 */
231 #define TargetTagIsCoveredBy(covered_target, covering_target) \
232 ((GET_PREDICATELOCKTARGETTAG_RELATION(covered_target) == /* (2) */ \
233 GET_PREDICATELOCKTARGETTAG_RELATION(covering_target)) \
234 && (GET_PREDICATELOCKTARGETTAG_OFFSET(covering_target) == \
235 InvalidOffsetNumber) /* (3) */ \
236 && (((GET_PREDICATELOCKTARGETTAG_OFFSET(covered_target) != \
237 InvalidOffsetNumber) /* (4a) */ \
238 && (GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
239 GET_PREDICATELOCKTARGETTAG_PAGE(covered_target))) \
240 || ((GET_PREDICATELOCKTARGETTAG_PAGE(covering_target) == \
241 InvalidBlockNumber) /* (4b) */ \
242 && (GET_PREDICATELOCKTARGETTAG_PAGE(covered_target) \
243 != InvalidBlockNumber))) \
244 && (GET_PREDICATELOCKTARGETTAG_DB(covered_target) == /* (1) */ \
245 GET_PREDICATELOCKTARGETTAG_DB(covering_target)))
246
247 /*
248 * The predicate locking target and lock shared hash tables are partitioned to
249 * reduce contention. To determine which partition a given target belongs to,
250 * compute the tag's hash code with PredicateLockTargetTagHashCode(), then
251 * apply one of these macros.
252 * NB: NUM_PREDICATELOCK_PARTITIONS must be a power of 2!
253 */
254 #define PredicateLockHashPartition(hashcode) \
255 ((hashcode) % NUM_PREDICATELOCK_PARTITIONS)
256 #define PredicateLockHashPartitionLock(hashcode) \
257 (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + \
258 PredicateLockHashPartition(hashcode)].lock)
259 #define PredicateLockHashPartitionLockByIndex(i) \
260 (&MainLWLockArray[PREDICATELOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
261
262 #define NPREDICATELOCKTARGETENTS() \
263 mul_size(max_predicate_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
264
265 #define SxactIsOnFinishedList(sxact) (!SHMQueueIsDetached(&((sxact)->finishedLink)))
266
267 /*
268 * Note that a sxact is marked "prepared" once it has passed
269 * PreCommit_CheckForSerializationFailure, even if it isn't using
270 * 2PC. This is the point at which it can no longer be aborted.
271 *
272 * The PREPARED flag remains set after commit, so SxactIsCommitted
273 * implies SxactIsPrepared.
274 */
275 #define SxactIsCommitted(sxact) (((sxact)->flags & SXACT_FLAG_COMMITTED) != 0)
276 #define SxactIsPrepared(sxact) (((sxact)->flags & SXACT_FLAG_PREPARED) != 0)
277 #define SxactIsRolledBack(sxact) (((sxact)->flags & SXACT_FLAG_ROLLED_BACK) != 0)
278 #define SxactIsDoomed(sxact) (((sxact)->flags & SXACT_FLAG_DOOMED) != 0)
279 #define SxactIsReadOnly(sxact) (((sxact)->flags & SXACT_FLAG_READ_ONLY) != 0)
280 #define SxactHasSummaryConflictIn(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_IN) != 0)
281 #define SxactHasSummaryConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_SUMMARY_CONFLICT_OUT) != 0)
282 /*
283 * The following macro actually means that the specified transaction has a
284 * conflict out *to a transaction which committed ahead of it*. It's hard
285 * to get that into a name of a reasonable length.
286 */
287 #define SxactHasConflictOut(sxact) (((sxact)->flags & SXACT_FLAG_CONFLICT_OUT) != 0)
288 #define SxactIsDeferrableWaiting(sxact) (((sxact)->flags & SXACT_FLAG_DEFERRABLE_WAITING) != 0)
289 #define SxactIsROSafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_SAFE) != 0)
290 #define SxactIsROUnsafe(sxact) (((sxact)->flags & SXACT_FLAG_RO_UNSAFE) != 0)
291 #define SxactIsPartiallyReleased(sxact) (((sxact)->flags & SXACT_FLAG_PARTIALLY_RELEASED) != 0)
292
293 /*
294 * Compute the hash code associated with a PREDICATELOCKTARGETTAG.
295 *
296 * To avoid unnecessary recomputations of the hash code, we try to do this
297 * just once per function, and then pass it around as needed. Aside from
298 * passing the hashcode to hash_search_with_hash_value(), we can extract
299 * the lock partition number from the hashcode.
300 */
301 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
302 get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
303
304 /*
305 * Given a predicate lock tag, and the hash for its target,
306 * compute the lock hash.
307 *
308 * To make the hash code also depend on the transaction, we xor the sxid
309 * struct's address into the hash code, left-shifted so that the
310 * partition-number bits don't change. Since this is only a hash, we
311 * don't care if we lose high-order bits of the address; use an
312 * intermediate variable to suppress cast-pointer-to-int warnings.
313 */
314 #define PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash) \
315 ((targethash) ^ ((uint32) PointerGetDatum((predicatelocktag)->myXact)) \
316 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
317
318
319 /*
320 * The SLRU buffer area through which we access the old xids.
321 */
322 static SlruCtlData OldSerXidSlruCtlData;
323
324 #define OldSerXidSlruCtl (&OldSerXidSlruCtlData)
325
326 #define OLDSERXID_PAGESIZE BLCKSZ
327 #define OLDSERXID_ENTRYSIZE sizeof(SerCommitSeqNo)
328 #define OLDSERXID_ENTRIESPERPAGE (OLDSERXID_PAGESIZE / OLDSERXID_ENTRYSIZE)
329
330 /*
331 * Set maximum pages based on the number needed to track all transactions.
332 */
333 #define OLDSERXID_MAX_PAGE (MaxTransactionId / OLDSERXID_ENTRIESPERPAGE)
334
335 #define OldSerXidNextPage(page) (((page) >= OLDSERXID_MAX_PAGE) ? 0 : (page) + 1)
336
337 #define OldSerXidValue(slotno, xid) (*((SerCommitSeqNo *) \
338 (OldSerXidSlruCtl->shared->page_buffer[slotno] + \
339 ((((uint32) (xid)) % OLDSERXID_ENTRIESPERPAGE) * OLDSERXID_ENTRYSIZE))))
340
341 #define OldSerXidPage(xid) (((uint32) (xid)) / OLDSERXID_ENTRIESPERPAGE)
342
343 typedef struct OldSerXidControlData
344 {
345 int headPage; /* newest initialized page */
346 TransactionId headXid; /* newest valid Xid in the SLRU */
347 TransactionId tailXid; /* oldest xmin we might be interested in */
348 } OldSerXidControlData;
349
350 typedef struct OldSerXidControlData *OldSerXidControl;
351
352 static OldSerXidControl oldSerXidControl;
353
354 /*
355 * When the oldest committed transaction on the "finished" list is moved to
356 * SLRU, its predicate locks will be moved to this "dummy" transaction,
357 * collapsing duplicate targets. When a duplicate is found, the later
358 * commitSeqNo is used.
359 */
360 static SERIALIZABLEXACT *OldCommittedSxact;
361
362
363 /*
364 * These configuration variables are used to set the predicate lock table size
365 * and to control promotion of predicate locks to coarser granularity in an
366 * attempt to degrade performance (mostly as false positive serialization
 * failure) gracefully in the face of memory pressure.
368 */
369 int max_predicate_locks_per_xact; /* set by guc.c */
370 int max_predicate_locks_per_relation; /* set by guc.c */
371 int max_predicate_locks_per_page; /* set by guc.c */
372
373 /*
374 * This provides a list of objects in order to track transactions
375 * participating in predicate locking. Entries in the list are fixed size,
376 * and reside in shared memory. The memory address of an entry must remain
377 * fixed during its lifetime. The list will be protected from concurrent
378 * update externally; no provision is made in this code to manage that. The
379 * number of entries in the list, and the size allowed for each entry is
380 * fixed upon creation.
381 */
382 static PredXactList PredXact;
383
384 /*
385 * This provides a pool of RWConflict data elements to use in conflict lists
386 * between transactions.
387 */
388 static RWConflictPoolHeader RWConflictPool;
389
390 /*
391 * The predicate locking hash tables are in shared memory.
392 * Each backend keeps pointers to them.
393 */
394 static HTAB *SerializableXidHash;
395 static HTAB *PredicateLockTargetHash;
396 static HTAB *PredicateLockHash;
397 static SHM_QUEUE *FinishedSerializableTransactions;
398
399 /*
400 * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
401 * this entry, you can ensure that there's enough scratch space available for
402 * inserting one entry in the hash table. This is an otherwise-invalid tag.
403 */
404 static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0};
405 static uint32 ScratchTargetTagHash;
406 static LWLock *ScratchPartitionLock;
407
408 /*
409 * The local hash table used to determine when to combine multiple fine-
 * grained locks into a single coarser-grained lock.
411 */
412 static HTAB *LocalPredicateLockHash = NULL;
413
414 /*
415 * Keep a pointer to the currently-running serializable transaction (if any)
416 * for quick reference. Also, remember if we have written anything that could
417 * cause a rw-conflict.
418 */
419 static SERIALIZABLEXACT *MySerializableXact = InvalidSerializableXact;
420 static bool MyXactDidWrite = false;
421
422 /*
423 * The SXACT_FLAG_RO_UNSAFE optimization might lead us to release
424 * MySerializableXact early. If that happens in a parallel query, the leader
425 * needs to defer the destruction of the SERIALIZABLEXACT until end of
426 * transaction, because the workers still have a reference to it. In that
427 * case, the leader stores it here.
428 */
429 static SERIALIZABLEXACT *SavedSerializableXact = InvalidSerializableXact;
430
431 /* local functions */
432
433 static SERIALIZABLEXACT *CreatePredXact(void);
434 static void ReleasePredXact(SERIALIZABLEXACT *sxact);
435 static SERIALIZABLEXACT *FirstPredXact(void);
436 static SERIALIZABLEXACT *NextPredXact(SERIALIZABLEXACT *sxact);
437
438 static bool RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer);
439 static void SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
440 static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT *activeXact);
441 static void ReleaseRWConflict(RWConflict conflict);
442 static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact);
443
444 static bool OldSerXidPagePrecedesLogically(int page1, int page2);
445 static void OldSerXidInit(void);
446 static void OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo);
447 static SerCommitSeqNo OldSerXidGetMinConflictCommitSeqNo(TransactionId xid);
448 static void OldSerXidSetActiveSerXmin(TransactionId xid);
449
450 static uint32 predicatelock_hash(const void *key, Size keysize);
451 static void SummarizeOldestCommittedSxact(void);
452 static Snapshot GetSafeSnapshot(Snapshot snapshot);
453 static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot,
454 VirtualTransactionId *sourcevxid,
455 int sourcepid);
456 static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
457 static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
458 PREDICATELOCKTARGETTAG *parent);
459 static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
460 static void RemoveScratchTarget(bool lockheld);
461 static void RestoreScratchTarget(bool lockheld);
462 static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
463 uint32 targettaghash);
464 static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
465 static int MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag);
466 static bool CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag);
467 static void DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag);
468 static void CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
469 uint32 targettaghash,
470 SERIALIZABLEXACT *sxact);
471 static void DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash);
472 static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
473 PREDICATELOCKTARGETTAG newtargettag,
474 bool removeOld);
475 static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
476 static void DropAllPredicateLocksFromTable(Relation relation,
477 bool transfer);
478 static void SetNewSxactGlobalXmin(void);
479 static void ClearOldPredicateLocks(void);
480 static void ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
481 bool summarize);
482 static bool XidIsConcurrent(TransactionId xid);
483 static void CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag);
484 static void FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer);
485 static void OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
486 SERIALIZABLEXACT *writer);
487 static void CreateLocalPredicateLockHash(void);
488 static void ReleasePredicateLocksLocal(void);
489
490
491 /*------------------------------------------------------------------------*/
492
493 /*
494 * Does this relation participate in predicate locking? Temporary and system
495 * relations are exempt, as are materialized views.
496 */
497 static inline bool
PredicateLockingNeededForRelation(Relation relation)498 PredicateLockingNeededForRelation(Relation relation)
499 {
500 return !(relation->rd_id < FirstBootstrapObjectId ||
501 RelationUsesLocalBuffers(relation) ||
502 relation->rd_rel->relkind == RELKIND_MATVIEW);
503 }
504
505 /*
506 * When a public interface method is called for a read, this is the test to
507 * see if we should do a quick return.
508 *
509 * Note: this function has side-effects! If this transaction has been flagged
510 * as RO-safe since the last call, we release all predicate locks and reset
511 * MySerializableXact. That makes subsequent calls to return quickly.
512 *
513 * This is marked as 'inline' to eliminate the function call overhead in the
514 * common case that serialization is not needed.
515 */
516 static inline bool
SerializationNeededForRead(Relation relation,Snapshot snapshot)517 SerializationNeededForRead(Relation relation, Snapshot snapshot)
518 {
519 /* Nothing to do if this is not a serializable transaction */
520 if (MySerializableXact == InvalidSerializableXact)
521 return false;
522
523 /*
524 * Don't acquire locks or conflict when scanning with a special snapshot.
525 * This excludes things like CLUSTER and REINDEX. They use the wholesale
526 * functions TransferPredicateLocksToHeapRelation() and
527 * CheckTableForSerializableConflictIn() to participate in serialization,
528 * but the scans involved don't need serialization.
529 */
530 if (!IsMVCCSnapshot(snapshot))
531 return false;
532
533 /*
534 * Check if we have just become "RO-safe". If we have, immediately release
535 * all locks as they're not needed anymore. This also resets
536 * MySerializableXact, so that subsequent calls to this function can exit
537 * quickly.
538 *
539 * A transaction is flagged as RO_SAFE if all concurrent R/W transactions
540 * commit without having conflicts out to an earlier snapshot, thus
541 * ensuring that no conflicts are possible for this transaction.
542 */
543 if (SxactIsROSafe(MySerializableXact))
544 {
545 ReleasePredicateLocks(false, true);
546 return false;
547 }
548
549 /* Check if the relation doesn't participate in predicate locking */
550 if (!PredicateLockingNeededForRelation(relation))
551 return false;
552
553 return true; /* no excuse to skip predicate locking */
554 }
555
556 /*
557 * Like SerializationNeededForRead(), but called on writes.
558 * The logic is the same, but there is no snapshot and we can't be RO-safe.
559 */
560 static inline bool
SerializationNeededForWrite(Relation relation)561 SerializationNeededForWrite(Relation relation)
562 {
563 /* Nothing to do if this is not a serializable transaction */
564 if (MySerializableXact == InvalidSerializableXact)
565 return false;
566
567 /* Check if the relation doesn't participate in predicate locking */
568 if (!PredicateLockingNeededForRelation(relation))
569 return false;
570
571 return true; /* no excuse to skip predicate locking */
572 }
573
574
575 /*------------------------------------------------------------------------*/
576
577 /*
578 * These functions are a simple implementation of a list for this specific
579 * type of struct. If there is ever a generalized shared memory list, we
580 * should probably switch to that.
581 */
582 static SERIALIZABLEXACT *
CreatePredXact(void)583 CreatePredXact(void)
584 {
585 PredXactListElement ptle;
586
587 ptle = (PredXactListElement)
588 SHMQueueNext(&PredXact->availableList,
589 &PredXact->availableList,
590 offsetof(PredXactListElementData, link));
591 if (!ptle)
592 return NULL;
593
594 SHMQueueDelete(&ptle->link);
595 SHMQueueInsertBefore(&PredXact->activeList, &ptle->link);
596 return &ptle->sxact;
597 }
598
599 static void
ReleasePredXact(SERIALIZABLEXACT * sxact)600 ReleasePredXact(SERIALIZABLEXACT *sxact)
601 {
602 PredXactListElement ptle;
603
604 Assert(ShmemAddrIsValid(sxact));
605
606 ptle = (PredXactListElement)
607 (((char *) sxact)
608 - offsetof(PredXactListElementData, sxact)
609 + offsetof(PredXactListElementData, link));
610 SHMQueueDelete(&ptle->link);
611 SHMQueueInsertBefore(&PredXact->availableList, &ptle->link);
612 }
613
614 static SERIALIZABLEXACT *
FirstPredXact(void)615 FirstPredXact(void)
616 {
617 PredXactListElement ptle;
618
619 ptle = (PredXactListElement)
620 SHMQueueNext(&PredXact->activeList,
621 &PredXact->activeList,
622 offsetof(PredXactListElementData, link));
623 if (!ptle)
624 return NULL;
625
626 return &ptle->sxact;
627 }
628
629 static SERIALIZABLEXACT *
NextPredXact(SERIALIZABLEXACT * sxact)630 NextPredXact(SERIALIZABLEXACT *sxact)
631 {
632 PredXactListElement ptle;
633
634 Assert(ShmemAddrIsValid(sxact));
635
636 ptle = (PredXactListElement)
637 (((char *) sxact)
638 - offsetof(PredXactListElementData, sxact)
639 + offsetof(PredXactListElementData, link));
640 ptle = (PredXactListElement)
641 SHMQueueNext(&PredXact->activeList,
642 &ptle->link,
643 offsetof(PredXactListElementData, link));
644 if (!ptle)
645 return NULL;
646
647 return &ptle->sxact;
648 }
649
650 /*------------------------------------------------------------------------*/
651
652 /*
653 * These functions manage primitive access to the RWConflict pool and lists.
654 */
655 static bool
RWConflictExists(const SERIALIZABLEXACT * reader,const SERIALIZABLEXACT * writer)656 RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
657 {
658 RWConflict conflict;
659
660 Assert(reader != writer);
661
662 /* Check the ends of the purported conflict first. */
663 if (SxactIsDoomed(reader)
664 || SxactIsDoomed(writer)
665 || SHMQueueEmpty(&reader->outConflicts)
666 || SHMQueueEmpty(&writer->inConflicts))
667 return false;
668
669 /* A conflict is possible; walk the list to find out. */
670 conflict = (RWConflict)
671 SHMQueueNext(&reader->outConflicts,
672 &reader->outConflicts,
673 offsetof(RWConflictData, outLink));
674 while (conflict)
675 {
676 if (conflict->sxactIn == writer)
677 return true;
678 conflict = (RWConflict)
679 SHMQueueNext(&reader->outConflicts,
680 &conflict->outLink,
681 offsetof(RWConflictData, outLink));
682 }
683
684 /* No conflict found. */
685 return false;
686 }
687
688 static void
SetRWConflict(SERIALIZABLEXACT * reader,SERIALIZABLEXACT * writer)689 SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
690 {
691 RWConflict conflict;
692
693 Assert(reader != writer);
694 Assert(!RWConflictExists(reader, writer));
695
696 conflict = (RWConflict)
697 SHMQueueNext(&RWConflictPool->availableList,
698 &RWConflictPool->availableList,
699 offsetof(RWConflictData, outLink));
700 if (!conflict)
701 ereport(ERROR,
702 (errcode(ERRCODE_OUT_OF_MEMORY),
703 errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
704 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
705
706 SHMQueueDelete(&conflict->outLink);
707
708 conflict->sxactOut = reader;
709 conflict->sxactIn = writer;
710 SHMQueueInsertBefore(&reader->outConflicts, &conflict->outLink);
711 SHMQueueInsertBefore(&writer->inConflicts, &conflict->inLink);
712 }
713
714 static void
SetPossibleUnsafeConflict(SERIALIZABLEXACT * roXact,SERIALIZABLEXACT * activeXact)715 SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
716 SERIALIZABLEXACT *activeXact)
717 {
718 RWConflict conflict;
719
720 Assert(roXact != activeXact);
721 Assert(SxactIsReadOnly(roXact));
722 Assert(!SxactIsReadOnly(activeXact));
723
724 conflict = (RWConflict)
725 SHMQueueNext(&RWConflictPool->availableList,
726 &RWConflictPool->availableList,
727 offsetof(RWConflictData, outLink));
728 if (!conflict)
729 ereport(ERROR,
730 (errcode(ERRCODE_OUT_OF_MEMORY),
731 errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
732 errhint("You might need to run fewer transactions at a time or increase max_connections.")));
733
734 SHMQueueDelete(&conflict->outLink);
735
736 conflict->sxactOut = activeXact;
737 conflict->sxactIn = roXact;
738 SHMQueueInsertBefore(&activeXact->possibleUnsafeConflicts,
739 &conflict->outLink);
740 SHMQueueInsertBefore(&roXact->possibleUnsafeConflicts,
741 &conflict->inLink);
742 }
743
744 static void
ReleaseRWConflict(RWConflict conflict)745 ReleaseRWConflict(RWConflict conflict)
746 {
747 SHMQueueDelete(&conflict->inLink);
748 SHMQueueDelete(&conflict->outLink);
749 SHMQueueInsertBefore(&RWConflictPool->availableList, &conflict->outLink);
750 }
751
/*
 * Mark a read-only transaction as unable to run serializably on its
 * snapshot, and drop all of its possible-unsafe-conflict tracking entries,
 * since its fate is now decided.
 */
static void
FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
{
	RWConflict	conflict,
				nextConflict;

	Assert(SxactIsReadOnly(sxact));
	Assert(!SxactIsROSafe(sxact));

	sxact->flags |= SXACT_FLAG_RO_UNSAFE;

	/*
	 * We know this isn't a safe snapshot, so we can stop looking for other
	 * potential conflicts.
	 */
	conflict = (RWConflict)
		SHMQueueNext(&sxact->possibleUnsafeConflicts,
					 &sxact->possibleUnsafeConflicts,
					 offsetof(RWConflictData, inLink));
	while (conflict)
	{
		/* Save the successor before releasing the current entry. */
		nextConflict = (RWConflict)
			SHMQueueNext(&sxact->possibleUnsafeConflicts,
						 &conflict->inLink,
						 offsetof(RWConflictData, inLink));

		/* Entries on this list always point in from a r/w transaction. */
		Assert(!SxactIsReadOnly(conflict->sxactOut));
		Assert(sxact == conflict->sxactIn);

		ReleaseRWConflict(conflict);

		conflict = nextConflict;
	}
}
786
787 /*------------------------------------------------------------------------*/
788
789 /*
790 * Decide whether an OldSerXid page number is "older" for truncation purposes.
791 * Analogous to CLOGPagePrecedes().
792 */
793 static bool
OldSerXidPagePrecedesLogically(int page1,int page2)794 OldSerXidPagePrecedesLogically(int page1, int page2)
795 {
796 TransactionId xid1;
797 TransactionId xid2;
798
799 xid1 = ((TransactionId) page1) * OLDSERXID_ENTRIESPERPAGE;
800 xid1 += FirstNormalTransactionId + 1;
801 xid2 = ((TransactionId) page2) * OLDSERXID_ENTRIESPERPAGE;
802 xid2 += FirstNormalTransactionId + 1;
803
804 return (TransactionIdPrecedes(xid1, xid2) &&
805 TransactionIdPrecedes(xid1, xid2 + OLDSERXID_ENTRIESPERPAGE - 1));
806 }
807
#ifdef USE_ASSERT_CHECKING
/*
 * Self-tests for OldSerXidPagePrecedesLogically(), exercising the XID
 * wraparound scenarios described below.  Run only in assert-enabled builds,
 * from OldSerXidInit().
 */
static void
OldSerXidPagePrecedesLogicallyUnitTests(void)
{
	int			per_page = OLDSERXID_ENTRIESPERPAGE,
				offset = per_page / 2;
	int			newestPage,
				oldestPage,
				headPage,
				targetPage;
	TransactionId newestXact,
				oldestXact;

	/* GetNewTransactionId() has assigned the last XID it can safely use. */
	newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;	/* nothing special */
	newestXact = newestPage * per_page + offset;
	Assert(newestXact / per_page == newestPage);
	/* Back up by 2^31 XIDs: the most distant still-unfinished XID. */
	oldestXact = newestXact + 1;
	oldestXact -= 1U << 31;
	oldestPage = oldestXact / per_page;

	/*
	 * In this scenario, the SLRU headPage pertains to the last ~1000 XIDs
	 * assigned.  oldestXact finishes, ~2B XIDs having elapsed since it
	 * started.  Further transactions cause us to summarize oldestXact to
	 * tailPage.  Function must return false so OldSerXidAdd() doesn't zero
	 * tailPage (which may contain entries for other old, recently-finished
	 * XIDs) and half the SLRU.  Reaching this requires burning ~2B XIDs in
	 * single-user mode, a negligible possibility.
	 */
	headPage = newestPage;
	targetPage = oldestPage;
	Assert(!OldSerXidPagePrecedesLogically(headPage, targetPage));

	/*
	 * In this scenario, the SLRU headPage pertains to oldestXact.  We're
	 * summarizing an XID near newestXact.  (Assume few other XIDs used
	 * SERIALIZABLE, hence the minimal headPage advancement.  Assume
	 * oldestXact was long-running and only recently reached the SLRU.)
	 * Function must return true to make OldSerXidAdd() create targetPage.
	 *
	 * Today's implementation mishandles this case, but it doesn't matter
	 * enough to fix.  Verify that the defect affects just one page by
	 * asserting correct treatment of its prior page.  Reaching this case
	 * requires burning ~2B XIDs in single-user mode, a negligible
	 * possibility.  Moreover, if it does happen, the consequence would be
	 * mild, namely a new transaction failing in SimpleLruReadPage().
	 */
	headPage = oldestPage;
	targetPage = newestPage;
	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage - 1));
#if 0
	/* Would fail, per the known one-page defect documented above. */
	Assert(OldSerXidPagePrecedesLogically(headPage, targetPage));
#endif
}
#endif
864
865 /*
866 * Initialize for the tracking of old serializable committed xids.
867 */
/*
 * Initialize (or attach to) the SLRU and control structure used to track
 * old committed serializable xids.
 */
static void
OldSerXidInit(void)
{
	bool		found;

	/*
	 * Set up SLRU management of the pg_serial data.
	 */
	OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
	SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
				  NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial",
				  LWTRANCHE_OLDSERXID_BUFFERS);
	/* Override default assumption that writes should be fsync'd */
	OldSerXidSlruCtl->do_fsync = false;
#ifdef USE_ASSERT_CHECKING
	OldSerXidPagePrecedesLogicallyUnitTests();
#endif
	SlruPagePrecedesUnitTests(OldSerXidSlruCtl, OLDSERXID_ENTRIESPERPAGE);

	/*
	 * Create or attach to the OldSerXidControl structure.
	 */
	oldSerXidControl = (OldSerXidControl)
		ShmemInitStruct("OldSerXidControlData", sizeof(OldSerXidControlData), &found);

	/* Postmaster creates the struct; child processes merely attach. */
	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		/*
		 * Set control information to reflect empty SLRU.
		 */
		oldSerXidControl->headPage = -1;
		oldSerXidControl->headXid = InvalidTransactionId;
		oldSerXidControl->tailXid = InvalidTransactionId;
	}
}
904
905 /*
906 * Record a committed read write serializable xid and the minimum
907 * commitSeqNo of any transactions to which this xid had a rw-conflict out.
908 * An invalid seqNo means that there were no conflicts out from xid.
909 */
static void
OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
{
	TransactionId tailXid;
	int			targetPage;
	int			slotno;
	int			firstZeroPage;
	bool		isNewPage;

	Assert(TransactionIdIsValid(xid));

	targetPage = OldSerXidPage(xid);

	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/*
	 * If no serializable transactions are active, there shouldn't be anything
	 * to push out to the SLRU. Hitting this assert would mean there's
	 * something wrong with the earlier cleanup logic.
	 */
	tailXid = oldSerXidControl->tailXid;
	Assert(TransactionIdIsValid(tailXid));

	/*
	 * If the SLRU is currently unused, zero out the whole active region from
	 * tailXid to headXid before taking it into use. Otherwise zero out only
	 * any new pages that enter the tailXid-headXid range as we advance
	 * headXid.
	 */
	if (oldSerXidControl->headPage < 0)
	{
		firstZeroPage = OldSerXidPage(tailXid);
		isNewPage = true;
	}
	else
	{
		firstZeroPage = OldSerXidNextPage(oldSerXidControl->headPage);
		isNewPage = OldSerXidPagePrecedesLogically(oldSerXidControl->headPage,
												   targetPage);
	}

	/* Advance headXid (and headPage if needed) to cover this xid. */
	if (!TransactionIdIsValid(oldSerXidControl->headXid)
		|| TransactionIdFollows(xid, oldSerXidControl->headXid))
		oldSerXidControl->headXid = xid;
	if (isNewPage)
		oldSerXidControl->headPage = targetPage;

	if (isNewPage)
	{
		/* Initialize intervening pages. */
		while (firstZeroPage != targetPage)
		{
			(void) SimpleLruZeroPage(OldSerXidSlruCtl, firstZeroPage);
			firstZeroPage = OldSerXidNextPage(firstZeroPage);
		}
		slotno = SimpleLruZeroPage(OldSerXidSlruCtl, targetPage);
	}
	else
		slotno = SimpleLruReadPage(OldSerXidSlruCtl, targetPage, true, xid);

	/* Store the conflict commitSeqNo in xid's entry and dirty the page. */
	OldSerXidValue(slotno, xid) = minConflictCommitSeqNo;
	OldSerXidSlruCtl->shared->page_dirty[slotno] = true;

	LWLockRelease(OldSerXidLock);
}
975
976 /*
977 * Get the minimum commitSeqNo for any conflict out for the given xid. For
978 * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
979 * will be returned.
980 */
static SerCommitSeqNo
OldSerXidGetMinConflictCommitSeqNo(TransactionId xid)
{
	TransactionId headXid;
	TransactionId tailXid;
	SerCommitSeqNo val;
	int			slotno;

	Assert(TransactionIdIsValid(xid));

	/* Snapshot the tracked xid range under shared lock. */
	LWLockAcquire(OldSerXidLock, LW_SHARED);
	headXid = oldSerXidControl->headXid;
	tailXid = oldSerXidControl->tailXid;
	LWLockRelease(OldSerXidLock);

	/* No valid head: the SLRU holds no entries at all. */
	if (!TransactionIdIsValid(headXid))
		return 0;

	Assert(TransactionIdIsValid(tailXid));

	/* xid outside the tracked [tailXid, headXid] range: no entry for it. */
	if (TransactionIdPrecedes(xid, tailXid)
		|| TransactionIdFollows(xid, headXid))
		return 0;

	/*
	 * The following function must be called without holding OldSerXidLock,
	 * but will return with that lock held, which must then be released.
	 */
	slotno = SimpleLruReadPage_ReadOnly(OldSerXidSlruCtl,
										OldSerXidPage(xid), xid);
	val = OldSerXidValue(slotno, xid);
	LWLockRelease(OldSerXidLock);
	return val;
}
1015
1016 /*
1017 * Call this whenever there is a new xmin for active serializable
1018 * transactions. We don't need to keep information on transactions which
1019 * precede that. InvalidTransactionId means none active, so everything in
1020 * the SLRU can be discarded.
1021 */
static void
OldSerXidSetActiveSerXmin(TransactionId xid)
{
	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/*
	 * When no sxacts are active, nothing overlaps, set the xid values to
	 * invalid to show that there are no valid entries. Don't clear headPage,
	 * though. A new xmin might still land on that page, and we don't want to
	 * repeatedly zero out the same page.
	 */
	if (!TransactionIdIsValid(xid))
	{
		oldSerXidControl->tailXid = InvalidTransactionId;
		oldSerXidControl->headXid = InvalidTransactionId;
		LWLockRelease(OldSerXidLock);
		return;
	}

	/*
	 * When we're recovering prepared transactions, the global xmin might move
	 * backwards depending on the order they're recovered. Normally that's not
	 * OK, but during recovery no serializable transactions will commit, so
	 * the SLRU is empty and we can get away with it.
	 */
	if (RecoveryInProgress())
	{
		/* SLRU must be empty during recovery (no summarized commits). */
		Assert(oldSerXidControl->headPage < 0);
		/* Only move the tail backwards, never forwards, in this mode. */
		if (!TransactionIdIsValid(oldSerXidControl->tailXid)
			|| TransactionIdPrecedes(xid, oldSerXidControl->tailXid))
		{
			oldSerXidControl->tailXid = xid;
		}
		LWLockRelease(OldSerXidLock);
		return;
	}

	/* Normal case: xmin, and hence the tail, only ever advances. */
	Assert(!TransactionIdIsValid(oldSerXidControl->tailXid)
		   || TransactionIdFollows(xid, oldSerXidControl->tailXid));

	oldSerXidControl->tailXid = xid;

	LWLockRelease(OldSerXidLock);
}
1066
1067 /*
1068 * Perform a checkpoint --- either during shutdown, or on-the-fly
1069 *
1070 * We don't have any data that needs to survive a restart, but this is a
1071 * convenient place to truncate the SLRU.
1072 */
void
CheckPointPredicate(void)
{
	int			tailPage;

	LWLockAcquire(OldSerXidLock, LW_EXCLUSIVE);

	/* Exit quickly if the SLRU is currently not in use. */
	if (oldSerXidControl->headPage < 0)
	{
		LWLockRelease(OldSerXidLock);
		return;
	}

	if (TransactionIdIsValid(oldSerXidControl->tailXid))
	{
		/* We can truncate the SLRU up to the page containing tailXid */
		tailPage = OldSerXidPage(oldSerXidControl->tailXid);
	}
	else
	{
		/*----------
		 * The SLRU is no longer needed. Truncate to head before we set head
		 * invalid.
		 *
		 * XXX: It's possible that the SLRU is not needed again until XID
		 * wrap-around has happened, so that the segment containing headPage
		 * that we leave behind will appear to be new again. In that case it
		 * won't be removed until XID horizon advances enough to make it
		 * current again.
		 *
		 * XXX: This should happen in vac_truncate_clog(), not in checkpoints.
		 * Consider this scenario, starting from a system with no in-progress
		 * transactions and VACUUM FREEZE having maximized oldestXact:
		 * - Start a SERIALIZABLE transaction.
		 * - Start, finish, and summarize a SERIALIZABLE transaction, creating
		 *   one SLRU page.
		 * - Consume XIDs to reach xidStopLimit.
		 * - Finish all transactions.  Due to the long-running SERIALIZABLE
		 *   transaction, earlier checkpoints did not touch headPage.  The
		 *   next checkpoint will change it, but that checkpoint happens after
		 *   the end of the scenario.
		 * - VACUUM to advance XID limits.
		 * - Consume ~2M XIDs, crossing the former xidWrapLimit.
		 * - Start, finish, and summarize a SERIALIZABLE transaction.
		 *   OldSerXidAdd() declines to create the targetPage, because
		 *   headPage is not regarded as in the past relative to that
		 *   targetPage.  The transaction instigating the summarize fails in
		 *   SimpleLruReadPage().
		 */
		/* Remember head for truncation, then mark the SLRU unused. */
		tailPage = oldSerXidControl->headPage;
		oldSerXidControl->headPage = -1;
	}

	/* Release the control lock before doing SLRU file operations. */
	LWLockRelease(OldSerXidLock);

	/* Truncate away pages that are no longer required */
	SimpleLruTruncate(OldSerXidSlruCtl, tailPage);

	/*
	 * Flush dirty SLRU pages to disk
	 *
	 * This is not actually necessary from a correctness point of view. We do
	 * it merely as a debugging aid.
	 *
	 * We're doing this after the truncation to avoid writing pages right
	 * before deleting the file in which they sit, which would be completely
	 * pointless.
	 */
	SimpleLruFlush(OldSerXidSlruCtl, true);
}
1144
1145 /*------------------------------------------------------------------------*/
1146
1147 /*
1148 * InitPredicateLocks -- Initialize the predicate locking data structures.
1149 *
1150 * This is called from CreateSharedMemoryAndSemaphores(), which see for
1151 * more comments. In the normal postmaster case, the shared hash tables
1152 * are created here. Backends inherit the pointers
1153 * to the shared tables via fork(). In the EXEC_BACKEND case, each
1154 * backend re-executes this code to obtain pointers to the already existing
1155 * shared hash tables.
1156 */
void
InitPredicateLocks(void)
{
	HASHCTL		info;
	long		max_table_size;
	Size		requestSize;
	bool		found;

#ifndef EXEC_BACKEND
	/* Without EXEC_BACKEND, only the postmaster runs this code. */
	Assert(!IsUnderPostmaster);
#endif

	/*
	 * Compute size of predicate lock target hashtable. Note these
	 * calculations must agree with PredicateLockShmemSize!
	 */
	max_table_size = NPREDICATELOCKTARGETENTS();

	/*
	 * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
	 * per-predicate-lock-target information.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
	info.entrysize = sizeof(PREDICATELOCKTARGET);
	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;

	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
											max_table_size,
											max_table_size,
											&info,
											HASH_ELEM | HASH_BLOBS |
											HASH_PARTITION | HASH_FIXED_SIZE);

	/*
	 * Reserve a dummy entry in the hash table; we use it to make sure there's
	 * always one entry available when we need to split or combine a page,
	 * because running out of space there could mean aborting a
	 * non-serializable transaction.
	 */
	if (!IsUnderPostmaster)
	{
		(void) hash_search(PredicateLockTargetHash, &ScratchTargetTag,
						   HASH_ENTER, &found);
		Assert(!found);
	}

	/* Pre-calculate the hash and partition lock of the scratch entry */
	ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
	ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);

	/*
	 * Allocate hash table for PREDICATELOCK structs.  This stores per
	 * xact-lock-of-a-target information.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(PREDICATELOCKTAG);
	info.entrysize = sizeof(PREDICATELOCK);
	/* Custom hash keeps each lock in the same partition as its target. */
	info.hash = predicatelock_hash;
	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;

	/* Assume an average of 2 xacts per target */
	max_table_size *= 2;

	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
									  max_table_size,
									  max_table_size,
									  &info,
									  HASH_ELEM | HASH_FUNCTION |
									  HASH_PARTITION | HASH_FIXED_SIZE);

	/*
	 * Compute size for serializable transaction hashtable. Note these
	 * calculations must agree with PredicateLockShmemSize!
	 */
	max_table_size = (MaxBackends + max_prepared_xacts);

	/*
	 * Allocate a list to hold information on transactions participating in
	 * predicate locking.
	 *
	 * Assume an average of 10 predicate locking transactions per backend.
	 * This allows aggressive cleanup while detail is present before data must
	 * be summarized for storage in SLRU and the "dummy" transaction.
	 */
	max_table_size *= 10;

	PredXact = ShmemInitStruct("PredXactList",
							   PredXactListDataSize,
							   &found);
	/* Postmaster creates the struct; child processes merely attach. */
	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		int			i;

		SHMQueueInit(&PredXact->availableList);
		SHMQueueInit(&PredXact->activeList);
		PredXact->SxactGlobalXmin = InvalidTransactionId;
		PredXact->SxactGlobalXminCount = 0;
		PredXact->WritableSxactCount = 0;
		PredXact->LastSxactCommitSeqNo = FirstNormalSerCommitSeqNo - 1;
		PredXact->CanPartialClearThrough = 0;
		PredXact->HavePartialClearedThrough = 0;
		requestSize = mul_size((Size) max_table_size,
							   PredXactListElementDataSize);
		PredXact->element = ShmemAlloc(requestSize);
		/* Add all elements to available list, clean. */
		memset(PredXact->element, 0, requestSize);
		for (i = 0; i < max_table_size; i++)
		{
			LWLockInitialize(&PredXact->element[i].sxact.predicateLockListLock,
							 LWTRANCHE_SXACT);
			SHMQueueInsertBefore(&(PredXact->availableList),
								 &(PredXact->element[i].link));
		}

		/*
		 * Set up the permanent dummy sxact to which old committed
		 * transactions are summarized; it carries no vxid/xid of its own
		 * and is flagged as committed forever.
		 */
		PredXact->OldCommittedSxact = CreatePredXact();
		SetInvalidVirtualTransactionId(PredXact->OldCommittedSxact->vxid);
		PredXact->OldCommittedSxact->prepareSeqNo = 0;
		PredXact->OldCommittedSxact->commitSeqNo = 0;
		PredXact->OldCommittedSxact->SeqNo.lastCommitBeforeSnapshot = 0;
		SHMQueueInit(&PredXact->OldCommittedSxact->outConflicts);
		SHMQueueInit(&PredXact->OldCommittedSxact->inConflicts);
		SHMQueueInit(&PredXact->OldCommittedSxact->predicateLocks);
		SHMQueueInit(&PredXact->OldCommittedSxact->finishedLink);
		SHMQueueInit(&PredXact->OldCommittedSxact->possibleUnsafeConflicts);
		PredXact->OldCommittedSxact->topXid = InvalidTransactionId;
		PredXact->OldCommittedSxact->finishedBefore = InvalidTransactionId;
		PredXact->OldCommittedSxact->xmin = InvalidTransactionId;
		PredXact->OldCommittedSxact->flags = SXACT_FLAG_COMMITTED;
		PredXact->OldCommittedSxact->pid = 0;
	}
	/* This never changes, so let's keep a local copy. */
	OldCommittedSxact = PredXact->OldCommittedSxact;

	/*
	 * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
	 * information for serializable transactions which have accessed data.
	 */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(SERIALIZABLEXIDTAG);
	info.entrysize = sizeof(SERIALIZABLEXID);

	SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
										max_table_size,
										max_table_size,
										&info,
										HASH_ELEM | HASH_BLOBS |
										HASH_FIXED_SIZE);

	/*
	 * Allocate space for tracking rw-conflicts in lists attached to the
	 * transactions.
	 *
	 * Assume an average of 5 conflicts per transaction.  Calculations suggest
	 * that this will prevent resource exhaustion in even the most pessimal
	 * loads up to max_connections = 200 with all 200 connections pounding the
	 * database with serializable transactions.  Beyond that, there may be
	 * occasional transactions canceled when trying to flag conflicts. That's
	 * probably OK.
	 */
	max_table_size *= 5;

	RWConflictPool = ShmemInitStruct("RWConflictPool",
									 RWConflictPoolHeaderDataSize,
									 &found);
	Assert(found == IsUnderPostmaster);
	if (!found)
	{
		int			i;

		SHMQueueInit(&RWConflictPool->availableList);
		requestSize = mul_size((Size) max_table_size,
							   RWConflictDataSize);
		RWConflictPool->element = ShmemAlloc(requestSize);
		/* Add all elements to available list, clean. */
		memset(RWConflictPool->element, 0, requestSize);
		for (i = 0; i < max_table_size; i++)
		{
			SHMQueueInsertBefore(&(RWConflictPool->availableList),
								 &(RWConflictPool->element[i].outLink));
		}
	}

	/*
	 * Create or attach to the header for the list of finished serializable
	 * transactions.
	 */
	FinishedSerializableTransactions = (SHM_QUEUE *)
		ShmemInitStruct("FinishedSerializableTransactions",
						sizeof(SHM_QUEUE),
						&found);
	Assert(found == IsUnderPostmaster);
	if (!found)
		SHMQueueInit(FinishedSerializableTransactions);

	/*
	 * Initialize the SLRU storage for old committed serializable
	 * transactions.
	 */
	OldSerXidInit();
}
1358
1359 /*
1360 * Estimate shared-memory space used for predicate lock table
1361 */
1362 Size
PredicateLockShmemSize(void)1363 PredicateLockShmemSize(void)
1364 {
1365 Size size = 0;
1366 long max_table_size;
1367
1368 /* predicate lock target hash table */
1369 max_table_size = NPREDICATELOCKTARGETENTS();
1370 size = add_size(size, hash_estimate_size(max_table_size,
1371 sizeof(PREDICATELOCKTARGET)));
1372
1373 /* predicate lock hash table */
1374 max_table_size *= 2;
1375 size = add_size(size, hash_estimate_size(max_table_size,
1376 sizeof(PREDICATELOCK)));
1377
1378 /*
1379 * Since NPREDICATELOCKTARGETENTS is only an estimate, add 10% safety
1380 * margin.
1381 */
1382 size = add_size(size, size / 10);
1383
1384 /* transaction list */
1385 max_table_size = MaxBackends + max_prepared_xacts;
1386 max_table_size *= 10;
1387 size = add_size(size, PredXactListDataSize);
1388 size = add_size(size, mul_size((Size) max_table_size,
1389 PredXactListElementDataSize));
1390
1391 /* transaction xid table */
1392 size = add_size(size, hash_estimate_size(max_table_size,
1393 sizeof(SERIALIZABLEXID)));
1394
1395 /* rw-conflict pool */
1396 max_table_size *= 5;
1397 size = add_size(size, RWConflictPoolHeaderDataSize);
1398 size = add_size(size, mul_size((Size) max_table_size,
1399 RWConflictDataSize));
1400
1401 /* Head for list of finished serializable transactions. */
1402 size = add_size(size, sizeof(SHM_QUEUE));
1403
1404 /* Shared memory structures for SLRU tracking of old committed xids. */
1405 size = add_size(size, sizeof(OldSerXidControlData));
1406 size = add_size(size, SimpleLruShmemSize(NUM_OLDSERXID_BUFFERS, 0));
1407
1408 return size;
1409 }
1410
1411
1412 /*
1413 * Compute the hash code associated with a PREDICATELOCKTAG.
1414 *
1415 * Because we want to use just one set of partition locks for both the
1416 * PREDICATELOCKTARGET and PREDICATELOCK hash tables, we have to make sure
1417 * that PREDICATELOCKs fall into the same partition number as their
1418 * associated PREDICATELOCKTARGETs. dynahash.c expects the partition number
1419 * to be the low-order bits of the hash code, and therefore a
1420 * PREDICATELOCKTAG's hash code must have the same low-order bits as the
1421 * associated PREDICATELOCKTARGETTAG's hash code. We achieve this with this
1422 * specialized hash function.
1423 */
1424 static uint32
predicatelock_hash(const void * key,Size keysize)1425 predicatelock_hash(const void *key, Size keysize)
1426 {
1427 const PREDICATELOCKTAG *predicatelocktag = (const PREDICATELOCKTAG *) key;
1428 uint32 targethash;
1429
1430 Assert(keysize == sizeof(PREDICATELOCKTAG));
1431
1432 /* Look into the associated target object, and compute its hash code */
1433 targethash = PredicateLockTargetTagHashCode(&predicatelocktag->myTarget->tag);
1434
1435 return PredicateLockHashCodeFromTargetHashCode(predicatelocktag, targethash);
1436 }
1437
1438
1439 /*
1440 * GetPredicateLockStatusData
1441 * Return a table containing the internal state of the predicate
1442 * lock manager for use in pg_lock_status.
1443 *
1444 * Like GetLockStatusData, this function tries to hold the partition LWLocks
1445 * for as short a time as possible by returning two arrays that simply
1446 * contain the PREDICATELOCKTARGETTAG and SERIALIZABLEXACT for each lock
1447 * table entry. Multiple copies of the same PREDICATELOCKTARGETTAG and
1448 * SERIALIZABLEXACT will likely appear.
1449 */
PredicateLockData *
GetPredicateLockStatusData(void)
{
	PredicateLockData *data;
	int			i;
	int			els,
				el;
	HASH_SEQ_STATUS seqstat;
	PREDICATELOCK *predlock;

	/* Result and its arrays are palloc'd in the caller's memory context. */
	data = (PredicateLockData *) palloc(sizeof(PredicateLockData));

	/*
	 * To ensure consistency, take simultaneous locks on all partition locks
	 * in ascending order, then SerializableXactHashLock.
	 */
	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);

	/* Get number of locks and allocate appropriately-sized arrays. */
	els = hash_get_num_entries(PredicateLockHash);
	data->nelements = els;
	data->locktags = (PREDICATELOCKTARGETTAG *)
		palloc(sizeof(PREDICATELOCKTARGETTAG) * els);
	data->xacts = (SERIALIZABLEXACT *)
		palloc(sizeof(SERIALIZABLEXACT) * els);


	/* Scan through PredicateLockHash and copy contents */
	hash_seq_init(&seqstat, PredicateLockHash);

	el = 0;

	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
	{
		/* Copy both the target tag and the full sxact, by value. */
		data->locktags[el] = predlock->tag.myTarget->tag;
		data->xacts[el] = *predlock->tag.myXact;
		el++;
	}

	/* Entry count can't have changed while we held the locks. */
	Assert(el == els);

	/* Release locks in reverse order */
	LWLockRelease(SerializableXactHashLock);
	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));

	return data;
}
1500
1501 /*
1502 * Free up shared memory structures by pushing the oldest sxact (the one at
1503 * the front of the SummarizeOldestCommittedSxact queue) into summary form.
1504 * Each call will free exactly one SERIALIZABLEXACT structure and may also
1505 * free one or more of these structures: SERIALIZABLEXID, PREDICATELOCK,
1506 * PREDICATELOCKTARGET, RWConflictData.
1507 */
static void
SummarizeOldestCommittedSxact(void)
{
	SERIALIZABLEXACT *sxact;

	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);

	/*
	 * This function is only called if there are no sxact slots available.
	 * Some of them must belong to old, already-finished transactions, so
	 * there should be something in FinishedSerializableTransactions list that
	 * we can summarize. However, there's a race condition: while we were not
	 * holding any locks, a transaction might have ended and cleaned up all
	 * the finished sxact entries already, freeing up their sxact slots. In
	 * that case, we have nothing to do here. The caller will find one of the
	 * slots released by the other backend when it retries.
	 */
	if (SHMQueueEmpty(FinishedSerializableTransactions))
	{
		LWLockRelease(SerializableFinishedListLock);
		return;
	}

	/*
	 * Grab the first sxact off the finished list -- this will be the earliest
	 * commit.  Remove it from the list.
	 */
	sxact = (SERIALIZABLEXACT *)
		SHMQueueNext(FinishedSerializableTransactions,
					 FinishedSerializableTransactions,
					 offsetof(SERIALIZABLEXACT, finishedLink));
	SHMQueueDelete(&(sxact->finishedLink));

	/*
	 * Add to SLRU summary information.  Only r/w transactions with a valid
	 * topXid leave an SLRU entry; record the earliest conflict-out commit
	 * seqno, or the invalid seqno if there were no conflicts out.
	 */
	if (TransactionIdIsValid(sxact->topXid) && !SxactIsReadOnly(sxact))
		OldSerXidAdd(sxact->topXid, SxactHasConflictOut(sxact)
					 ? sxact->SeqNo.earliestOutConflictCommit : InvalidSerCommitSeqNo);

	/* Summarize and release the detail. */
	ReleaseOneSerializableXact(sxact, false, true);

	LWLockRelease(SerializableFinishedListLock);
}
1551
1552 /*
1553 * GetSafeSnapshot
1554 * Obtain and register a snapshot for a READ ONLY DEFERRABLE
1555 * transaction. Ensures that the snapshot is "safe", i.e. a
1556 * read-only transaction running on it can execute serializably
1557 * without further checks. This requires waiting for concurrent
1558 * transactions to complete, and retrying with a new snapshot if
1559 * one of them could possibly create a conflict.
1560 *
1561 * As with GetSerializableTransactionSnapshot (which this is a subroutine
1562 * for), the passed-in Snapshot pointer should reference a static data
1563 * area that can safely be passed to GetSnapshotData.
1564 */
static Snapshot
GetSafeSnapshot(Snapshot origSnapshot)
{
	Snapshot	snapshot;

	Assert(XactReadOnly && XactDeferrable);

	/* Retry loop: exits via return (safe at once) or break (safe later). */
	while (true)
	{
		/*
		 * GetSerializableTransactionSnapshotInt is going to call
		 * GetSnapshotData, so we need to provide it the static snapshot area
		 * our caller passed to us.  The pointer returned is actually the same
		 * one passed to it, but we avoid assuming that here.
		 */
		snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
														 NULL, InvalidPid);

		if (MySerializableXact == InvalidSerializableXact)
			return snapshot;	/* no concurrent r/w xacts; it's safe */

		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

		/*
		 * Wait for concurrent transactions to finish. Stop early if one of
		 * them marked us as conflicted.
		 */
		MySerializableXact->flags |= SXACT_FLAG_DEFERRABLE_WAITING;
		while (!(SHMQueueEmpty(&MySerializableXact->possibleUnsafeConflicts) ||
				 SxactIsROUnsafe(MySerializableXact)))
		{
			/* Drop the lock while sleeping; re-check after each wakeup. */
			LWLockRelease(SerializableXactHashLock);
			ProcWaitForSignal(WAIT_EVENT_SAFE_SNAPSHOT);
			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		}
		MySerializableXact->flags &= ~SXACT_FLAG_DEFERRABLE_WAITING;

		if (!SxactIsROUnsafe(MySerializableXact))
		{
			LWLockRelease(SerializableXactHashLock);
			break;				/* success */
		}

		LWLockRelease(SerializableXactHashLock);

		/* else, need to retry... */
		ereport(DEBUG2,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("deferrable snapshot was unsafe; trying a new one")));
		/* Discard this attempt's SSI state before retrying. */
		ReleasePredicateLocks(false, false);
	}

	/*
	 * Now we have a safe snapshot, so we don't need to do any further checks.
	 */
	Assert(SxactIsROSafe(MySerializableXact));
	ReleasePredicateLocks(false, true);

	return snapshot;
}
1625
1626 /*
1627 * GetSafeSnapshotBlockingPids
1628 * If the specified process is currently blocked in GetSafeSnapshot,
1629 * write the process IDs of all processes that it is blocked by
1630 * into the caller-supplied buffer output[]. The list is truncated at
1631 * output_size, and the number of PIDs written into the buffer is
1632 * returned. Returns zero if the given PID is not currently blocked
1633 * in GetSafeSnapshot.
1634 */
1635 int
GetSafeSnapshotBlockingPids(int blocked_pid,int * output,int output_size)1636 GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
1637 {
1638 int num_written = 0;
1639 SERIALIZABLEXACT *sxact;
1640
1641 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
1642
1643 /* Find blocked_pid's SERIALIZABLEXACT by linear search. */
1644 for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
1645 {
1646 if (sxact->pid == blocked_pid)
1647 break;
1648 }
1649
1650 /* Did we find it, and is it currently waiting in GetSafeSnapshot? */
1651 if (sxact != NULL && SxactIsDeferrableWaiting(sxact))
1652 {
1653 RWConflict possibleUnsafeConflict;
1654
1655 /* Traverse the list of possible unsafe conflicts collecting PIDs. */
1656 possibleUnsafeConflict = (RWConflict)
1657 SHMQueueNext(&sxact->possibleUnsafeConflicts,
1658 &sxact->possibleUnsafeConflicts,
1659 offsetof(RWConflictData, inLink));
1660
1661 while (possibleUnsafeConflict != NULL && num_written < output_size)
1662 {
1663 output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
1664 possibleUnsafeConflict = (RWConflict)
1665 SHMQueueNext(&sxact->possibleUnsafeConflicts,
1666 &possibleUnsafeConflict->inLink,
1667 offsetof(RWConflictData, inLink));
1668 }
1669 }
1670
1671 LWLockRelease(SerializableXactHashLock);
1672
1673 return num_written;
1674 }
1675
1676 /*
1677 * Acquire a snapshot that can be used for the current transaction.
1678 *
1679 * Make sure we have a SERIALIZABLEXACT reference in MySerializableXact.
1680 * It should be current for this process and be contained in PredXact.
1681 *
1682 * The passed-in Snapshot pointer should reference a static data area that
1683 * can safely be passed to GetSnapshotData. The return value is actually
1684 * always this same pointer; no new snapshot data structure is allocated
1685 * within this function.
1686 */
1687 Snapshot
GetSerializableTransactionSnapshot(Snapshot snapshot)1688 GetSerializableTransactionSnapshot(Snapshot snapshot)
1689 {
1690 Assert(IsolationIsSerializable());
1691
1692 /*
1693 * Can't use serializable mode while recovery is still active, as it is,
1694 * for example, on a hot standby. We could get here despite the check in
1695 * check_XactIsoLevel() if default_transaction_isolation is set to
1696 * serializable, so phrase the hint accordingly.
1697 */
1698 if (RecoveryInProgress())
1699 ereport(ERROR,
1700 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1701 errmsg("cannot use serializable mode in a hot standby"),
1702 errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
1703 errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
1704
1705 /*
1706 * A special optimization is available for SERIALIZABLE READ ONLY
1707 * DEFERRABLE transactions -- we can wait for a suitable snapshot and
1708 * thereby avoid all SSI overhead once it's running.
1709 */
1710 if (XactReadOnly && XactDeferrable)
1711 return GetSafeSnapshot(snapshot);
1712
1713 return GetSerializableTransactionSnapshotInt(snapshot,
1714 NULL, InvalidPid);
1715 }
1716
1717 /*
1718 * Import a snapshot to be used for the current transaction.
1719 *
1720 * This is nearly the same as GetSerializableTransactionSnapshot, except that
1721 * we don't take a new snapshot, but rather use the data we're handed.
1722 *
1723 * The caller must have verified that the snapshot came from a serializable
1724 * transaction; and if we're read-write, the source transaction must not be
1725 * read-only.
1726 */
1727 void
SetSerializableTransactionSnapshot(Snapshot snapshot,VirtualTransactionId * sourcevxid,int sourcepid)1728 SetSerializableTransactionSnapshot(Snapshot snapshot,
1729 VirtualTransactionId *sourcevxid,
1730 int sourcepid)
1731 {
1732 Assert(IsolationIsSerializable());
1733
1734 /*
1735 * If this is called by parallel.c in a parallel worker, we don't want to
1736 * create a SERIALIZABLEXACT just yet because the leader's
1737 * SERIALIZABLEXACT will be installed with AttachSerializableXact(). We
1738 * also don't want to reject SERIALIZABLE READ ONLY DEFERRABLE in this
1739 * case, because the leader has already determined that the snapshot it
1740 * has passed us is safe. So there is nothing for us to do.
1741 */
1742 if (IsParallelWorker())
1743 return;
1744
1745 /*
1746 * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
1747 * import snapshots, since there's no way to wait for a safe snapshot when
1748 * we're using the snap we're told to. (XXX instead of throwing an error,
1749 * we could just ignore the XactDeferrable flag?)
1750 */
1751 if (XactReadOnly && XactDeferrable)
1752 ereport(ERROR,
1753 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1754 errmsg("a snapshot-importing transaction must not be READ ONLY DEFERRABLE")));
1755
1756 (void) GetSerializableTransactionSnapshotInt(snapshot, sourcevxid,
1757 sourcepid);
1758 }
1759
/*
 * Guts of GetSerializableTransactionSnapshot
 *
 * If sourcevxid is valid, this is actually an import operation and we should
 * skip calling GetSnapshotData, because the snapshot contents are already
 * loaded up.  HOWEVER: to avoid race conditions, we must check that the
 * source xact is still running after we acquire SerializableXactHashLock.
 * We do that by calling ProcArrayInstallImportedXmin.
 *
 * On return, MySerializableXact points to a newly initialized
 * SERIALIZABLEXACT and the backend-local predicate lock table exists --
 * unless we detected that this read-only transaction can safely opt out of
 * SSI tracking entirely, in which case neither is set up.
 */
static Snapshot
GetSerializableTransactionSnapshotInt(Snapshot snapshot,
									  VirtualTransactionId *sourcevxid,
									  int sourcepid)
{
	PGPROC	   *proc;
	VirtualTransactionId vxid;
	SERIALIZABLEXACT *sxact,
			   *othersxact;

	/* We only do this for serializable transactions.  Once. */
	Assert(MySerializableXact == InvalidSerializableXact);

	Assert(!RecoveryInProgress());

	/*
	 * Since all parts of a serializable transaction must use the same
	 * snapshot, it is too late to establish one after a parallel operation
	 * has begun.
	 */
	if (IsInParallelMode())
		elog(ERROR, "cannot establish serializable snapshot during a parallel operation");

	proc = MyProc;
	Assert(proc != NULL);
	GET_VXID_FROM_PGPROC(vxid, *proc);

	/*
	 * First we get the sxact structure, which may involve looping and access
	 * to the "finished" list to free a structure for use.
	 *
	 * We must hold SerializableXactHashLock when taking/checking the snapshot
	 * to avoid race conditions, for much the same reasons that
	 * GetSnapshotData takes the ProcArrayLock.  Since we might have to
	 * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
	 * this means we have to create the sxact first, which is a bit annoying
	 * (in particular, an elog(ERROR) in procarray.c would cause us to leak
	 * the sxact).  Consider refactoring to avoid this.
	 */
#ifdef TEST_OLDSERXID
	SummarizeOldestCommittedSxact();
#endif
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
	do
	{
		sxact = CreatePredXact();
		/* If null, push out committed sxact to SLRU summary & retry. */
		if (!sxact)
		{
			LWLockRelease(SerializableXactHashLock);
			SummarizeOldestCommittedSxact();
			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		}
	} while (!sxact);

	/* Get the snapshot, or check that it's safe to use */
	if (!sourcevxid)
		snapshot = GetSnapshotData(snapshot);
	else if (!ProcArrayInstallImportedXmin(snapshot->xmin, sourcevxid))
	{
		/* The source transaction is gone; the import cannot be made safe. */
		ReleasePredXact(sxact);
		LWLockRelease(SerializableXactHashLock);
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("could not import the requested snapshot"),
				 errdetail("The source process with PID %d is not running anymore.",
						   sourcepid)));
	}

	/*
	 * If there are no serializable transactions which are not read-only, we
	 * can "opt out" of predicate locking and conflict checking for a
	 * read-only transaction.
	 *
	 * The reason this is safe is that a read-only transaction can only become
	 * part of a dangerous structure if it overlaps a writable transaction
	 * which in turn overlaps a writable transaction which committed before
	 * the read-only transaction started.  A new writable transaction can
	 * overlap this one, but it can't meet the other condition of overlapping
	 * a transaction which committed before this one started.
	 */
	if (XactReadOnly && PredXact->WritableSxactCount == 0)
	{
		ReleasePredXact(sxact);
		LWLockRelease(SerializableXactHashLock);
		return snapshot;
	}

	/* Maintain serializable global xmin info. */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
	{
		/* We are the only active serializable transaction. */
		Assert(PredXact->SxactGlobalXminCount == 0);
		PredXact->SxactGlobalXmin = snapshot->xmin;
		PredXact->SxactGlobalXminCount = 1;
		OldSerXidSetActiveSerXmin(snapshot->xmin);
	}
	else if (TransactionIdEquals(snapshot->xmin, PredXact->SxactGlobalXmin))
	{
		/* Another active xact shares our xmin; bump the refcount. */
		Assert(PredXact->SxactGlobalXminCount > 0);
		PredXact->SxactGlobalXminCount++;
	}
	else
	{
		/* Our xmin must be newer; the global minimum is unaffected. */
		Assert(TransactionIdFollows(snapshot->xmin, PredXact->SxactGlobalXmin));
	}

	/* Initialize the structure. */
	sxact->vxid = vxid;
	sxact->SeqNo.lastCommitBeforeSnapshot = PredXact->LastSxactCommitSeqNo;
	sxact->prepareSeqNo = InvalidSerCommitSeqNo;
	sxact->commitSeqNo = InvalidSerCommitSeqNo;
	SHMQueueInit(&(sxact->outConflicts));
	SHMQueueInit(&(sxact->inConflicts));
	SHMQueueInit(&(sxact->possibleUnsafeConflicts));
	sxact->topXid = GetTopTransactionIdIfAny();
	sxact->finishedBefore = InvalidTransactionId;
	sxact->xmin = snapshot->xmin;
	sxact->pid = MyProcPid;
	SHMQueueInit(&(sxact->predicateLocks));
	SHMQueueElemInit(&(sxact->finishedLink));
	sxact->flags = 0;
	if (XactReadOnly)
	{
		sxact->flags |= SXACT_FLAG_READ_ONLY;

		/*
		 * Register all concurrent r/w transactions as possible conflicts; if
		 * all of them commit without any outgoing conflicts to earlier
		 * transactions then this snapshot can be deemed safe (and we can run
		 * without tracking predicate locks).
		 */
		for (othersxact = FirstPredXact();
			 othersxact != NULL;
			 othersxact = NextPredXact(othersxact))
		{
			if (!SxactIsCommitted(othersxact)
				&& !SxactIsDoomed(othersxact)
				&& !SxactIsReadOnly(othersxact))
			{
				SetPossibleUnsafeConflict(sxact, othersxact);
			}
		}
	}
	else
	{
		++(PredXact->WritableSxactCount);
		Assert(PredXact->WritableSxactCount <=
			   (MaxBackends + max_prepared_xacts));
	}

	MySerializableXact = sxact;
	MyXactDidWrite = false;		/* haven't written anything yet */

	LWLockRelease(SerializableXactHashLock);

	CreateLocalPredicateLockHash();

	return snapshot;
}
1928
1929 static void
CreateLocalPredicateLockHash(void)1930 CreateLocalPredicateLockHash(void)
1931 {
1932 HASHCTL hash_ctl;
1933
1934 /* Initialize the backend-local hash table of parent locks */
1935 Assert(LocalPredicateLockHash == NULL);
1936 MemSet(&hash_ctl, 0, sizeof(hash_ctl));
1937 hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
1938 hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
1939 LocalPredicateLockHash = hash_create("Local predicate lock",
1940 max_predicate_locks_per_xact,
1941 &hash_ctl,
1942 HASH_ELEM | HASH_BLOBS);
1943 }
1944
1945 /*
1946 * Register the top level XID in SerializableXidHash.
1947 * Also store it for easy reference in MySerializableXact.
1948 */
1949 void
RegisterPredicateLockingXid(TransactionId xid)1950 RegisterPredicateLockingXid(TransactionId xid)
1951 {
1952 SERIALIZABLEXIDTAG sxidtag;
1953 SERIALIZABLEXID *sxid;
1954 bool found;
1955
1956 /*
1957 * If we're not tracking predicate lock data for this transaction, we
1958 * should ignore the request and return quickly.
1959 */
1960 if (MySerializableXact == InvalidSerializableXact)
1961 return;
1962
1963 /* We should have a valid XID and be at the top level. */
1964 Assert(TransactionIdIsValid(xid));
1965
1966 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
1967
1968 /* This should only be done once per transaction. */
1969 Assert(MySerializableXact->topXid == InvalidTransactionId);
1970
1971 MySerializableXact->topXid = xid;
1972
1973 sxidtag.xid = xid;
1974 sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
1975 &sxidtag,
1976 HASH_ENTER, &found);
1977 Assert(!found);
1978
1979 /* Initialize the structure. */
1980 sxid->myXact = MySerializableXact;
1981 LWLockRelease(SerializableXactHashLock);
1982 }
1983
1984
1985 /*
1986 * Check whether there are any predicate locks held by any transaction
1987 * for the page at the given block number.
1988 *
1989 * Note that the transaction may be completed but not yet subject to
1990 * cleanup due to overlapping serializable transactions. This must
1991 * return valid information regardless of transaction isolation level.
1992 *
1993 * Also note that this doesn't check for a conflicting relation lock,
1994 * just a lock specifically on the given page.
1995 *
1996 * One use is to support proper behavior during GiST index vacuum.
1997 */
1998 bool
PageIsPredicateLocked(Relation relation,BlockNumber blkno)1999 PageIsPredicateLocked(Relation relation, BlockNumber blkno)
2000 {
2001 PREDICATELOCKTARGETTAG targettag;
2002 uint32 targettaghash;
2003 LWLock *partitionLock;
2004 PREDICATELOCKTARGET *target;
2005
2006 SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
2007 relation->rd_node.dbNode,
2008 relation->rd_id,
2009 blkno);
2010
2011 targettaghash = PredicateLockTargetTagHashCode(&targettag);
2012 partitionLock = PredicateLockHashPartitionLock(targettaghash);
2013 LWLockAcquire(partitionLock, LW_SHARED);
2014 target = (PREDICATELOCKTARGET *)
2015 hash_search_with_hash_value(PredicateLockTargetHash,
2016 &targettag, targettaghash,
2017 HASH_FIND, NULL);
2018 LWLockRelease(partitionLock);
2019
2020 return (target != NULL);
2021 }
2022
2023
2024 /*
2025 * Check whether a particular lock is held by this transaction.
2026 *
2027 * Important note: this function may return false even if the lock is
2028 * being held, because it uses the local lock table which is not
2029 * updated if another transaction modifies our lock list (e.g. to
2030 * split an index page). It can also return true when a coarser
2031 * granularity lock that covers this target is being held. Be careful
2032 * to only use this function in circumstances where such errors are
2033 * acceptable!
2034 */
2035 static bool
PredicateLockExists(const PREDICATELOCKTARGETTAG * targettag)2036 PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag)
2037 {
2038 LOCALPREDICATELOCK *lock;
2039
2040 /* check local hash table */
2041 lock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2042 targettag,
2043 HASH_FIND, NULL);
2044
2045 if (!lock)
2046 return false;
2047
2048 /*
2049 * Found entry in the table, but still need to check whether it's actually
2050 * held -- it could just be a parent of some held lock.
2051 */
2052 return lock->held;
2053 }
2054
2055 /*
2056 * Return the parent lock tag in the lock hierarchy: the next coarser
2057 * lock that covers the provided tag.
2058 *
2059 * Returns true and sets *parent to the parent tag if one exists,
2060 * returns false if none exists.
2061 */
2062 static bool
GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG * tag,PREDICATELOCKTARGETTAG * parent)2063 GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
2064 PREDICATELOCKTARGETTAG *parent)
2065 {
2066 switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2067 {
2068 case PREDLOCKTAG_RELATION:
2069 /* relation locks have no parent lock */
2070 return false;
2071
2072 case PREDLOCKTAG_PAGE:
2073 /* parent lock is relation lock */
2074 SET_PREDICATELOCKTARGETTAG_RELATION(*parent,
2075 GET_PREDICATELOCKTARGETTAG_DB(*tag),
2076 GET_PREDICATELOCKTARGETTAG_RELATION(*tag));
2077
2078 return true;
2079
2080 case PREDLOCKTAG_TUPLE:
2081 /* parent lock is page lock */
2082 SET_PREDICATELOCKTARGETTAG_PAGE(*parent,
2083 GET_PREDICATELOCKTARGETTAG_DB(*tag),
2084 GET_PREDICATELOCKTARGETTAG_RELATION(*tag),
2085 GET_PREDICATELOCKTARGETTAG_PAGE(*tag));
2086 return true;
2087 }
2088
2089 /* not reachable */
2090 Assert(false);
2091 return false;
2092 }
2093
2094 /*
2095 * Check whether the lock we are considering is already covered by a
2096 * coarser lock for our transaction.
2097 *
2098 * Like PredicateLockExists, this function might return a false
2099 * negative, but it will never return a false positive.
2100 */
2101 static bool
CoarserLockCovers(const PREDICATELOCKTARGETTAG * newtargettag)2102 CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
2103 {
2104 PREDICATELOCKTARGETTAG targettag,
2105 parenttag;
2106
2107 targettag = *newtargettag;
2108
2109 /* check parents iteratively until no more */
2110 while (GetParentPredicateLockTag(&targettag, &parenttag))
2111 {
2112 targettag = parenttag;
2113 if (PredicateLockExists(&targettag))
2114 return true;
2115 }
2116
2117 /* no more parents to check; lock is not covered */
2118 return false;
2119 }
2120
2121 /*
2122 * Remove the dummy entry from the predicate lock target hash, to free up some
2123 * scratch space. The caller must be holding SerializablePredicateLockListLock,
2124 * and must restore the entry with RestoreScratchTarget() before releasing the
2125 * lock.
2126 *
2127 * If lockheld is true, the caller is already holding the partition lock
2128 * of the partition containing the scratch entry.
2129 */
2130 static void
RemoveScratchTarget(bool lockheld)2131 RemoveScratchTarget(bool lockheld)
2132 {
2133 bool found;
2134
2135 Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2136
2137 if (!lockheld)
2138 LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2139 hash_search_with_hash_value(PredicateLockTargetHash,
2140 &ScratchTargetTag,
2141 ScratchTargetTagHash,
2142 HASH_REMOVE, &found);
2143 Assert(found);
2144 if (!lockheld)
2145 LWLockRelease(ScratchPartitionLock);
2146 }
2147
2148 /*
2149 * Re-insert the dummy entry in predicate lock target hash.
2150 */
2151 static void
RestoreScratchTarget(bool lockheld)2152 RestoreScratchTarget(bool lockheld)
2153 {
2154 bool found;
2155
2156 Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2157
2158 if (!lockheld)
2159 LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
2160 hash_search_with_hash_value(PredicateLockTargetHash,
2161 &ScratchTargetTag,
2162 ScratchTargetTagHash,
2163 HASH_ENTER, &found);
2164 Assert(!found);
2165 if (!lockheld)
2166 LWLockRelease(ScratchPartitionLock);
2167 }
2168
2169 /*
2170 * Check whether the list of related predicate locks is empty for a
2171 * predicate lock target, and remove the target if it is.
2172 */
2173 static void
RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET * target,uint32 targettaghash)2174 RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
2175 {
2176 PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
2177
2178 Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
2179
2180 /* Can't remove it until no locks at this target. */
2181 if (!SHMQueueEmpty(&target->predicateLocks))
2182 return;
2183
2184 /* Actually remove the target. */
2185 rmtarget = hash_search_with_hash_value(PredicateLockTargetHash,
2186 &target->tag,
2187 targettaghash,
2188 HASH_REMOVE, NULL);
2189 Assert(rmtarget == target);
2190 }
2191
/*
 * Delete child target locks owned by this process.
 * This implementation is assuming that the usage of each target tag field
 * is uniform.  No need to make this hard if we don't have to.
 *
 * We acquire an LWLock in the case of parallel mode, because worker
 * backends have access to the leader's SERIALIZABLEXACT.  Otherwise,
 * we aren't acquiring LWLocks for the predicate lock or lock
 * target structures associated with this transaction unless we're going
 * to modify them, because no other process is permitted to modify our
 * locks.
 */
static void
DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
{
	SERIALIZABLEXACT *sxact;
	PREDICATELOCK *predlock;

	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	sxact = MySerializableXact;
	if (IsInParallelMode())
		LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
	/* Walk this transaction's list of predicate locks. */
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(sxact->predicateLocks),
					 &(sxact->predicateLocks),
					 offsetof(PREDICATELOCK, xactLink));
	while (predlock)
	{
		SHM_QUEUE  *predlocksxactlink;
		PREDICATELOCK *nextpredlock;
		PREDICATELOCKTAG oldlocktag;
		PREDICATELOCKTARGET *oldtarget;
		PREDICATELOCKTARGETTAG oldtargettag;

		/* Fetch the successor first, since this entry may be deleted. */
		predlocksxactlink = &(predlock->xactLink);
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(sxact->predicateLocks),
						 predlocksxactlink,
						 offsetof(PREDICATELOCK, xactLink));

		oldlocktag = predlock->tag;
		Assert(oldlocktag.myXact == sxact);
		oldtarget = oldlocktag.myTarget;
		oldtargettag = oldtarget->tag;

		/* Drop this lock if the new, coarser, tag covers its target. */
		if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
		{
			uint32		oldtargettaghash;
			LWLock	   *partitionLock;
			PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;

			oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
			partitionLock = PredicateLockHashPartitionLock(oldtargettaghash);

			/* Must hold the partition lock to modify shared structures. */
			LWLockAcquire(partitionLock, LW_EXCLUSIVE);

			SHMQueueDelete(predlocksxactlink);
			SHMQueueDelete(&(predlock->targetLink));
			rmpredlock = hash_search_with_hash_value
				(PredicateLockHash,
				 &oldlocktag,
				 PredicateLockHashCodeFromTargetHashCode(&oldlocktag,
														 oldtargettaghash),
				 HASH_REMOVE, NULL);
			Assert(rmpredlock == predlock);

			/* Also drop the target if this was its last lock. */
			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);

			LWLockRelease(partitionLock);

			/* Fix the child counts in the backend-local table. */
			DecrementParentLocks(&oldtargettag);
		}

		predlock = nextpredlock;
	}
	if (IsInParallelMode())
		LWLockRelease(&sxact->predicateLockListLock);
	LWLockRelease(SerializablePredicateLockListLock);
}
2271
2272 /*
2273 * Returns the promotion limit for a given predicate lock target. This is the
2274 * max number of descendant locks allowed before promoting to the specified
2275 * tag. Note that the limit includes non-direct descendants (e.g., both tuples
2276 * and pages for a relation lock).
2277 *
2278 * Currently the default limit is 2 for a page lock, and half of the value of
2279 * max_pred_locks_per_transaction - 1 for a relation lock, to match behavior
2280 * of earlier releases when upgrading.
2281 *
2282 * TODO SSI: We should probably add additional GUCs to allow a maximum ratio
2283 * of page and tuple locks based on the pages in a relation, and the maximum
2284 * ratio of tuple locks to tuples in a page. This would provide more
2285 * generally "balanced" allocation of locks to where they are most useful,
2286 * while still allowing the absolute numbers to prevent one relation from
2287 * tying up all predicate lock resources.
2288 */
2289 static int
MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG * tag)2290 MaxPredicateChildLocks(const PREDICATELOCKTARGETTAG *tag)
2291 {
2292 switch (GET_PREDICATELOCKTARGETTAG_TYPE(*tag))
2293 {
2294 case PREDLOCKTAG_RELATION:
2295 return max_predicate_locks_per_relation < 0
2296 ? (max_predicate_locks_per_xact
2297 / (-max_predicate_locks_per_relation)) - 1
2298 : max_predicate_locks_per_relation;
2299
2300 case PREDLOCKTAG_PAGE:
2301 return max_predicate_locks_per_page;
2302
2303 case PREDLOCKTAG_TUPLE:
2304
2305 /*
2306 * not reachable: nothing is finer-granularity than a tuple, so we
2307 * should never try to promote to it.
2308 */
2309 Assert(false);
2310 return 0;
2311 }
2312
2313 /* not reachable */
2314 Assert(false);
2315 return 0;
2316 }
2317
2318 /*
2319 * For all ancestors of a newly-acquired predicate lock, increment
2320 * their child count in the parent hash table. If any of them have
2321 * more descendants than their promotion threshold, acquire the
2322 * coarsest such lock.
2323 *
2324 * Returns true if a parent lock was acquired and false otherwise.
2325 */
2326 static bool
CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG * reqtag)2327 CheckAndPromotePredicateLockRequest(const PREDICATELOCKTARGETTAG *reqtag)
2328 {
2329 PREDICATELOCKTARGETTAG targettag,
2330 nexttag,
2331 promotiontag;
2332 LOCALPREDICATELOCK *parentlock;
2333 bool found,
2334 promote;
2335
2336 promote = false;
2337
2338 targettag = *reqtag;
2339
2340 /* check parents iteratively */
2341 while (GetParentPredicateLockTag(&targettag, &nexttag))
2342 {
2343 targettag = nexttag;
2344 parentlock = (LOCALPREDICATELOCK *) hash_search(LocalPredicateLockHash,
2345 &targettag,
2346 HASH_ENTER,
2347 &found);
2348 if (!found)
2349 {
2350 parentlock->held = false;
2351 parentlock->childLocks = 1;
2352 }
2353 else
2354 parentlock->childLocks++;
2355
2356 if (parentlock->childLocks >
2357 MaxPredicateChildLocks(&targettag))
2358 {
2359 /*
2360 * We should promote to this parent lock. Continue to check its
2361 * ancestors, however, both to get their child counts right and to
2362 * check whether we should just go ahead and promote to one of
2363 * them.
2364 */
2365 promotiontag = targettag;
2366 promote = true;
2367 }
2368 }
2369
2370 if (promote)
2371 {
2372 /* acquire coarsest ancestor eligible for promotion */
2373 PredicateLockAcquire(&promotiontag);
2374 return true;
2375 }
2376 else
2377 return false;
2378 }
2379
/*
 * When releasing a lock, decrement the child count on all ancestor
 * locks.
 *
 * This is called only when releasing a lock via
 * DeleteChildTargetLocks (i.e. when a lock becomes redundant because
 * we've acquired its parent, possibly due to promotion) or when a new
 * MVCC write lock makes the predicate lock unnecessary.  There's no
 * point in calling it when locks are released at transaction end, as
 * this information is no longer needed.
 */
static void
DecrementParentLocks(const PREDICATELOCKTARGETTAG *targettag)
{
	PREDICATELOCKTARGETTAG parenttag,
				nexttag;

	parenttag = *targettag;

	/* Walk up the hierarchy, adjusting each ancestor's bookkeeping entry. */
	while (GetParentPredicateLockTag(&parenttag, &nexttag))
	{
		uint32		targettaghash;
		LOCALPREDICATELOCK *parentlock,
				   *rmlock PG_USED_FOR_ASSERTS_ONLY;

		parenttag = nexttag;
		targettaghash = PredicateLockTargetTagHashCode(&parenttag);
		parentlock = (LOCALPREDICATELOCK *)
			hash_search_with_hash_value(LocalPredicateLockHash,
										&parenttag, targettaghash,
										HASH_FIND, NULL);

		/*
		 * There's a small chance the parent lock doesn't exist in the lock
		 * table.  This can happen if we prematurely removed it because an
		 * index split caused the child refcount to be off.
		 */
		if (parentlock == NULL)
			continue;

		parentlock->childLocks--;

		/*
		 * Under similar circumstances the parent lock's refcount might be
		 * zero.  This only happens if we're holding that lock (otherwise we
		 * would have removed the entry).
		 */
		if (parentlock->childLocks < 0)
		{
			Assert(parentlock->held);
			parentlock->childLocks = 0;
		}

		/* Discard entries that are neither held nor tracking any children. */
		if ((parentlock->childLocks == 0) && (!parentlock->held))
		{
			rmlock = (LOCALPREDICATELOCK *)
				hash_search_with_hash_value(LocalPredicateLockHash,
											&parenttag, targettaghash,
											HASH_REMOVE, NULL);
			Assert(rmlock == parentlock);
		}
	}
}
2443
/*
 * Indicate that a predicate lock on the given target is held by the
 * specified transaction.  Has no effect if the lock is already held.
 *
 * This updates the lock table and the sxact's lock list, and creates
 * the lock target if necessary, but does *not* do anything related to
 * granularity promotion or the local lock table.  See
 * PredicateLockAcquire for that.
 *
 * Lock ordering: SerializablePredicateLockListLock, then (in parallel
 * mode) the sxact's own list lock, then the target hash partition lock.
 */
static void
CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
					uint32 targettaghash,
					SERIALIZABLEXACT *sxact)
{
	PREDICATELOCKTARGET *target;
	PREDICATELOCKTAG locktag;
	PREDICATELOCK *lock;
	LWLock	   *partitionLock;
	bool		found;

	partitionLock = PredicateLockHashPartitionLock(targettaghash);

	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	if (IsInParallelMode())
		LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/* Make sure that the target is represented. */
	target = (PREDICATELOCKTARGET *)
		hash_search_with_hash_value(PredicateLockTargetHash,
									targettag, targettaghash,
									HASH_ENTER_NULL, &found);
	if (!target)
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase max_pred_locks_per_transaction.")));
	if (!found)
		SHMQueueInit(&(target->predicateLocks));

	/* We've got the sxact and target, make sure they're joined. */
	locktag.myTarget = target;
	locktag.myXact = sxact;
	lock = (PREDICATELOCK *)
		hash_search_with_hash_value(PredicateLockHash, &locktag,
									PredicateLockHashCodeFromTargetHashCode(&locktag, targettaghash),
									HASH_ENTER_NULL, &found);
	if (!lock)
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase max_pred_locks_per_transaction.")));

	/* If the lock already existed, there is nothing more to do. */
	if (!found)
	{
		SHMQueueInsertBefore(&(target->predicateLocks), &(lock->targetLink));
		SHMQueueInsertBefore(&(sxact->predicateLocks),
							 &(lock->xactLink));
		lock->commitSeqNo = InvalidSerCommitSeqNo;
	}

	LWLockRelease(partitionLock);
	if (IsInParallelMode())
		LWLockRelease(&sxact->predicateLockListLock);
	LWLockRelease(SerializablePredicateLockListLock);
}
2510
2511 /*
2512 * Acquire a predicate lock on the specified target for the current
2513 * connection if not already held. This updates the local lock table
2514 * and uses it to implement granularity promotion. It will consolidate
2515 * multiple locks into a coarser lock if warranted, and will release
2516 * any finer-grained locks covered by the new one.
2517 */
2518 static void
PredicateLockAcquire(const PREDICATELOCKTARGETTAG * targettag)2519 PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag)
2520 {
2521 uint32 targettaghash;
2522 bool found;
2523 LOCALPREDICATELOCK *locallock;
2524
2525 /* Do we have the lock already, or a covering lock? */
2526 if (PredicateLockExists(targettag))
2527 return;
2528
2529 if (CoarserLockCovers(targettag))
2530 return;
2531
2532 /* the same hash and LW lock apply to the lock target and the local lock. */
2533 targettaghash = PredicateLockTargetTagHashCode(targettag);
2534
2535 /* Acquire lock in local table */
2536 locallock = (LOCALPREDICATELOCK *)
2537 hash_search_with_hash_value(LocalPredicateLockHash,
2538 targettag, targettaghash,
2539 HASH_ENTER, &found);
2540 locallock->held = true;
2541 if (!found)
2542 locallock->childLocks = 0;
2543
2544 /* Actually create the lock */
2545 CreatePredicateLock(targettag, targettaghash, MySerializableXact);
2546
2547 /*
2548 * Lock has been acquired. Check whether it should be promoted to a
2549 * coarser granularity, or whether there are finer-granularity locks to
2550 * clean up.
2551 */
2552 if (CheckAndPromotePredicateLockRequest(targettag))
2553 {
2554 /*
2555 * Lock request was promoted to a coarser-granularity lock, and that
2556 * lock was acquired. It will delete this lock and any of its
2557 * children, so we're done.
2558 */
2559 }
2560 else
2561 {
2562 /* Clean up any finer-granularity locks */
2563 if (GET_PREDICATELOCKTARGETTAG_TYPE(*targettag) != PREDLOCKTAG_TUPLE)
2564 DeleteChildTargetLocks(targettag);
2565 }
2566 }
2567
2568
2569 /*
2570 * PredicateLockRelation
2571 *
2572 * Gets a predicate lock at the relation level.
2573 * Skip if not in full serializable transaction isolation level.
2574 * Skip if this is a temporary table.
2575 * Clear any finer-grained predicate locks this session has on the relation.
2576 */
2577 void
PredicateLockRelation(Relation relation,Snapshot snapshot)2578 PredicateLockRelation(Relation relation, Snapshot snapshot)
2579 {
2580 PREDICATELOCKTARGETTAG tag;
2581
2582 if (!SerializationNeededForRead(relation, snapshot))
2583 return;
2584
2585 SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2586 relation->rd_node.dbNode,
2587 relation->rd_id);
2588 PredicateLockAcquire(&tag);
2589 }
2590
2591 /*
2592 * PredicateLockPage
2593 *
2594 * Gets a predicate lock at the page level.
2595 * Skip if not in full serializable transaction isolation level.
2596 * Skip if this is a temporary table.
2597 * Skip if a coarser predicate lock already covers this page.
2598 * Clear any finer-grained predicate locks this session has on the relation.
2599 */
2600 void
PredicateLockPage(Relation relation,BlockNumber blkno,Snapshot snapshot)2601 PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
2602 {
2603 PREDICATELOCKTARGETTAG tag;
2604
2605 if (!SerializationNeededForRead(relation, snapshot))
2606 return;
2607
2608 SET_PREDICATELOCKTARGETTAG_PAGE(tag,
2609 relation->rd_node.dbNode,
2610 relation->rd_id,
2611 blkno);
2612 PredicateLockAcquire(&tag);
2613 }
2614
2615 /*
2616 * PredicateLockTuple
2617 *
2618 * Gets a predicate lock at the tuple level.
2619 * Skip if not in full serializable transaction isolation level.
2620 * Skip if this is a temporary table.
2621 */
2622 void
PredicateLockTuple(Relation relation,HeapTuple tuple,Snapshot snapshot)2623 PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
2624 {
2625 PREDICATELOCKTARGETTAG tag;
2626 ItemPointer tid;
2627 TransactionId targetxmin;
2628
2629 if (!SerializationNeededForRead(relation, snapshot))
2630 return;
2631
2632 /*
2633 * If it's a heap tuple, return if this xact wrote it.
2634 */
2635 if (relation->rd_index == NULL)
2636 {
2637 TransactionId myxid;
2638
2639 targetxmin = HeapTupleHeaderGetXmin(tuple->t_data);
2640
2641 myxid = GetTopTransactionIdIfAny();
2642 if (TransactionIdIsValid(myxid))
2643 {
2644 if (TransactionIdFollowsOrEquals(targetxmin, TransactionXmin))
2645 {
2646 TransactionId xid = SubTransGetTopmostTransaction(targetxmin);
2647
2648 if (TransactionIdEquals(xid, myxid))
2649 {
2650 /* We wrote it; we already have a write lock. */
2651 return;
2652 }
2653 }
2654 }
2655 }
2656
2657 /*
2658 * Do quick-but-not-definitive test for a relation lock first. This will
2659 * never cause a return when the relation is *not* locked, but will
2660 * occasionally let the check continue when there really *is* a relation
2661 * level lock.
2662 */
2663 SET_PREDICATELOCKTARGETTAG_RELATION(tag,
2664 relation->rd_node.dbNode,
2665 relation->rd_id);
2666 if (PredicateLockExists(&tag))
2667 return;
2668
2669 tid = &(tuple->t_self);
2670 SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
2671 relation->rd_node.dbNode,
2672 relation->rd_id,
2673 ItemPointerGetBlockNumber(tid),
2674 ItemPointerGetOffsetNumber(tid));
2675 PredicateLockAcquire(&tag);
2676 }
2677
2678
/*
 * DeleteLockTarget
 *
 * Remove a predicate lock target along with any locks held for it.
 *
 * Caller must hold SerializablePredicateLockListLock and the
 * appropriate hash partition lock for the target.
 */
static void
DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
{
	PREDICATELOCK *predlock;
	SHM_QUEUE  *predlocktargetlink;
	PREDICATELOCK *nextpredlock;
	bool		found;

	Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
								LW_EXCLUSIVE));
	Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));

	/* Walk the target's list of predicate locks, deleting each one. */
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(target->predicateLocks),
					 &(target->predicateLocks),
					 offsetof(PREDICATELOCK, targetLink));
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
	while (predlock)
	{
		/* Grab the successor before unlinking the current entry. */
		predlocktargetlink = &(predlock->targetLink);
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(target->predicateLocks),
						 predlocktargetlink,
						 offsetof(PREDICATELOCK, targetLink));

		/* Unlink from both the per-xact and per-target queues. */
		SHMQueueDelete(&(predlock->xactLink));
		SHMQueueDelete(&(predlock->targetLink));

		/* Remove the lock entry from the shared hash table. */
		hash_search_with_hash_value
			(PredicateLockHash,
			 &predlock->tag,
			 PredicateLockHashCodeFromTargetHashCode(&predlock->tag,
													 targettaghash),
			 HASH_REMOVE, &found);
		Assert(found);

		predlock = nextpredlock;
	}
	LWLockRelease(SerializableXactHashLock);

	/* Remove the target itself, if possible. */
	RemoveTargetIfNoLongerUsed(target, targettaghash);
}
2730
2731
/*
 * TransferPredicateLocksToNewTarget
 *
 * Move or copy all the predicate locks for a lock target, for use by
 * index page splits/combines and other things that create or replace
 * lock targets.  If 'removeOld' is true, the old locks and the target
 * will be removed.
 *
 * Returns true on success, or false if we ran out of shared memory to
 * allocate the new target or locks.  Guaranteed to always succeed if
 * removeOld is set (by using the scratch entry in PredicateLockTargetHash
 * for scratch space).
 *
 * Warning: the "removeOld" option should be used only with care,
 * because this function does not (indeed, can not) update other
 * backends' LocalPredicateLockHash.  If we are only adding new
 * entries, this is not a problem: the local lock table is used only
 * as a hint, so missing entries for locks that are held are
 * OK.  Having entries for locks that are no longer held, as can happen
 * when using "removeOld", is not in general OK.  We can only use it
 * safely when replacing a lock with a coarser-granularity lock that
 * covers it, or if we are absolutely certain that no one will need to
 * refer to that lock in the future.
 *
 * Caller must hold SerializablePredicateLockListLock exclusively.
 */
static bool
TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
								  PREDICATELOCKTARGETTAG newtargettag,
								  bool removeOld)
{
	uint32		oldtargettaghash;
	LWLock	   *oldpartitionLock;
	PREDICATELOCKTARGET *oldtarget;
	uint32		newtargettaghash;
	LWLock	   *newpartitionLock;
	bool		found;
	bool		outOfShmem = false;

	Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
								LW_EXCLUSIVE));

	oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
	newtargettaghash = PredicateLockTargetTagHashCode(&newtargettag);
	oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
	newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);

	if (removeOld)
	{
		/*
		 * Remove the dummy entry to give us scratch space, so we know we'll
		 * be able to create the new lock target.
		 */
		RemoveScratchTarget(false);
	}

	/*
	 * We must get the partition locks in ascending sequence to avoid
	 * deadlocks.  If old and new partitions are the same, we must request the
	 * lock only once.
	 */
	if (oldpartitionLock < newpartitionLock)
	{
		/* Old partition is read-only unless we're deleting from it. */
		LWLockAcquire(oldpartitionLock,
					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
	}
	else if (oldpartitionLock > newpartitionLock)
	{
		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);
		LWLockAcquire(oldpartitionLock,
					  (removeOld ? LW_EXCLUSIVE : LW_SHARED));
	}
	else
		LWLockAcquire(newpartitionLock, LW_EXCLUSIVE);

	/*
	 * Look for the old target.  If not found, that's OK; no predicate locks
	 * are affected, so we can just clean up and return.  If it does exist,
	 * walk its list of predicate locks and move or copy them to the new
	 * target.
	 */
	oldtarget = hash_search_with_hash_value(PredicateLockTargetHash,
											&oldtargettag,
											oldtargettaghash,
											HASH_FIND, NULL);

	if (oldtarget)
	{
		PREDICATELOCKTARGET *newtarget;
		PREDICATELOCK *oldpredlock;
		PREDICATELOCKTAG newpredlocktag;

		newtarget = hash_search_with_hash_value(PredicateLockTargetHash,
												&newtargettag,
												newtargettaghash,
												HASH_ENTER_NULL, &found);

		if (!newtarget)
		{
			/* Failed to allocate due to insufficient shmem */
			outOfShmem = true;
			goto exit;
		}

		/* If we created a new entry, initialize it */
		if (!found)
			SHMQueueInit(&(newtarget->predicateLocks));

		newpredlocktag.myTarget = newtarget;

		/*
		 * Loop through all the locks on the old target, replacing them with
		 * locks on the new target.
		 */
		oldpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(oldtarget->predicateLocks),
						 &(oldtarget->predicateLocks),
						 offsetof(PREDICATELOCK, targetLink));
		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		while (oldpredlock)
		{
			SHM_QUEUE  *predlocktargetlink;
			PREDICATELOCK *nextpredlock;
			PREDICATELOCK *newpredlock;
			SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;

			/* Save the successor before we (possibly) unlink this entry. */
			predlocktargetlink = &(oldpredlock->targetLink);
			nextpredlock = (PREDICATELOCK *)
				SHMQueueNext(&(oldtarget->predicateLocks),
							 predlocktargetlink,
							 offsetof(PREDICATELOCK, targetLink));
			newpredlocktag.myXact = oldpredlock->tag.myXact;

			if (removeOld)
			{
				SHMQueueDelete(&(oldpredlock->xactLink));
				SHMQueueDelete(&(oldpredlock->targetLink));

				hash_search_with_hash_value
					(PredicateLockHash,
					 &oldpredlock->tag,
					 PredicateLockHashCodeFromTargetHashCode(&oldpredlock->tag,
															 oldtargettaghash),
					 HASH_REMOVE, &found);
				Assert(found);
			}

			newpredlock = (PREDICATELOCK *)
				hash_search_with_hash_value(PredicateLockHash,
											&newpredlocktag,
											PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
																					newtargettaghash),
											HASH_ENTER_NULL,
											&found);
			if (!newpredlock)
			{
				/* Out of shared memory. Undo what we've done so far. */
				LWLockRelease(SerializableXactHashLock);
				DeleteLockTarget(newtarget, newtargettaghash);
				outOfShmem = true;
				goto exit;
			}
			if (!found)
			{
				/* New lock entry: link it into both queues. */
				SHMQueueInsertBefore(&(newtarget->predicateLocks),
									 &(newpredlock->targetLink));
				SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
									 &(newpredlock->xactLink));
				newpredlock->commitSeqNo = oldCommitSeqNo;
			}
			else
			{
				/* Existing entry: keep the later commit sequence number. */
				if (newpredlock->commitSeqNo < oldCommitSeqNo)
					newpredlock->commitSeqNo = oldCommitSeqNo;
			}

			Assert(newpredlock->commitSeqNo != 0);
			Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
				   || (newpredlock->tag.myXact == OldCommittedSxact));

			oldpredlock = nextpredlock;
		}
		LWLockRelease(SerializableXactHashLock);

		if (removeOld)
		{
			Assert(SHMQueueEmpty(&oldtarget->predicateLocks));
			RemoveTargetIfNoLongerUsed(oldtarget, oldtargettaghash);
		}
	}


exit:
	/* Release partition locks in reverse order of acquisition. */
	if (oldpartitionLock < newpartitionLock)
	{
		LWLockRelease(newpartitionLock);
		LWLockRelease(oldpartitionLock);
	}
	else if (oldpartitionLock > newpartitionLock)
	{
		LWLockRelease(oldpartitionLock);
		LWLockRelease(newpartitionLock);
	}
	else
		LWLockRelease(newpartitionLock);

	if (removeOld)
	{
		/* We shouldn't run out of memory if we're moving locks */
		Assert(!outOfShmem);

		/* Put the scratch entry back */
		RestoreScratchTarget(false);
	}

	return !outOfShmem;
}
2951
/*
 * Drop all predicate locks of any granularity from the specified relation,
 * which can be a heap relation or an index relation.  If 'transfer' is true,
 * acquire a relation lock on the heap for any transactions with any lock(s)
 * on the specified relation.
 *
 * This requires grabbing a lot of LW locks and scanning the entire lock
 * target table for matches.  That makes this more expensive than most
 * predicate lock management functions, but it will only be called for DDL
 * type commands that are expensive anyway, and there are fast returns when
 * no serializable transactions are active or the relation is temporary.
 *
 * We don't use the TransferPredicateLocksToNewTarget function because it
 * acquires its own locks on the partitions of the two targets involved,
 * and we'll already be holding all partition locks.
 *
 * We can't throw an error from here, because the call could be from a
 * transaction which is not serializable.
 *
 * NOTE: This is currently only called with transfer set to true, but that may
 * change.  If we decide to clean up the locks from a table on commit of a
 * transaction which executed DROP TABLE, the false condition will be useful.
 */
static void
DropAllPredicateLocksFromTable(Relation relation, bool transfer)
{
	HASH_SEQ_STATUS seqstat;
	PREDICATELOCKTARGET *oldtarget;
	PREDICATELOCKTARGET *heaptarget;
	Oid			dbId;
	Oid			relId;
	Oid			heapId;
	int			i;
	bool		isIndex;
	bool		found;
	uint32		heaptargettaghash;

	/*
	 * Bail out quickly if there are no serializable transactions running.
	 * It's safe to check this without taking locks because the caller is
	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
	 * would matter here can be acquired while that is held.
	 */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
		return;

	if (!PredicateLockingNeededForRelation(relation))
		return;

	dbId = relation->rd_node.dbNode;
	relId = relation->rd_id;
	if (relation->rd_index == NULL)
	{
		isIndex = false;
		heapId = relId;
	}
	else
	{
		isIndex = true;
		heapId = relation->rd_index->indrelid;
	}
	Assert(heapId != InvalidOid);
	Assert(transfer || !isIndex);	/* index OID only makes sense with
									 * transfer */

	/* Retrieve first time needed, then keep. */
	heaptargettaghash = 0;
	heaptarget = NULL;

	/* Acquire locks on all lock partitions */
	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/*
	 * Remove the dummy entry to give us scratch space, so we know we'll be
	 * able to create the new lock target.
	 */
	if (transfer)
		RemoveScratchTarget(true);

	/* Scan through target map */
	hash_seq_init(&seqstat, PredicateLockTargetHash);

	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
	{
		PREDICATELOCK *oldpredlock;

		/*
		 * Check whether this is a target which needs attention.
		 */
		if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
			continue;			/* wrong relation id */
		if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
			continue;			/* wrong database id */
		if (transfer && !isIndex
			&& GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
			continue;			/* already the right lock */

		/*
		 * If we made it here, we have work to do.  We make sure the heap
		 * relation lock exists, then we walk the list of predicate locks for
		 * the old target we found, moving all locks to the heap relation lock
		 * -- unless they already hold that.
		 */

		/*
		 * First make sure we have the heap relation target.  We only need to
		 * do this once.
		 */
		if (transfer && heaptarget == NULL)
		{
			PREDICATELOCKTARGETTAG heaptargettag;

			SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
			heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
			/* Can't fail: scratch entry was freed up above. */
			heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
													 &heaptargettag,
													 heaptargettaghash,
													 HASH_ENTER, &found);
			if (!found)
				SHMQueueInit(&heaptarget->predicateLocks);
		}

		/*
		 * Loop through all the locks on the old target, replacing them with
		 * locks on the new target.
		 */
		oldpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(oldtarget->predicateLocks),
						 &(oldtarget->predicateLocks),
						 offsetof(PREDICATELOCK, targetLink));
		while (oldpredlock)
		{
			PREDICATELOCK *nextpredlock;
			PREDICATELOCK *newpredlock;
			SerCommitSeqNo oldCommitSeqNo;
			SERIALIZABLEXACT *oldXact;

			/* Save the successor before deleting the current entry. */
			nextpredlock = (PREDICATELOCK *)
				SHMQueueNext(&(oldtarget->predicateLocks),
							 &(oldpredlock->targetLink),
							 offsetof(PREDICATELOCK, targetLink));

			/*
			 * Remove the old lock first.  This avoids the chance of running
			 * out of lock structure entries for the hash table.
			 */
			oldCommitSeqNo = oldpredlock->commitSeqNo;
			oldXact = oldpredlock->tag.myXact;

			SHMQueueDelete(&(oldpredlock->xactLink));

			/*
			 * No need for retail delete from oldtarget list, we're removing
			 * the whole target anyway.
			 */
			hash_search(PredicateLockHash,
						&oldpredlock->tag,
						HASH_REMOVE, &found);
			Assert(found);

			if (transfer)
			{
				PREDICATELOCKTAG newpredlocktag;

				newpredlocktag.myTarget = heaptarget;
				newpredlocktag.myXact = oldXact;
				/* Can't fail: we just freed a lock entry above. */
				newpredlock = (PREDICATELOCK *)
					hash_search_with_hash_value(PredicateLockHash,
												&newpredlocktag,
												PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
																						heaptargettaghash),
												HASH_ENTER,
												&found);
				if (!found)
				{
					/* New lock entry: link it into both queues. */
					SHMQueueInsertBefore(&(heaptarget->predicateLocks),
										 &(newpredlock->targetLink));
					SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
										 &(newpredlock->xactLink));
					newpredlock->commitSeqNo = oldCommitSeqNo;
				}
				else
				{
					/* Existing entry: keep the later commit seq number. */
					if (newpredlock->commitSeqNo < oldCommitSeqNo)
						newpredlock->commitSeqNo = oldCommitSeqNo;
				}

				Assert(newpredlock->commitSeqNo != 0);
				Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
					   || (newpredlock->tag.myXact == OldCommittedSxact));
			}

			oldpredlock = nextpredlock;
		}

		hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
					&found);
		Assert(found);
	}

	/* Put the scratch entry back */
	if (transfer)
		RestoreScratchTarget(true);

	/* Release locks in reverse order */
	LWLockRelease(SerializableXactHashLock);
	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
	LWLockRelease(SerializablePredicateLockListLock);
}
3165
/*
 * TransferPredicateLocksToHeapRelation
 *		For all transactions, transfer all predicate locks for the given
 *		relation to a single relation lock on the heap.
 *
 * Thin wrapper: the heavy lifting (scanning all lock targets, holding all
 * partition locks, moving each lock to the heap relation target) is done by
 * DropAllPredicateLocksFromTable with transfer = true.
 */
void
TransferPredicateLocksToHeapRelation(Relation relation)
{
	DropAllPredicateLocksFromTable(relation, true);
}
3176
3177
/*
 * PredicateLockPageSplit
 *
 * Copies any predicate locks for the old page to the new page.
 * Skip if this is a temporary table or toast table.
 *
 * NOTE: A page split (or overflow) affects all serializable transactions,
 * even if it occurs in the context of another transaction isolation level.
 *
 * NOTE: This currently leaves the local copy of the locks without
 * information on the new lock which is in shared memory.  This could cause
 * problems if enough page splits occur on locked pages without the processes
 * which hold the locks getting in and noticing.
 */
void
PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
					   BlockNumber newblkno)
{
	PREDICATELOCKTARGETTAG oldtargettag;
	PREDICATELOCKTARGETTAG newtargettag;
	bool		success;

	/*
	 * Bail out quickly if there are no serializable transactions running.
	 *
	 * It's safe to do this check without taking any additional locks. Even if
	 * a serializable transaction starts concurrently, we know it can't take
	 * any SIREAD locks on the page being split because the caller is holding
	 * the associated buffer page lock. Memory reordering isn't an issue; the
	 * memory barrier in the LWLock acquisition guarantees that this read
	 * occurs while the buffer page lock is held.
	 */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
		return;

	if (!PredicateLockingNeededForRelation(relation))
		return;

	Assert(oldblkno != newblkno);
	Assert(BlockNumberIsValid(oldblkno));
	Assert(BlockNumberIsValid(newblkno));

	SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
									relation->rd_node.dbNode,
									relation->rd_id,
									oldblkno);
	SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
									relation->rd_node.dbNode,
									relation->rd_id,
									newblkno);

	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);

	/*
	 * Try copying the locks over to the new page's tag, creating it if
	 * necessary.
	 */
	success = TransferPredicateLocksToNewTarget(oldtargettag,
												newtargettag,
												false);

	if (!success)
	{
		/*
		 * No more predicate lock entries are available. Failure isn't an
		 * option here, so promote the page lock to a relation lock.
		 */

		/* Get the parent relation lock's lock tag */
		success = GetParentPredicateLockTag(&oldtargettag,
											&newtargettag);
		Assert(success);

		/*
		 * Move the locks to the parent. This shouldn't fail.
		 *
		 * Note that here we are removing locks held by other backends,
		 * leading to a possible inconsistency in their local lock hash table.
		 * This is OK because we're replacing it with a lock that covers the
		 * old one.
		 */
		success = TransferPredicateLocksToNewTarget(oldtargettag,
													newtargettag,
													true);
		Assert(success);
	}

	LWLockRelease(SerializablePredicateLockListLock);
}
3267
/*
 * PredicateLockPageCombine
 *
 * Combines predicate locks for two existing pages.
 * Skip if this is a temporary table or toast table.
 *
 * NOTE: A page combine affects all serializable transactions, even if it
 * occurs in the context of another transaction isolation level.
 */
void
PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
						 BlockNumber newblkno)
{
	/*
	 * Page combines differ from page splits in that we ought to be able to
	 * remove the locks on the old page after transferring them to the new
	 * page, instead of duplicating them. However, because we can't edit other
	 * backends' local lock tables, removing the old lock would leave them
	 * with an entry in their LocalPredicateLockHash for a lock they're not
	 * holding, which isn't acceptable. So we wind up having to do the same
	 * work as a page split, acquiring a lock on the new page and keeping the
	 * old page locked too. That can lead to some false positives, but should
	 * be rare in practice.
	 */
	PredicateLockPageSplit(relation, oldblkno, newblkno);
}
3294
3295 /*
3296 * Walk the list of in-progress serializable transactions and find the new
3297 * xmin.
3298 */
3299 static void
SetNewSxactGlobalXmin(void)3300 SetNewSxactGlobalXmin(void)
3301 {
3302 SERIALIZABLEXACT *sxact;
3303
3304 Assert(LWLockHeldByMe(SerializableXactHashLock));
3305
3306 PredXact->SxactGlobalXmin = InvalidTransactionId;
3307 PredXact->SxactGlobalXminCount = 0;
3308
3309 for (sxact = FirstPredXact(); sxact != NULL; sxact = NextPredXact(sxact))
3310 {
3311 if (!SxactIsRolledBack(sxact)
3312 && !SxactIsCommitted(sxact)
3313 && sxact != OldCommittedSxact)
3314 {
3315 Assert(sxact->xmin != InvalidTransactionId);
3316 if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
3317 || TransactionIdPrecedes(sxact->xmin,
3318 PredXact->SxactGlobalXmin))
3319 {
3320 PredXact->SxactGlobalXmin = sxact->xmin;
3321 PredXact->SxactGlobalXminCount = 1;
3322 }
3323 else if (TransactionIdEquals(sxact->xmin,
3324 PredXact->SxactGlobalXmin))
3325 PredXact->SxactGlobalXminCount++;
3326 }
3327 }
3328
3329 OldSerXidSetActiveSerXmin(PredXact->SxactGlobalXmin);
3330 }
3331
3332 /*
3333 * ReleasePredicateLocks
3334 *
3335 * Releases predicate locks based on completion of the current transaction,
3336 * whether committed or rolled back. It can also be called for a read only
3337 * transaction when it becomes impossible for the transaction to become
3338 * part of a dangerous structure.
3339 *
3340 * We do nothing unless this is a serializable transaction.
3341 *
3342 * This method must ensure that shared memory hash tables are cleaned
3343 * up in some relatively timely fashion.
3344 *
3345 * If this transaction is committing and is holding any predicate locks,
3346 * it must be added to a list of completed serializable transactions still
3347 * holding locks.
3348 *
3349 * If isReadOnlySafe is true, then predicate locks are being released before
3350 * the end of the transaction because MySerializableXact has been determined
3351 * to be RO_SAFE. In non-parallel mode we can release it completely, but it
3352 * in parallel mode we partially release the SERIALIZABLEXACT and keep it
3353 * around until the end of the transaction, allowing each backend to clear its
3354 * MySerializableXact variable and benefit from the optimization in its own
3355 * time.
3356 */
3357 void
ReleasePredicateLocks(bool isCommit,bool isReadOnlySafe)3358 ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
3359 {
3360 bool needToClear;
3361 RWConflict conflict,
3362 nextConflict,
3363 possibleUnsafeConflict;
3364 SERIALIZABLEXACT *roXact;
3365
3366 /*
3367 * We can't trust XactReadOnly here, because a transaction which started
3368 * as READ WRITE can show as READ ONLY later, e.g., within
3369 * subtransactions. We want to flag a transaction as READ ONLY if it
3370 * commits without writing so that de facto READ ONLY transactions get the
3371 * benefit of some RO optimizations, so we will use this local variable to
3372 * get some cleanup logic right which is based on whether the transaction
3373 * was declared READ ONLY at the top level.
3374 */
3375 bool topLevelIsDeclaredReadOnly;
3376
3377 /* We can't be both committing and releasing early due to RO_SAFE. */
3378 Assert(!(isCommit && isReadOnlySafe));
3379
3380 /* Are we at the end of a transaction, that is, a commit or abort? */
3381 if (!isReadOnlySafe)
3382 {
3383 /*
3384 * Parallel workers mustn't release predicate locks at the end of
3385 * their transaction. The leader will do that at the end of its
3386 * transaction.
3387 */
3388 if (IsParallelWorker())
3389 {
3390 ReleasePredicateLocksLocal();
3391 return;
3392 }
3393
3394 /*
3395 * By the time the leader in a parallel query reaches end of
3396 * transaction, it has waited for all workers to exit.
3397 */
3398 Assert(!ParallelContextActive());
3399
3400 /*
3401 * If the leader in a parallel query earlier stashed a partially
3402 * released SERIALIZABLEXACT for final clean-up at end of transaction
3403 * (because workers might still have been accessing it), then it's
3404 * time to restore it.
3405 */
3406 if (SavedSerializableXact != InvalidSerializableXact)
3407 {
3408 Assert(MySerializableXact == InvalidSerializableXact);
3409 MySerializableXact = SavedSerializableXact;
3410 SavedSerializableXact = InvalidSerializableXact;
3411 Assert(SxactIsPartiallyReleased(MySerializableXact));
3412 }
3413 }
3414
3415 if (MySerializableXact == InvalidSerializableXact)
3416 {
3417 Assert(LocalPredicateLockHash == NULL);
3418 return;
3419 }
3420
3421 LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
3422
3423 /*
3424 * If the transaction is committing, but it has been partially released
3425 * already, then treat this as a roll back. It was marked as rolled back.
3426 */
3427 if (isCommit && SxactIsPartiallyReleased(MySerializableXact))
3428 isCommit = false;
3429
3430 /*
3431 * If we're called in the middle of a transaction because we discovered
3432 * that the SXACT_FLAG_RO_SAFE flag was set, then we'll partially release
3433 * it (that is, release the predicate locks and conflicts, but not the
3434 * SERIALIZABLEXACT itself) if we're the first backend to have noticed.
3435 */
3436 if (isReadOnlySafe && IsInParallelMode())
3437 {
3438 /*
3439 * The leader needs to stash a pointer to it, so that it can
3440 * completely release it at end-of-transaction.
3441 */
3442 if (!IsParallelWorker())
3443 SavedSerializableXact = MySerializableXact;
3444
3445 /*
3446 * The first backend to reach this condition will partially release
3447 * the SERIALIZABLEXACT. All others will just clear their
3448 * backend-local state so that they stop doing SSI checks for the rest
3449 * of the transaction.
3450 */
3451 if (SxactIsPartiallyReleased(MySerializableXact))
3452 {
3453 LWLockRelease(SerializableXactHashLock);
3454 ReleasePredicateLocksLocal();
3455 return;
3456 }
3457 else
3458 {
3459 MySerializableXact->flags |= SXACT_FLAG_PARTIALLY_RELEASED;
3460 /* ... and proceed to perform the partial release below. */
3461 }
3462 }
3463 Assert(!isCommit || SxactIsPrepared(MySerializableXact));
3464 Assert(!isCommit || !SxactIsDoomed(MySerializableXact));
3465 Assert(!SxactIsCommitted(MySerializableXact));
3466 Assert(SxactIsPartiallyReleased(MySerializableXact)
3467 || !SxactIsRolledBack(MySerializableXact));
3468
3469 /* may not be serializable during COMMIT/ROLLBACK PREPARED */
3470 Assert(MySerializableXact->pid == 0 || IsolationIsSerializable());
3471
3472 /* We'd better not already be on the cleanup list. */
3473 Assert(!SxactIsOnFinishedList(MySerializableXact));
3474
3475 topLevelIsDeclaredReadOnly = SxactIsReadOnly(MySerializableXact);
3476
3477 /*
3478 * We don't hold XidGenLock lock here, assuming that TransactionId is
3479 * atomic!
3480 *
3481 * If this value is changing, we don't care that much whether we get the
3482 * old or new value -- it is just used to determine how far
3483 * GlobalSerializableXmin must advance before this transaction can be
3484 * fully cleaned up. The worst that could happen is we wait for one more
3485 * transaction to complete before freeing some RAM; correctness of visible
3486 * behavior is not affected.
3487 */
3488 MySerializableXact->finishedBefore = XidFromFullTransactionId(ShmemVariableCache->nextFullXid);
3489
3490 /*
3491 * If it's not a commit it's either a rollback or a read-only transaction
3492 * flagged SXACT_FLAG_RO_SAFE, and we can clear our locks immediately.
3493 */
3494 if (isCommit)
3495 {
3496 MySerializableXact->flags |= SXACT_FLAG_COMMITTED;
3497 MySerializableXact->commitSeqNo = ++(PredXact->LastSxactCommitSeqNo);
3498 /* Recognize implicit read-only transaction (commit without write). */
3499 if (!MyXactDidWrite)
3500 MySerializableXact->flags |= SXACT_FLAG_READ_ONLY;
3501 }
3502 else
3503 {
3504 /*
3505 * The DOOMED flag indicates that we intend to roll back this
3506 * transaction and so it should not cause serialization failures for
3507 * other transactions that conflict with it. Note that this flag might
3508 * already be set, if another backend marked this transaction for
3509 * abort.
3510 *
3511 * The ROLLED_BACK flag further indicates that ReleasePredicateLocks
3512 * has been called, and so the SerializableXact is eligible for
3513 * cleanup. This means it should not be considered when calculating
3514 * SxactGlobalXmin.
3515 */
3516 MySerializableXact->flags |= SXACT_FLAG_DOOMED;
3517 MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
3518
3519 /*
3520 * If the transaction was previously prepared, but is now failing due
3521 * to a ROLLBACK PREPARED or (hopefully very rare) error after the
3522 * prepare, clear the prepared flag. This simplifies conflict
3523 * checking.
3524 */
3525 MySerializableXact->flags &= ~SXACT_FLAG_PREPARED;
3526 }
3527
3528 if (!topLevelIsDeclaredReadOnly)
3529 {
3530 Assert(PredXact->WritableSxactCount > 0);
3531 if (--(PredXact->WritableSxactCount) == 0)
3532 {
3533 /*
3534 * Release predicate locks and rw-conflicts in for all committed
3535 * transactions. There are no longer any transactions which might
3536 * conflict with the locks and no chance for new transactions to
3537 * overlap. Similarly, existing conflicts in can't cause pivots,
3538 * and any conflicts in which could have completed a dangerous
3539 * structure would already have caused a rollback, so any
3540 * remaining ones must be benign.
3541 */
3542 PredXact->CanPartialClearThrough = PredXact->LastSxactCommitSeqNo;
3543 }
3544 }
3545 else
3546 {
3547 /*
3548 * Read-only transactions: clear the list of transactions that might
3549 * make us unsafe. Note that we use 'inLink' for the iteration as
3550 * opposed to 'outLink' for the r/w xacts.
3551 */
3552 possibleUnsafeConflict = (RWConflict)
3553 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3554 &MySerializableXact->possibleUnsafeConflicts,
3555 offsetof(RWConflictData, inLink));
3556 while (possibleUnsafeConflict)
3557 {
3558 nextConflict = (RWConflict)
3559 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3560 &possibleUnsafeConflict->inLink,
3561 offsetof(RWConflictData, inLink));
3562
3563 Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
3564 Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
3565
3566 ReleaseRWConflict(possibleUnsafeConflict);
3567
3568 possibleUnsafeConflict = nextConflict;
3569 }
3570 }
3571
3572 /* Check for conflict out to old committed transactions. */
3573 if (isCommit
3574 && !SxactIsReadOnly(MySerializableXact)
3575 && SxactHasSummaryConflictOut(MySerializableXact))
3576 {
3577 /*
3578 * we don't know which old committed transaction we conflicted with,
3579 * so be conservative and use FirstNormalSerCommitSeqNo here
3580 */
3581 MySerializableXact->SeqNo.earliestOutConflictCommit =
3582 FirstNormalSerCommitSeqNo;
3583 MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3584 }
3585
3586 /*
3587 * Release all outConflicts to committed transactions. If we're rolling
3588 * back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
3589 * previously committed transactions.
3590 */
3591 conflict = (RWConflict)
3592 SHMQueueNext(&MySerializableXact->outConflicts,
3593 &MySerializableXact->outConflicts,
3594 offsetof(RWConflictData, outLink));
3595 while (conflict)
3596 {
3597 nextConflict = (RWConflict)
3598 SHMQueueNext(&MySerializableXact->outConflicts,
3599 &conflict->outLink,
3600 offsetof(RWConflictData, outLink));
3601
3602 if (isCommit
3603 && !SxactIsReadOnly(MySerializableXact)
3604 && SxactIsCommitted(conflict->sxactIn))
3605 {
3606 if ((MySerializableXact->flags & SXACT_FLAG_CONFLICT_OUT) == 0
3607 || conflict->sxactIn->prepareSeqNo < MySerializableXact->SeqNo.earliestOutConflictCommit)
3608 MySerializableXact->SeqNo.earliestOutConflictCommit = conflict->sxactIn->prepareSeqNo;
3609 MySerializableXact->flags |= SXACT_FLAG_CONFLICT_OUT;
3610 }
3611
3612 if (!isCommit
3613 || SxactIsCommitted(conflict->sxactIn)
3614 || (conflict->sxactIn->SeqNo.lastCommitBeforeSnapshot >= PredXact->LastSxactCommitSeqNo))
3615 ReleaseRWConflict(conflict);
3616
3617 conflict = nextConflict;
3618 }
3619
3620 /*
3621 * Release all inConflicts from committed and read-only transactions. If
3622 * we're rolling back, clear them all.
3623 */
3624 conflict = (RWConflict)
3625 SHMQueueNext(&MySerializableXact->inConflicts,
3626 &MySerializableXact->inConflicts,
3627 offsetof(RWConflictData, inLink));
3628 while (conflict)
3629 {
3630 nextConflict = (RWConflict)
3631 SHMQueueNext(&MySerializableXact->inConflicts,
3632 &conflict->inLink,
3633 offsetof(RWConflictData, inLink));
3634
3635 if (!isCommit
3636 || SxactIsCommitted(conflict->sxactOut)
3637 || SxactIsReadOnly(conflict->sxactOut))
3638 ReleaseRWConflict(conflict);
3639
3640 conflict = nextConflict;
3641 }
3642
3643 if (!topLevelIsDeclaredReadOnly)
3644 {
3645 /*
3646 * Remove ourselves from the list of possible conflicts for concurrent
3647 * READ ONLY transactions, flagging them as unsafe if we have a
3648 * conflict out. If any are waiting DEFERRABLE transactions, wake them
3649 * up if they are known safe or known unsafe.
3650 */
3651 possibleUnsafeConflict = (RWConflict)
3652 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3653 &MySerializableXact->possibleUnsafeConflicts,
3654 offsetof(RWConflictData, outLink));
3655 while (possibleUnsafeConflict)
3656 {
3657 nextConflict = (RWConflict)
3658 SHMQueueNext(&MySerializableXact->possibleUnsafeConflicts,
3659 &possibleUnsafeConflict->outLink,
3660 offsetof(RWConflictData, outLink));
3661
3662 roXact = possibleUnsafeConflict->sxactIn;
3663 Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
3664 Assert(SxactIsReadOnly(roXact));
3665
3666 /* Mark conflicted if necessary. */
3667 if (isCommit
3668 && MyXactDidWrite
3669 && SxactHasConflictOut(MySerializableXact)
3670 && (MySerializableXact->SeqNo.earliestOutConflictCommit
3671 <= roXact->SeqNo.lastCommitBeforeSnapshot))
3672 {
3673 /*
3674 * This releases possibleUnsafeConflict (as well as all other
3675 * possible conflicts for roXact)
3676 */
3677 FlagSxactUnsafe(roXact);
3678 }
3679 else
3680 {
3681 ReleaseRWConflict(possibleUnsafeConflict);
3682
3683 /*
3684 * If we were the last possible conflict, flag it safe. The
3685 * transaction can now safely release its predicate locks (but
3686 * that transaction's backend has to do that itself).
3687 */
3688 if (SHMQueueEmpty(&roXact->possibleUnsafeConflicts))
3689 roXact->flags |= SXACT_FLAG_RO_SAFE;
3690 }
3691
3692 /*
3693 * Wake up the process for a waiting DEFERRABLE transaction if we
3694 * now know it's either safe or conflicted.
3695 */
3696 if (SxactIsDeferrableWaiting(roXact) &&
3697 (SxactIsROUnsafe(roXact) || SxactIsROSafe(roXact)))
3698 ProcSendSignal(roXact->pid);
3699
3700 possibleUnsafeConflict = nextConflict;
3701 }
3702 }
3703
3704 /*
3705 * Check whether it's time to clean up old transactions. This can only be
3706 * done when the last serializable transaction with the oldest xmin among
3707 * serializable transactions completes. We then find the "new oldest"
3708 * xmin and purge any transactions which finished before this transaction
3709 * was launched.
3710 */
3711 needToClear = false;
3712 if (TransactionIdEquals(MySerializableXact->xmin, PredXact->SxactGlobalXmin))
3713 {
3714 Assert(PredXact->SxactGlobalXminCount > 0);
3715 if (--(PredXact->SxactGlobalXminCount) == 0)
3716 {
3717 SetNewSxactGlobalXmin();
3718 needToClear = true;
3719 }
3720 }
3721
3722 LWLockRelease(SerializableXactHashLock);
3723
3724 LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
3725
3726 /* Add this to the list of transactions to check for later cleanup. */
3727 if (isCommit)
3728 SHMQueueInsertBefore(FinishedSerializableTransactions,
3729 &MySerializableXact->finishedLink);
3730
3731 /*
3732 * If we're releasing a RO_SAFE transaction in parallel mode, we'll only
3733 * partially release it. That's necessary because other backends may have
3734 * a reference to it. The leader will release the SERIALIZABLEXACT itself
3735 * at the end of the transaction after workers have stopped running.
3736 */
3737 if (!isCommit)
3738 ReleaseOneSerializableXact(MySerializableXact,
3739 isReadOnlySafe && IsInParallelMode(),
3740 false);
3741
3742 LWLockRelease(SerializableFinishedListLock);
3743
3744 if (needToClear)
3745 ClearOldPredicateLocks();
3746
3747 ReleasePredicateLocksLocal();
3748 }
3749
3750 static void
ReleasePredicateLocksLocal(void)3751 ReleasePredicateLocksLocal(void)
3752 {
3753 MySerializableXact = InvalidSerializableXact;
3754 MyXactDidWrite = false;
3755
3756 /* Delete per-transaction lock table */
3757 if (LocalPredicateLockHash != NULL)
3758 {
3759 hash_destroy(LocalPredicateLockHash);
3760 LocalPredicateLockHash = NULL;
3761 }
3762 }
3763
3764 /*
3765 * Clear old predicate locks, belonging to committed transactions that are no
3766 * longer interesting to any in-progress transaction.
3767 */
static void
ClearOldPredicateLocks(void)
{
	SERIALIZABLEXACT *finishedSxact;
	PREDICATELOCK *predlock;

	/*
	 * Loop through finished transactions. They are in commit order, so we can
	 * stop as soon as we find one that's still interesting.
	 *
	 * SerializableFinishedListLock is held for the entire pass.
	 * SerializableXactHashLock is taken shared, and must be dropped around
	 * each call to ReleaseOneSerializableXact() below, since that function
	 * acquires it exclusively itself.
	 */
	LWLockAcquire(SerializableFinishedListLock, LW_EXCLUSIVE);
	finishedSxact = (SERIALIZABLEXACT *)
		SHMQueueNext(FinishedSerializableTransactions,
					 FinishedSerializableTransactions,
					 offsetof(SERIALIZABLEXACT, finishedLink));
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	while (finishedSxact)
	{
		SERIALIZABLEXACT *nextSxact;

		/* Fetch the successor before finishedSxact can be unlinked below. */
		nextSxact = (SERIALIZABLEXACT *)
			SHMQueueNext(FinishedSerializableTransactions,
						 &(finishedSxact->finishedLink),
						 offsetof(SERIALIZABLEXACT, finishedLink));
		if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
			|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
											 PredXact->SxactGlobalXmin))
		{
			/*
			 * This transaction committed before any in-progress transaction
			 * took its snapshot. It's no longer interesting.
			 */
			LWLockRelease(SerializableXactHashLock);
			SHMQueueDelete(&(finishedSxact->finishedLink));
			ReleaseOneSerializableXact(finishedSxact, false, false);
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}
		else if (finishedSxact->commitSeqNo > PredXact->HavePartialClearedThrough
				 && finishedSxact->commitSeqNo <= PredXact->CanPartialClearThrough)
		{
			/*
			 * Any active transactions that took their snapshot before this
			 * transaction committed are read-only, so we can clear part of
			 * its state.
			 */
			LWLockRelease(SerializableXactHashLock);

			if (SxactIsReadOnly(finishedSxact))
			{
				/* A read-only transaction can be removed entirely */
				SHMQueueDelete(&(finishedSxact->finishedLink));
				ReleaseOneSerializableXact(finishedSxact, false, false);
			}
			else
			{
				/*
				 * A read-write transaction can only be partially cleared. We
				 * need to keep the SERIALIZABLEXACT but can release the
				 * SIREAD locks and conflicts in.
				 */
				ReleaseOneSerializableXact(finishedSxact, true, false);
			}

			/* Remember progress so this entry isn't partially cleared twice. */
			PredXact->HavePartialClearedThrough = finishedSxact->commitSeqNo;
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}
		else
		{
			/* Still interesting. */
			break;
		}
		finishedSxact = nextSxact;
	}
	LWLockRelease(SerializableXactHashLock);

	/*
	 * Loop through predicate locks on dummy transaction for summarized data.
	 *
	 * These locks were transferred to OldCommittedSxact by
	 * ReleaseOneSerializableXact() with summarize = true; each carries the
	 * commitSeqNo of the transaction that originally held it.
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&OldCommittedSxact->predicateLocks,
					 &OldCommittedSxact->predicateLocks,
					 offsetof(PREDICATELOCK, xactLink));
	while (predlock)
	{
		PREDICATELOCK *nextpredlock;
		bool		canDoPartialCleanup;

		/* Save the next entry before we possibly delete this one. */
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&OldCommittedSxact->predicateLocks,
						 &predlock->xactLink,
						 offsetof(PREDICATELOCK, xactLink));

		/* Read the lock's commitSeqNo under the shared hash lock. */
		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		Assert(predlock->commitSeqNo != 0);
		Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
		canDoPartialCleanup = (predlock->commitSeqNo <= PredXact->CanPartialClearThrough);
		LWLockRelease(SerializableXactHashLock);

		/*
		 * If this lock originally belonged to an old enough transaction, we
		 * can release it.
		 */
		if (canDoPartialCleanup)
		{
			PREDICATELOCKTAG tag;
			PREDICATELOCKTARGET *target;
			PREDICATELOCKTARGETTAG targettag;
			uint32		targettaghash;
			LWLock	   *partitionLock;

			tag = predlock->tag;
			target = tag.myTarget;
			targettag = target->tag;
			targettaghash = PredicateLockTargetTagHashCode(&targettag);
			partitionLock = PredicateLockHashPartitionLock(targettaghash);

			/* The partition lock covers both the lock and its target. */
			LWLockAcquire(partitionLock, LW_EXCLUSIVE);

			SHMQueueDelete(&(predlock->targetLink));
			SHMQueueDelete(&(predlock->xactLink));

			hash_search_with_hash_value(PredicateLockHash, &tag,
										PredicateLockHashCodeFromTargetHashCode(&tag,
																				targettaghash),
										HASH_REMOVE, NULL);
			RemoveTargetIfNoLongerUsed(target, targettaghash);

			LWLockRelease(partitionLock);
		}

		predlock = nextpredlock;
	}

	LWLockRelease(SerializablePredicateLockListLock);
	LWLockRelease(SerializableFinishedListLock);
}
3905
3906 /*
3907 * This is the normal way to delete anything from any of the predicate
3908 * locking hash tables. Given a transaction which we know can be deleted:
3909 * delete all predicate locks held by that transaction and any predicate
3910 * lock targets which are now unreferenced by a lock; delete all conflicts
3911 * for the transaction; delete all xid values for the transaction; then
3912 * delete the transaction.
3913 *
3914 * When the partial flag is set, we can release all predicate locks and
3915 * in-conflict information -- we've established that there are no longer
3916 * any overlapping read write transactions for which this transaction could
3917 * matter -- but keep the transaction entry itself and any outConflicts.
3918 *
3919 * When the summarize flag is set, we've run short of room for sxact data
3920 * and must summarize to the SLRU. Predicate locks are transferred to a
3921 * dummy "old" transaction, with duplicate locks on a single target
3922 * collapsing to a single lock with the "latest" commitSeqNo from among
 * the conflicting locks.
3924 */
static void
ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
						   bool summarize)
{
	PREDICATELOCK *predlock;
	SERIALIZABLEXIDTAG sxidtag;
	RWConflict	conflict,
				nextConflict;

	/* Caller must already hold SerializableFinishedListLock. */
	Assert(sxact != NULL);
	Assert(SxactIsRolledBack(sxact) || SxactIsCommitted(sxact));
	Assert(partial || !SxactIsOnFinishedList(sxact));
	Assert(LWLockHeldByMe(SerializableFinishedListLock));

	/*
	 * First release all the predicate locks held by this xact (or transfer
	 * them to OldCommittedSxact if summarize is true)
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
	if (IsInParallelMode())
	{
		/* In parallel mode, the sxact's own lock also guards its lock list. */
		LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
	}
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(sxact->predicateLocks),
					 &(sxact->predicateLocks),
					 offsetof(PREDICATELOCK, xactLink));
	while (predlock)
	{
		PREDICATELOCK *nextpredlock;
		PREDICATELOCKTAG tag;
		SHM_QUEUE  *targetLink;
		PREDICATELOCKTARGET *target;
		PREDICATELOCKTARGETTAG targettag;
		uint32		targettaghash;
		LWLock	   *partitionLock;

		/*
		 * Save the next entry now; the summarize path below reuses the
		 * 'predlock' variable for the dummy transaction's entry.
		 */
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(sxact->predicateLocks),
						 &(predlock->xactLink),
						 offsetof(PREDICATELOCK, xactLink));

		tag = predlock->tag;
		targetLink = &(predlock->targetLink);
		target = tag.myTarget;
		targettag = target->tag;
		targettaghash = PredicateLockTargetTagHashCode(&targettag);
		partitionLock = PredicateLockHashPartitionLock(targettaghash);

		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		SHMQueueDelete(targetLink);

		hash_search_with_hash_value(PredicateLockHash, &tag,
									PredicateLockHashCodeFromTargetHashCode(&tag,
																			targettaghash),
									HASH_REMOVE, NULL);
		if (summarize)
		{
			bool		found;

			/* Fold into dummy transaction list. */
			tag.myXact = OldCommittedSxact;
			predlock = hash_search_with_hash_value(PredicateLockHash, &tag,
												   PredicateLockHashCodeFromTargetHashCode(&tag,
																						   targettaghash),
												   HASH_ENTER_NULL, &found);
			if (!predlock)
				ereport(ERROR,
						(errcode(ERRCODE_OUT_OF_MEMORY),
						 errmsg("out of shared memory"),
						 errhint("You might need to increase max_pred_locks_per_transaction.")));
			if (found)
			{
				/* Duplicate target: keep the later commitSeqNo of the two. */
				Assert(predlock->commitSeqNo != 0);
				Assert(predlock->commitSeqNo != InvalidSerCommitSeqNo);
				if (predlock->commitSeqNo < sxact->commitSeqNo)
					predlock->commitSeqNo = sxact->commitSeqNo;
			}
			else
			{
				/* New dummy entry: link it to the target and dummy xact. */
				SHMQueueInsertBefore(&(target->predicateLocks),
									 &(predlock->targetLink));
				SHMQueueInsertBefore(&(OldCommittedSxact->predicateLocks),
									 &(predlock->xactLink));
				predlock->commitSeqNo = sxact->commitSeqNo;
			}
		}
		else
			RemoveTargetIfNoLongerUsed(target, targettaghash);

		LWLockRelease(partitionLock);

		predlock = nextpredlock;
	}

	/*
	 * Rather than retail removal, just re-init the head after we've run
	 * through the list.
	 */
	SHMQueueInit(&sxact->predicateLocks);

	if (IsInParallelMode())
		LWLockRelease(&sxact->predicateLockListLock);
	LWLockRelease(SerializablePredicateLockListLock);

	/* Remember the top-level xid; used below to drop the xid hash entry. */
	sxidtag.xid = sxact->topXid;
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Release all outConflicts (unless 'partial' is true) */
	if (!partial)
	{
		conflict = (RWConflict)
			SHMQueueNext(&sxact->outConflicts,
						 &sxact->outConflicts,
						 offsetof(RWConflictData, outLink));
		while (conflict)
		{
			nextConflict = (RWConflict)
				SHMQueueNext(&sxact->outConflicts,
							 &conflict->outLink,
							 offsetof(RWConflictData, outLink));
			/* When summarizing, preserve the conflict as a summary flag. */
			if (summarize)
				conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
			ReleaseRWConflict(conflict);
			conflict = nextConflict;
		}
	}

	/* Release all inConflicts. */
	conflict = (RWConflict)
		SHMQueueNext(&sxact->inConflicts,
					 &sxact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (conflict)
	{
		nextConflict = (RWConflict)
			SHMQueueNext(&sxact->inConflicts,
						 &conflict->inLink,
						 offsetof(RWConflictData, inLink));
		/* As above, demote the conflict to a summary flag if summarizing. */
		if (summarize)
			conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
		ReleaseRWConflict(conflict);
		conflict = nextConflict;
	}

	/* Finally, get rid of the xid and the record of the transaction itself. */
	if (!partial)
	{
		if (sxidtag.xid != InvalidTransactionId)
			hash_search(SerializableXidHash, &sxidtag, HASH_REMOVE, NULL);
		ReleasePredXact(sxact);
	}

	LWLockRelease(SerializableXactHashLock);
}
4079
4080 /*
4081 * Tests whether the given top level transaction is concurrent with
4082 * (overlaps) our current transaction.
4083 *
4084 * We need to identify the top level transaction for SSI, anyway, so pass
4085 * that to this function to save the overhead of checking the snapshot's
4086 * subxip array.
4087 */
4088 static bool
XidIsConcurrent(TransactionId xid)4089 XidIsConcurrent(TransactionId xid)
4090 {
4091 Snapshot snap;
4092 uint32 i;
4093
4094 Assert(TransactionIdIsValid(xid));
4095 Assert(!TransactionIdEquals(xid, GetTopTransactionIdIfAny()));
4096
4097 snap = GetTransactionSnapshot();
4098
4099 if (TransactionIdPrecedes(xid, snap->xmin))
4100 return false;
4101
4102 if (TransactionIdFollowsOrEquals(xid, snap->xmax))
4103 return true;
4104
4105 for (i = 0; i < snap->xcnt; i++)
4106 {
4107 if (xid == snap->xip[i])
4108 return true;
4109 }
4110
4111 return false;
4112 }
4113
4114 /*
4115 * CheckForSerializableConflictOut
4116 * We are reading a tuple which has been modified. If it is visible to
4117 * us but has been deleted, that indicates a rw-conflict out. If it's
4118 * not visible and was created by a concurrent (overlapping)
 * serializable transaction, that is also a rw-conflict out.
4120 *
4121 * We will determine the top level xid of the writing transaction with which
4122 * we may be in conflict, and check for overlap with our own transaction.
4123 * If the transactions overlap (i.e., they cannot see each other's writes),
4124 * then we have a conflict out.
4125 *
4126 * This function should be called just about anywhere in heapam.c where a
4127 * tuple has been read. The caller must hold at least a shared lock on the
4128 * buffer, because this function might set hint bits on the tuple. There is
4129 * currently no known reason to call this function from an index AM.
4130 */
void
CheckForSerializableConflictOut(bool visible, Relation relation,
								HeapTuple tuple, Buffer buffer,
								Snapshot snapshot)
{
	TransactionId xid;
	SERIALIZABLEXIDTAG sxidtag;
	SERIALIZABLEXID *sxid;
	SERIALIZABLEXACT *sxact;
	HTSV_Result htsvResult;

	/* Quick exit if no SSI checking is needed for this read. */
	if (!SerializationNeededForRead(relation, snapshot))
		return;

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
	{
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
				 errhint("The transaction might succeed if retried.")));
	}

	/*
	 * Check to see whether the tuple has been written to by a concurrent
	 * transaction, either to create it not visible to us, or to delete it
	 * while it is visible to us. The "visible" bool indicates whether the
	 * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
	 * is going on with it.
	 *
	 * In the event of a concurrently inserted tuple that also happens to have
	 * been concurrently updated (by a separate transaction), the xmin of the
	 * tuple will be used -- not the updater's xid.
	 */
	htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
	switch (htsvResult)
	{
		case HEAPTUPLE_LIVE:
			/* A live tuple that is visible to us poses no conflict. */
			if (visible)
				return;
			/* Not visible: the potential conflict is with the inserter. */
			xid = HeapTupleHeaderGetXmin(tuple->t_data);
			break;
		case HEAPTUPLE_RECENTLY_DEAD:
		case HEAPTUPLE_DELETE_IN_PROGRESS:

			/*
			 * If the tuple is visible to us, the conflict is with whoever
			 * deleted/updated it; otherwise it is with the inserter.
			 */
			if (visible)
				xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
			else
				xid = HeapTupleHeaderGetXmin(tuple->t_data);

			if (TransactionIdPrecedes(xid, TransactionXmin))
			{
				/* This is like the HEAPTUPLE_DEAD case */
				Assert(!visible);
				return;
			}
			break;
		case HEAPTUPLE_INSERT_IN_PROGRESS:
			xid = HeapTupleHeaderGetXmin(tuple->t_data);
			break;
		case HEAPTUPLE_DEAD:
			Assert(!visible);
			return;
		default:

			/*
			 * The only way to get to this default clause is if a new value is
			 * added to the enum type without adding it to this switch
			 * statement. That's a bug, so elog.
			 */
			elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);

			/*
			 * In spite of having all enum values covered and calling elog on
			 * this default, some compilers think this is a code path which
			 * allows xid to be used below without initialization. Silence
			 * that warning.
			 */
			xid = InvalidTransactionId;
	}
	Assert(TransactionIdIsValid(xid));
	Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));

	/*
	 * Find top level xid. Bail out if xid is too early to be a conflict, or
	 * if it's our own xid.
	 */
	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
		return;
	xid = SubTransGetTopmostTransaction(xid);
	if (TransactionIdPrecedes(xid, TransactionXmin))
		return;
	if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
		return;

	/*
	 * Find sxact or summarized info for the top level xid.
	 */
	sxidtag.xid = xid;
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
	sxid = (SERIALIZABLEXID *)
		hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
	if (!sxid)
	{
		/*
		 * Transaction not found in "normal" SSI structures. Check whether it
		 * got pushed out to SLRU storage for "old committed" transactions.
		 */
		SerCommitSeqNo conflictCommitSeqNo;

		conflictCommitSeqNo = OldSerXidGetMinConflictCommitSeqNo(xid);
		if (conflictCommitSeqNo != 0)
		{
			if (conflictCommitSeqNo != InvalidSerCommitSeqNo
				&& (!SxactIsReadOnly(MySerializableXact)
					|| conflictCommitSeqNo
					<= MySerializableXact->SeqNo.lastCommitBeforeSnapshot))
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to read/write dependencies among transactions"),
						 errdetail_internal("Reason code: Canceled on conflict out to old pivot %u.", xid),
						 errhint("The transaction might succeed if retried.")));

			/* A conflict in as well would make us a pivot: cancel. */
			if (SxactHasSummaryConflictIn(MySerializableXact)
				|| !SHMQueueEmpty(&MySerializableXact->inConflicts))
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to read/write dependencies among transactions"),
						 errdetail_internal("Reason code: Canceled on identification as a pivot, with conflict out to old committed transaction %u.", xid),
						 errhint("The transaction might succeed if retried.")));

			MySerializableXact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
		}

		/* It's not serializable or otherwise not important. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}
	sxact = sxid->myXact;
	Assert(TransactionIdEquals(sxact->topXid, xid));
	if (sxact == MySerializableXact || SxactIsDoomed(sxact))
	{
		/* Can't conflict with ourself or a transaction that will roll back. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	/*
	 * We have a conflict out to a transaction which has a conflict out to a
	 * summarized transaction. That summarized transaction must have
	 * committed first, and we can't tell when it committed in relation to our
	 * snapshot acquisition, so something needs to be canceled.
	 */
	if (SxactHasSummaryConflictOut(sxact))
	{
		if (!SxactIsPrepared(sxact))
		{
			/* Not yet prepared: doom the other transaction rather than us. */
			sxact->flags |= SXACT_FLAG_DOOMED;
			LWLockRelease(SerializableXactHashLock);
			return;
		}
		else
		{
			LWLockRelease(SerializableXactHashLock);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
					 errhint("The transaction might succeed if retried.")));
		}
	}

	/*
	 * If this is a read-only transaction and the writing transaction has
	 * committed, and it doesn't have a rw-conflict to a transaction which
	 * committed before it, no conflict.
	 */
	if (SxactIsReadOnly(MySerializableXact)
		&& SxactIsCommitted(sxact)
		&& !SxactHasSummaryConflictOut(sxact)
		&& (!SxactHasConflictOut(sxact)
			|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
	{
		/* Read-only transaction will appear to run first. No conflict. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	if (!XidIsConcurrent(xid))
	{
		/* This write was already in our snapshot; no conflict. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	if (RWConflictExists(MySerializableXact, sxact))
	{
		/* We don't want duplicate conflict records in the list. */
		LWLockRelease(SerializableXactHashLock);
		return;
	}

	/*
	 * Flag the conflict. But first, if this conflict creates a dangerous
	 * structure, ereport an error.
	 */
	FlagRWConflict(MySerializableXact, sxact);
	LWLockRelease(SerializableXactHashLock);
}
4340
4341 /*
4342 * Check a particular target for rw-dependency conflict in. A subroutine of
4343 * CheckForSerializableConflictIn().
4344 */
static void
CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
{
	uint32		targettaghash;
	LWLock	   *partitionLock;
	PREDICATELOCKTARGET *target;
	PREDICATELOCK *predlock;
	PREDICATELOCK *mypredlock = NULL;
	PREDICATELOCKTAG mypredlocktag;

	Assert(MySerializableXact != InvalidSerializableXact);

	/*
	 * The same hash and LW lock apply to the lock target and the lock itself.
	 */
	targettaghash = PredicateLockTargetTagHashCode(targettag);
	partitionLock = PredicateLockHashPartitionLock(targettaghash);
	LWLockAcquire(partitionLock, LW_SHARED);
	target = (PREDICATELOCKTARGET *)
		hash_search_with_hash_value(PredicateLockTargetHash,
									targettag, targettaghash,
									HASH_FIND, NULL);
	if (!target)
	{
		/* Nothing has this target locked; we're done here. */
		LWLockRelease(partitionLock);
		return;
	}

	/*
	 * Each lock for an overlapping transaction represents a conflict: a
	 * rw-dependency in to this transaction.
	 */
	predlock = (PREDICATELOCK *)
		SHMQueueNext(&(target->predicateLocks),
					 &(target->predicateLocks),
					 offsetof(PREDICATELOCK, targetLink));
	LWLockAcquire(SerializableXactHashLock, LW_SHARED);
	while (predlock)
	{
		SHM_QUEUE  *predlocktargetlink;
		PREDICATELOCK *nextpredlock;
		SERIALIZABLEXACT *sxact;

		/* Save the successor before examining the current entry. */
		predlocktargetlink = &(predlock->targetLink);
		nextpredlock = (PREDICATELOCK *)
			SHMQueueNext(&(target->predicateLocks),
						 predlocktargetlink,
						 offsetof(PREDICATELOCK, targetLink));

		sxact = predlock->tag.myXact;
		if (sxact == MySerializableXact)
		{
			/*
			 * If we're getting a write lock on a tuple, we don't need a
			 * predicate (SIREAD) lock on the same tuple. We can safely remove
			 * our SIREAD lock, but we'll defer doing so until after the loop
			 * because that requires upgrading to an exclusive partition lock.
			 *
			 * We can't use this optimization within a subtransaction because
			 * the subtransaction could roll back, and we would be left
			 * without any lock at the top level.
			 */
			if (!IsSubTransaction()
				&& GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
			{
				mypredlock = predlock;
				mypredlocktag = predlock->tag;
			}
		}
		else if (!SxactIsDoomed(sxact)
				 && (!SxactIsCommitted(sxact)
					 || TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
											  sxact->finishedBefore))
				 && !RWConflictExists(sxact, MySerializableXact))
		{
			/* Trade the shared hash lock for an exclusive one to flag it. */
			LWLockRelease(SerializableXactHashLock);
			LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

			/*
			 * Re-check after getting exclusive lock because the other
			 * transaction may have flagged a conflict.
			 */
			if (!SxactIsDoomed(sxact)
				&& (!SxactIsCommitted(sxact)
					|| TransactionIdPrecedes(GetTransactionSnapshot()->xmin,
											 sxact->finishedBefore))
				&& !RWConflictExists(sxact, MySerializableXact))
			{
				FlagRWConflict(sxact, MySerializableXact);
			}

			/* Drop back to shared mode for the rest of the scan. */
			LWLockRelease(SerializableXactHashLock);
			LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		}

		predlock = nextpredlock;
	}
	LWLockRelease(SerializableXactHashLock);
	LWLockRelease(partitionLock);

	/*
	 * If we found one of our own SIREAD locks to remove, remove it now.
	 *
	 * At this point our transaction already has an ExclusiveRowLock on the
	 * relation, so we are OK to drop the predicate lock on the tuple, if
	 * found, without fearing that another write against the tuple will occur
	 * before the MVCC information makes it to the buffer.
	 */
	if (mypredlock != NULL)
	{
		uint32		predlockhashcode;
		PREDICATELOCK *rmpredlock;

		/* Take every lock needed to modify shared predicate-lock state. */
		LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
		if (IsInParallelMode())
			LWLockAcquire(&MySerializableXact->predicateLockListLock, LW_EXCLUSIVE);
		LWLockAcquire(partitionLock, LW_EXCLUSIVE);
		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

		/*
		 * Remove the predicate lock from shared memory, if it wasn't removed
		 * while the locks were released. One way that could happen is from
		 * autovacuum cleaning up an index.
		 */
		predlockhashcode = PredicateLockHashCodeFromTargetHashCode
			(&mypredlocktag, targettaghash);
		rmpredlock = (PREDICATELOCK *)
			hash_search_with_hash_value(PredicateLockHash,
										&mypredlocktag,
										predlockhashcode,
										HASH_FIND, NULL);
		if (rmpredlock != NULL)
		{
			Assert(rmpredlock == mypredlock);

			SHMQueueDelete(&(mypredlock->targetLink));
			SHMQueueDelete(&(mypredlock->xactLink));

			rmpredlock = (PREDICATELOCK *)
				hash_search_with_hash_value(PredicateLockHash,
											&mypredlocktag,
											predlockhashcode,
											HASH_REMOVE, NULL);
			Assert(rmpredlock == mypredlock);

			RemoveTargetIfNoLongerUsed(target, targettaghash);
		}

		/* Release in the reverse of the acquisition order above. */
		LWLockRelease(SerializableXactHashLock);
		LWLockRelease(partitionLock);
		if (IsInParallelMode())
			LWLockRelease(&MySerializableXact->predicateLockListLock);
		LWLockRelease(SerializablePredicateLockListLock);

		if (rmpredlock != NULL)
		{
			/*
			 * Remove entry in local lock table if it exists. It's OK if it
			 * doesn't exist; that means the lock was transferred to a new
			 * target by a different backend.
			 */
			hash_search_with_hash_value(LocalPredicateLockHash,
										targettag, targettaghash,
										HASH_REMOVE, NULL);

			DecrementParentLocks(targettag);
		}
	}
}
4515
4516 /*
4517 * CheckForSerializableConflictIn
4518 * We are writing the given tuple. If that indicates a rw-conflict
4519 * in from another serializable transaction, take appropriate action.
4520 *
4521 * Skip checking for any granularity for which a parameter is missing.
4522 *
4523 * A tuple update or delete is in conflict if we have a predicate lock
4524 * against the relation or page in which the tuple exists, or against the
4525 * tuple itself.
4526 */
4527 void
CheckForSerializableConflictIn(Relation relation,HeapTuple tuple,Buffer buffer)4528 CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
4529 Buffer buffer)
4530 {
4531 PREDICATELOCKTARGETTAG targettag;
4532
4533 if (!SerializationNeededForWrite(relation))
4534 return;
4535
4536 /* Check if someone else has already decided that we need to die */
4537 if (SxactIsDoomed(MySerializableXact))
4538 ereport(ERROR,
4539 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
4540 errmsg("could not serialize access due to read/write dependencies among transactions"),
4541 errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict in checking."),
4542 errhint("The transaction might succeed if retried.")));
4543
4544 /*
4545 * We're doing a write which might cause rw-conflicts now or later.
4546 * Memorize that fact.
4547 */
4548 MyXactDidWrite = true;
4549
4550 /*
4551 * It is important that we check for locks from the finest granularity to
4552 * the coarsest granularity, so that granularity promotion doesn't cause
4553 * us to miss a lock. The new (coarser) lock will be acquired before the
4554 * old (finer) locks are released.
4555 *
4556 * It is not possible to take and hold a lock across the checks for all
4557 * granularities because each target could be in a separate partition.
4558 */
4559 if (tuple != NULL)
4560 {
4561 SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
4562 relation->rd_node.dbNode,
4563 relation->rd_id,
4564 ItemPointerGetBlockNumber(&(tuple->t_self)),
4565 ItemPointerGetOffsetNumber(&(tuple->t_self)));
4566 CheckTargetForConflictsIn(&targettag);
4567 }
4568
4569 if (BufferIsValid(buffer))
4570 {
4571 SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
4572 relation->rd_node.dbNode,
4573 relation->rd_id,
4574 BufferGetBlockNumber(buffer));
4575 CheckTargetForConflictsIn(&targettag);
4576 }
4577
4578 SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
4579 relation->rd_node.dbNode,
4580 relation->rd_id);
4581 CheckTargetForConflictsIn(&targettag);
4582 }
4583
4584 /*
4585 * CheckTableForSerializableConflictIn
4586 * The entire table is going through a DDL-style logical mass delete
4587 * like TRUNCATE or DROP TABLE. If that causes a rw-conflict in from
4588 * another serializable transaction, take appropriate action.
4589 *
4590 * While these operations do not operate entirely within the bounds of
4591 * snapshot isolation, they can occur inside a serializable transaction, and
4592 * will logically occur after any reads which saw rows which were destroyed
4593 * by these operations, so we do what we can to serialize properly under
4594 * SSI.
4595 *
4596 * The relation passed in must be a heap relation. Any predicate lock of any
4597 * granularity on the heap will cause a rw-conflict in to this transaction.
4598 * Predicate locks on indexes do not matter because they only exist to guard
4599 * against conflicting inserts into the index, and this is a mass *delete*.
4600 * When a table is truncated or dropped, the index will also be truncated
4601 * or dropped, and we'll deal with locks on the index when that happens.
4602 *
4603 * Dropping or truncating a table also needs to drop any existing predicate
4604 * locks on heap tuples or pages, because they're about to go away. This
4605 * should be done before altering the predicate locks because the transaction
4606 * could be rolled back because of a conflict, in which case the lock changes
 * are not needed. (At the moment, we don't actually bother to drop the
 * existing locks on a dropped or truncated table. That might lead to some
 * false positives, but it doesn't seem worth the trouble.)
4610 */
void
CheckTableForSerializableConflictIn(Relation relation)
{
	HASH_SEQ_STATUS seqstat;
	PREDICATELOCKTARGET *target;
	Oid			dbId;
	Oid			heapId;
	int			i;

	/*
	 * Bail out quickly if there are no serializable transactions running.
	 * It's safe to check this without taking locks because the caller is
	 * holding an ACCESS EXCLUSIVE lock on the relation.  No new locks which
	 * would matter here can be acquired while that is held.
	 */
	if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
		return;

	if (!SerializationNeededForWrite(relation))
		return;

	/*
	 * We're doing a write which might cause rw-conflicts now or later.
	 * Memorize that fact.
	 */
	MyXactDidWrite = true;

	Assert(relation->rd_index == NULL); /* not an index relation */

	dbId = relation->rd_node.dbNode;
	heapId = relation->rd_id;

	/*
	 * Take all of the predicate lock partition locks (in a consistent
	 * ascending order) plus the xact hash lock, so that we can scan the
	 * entire target hash table while no entries move underneath us.
	 */
	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Scan through target list */
	hash_seq_init(&seqstat, PredicateLockTargetHash);

	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
	{
		PREDICATELOCK *predlock;

		/*
		 * Check whether this is a target which needs attention.  Any lock of
		 * any granularity on this heap relation matters; everything else is
		 * skipped.
		 */
		if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
			continue;			/* wrong relation id */
		if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
			continue;			/* wrong database id */

		/*
		 * Loop through locks for this target and flag conflicts.
		 */
		predlock = (PREDICATELOCK *)
			SHMQueueNext(&(target->predicateLocks),
						 &(target->predicateLocks),
						 offsetof(PREDICATELOCK, targetLink));
		while (predlock)
		{
			PREDICATELOCK *nextpredlock;

			/* Fetch the successor before we process the current entry. */
			nextpredlock = (PREDICATELOCK *)
				SHMQueueNext(&(target->predicateLocks),
							 &(predlock->targetLink),
							 offsetof(PREDICATELOCK, targetLink));

			/* Our own locks, and already-known conflicts, need no flagging. */
			if (predlock->tag.myXact != MySerializableXact
				&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
			{
				FlagRWConflict(predlock->tag.myXact, MySerializableXact);
			}

			predlock = nextpredlock;
		}
	}

	/* Release locks in reverse order */
	LWLockRelease(SerializableXactHashLock);
	for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
		LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
	LWLockRelease(SerializablePredicateLockListLock);
}
4695
4696
4697 /*
4698 * Flag a rw-dependency between two serializable transactions.
4699 *
4700 * The caller is responsible for ensuring that we have a LW lock on
4701 * the transaction hash table.
4702 */
4703 static void
FlagRWConflict(SERIALIZABLEXACT * reader,SERIALIZABLEXACT * writer)4704 FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
4705 {
4706 Assert(reader != writer);
4707
4708 /* First, see if this conflict causes failure. */
4709 OnConflict_CheckForSerializationFailure(reader, writer);
4710
4711 /* Actually do the conflict flagging. */
4712 if (reader == OldCommittedSxact)
4713 writer->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
4714 else if (writer == OldCommittedSxact)
4715 reader->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
4716 else
4717 SetRWConflict(reader, writer);
4718 }
4719
4720 /*----------------------------------------------------------------------------
4721 * We are about to add a RW-edge to the dependency graph - check that we don't
4722 * introduce a dangerous structure by doing so, and abort one of the
4723 * transactions if so.
4724 *
4725 * A serialization failure can only occur if there is a dangerous structure
4726 * in the dependency graph:
4727 *
4728 * Tin ------> Tpivot ------> Tout
4729 * rw rw
4730 *
4731 * Furthermore, Tout must commit first.
4732 *
4733 * One more optimization is that if Tin is declared READ ONLY (or commits
4734 * without writing), we can only have a problem if Tout committed before Tin
4735 * acquired its snapshot.
4736 *----------------------------------------------------------------------------
4737 */
static void
OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
										SERIALIZABLEXACT *writer)
{
	bool		failure;
	RWConflict	conflict;

	/* Caller must already hold the transaction hash lock. */
	Assert(LWLockHeldByMe(SerializableXactHashLock));

	failure = false;

	/*------------------------------------------------------------------------
	 * Check for already-committed writer with rw-conflict out flagged
	 * (conflict-flag on W means that T2 committed before W):
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * That is a dangerous structure, so we must abort. (Since the writer
	 * has already committed, we must be the reader)
	 *------------------------------------------------------------------------
	 */
	if (SxactIsCommitted(writer)
		&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
		failure = true;

	/*------------------------------------------------------------------------
	 * Check whether the writer has become a pivot with an out-conflict
	 * committed transaction (T2), and T2 committed first:
	 *
	 *		R ------> W ------> T2
	 *			rw		  rw
	 *
	 * Because T2 must've committed first, there is no anomaly if:
	 * - the reader committed before T2
	 * - the writer committed before T2
	 * - the reader is a READ ONLY transaction and the reader was concurrent
	 *	 with T2 (= reader acquired its snapshot before T2 committed)
	 *
	 * We also handle the case that T2 is prepared but not yet committed
	 * here. In that case T2 has already checked for conflicts, so if it
	 * commits first, making the above conflict real, it's too late for it
	 * to abort.
	 *------------------------------------------------------------------------
	 */
	if (!failure)
	{
		/* With only a summary flag, we can't rule the anomaly out: fail. */
		if (SxactHasSummaryConflictOut(writer))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &writer->outConflicts,
							 offsetof(RWConflictData, outLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t2 = conflict->sxactIn;

			/* Dangerous only if none of the three escape clauses hold. */
			if (SxactIsPrepared(t2)
				&& (!SxactIsCommitted(reader)
					|| t2->prepareSeqNo <= reader->commitSeqNo)
				&& (!SxactIsCommitted(writer)
					|| t2->prepareSeqNo <= writer->commitSeqNo)
				&& (!SxactIsReadOnly(reader)
					|| t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&writer->outConflicts,
							 &conflict->outLink,
							 offsetof(RWConflictData, outLink));
		}
	}

	/*------------------------------------------------------------------------
	 * Check whether the reader has become a pivot with a writer
	 * that's committed (or prepared):
	 *
	 *		T0 ------> R ------> W
	 *			 rw		   rw
	 *
	 * Because W must've committed first for an anomaly to occur, there is no
	 * anomaly if:
	 * - T0 committed before the writer
	 * - T0 is READ ONLY, and overlaps the writer
	 *------------------------------------------------------------------------
	 */
	if (!failure && SxactIsPrepared(writer) && !SxactIsReadOnly(reader))
	{
		/* As above: a summary flag alone can't rule the anomaly out. */
		if (SxactHasSummaryConflictIn(reader))
		{
			failure = true;
			conflict = NULL;
		}
		else
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &reader->inConflicts,
							 offsetof(RWConflictData, inLink));
		while (conflict)
		{
			SERIALIZABLEXACT *t0 = conflict->sxactOut;

			if (!SxactIsDoomed(t0)
				&& (!SxactIsCommitted(t0)
					|| t0->commitSeqNo >= writer->prepareSeqNo)
				&& (!SxactIsReadOnly(t0)
					|| t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
			{
				failure = true;
				break;
			}
			conflict = (RWConflict)
				SHMQueueNext(&reader->inConflicts,
							 &conflict->inLink,
							 offsetof(RWConflictData, inLink));
		}
	}

	if (failure)
	{
		/*
		 * We have to kill a transaction to avoid a possible anomaly from
		 * occurring. If the writer is us, we can just ereport() to cause a
		 * transaction abort. Otherwise we flag the writer for termination,
		 * causing it to abort when it tries to commit. However, if the writer
		 * is a prepared transaction, already prepared, we can't abort it
		 * anymore, so we have to kill the reader instead.
		 */
		if (MySerializableXact == writer)
		{
			/* Release the lock before erroring out of this function. */
			LWLockRelease(SerializableXactHashLock);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on identification as a pivot, during write."),
					 errhint("The transaction might succeed if retried.")));
		}
		else if (SxactIsPrepared(writer))
		{
			LWLockRelease(SerializableXactHashLock);

			/* if we're not the writer, we have to be the reader */
			Assert(MySerializableXact == reader);
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to read/write dependencies among transactions"),
					 errdetail_internal("Reason code: Canceled on conflict out to pivot %u, during read.", writer->topXid),
					 errhint("The transaction might succeed if retried.")));
		}
		writer->flags |= SXACT_FLAG_DOOMED;
	}
}
4896
4897 /*
 * PreCommit_CheckForSerializationFailure
4899 * Check for dangerous structures in a serializable transaction
4900 * at commit.
4901 *
4902 * We're checking for a dangerous structure as each conflict is recorded.
4903 * The only way we could have a problem at commit is if this is the "out"
4904 * side of a pivot, and neither the "in" side nor the pivot has yet
4905 * committed.
4906 *
4907 * If a dangerous structure is found, the pivot (the near conflict) is
4908 * marked for death, because rolling back another transaction might mean
4909 * that we flail without ever making progress. This transaction is
4910 * committing writes, so letting it commit ensures progress. If we
4911 * canceled the far conflict, it might immediately fail again on retry.
4912 */
void
PreCommit_CheckForSerializationFailure(void)
{
	RWConflict	nearConflict;

	/* Nothing to do unless this is a serializable transaction. */
	if (MySerializableXact == InvalidSerializableXact)
		return;

	Assert(IsolationIsSerializable());

	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* Check if someone else has already decided that we need to die */
	if (SxactIsDoomed(MySerializableXact))
	{
		Assert(!SxactIsPartiallyReleased(MySerializableXact));
		LWLockRelease(SerializableXactHashLock);
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to read/write dependencies among transactions"),
				 errdetail_internal("Reason code: Canceled on identification as a pivot, during commit attempt."),
				 errhint("The transaction might succeed if retried.")));
	}

	/*
	 * Walk our in-conflicts; each entry's sxactOut is a candidate pivot
	 * (a transaction with a rw-conflict out to us).
	 */
	nearConflict = (RWConflict)
		SHMQueueNext(&MySerializableXact->inConflicts,
					 &MySerializableXact->inConflicts,
					 offsetof(RWConflictData, inLink));
	while (nearConflict)
	{
		if (!SxactIsCommitted(nearConflict->sxactOut)
			&& !SxactIsDoomed(nearConflict->sxactOut))
		{
			RWConflict	farConflict;

			/* Look for an in-conflict of the candidate pivot (the far side). */
			farConflict = (RWConflict)
				SHMQueueNext(&nearConflict->sxactOut->inConflicts,
							 &nearConflict->sxactOut->inConflicts,
							 offsetof(RWConflictData, inLink));
			while (farConflict)
			{
				if (farConflict->sxactOut == MySerializableXact
					|| (!SxactIsCommitted(farConflict->sxactOut)
						&& !SxactIsReadOnly(farConflict->sxactOut)
						&& !SxactIsDoomed(farConflict->sxactOut)))
				{
					/*
					 * Normally, we kill the pivot transaction to make sure we
					 * make progress if the failing transaction is retried.
					 * However, we can't kill it if it's already prepared, so
					 * in that case we commit suicide instead.
					 */
					if (SxactIsPrepared(nearConflict->sxactOut))
					{
						LWLockRelease(SerializableXactHashLock);
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to read/write dependencies among transactions"),
								 errdetail_internal("Reason code: Canceled on commit attempt with conflict in from prepared pivot."),
								 errhint("The transaction might succeed if retried.")));
					}
					nearConflict->sxactOut->flags |= SXACT_FLAG_DOOMED;
					break;
				}
				farConflict = (RWConflict)
					SHMQueueNext(&nearConflict->sxactOut->inConflicts,
								 &farConflict->inLink,
								 offsetof(RWConflictData, inLink));
			}
		}

		nearConflict = (RWConflict)
			SHMQueueNext(&MySerializableXact->inConflicts,
						 &nearConflict->inLink,
						 offsetof(RWConflictData, inLink));
	}

	/* Passed the checks; mark ourselves prepared, with a fresh seqno. */
	MySerializableXact->prepareSeqNo = ++(PredXact->LastSxactCommitSeqNo);
	MySerializableXact->flags |= SXACT_FLAG_PREPARED;

	LWLockRelease(SerializableXactHashLock);
}
4995
4996 /*------------------------------------------------------------------------*/
4997
4998 /*
4999 * Two-phase commit support
5000 */
5001
5002 /*
 * AtPrepare_PredicateLocks
5004 * Do the preparatory work for a PREPARE: make 2PC state file
5005 * records for all predicate locks currently held.
5006 */
5007 void
AtPrepare_PredicateLocks(void)5008 AtPrepare_PredicateLocks(void)
5009 {
5010 PREDICATELOCK *predlock;
5011 SERIALIZABLEXACT *sxact;
5012 TwoPhasePredicateRecord record;
5013 TwoPhasePredicateXactRecord *xactRecord;
5014 TwoPhasePredicateLockRecord *lockRecord;
5015
5016 sxact = MySerializableXact;
5017 xactRecord = &(record.data.xactRecord);
5018 lockRecord = &(record.data.lockRecord);
5019
5020 if (MySerializableXact == InvalidSerializableXact)
5021 return;
5022
5023 /* Generate an xact record for our SERIALIZABLEXACT */
5024 record.type = TWOPHASEPREDICATERECORD_XACT;
5025 xactRecord->xmin = MySerializableXact->xmin;
5026 xactRecord->flags = MySerializableXact->flags;
5027
5028 /*
5029 * Note that we don't include the list of conflicts in our out in the
5030 * statefile, because new conflicts can be added even after the
5031 * transaction prepares. We'll just make a conservative assumption during
5032 * recovery instead.
5033 */
5034
5035 RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
5036 &record, sizeof(record));
5037
5038 /*
5039 * Generate a lock record for each lock.
5040 *
5041 * To do this, we need to walk the predicate lock list in our sxact rather
5042 * than using the local predicate lock table because the latter is not
5043 * guaranteed to be accurate.
5044 */
5045 LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
5046
5047 /*
5048 * No need to take sxact->predicateLockListLock in parallel mode because
5049 * there cannot be any parallel workers running while we are preparing a
5050 * transaction.
5051 */
5052 Assert(!IsParallelWorker() && !ParallelContextActive());
5053
5054 predlock = (PREDICATELOCK *)
5055 SHMQueueNext(&(sxact->predicateLocks),
5056 &(sxact->predicateLocks),
5057 offsetof(PREDICATELOCK, xactLink));
5058
5059 while (predlock != NULL)
5060 {
5061 record.type = TWOPHASEPREDICATERECORD_LOCK;
5062 lockRecord->target = predlock->tag.myTarget->tag;
5063
5064 RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
5065 &record, sizeof(record));
5066
5067 predlock = (PREDICATELOCK *)
5068 SHMQueueNext(&(sxact->predicateLocks),
5069 &(predlock->xactLink),
5070 offsetof(PREDICATELOCK, xactLink));
5071 }
5072
5073 LWLockRelease(SerializablePredicateLockListLock);
5074 }
5075
5076 /*
 * PostPrepare_PredicateLocks
5078 * Clean up after successful PREPARE. Unlike the non-predicate
5079 * lock manager, we do not need to transfer locks to a dummy
5080 * PGPROC because our SERIALIZABLEXACT will stay around
5081 * anyway. We only need to clean up our local state.
5082 */
5083 void
PostPrepare_PredicateLocks(TransactionId xid)5084 PostPrepare_PredicateLocks(TransactionId xid)
5085 {
5086 if (MySerializableXact == InvalidSerializableXact)
5087 return;
5088
5089 Assert(SxactIsPrepared(MySerializableXact));
5090
5091 MySerializableXact->pid = 0;
5092
5093 hash_destroy(LocalPredicateLockHash);
5094 LocalPredicateLockHash = NULL;
5095
5096 MySerializableXact = InvalidSerializableXact;
5097 MyXactDidWrite = false;
5098 }
5099
5100 /*
5101 * PredicateLockTwoPhaseFinish
5102 * Release a prepared transaction's predicate locks once it
5103 * commits or aborts.
5104 */
5105 void
PredicateLockTwoPhaseFinish(TransactionId xid,bool isCommit)5106 PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
5107 {
5108 SERIALIZABLEXID *sxid;
5109 SERIALIZABLEXIDTAG sxidtag;
5110
5111 sxidtag.xid = xid;
5112
5113 LWLockAcquire(SerializableXactHashLock, LW_SHARED);
5114 sxid = (SERIALIZABLEXID *)
5115 hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
5116 LWLockRelease(SerializableXactHashLock);
5117
5118 /* xid will not be found if it wasn't a serializable transaction */
5119 if (sxid == NULL)
5120 return;
5121
5122 /* Release its locks */
5123 MySerializableXact = sxid->myXact;
5124 MyXactDidWrite = true; /* conservatively assume that we wrote
5125 * something */
5126 ReleasePredicateLocks(isCommit, false);
5127 }
5128
5129 /*
5130 * Re-acquire a predicate lock belonging to a transaction that was prepared.
5131 */
void
predicatelock_twophase_recover(TransactionId xid, uint16 info,
							   void *recdata, uint32 len)
{
	TwoPhasePredicateRecord *record;

	Assert(len == sizeof(TwoPhasePredicateRecord));

	record = (TwoPhasePredicateRecord *) recdata;

	Assert((record->type == TWOPHASEPREDICATERECORD_XACT) ||
		   (record->type == TWOPHASEPREDICATERECORD_LOCK));

	if (record->type == TWOPHASEPREDICATERECORD_XACT)
	{
		/* Per-transaction record. Set up a SERIALIZABLEXACT. */
		TwoPhasePredicateXactRecord *xactRecord;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXIDTAG sxidtag;
		bool		found;

		xactRecord = (TwoPhasePredicateXactRecord *) &record->data.xactRecord;

		LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
		sxact = CreatePredXact();
		if (!sxact)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory")));

		/* vxid for a prepared xact is InvalidBackendId/xid; no pid */
		sxact->vxid.backendId = InvalidBackendId;
		sxact->vxid.localTransactionId = (LocalTransactionId) xid;
		sxact->pid = 0;

		/* a prepared xact hasn't committed yet */
		sxact->prepareSeqNo = RecoverySerCommitSeqNo;
		sxact->commitSeqNo = InvalidSerCommitSeqNo;
		sxact->finishedBefore = InvalidTransactionId;

		sxact->SeqNo.lastCommitBeforeSnapshot = RecoverySerCommitSeqNo;

		/*
		 * Don't need to track this; no transactions running at the time the
		 * recovered xact started are still active, except possibly other
		 * prepared xacts and we don't care whether those are RO_SAFE or not.
		 */
		SHMQueueInit(&(sxact->possibleUnsafeConflicts));

		SHMQueueInit(&(sxact->predicateLocks));
		SHMQueueElemInit(&(sxact->finishedLink));

		sxact->topXid = xid;
		sxact->xmin = xactRecord->xmin;
		sxact->flags = xactRecord->flags;
		/* The flags were saved at PREPARE time, so this must hold. */
		Assert(SxactIsPrepared(sxact));
		if (!SxactIsReadOnly(sxact))
		{
			++(PredXact->WritableSxactCount);
			Assert(PredXact->WritableSxactCount <=
				   (MaxBackends + max_prepared_xacts));
		}

		/*
		 * We don't know whether the transaction had any conflicts or not, so
		 * we'll conservatively assume that it had both a conflict in and a
		 * conflict out, and represent that with the summary conflict flags.
		 */
		SHMQueueInit(&(sxact->outConflicts));
		SHMQueueInit(&(sxact->inConflicts));
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
		sxact->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;

		/* Register the transaction's xid */
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *) hash_search(SerializableXidHash,
											   &sxidtag,
											   HASH_ENTER, &found);
		Assert(sxid != NULL);
		Assert(!found);
		sxid->myXact = (SERIALIZABLEXACT *) sxact;

		/*
		 * Update global xmin. Note that this is a special case compared to
		 * registering a normal transaction, because the global xmin might go
		 * backwards. That's OK, because until recovery is over we're not
		 * going to complete any transactions or create any non-prepared
		 * transactions, so there's no danger of throwing away state that is
		 * still needed.
		 */
		if ((!TransactionIdIsValid(PredXact->SxactGlobalXmin)) ||
			(TransactionIdFollows(PredXact->SxactGlobalXmin, sxact->xmin)))
		{
			PredXact->SxactGlobalXmin = sxact->xmin;
			PredXact->SxactGlobalXminCount = 1;
			OldSerXidSetActiveSerXmin(sxact->xmin);
		}
		else if (TransactionIdEquals(sxact->xmin, PredXact->SxactGlobalXmin))
		{
			Assert(PredXact->SxactGlobalXminCount > 0);
			PredXact->SxactGlobalXminCount++;
		}

		LWLockRelease(SerializableXactHashLock);
	}
	else if (record->type == TWOPHASEPREDICATERECORD_LOCK)
	{
		/* Lock record. Recreate the PREDICATELOCK */
		TwoPhasePredicateLockRecord *lockRecord;
		SERIALIZABLEXID *sxid;
		SERIALIZABLEXACT *sxact;
		SERIALIZABLEXIDTAG sxidtag;
		uint32		targettaghash;

		lockRecord = (TwoPhasePredicateLockRecord *) &record->data.lockRecord;
		targettaghash = PredicateLockTargetTagHashCode(&lockRecord->target);

		/*
		 * The XACT record for this xid must already have been replayed, so
		 * the lookup is expected to succeed.
		 */
		LWLockAcquire(SerializableXactHashLock, LW_SHARED);
		sxidtag.xid = xid;
		sxid = (SERIALIZABLEXID *)
			hash_search(SerializableXidHash, &sxidtag, HASH_FIND, NULL);
		LWLockRelease(SerializableXactHashLock);

		Assert(sxid != NULL);
		sxact = sxid->myXact;
		Assert(sxact != InvalidSerializableXact);

		CreatePredicateLock(&lockRecord->target, targettaghash, sxact);
	}
}
5262
5263 /*
5264 * Prepare to share the current SERIALIZABLEXACT with parallel workers.
5265 * Return a handle object that can be used by AttachSerializableXact() in a
5266 * parallel worker.
5267 */
5268 SerializableXactHandle
ShareSerializableXact(void)5269 ShareSerializableXact(void)
5270 {
5271 return MySerializableXact;
5272 }
5273
5274 /*
5275 * Allow parallel workers to import the leader's SERIALIZABLEXACT.
5276 */
5277 void
AttachSerializableXact(SerializableXactHandle handle)5278 AttachSerializableXact(SerializableXactHandle handle)
5279 {
5280
5281 Assert(MySerializableXact == InvalidSerializableXact);
5282
5283 MySerializableXact = (SERIALIZABLEXACT *) handle;
5284 if (MySerializableXact != InvalidSerializableXact)
5285 CreateLocalPredicateLockHash();
5286 }
5287