/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *	  Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif

#include "port/atomics.h"
#include "storage/proclist_types.h"

struct PGPROC;

/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
	uint16		tranche;		/* tranche ID */
	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
	proclist_head waiters;		/* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
	pg_atomic_uint32 nwaiters;	/* number of waiters */
	struct PGPROC *owner;		/* last exclusive owner of the lock */
#endif
} LWLock;

/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2.  This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries.  This reduces cache contention
 * problems, especially on AMD Opterons.  In some cases, it's useful to add
 * even more padding so that each LWLock takes up an entire cache line; this is
 * useful, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 */
#define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE

/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
	LWLock		lock;
	char		pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

extern PGDLLIMPORT LWLockPadded *MainLWLockArray;

/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
	int			trancheId;
	char	   *trancheName;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;

/* Names for fixed lwlocks */
#include "storage/lwlocknames.h"

/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)

typedef enum LWLockMode
{
	LW_EXCLUSIVE,
	LW_SHARED,
	LW_WAIT_UNTIL_FREE			/* A special mode used in PGPROC->lwWaitMode,
								 * when waiting for lock to become free. Not
								 * to be used as LWLockAcquire argument */
} LWLockMode;


#ifdef LOCK_DEBUG
extern bool Trace_lwlocks;
#endif

extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);

extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);

extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);

extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);

/*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array containing
 * the number of LWLocks requested.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);

/*
 * There is another, more flexible method of obtaining lwlocks. First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter.  Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with a name.
 * Finally, LWLockInitialize should be called just once per lwlock, passing
 * the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int	LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
extern void LWLockInitialize(LWLock *lock, int tranche_id);

/*
 * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
 * we reserve additional tranche IDs for builtin tranches not included in
 * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
 * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
	LWTRANCHE_XACT_BUFFER = NUM_INDIVIDUAL_LWLOCKS,
	LWTRANCHE_COMMITTS_BUFFER,
	LWTRANCHE_SUBTRANS_BUFFER,
	LWTRANCHE_MULTIXACTOFFSET_BUFFER,
	LWTRANCHE_MULTIXACTMEMBER_BUFFER,
	LWTRANCHE_NOTIFY_BUFFER,
	LWTRANCHE_SERIAL_BUFFER,
	LWTRANCHE_WAL_INSERT,
	LWTRANCHE_BUFFER_CONTENT,
	LWTRANCHE_REPLICATION_ORIGIN_STATE,
	LWTRANCHE_REPLICATION_SLOT_IO,
	LWTRANCHE_LOCK_FASTPATH,
	LWTRANCHE_BUFFER_MAPPING,
	LWTRANCHE_LOCK_MANAGER,
	LWTRANCHE_PREDICATE_LOCK_MANAGER,
	LWTRANCHE_PARALLEL_HASH_JOIN,
	LWTRANCHE_PARALLEL_QUERY_DSA,
	LWTRANCHE_PER_SESSION_DSA,
	LWTRANCHE_PER_SESSION_RECORD_TYPE,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
	LWTRANCHE_SHARED_TUPLESTORE,
	LWTRANCHE_SHARED_TIDBITMAP,
	LWTRANCHE_PARALLEL_APPEND,
	LWTRANCHE_PER_XACT_PREDICATE_LIST,
	LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks.  New code should instead use LWLock *.  However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;

#endif							/* LWLOCK_H */