1 /*-------------------------------------------------------------------------
2  *
3  * lwlock.h
4  *	  Lightweight lock manager
5  *
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/storage/lwlock.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef LWLOCK_H
15 #define LWLOCK_H
16 
17 #ifdef FRONTEND
18 #error "lwlock.h may not be included from frontend code"
19 #endif
20 
21 #include "port/atomics.h"
22 #include "storage/proclist_types.h"
23 #include "storage/s_lock.h"
24 
25 struct PGPROC;
26 
27 /*
28  * Code outside of lwlock.c should not manipulate the contents of this
29  * structure directly, but we have to declare it here to allow LWLocks to be
30  * incorporated into other data structures.
31  */
32 typedef struct LWLock
33 {
34 	uint16		tranche;		/* tranche ID */
35 	pg_atomic_uint32 state;		/* state of exclusive/nonexclusive lockers */
36 	proclist_head waiters;		/* list of waiting PGPROCs */
37 #ifdef LOCK_DEBUG
38 	pg_atomic_uint32 nwaiters;	/* number of waiters */
39 	struct PGPROC *owner;		/* last exclusive owner of the lock */
40 #endif
41 } LWLock;
42 
43 /*
44  * In most cases, it's desirable to force each tranche of LWLocks to be aligned
45  * on a cache line boundary and make the array stride a power of 2.  This saves
46  * a few cycles in indexing, but more importantly ensures that individual
47  * LWLocks don't cross cache line boundaries.  This reduces cache contention
48  * problems, especially on AMD Opterons.  In some cases, it's useful to add
49  * even more padding so that each LWLock takes up an entire cache line; this is
50  * useful, for example, in the main LWLock array, where the overall number of
51  * locks is small but some are heavily contended.
52  *
53  * When allocating a tranche that contains data other than LWLocks, it is
54  * probably best to include a bare LWLock and then pad the resulting structure
55  * as necessary for performance.  For an array that contains only LWLocks,
56  * LWLockMinimallyPadded can be used for cases where we just want to ensure
57  * that we don't cross cache line boundaries within a single lock, while
58  * LWLockPadded can be used for cases where we want each lock to be an entire
59  * cache line.
60  *
61  * An LWLockMinimallyPadded might contain more than the absolute minimum amount
62  * of padding required to keep a lock from crossing a cache line boundary,
63  * because an unpadded LWLock will normally fit into 16 bytes.  We ignore that
64  * possibility when determining the minimal amount of padding.  Older releases
65  * had larger LWLocks, so 32 really was the minimum, and packing them in
66  * tighter might hurt performance.
67  *
68  * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
69  * because pg_atomic_uint32 is more than 4 bytes on some obscure platforms, we
70  * allow for the possibility that it might be 64.  Even on those platforms,
71  * we probably won't exceed 32 bytes unless LOCK_DEBUG is defined.
72  */
73 #define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE
74 #define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
75 
76 /* LWLock, padded to a full cache line size */
77 typedef union LWLockPadded
78 {
79 	LWLock		lock;
80 	char		pad[LWLOCK_PADDED_SIZE];
81 } LWLockPadded;
82 
83 /* LWLock, minimally padded */
84 typedef union LWLockMinimallyPadded
85 {
86 	LWLock		lock;
87 	char		pad[LWLOCK_MINIMAL_SIZE];
88 } LWLockMinimallyPadded;
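
/*
 * Illustrative sketch (hypothetical struct), following the advice above on
 * tranches that contain data other than LWLocks: embed a bare LWLock and
 * pad the containing structure as needed.
 *
 *		typedef struct MySharedItem
 *		{
 *			LWLock		lock;
 *			uint64		value;
 *		} MySharedItem;
 *
 * If an array of such items proves contended, pad the struct (e.g. with a
 * trailing char[] or an alignment attribute) so that its size is a power
 * of 2 and no item crosses a cache line boundary.
 */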
89 
90 extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
91 
92 /* struct for storing named tranche information */
93 typedef struct NamedLWLockTranche
94 {
95 	int			trancheId;
96 	char	   *trancheName;
97 } NamedLWLockTranche;
98 
99 extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
100 extern PGDLLIMPORT int NamedLWLockTrancheRequests;
101 
102 /* Names for fixed lwlocks */
103 #include "storage/lwlocknames.h"
104 
105 /*
106  * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
107  * here, but we need them to figure out offsets within MainLWLockArray, and
108  * having this file include lock.h or bufmgr.h would be backwards.
109  */
110 
111 /* Number of partitions of the shared buffer mapping hashtable */
112 #define NUM_BUFFER_PARTITIONS  128
113 
114 /* Number of partitions the shared lock tables are divided into */
115 #define LOG2_NUM_LOCK_PARTITIONS  4
116 #define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)
117 
118 /* Number of partitions the shared predicate lock tables are divided into */
119 #define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
120 #define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)
121 
122 /* Offsets for various chunks of preallocated lwlocks. */
123 #define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
124 #define LOCK_MANAGER_LWLOCK_OFFSET		\
125 	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
126 #define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
127 	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
128 #define NUM_FIXED_LWLOCKS \
129 	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
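
/*
 * These offsets are consumed elsewhere to index MainLWLockArray; for
 * illustration, the lock-manager partition macros in lock.h are defined
 * along these lines:
 *
 *		#define LockHashPartition(hashcode) \
 *			((hashcode) % NUM_LOCK_PARTITIONS)
 *		#define LockHashPartitionLock(hashcode) \
 *			(&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + \
 *							  LockHashPartition(hashcode)].lock)
 */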
130 
131 typedef enum LWLockMode
132 {
133 	LW_EXCLUSIVE,
134 	LW_SHARED,
	LW_WAIT_UNTIL_FREE			/* A special mode used in PGPROC->lwWaitMode,
								 * when waiting for a lock to become free.
								 * Not to be used as an LWLockAcquire
								 * argument */
138 } LWLockMode;
139 
140 
141 #ifdef LOCK_DEBUG
142 extern bool Trace_lwlocks;
143 #endif
144 
145 extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
146 extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
147 extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
148 extern void LWLockRelease(LWLock *lock);
149 extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
150 extern void LWLockReleaseAll(void);
151 extern bool LWLockHeldByMe(LWLock *lock);
152 extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
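
/*
 * Typical usage, sketched for illustration (the lock pointer and the work
 * done under it are hypothetical):
 *
 *		LWLockAcquire(lock, LW_EXCLUSIVE);
 *		... read or update the shared state the lock protects ...
 *		LWLockRelease(lock);
 *
 * LWLockAcquire blocks until the lock is obtained (there is no deadlock
 * detection or timeout), so critical sections should be short.  Locks still
 * held when an error is thrown are released by LWLockReleaseAll() during
 * abort processing.
 */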
153 
154 extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
155 extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
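
/*
 * A hedged sketch of the protocol LWLockWaitForVar/LWLockUpdateVar support
 * (as used for the WAL insertion locks); the variable names here are
 * hypothetical.  The exclusive holder advertises progress through a uint64
 * associated with the lock, and a waiter can stop waiting once the value
 * has advanced far enough, without waiting for the lock to be released:
 *
 *		holder:		LWLockUpdateVar(lock, &progress, newPosition);
 *
 *		waiter:		while (!LWLockWaitForVar(lock, &progress, seen, &seen))
 *					{
 *						if (seen >= target)
 *							break;
 *					}
 *
 * LWLockWaitForVar() returns true when the lock is free; it returns false,
 * with *newval set, when the holder has moved the variable away from
 * oldval.
 */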
156 
157 extern Size LWLockShmemSize(void);
158 extern void CreateLWLocks(void);
159 extern void InitLWLockAccess(void);
160 
161 extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);
162 
163 /*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array containing
 * the requested number of LWLocks.
168  */
169 extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
170 extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
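
/*
 * Illustrative sketch of the named-tranche pattern for an extension loaded
 * via shared_preload_libraries; the tranche name and lock count are
 * hypothetical:
 *
 *		In _PG_init():
 *			RequestNamedLWLockTranche("my_extension", 4);
 *
 *		Later, typically from a shmem_startup_hook:
 *			LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 *
 *			LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 *			...
 *			LWLockRelease(&locks[0].lock);
 */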
171 
172 /*
173  * There is another, more flexible method of obtaining lwlocks. First, call
174  * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
175  * a shared counter.  Next, each individual process using the tranche should
176  * call LWLockRegisterTranche() to associate that tranche ID with a name.
177  * Finally, LWLockInitialize should be called just once per lwlock, passing
178  * the tranche ID as an argument.
179  *
180  * It may seem strange that each process using the tranche must register it
181  * separately, but dynamic shared memory segments aren't guaranteed to be
182  * mapped at the same address in all coordinating backends, so storing the
183  * registration in the main shared memory segment wouldn't work for that case.
184  */
185 extern int	LWLockNewTrancheId(void);
186 extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
187 extern void LWLockInitialize(LWLock *lock, int tranche_id);
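
/*
 * Illustrative sketch of the tranche-ID method, e.g. for an LWLock living
 * in a dynamic shared memory segment (names are hypothetical):
 *
 *		Once, in the process that sets up the shared state:
 *			int			tranche_id = LWLockNewTrancheId();
 *
 *			LWLockInitialize(&shared->lock, tranche_id);
 *			(publish tranche_id where other processes can find it)
 *
 *		In every process that touches the lock, including the creator:
 *			LWLockRegisterTranche(tranche_id, "my_tranche");
 */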
188 
189 /*
190  * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
191  * we reserve additional tranche IDs for builtin tranches not included in
192  * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
193  * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
194  */
195 typedef enum BuiltinTrancheIds
196 {
197 	LWTRANCHE_XACT_BUFFER = NUM_INDIVIDUAL_LWLOCKS,
198 	LWTRANCHE_COMMITTS_BUFFER,
199 	LWTRANCHE_SUBTRANS_BUFFER,
200 	LWTRANCHE_MULTIXACTOFFSET_BUFFER,
201 	LWTRANCHE_MULTIXACTMEMBER_BUFFER,
202 	LWTRANCHE_NOTIFY_BUFFER,
203 	LWTRANCHE_SERIAL_BUFFER,
204 	LWTRANCHE_WAL_INSERT,
205 	LWTRANCHE_BUFFER_CONTENT,
206 	LWTRANCHE_BUFFER_IO,
207 	LWTRANCHE_REPLICATION_ORIGIN_STATE,
208 	LWTRANCHE_REPLICATION_SLOT_IO,
209 	LWTRANCHE_LOCK_FASTPATH,
210 	LWTRANCHE_BUFFER_MAPPING,
211 	LWTRANCHE_LOCK_MANAGER,
212 	LWTRANCHE_PREDICATE_LOCK_MANAGER,
213 	LWTRANCHE_PARALLEL_HASH_JOIN,
214 	LWTRANCHE_PARALLEL_QUERY_DSA,
215 	LWTRANCHE_PER_SESSION_DSA,
216 	LWTRANCHE_PER_SESSION_RECORD_TYPE,
217 	LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
218 	LWTRANCHE_SHARED_TUPLESTORE,
219 	LWTRANCHE_SHARED_TIDBITMAP,
220 	LWTRANCHE_PARALLEL_APPEND,
221 	LWTRANCHE_PER_XACT_PREDICATE_LIST,
222 	LWTRANCHE_FIRST_USER_DEFINED
223 }			BuiltinTrancheIds;
224 
225 /*
226  * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
227  * to LWLocks.  New code should instead use LWLock *.  However, for the
228  * convenience of third-party code, we include the following typedef.
229  */
230 typedef LWLock *LWLockId;
231 
232 #endif							/* LWLOCK_H */
233