1 /*
2 * Implementation of the Global Interpreter Lock (GIL).
3 */
4
5 #include <stdlib.h>
6 #include <errno.h>
7
8 #include "pycore_atomic.h"
9
10
11 /*
12 Notes about the implementation:
13
14 - The GIL is just a boolean variable (locked) whose access is protected
15 by a mutex (gil_mutex), and whose changes are signalled by a condition
16 variable (gil_cond). gil_mutex is taken for short periods of time,
17 and therefore mostly uncontended.
18
19 - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
20 able to release the GIL on demand by another thread. A volatile boolean
21 variable (gil_drop_request) is used for that purpose, which is checked
22 at every turn of the eval loop. That variable is set after a wait of
23 `interval` microseconds on `gil_cond` has timed out.
24
25 [Actually, another volatile boolean variable (eval_breaker) is used
26 which ORs several conditions into one. Volatile booleans are
27 sufficient as inter-thread signalling means since Python is run
28 on cache-coherent architectures only.]
29
30 - A thread wanting to take the GIL will first let pass a given amount of
31 time (`interval` microseconds) before setting gil_drop_request. This
32 encourages a defined switching period, but doesn't enforce it since
33 opcodes can take an arbitrary time to execute.
34
35 The `interval` value is available for the user to read and modify
36 using the Python API `sys.{get,set}switchinterval()`.
37
38 - When a thread releases the GIL and gil_drop_request is set, that thread
39 ensures that another GIL-awaiting thread gets scheduled.
40 It does so by waiting on a condition variable (switch_cond) until
41 the value of last_holder is changed to something else than its
42 own thread state pointer, indicating that another thread was able to
43 take the GIL.
44
45 This is meant to prohibit the latency-adverse behaviour on multi-core
46 machines where one thread would speculatively release the GIL, but still
47 run and end up being the first to re-acquire it, making the "timeslices"
48 much longer than expected.
49 (Note: this mechanism is enabled with FORCE_SWITCHING above)
50 */
51
52 #include "condvar.h"
53
/* Fatal-error wrappers around the portable lock/condvar API (condvar.h).
   The GIL machinery cannot recover from a failing primitive, so every
   error aborts the process via Py_FatalError().  Each macro is wrapped in
   do { ... } while (0) so it expands to exactly one statement and is safe
   inside unbraced if/else bodies. */

#define MUTEX_INIT(mut) \
    do { \
        if (PyMUTEX_INIT(&(mut))) { \
            Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); \
        } \
    } while (0)
#define MUTEX_FINI(mut) \
    do { \
        if (PyMUTEX_FINI(&(mut))) { \
            Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); \
        } \
    } while (0)
#define MUTEX_LOCK(mut) \
    do { \
        if (PyMUTEX_LOCK(&(mut))) { \
            Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); \
        } \
    } while (0)
#define MUTEX_UNLOCK(mut) \
    do { \
        if (PyMUTEX_UNLOCK(&(mut))) { \
            Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); \
        } \
    } while (0)

#define COND_INIT(cond) \
    do { \
        if (PyCOND_INIT(&(cond))) { \
            Py_FatalError("PyCOND_INIT(" #cond ") failed"); \
        } \
    } while (0)
#define COND_FINI(cond) \
    do { \
        if (PyCOND_FINI(&(cond))) { \
            Py_FatalError("PyCOND_FINI(" #cond ") failed"); \
        } \
    } while (0)
#define COND_SIGNAL(cond) \
    do { \
        if (PyCOND_SIGNAL(&(cond))) { \
            Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); \
        } \
    } while (0)
#define COND_WAIT(cond, mut) \
    do { \
        if (PyCOND_WAIT(&(cond), &(mut))) { \
            Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \
        } \
    } while (0)
/* Timed wait: sets `timeout_result` to 1 on timeout (or when the
   implementation cannot tell), 0 otherwise.
   Fix: the fatal-error message previously said "PyCOND_WAIT" even though
   this macro calls PyCOND_TIMEDWAIT. */
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    do { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) { \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        } \
        /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
        timeout_result = (r != 0); \
    } while (0)


/* Default GIL switch interval, in microseconds (see the file-header notes:
   readable/writable via sys.{get,set}switchinterval()). */
#define DEFAULT_INTERVAL 5000
92
_gil_initialize(struct _gil_runtime_state * gil)93 static void _gil_initialize(struct _gil_runtime_state *gil)
94 {
95 _Py_atomic_int uninitialized = {-1};
96 gil->locked = uninitialized;
97 gil->interval = DEFAULT_INTERVAL;
98 }
99
gil_created(struct _gil_runtime_state * gil)100 static int gil_created(struct _gil_runtime_state *gil)
101 {
102 return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
103 }
104
create_gil(struct _gil_runtime_state * gil)105 static void create_gil(struct _gil_runtime_state *gil)
106 {
107 MUTEX_INIT(gil->mutex);
108 #ifdef FORCE_SWITCHING
109 MUTEX_INIT(gil->switch_mutex);
110 #endif
111 COND_INIT(gil->cond);
112 #ifdef FORCE_SWITCHING
113 COND_INIT(gil->switch_cond);
114 #endif
115 _Py_atomic_store_relaxed(&gil->last_holder, 0);
116 _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
117 _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
118 }
119
/* Tear down the primitives created by create_gil() and mark the GIL as
   uninitialized again (locked == -1, see gil_created()). */
static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    /* Release store pairs with the acquire load in gil_created(). */
    _Py_atomic_store_explicit(&gil->locked, -1,
                              _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}
135
/* Rebuild the GIL's OS primitives from scratch without FINI-ing the old
   ones.  NOTE(review): presumably used after fork(), where inherited
   locks may be in an unusable state — confirm against callers. */
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
142
/* Release the GIL.  `tstate` may be NULL during early interpreter
   initialization; otherwise it is the thread state of the caller, which
   must currently hold the GIL (fatal error if not). */
static void
drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
{
    struct _gil_runtime_state *gil = &ceval->gil;
    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil->locked, 0);
    /* Wake one thread blocked on gil->cond in take_gil(). */
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    /* If another thread asked us to drop the GIL, don't return until it
       has actually taken it: wait until last_holder is no longer us
       (see the file-header notes on switch_cond). */
    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
        {
            RESET_GIL_DROP_REQUEST(ceval);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}
182
183 static void
take_gil(struct _ceval_runtime_state * ceval,PyThreadState * tstate)184 take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
185 {
186 if (tstate == NULL) {
187 Py_FatalError("take_gil: NULL tstate");
188 }
189
190 struct _gil_runtime_state *gil = &ceval->gil;
191 int err = errno;
192 MUTEX_LOCK(gil->mutex);
193
194 if (!_Py_atomic_load_relaxed(&gil->locked)) {
195 goto _ready;
196 }
197
198 while (_Py_atomic_load_relaxed(&gil->locked)) {
199 int timed_out = 0;
200 unsigned long saved_switchnum;
201
202 saved_switchnum = gil->switch_number;
203
204
205 unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
206 COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
207 /* If we timed out and no switch occurred in the meantime, it is time
208 to ask the GIL-holding thread to drop it. */
209 if (timed_out &&
210 _Py_atomic_load_relaxed(&gil->locked) &&
211 gil->switch_number == saved_switchnum)
212 {
213 SET_GIL_DROP_REQUEST(ceval);
214 }
215 }
216 _ready:
217 #ifdef FORCE_SWITCHING
218 /* This mutex must be taken before modifying gil->last_holder:
219 see drop_gil(). */
220 MUTEX_LOCK(gil->switch_mutex);
221 #endif
222 /* We now hold the GIL */
223 _Py_atomic_store_relaxed(&gil->locked, 1);
224 _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
225
226 if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
227 _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
228 ++gil->switch_number;
229 }
230
231 #ifdef FORCE_SWITCHING
232 COND_SIGNAL(gil->switch_cond);
233 MUTEX_UNLOCK(gil->switch_mutex);
234 #endif
235 if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
236 RESET_GIL_DROP_REQUEST(ceval);
237 }
238 if (tstate->async_exc != NULL) {
239 _PyEval_SignalAsyncExc(ceval);
240 }
241
242 MUTEX_UNLOCK(gil->mutex);
243 errno = err;
244 }
245
_PyEval_SetSwitchInterval(unsigned long microseconds)246 void _PyEval_SetSwitchInterval(unsigned long microseconds)
247 {
248 _PyRuntime.ceval.gil.interval = microseconds;
249 }
250
_PyEval_GetSwitchInterval()251 unsigned long _PyEval_GetSwitchInterval()
252 {
253 return _PyRuntime.ceval.gil.interval;
254 }
255