/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>

#include "py/runtime.h"
#include "py/mpthread.h"
#include "py/gc.h"

#if MICROPY_PY_THREAD

#include <fcntl.h>
#include <signal.h>
#include <sched.h>
#include <semaphore.h>

#include "shared/runtime/gchelper.h"

// Some platforms don't have SIGRTMIN but if we do have it, use it to avoid
// potential conflict with other uses of the more commonly used SIGUSR1.
#ifdef SIGRTMIN
#define MP_THREAD_GC_SIGNAL (SIGRTMIN + 5)
#else
#define MP_THREAD_GC_SIGNAL (SIGUSR1)
#endif

// This value seems to be about right for both 32-bit and 64-bit builds.
#define THREAD_STACK_OVERFLOW_MARGIN (8192)

// this structure forms a linked list, one node per active thread
typedef struct _thread_t {
    pthread_t id;           // system id of thread
    int ready;              // whether the thread is ready and running
    void *arg;              // thread Python args, a GC root pointer
    struct _thread_t *next;
} thread_t;

STATIC pthread_key_t tls_key;

// The mutex is used for any code in this port that needs to be thread safe.
// Specifically for thread management, access to the linked list is one example.
// But also, e.g. scheduler state.
STATIC pthread_mutex_t thread_mutex;
STATIC thread_t *thread;

// this is used to synchronise the signal handler of the thread
// it's needed because we can't use any pthread calls in a signal handler
#if defined(__APPLE__)
STATIC char thread_signal_done_name[25];
STATIC sem_t *thread_signal_done_p;
#else
STATIC sem_t thread_signal_done;
#endif

void mp_thread_unix_begin_atomic_section(void) {
    pthread_mutex_lock(&thread_mutex);
}

void mp_thread_unix_end_atomic_section(void) {
    pthread_mutex_unlock(&thread_mutex);
}

// this signal handler is used to scan the regs and stack of a thread
STATIC void mp_thread_gc(int signo, siginfo_t *info, void *context) {
    (void)info; // unused
    (void)context; // unused
    if (signo == MP_THREAD_GC_SIGNAL) {
        gc_helper_collect_regs_and_stack();
        // We have access to the context (regs, stack) of the thread but it seems
        // that we don't need the extra information, enough is captured by the
        // gc_helper_collect_regs_and_stack function above
        // gc_collect_root((void**)context, sizeof(ucontext_t) / sizeof(uintptr_t));
        #if MICROPY_ENABLE_PYSTACK
        void **ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
        gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
        #endif
        #if defined(__APPLE__)
        sem_post(thread_signal_done_p);
        #else
        sem_post(&thread_signal_done);
        #endif
    }
}

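// Initialise threading support for the main thread: create the TLS key, the
// recursive mutex used for atomic sections, the first entry in the thread
// linked list, the GC semaphore, and install the GC signal handler.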
void mp_thread_init(void) {
    pthread_key_create(&tls_key, NULL);
    pthread_setspecific(tls_key, &mp_state_ctx.thread);

    // Needs to be a recursive mutex to emulate the behavior of
    // BEGIN_ATOMIC_SECTION on bare metal.
    pthread_mutexattr_t thread_mutex_attr;
    pthread_mutexattr_init(&thread_mutex_attr);
    pthread_mutexattr_settype(&thread_mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&thread_mutex, &thread_mutex_attr);

    // create first entry in linked list of all threads
    thread = malloc(sizeof(thread_t));
    thread->id = pthread_self();
    thread->ready = 1;
    thread->arg = NULL;
    thread->next = NULL;

    #if defined(__APPLE__)
    snprintf(thread_signal_done_name, sizeof(thread_signal_done_name), "micropython_sem_%ld", (long)thread->id);
    thread_signal_done_p = sem_open(thread_signal_done_name, O_CREAT | O_EXCL, 0666, 0);
    #else
    sem_init(&thread_signal_done, 0, 0);
    #endif

    // enable signal handler for garbage collection
    struct sigaction sa;
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = mp_thread_gc;
    sigemptyset(&sa.sa_mask);
    sigaction(MP_THREAD_GC_SIGNAL, &sa, NULL);
}

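// Shut down threading: cancel and free all other threads in the linked list;
// only the main thread's entry remains and is freed last.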
void mp_thread_deinit(void) {
    mp_thread_unix_begin_atomic_section();
    while (thread->next != NULL) {
        thread_t *th = thread;
        thread = thread->next;
        pthread_cancel(th->id);
        free(th);
    }
    mp_thread_unix_end_atomic_section();
    #if defined(__APPLE__)
    sem_close(thread_signal_done_p);
    sem_unlink(thread_signal_done_name);
    #endif
    assert(thread->id == pthread_self());
    free(thread);
}

// This function scans all pointers that are external to the current thread.
// It does this by signalling all other threads and getting them to scan their
// own registers and stack.  Note that there may still be some edge cases left
// with race conditions and root-pointer scanning: a given thread may manipulate
// the global root pointers (in mp_state_ctx) while another thread is doing a
// garbage collection and tracing these pointers.
void mp_thread_gc_others(void) {
    mp_thread_unix_begin_atomic_section();
    for (thread_t *th = thread; th != NULL; th = th->next) {
        gc_collect_root(&th->arg, 1);
        if (th->id == pthread_self()) {
            continue;
        }
        if (!th->ready) {
            continue;
        }
        pthread_kill(th->id, MP_THREAD_GC_SIGNAL);
        #if defined(__APPLE__)
        sem_wait(thread_signal_done_p);
        #else
        sem_wait(&thread_signal_done);
        #endif
    }
    mp_thread_unix_end_atomic_section();
}

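// The MicroPython state of each thread is stored in pthread thread-local storage.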
mp_state_thread_t *mp_thread_get_state(void) {
    return (mp_state_thread_t *)pthread_getspecific(tls_key);
}

void mp_thread_set_state(mp_state_thread_t *state) {
    pthread_setspecific(tls_key, state);
}

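// Called at the start of a newly created thread: enable asynchronous cancellation
// and mark the thread as ready so mp_thread_gc_others will signal it during a GC.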
void mp_thread_start(void) {
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    mp_thread_unix_begin_atomic_section();
    for (thread_t *th = thread; th != NULL; th = th->next) {
        if (th->id == pthread_self()) {
            th->ready = 1;
            break;
        }
    }
    mp_thread_unix_end_atomic_section();
}

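// Create a new, detached pthread running entry(arg).  On return, *stack_size is
// the usable stack size, i.e. the allocated size minus the overflow margin.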
void mp_thread_create(void *(*entry)(void *), void *arg, size_t *stack_size) {
    // default stack size is 8k machine-words
    if (*stack_size == 0) {
        *stack_size = 8192 * sizeof(void *);
    }

    // minimum stack size is set by pthreads
    if (*stack_size < PTHREAD_STACK_MIN) {
        *stack_size = PTHREAD_STACK_MIN;
    }

    // ensure there is enough stack to include a stack-overflow margin
    if (*stack_size < 2 * THREAD_STACK_OVERFLOW_MARGIN) {
        *stack_size = 2 * THREAD_STACK_OVERFLOW_MARGIN;
    }

    // set thread attributes
    pthread_attr_t attr;
    int ret = pthread_attr_init(&attr);
    if (ret != 0) {
        goto er;
    }
    ret = pthread_attr_setstacksize(&attr, *stack_size);
    if (ret != 0) {
        goto er;
    }

    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret != 0) {
        goto er;
    }

    mp_thread_unix_begin_atomic_section();

    // create thread
    pthread_t id;
    ret = pthread_create(&id, &attr, entry, arg);
    if (ret != 0) {
        mp_thread_unix_end_atomic_section();
        goto er;
    }

    // adjust stack_size to provide room to recover from hitting the limit
    *stack_size -= THREAD_STACK_OVERFLOW_MARGIN;

    // add thread to linked list of all threads
    thread_t *th = malloc(sizeof(thread_t));
    th->id = id;
    th->ready = 0;
    th->arg = arg;
    th->next = thread;
    thread = th;

    mp_thread_unix_end_atomic_section();

    return;

er:
    mp_raise_OSError(ret);
}

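// Called by a thread when it finishes, to remove its entry from the linked list.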
void mp_thread_finish(void) {
    mp_thread_unix_begin_atomic_section();
    thread_t *prev = NULL;
    for (thread_t *th = thread; th != NULL; th = th->next) {
        if (th->id == pthread_self()) {
            if (prev == NULL) {
                thread = th->next;
            } else {
                prev->next = th->next;
            }
            free(th);
            break;
        }
        prev = th;
    }
    mp_thread_unix_end_atomic_section();
}

void mp_thread_mutex_init(mp_thread_mutex_t *mutex) {
    pthread_mutex_init(mutex, NULL);
}

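// Lock the mutex, blocking only if wait is true.  Returns 1 on success, 0 if
// wait is false and the mutex is already locked, or a negative errno value on error.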
int mp_thread_mutex_lock(mp_thread_mutex_t *mutex, int wait) {
    int ret;
    if (wait) {
        ret = pthread_mutex_lock(mutex);
        if (ret == 0) {
            return 1;
        }
    } else {
        ret = pthread_mutex_trylock(mutex);
        if (ret == 0) {
            return 1;
        } else if (ret == EBUSY) {
            return 0;
        }
    }
    return -ret;
}

void mp_thread_mutex_unlock(mp_thread_mutex_t *mutex) {
    pthread_mutex_unlock(mutex);
    // TODO check return value
}

#endif // MICROPY_PY_THREAD