/* thread.c
 * Copyright 1984-2017 Cisco Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "system.h"

static thread_gc *free_thread_gcs;

/* locally defined functions */
#ifdef PTHREADS
static s_thread_rv_t start_thread PROTO((void *tc));
static IBOOL destroy_thread PROTO((ptr tc));
#endif

void S_thread_init() {
  if (S_boot_time) {
    S_protect(&S_G.threadno);
    S_G.threadno = FIX(0);

#ifdef PTHREADS
   /* this is also reset in scheme.c after heap restoration */
    s_thread_mutex_init(&S_tc_mutex.pmutex);
    S_tc_mutex.owner = s_thread_self();
    S_tc_mutex.count = 0;
    s_thread_cond_init(&S_collect_cond);
    s_thread_cond_init(&S_collect_thread0_cond);
    s_thread_mutex_init(&S_alloc_mutex.pmutex);
    s_thread_cond_init(&S_terminated_cond);
    S_alloc_mutex.owner = 0;
    S_alloc_mutex.count = 0;

# ifdef IMPLICIT_ATOMIC_AS_EXPLICIT
    s_thread_mutex_init(&S_implicit_mutex);
# endif
#endif /* PTHREADS */
  }
}

/* this needs to be reworked.  currently, S_create_thread_object is
   called from main to create the base thread, from fork_thread when
   there is already an active current thread, and from S_activate_thread
   when there is no current thread.  scheme.c does part of the
   initialization of the base thread (e.g., parameters, current
   input/output ports) in one or more places. */
ptr S_create_thread_object(who, p_tc) const char *who; ptr p_tc; {
  ptr thread, tc;
  thread_gc *tgc;
  INT i;

  tc_mutex_acquire();

  if (S_threads == Snil) {
    tc = TO_PTR(S_G.thread_context);
    tgc = &S_G.main_thread_gc;
    GCDATA(tc) = TO_PTR(tgc);
    tgc->tc = tc;
  } else { /* clone parent */
    ptr p_v = PARAMETERS(p_tc);
    iptr i, n = Svector_length(p_v);
    ptr v;

    tc = TO_PTR(malloc(size_tc));
    if (free_thread_gcs) {
      tgc = free_thread_gcs;
      free_thread_gcs = tgc->next;
    } else
      tgc = malloc(sizeof(thread_gc));

    if (tc == (ptr)0 || tgc == NULL)
      S_error(who, "unable to malloc thread data structure");
    memcpy(TO_VOIDP(tc), TO_VOIDP(p_tc), size_tc);

    GCDATA(tc) = TO_PTR(tgc);
    tgc->tc = tc;

    {
      IGEN g; ISPC s;
      for (g = 0; g <= static_generation; g++) {
        for (s = 0; s <= max_real_space; s++) {
          tgc->base_loc[g][s] = (ptr)0;
          tgc->next_loc[g][s] = (ptr)0;
          tgc->bytes_left[g][s] = 0;
          tgc->sweep_loc[g][s] = (ptr)0;
          tgc->sweep_next[g][s] = NULL;
        }
        tgc->bitmask_overhead[g] = 0;
      }
    }

    tgc->during_alloc = 0;
    tgc->pending_ephemerons = (ptr)0;
    for (i = 0; i < (int)DIRTY_SEGMENT_LISTS; i++)
      tgc->dirty_segments[i] = NULL;
    tgc->queued_fire = 0;
    tgc->preserve_ownership = 0;

    v = S_vector_in(tc, space_new, 0, n);

    for (i = 0; i < n; i += 1)
      INITVECTIT(v, i) = Svector_ref(p_v, i);

    PARAMETERS(tc) = v;
    CODERANGESTOFLUSH(tc) = Snil;
  }

  tgc->sweeper = main_sweeper_index;

  /* override nonclonable tc fields */
  THREADNO(tc) = S_G.threadno;
  S_G.threadno = S_add(S_G.threadno, FIX(1));

  CCHAIN(tc) = Snil;

  WINDERS(tc) = Snil;
  ATTACHMENTS(tc) = Snil;
  CACHEDFRAME(tc) = Sfalse;
  STACKLINK(tc) = SYMVAL(S_G.null_continuation_id);
  STACKCACHE(tc) = Snil;

 /* S_reset_scheme_stack initializes stack, size, esp, and sfp */
  S_reset_scheme_stack(tc, stack_slop);
  FRAME(tc,0) = TO_PTR(&CODEIT(S_G.dummy_code_object,size_rp_header));

 /* S_reset_allocation_pointer initializes ap and eap */
  alloc_mutex_acquire();
  S_reset_allocation_pointer(tc);
  alloc_mutex_release();
  S_maybe_fire_collector(tgc);

  RANDOMSEED(tc) = most_positive_fixnum < 0xffffffff ? most_positive_fixnum : 0xffffffff;
  X(tc) = Y(tc) = U(tc) = V(tc) = W(tc) = FIX(0);

  TIMERTICKS(tc) = Sfalse;
  DISABLECOUNT(tc) = Sfixnum(0);
  SIGNALINTERRUPTPENDING(tc) = Sfalse;
  SIGNALINTERRUPTQUEUE(tc) = S_allocate_scheme_signal_queue();
  KEYBOARDINTERRUPTPENDING(tc) = Sfalse;

  TARGETMACHINE(tc) = S_intern((const unsigned char *)MACHINE_TYPE);

 /* choosing not to clone virtual registers */
  for (i = 0 ; i < virtual_register_count ; i += 1) {
    VIRTREG(tc, i) = FIX(0);
  }

  DSTBV(tc) = SRCBV(tc) = Sfalse;

  thread = S_thread(tc);

  S_threads = S_cons_in(tc, space_new, 0, thread, S_threads);
  S_nthreads += 1;
  SETSYMVAL(S_G.active_threads_id,
   FIX(UNFIX(SYMVAL(S_G.active_threads_id)) + 1));
  ACTIVE(tc) = 1;

 /* a collect request is the only thing that can be pending for a new
    thread.  this must happen after we're on the thread list, in case
    the cons that added us to the thread list set collect-request-pending */
  SOMETHINGPENDING(tc) = SYMVAL(S_G.collect_request_pending_id);

  GUARDIANENTRIES(tc) = Snil;

  LZ4OUTBUFFER(tc) = 0;

  CP(tc) = 0;

  tc_mutex_release();

  return thread;
}
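
/* Illustrative recap of the call paths described above (a sketch, not a
   fourth entry point; "who" strings other than the two appearing in this
   file are placeholders):

     S_create_thread_object(who, p_tc);                            // base thread at boot: S_threads == Snil, p_tc unused
     S_create_thread_object("fork-thread", get_thread_context());  // S_fork_thread: clone the active parent tc
     S_create_thread_object("Sactivate_thread", TO_PTR(S_G.thread_context)); // Sactivate_thread: borrow the base tc
*/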

#ifdef PTHREADS
IBOOL Sactivate_thread() { /* create or reactivate current thread */
  ptr tc = get_thread_context();

  if (tc == (ptr)0) { /* thread created by someone else */
    ptr thread;

   /* borrow base thread to clone */
    thread = S_create_thread_object("Sactivate_thread", TO_PTR(S_G.thread_context));
    s_thread_setspecific(S_tc_key, TO_VOIDP(THREADTC(thread)));
    return 1;
  } else {
    reactivate_thread(tc)
    return 0;
  }
}
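
/* Usage sketch for a thread created outside Scheme (e.g., by the host
   application): it must activate itself before touching Scheme data and
   should deactivate or destroy itself afterward. The worker body and
   thunk here are hypothetical:

     void *worker(void *arg) {
       Sactivate_thread();           // returns 1: a fresh tc was created
       Scall0(some_scheme_thunk);    // safe to call into Scheme while active
       Sdestroy_thread();            // tear down the borrowed tc
       return NULL;
     }
*/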

int S_activate_thread() { /* Like Sactivate_thread(), but returns a mode to revert the effect */
  ptr tc = get_thread_context();

  if (tc == (ptr)0) {
    Sactivate_thread();
    return unactivate_mode_destroy;
  } else if (!ACTIVE(tc)) {
    reactivate_thread(tc);
    return unactivate_mode_deactivate;
  } else
    return unactivate_mode_noop;
}

void S_unactivate_thread(int mode) { /* Reverts a previous S_activate_thread() effect */
  switch (mode) {
  case unactivate_mode_deactivate:
    Sdeactivate_thread();
    break;
  case unactivate_mode_destroy:
    Sdestroy_thread();
    break;
  case unactivate_mode_noop:
  default:
    break;
  }
}
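
/* Pairing sketch: S_activate_thread/S_unactivate_thread bracket a section
   that needs the thread active, restoring exactly the state found on
   entry, whichever of the three modes applied (hypothetical caller):

     int mode = S_activate_thread();  // destroy, deactivate, or noop mode
     ... call into Scheme ...
     S_unactivate_thread(mode);       // undo only what activation did
*/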

void Sdeactivate_thread() { /* deactivate current thread */
  ptr tc = get_thread_context();
  if (tc != (ptr)0) deactivate_thread(tc)
}

int Sdestroy_thread() { /* destroy current thread */
  ptr tc = get_thread_context();
  if (tc != (ptr)0 && destroy_thread(tc)) {
    s_thread_setspecific(S_tc_key, 0);
    return 1;
  }
  return 0;
}

static IBOOL destroy_thread(tc) ptr tc; {
  ptr *ls; IBOOL status;

  status = 0;
  tc_mutex_acquire();
  ls = &S_threads;
  while (*ls != Snil) {
    ptr thread = Scar(*ls);
    if (THREADTC(thread) == (uptr)tc) {
      *ls = Scdr(*ls);
      S_nthreads -= 1;

      alloc_mutex_acquire();

     /* process remembered set before dropping allocation area */
      S_scan_dirty((ptr *)EAP(tc), (ptr *)REAL_EAP(tc));

     /* close off thread-local allocation */
      S_thread_start_code_write(tc, static_generation, 0, NULL, 0);
      {
        ISPC s; IGEN g;
        thread_gc *tgc = THREAD_GC(tc);
        for (g = 0; g <= static_generation; g++)
          for (s = 0; s <= max_real_space; s++)
            if (tgc->next_loc[g][s])
              S_close_off_thread_local_segment(tc, s, g);
      }
      S_thread_end_code_write(tc, static_generation, 0, NULL, 0);

      alloc_mutex_release();

     /* process guardian entries: splice entries whose objects are still
        collectable onto the global guardian list so a future collection
        will handle them */
      {
        ptr target, ges, obj, next; seginfo *si;
        target = S_G.guardians[0];
        for (ges = GUARDIANENTRIES(tc); ges != Snil; ges = next) {
          obj = GUARDIANOBJ(ges);
          next = GUARDIANNEXT(ges);
          if (!FIXMEDIATE(obj) && (si = MaybeSegInfo(ptr_get_segment(obj))) != NULL && si->generation != static_generation) {
            INITGUARDIANNEXT(ges) = target;
            target = ges;
          }
        }
        S_G.guardians[0] = target;
      }

     /* deactivate thread */
      if (ACTIVE(tc)) {
        SETSYMVAL(S_G.active_threads_id,
         FIX(UNFIX(SYMVAL(S_G.active_threads_id)) - 1));
        if (Sboolean_value(SYMVAL(S_G.collect_request_pending_id))
            && SYMVAL(S_G.active_threads_id) == FIX(0)) {
          s_thread_cond_signal(&S_collect_cond);
          s_thread_cond_signal(&S_collect_thread0_cond);
        }
      }

      if (LZ4OUTBUFFER(tc) != (ptr)0) free(TO_VOIDP(LZ4OUTBUFFER(tc)));
      if (SIGNALINTERRUPTQUEUE(tc) != (ptr)0) free(TO_VOIDP(SIGNALINTERRUPTQUEUE(tc)));

      if (THREAD_GC(tc)->preserve_ownership)
        --S_num_preserve_ownership_threads;

      /* Never free a thread_gc, since it may be recorded in a segment
         as the segment's creator. Recycle manually, instead. */
      THREAD_GC(tc)->sweeper = main_sweeper_index;
      THREAD_GC(tc)->tc = (ptr)0;
      THREAD_GC(tc)->next = free_thread_gcs;
      free_thread_gcs = THREAD_GC(tc);

      free((void *)tc);

      THREADTC(thread) = 0; /* mark it dead */
      status = 1;

      s_thread_cond_broadcast(&S_terminated_cond);
      break;
    }
    ls = &Scdr(*ls);
  }
  tc_mutex_release();
  return status;
}

ptr S_fork_thread(thunk) ptr thunk; {
  ptr thread;
  int status;

  /* pass the current thread's context as the parent thread */
  thread = S_create_thread_object("fork-thread", get_thread_context());
  CP(THREADTC(thread)) = thunk;

  if ((status = s_thread_create(start_thread, TO_VOIDP(THREADTC(thread)))) != 0) {
    destroy_thread((ptr)THREADTC(thread));
    S_error1("fork-thread", "failed: ~a", S_strerror(status));
  }

  return thread;
}
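
/* Flow sketch: S_fork_thread is reached from the Scheme primitive
   fork-thread, e.g. (fork-thread (lambda () (do-work))), where do-work
   is hypothetical. The thunk rides to the child in the cloned tc's CP
   register, and start_thread below retrieves and invokes it; only a
   failure of s_thread_create is reported synchronously to the parent. */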

static s_thread_rv_t start_thread(p) void *p; {
  ptr tc = (ptr)p; ptr cp;

  s_thread_setspecific(S_tc_key, TO_VOIDP(tc));

  cp = CP(tc);
  CP(tc) = Svoid; /* should hold calling code object, which we don't have */
  TRAP(tc) = (ptr)default_timer_ticks;
  Scall0(cp);
 /* caution: calling into Scheme may result in a collection, so we
    can't access any Scheme objects, e.g., cp, after this point.  But tc
    is static, so we can access it. */

 /* find and destroy our thread */
  destroy_thread(tc);
  s_thread_setspecific(S_tc_key, NULL);

  s_thread_return;
}


scheme_mutex_t *S_make_mutex() {
  scheme_mutex_t *m;

  m = (scheme_mutex_t *)malloc(sizeof(scheme_mutex_t));

  if (m == (scheme_mutex_t *)0)
    S_error("make-mutex", "unable to malloc mutex");
  s_thread_mutex_init(&m->pmutex);
  m->owner = s_thread_self();
  m->count = 0;

  return m;
}

void S_mutex_free(m) scheme_mutex_t *m; {
  s_thread_mutex_destroy(&m->pmutex);
  free(m);
}

void S_mutex_acquire(scheme_mutex_t *m) NO_THREAD_SANITIZE {
  s_thread_t self = s_thread_self();
  iptr count;
  INT status;

  if ((count = m->count) > 0 && s_thread_equal(m->owner, self)) {
    if (count == most_positive_fixnum)
      S_error1("mutex-acquire", "recursion limit exceeded for ~s", TO_PTR(m));
    m->count = count + 1;
    return;
  }

  if ((status = s_thread_mutex_lock(&m->pmutex)) != 0)
    S_error1("mutex-acquire", "failed: ~a", S_strerror(status));
  m->owner = self;
  m->count = 1;
}

INT S_mutex_tryacquire(scheme_mutex_t *m) NO_THREAD_SANITIZE {
  s_thread_t self = s_thread_self();
  iptr count;
  INT status;

  if ((count = m->count) > 0 && s_thread_equal(m->owner, self)) {
    if (count == most_positive_fixnum)
      S_error1("mutex-acquire", "recursion limit exceeded for ~s", TO_PTR(m));
    m->count = count + 1;
    return 0;
  }

  status = s_thread_mutex_trylock(&m->pmutex);
  if (status == 0) {
    m->owner = self;
    m->count = 1;
  } else if (status != EBUSY) {
    S_error1("mutex-acquire", "failed: ~a", S_strerror(status));
  }
  return status;
}

IBOOL S_mutex_is_owner(scheme_mutex_t *m) NO_THREAD_SANITIZE {
  s_thread_t self = s_thread_self();
  return ((m->count > 0) && s_thread_equal(m->owner, self));
}

void S_mutex_release(scheme_mutex_t *m) NO_THREAD_SANITIZE {
  s_thread_t self = s_thread_self();
  iptr count;
  INT status;

  if ((count = m->count) == 0 || !s_thread_equal(m->owner, self))
    S_error1("mutex-release", "thread does not own mutex ~s", TO_PTR(m));

  if ((m->count = count - 1) == 0) {
    m->owner = 0; /* needed for a memory model like ARM, for example */
    if ((status = s_thread_mutex_unlock(&m->pmutex)) != 0)
      S_error1("mutex-release", "failed: ~a", S_strerror(status));
  }
}
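
/* Lifecycle sketch for these recursive mutexes (hypothetical caller):

     scheme_mutex_t *m = S_make_mutex();
     S_mutex_acquire(m);              // count = 1, owner = self
     S_mutex_acquire(m);              // same owner: count = 2, pmutex untouched
     if (S_mutex_tryacquire(m) == 0)  // same owner: succeeds, count = 3
       S_mutex_release(m);            // count = 2
     S_mutex_release(m);              // count = 1
     S_mutex_release(m);              // count = 0, underlying pmutex unlocked
     S_mutex_free(m);                 // only safe once no thread holds it
*/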

s_thread_cond_t *S_make_condition() {
  s_thread_cond_t *c;

  c = (s_thread_cond_t *)malloc(sizeof(s_thread_cond_t));
  if (c == (s_thread_cond_t *)0)
    S_error("make-condition", "unable to malloc condition");
  s_thread_cond_init(c);
  return c;
}

void S_condition_free(c) s_thread_cond_t *c; {
  s_thread_cond_destroy(c);
  free(c);
}

#ifdef FEATURE_WINDOWS

static inline int s_thread_cond_timedwait(s_thread_cond_t *cond, s_thread_mutex_t *mutex, int typeno, I64 sec, long nsec) {
  if (typeno == time_utc) {
    /* absolute timeout: convert to a duration relative to now */
    struct timespec now;
    S_gettime(time_utc, &now);
    sec -= now.tv_sec;
    nsec -= now.tv_nsec;
    if (nsec < 0) {
      sec -= 1;
      nsec += 1000000000;
    }
  }
  if (sec < 0) {
    /* deadline already passed: poll rather than wait */
    sec = 0;
    nsec = 0;
  }
  if (SleepConditionVariableCS(cond, mutex, (DWORD)(sec*1000 + (nsec+500000)/1000000))) {
    return 0;
  } else if (GetLastError() == ERROR_TIMEOUT) {
    return ETIMEDOUT;
  } else {
    return EINVAL;
  }
}
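
/* Worked example of the conversion above: a time_utc deadline that is
   1 second and 500000 ns past "now" leaves sec = 1, nsec = 500000, so
   the wait becomes 1*1000 + (500000+500000)/1000000 = 1001 ms; i.e.,
   nanoseconds are rounded to the nearest millisecond, half up. */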

#else /* FEATURE_WINDOWS */

static inline int s_thread_cond_timedwait(s_thread_cond_t *cond, s_thread_mutex_t *mutex, int typeno, I64 sec, long nsec) {
  struct timespec t;
  if (typeno == time_duration) {
    /* relative timeout: pthread_cond_timedwait wants an absolute time,
       so add the duration to the current UTC time */
    struct timespec now;
    S_gettime(time_utc, &now);
    t.tv_sec = (time_t)(now.tv_sec + sec);
    t.tv_nsec = now.tv_nsec + nsec;
    if (t.tv_nsec >= 1000000000) {
      t.tv_sec += 1;
      t.tv_nsec -= 1000000000;
    }
  } else {
    /* already an absolute time_utc value */
    t.tv_sec = sec;
    t.tv_nsec = nsec;
  }
  return pthread_cond_timedwait(cond, mutex, &t);
}

#endif /* FEATURE_WINDOWS */

#define Srecord_ref(x,i) (((ptr *)((uptr)(x)+record_data_disp))[i])

IBOOL S_condition_wait(c, m, t) s_thread_cond_t *c; scheme_mutex_t *m; ptr t; {
  ptr tc = get_thread_context();
  s_thread_t self = s_thread_self();
  iptr count;
  INT typeno;
  I64 sec;
  long nsec;
  INT status;
  IBOOL is_collect;
  iptr collect_index = 0;

  if ((count = m->count) == 0 || !s_thread_equal(m->owner, self))
    S_error1("condition-wait", "thread does not own mutex ~s", TO_PTR(m));

  if (count != 1)
    S_error1("condition-wait", "mutex ~s is recursively locked", TO_PTR(m));

  if (t != Sfalse) {
    /* Keep in sync with ts record in s/date.ss */
    typeno = Sinteger32_value(Srecord_ref(t,0));
    sec = Sinteger64_value(Scar(Srecord_ref(t,1)));
    nsec = Sinteger32_value(Scdr(Srecord_ref(t,1)));
  } else {
    typeno = 0;
    sec = 0;
    nsec = 0;
  }

  is_collect = (c == &S_collect_cond || c == &S_collect_thread0_cond);

  if (is_collect) {
    /* Remember the index where we record this tc, because a thread
       might temporarily wait for collection, but then get woken
       up (e.g., to make the main thread drive the collection) before
       a collection actually happens. */
    int i;
    S_collect_waiting_threads++;
    collect_index = maximum_parallel_collect_threads;
    if (S_collect_waiting_threads <= maximum_parallel_collect_threads) {
      /* look for an open slot in `S_collect_waiting_tcs` */
      for (i = 0; i < maximum_parallel_collect_threads; i++) {
        if (S_collect_waiting_tcs[i] == (ptr)0) {
          collect_index = i;
          S_collect_waiting_tcs[collect_index] = tc;
          break;
        }
      }
    }
  }

  if (is_collect || DISABLECOUNT(tc) == 0) {
    deactivate_thread_signal_collect(tc, !is_collect)
  }

  m->count = 0;
  status = (t == Sfalse) ? s_thread_cond_wait(c, &m->pmutex) :
    s_thread_cond_timedwait(c, &m->pmutex, typeno, sec, nsec);
  m->owner = self;
  m->count = 1;

  if (is_collect || DISABLECOUNT(tc) == 0) {
    reactivate_thread(tc)
  }

  if (is_collect) {
    --S_collect_waiting_threads;
    if (collect_index < maximum_parallel_collect_threads)
      S_collect_waiting_tcs[collect_index] = (ptr)0;
  }

  if (status == 0) {
    return 1;
  } else if (status == ETIMEDOUT) {
    return 0;
  } else {
    S_error1("condition-wait", "failed: ~a", S_strerror(status));
    return 0;
  }
}
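
/* Usage sketch (hypothetical caller): the classic wait loop. The mutex
   must be held exactly once (not recursively), and the predicate is
   rechecked after each wakeup, since a wakeup may be spurious and a
   timeout also returns with the mutex reacquired; "ready" and "timeout"
   are hypothetical, and timeout is a Scheme time record or Sfalse:

     S_mutex_acquire(m);
     while (!ready) {
       if (!S_condition_wait(c, m, timeout))  // 0 => timed out
         break;
     }
     S_mutex_release(m);
*/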
#endif /* PTHREADS */