/* thread.c
 * Copyright 1984-2017 Cisco Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "system.h"

/* locally defined functions */
#ifdef PTHREADS
static s_thread_rv_t start_thread PROTO((void *tc));
static IBOOL destroy_thread PROTO((ptr tc));
#endif

S_thread_init()25 void S_thread_init() {
26   if (S_boot_time) {
27     S_protect(&S_G.threadno);
28     S_G.threadno = FIX(0);
29 
30 #ifdef PTHREADS
31    /* this is also reset in scheme.c after heap restoration */
32     s_thread_mutex_init(&S_tc_mutex.pmutex);
33     S_tc_mutex.owner = s_thread_self();
34     S_tc_mutex.count = 0;
35     s_thread_cond_init(&S_collect_cond);
36     S_tc_mutex_depth = 0;
37 #endif /* PTHREADS */
38   }
39 }
40 
/* this needs to be reworked.  currently, S_create_thread_object is
   called from main to create the base thread, from fork_thread when
   there is already an active current thread, and from S_activate_thread
   when there is no current thread.  we have to avoid thread-local
   allocation in at least the latter case, so we call vector_in and
   cons_in and arrange for S_thread to use find_room rather than
   thread_find_room.  scheme.c does part of the initialization of the
   base thread (e.g., parameters, current input/output ports) in one
   or more places. */
/* Create and initialize a thread object and its thread context (tc).
   who: caller name used in error reporting.
   p_tc: parent tc to clone; ignored when this is the very first
   (base) thread, which reuses the statically allocated context.
   Returns the new thread record, already linked onto S_threads and
   counted as active.  Because the caller may have no current thread,
   no thread-local allocation may occur here (hence S_vector_in,
   S_cons_in, and the requirement on S_thread below). */
ptr S_create_thread_object(who, p_tc) const char *who; ptr p_tc; {
  ptr thread, tc;
  INT i;

  tc_mutex_acquire()

  if (S_threads == Snil) {
   /* first thread: use the statically allocated base context */
    tc = (ptr)S_G.thread_context;
  } else { /* clone parent */
    ptr p_v = PARAMETERS(p_tc);
    iptr i, n = Svector_length(p_v); /* note: this i shadows the INT i above */
   /* use S_vector_in to avoid thread-local allocation */
    ptr v = S_vector_in(space_new, 0, n);

    tc = (ptr)malloc(size_tc);
    if (tc == (ptr)0)
      S_error(who, "unable to malloc thread data structure");
    memcpy((void *)tc, (void *)p_tc, size_tc);

   /* copy the parent's parameter vector into the fresh vector */
    for (i = 0; i < n; i += 1)
      INITVECTIT(v, i) = Svector_ref(p_v, i);

    PARAMETERS(tc) = v;
    CODERANGESTOFLUSH(tc) = Snil;
  }

 /* override nonclonable tc fields */
  THREADNO(tc) = S_G.threadno;
  S_G.threadno = S_add(S_G.threadno, FIX(1));

  CCHAIN(tc) = Snil;

  WINDERS(tc) = Snil;
  STACKLINK(tc) = SYMVAL(S_G.null_continuation_id);
  STACKCACHE(tc) = Snil;

 /* S_reset_scheme_stack initializes stack, size, esp, and sfp */
  S_reset_scheme_stack(tc, stack_slop);
  FRAME(tc,0) = (ptr)&CODEIT(S_G.dummy_code_object,size_rp_header);

 /* S_reset_allocation_pointer initializes ap and eap */
  S_reset_allocation_pointer(tc);
  RANDOMSEED(tc) = most_positive_fixnum < 0xffffffff ? most_positive_fixnum : 0xffffffff;
  X(tc) = Y(tc) = U(tc) = V(tc) = W(tc) = FIX(0);

  TIMERTICKS(tc) = Sfalse;
  DISABLECOUNT(tc) = Sfixnum(0);
  SIGNALINTERRUPTPENDING(tc) = Sfalse;
  SIGNALINTERRUPTQUEUE(tc) = S_allocate_scheme_signal_queue();
  KEYBOARDINTERRUPTPENDING(tc) = Sfalse;

  TARGETMACHINE(tc) = S_intern((const unsigned char *)MACHINE_TYPE);

 /* choosing not to clone virtual registers */
  for (i = 0 ; i < virtual_register_count ; i += 1) {
    VIRTREG(tc, i) = FIX(0);
  }

  DSTBV(tc) = SRCBV(tc) = Sfalse;

 /* S_thread had better not do thread-local allocation */
  thread = S_thread(tc);

 /* use S_cons_in to avoid thread-local allocation */
  S_threads = S_cons_in(space_new, 0, thread, S_threads);
  S_nthreads += 1;
  SETSYMVAL(S_G.active_threads_id,
   FIX(UNFIX(SYMVAL(S_G.active_threads_id)) + 1));
  ACTIVE(tc) = 1;

 /* collect request is only thing that can be pending for new thread.
    must do this after we're on the thread list in case the cons
    adding us onto the thread list set collect-request-pending */
  SOMETHINGPENDING(tc) = SYMVAL(S_G.collect_request_pending_id);

  GUARDIANENTRIES(tc) = Snil;

  LZ4OUTBUFFER(tc) = NULL;

  tc_mutex_release()

  return thread;
}

#ifdef PTHREADS
Sactivate_thread()135 IBOOL Sactivate_thread() { /* create or reactivate current thread */
136   ptr tc = get_thread_context();
137 
138   if (tc == (ptr)0) { /* thread created by someone else */
139     ptr thread;
140 
141    /* borrow base thread for now */
142     thread = S_create_thread_object("Sactivate_thread", S_G.thread_context);
143     s_thread_setspecific(S_tc_key, (ptr)THREADTC(thread));
144     return 1;
145   } else {
146     reactivate_thread(tc)
147     return 0;
148   }
149 }
150 
S_activate_thread()151 int S_activate_thread() { /* Like Sactivate_thread(), but returns a mode to revert the effect */
152   ptr tc = get_thread_context();
153 
154   if (tc == (ptr)0) {
155     Sactivate_thread();
156     return unactivate_mode_destroy;
157   } else if (!ACTIVE(tc)) {
158     reactivate_thread(tc);
159     return unactivate_mode_deactivate;
160   } else
161     return unactivate_mode_noop;
162 }
163 
S_unactivate_thread(int mode)164 void S_unactivate_thread(int mode) { /* Reverts a previous S_activate_thread() effect */
165   switch (mode) {
166   case unactivate_mode_deactivate:
167     Sdeactivate_thread();
168     break;
169   case unactivate_mode_destroy:
170     Sdestroy_thread();
171     break;
172   case unactivate_mode_noop:
173   default:
174     break;
175   }
176 }
177 
Sdeactivate_thread()178 void Sdeactivate_thread() { /* deactivate current thread */
179   ptr tc = get_thread_context();
180   if (tc != (ptr)0) deactivate_thread(tc)
181 }
182 
Sdestroy_thread()183 int Sdestroy_thread() { /* destroy current thread */
184   ptr tc = get_thread_context();
185   if (tc != (ptr)0 && destroy_thread(tc)) {
186     s_thread_setspecific(S_tc_key, 0);
187     return 1;
188   }
189   return 0;
190 }
191 
/* Unlink the thread whose context is tc from S_threads and free its
   resources (tc itself, its LZ4 buffer, and its signal queue).
   Returns 1 if the thread was found and destroyed, 0 otherwise.
   Holds the tc mutex for the duration. */
static IBOOL destroy_thread(tc) ptr tc; {
  ptr *ls; IBOOL status;

  status = 0;
  tc_mutex_acquire()
  ls = &S_threads;
  while (*ls != Snil) {
    ptr thread = Scar(*ls);
    if (THREADTC(thread) == (uptr)tc) {
     /* unlink from the global thread list */
      *ls = Scdr(*ls);
      S_nthreads -= 1;

     /* process remembered set before dropping allocation area */
      S_scan_dirty((ptr **)EAP(tc), (ptr **)REAL_EAP(tc));

     /* process guardian entries: push this thread's entries whose
        objects are not immediates and not in the static generation
        onto the global guardians[0] list so they survive the thread */
      {
	ptr target, ges, obj, next; seginfo *si;
	target = S_G.guardians[0];
	for (ges = GUARDIANENTRIES(tc); ges != Snil; ges = next) {
	  obj = GUARDIANOBJ(ges);
	  next = GUARDIANNEXT(ges);
	  if (!IMMEDIATE(obj) && (si = MaybeSegInfo(ptr_get_segment(obj))) != NULL && si->generation != static_generation) {
	    INITGUARDIANNEXT(ges) = target;
	    target = ges;
	  }
	}
	S_G.guardians[0] = target;
      }

     /* deactivate thread: drop the active count and, if a collection
        request is pending and this was the last active thread, signal
        the waiting collector */
      if (ACTIVE(tc)) {
        SETSYMVAL(S_G.active_threads_id,
         FIX(UNFIX(SYMVAL(S_G.active_threads_id)) - 1));
        if (Sboolean_value(SYMVAL(S_G.collect_request_pending_id))
            && SYMVAL(S_G.active_threads_id) == FIX(0)) {
          s_thread_cond_signal(&S_collect_cond);
        }
      }

      if (LZ4OUTBUFFER(tc) != NULL) free(LZ4OUTBUFFER(tc));
      if (SIGNALINTERRUPTQUEUE(tc) != NULL) free(SIGNALINTERRUPTQUEUE(tc));

      free((void *)tc);
      THREADTC(thread) = 0; /* mark it dead */
      status = 1;
      break;
    }
    ls = &Scdr(*ls);
  }
  tc_mutex_release()
  return status;
}

S_fork_thread(thunk)246 ptr S_fork_thread(thunk) ptr thunk; {
247   ptr thread;
248   int status;
249 
250   /* pass the current thread's context as the parent thread */
251   thread = S_create_thread_object("fork-thread", get_thread_context());
252   CP(THREADTC(thread)) = thunk;
253 
254   if ((status = s_thread_create(start_thread, (void *)THREADTC(thread))) != 0) {
255     destroy_thread((ptr)THREADTC(thread));
256     S_error1("fork-thread", "failed: ~a", S_strerror(status));
257   }
258 
259   return thread;
260 }
261 
/* OS-level entry point for threads created by S_fork_thread.  p is the
   new thread's tc.  Installs tc as this OS thread's context, runs the
   thunk stashed in CP by S_fork_thread, then destroys the thread. */
static s_thread_rv_t start_thread(p) void *p; {
  ptr tc = (ptr)p; ptr cp;

  s_thread_setspecific(S_tc_key, tc);

  cp = CP(tc);
  CP(tc) = Svoid; /* should hold calling code object, which we don't have */
  TRAP(tc) = (ptr)default_timer_ticks;
  Scall0(cp);
 /* caution: calling into Scheme may result into a collection, so we
    can't access any Scheme objects, e.g., cp, after this point.  But tc
    is static, so we can access it. */

 /* find and destroy our thread */
  destroy_thread(tc);
  s_thread_setspecific(S_tc_key, (ptr)0);

  s_thread_return;
}


S_make_mutex()283 scheme_mutex_t *S_make_mutex() {
284   scheme_mutex_t *m;
285 
286   m = (scheme_mutex_t *)malloc(sizeof(scheme_mutex_t));
287 
288   if (m == (scheme_mutex_t *)0)
289     S_error("make-mutex", "unable to malloc mutex");
290   s_thread_mutex_init(&m->pmutex);
291   m->owner = s_thread_self();
292   m->count = 0;
293 
294   return m;
295 }
296 
S_mutex_free(m)297 void S_mutex_free(m) scheme_mutex_t *m; {
298   s_thread_mutex_destroy(&m->pmutex);
299   free(m);
300 }
301 
S_mutex_acquire(m)302 void S_mutex_acquire(m) scheme_mutex_t *m; {
303   s_thread_t self = s_thread_self();
304   iptr count;
305   INT status;
306 
307   if ((count = m->count) > 0 && s_thread_equal(m->owner, self)) {
308     if (count == most_positive_fixnum)
309       S_error1("mutex-acquire", "recursion limit exceeded for ~s", m);
310     m->count = count + 1;
311     return;
312   }
313 
314   if ((status = s_thread_mutex_lock(&m->pmutex)) != 0)
315     S_error1("mutex-acquire", "failed: ~a", S_strerror(status));
316   m->owner = self;
317   m->count = 1;
318 }
319 
S_mutex_tryacquire(m)320 INT S_mutex_tryacquire(m) scheme_mutex_t *m; {
321   s_thread_t self = s_thread_self();
322   iptr count;
323   INT status;
324 
325   if ((count = m->count) > 0 && s_thread_equal(m->owner, self)) {
326     if (count == most_positive_fixnum)
327       S_error1("mutex-acquire", "recursion limit exceeded for ~s", m);
328     m->count = count + 1;
329     return 0;
330   }
331 
332   status = s_thread_mutex_trylock(&m->pmutex);
333   if (status == 0) {
334     m->owner = self;
335     m->count = 1;
336   } else if (status != EBUSY) {
337     S_error1("mutex-acquire", "failed: ~a", S_strerror(status));
338   }
339   return status;
340 }
341 
S_mutex_release(m)342 void S_mutex_release(m) scheme_mutex_t *m; {
343   s_thread_t self = s_thread_self();
344   iptr count;
345   INT status;
346 
347   if ((count = m->count) == 0 || !s_thread_equal(m->owner, self))
348     S_error1("mutex-release", "thread does not own mutex ~s", m);
349 
350   if ((m->count = count - 1) == 0)
351     if ((status = s_thread_mutex_unlock(&m->pmutex)) != 0)
352       S_error1("mutex-release", "failed: ~a", S_strerror(status));
353 }
354 
S_make_condition()355 s_thread_cond_t *S_make_condition() {
356   s_thread_cond_t *c;
357 
358   c = (s_thread_cond_t *)malloc(sizeof(s_thread_cond_t));
359   if (c == (s_thread_cond_t *)0)
360     S_error("make-condition", "unable to malloc condition");
361   s_thread_cond_init(c);
362   return c;
363 }
364 
S_condition_free(c)365 void S_condition_free(c) s_thread_cond_t *c; {
366   s_thread_cond_destroy(c);
367   free(c);
368 }
369 
#ifdef FEATURE_WINDOWS

/* Windows stand-in for pthread_cond_timedwait.  typeno selects how
   (sec, nsec) is interpreted: time_utc means an absolute UTC deadline
   (converted here to a relative wait), anything else is treated as a
   relative duration already.  Returns 0 on wakeup, ETIMEDOUT on
   timeout, EINVAL on any other failure. */
static inline int s_thread_cond_timedwait(s_thread_cond_t *cond, s_thread_mutex_t *mutex, int typeno, I64 sec, long nsec) {
  if (typeno == time_utc) {
   /* convert absolute deadline to time remaining from now */
    struct timespec now;
    S_gettime(time_utc, &now);
    sec -= now.tv_sec;
    nsec -= now.tv_nsec;
    if (nsec < 0) {
      sec -= 1;
      nsec += 1000000000;
    }
  }
  if (sec < 0) {
   /* deadline already passed: poll rather than wait */
    sec = 0;
    nsec = 0;
  }
 /* NOTE(review): sec*1000 can exceed DWORD range for very large
    timeouts -- presumably bounded in practice by callers; confirm */
  if (SleepConditionVariableCS(cond, mutex, (DWORD)(sec*1000 + (nsec+500000)/1000000))) {
    return 0;
  } else if (GetLastError() == ERROR_TIMEOUT) {
    return ETIMEDOUT;
  } else {
    return EINVAL;
  }
}

#else /* FEATURE_WINDOWS */

/* POSIX wrapper around pthread_cond_timedwait.  typeno selects how
   (sec, nsec) is interpreted: time_duration means relative to now and
   is converted here to the absolute timespec pthreads requires;
   anything else is passed through as an absolute time.  Returns the
   status from pthread_cond_timedwait (0, ETIMEDOUT, or an error). */
static inline int s_thread_cond_timedwait(s_thread_cond_t *cond, s_thread_mutex_t *mutex, int typeno, I64 sec, long nsec) {
  struct timespec t;
  if (typeno == time_duration) {
    struct timespec now;
    S_gettime(time_utc, &now);
    t.tv_sec = (time_t)(now.tv_sec + sec);
    t.tv_nsec = now.tv_nsec + nsec;
    if (t.tv_nsec >= 1000000000) {
     /* carry nanosecond overflow into the seconds field */
      t.tv_sec += 1;
      t.tv_nsec -= 1000000000;
    }
  } else {
    t.tv_sec = sec;
    t.tv_nsec = nsec;
  }
  return pthread_cond_timedwait(cond, mutex, &t);
}

#endif /* FEATURE_WINDOWS */

#define Srecord_ref(x,i) (((ptr *)((uptr)(x)+record_data_disp))[i])

/* Wait on condition c with mutex m, which must be held exactly once
   (not recursively) by the calling thread.  t is either Sfalse (wait
   indefinitely) or a Scheme time record supplying (typeno, sec, nsec)
   for a timed wait; its layout must stay in sync with the ts record in
   s/date.ss.  The thread is deactivated around the wait -- always for
   the collector's own condition variable, otherwise only when
   interrupts are enabled -- so a pending collection can proceed while
   we block.  Returns 1 on wakeup, 0 on timeout; raises a Scheme error
   on any other failure. */
IBOOL S_condition_wait(c, m, t) s_thread_cond_t *c; scheme_mutex_t *m; ptr t; {
  ptr tc = get_thread_context();
  s_thread_t self = s_thread_self();
  iptr count;
  INT typeno;
  I64 sec;
  long nsec;
  INT status;

  if ((count = m->count) == 0 || !s_thread_equal(m->owner, self))
    S_error1("condition-wait", "thread does not own mutex ~s", m);

  if (count != 1)
    S_error1("condition-wait", "mutex ~s is recursively locked", m);

  if (t != Sfalse) {
    /* Keep in sync with ts record in s/date.ss */
    typeno = Sinteger32_value(Srecord_ref(t,0));
    sec = Sinteger64_value(Scar(Srecord_ref(t,1)));
    nsec = Sinteger32_value(Scdr(Srecord_ref(t,1)));
  } else {
    typeno = 0;
    sec = 0;
    nsec = 0;
  }

  if (c == &S_collect_cond || DISABLECOUNT(tc) == 0) {
    deactivate_thread(tc)
  }

 /* give up logical ownership while blocked; the pmutex itself is
    released and retaken by the underlying condition wait */
  m->count = 0;
  status = (t == Sfalse) ? s_thread_cond_wait(c, &m->pmutex) :
    s_thread_cond_timedwait(c, &m->pmutex, typeno, sec, nsec);
  m->owner = self;
  m->count = 1;

  if (c == &S_collect_cond || DISABLECOUNT(tc) == 0) {
    reactivate_thread(tc)
  }

  if (status == 0) {
    return 1;
  } else if (status == ETIMEDOUT) {
    return 0;
  } else {
    S_error1("condition-wait", "failed: ~a", S_strerror(status));
    return 0;
  }
}
#endif /* PTHREADS */
