1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4 
5 #include <signal.h>
6 #include <unistd.h>
7 
8 #include "config.h"
9 
10 #include "runtime.h"
11 #include "arch.h"
12 #include "array.h"
13 
enum {
	// maxround is the largest rounding/alignment unit: one
	// pointer-sized word on this target.
	maxround = sizeof(uintptr),
};

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting and the top 31 bits are the
// gotraceback value.
enum {
	tracebackCrash = 1 << 0,	// crash (core dump) after printing tracebacks
	tracebackAll = 1 << 1,		// print all goroutine stacks, not just the current one
	tracebackShift = 2,		// the numeric level lives in the bits above the flags
};
// Default setting: level 2, no "all", no "crash".
static uint32 traceback_cache = 2 << tracebackShift;
// Bits forced by the GOTRACEBACK environment variable; setTraceback
// ORs these back in, so they can be raised but never lowered.
static uint32 traceback_env;

// The Go-visible runtime.MemProfileRate variable; written directly by
// runtime_parsedebugvars when GODEBUG contains "memprofilerate=".
extern volatile intgo runtime_MemProfileRate
  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
34 
35 // gotraceback returns the current traceback settings.
36 //
37 // If level is 0, suppress all tracebacks.
38 // If level is 1, show tracebacks, but exclude runtime frames.
39 // If level is 2, show tracebacks including runtime frames.
40 // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
41 // If crash is set, crash (core dump, etc) after tracebacking.
42 int32
runtime_gotraceback(bool * crash)43 runtime_gotraceback(bool *crash)
44 {
45 	uint32 x;
46 
47 	if(crash != nil)
48 		*crash = false;
49 	if(runtime_m()->traceback != 0)
50 		return runtime_m()->traceback;
51 	x = runtime_atomicload(&traceback_cache);
52 	if(crash != nil)
53 		*crash = x&tracebackCrash;
54 	return x>>tracebackShift;
55 }
56 
// Raw command line as handed to runtime_args by the C startup code.
static int32	argc;
static byte**	argv;

// Go-slice views of the arguments and environment, built by
// runtime_goargs and runtime_goenvs_unix.
static Slice args;
Slice envs;

// Optional OS-specific hook; if set, runtime_args forwards the raw
// argc/argv to it.
void (*runtime_sysargs)(int32, uint8**);
64 
// runtime_args saves the raw argument vector passed by the C startup
// code and forwards it to the OS-specific hook, if one is registered.
void
runtime_args(int32 c, byte **v)
{
	argc = c;
	argv = v;
	if(runtime_sysargs != nil)
		runtime_sysargs(c, v);
}
73 
74 byte*
runtime_progname()75 runtime_progname()
76 {
77   return argc == 0 ? nil : argv[0];
78 }
79 
80 void
runtime_goargs(void)81 runtime_goargs(void)
82 {
83 	String *s;
84 	int32 i;
85 
86 	// for windows implementation see "os" package
87 	if(Windows)
88 		return;
89 
90 	s = runtime_malloc(argc*sizeof s[0]);
91 	for(i=0; i<argc; i++)
92 		s[i] = runtime_gostringnocopy((const byte*)argv[i]);
93 	args.__values = (void*)s;
94 	args.__count = argc;
95 	args.__capacity = argc;
96 }
97 
98 void
runtime_goenvs_unix(void)99 runtime_goenvs_unix(void)
100 {
101 	String *s;
102 	int32 i, n;
103 
104 	for(n=0; argv[argc+1+n] != 0; n++)
105 		;
106 
107 	s = runtime_malloc(n*sizeof s[0]);
108 	for(i=0; i<n; i++)
109 		s[i] = runtime_gostringnocopy(argv[argc+1+i]);
110 	envs.__values = (void*)s;
111 	envs.__count = n;
112 	envs.__capacity = n;
113 }
114 
// Called from the syscall package.
Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");

// runtime_envs returns the cached environment slice built by
// runtime_goenvs_unix.
Slice
runtime_envs()
{
	return envs;
}
123 
// Called from the os package to retrieve the command-line arguments.
Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");

// os_runtime_args returns the cached argument slice built by
// runtime_goargs.
Slice
os_runtime_args()
{
	return args;
}
131 
132 int32
runtime_atoi(const byte * p,intgo len)133 runtime_atoi(const byte *p, intgo len)
134 {
135 	int32 n;
136 
137 	n = 0;
138 	while(len > 0 && '0' <= *p && *p <= '9') {
139 		n = n*10 + *p++ - '0';
140 		len--;
141 	}
142 	return n;
143 }
144 
// Register the argument and environment slices as GC roots so the
// strings they reference stay alive.
static struct root_list runtime_roots =
{ nil,
  { { &envs, sizeof envs },
    { &args, sizeof args },
    { nil, 0 } },
};
151 
// TestAtomic64 sanity-checks the 64-bit atomic primitives (cas, load,
// store, xadd, xchg) at startup.  Values wider than 32 bits are used
// so a broken implementation that drops the high word is caught on
// 32-bit targets.
static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);	// also exercises the PREFETCH macro
	// z64 is 42, not 0, so this compare-and-swap must fail.
	if(runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	// A failed cas must not modify the comparison value.
	if(x64 != 0)
		runtime_throw("cas64 failed");
	x64 = 42;
	// Now the values match, so the swap must succeed...
	if(!runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	// ...leaving z64 holding the new value.
	if(x64 != 42 || z64 != 1)
		runtime_throw("cas64 failed");
	if(runtime_atomicload64(&z64) != 1)
		runtime_throw("load64 failed");
	runtime_atomicstore64(&z64, (1ull<<40)+1);
	if(runtime_atomicload64(&z64) != (1ull<<40)+1)
		runtime_throw("store64 failed");
	// xadd64 returns the new value.
	if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_atomicload64(&z64) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	// xchg64 returns the previous value.
	if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime_throw("xchg64 failed");
	if(runtime_atomicload64(&z64) != (3ull<<40)+3)
		runtime_throw("xchg64 failed");
}
183 
// runtime_check performs one-time startup work: registering the static
// GC roots and sanity-checking the 64-bit atomics.
void
runtime_check(void)
{
	__go_register_gc_roots(&runtime_roots);

	TestAtomic64();
}
191 
192 uint32
runtime_fastrand1(void)193 runtime_fastrand1(void)
194 {
195 	M *m;
196 	uint32 x;
197 
198 	m = runtime_m();
199 	x = m->fastrand;
200 	x += x;
201 	if(x & 0x80000000L)
202 		x ^= 0x88888eefUL;
203 	m->fastrand = x;
204 	return x;
205 }
206 
// runtime_cputicks returns a fast per-CPU tick count: the TSC on x86,
// the STCKF clock on s390, and 0 on targets with no implementation.
// The unit is unspecified ticks; runtime_tickspersecond calibrates it
// against wall-clock time.
int64
runtime_cputicks(void)
{
#if defined(__386__) || defined(__x86_64__)
  // rdtsc places the 64-bit timestamp counter in EDX:EAX.
  uint32 low, high;
  asm("rdtsc" : "=a" (low), "=d" (high));
  return (int64)(((uint64)high << 32) | (uint64)low);
#elif defined (__s390__) || defined (__s390x__)
  uint64 clock = 0;
  /* stckf may not write the return variable in case of a clock error, so make
     it read-write to prevent that the initialisation is optimised out.
     Note: Targets below z9-109 will crash when executing store clock fast, i.e.
     we don't support Go for machines older than that.  */
  asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
  return (int64)clock;
#else
  // FIXME: implement for other processors.
  return 0;
#endif
}
227 
228 bool
runtime_showframe(String s,bool current)229 runtime_showframe(String s, bool current)
230 {
231 	static int32 traceback = -1;
232 
233 	if(current && runtime_m()->throwing > 0)
234 		return 1;
235 	if(traceback < 0)
236 		traceback = runtime_gotraceback(nil);
237 	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
238 }
239 
static Lock ticksLock;	// guards the one-time measurement below
static int64 ticks;	// cached cputicks-per-second; 0 means not yet measured
242 
243 int64
runtime_tickspersecond(void)244 runtime_tickspersecond(void)
245 {
246 	int64 res, t0, t1, c0, c1;
247 
248 	res = (int64)runtime_atomicload64((uint64*)&ticks);
249 	if(res != 0)
250 		return ticks;
251 	runtime_lock(&ticksLock);
252 	res = ticks;
253 	if(res == 0) {
254 		t0 = runtime_nanotime();
255 		c0 = runtime_cputicks();
256 		runtime_usleep(100*1000);
257 		t1 = runtime_nanotime();
258 		c1 = runtime_cputicks();
259 		if(t1 == t0)
260 			t1++;
261 		res = (c1-c0)*1000*1000*1000/(t1-t0);
262 		if(res == 0)
263 			res++;
264 		runtime_atomicstore64((uint64*)&ticks, res);
265 	}
266 	runtime_unlock(&ticksLock);
267 	return res;
268 }
269 
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
runtime_mpreinit(M *mp)
{
	// Allocate the goroutine (and stack) used to run signal handlers
	// on this m.
	mp->gsignal = runtime_malg(32*1024, &mp->gsignalstack, &mp->gsignalstacksize);	// OS X wants >=8K, Linux >=2K
}
277 
278 // Called to initialize a new m (including the bootstrap m).
279 // Called on the new thread, can not allocate memory.
280 void
runtime_minit(void)281 runtime_minit(void)
282 {
283 	M* m;
284 	sigset_t sigs;
285 
286 	// Initialize signal handling.
287 	m = runtime_m();
288 	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
289 	if (sigemptyset(&sigs) != 0)
290 		runtime_throw("sigemptyset");
291 	pthread_sigmask(SIG_SETMASK, &sigs, nil);
292 }
293 
// Called from dropm to undo the effect of an minit.
void
runtime_unminit(void)
{
	// Disable the alternate signal stack for this thread.
	runtime_signalstack(nil, 0);
}
300 
301 
302 void
runtime_signalstack(byte * p,int32 n)303 runtime_signalstack(byte *p, int32 n)
304 {
305 	stack_t st;
306 
307 	st.ss_sp = p;
308 	st.ss_size = n;
309 	st.ss_flags = 0;
310 	if(p == nil)
311 		st.ss_flags = SS_DISABLE;
312 	if(sigaltstack(&st, nil) < 0)
313 		*(int *)0xf1 = 0xf1;
314 }
315 
316 void setTraceback(String level)
317   __asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");
318 
setTraceback(String level)319 void setTraceback(String level) {
320 	uint32 t;
321 
322 	if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {
323 		t = 0;
324 	} else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
325 		t = 1 << tracebackShift;
326 	} else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
327 		t = (1<<tracebackShift) | tracebackAll;
328 	} else if (level.len == 6 && __builtin_memcmp(level.str, "system", 6) == 0) {
329 		t = (2<<tracebackShift) | tracebackAll;
330 	} else if (level.len == 5 && __builtin_memcmp(level.str, "crash", 5) == 0) {
331 		t = (2<<tracebackShift) | tracebackAll | tracebackCrash;
332 	} else {
333 		t = (runtime_atoi(level.str, level.len)<<tracebackShift) | tracebackAll;
334 	}
335 
336 	t |= traceback_env;
337 
338 	runtime_atomicstore(&traceback_cache, t);
339 }
340 
// Runtime debug settings, toggled via the GODEBUG environment variable.
DebugVars	runtime_debug;

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing var for that value which is int
// instead of int32 and might have an
// initial value.
static struct {
	const char* name;	// GODEBUG key, e.g. "gctrace"
	int32*	value;		// destination field in runtime_debug
} dbgvar[] = {
	{"allocfreetrace", &runtime_debug.allocfreetrace},
	{"cgocheck", &runtime_debug.cgocheck},
	{"efence", &runtime_debug.efence},
	{"gccheckmark", &runtime_debug.gccheckmark},
	{"gcpacertrace", &runtime_debug.gcpacertrace},
	{"gcshrinkstackoff", &runtime_debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &runtime_debug.gcstackbarrieroff},
	{"gcstackbarrierall", &runtime_debug.gcstackbarrierall},
	{"gcstoptheworld", &runtime_debug.gcstoptheworld},
	{"gctrace", &runtime_debug.gctrace},
	{"gcdead", &runtime_debug.gcdead},
	{"invalidptr", &runtime_debug.invalidptr},
	{"sbrk", &runtime_debug.sbrk},
	{"scavenge", &runtime_debug.scavenge},
	{"scheddetail", &runtime_debug.scheddetail},
	{"schedtrace", &runtime_debug.schedtrace},
	{"wbshadow", &runtime_debug.wbshadow},
};
370 
371 void
runtime_parsedebugvars(void)372 runtime_parsedebugvars(void)
373 {
374 	String s;
375 	const byte *p, *pn;
376 	intgo len;
377 	intgo i, n;
378 
379 	s = runtime_getenv("GODEBUG");
380 	if(s.len == 0)
381 		return;
382 	p = s.str;
383 	len = s.len;
384 	for(;;) {
385 		for(i=0; i<(intgo)nelem(dbgvar); i++) {
386 			n = runtime_findnull((const byte*)dbgvar[i].name);
387 			if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
388 				// Set the MemProfileRate directly since it
389 				// is an int, not int32, and should only lbe
390 				// set here if specified by GODEBUG
391 				runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
392 			else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
393 				*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
394 		}
395 		pn = (const byte *)runtime_strstr((const char *)p, ",");
396 		if(pn == nil || pn - p >= len)
397 			break;
398 		len -= (pn - p) - 1;
399 		p = pn + 1;
400 	}
401 
402 	setTraceback(runtime_getenv("GOTRACEBACK"));
403 	traceback_env = traceback_cache;
404 }
405 
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
void SetTracebackEnv(String level)
  __asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");

void SetTracebackEnv(String level) {
	setTraceback(level);
	// Record the result as the floor that setTraceback ORs back in.
	traceback_env = traceback_cache;
}
416 
417 // Poor mans 64-bit division.
418 // This is a very special function, do not use it if you are not sure what you are doing.
419 // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
420 // Handles overflow in a time-specific manner.
421 int32
runtime_timediv(int64 v,int32 div,int32 * rem)422 runtime_timediv(int64 v, int32 div, int32 *rem)
423 {
424 	int32 res, bit;
425 
426 	if(v >= (int64)div*0x7fffffffLL) {
427 		if(rem != nil)
428 			*rem = 0;
429 		return 0x7fffffff;
430 	}
431 	res = 0;
432 	for(bit = 30; bit >= 0; bit--) {
433 		if(v >= ((int64)div<<bit)) {
434 			v = v - ((int64)div<<bit);
435 			res += 1<<bit;
436 		}
437 	}
438 	if(rem != nil)
439 		*rem = v;
440 	return res;
441 }
442 
// Setting the max stack size doesn't really do anything for gccgo.

uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real

void memclrBytes(Slice)
     __asm__ (GOSYM_PREFIX "runtime.memclrBytes");

// memclrBytes zeroes the contents of a Go byte slice; called from Go
// as runtime.memclrBytes.
void
memclrBytes(Slice s)
{
	runtime_memclr(s.__values, s.__count);
}
455