1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
7import (
8	"runtime/internal/atomic"
9	"runtime/internal/sys"
10	"unsafe"
11)
12
13// For gccgo, while we still have C runtime code, use go:linkname to
14// export some functions to themselves.
15//
16//go:linkname gotraceback
17//go:linkname args
18//go:linkname goargs
19//go:linkname check
20//go:linkname goenvs_unix
21//go:linkname parsedebugvars
22//go:linkname timediv
23
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // bit 0: crash (core dump, etc.) after printing tracebacks
	tracebackAll               // bit 1: print stacks for all goroutines, not just the current one
	tracebackShift = iota      // number of flag bits; the numeric level lives in the bits above
)

// traceback_cache holds the combined setting: level<<tracebackShift | flags.
// Initialized to level 2 ("system"); parsedebugvars/setTraceback overwrite it
// from GOTRACEBACK at startup.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the bits that came from the GOTRACEBACK environment
// variable. setTraceback ORs these back in so that later calls (e.g. from
// runtime/debug.SetTraceback) cannot clear env-mandated flags.
var traceback_env uint32
37
38// gotraceback returns the current traceback settings.
39//
40// If level is 0, suppress all tracebacks.
41// If level is 1, show tracebacks, but exclude runtime frames.
42// If level is 2, show tracebacks including runtime frames.
43// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
44// If crash is set, crash (core dump, etc) after tracebacking.
45//
46//go:nosplit
47func gotraceback() (level int32, all, crash bool) {
48	_g_ := getg()
49	t := atomic.Load(&traceback_cache)
50	crash = t&tracebackCrash != 0
51	all = _g_.m.throwing > 0 || t&tracebackAll != 0
52	if _g_.m.traceback != 0 {
53		level = int32(_g_.m.traceback)
54	} else {
55		level = int32(t >> tracebackShift)
56	}
57	return
58}
59
// argc and argv are saved copies of the C-style command line,
// recorded by args at startup and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
64
65// nosplit for use in linux startup sysargs
66//go:nosplit
67func argv_index(argv **byte, i int32) *byte {
68	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
69}
70
// args records the C command line passed at startup and lets the
// OS-specific sysargs hook inspect it (e.g. to read the auxv on Linux).
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
76
77func goargs() {
78	if GOOS == "windows" {
79		return
80	}
81	argslice = make([]string, argc)
82	for i := int32(0); i < argc; i++ {
83		argslice[i] = gostringnocopy(argv_index(argv, i))
84	}
85}
86
87func goenvs_unix() {
88	// TODO(austin): ppc64 in dynamic linking mode doesn't
89	// guarantee env[] will immediately follow argv. Might cause
90	// problems.
91	n := int32(0)
92	for argv_index(argv, argc+1+n) != nil {
93		n++
94	}
95
96	envs = make([]string, n)
97	for i := int32(0); i < n; i++ {
98		envs[i] = gostring(argv_index(argv, argc+1+i))
99	}
100}
101
// environ returns the process environment as captured by goenvs_unix.
func environ() []string {
	return envs
}
105
// Scratch words for testAtomic64. Package-level so they are guaranteed
// 8-byte aligned, which the 64-bit atomics require.
//
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
109
// testAtomic64 sanity-checks the 64-bit atomic operations
// (Cas64, Load64, Store64, Xadd64, Xchg64) at startup.
// Values wider than 32 bits (1<<40) are used to catch
// half-word bugs on 32-bit platforms. Any failure is fatal.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Cas64 with a non-matching old value must fail...
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	// ...and must leave the comparand untouched.
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// Cas64 with a matching old value must succeed and store the new value.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Store and load a value that does not fit in 32 bits.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new (post-add) value.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
146
147func check() {
148	var (
149		a     int8
150		b     uint8
151		c     int16
152		d     uint16
153		e     int32
154		f     uint32
155		g     int64
156		h     uint64
157		i, i1 float32
158		j, j1 float64
159		k     unsafe.Pointer
160		l     *uint16
161		m     [4]byte
162	)
163	type x1t struct {
164		x uint8
165	}
166	type y1t struct {
167		x1 x1t
168		y  uint8
169	}
170	var x1 x1t
171	var y1 y1t
172
173	if unsafe.Sizeof(a) != 1 {
174		throw("bad a")
175	}
176	if unsafe.Sizeof(b) != 1 {
177		throw("bad b")
178	}
179	if unsafe.Sizeof(c) != 2 {
180		throw("bad c")
181	}
182	if unsafe.Sizeof(d) != 2 {
183		throw("bad d")
184	}
185	if unsafe.Sizeof(e) != 4 {
186		throw("bad e")
187	}
188	if unsafe.Sizeof(f) != 4 {
189		throw("bad f")
190	}
191	if unsafe.Sizeof(g) != 8 {
192		throw("bad g")
193	}
194	if unsafe.Sizeof(h) != 8 {
195		throw("bad h")
196	}
197	if unsafe.Sizeof(i) != 4 {
198		throw("bad i")
199	}
200	if unsafe.Sizeof(j) != 8 {
201		throw("bad j")
202	}
203	if unsafe.Sizeof(k) != sys.PtrSize {
204		throw("bad k")
205	}
206	if unsafe.Sizeof(l) != sys.PtrSize {
207		throw("bad l")
208	}
209	if unsafe.Sizeof(x1) != 1 {
210		throw("bad unsafe.Sizeof x1")
211	}
212	if unsafe.Offsetof(y1.y) != 1 {
213		throw("bad offsetof y1.y")
214	}
215	if unsafe.Sizeof(y1) != 2 {
216		throw("bad unsafe.Sizeof y1")
217	}
218
219	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
220		throw("bad timediv")
221	}
222
223	var z uint32
224	z = 1
225	if !atomic.Cas(&z, 1, 2) {
226		throw("cas1")
227	}
228	if z != 2 {
229		throw("cas2")
230	}
231
232	z = 4
233	if atomic.Cas(&z, 5, 6) {
234		throw("cas3")
235	}
236	if z != 4 {
237		throw("cas4")
238	}
239
240	z = 0xffffffff
241	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
242		throw("cas5")
243	}
244	if z != 0xfffffffe {
245		throw("cas6")
246	}
247
248	m = [4]byte{1, 1, 1, 1}
249	atomic.Or8(&m[1], 0xf0)
250	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
251		throw("atomicor8")
252	}
253
254	m = [4]byte{0xff, 0xff, 0xff, 0xff}
255	atomic.And8(&m[1], 0x1)
256	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
257		throw("atomicand8")
258	}
259
260	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
261	if j == j {
262		throw("float64nan")
263	}
264	if !(j != j) {
265		throw("float64nan1")
266	}
267
268	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
269	if j == j1 {
270		throw("float64nan2")
271	}
272	if !(j != j1) {
273		throw("float64nan3")
274	}
275
276	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
277	if i == i {
278		throw("float32nan")
279	}
280	if i == i {
281		throw("float32nan1")
282	}
283
284	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
285	if i == i1 {
286		throw("float32nan2")
287	}
288	if i == i1 {
289		throw("float32nan3")
290	}
291
292	testAtomic64()
293
294	// if _FixedStack != round2(_FixedStack) {
295	// 	throw("FixedStack is not power-of-2")
296	// }
297
298	if !checkASM() {
299		throw("assembly checks failed")
300	}
301}
302
// dbgVar associates a GODEBUG key name with the int32 variable it sets.
type dbgVar struct {
	name  string
	value *int32
}
307
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// Each field corresponds 1:1 to a key in dbgvars below and is
// written by parsedebugvars at startup.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	sbrk               int32
	scavenge           int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
}
332
// dbgvars is the lookup table parsedebugvars scans to route each
// GODEBUG key=value pair to the matching field of debug.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
}
353
354func parsedebugvars() {
355	// defaults
356	debug.cgocheck = 1
357
358	// Gccgo uses conservative stack scanning, so we cannot check
359	// invalid pointers on stack. But we can still enable invalid
360	// pointer check on heap scanning. When scanning the heap, we
361	// ensure that we only trace allocated heap objects, which should
362	// not contain invalid pointers.
363	debug.invalidptr = 1
364
365	for p := gogetenv("GODEBUG"); p != ""; {
366		field := ""
367		i := index(p, ",")
368		if i < 0 {
369			field, p = p, ""
370		} else {
371			field, p = p[:i], p[i+1:]
372		}
373		i = index(field, "=")
374		if i < 0 {
375			continue
376		}
377		key, value := field[:i], field[i+1:]
378
379		// Update MemProfileRate directly here since it
380		// is int, not int32, and should only be updated
381		// if specified in GODEBUG.
382		if key == "memprofilerate" {
383			if n, ok := atoi(value); ok {
384				MemProfileRate = n
385			}
386		} else {
387			for _, v := range dbgvars {
388				if v.name == key {
389					if n, ok := atoi32(value); ok {
390						*v.value = n
391					}
392				}
393			}
394		}
395	}
396
397	setTraceback(gogetenv("GOTRACEBACK"))
398	traceback_env = traceback_cache
399}
400
401//go:linkname setTraceback runtime..z2fdebug.SetTraceback
402func setTraceback(level string) {
403	var t uint32
404	switch level {
405	case "none":
406		t = 0
407	case "single", "":
408		t = 1 << tracebackShift
409	case "all":
410		t = 1<<tracebackShift | tracebackAll
411	case "system":
412		t = 2<<tracebackShift | tracebackAll
413	case "crash":
414		t = 2<<tracebackShift | tracebackAll | tracebackCrash
415	default:
416		t = tracebackAll
417		if n, ok := atoi(level); ok && n == int(uint32(n)) {
418			t |= uint32(n) << tracebackShift
419		}
420	}
421	// when C owns the process, simply exit'ing the process on fatal errors
422	// and panics is surprising. Be louder and abort instead.
423	if islibrary || isarchive {
424		t |= tracebackCrash
425	}
426
427	t |= traceback_env
428
429	atomic.Store(&traceback_cache, t)
430}
431
432// Poor mans 64-bit division.
433// This is a very special function, do not use it if you are not sure what you are doing.
434// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
435// Handles overflow in a time-specific manner.
436// This keeps us within no-split stack limits on 32-bit processors.
437//go:nosplit
438func timediv(v int64, div int32, rem *int32) int32 {
439	res := int32(0)
440	for bit := 30; bit >= 0; bit-- {
441		if v >= int64(div)<<uint(bit) {
442			v = v - (int64(div) << uint(bit))
443			// Before this for loop, res was 0, thus all these
444			// power of 2 increments are now just bitsets.
445			res |= 1 << uint(bit)
446		}
447	}
448	if v >= int64(div) {
449		if rem != nil {
450			*rem = 0
451		}
452		return 0x7fffffff
453	}
454	if rem != nil {
455		*rem = int32(v)
456	}
457	return res
458}
459
460// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
461
462//go:nosplit
463func acquirem() *m {
464	_g_ := getg()
465	_g_.m.locks++
466	return _g_.m
467}
468
// releasem undoes a prior acquirem by decrementing the M's lock count.
// The stackguard-based preemption restore from the gc runtime is kept
// below, commented out — presumably not applicable to this (gccgo)
// runtime; confirm before re-enabling.
//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	//	// restore the preemption request in case we've cleared it in newstack
	//	_g_.stackguard0 = stackPreempt
	// }
}
478
// gomcache returns the mcache of the current goroutine's M.
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}
483