// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions to themselves.
//
//go:linkname gotraceback
//go:linkname args
//go:linkname goargs
//go:linkname check
//go:linkname goenvs_unix
//go:linkname parsedebugvars
//go:linkname timediv

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)
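
// For example, GOTRACEBACK=crash is cached as
// 2<<tracebackShift | tracebackAll | tracebackCrash: traceback level 2
// in the high bits, with both flag bits set (see setTraceback below).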

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
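
// Illustrative decoding: with GOTRACEBACK=system, setTraceback (below)
// stores 2<<tracebackShift | tracebackAll, so gotraceback reports
// level=2, all=true, crash=false, unless the current M is throwing or
// carries a per-M traceback override.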

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
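
// In effect, argv_index(argv, i) is the C expression argv[i]: it loads
// the pointer stored i*PtrSize bytes past argv. On Unix, for example,
// argv_index(argv, 0) is the program name.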

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
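
// goenvs_unix assumes the conventional Unix process layout in which the
// environment block directly follows the argument vector:
//
//	argv[0] ... argv[argc-1] nil env[0] env[1] ... nil
//
// so env[i] lives at argv_index(argv, argc+1+i), and the counting loop
// above stops at the terminating nil pointer.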

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

// testAtomic64 verifies that the 64-bit atomic operations
// (Cas64, Load64, Store64, Xadd64, Xchg64) behave as specified.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// A CAS with the wrong old value (0 != 42) must fail.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	// if _FixedStack != round2(_FixedStack) {
	// 	throw("FixedStack is not power-of-2")
	// }

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
}
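
// Note that registering a knob takes two steps: an int32 field in the
// debug struct above and a matching dbgVar entry here; parsedebugvars
// below writes the parsed GODEBUG value through the entry's pointer.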

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1

	// Gccgo uses conservative stack scanning, so we cannot check
	// invalid pointers on stack. But we can still enable invalid
	// pointer check on heap scanning. When scanning the heap, we
	// ensure that we only trace allocated heap objects, which should
	// not contain invalid pointers.
	debug.invalidptr = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
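
// Worked example: with GODEBUG=gctrace=1,madvdontneed=0, the loop above
// sets debug.gctrace to 1 and clears the Linux default for
// debug.madvdontneed; memprofilerate=4096 would instead update
// MemProfileRate directly, since it is an int rather than an int32.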

//go:linkname setTraceback runtime_1debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal
	// errors and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
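
// Worked example (also exercised by check above):
// timediv(12345*1000000000+54321, 1000000000, &r) returns 12345 and
// stores 54321 in r. If the true quotient does not fit in 31 bits,
// timediv returns 0x7fffffff and sets the remainder to 0.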

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	// _g_ := getg()
	mp.locks--
	// if mp.locks == 0 && _g_.preempt {
	//	// restore the preemption request in case we've cleared it in newstack
	//	_g_.stackguard0 = stackPreempt
	// }
}
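
// Typical usage (illustrative) brackets a critical section so the
// goroutine stays wired to its M; elsewhere in the runtime, a nonzero
// m.locks suppresses preemption:
//
//	mp := acquirem()
//	// ... work that must not be preempted or migrate to another M ...
//	releasem(mp)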