1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	"flag"
9	"io"
10	. "runtime"
11	"runtime/debug"
12	"strings"
13	"testing"
14	"unsafe"
15)
16
// flagQuick is set by all.bash's second test pass to skip slow tests.
var flagQuick = flag.Bool("quick", false, "skip slow tests, for second run in all.bash")
18
// init raises the traceback level so that runtime frames appear in
// tracebacks produced while these tests run.
func init() {
	// We're testing the runtime, so make tracebacks show things
	// in the runtime. This only raises the level, so it won't
	// override GOTRACEBACK=crash from the user.
	SetTracebackEnv("system")
}
25
// errf is the value returned by errfn. It is a package-level variable
// so its value is not known at compile time.
var errf error

// errfn returns the package-level error errf (nil unless set elsewhere).
func errfn() error {
	return errf
}
31
// errfn1 always returns the non-nil error io.EOF.
func errfn1() error {
	return io.EOF
}
35
// BenchmarkIfaceCmp100 measures comparing a dynamically loaded error
// value against io.EOF, 100 comparisons per outer iteration.
func BenchmarkIfaceCmp100(b *testing.B) {
	for outer := 0; outer < b.N; outer++ {
		for rep := 0; rep < 100; rep++ {
			if errfn() == io.EOF {
				b.Fatal("bad comparison")
			}
		}
	}
}
45
// BenchmarkIfaceCmpNil100 measures comparing a non-nil error (io.EOF,
// returned by errfn1) against nil, 100 comparisons per outer iteration.
func BenchmarkIfaceCmpNil100(b *testing.B) {
	for outer := 0; outer < b.N; outer++ {
		for rep := 0; rep < 100; rep++ {
			if errfn1() == nil {
				b.Fatal("bad comparison")
			}
		}
	}
}
55
// efaceCmp1 and efaceCmp2 hold the operands for the empty-interface
// comparison benchmarks below.
var efaceCmp1 interface{}
var efaceCmp2 interface{}
58
// BenchmarkEfaceCmpDiff measures comparing two unequal empty interfaces
// holding distinct *int values.
func BenchmarkEfaceCmpDiff(b *testing.B) {
	v1 := 5
	efaceCmp1 = &v1
	v2 := 6
	efaceCmp2 = &v2
	for outer := 0; outer < b.N; outer++ {
		for rep := 0; rep < 100; rep++ {
			if efaceCmp1 == efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}
72
// BenchmarkEfaceCmpDiffIndirect measures comparing two equal empty
// interfaces whose dynamic values are arrays (stored indirectly).
func BenchmarkEfaceCmpDiffIndirect(b *testing.B) {
	efaceCmp1 = [2]int{1, 2}
	efaceCmp2 = [2]int{1, 2}
	for outer := 0; outer < b.N; outer++ {
		for rep := 0; rep < 100; rep++ {
			if efaceCmp1 != efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}
84
// BenchmarkDefer measures one deferred call (with a recover check)
// per iteration; see defer1.
func BenchmarkDefer(b *testing.B) {
	for n := 0; n < b.N; n++ {
		defer1()
	}
}
90
// defer1 defers a single closure that verifies its arguments arrived
// intact and that no panic is in flight when it runs.
func defer1() {
	defer func(a, b, c int) {
		if recover() != nil || a != 1 || b != 2 || c != 3 {
			panic("bad recover")
		}
	}(1, 2, 3)
}
98
// BenchmarkDefer10 measures ten deferred calls per function invocation;
// see defer2. The outer loop divides b.N by 10 to compensate.
func BenchmarkDefer10(b *testing.B) {
	for n := 0; n < b.N/10; n++ {
		defer2()
	}
}
104
// defer2 queues ten deferred closures in a loop, each verifying its
// arguments and that no panic is in flight when it runs.
func defer2() {
	for n := 0; n < 10; n++ {
		defer func(a, b, c int) {
			if recover() != nil || a != 1 || b != 2 || c != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}
114
// BenchmarkDeferMany accumulates b.N deferred closures before the
// function returns, measuring the cost of a long defer chain.
func BenchmarkDeferMany(b *testing.B) {
	for n := 0; n < b.N; n++ {
		defer func(p, q, r int) {
			if recover() != nil || p != 1 || q != 2 || r != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}
124
// BenchmarkPanicRecover measures a full panic/recover round trip per
// iteration; see defer3.
func BenchmarkPanicRecover(b *testing.B) {
	for n := 0; n < b.N; n++ {
		defer3()
	}
}
130
// defer3 panics and relies on its deferred closure to recover; the
// closure panics itself if recover did not observe a panic.
func defer3() {
	defer func(a, b, c int) {
		if recover() == nil {
			panic("failed recover")
		}
	}(1, 2, 3)
	panic("hi")
}
139
// golang.org/issue/7063
// TestStopCPUProfilingWithProfilerOff checks that turning the CPU
// profiler off when it was never started does not crash.
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
	SetCPUProfileRate(0)
}
144
145// Addresses to test for faulting behavior.
146// This is less a test of SetPanicOnFault and more a check that
147// the operating system and the runtime can process these faults
148// correctly. That is, we're indirectly testing that without SetPanicOnFault
149// these would manage to turn into ordinary crashes.
150// Note that these are truncated on 32-bit systems, so the bottom 32 bits
151// of the larger addresses must themselves be invalid addresses.
152// We might get unlucky and the OS might have mapped one of these
153// addresses, but probably not: they're all in the first page, very high
154// addresses that normally an OS would reserve for itself, or malformed
155// addresses. Even so, we might have to remove one or two on different
156// systems. We will see.
157
// faultAddrs lists addresses expected to be unmapped on all supported
// systems; see the commentary above for the caveats.
var faultAddrs = []uint64{
	// low addresses
	0,
	1,
	0xfff,
	// high (kernel) addresses
	// or else malformed.
	0xffffffffffffffff,
	0xfffffffffffff001,
	0xffffffffffff0001,
	0xfffffffffff00001,
	0xffffffffff000001,
	0xfffffffff0000001,
	0xffffffff00000001,
	0xfffffff000000001,
	0xffffff0000000001,
	0xfffff00000000001,
	0xffff000000000001,
	0xfff0000000000001,
	0xff00000000000001,
	0xf000000000000001,
	0x8000000000000001,
}
181
// TestSetPanicOnFault checks that, with debug.SetPanicOnFault enabled,
// reads of the invalid addresses in faultAddrs turn into recoverable
// panics rather than crashes. At least one address must actually fault.
func TestSetPanicOnFault(t *testing.T) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)

	nfault := 0
	for _, addr := range faultAddrs {
		testSetPanicOnFault(t, uintptr(addr), &nfault)
	}
	if nfault == 0 {
		// Fatal, not Fatalf: the message has no format verbs.
		t.Fatal("none of the addresses faulted")
	}
}
194
// testSetPanicOnFault tests one potentially faulting address.
// It deliberately constructs and uses an invalid pointer,
// so mark it as nocheckptr.
// If the read at addr faults (converted to a panic by SetPanicOnFault),
// *nfault is incremented; otherwise the value read is just logged.
//go:nocheckptr
func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {
	if strings.Contains(Version(), "llvm") {
		t.Skip("LLVM doesn't support non-call exception")
	}
	if GOOS == "js" {
		t.Skip("js does not support catching faults")
	}

	// recover returns non-nil only if the read below panicked,
	// i.e. the address really faulted.
	defer func() {
		if err := recover(); err != nil {
			*nfault++
		}
	}()

	// The read should fault, except that sometimes we hit
	// addresses that have had C or kernel pages mapped there
	// readable by user code. So just log the content.
	// If no addresses fault, we'll fail the test.
	v := *(*byte)(unsafe.Pointer(addr))
	t.Logf("addr %#x: %#x\n", addr, v)
}
220
// eqstring_generic reports whether s1 and s2 contain exactly the same
// bytes. It spells out the semantics of the built-in string == that
// TestEqString cross-checks.
func eqstring_generic(s1, s2 string) bool {
	// optimization in assembly versions:
	// if s1.str == s2.str { return true }
	eq := len(s1) == len(s2)
	for i := 0; eq && i < len(s1); i++ {
		eq = s1[i] == s2[i]
	}
	return eq
}
234
// TestEqString cross-checks the built-in string == operator against
// eqstring_generic on a small set of interesting strings.
func TestEqString(t *testing.T) {
	// This isn't really an exhaustive test of == on strings, it's
	// just a convenient way of documenting (via eqstring_generic)
	// what == does.
	cases := []string{
		"",
		"a",
		"c",
		"aaa",
		"ccc",
		"cccc"[:3], // same contents, different string
		"1234567890",
	}
	for _, a := range cases {
		for _, b := range cases {
			got := a == b
			want := eqstring_generic(a, b)
			if got != want {
				t.Errorf(`("%s" == "%s") = %t, want %t`, a, b, got, want)
			}
		}
	}
}
258
259/*
260func TestTrailingZero(t *testing.T) {
261	// make sure we add padding for structs with trailing zero-sized fields
262	type T1 struct {
263		n int32
264		z [0]byte
265	}
266	if unsafe.Sizeof(T1{}) != 8 {
267		t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
268	}
269	type T2 struct {
270		n int64
271		z struct{}
272	}
273	if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {
274		t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))
275	}
276	type T3 struct {
277		n byte
278		z [4]struct{}
279	}
280	if unsafe.Sizeof(T3{}) != 2 {
281		t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
282	}
283	// make sure padding can double for both zerosize and alignment
284	type T4 struct {
285		a int32
286		b int16
287		c int8
288		z struct{}
289	}
290	if unsafe.Sizeof(T4{}) != 8 {
291		t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
292	}
293	// make sure we don't pad a zero-sized thing
294	type T5 struct {
295	}
296	if unsafe.Sizeof(T5{}) != 0 {
297		t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
298	}
299}
300*/
301
// TestAppendGrowth checks the capacity growth pattern of a []int64
// built by appending one element at a time: capacity is expected to
// double each time the length passes a power of two.
func TestAppendGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	wantCap := 1
	for n := 1; n <= 100; n++ {
		x = append(x, 1)
		check(wantCap)
		if n&(n-1) == 0 { // n is a power of two: expect a doubling next
			wantCap = 2 * n
		}
	}
}
320
// One is the one-element slice appended repeatedly by TestAppendSliceGrowth.
var One = []int64{1}
322
// TestAppendSliceGrowth checks the capacity growth pattern when
// appending a slice (One) rather than a single element; it should
// match TestAppendGrowth.
func TestAppendSliceGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	wantCap := 1
	for n := 1; n <= 100; n++ {
		x = append(x, One...)
		check(wantCap)
		if n&(n-1) == 0 { // n is a power of two: expect a doubling next
			wantCap = 2 * n
		}
	}
}
341
// TestGoroutineProfileTrivial checks the two-call GoroutineProfile
// protocol: a nil-slice call reports the goroutine count (and false),
// and a correctly sized call succeeds with the same count.
func TestGoroutineProfileTrivial(t *testing.T) {
	// Calling GoroutineProfile twice in a row should find the same number of goroutines,
	// but it's possible there are goroutines just about to exit, so we might end up
	// with fewer in the second call. Try a few times; it should converge once those
	// zombies are gone.
	for i := 0; ; i++ {
		n1, ok := GoroutineProfile(nil) // should fail, there's at least 1 goroutine
		if n1 < 1 || ok {
			t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
		}
		n2, ok := GoroutineProfile(make([]StackRecord, n1))
		if n2 == n1 && ok {
			break
		}
		t.Logf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
		if i >= 10 {
			// Fatal, not Fatalf: the message has no format verbs.
			t.Fatal("GoroutineProfile not converging")
		}
	}
}
362
// TestVersion verifies that the runtime version string contains no
// carriage-return or newline characters.
func TestVersion(t *testing.T) {
	vers := Version()
	if strings.ContainsAny(vers, "\r\n") {
		t.Fatalf("cr/nl in version: %q", vers)
	}
}
370