1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	"flag"
9	"fmt"
10	"internal/race"
11	"internal/testenv"
12	"os"
13	"os/exec"
14	"reflect"
15	. "runtime"
16	"strings"
17	"testing"
18	"time"
19	"unsafe"
20)
21
22var testMemStatsCount int
23
// TestMemStats checks that ReadMemStats reports internally consistent,
// plausible values: non-zero counters where expected, no overflowed
// (absurdly large) values, and the documented arithmetic relationships
// between the Sys/Heap/Pause fields. Skipped under gccgo.
func TestMemStats(t *testing.T) {
	t.Skip("skipping test with gccgo")

	// Count runs so the magnitude checks below can be disabled for
	// repeated runs (-test.count > 1), where totals keep growing.
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error when x is the zero value of its type.
	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le returns a checker that errors when x, converted to float64,
	// is not strictly below thresh (catches overflowed counters).
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq returns a checker that errors unless the field equals x exactly.
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Per-field check table. Every exported MemStats field must appear
	// here (nil means "no checks").
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk every field of MemStats via reflection and apply its checks;
	// a field missing from the table means the table is stale.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}
	// Sys must be exactly the sum of its per-subsystem components.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer indexed by NumGC; the most recent
	// entry ((NumGC+255) mod len) must agree with LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// Unused ring-buffer slots must still be zero.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The ring buffer has wrapped, so the sum of the retained
		// entries is only a lower bound on the true total.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
137
// TestStringConcatenationAllocs verifies that concatenating a string
// constant with a converted byte slice performs exactly one allocation.
// Skipped under gccgo.
func TestStringConcatenationAllocs(t *testing.T) {
	t.Skip("skipping test with gccgo")
	allocs := testing.AllocsPerRun(1e3, func() {
		digits := make([]byte, 10)
		for i := range digits {
			digits[i] = '0' + byte(i)
		}
		got := "foo" + string(digits)
		if want := "foo0123456789"; got != want {
			t.Fatalf("want %v, got %v", want, got)
		}
	})
	// Only string concatenation allocates.
	if allocs != 1 {
		t.Fatalf("want 1 allocation, got %v", allocs)
	}
}
155
// TestTinyAlloc checks that the tiny allocator packs several 1-byte
// allocations into shared 8-byte chunks: if all 16 pointers land in
// distinct chunks, no coalescing happened.
func TestTinyAlloc(t *testing.T) {
	const count = 16
	var ptrs [count]unsafe.Pointer
	for i := 0; i < count; i++ {
		ptrs[i] = unsafe.Pointer(new(byte))
	}

	// Bucket each pointer by its 8-byte-aligned chunk address.
	chunks := map[uintptr]bool{}
	for _, p := range ptrs {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == count {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}
172
173func TestPageCacheLeak(t *testing.T) {
174	defer GOMAXPROCS(GOMAXPROCS(1))
175	leaked := PageCachePagesLeaked()
176	if leaked != 0 {
177		t.Fatalf("found %d leaked pages in page caches", leaked)
178	}
179}
180
181func TestPhysicalMemoryUtilization(t *testing.T) {
182	got := runTestProg(t, "testprog", "GCPhys")
183	want := "OK\n"
184	if got != want {
185		t.Fatalf("expected %q, but got %q", want, got)
186	}
187}
188
189func TestScavengedBitsCleared(t *testing.T) {
190	var mismatches [128]BitsMismatch
191	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
192		t.Errorf("uncleared scavenged bits")
193		for _, m := range mismatches[:n] {
194			t.Logf("\t@ address 0x%x", m.Base)
195			t.Logf("\t|  got: %064b", m.Got)
196			t.Logf("\t| want: %064b", m.Want)
197		}
198		t.FailNow()
199	}
200}
201
// acLink is a 1 MiB allocation unit used by TestArenaCollision to
// consume heap address space quickly.
type acLink struct {
	x [1 << 20]byte
}

// arenaCollisionSink keeps TestArenaCollision's allocations reachable
// so they are not reclaimed (or elided) while the test runs.
var arenaCollisionSink []*acLink
207
// TestArenaCollision checks that mheap.sysAlloc falls back gracefully
// when its preferred arena hint addresses are already occupied by other
// mappings. Because the test deliberately exhausts hint addresses, it
// re-executes itself in a child process (guarded by the
// TEST_ARENA_COLLISION environment variable) so the damaged heap layout
// cannot affect other tests.
func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		// Parent process: re-run just this test in a child with the
		// env flag set, then inspect the child's combined output.
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	// Child process: [start, end) ranges we mapped over; the heap must
	// never hand out an allocation inside any of them.
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
259
260var mallocSink uintptr
261
262func BenchmarkMalloc8(b *testing.B) {
263	var x uintptr
264	for i := 0; i < b.N; i++ {
265		p := new(int64)
266		x ^= uintptr(unsafe.Pointer(p))
267	}
268	mallocSink = x
269}
270
271func BenchmarkMalloc16(b *testing.B) {
272	var x uintptr
273	for i := 0; i < b.N; i++ {
274		p := new([2]int64)
275		x ^= uintptr(unsafe.Pointer(p))
276	}
277	mallocSink = x
278}
279
280func BenchmarkMallocTypeInfo8(b *testing.B) {
281	var x uintptr
282	for i := 0; i < b.N; i++ {
283		p := new(struct {
284			p [8 / unsafe.Sizeof(uintptr(0))]*int
285		})
286		x ^= uintptr(unsafe.Pointer(p))
287	}
288	mallocSink = x
289}
290
291func BenchmarkMallocTypeInfo16(b *testing.B) {
292	var x uintptr
293	for i := 0; i < b.N; i++ {
294		p := new(struct {
295			p [16 / unsafe.Sizeof(uintptr(0))]*int
296		})
297		x ^= uintptr(unsafe.Pointer(p))
298	}
299	mallocSink = x
300}
301
302var n = flag.Int("n", 1000, "number of goroutines")
303
304func BenchmarkGoroutineSelect(b *testing.B) {
305	quit := make(chan struct{})
306	read := func(ch chan struct{}) {
307		for {
308			select {
309			case _, ok := <-ch:
310				if !ok {
311					return
312				}
313			case <-quit:
314				return
315			}
316		}
317	}
318	benchHelper(b, *n, read)
319}
320
321func BenchmarkGoroutineBlocking(b *testing.B) {
322	read := func(ch chan struct{}) {
323		for {
324			if _, ok := <-ch; !ok {
325				return
326			}
327		}
328	}
329	benchHelper(b, *n, read)
330}
331
332func BenchmarkGoroutineForRange(b *testing.B) {
333	read := func(ch chan struct{}) {
334		for _ = range ch {
335		}
336	}
337	benchHelper(b, *n, read)
338}
339
// benchHelper starts n goroutines running read, each on its own
// buffered channel, then times b.N garbage collections while all
// goroutines are briefly woken between cycles. Only the GC calls inside
// the loop are timed; the wake-ups and sleeps are excluded.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	chans := make([]chan struct{}, n)
	for i := range chans {
		chans[i] = make(chan struct{}, 1)
		go read(chans[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		// Wake every goroutine, give them time to run, then time
		// a single GC cycle with them all freshly parked.
		for _, c := range chans {
			if c != nil {
				c <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Shut the goroutines down and let them exit before returning.
	for _, c := range chans {
		close(c)
	}
	time.Sleep(10 * time.Millisecond)
}
367
368func BenchmarkGoroutineIdle(b *testing.B) {
369	quit := make(chan struct{})
370	fn := func() {
371		<-quit
372	}
373	for i := 0; i < *n; i++ {
374		go fn()
375	}
376
377	GC()
378	b.ResetTimer()
379
380	for i := 0; i < b.N; i++ {
381		GC()
382	}
383
384	b.StopTimer()
385	close(quit)
386	time.Sleep(10 * time.Millisecond)
387}
388