// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("does not test anything; https://golang.org/issue/23343")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	if runtime.GOOS == "windows" {
		t.Skip("skipping test; GOOS=windows http://golang.org/issue/27156")
	}
	if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" {
		t.Skip("skipping test; GOOS=linux GOARCH=arm64 https://github.com/golang/go/issues/27636")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

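// TestGcDeepNesting checks that a pointer stored ten array levels deep keeps
// its referent alive across a collection.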
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Print the pointer so that escape analysis cannot prove a is unused;
	// this forces new(T) to be allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

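// TestGcMapIndirection stresses the collector with a map whose large keys
// and values are stored indirectly, while an aggressive GC percent keeps
// collections frequent.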
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

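// TestGcArraySlice builds a linked list whose nodes alias each other's
// one-byte buffers through slices, collecting after every insertion, and
// then verifies that no buffer was corrupted.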
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

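// TestGcRescan links heap objects through both an embedded struct and an
// outer pointer, forcing a GC per iteration, and then checks that the values
// reachable from the list survived intact.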
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

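// TestGcLastTime checks that MemStats.LastGC falls inside the window in
// which runtime.GC ran, and sanity-checks the recorded pause for that cycle.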
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	// PauseNs is a circular buffer of recent pause times; the most
	// recent entry is at (NumGC+255)%256.
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can handle these huge types
	// even on the weakest machines. The types are never allocated at
	// runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit systems.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

/*
func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}
*/

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

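// The NodeN types below pair two pointers with scalar payloads of varying
// sizes, exercising GC type-metadata setup across a range of object sizes
// and pointer layouts (Node64Dead puts the pointers first, leaving a
// pointer-free tail).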
type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

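// benchSetType sets the benchmark's byte throughput to the size of the
// allocated element (or slice of elements) and then invokes the runtime's
// BenchSetType test hook.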
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	// BenchSetType is a test-only hook exported by the runtime (in
	// export_test.go) that times writing GC type metadata for b.N
	// values of x's type.
	runtime.BenchSetType(b.N, x)
}

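// BenchmarkAllocation measures small-object allocation throughput across
// GOMAXPROCS goroutines; each unit of work allocates 1000 two-pointer
// structs.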
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

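// TestPrintGC runs GC continuously on one goroutine while another performs
// many deferred prints, shaking out interactions between printing and
// collection.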
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

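// The helpers below exercise interface type switches, assertions, and
// equality comparisons against a concrete value.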
func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

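// TestReadMemStats cross-checks the values reported by ReadMemStats against
// a slower, independent accounting exposed by the runtime for testing.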
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

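// logDiff walks two reflect.Values of the same type in parallel and logs any
// leaf fields that differ, prefixing each report with its field path.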
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

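// BenchmarkReadMemStats measures the cost of ReadMemStats against a roughly
// 100 MB live heap kept reachable through hugeSink.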
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

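// writeBarrierBenchmark runs f while a background goroutine keeps the
// collector, and therefore the write barrier, continuously active.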
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

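// BenchmarkScanStackNoLocals measures GC stack scanning over goroutines with
// deep stacks (grown by countpwg) but almost no live locals.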
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

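// countpwg grows the goroutine's stack by recursing *n times, then signals
// readiness and blocks until teardown.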
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}