// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("does not test anything; https://golang.org/issue/23343")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

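// TestGcDeepNesting stores a pointer at the innermost level of a deeply
// nested array type, forces a collection, and checks that the stored value
// survives.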
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

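// TestGcMapIndirection repeatedly inserts large keys into a map while the
// collector runs aggressively (GOGC=1); keys and values this large are
// stored indirectly by the map implementation.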
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

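// TestGcArraySlice builds a linked list in which each node's slice field
// aliases the previous node's array, forcing a GC after every link, then
// checks that no node was corrupted.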
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

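// TestGcRescan links nodes both through an embedded struct and through the
// outer struct, forcing a GC after each allocation, then checks the list
// for corruption.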
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

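// TestGcLastTime checks that MemStats.LastGC falls between timestamps taken
// just before and just after a forced collection, and sanity-checks the
// recorded pause time.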
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

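// hugeSink keeps allocations reachable from a package-level variable so the
// compiler cannot optimize them away.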
var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types even on the weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit systems.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

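// The BenchmarkSetType* benchmarks below feed values of various shapes and
// sizes to benchSetType, which measures the cost of setting up per-object
// type (pointer bitmap) metadata in the runtime.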
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

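// benchSetType reports throughput based on the size of the pointed-to value
// or slice backing store, then has the runtime repeatedly set type metadata
// for x.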
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

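// BenchmarkAllocation measures allocation of small pointer-bearing objects
// across GOMAXPROCS goroutines; each unit of work allocates 1000 objects.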
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

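// TestPrintGC stresses the interaction between print and the collector by
// running runtime.GC in a tight loop concurrently with many deferred print
// calls.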
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

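// The helpers below exercise interface type switches, type assertions, and
// interface comparisons; testIfaceEqual is marked //go:noinline so the
// comparison is not optimized away.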
func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

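// TestReadMemStats cross-checks the regular MemStats accounting against a
// slower recomputation exported by the runtime for testing.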
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

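// logDiff recursively compares got and want, logging the path of every
// field or element that differs.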
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

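// writeBarrierBenchmark runs f while a background goroutine keeps the
// garbage collector running continuously, so the write barrier stays
// active for the entire timed region.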
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

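// BenchmarkScanStackNoLocals measures back-to-back collections while ten
// goroutines sit parked at the bottom of deep recursive stacks, stressing
// stack scanning.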
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

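// countpwg recurses until *n reaches zero, then signals readiness and
// blocks on the teardown channel, keeping its deep stack alive.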
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}