1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	. "runtime"
9	"strings"
10	"sync"
11	"sync/atomic"
12	"testing"
13	"time"
14)
15
// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
//
// It runs BatchCount batches of BatchSize goroutines, each recursing
// RecursionDepth levels with an ArraySize-byte argument to force stack
// growth, then checks StackSys/StackInuse deltas against loose bounds.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	// Run the batches with GOMAXPROCS=BatchSize so they can execute in
	// parallel; the deferred call restores the previous value on exit.
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				// Self-recursive closure; the array argument pads each
				// frame so the stack grows quickly.
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}
72
// Test stack growing in different contexts: a plain goroutine, a
// goroutine locked to its OS thread, and a finalizer goroutine.
func TestStackGrowth(t *testing.T) {
	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStack()
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack()
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var started uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				atomic.StoreUint32(&started, 1)
				growStack()
				done <- true
			})
			s = nil // drop the only reference so s becomes collectable
			done <- true
		}()
		<-done // wait until the finalizer has been registered
		GC()   // collect s, queueing its finalizer
		select {
		case <-done:
		case <-time.After(20 * time.Second):
			// Distinguish "finalizer never ran" from "finalizer hung".
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			}
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}
128
129// ... and in init
130//func init() {
131//	growStack()
132//}
133
134func growStack() {
135	n := 1 << 10
136	if testing.Short() {
137		n = 1 << 8
138	}
139	for i := 0; i < n; i++ {
140		x := 0
141		growStackIter(&x, i)
142		if x != i+1 {
143			panic("stack is corrupted")
144		}
145	}
146	GC()
147}
148
// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on stack (and subsequently stack growth update the pointer).
//
// Each frame writes n+1 through the pointer it was given and verifies
// the write after the recursive call returns, so a mis-adjusted
// pointer after a stack copy is detected immediately.
func growStackIter(p *int, n int) {
	*p = n + 1
	if n == 0 {
		GC()
		return
	}
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}
164
165func TestStackGrowthCallback(t *testing.T) {
166	t.Parallel()
167	var wg sync.WaitGroup
168
169	// test stack growth at chan op
170	wg.Add(1)
171	go func() {
172		defer wg.Done()
173		c := make(chan int, 1)
174		growStackWithCallback(func() {
175			c <- 1
176			<-c
177		})
178	}()
179
180	// test stack growth at map op
181	wg.Add(1)
182	go func() {
183		defer wg.Done()
184		m := make(map[int]int)
185		growStackWithCallback(func() {
186			_, _ = m[1]
187			m[1] = 1
188		})
189	}()
190
191	// test stack growth at goroutine creation
192	wg.Add(1)
193	go func() {
194		defer wg.Done()
195		growStackWithCallback(func() {
196			done := make(chan bool)
197			go func() {
198				done <- true
199			}()
200			<-done
201		})
202	}()
203	wg.Wait()
204}
205
// growStackWithCallback recurses to progressively deeper depths
// (0 .. 1<<10-1), invoking cb at the bottom of each descent, so cb is
// executed across a range of stack depths.
func growStackWithCallback(cb func()) {
	var descend func(remaining int)
	descend = func(remaining int) {
		if remaining > 0 {
			descend(remaining - 1)
			return
		}
		cb()
	}
	for depth := 0; depth < 1<<10; depth++ {
		descend(depth)
	}
}
219
// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.

// set stores x through p. It is deferred with a pointer to a
// stack-resident variable, so the runtime must adjust the saved
// argument pointer whenever the stack is copied.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	// Deferred calls run LIFO, so set(&y, 42) executes before this
	// check does.
	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack() // force stack copies while the defers hold &y
}
236
237type bigBuf [4 * 1024]byte
238
239// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
240// stack grows as part of starting the deferred function. It calls Goexit at various
241// stack depths, forcing the deferred function (with >4kB of args) to be run at
242// the bottom of the stack. The goal is to find a stack depth less than 4kB from
243// the end of the stack. Each trial runs in a different goroutine so that an earlier
244// stack growth does not invalidate a later attempt.
245func TestDeferPtrsGoexit(t *testing.T) {
246	for i := 0; i < 100; i++ {
247		c := make(chan int, 1)
248		go testDeferPtrsGoexit(c, i)
249		if n := <-c; n != 42 {
250			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
251		}
252	}
253}
254
// testDeferPtrsGoexit uses about i KB of stack and then calls Goexit,
// so the deferred setBig (with its >4kB argument frame) runs near the
// bottom of the stack. It reports the resulting y on c: 42 means the
// deferred &y pointer survived any stack adjustment.
func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	// Defers run LIFO: setBig writes y first, then this func reports it.
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}
263
// setBig stores x through p. The bigBuf parameter exists only to make
// the deferred call's argument frame large (>4kB); b is otherwise unused.
func setBig(p *int, x int, b bigBuf) {
	*p = x
}
267
268// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
269// of Goexit to run the Defers. Those two are different execution paths
270// in the runtime.
271func TestDeferPtrsPanic(t *testing.T) {
272	for i := 0; i < 100; i++ {
273		c := make(chan int, 1)
274		go testDeferPtrsGoexit(c, i)
275		if n := <-c; n != 42 {
276			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
277		}
278	}
279}
280
// testDeferPtrsPanic is the panic-based analog of testDeferPtrsGoexit:
// it uses about i KB of stack and then panics, so the deferred setBig
// runs during panic unwinding. It sends the observed y on c (42 on
// success), or -1 if no panic was recovered.
func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		// This defer runs last; by now setBig should have written y.
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}
293
// TestPanicUseStack checks that a chain of Panic structs on the stack are
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
//
// Three panics are nested, with extra stack consumed (useStackAndCall)
// and stack walks forced (Callers) between them, so the runtime must
// keep all in-flight panic records consistent across stack copies.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}
319
// TestPanicFar builds a deep stack, panics from far down it while the
// GC is busy, recovers, and then walks the stack to check consistency
// after the unwind.
func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}
342
// xtreeNode is a binary tree node used to give the GC a nontrivial
// heap to trace.
type xtreeNode struct {
	l, r *xtreeNode
}

// makeTree returns a complete binary tree of depth d, i.e. one with
// 2^(d+1)-1 nodes.
func makeTree(d int) *xtreeNode {
	if d > 0 {
		return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
	}
	return new(xtreeNode)
}
353
// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n > 0 {
		var pad [1024]byte // makes frame about 1KB
		// pad[99] is always zero; reading it keeps pad live on the stack.
		useStackAndCall(n-1+int(pad[99]), f)
		return
	}
	f()
}
363
// useStack consumes about n KB of stack (see useStackAndCall) and
// returns, doing no other work.
func useStack(n int) {
	useStackAndCall(n, func() {})
}
367
// growing services stack-growth requests: for each depth n received on
// c it uses about n KB of stack and signals done. When c is closed it
// signals done one final time and exits.
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}
375
376func TestStackCache(t *testing.T) {
377	// Allocate a bunch of goroutines and grow their stacks.
378	// Repeat a few times to test the stack cache.
379	const (
380		R = 4
381		G = 200
382		S = 5
383	)
384	for i := 0; i < R; i++ {
385		var reqchans [G]chan int
386		done := make(chan struct{})
387		for j := 0; j < G; j++ {
388			reqchans[j] = make(chan int)
389			go growing(reqchans[j], done)
390		}
391		for s := 0; s < S; s++ {
392			for j := 0; j < G; j++ {
393				reqchans[j] <- 1 << uint(s)
394			}
395			for j := 0; j < G; j++ {
396				<-done
397			}
398		}
399		for j := 0; j < G; j++ {
400			close(reqchans[j])
401		}
402		for j := 0; j < G; j++ {
403			<-done
404		}
405	}
406}
407
// TestStackOutput checks that Stack for the current goroutine begins
// with the standard "goroutine " header.
func TestStackOutput(t *testing.T) {
	buf := make([]byte, 1024)
	n := Stack(buf, false)
	stk := string(buf[:n])
	if strings.HasPrefix(stk, "goroutine ") {
		return
	}
	t.Errorf("Stack (len %d):\n%s", len(stk), stk)
	t.Errorf("Stack output should begin with \"goroutine \"")
}
416
// TestStackAllOutput is like TestStackOutput but requests traces for
// all goroutines (Stack with all=true).
func TestStackAllOutput(t *testing.T) {
	buf := make([]byte, 1024)
	n := Stack(buf, true)
	stk := string(buf[:n])
	if strings.HasPrefix(stk, "goroutine ") {
		return
	}
	t.Errorf("Stack (len %d):\n%s", len(stk), stk)
	t.Errorf("Stack output should begin with \"goroutine \"")
}
425
426func TestStackPanic(t *testing.T) {
427	// Test that stack copying copies panics correctly. This is difficult
428	// to test because it is very unlikely that the stack will be copied
429	// in the middle of gopanic. But it can happen.
430	// To make this test effective, edit panic.go:gopanic and uncomment
431	// the GC() call just before freedefer(d).
432	defer func() {
433		if x := recover(); x == nil {
434			t.Errorf("recover failed")
435		}
436	}()
437	useStack(32)
438	panic("test panic")
439}
440
// BenchmarkStackCopy measures stack growth/copying cost: each
// iteration runs a fresh goroutine that recurses one million levels
// deep via the self-recursive count.
func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}
451
// count returns n by recursing n levels deep; the recursion itself
// (not the result) is the point, as it forces stack growth.
func count(n int) int {
	if n > 0 {
		return 1 + count(n-1)
	}
	return 0
}
458
// BenchmarkStackCopyNoCache is like BenchmarkStackCopy, but the deep
// recursion goes through the count1..count23 cycle of distinct
// functions rather than a single self-recursive one.
func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}
469
// count1 through count23 form a mutually recursive cycle
// (count1 -> count2 -> ... -> count23 -> count1). Each hop decrements
// n and contributes 1 to the result, so count1(n) == n, with the
// recursion spread across many distinct functions for
// BenchmarkStackCopyNoCache.
func count1(n int) int {
	if n > 0 {
		return 1 + count2(n-1)
	}
	return 0
}

func count2(n int) int {
	if n > 0 {
		return 1 + count3(n-1)
	}
	return 0
}

func count3(n int) int {
	if n > 0 {
		return 1 + count4(n-1)
	}
	return 0
}

func count4(n int) int {
	if n > 0 {
		return 1 + count5(n-1)
	}
	return 0
}

func count5(n int) int {
	if n > 0 {
		return 1 + count6(n-1)
	}
	return 0
}

func count6(n int) int {
	if n > 0 {
		return 1 + count7(n-1)
	}
	return 0
}

func count7(n int) int {
	if n > 0 {
		return 1 + count8(n-1)
	}
	return 0
}

func count8(n int) int {
	if n > 0 {
		return 1 + count9(n-1)
	}
	return 0
}

func count9(n int) int {
	if n > 0 {
		return 1 + count10(n-1)
	}
	return 0
}

func count10(n int) int {
	if n > 0 {
		return 1 + count11(n-1)
	}
	return 0
}

func count11(n int) int {
	if n > 0 {
		return 1 + count12(n-1)
	}
	return 0
}

func count12(n int) int {
	if n > 0 {
		return 1 + count13(n-1)
	}
	return 0
}

func count13(n int) int {
	if n > 0 {
		return 1 + count14(n-1)
	}
	return 0
}

func count14(n int) int {
	if n > 0 {
		return 1 + count15(n-1)
	}
	return 0
}

func count15(n int) int {
	if n > 0 {
		return 1 + count16(n-1)
	}
	return 0
}

func count16(n int) int {
	if n > 0 {
		return 1 + count17(n-1)
	}
	return 0
}

func count17(n int) int {
	if n > 0 {
		return 1 + count18(n-1)
	}
	return 0
}

func count18(n int) int {
	if n > 0 {
		return 1 + count19(n-1)
	}
	return 0
}

func count19(n int) int {
	if n > 0 {
		return 1 + count20(n-1)
	}
	return 0
}

func count20(n int) int {
	if n > 0 {
		return 1 + count21(n-1)
	}
	return 0
}

func count21(n int) int {
	if n > 0 {
		return 1 + count22(n-1)
	}
	return 0
}

func count22(n int) int {
	if n > 0 {
		return 1 + count23(n-1)
	}
	return 0
}

func count23(n int) int {
	if n > 0 {
		return 1 + count1(n-1)
	}
	return 0
}
630