// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"regexp"
	. "runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
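	// GOMAXPROCS returns the previous setting, so this both raises
	// parallelism to BatchSize and arranges to restore the old value
	// when the test returns.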
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
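		// Each goroutine recurses RecursionDepth frames deep, passing
		// an ArraySize-byte array by value so that every frame
		// consumes at least ArraySize bytes of stack.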
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	if GOARCH == "wasm" {
		t.Skip("fails on wasm (too slow?)")
	}

	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()

		if Compiler == "gccgo" && !*Pusestackmaps {
			// This test is flaky for gccgo's
			// conservative stack scanning.
			return
		}

		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
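			// Drop the only reference to s so that the finalizer
			// can run on the next GC.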
			s = nil
			done <- true
		}()
		<-done
		GC()

		timeout := 20 * time.Second
		if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
			scale, err := strconv.Atoi(s)
			if err == nil {
				timeout *= time.Duration(scale)
			}
		}

		select {
		case <-done:
		case <-time.After(timeout):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

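// growStack calls growStackIter at increasing depths and checks after each
// call that the stack-allocated counter was maintained correctly, i.e. that
// pointers into the stack survived any copies. If progress is non-nil, it is
// updated each iteration so that a timed-out caller can report how far the
// function got.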
func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// growStackIter is a named function rather than an anonymous func so that the
// compiler's escape analysis can place x on the stack (and stack growth must
// then update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

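// growStackWithCallback recurses to a range of stack depths, invoking cb at
// the bottom of each recursion so that cb runs near many different stack
// boundaries.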
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

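// bigBuf provides a >4KB defer argument, large enough that running the
// deferred call near the bottom of the stack forces the stack to grow.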
type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

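// setBig is like set but carries a 4KB argument, so invoking it as a deferred
// call may require growing the stack first.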
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//  deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

// TestPanicUseStack checks that a chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
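	// b[99] is always zero, but using it below keeps b live so that
	// each frame really occupies about 1KB of stack.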
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

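// growing services stack-growth requests from c, signaling on done after
// each request and signaling once more when c is closed.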
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4   // rounds
		G = 200 // goroutines per round
		S = 5   // stack sizes per goroutine: 1<<s KB for s in [0, S)
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

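// count1 through count23 form a cycle of 23 distinct functions, so the deep
// stack being copied contains many different frames rather than one repeated
// frame. This is presumably what defeats the runtime's pcvalue cache during
// the copy (hence "NoCache" above), unlike the single-function recursion in
// BenchmarkStackCopy.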
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	if Compiler == "gccgo" {
		t.Skip("gccgo currently uses different, meaningless, wrapper names")
	}

	t.Run("sigpanic", func(t *testing.T) {
		// nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	if Compiler == "gccgo" {
		t.Skip("gccgo currently uses different, meaningless, wrapper names")
	}
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

func TestTracebackSystemstack(t *testing.T) {
	if Compiler == "gccgo" {
		t.Skip("test currently fails with gccgo")
	}
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
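	// TracebackSystemstack is a hook exported from the runtime for
	// testing: it performs the requested number of nested systemstack
	// calls and then records a traceback.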
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	if Compiler == "gccgo" {
		t.Skip("gccgo currently doesn't generate full ancestor tracebacks")
	}
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that defer closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // processors
	N := 10000 // iterations
	D := 200   // stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
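		// (The compiler cannot statically bound the number of defers
		// created in a loop, so these records must be heap-allocated.)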
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte)) // force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
var escapeMe = func(x interface{}) {}