// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

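// stop tells perpetuumMobile goroutines to stop respawning themselves.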
var stop = make(chan bool, 1)

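// perpetuumMobile respawns itself in a fresh goroutine until something
// is sent on stop, keeping the scheduler constantly busy.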
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

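// TestStopTheWorldDeadlock runs GC and GOMAXPROCS changes concurrently
// while goroutines are continuously created, checking that repeated
// stop-the-world operations do not deadlock the runtime.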
func TestStopTheWorldDeadlock(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

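// testYieldProgress checks that a goroutine spinning on runtime.Gosched
// (optionally while locked to its OS thread) cannot starve the goroutine
// that signals it to stop.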
func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

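// TestYieldLocked checks that Gosched and Sleep work from a goroutine
// that stays locked to its OS thread for its whole lifetime.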
func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, which affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices, there are no records for localhost,
				// see https://golang.org/issues/14486.
				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

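// TestBlockLocked checks that a goroutine locked to an OS thread can
// repeatedly block on and be woken from a channel.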
func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

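// TestTimerFairness checks that a timer fires even while two other
// goroutines keep a channel send permanently ready.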
func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

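// TestTimerFairness2 is like TestTimerFairness, but the competing
// goroutines also issue syscalls and both send and receive on the
// shared channel.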
func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("gccgo does not implement preemption")
	}
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("gccgo does not implement preemption")
	}
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}

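// TestPingPongHog checks that a pair of goroutines that do a lot of
// work per wakeup cannot starve a pair that does very little, i.e.
// that scheduler time slicing stays roughly fair on a single P.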
func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	const factor = 5
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

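// BenchmarkPingPongHog measures channel ping-pong latency between two
// goroutines while a third goroutine spins, hogging the single P.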
func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

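// stackGrowthRecursive recurses i levels deep; the large pad array in
// each frame forces repeated stack growth. Reading pad[0] keeps the
// array from being optimized away.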
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)
	}
}

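// TestPreemptSplitBig checks that a goroutine can be preempted at the
// stack-split check in the prologue of a function with a big frame
// (see big, bigframe, and small below).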
func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	t.Skip("gccgo does not implement preemption")
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		default:
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

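// The next three tests exercise the scheduler's per-P local run queues
// through test hooks exported by package runtime.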
func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

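// benchmarkCreateGoroutines creates b.N goroutines split across procs
// chains, each goroutine spawning its successor.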
func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

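// BenchmarkCreateGoroutinesCapture measures goroutine creation and the
// allocations made when the closure captures surrounding variables.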
func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

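// Matrix is a dense square matrix used by the matmult benchmark.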
type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

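// matmult computes C += A*B over the half-open index ranges
// [i0,i1) x [j0,j1) x [k0,k1), recursively splitting the largest
// dimension until all are below threshold; the i and j splits run
// their halves in parallel, and done (if non-nil) is signaled at the end.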
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

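// TestLockOSThreadNesting checks that LockOSThread and UnlockOSThread
// calls nest: the thread stays locked until the outermost lock is released.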
func TestLockOSThreadNesting(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no threads on wasm yet")
	}

	go func() {
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}