// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"internal/testenv"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

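// TestNonblockRecvRace checks that a non-blocking receive racing with a
// concurrent close never falls through to the default case: the buffered
// value (or the closed state) must keep the receive case ready.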
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	} else {
		if runtime.GOARCH == "s390" {
			// Test uses too much address space on 31-bit S390.
			t.Skip("skipping long test on s390")
		}
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}

// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

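// TestSelectFairness checks that when several cases are ready, select picks
// among them uniformly enough: two channels that are always ready to receive
// should be chosen in roughly equal proportion over many trials.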
func TestSelectFairness(t *testing.T) {
	const trials = 10000
	if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
		testenv.SkipFlaky(t, 22047)
	}
	c1 := make(chan byte, trials+1)
	c2 := make(chan byte, trials+1)
	for i := 0; i < trials+1; i++ {
		c1 <- 1
		c2 <- 2
	}
	c3 := make(chan byte)
	c4 := make(chan byte)
	out := make(chan byte)
	done := make(chan byte)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			var b byte
			select {
			case b = <-c3:
			case b = <-c4:
			case b = <-c1:
			case b = <-c2:
			}
			select {
			case out <- b:
			case <-done:
				return
			}
		}
	}()
	cnt1, cnt2 := 0, 0
	for i := 0; i < trials; i++ {
		switch b := <-out; b {
		case 1:
			cnt1++
		case 2:
			cnt2++
		default:
			t.Fatalf("unexpected value %d on channel", b)
		}
	}
	// If the select in the goroutine is fair,
	// cnt1 and cnt2 should be about the same value.
	// With 10,000 trials, the expected margin of error at
	// a confidence level of six nines is 4.891676 / (2 * Sqrt(10000)).
	r := float64(cnt1) / trials
	e := math.Abs(r - 0.5)
	t.Log(cnt1, cnt2, r, e)
	if e > 4.891676/(2*math.Sqrt(trials)) {
		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
	}
	close(done)
	wg.Wait()
}

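// TestChanSendInterface is a smoke test that sending interface values on a
// channel, both directly and from select cases, works without crashing.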
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

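// TestPseudoRandomSend checks that when both send cases of a select are
// ready, the chosen case is pseudorandom rather than fixed: over n sends,
// both values should show up a non-trivial number of times.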
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

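// TestMultiConsumer checks that many goroutines ranging over a shared channel
// together receive every sent value exactly once, by comparing the count and
// sum of the forwarded values against what the feeder sent.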
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

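// selectSink receives large allocations in TestSelectStackAdjust to force
// garbage collections; it is cleared once enough GCs have run.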
var selectSink interface{}

func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
		runtime.Gosched()
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	selectSink = nil

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}

type struct0 struct{}

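// BenchmarkMakeChan measures channel allocation for element types of
// various sizes, each with a small buffer.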
func BenchmarkMakeChan(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		var x chan byte
		for i := 0; i < b.N; i++ {
			x = make(chan byte, 8)
		}
		close(x)
	})
	b.Run("Int", func(b *testing.B) {
		var x chan int
		for i := 0; i < b.N; i++ {
			x = make(chan int, 8)
		}
		close(x)
	})
	b.Run("Ptr", func(b *testing.B) {
		var x chan *byte
		for i := 0; i < b.N; i++ {
			x = make(chan *byte, 8)
		}
		close(x)
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("0", func(b *testing.B) {
			var x chan struct0
			for i := 0; i < b.N; i++ {
				x = make(chan struct0, 8)
			}
			close(x)
		})
		b.Run("32", func(b *testing.B) {
			var x chan struct32
			for i := 0; i < b.N; i++ {
				x = make(chan struct32, 8)
			}
			close(x)
		})
		b.Run("40", func(b *testing.B) {
			var x chan struct40
			for i := 0; i < b.N; i++ {
				x = make(chan struct40, 8)
			}
			close(x)
		})
	})
}

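// BenchmarkChanNonblocking measures a non-blocking (select with default)
// receive from an empty channel, run from many goroutines in parallel.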
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

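// BenchmarkSelectUncontended measures select with no cross-goroutine
// contention: each goroutine ping-pongs a token between its own pair of
// buffered channels.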
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

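// BenchmarkSelectSyncContended measures select over shared unbuffered
// channels: helper goroutines send on three channels while the parallel
// benchmark goroutines receive from them.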
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

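// BenchmarkSelectAsyncContended measures select over shared buffered
// channels: each goroutine seeds one token and the tokens ping-pong between
// the two channels under contention.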
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

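// BenchmarkSelectNonblock measures selects with a default case whose channel
// operations essentially never proceed, so the default path is taken.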
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

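// BenchmarkChanUncontended measures buffered channel sends and receives with
// no contention: each goroutine fills and drains its own channel.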
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

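// BenchmarkChanContended is the contended counterpart: all goroutines fill
// and drain a single shared buffered channel.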
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

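// benchmarkChanSync measures synchronous ping-pong between two goroutines
// over an unbuffered channel, with an optional amount of localWork between
// channel operations.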
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}

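// benchmarkChanProdCons measures a producer/consumer pattern: one producer
// and one consumer goroutine per P share a channel of capacity chanSize and
// spin for localWork iterations of busy work per message; a value of 0 tells
// a consumer to stop.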
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

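// BenchmarkSelectProdCons is the select-based variant of the
// producer/consumer benchmark: both sides use a select that also includes a
// timer and a close channel which are not expected to fire during the
// measurement.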
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

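// BenchmarkChanCreation measures allocating a small buffered channel plus one
// send and one receive on it.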
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

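// BenchmarkChanSem measures using a buffered channel of empty structs as a
// semaphore: each iteration acquires and releases one slot.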
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

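// BenchmarkChanPopular measures select when one channel is "popular": many
// goroutines all include the same shared channel in their select and are
// woken b.N times through their private channels.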
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}

var (
	alwaysFalse = false
	workSink    = 0
)

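// localWork spins for w iterations of trivial arithmetic; the alwaysFalse
// check publishes the result into workSink so the compiler cannot optimize
// the loop away.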
func localWork(w int) {
	foo := 0
	for i := 0; i < w; i++ {
		foo /= (foo + 1)
	}
	if alwaysFalse {
		workSink += foo
	}
}