1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// GOMAXPROCS=10 go test
6
7package sync_test
8
9import (
10	"runtime"
11	. "sync"
12	"testing"
13)
14
15func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
16	for i := 0; i < loops; i++ {
17		Runtime_Semacquire(s)
18		Runtime_Semrelease(s)
19	}
20	cdone <- true
21}
22
23func TestSemaphore(t *testing.T) {
24	s := new(uint32)
25	*s = 1
26	c := make(chan bool)
27	for i := 0; i < 10; i++ {
28		go HammerSemaphore(s, 1000, c)
29	}
30	for i := 0; i < 10; i++ {
31		<-c
32	}
33}
34
35func BenchmarkUncontendedSemaphore(b *testing.B) {
36	s := new(uint32)
37	*s = 1
38	HammerSemaphore(s, b.N, make(chan bool, 2))
39}
40
41func BenchmarkContendedSemaphore(b *testing.B) {
42	b.StopTimer()
43	s := new(uint32)
44	*s = 1
45	c := make(chan bool)
46	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
47	b.StartTimer()
48
49	go HammerSemaphore(s, b.N/2, c)
50	go HammerSemaphore(s, b.N/2, c)
51	<-c
52	<-c
53}
54
55func HammerMutex(m *Mutex, loops int, cdone chan bool) {
56	for i := 0; i < loops; i++ {
57		m.Lock()
58		m.Unlock()
59	}
60	cdone <- true
61}
62
63func TestMutex(t *testing.T) {
64	m := new(Mutex)
65	c := make(chan bool)
66	for i := 0; i < 10; i++ {
67		go HammerMutex(m, 1000, c)
68	}
69	for i := 0; i < 10; i++ {
70		<-c
71	}
72}
73
74func TestMutexPanic(t *testing.T) {
75	defer func() {
76		if recover() == nil {
77			t.Fatalf("unlock of unlocked mutex did not panic")
78		}
79	}()
80
81	var mu Mutex
82	mu.Lock()
83	mu.Unlock()
84	mu.Unlock()
85}
86
// BenchmarkMutexUncontended measures Lock/Unlock when every goroutine owns
// its own mutex, so there is never any contention.
func BenchmarkMutexUncontended(b *testing.B) {
	// Pad each mutex so that different goroutines' locks do not share a
	// cache line.
	type PaddedMutex struct {
		Mutex
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var m PaddedMutex
		for pb.Next() {
			m.Lock()
			m.Unlock()
		}
	})
}
100
// benchmarkMutex measures Lock/Unlock on one shared Mutex.
// slack oversubscribes the procs with extra goroutines; work adds a bit of
// local computation between critical sections.
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		acc := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if !work {
				continue
			}
			// Cheap local busywork that the compiler cannot remove.
			for j := 0; j < 100; j++ {
				acc *= 2
				acc /= 2
			}
		}
		_ = acc
	})
}
121
// BenchmarkMutex measures a contended Mutex with default parallelism and no
// extra per-iteration work.
func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}
125
// BenchmarkMutexSlack measures a contended Mutex with 10x goroutine
// oversubscription and no extra per-iteration work.
func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}
129
// BenchmarkMutexWork measures a contended Mutex with default parallelism and
// some local work between critical sections.
func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}
133
// BenchmarkMutexWorkSlack measures a contended Mutex with 10x goroutine
// oversubscription and some local work between critical sections.
func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}
137
func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// non-profitable and allows to confirm that spinning does not do harm.
	// To achieve this we create excess of goroutines most of which do local work.
	// These goroutines yield during local work, so that switching from
	// a blocked goroutine to other goroutines is profitable.
	// As a matter of fact, this benchmark still triggers some spinning in the mutex.
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		done := make(chan bool)
		var local [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			// Three iterations of local work for every one critical section.
			if i%4 != 0 {
				for j := 0; j < len(local); j += 4 {
					local[j]++
				}
				// Elaborate way to say runtime.Gosched
				// that does not put the goroutine onto global runq.
				go func() {
					done <- true
				}()
				<-done
			} else {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			}
		}
	})
}
171
func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create a goroutine per-proc.
	// These goroutines access considerable amount of local data so that
	// unnecessary rescheduling is penalized by cache misses.
	var m Mutex
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var scratch [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock()
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			// Touch a large local array to fill the cache between
			// critical sections.
			for j := 0; j < len(scratch); j += 4 {
				scratch[j]++
			}
		}
	})
}
192