1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// GOMAXPROCS=10 go test
6
7package sync_test
8
9import (
10	"fmt"
11	"runtime"
12	. "sync"
13	"sync/atomic"
14	"testing"
15)
16
17// There is a modified copy of this file in runtime/rwmutex_test.go.
18// If you make any changes here, see if you should make them there.
19
// parallelReader acquires a read lock on m, reports via clocked that the
// lock is held, then blocks until released via cunlock. It signals cdone
// on exit so the caller can wait for all readers to finish.
// The point: many parallelReaders must be able to hold the read lock at
// the same time (the caller waits for all clocked signals before sending
// any cunlock).
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
	m.RLock()
	clocked <- true
	<-cunlock
	m.RUnlock()
	cdone <- true
}
27
28func doTestParallelReaders(numReaders, gomaxprocs int) {
29	runtime.GOMAXPROCS(gomaxprocs)
30	var m RWMutex
31	clocked := make(chan bool)
32	cunlock := make(chan bool)
33	cdone := make(chan bool)
34	for i := 0; i < numReaders; i++ {
35		go parallelReader(&m, clocked, cunlock, cdone)
36	}
37	// Wait for all parallel RLock()s to succeed.
38	for i := 0; i < numReaders; i++ {
39		<-clocked
40	}
41	for i := 0; i < numReaders; i++ {
42		cunlock <- true
43	}
44	// Wait for the goroutines to finish.
45	for i := 0; i < numReaders; i++ {
46		<-cdone
47	}
48}
49
50func TestParallelReaders(t *testing.T) {
51	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
52	doTestParallelReaders(1, 4)
53	doTestParallelReaders(3, 4)
54	doTestParallelReaders(4, 2)
55}
56
57func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
58	for i := 0; i < num_iterations; i++ {
59		rwm.RLock()
60		n := atomic.AddInt32(activity, 1)
61		if n < 1 || n >= 10000 {
62			panic(fmt.Sprintf("wlock(%d)\n", n))
63		}
64		for i := 0; i < 100; i++ {
65		}
66		atomic.AddInt32(activity, -1)
67		rwm.RUnlock()
68	}
69	cdone <- true
70}
71
72func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
73	for i := 0; i < num_iterations; i++ {
74		rwm.Lock()
75		n := atomic.AddInt32(activity, 10000)
76		if n != 10000 {
77			panic(fmt.Sprintf("wlock(%d)\n", n))
78		}
79		for i := 0; i < 100; i++ {
80		}
81		atomic.AddInt32(activity, -10000)
82		rwm.Unlock()
83	}
84	cdone <- true
85}
86
87func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
88	runtime.GOMAXPROCS(gomaxprocs)
89	// Number of active readers + 10000 * number of active writers.
90	var activity int32
91	var rwm RWMutex
92	cdone := make(chan bool)
93	go writer(&rwm, num_iterations, &activity, cdone)
94	var i int
95	for i = 0; i < numReaders/2; i++ {
96		go reader(&rwm, num_iterations, &activity, cdone)
97	}
98	go writer(&rwm, num_iterations, &activity, cdone)
99	for ; i < numReaders; i++ {
100		go reader(&rwm, num_iterations, &activity, cdone)
101	}
102	// Wait for the 2 writers and all readers to finish.
103	for i := 0; i < 2+numReaders; i++ {
104		<-cdone
105	}
106}
107
108func TestRWMutex(t *testing.T) {
109	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
110	n := 1000
111	if testing.Short() {
112		n = 5
113	}
114	HammerRWMutex(1, 1, n)
115	HammerRWMutex(1, 3, n)
116	HammerRWMutex(1, 10, n)
117	HammerRWMutex(4, 1, n)
118	HammerRWMutex(4, 3, n)
119	HammerRWMutex(4, 10, n)
120	HammerRWMutex(10, 1, n)
121	HammerRWMutex(10, 3, n)
122	HammerRWMutex(10, 10, n)
123	HammerRWMutex(10, 5, n)
124}
125
// TestRLocker verifies that the Locker returned by RWMutex.RLocker forwards
// Lock/Unlock to RLock/RUnlock: it can be held by multiple readers at once,
// and it excludes (and is excluded by) the write lock.
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			// Take the read lock twice through the RLocker: if this
			// were an exclusive lock, the second call would deadlock.
			rl.Lock()
			rl.Lock()
			rlocked <- true
			// Now queue up for the write lock; it must not be granted
			// until the main goroutine releases both read locks.
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		// One read lock is still held, so the writer must still be
		// blocked — wlocked must be empty.
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		// Both read locks released: the writer can now proceed.
		<-wlocked
		// While the write lock is held, the next iteration's RLocker
		// Lock must block — rlocked must be empty.
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}
160
// BenchmarkRWMutexUncontended measures lock/unlock cost when each goroutine
// has its own RWMutex, i.e. with no contention.
func BenchmarkRWMutexUncontended(b *testing.B) {
	// Pad the mutex so each goroutine's instance sits on its own cache
	// line(s), keeping false sharing out of the measurement.
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		// One private mutex per worker goroutine.
		var rwm PaddedRWMutex
		for pb.Next() {
			// Two nested read locks, then one write lock, per iteration.
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}
178
// benchmarkRWMutex measures a shared RWMutex under a mixed workload:
// one write lock per writeRatio operations, with localWork loop iterations
// performed while the read lock is held.
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		counter := 0
		for pb.Next() {
			counter++
			if counter%writeRatio != 0 {
				rwm.RLock()
				// Simulate read-side work so the lock is held for
				// a non-trivial duration.
				for w := 0; w != localWork; w++ {
					counter *= 2
					counter /= 2
				}
				rwm.RUnlock()
			} else {
				rwm.Lock()
				rwm.Unlock()
			}
		}
		// Keep counter live so the work loop isn't optimized away.
		_ = counter
	})
}
200
// BenchmarkRWMutexWrite100: 1 write lock per 100 operations, no local work
// under the read lock.
func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}
204
// BenchmarkRWMutexWrite10: 1 write lock per 10 operations, no local work
// under the read lock.
func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}
208
// BenchmarkRWMutexWorkWrite100: 1 write lock per 100 operations, with 100
// iterations of local work while the read lock is held.
func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}
212
// BenchmarkRWMutexWorkWrite10: 1 write lock per 10 operations, with 100
// iterations of local work while the read lock is held.
func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}
216