1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package sync_test
6
7import (
8	"fmt"
9	"reflect"
10	"sync"
11	"sync/atomic"
12	"testing"
13)
14
// bench describes one benchmark scenario for benchMap: an optional
// untimed setup phase followed by the per-goroutine timed body.
type bench struct {
	// setup, if non-nil, runs once per map implementation before the
	// timer is reset — e.g. to pre-populate the map or b.Skip a slow
	// implementation.
	setup func(*testing.B, mapInterface)
	// perG is the timed body run by each parallel goroutine; i is the
	// goroutine's starting key index (id*b.N, assigned in benchMap).
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}
19
20func benchMap(b *testing.B, bench bench) {
21	for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
22		b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
23			m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
24			if bench.setup != nil {
25				bench.setup(b, m)
26			}
27
28			b.ResetTimer()
29
30			var i int64
31			b.RunParallel(func(pb *testing.PB) {
32				id := int(atomic.AddInt64(&i, 1) - 1)
33				bench.perG(b, pb, id*b.N, m)
34			})
35		})
36	}
37}
38
39func BenchmarkLoadMostlyHits(b *testing.B) {
40	const hits, misses = 1023, 1
41
42	benchMap(b, bench{
43		setup: func(_ *testing.B, m mapInterface) {
44			for i := 0; i < hits; i++ {
45				m.LoadOrStore(i, i)
46			}
47			// Prime the map to get it into a steady state.
48			for i := 0; i < hits*2; i++ {
49				m.Load(i % hits)
50			}
51		},
52
53		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
54			for ; pb.Next(); i++ {
55				m.Load(i % (hits + misses))
56			}
57		},
58	})
59}
60
61func BenchmarkLoadMostlyMisses(b *testing.B) {
62	const hits, misses = 1, 1023
63
64	benchMap(b, bench{
65		setup: func(_ *testing.B, m mapInterface) {
66			for i := 0; i < hits; i++ {
67				m.LoadOrStore(i, i)
68			}
69			// Prime the map to get it into a steady state.
70			for i := 0; i < hits*2; i++ {
71				m.Load(i % hits)
72			}
73		},
74
75		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
76			for ; pb.Next(); i++ {
77				m.Load(i % (hits + misses))
78			}
79		},
80	})
81}
82
83func BenchmarkLoadOrStoreBalanced(b *testing.B) {
84	const hits, misses = 128, 128
85
86	benchMap(b, bench{
87		setup: func(b *testing.B, m mapInterface) {
88			if _, ok := m.(*DeepCopyMap); ok {
89				b.Skip("DeepCopyMap has quadratic running time.")
90			}
91			for i := 0; i < hits; i++ {
92				m.LoadOrStore(i, i)
93			}
94			// Prime the map to get it into a steady state.
95			for i := 0; i < hits*2; i++ {
96				m.Load(i % hits)
97			}
98		},
99
100		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
101			for ; pb.Next(); i++ {
102				j := i % (hits + misses)
103				if j < hits {
104					if _, ok := m.LoadOrStore(j, i); !ok {
105						b.Fatalf("unexpected miss for %v", j)
106					}
107				} else {
108					if v, loaded := m.LoadOrStore(i, i); loaded {
109						b.Fatalf("failed to store %v: existing value %v", i, v)
110					}
111				}
112			}
113		},
114	})
115}
116
117func BenchmarkLoadOrStoreUnique(b *testing.B) {
118	benchMap(b, bench{
119		setup: func(b *testing.B, m mapInterface) {
120			if _, ok := m.(*DeepCopyMap); ok {
121				b.Skip("DeepCopyMap has quadratic running time.")
122			}
123		},
124
125		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
126			for ; pb.Next(); i++ {
127				m.LoadOrStore(i, i)
128			}
129		},
130	})
131}
132
133func BenchmarkLoadOrStoreCollision(b *testing.B) {
134	benchMap(b, bench{
135		setup: func(_ *testing.B, m mapInterface) {
136			m.LoadOrStore(0, 0)
137		},
138
139		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
140			for ; pb.Next(); i++ {
141				m.LoadOrStore(0, 0)
142			}
143		},
144	})
145}
146
147func BenchmarkLoadAndDeleteBalanced(b *testing.B) {
148	const hits, misses = 128, 128
149
150	benchMap(b, bench{
151		setup: func(b *testing.B, m mapInterface) {
152			if _, ok := m.(*DeepCopyMap); ok {
153				b.Skip("DeepCopyMap has quadratic running time.")
154			}
155			for i := 0; i < hits; i++ {
156				m.LoadOrStore(i, i)
157			}
158			// Prime the map to get it into a steady state.
159			for i := 0; i < hits*2; i++ {
160				m.Load(i % hits)
161			}
162		},
163
164		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
165			for ; pb.Next(); i++ {
166				j := i % (hits + misses)
167				if j < hits {
168					m.LoadAndDelete(j)
169				} else {
170					m.LoadAndDelete(i)
171				}
172			}
173		},
174	})
175}
176
177func BenchmarkLoadAndDeleteUnique(b *testing.B) {
178	benchMap(b, bench{
179		setup: func(b *testing.B, m mapInterface) {
180			if _, ok := m.(*DeepCopyMap); ok {
181				b.Skip("DeepCopyMap has quadratic running time.")
182			}
183		},
184
185		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
186			for ; pb.Next(); i++ {
187				m.LoadAndDelete(i)
188			}
189		},
190	})
191}
192
193func BenchmarkLoadAndDeleteCollision(b *testing.B) {
194	benchMap(b, bench{
195		setup: func(_ *testing.B, m mapInterface) {
196			m.LoadOrStore(0, 0)
197		},
198
199		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
200			for ; pb.Next(); i++ {
201				m.LoadAndDelete(0)
202			}
203		},
204	})
205}
206
207func BenchmarkRange(b *testing.B) {
208	const mapSize = 1 << 10
209
210	benchMap(b, bench{
211		setup: func(_ *testing.B, m mapInterface) {
212			for i := 0; i < mapSize; i++ {
213				m.Store(i, i)
214			}
215		},
216
217		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
218			for ; pb.Next(); i++ {
219				m.Range(func(_, _ any) bool { return true })
220			}
221		},
222	})
223}
224
225// BenchmarkAdversarialAlloc tests performance when we store a new value
226// immediately whenever the map is promoted to clean and otherwise load a
227// unique, missing key.
228//
229// This forces the Load calls to always acquire the map's mutex.
230func BenchmarkAdversarialAlloc(b *testing.B) {
231	benchMap(b, bench{
232		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
233			var stores, loadsSinceStore int64
234			for ; pb.Next(); i++ {
235				m.Load(i)
236				if loadsSinceStore++; loadsSinceStore > stores {
237					m.LoadOrStore(i, stores)
238					loadsSinceStore = 0
239					stores++
240				}
241			}
242		},
243	})
244}
245
246// BenchmarkAdversarialDelete tests performance when we periodically delete
247// one key and add a different one in a large map.
248//
249// This forces the Load calls to always acquire the map's mutex and periodically
250// makes a full copy of the map despite changing only one entry.
251func BenchmarkAdversarialDelete(b *testing.B) {
252	const mapSize = 1 << 10
253
254	benchMap(b, bench{
255		setup: func(_ *testing.B, m mapInterface) {
256			for i := 0; i < mapSize; i++ {
257				m.Store(i, i)
258			}
259		},
260
261		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
262			for ; pb.Next(); i++ {
263				m.Load(i)
264
265				if i%mapSize == 0 {
266					m.Range(func(k, _ any) bool {
267						m.Delete(k)
268						return false
269					})
270					m.Store(i, i)
271				}
272			}
273		},
274	})
275}
276
277func BenchmarkDeleteCollision(b *testing.B) {
278	benchMap(b, bench{
279		setup: func(_ *testing.B, m mapInterface) {
280			m.LoadOrStore(0, 0)
281		},
282
283		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
284			for ; pb.Next(); i++ {
285				m.Delete(0)
286			}
287		},
288	})
289}
290