1package metrics
2
3import (
4	"math/rand"
5	"runtime"
6	"testing"
7	"time"
8)
9
10// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
11// expensive computations like Variance, the cost of copying the Sample, as
12// approximated by a make and copy, is much greater than the cost of the
13// computation for small samples and only slightly less for large samples.
14func BenchmarkCompute1000(b *testing.B) {
15	s := make([]int64, 1000)
16	for i := 0; i < len(s); i++ {
17		s[i] = int64(i)
18	}
19	b.ResetTimer()
20	for i := 0; i < b.N; i++ {
21		SampleVariance(s)
22	}
23}
24func BenchmarkCompute1000000(b *testing.B) {
25	s := make([]int64, 1000000)
26	for i := 0; i < len(s); i++ {
27		s[i] = int64(i)
28	}
29	b.ResetTimer()
30	for i := 0; i < b.N; i++ {
31		SampleVariance(s)
32	}
33}
// BenchmarkCopy1000 times a make-and-copy of a 1,000-element slice, as a
// proxy for the cost of snapshotting a small sample.
func BenchmarkCopy1000(b *testing.B) {
	src := make([]int64, 1000)
	for j := range src {
		src[j] = int64(j)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		dst := make([]int64, len(src))
		copy(dst, src)
	}
}
// BenchmarkCopy1000000 times a make-and-copy of a 1,000,000-element slice, as
// a proxy for the cost of snapshotting a large sample.
func BenchmarkCopy1000000(b *testing.B) {
	src := make([]int64, 1000000)
	for j := range src {
		src[j] = int64(j)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		dst := make([]int64, len(src))
		copy(dst, src)
	}
}
56
// BenchmarkExpDecaySample257 benchmarks Update on a 257-slot exp-decay sample.
func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}
60
// BenchmarkExpDecaySample514 benchmarks Update on a 514-slot exp-decay sample.
func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}
64
// BenchmarkExpDecaySample1028 benchmarks Update on a 1028-slot exp-decay sample.
func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}
68
// BenchmarkUniformSample257 benchmarks Update on a 257-slot uniform sample.
func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}
72
// BenchmarkUniformSample514 benchmarks Update on a 514-slot uniform sample.
func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}
76
// BenchmarkUniformSample1028 benchmarks Update on a 1028-slot uniform sample.
func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}
80
81func TestExpDecaySample10(t *testing.T) {
82	rand.Seed(1)
83	s := NewExpDecaySample(100, 0.99)
84	for i := 0; i < 10; i++ {
85		s.Update(int64(i))
86	}
87	if size := s.Count(); 10 != size {
88		t.Errorf("s.Count(): 10 != %v\n", size)
89	}
90	if size := s.Size(); 10 != size {
91		t.Errorf("s.Size(): 10 != %v\n", size)
92	}
93	if l := len(s.Values()); 10 != l {
94		t.Errorf("len(s.Values()): 10 != %v\n", l)
95	}
96	for _, v := range s.Values() {
97		if v > 10 || v < 0 {
98			t.Errorf("out of range [0, 10): %v\n", v)
99		}
100	}
101}
102
103func TestExpDecaySample100(t *testing.T) {
104	rand.Seed(1)
105	s := NewExpDecaySample(1000, 0.01)
106	for i := 0; i < 100; i++ {
107		s.Update(int64(i))
108	}
109	if size := s.Count(); 100 != size {
110		t.Errorf("s.Count(): 100 != %v\n", size)
111	}
112	if size := s.Size(); 100 != size {
113		t.Errorf("s.Size(): 100 != %v\n", size)
114	}
115	if l := len(s.Values()); 100 != l {
116		t.Errorf("len(s.Values()): 100 != %v\n", l)
117	}
118	for _, v := range s.Values() {
119		if v > 100 || v < 0 {
120			t.Errorf("out of range [0, 100): %v\n", v)
121		}
122	}
123}
124
125func TestExpDecaySample1000(t *testing.T) {
126	rand.Seed(1)
127	s := NewExpDecaySample(100, 0.99)
128	for i := 0; i < 1000; i++ {
129		s.Update(int64(i))
130	}
131	if size := s.Count(); 1000 != size {
132		t.Errorf("s.Count(): 1000 != %v\n", size)
133	}
134	if size := s.Size(); 100 != size {
135		t.Errorf("s.Size(): 100 != %v\n", size)
136	}
137	if l := len(s.Values()); 100 != l {
138		t.Errorf("len(s.Values()): 100 != %v\n", l)
139	}
140	for _, v := range s.Values() {
141		if v > 1000 || v < 0 {
142			t.Errorf("out of range [0, 1000): %v\n", v)
143		}
144	}
145}
146
147// This test makes sure that the sample's priority is not amplified by using
148// nanosecond duration since start rather than second duration since start.
149// The priority becomes +Inf quickly after starting if this is done,
150// effectively freezing the set of samples until a rescale step happens.
151func TestExpDecaySampleNanosecondRegression(t *testing.T) {
152	rand.Seed(1)
153	s := NewExpDecaySample(100, 0.99)
154	for i := 0; i < 100; i++ {
155		s.Update(10)
156	}
157	time.Sleep(1 * time.Millisecond)
158	for i := 0; i < 100; i++ {
159		s.Update(20)
160	}
161	v := s.Values()
162	avg := float64(0)
163	for i := 0; i < len(v); i++ {
164		avg += float64(v[i])
165	}
166	avg /= float64(len(v))
167	if avg > 16 || avg < 14 {
168		t.Errorf("out of range [14, 16]: %v\n", avg)
169	}
170}
171
172func TestExpDecaySampleSnapshot(t *testing.T) {
173	now := time.Now()
174	rand.Seed(1)
175	s := NewExpDecaySample(100, 0.99)
176	for i := 1; i <= 10000; i++ {
177		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
178	}
179	snapshot := s.Snapshot()
180	s.Update(1)
181	testExpDecaySampleStatistics(t, snapshot)
182}
183
184func TestExpDecaySampleStatistics(t *testing.T) {
185	now := time.Now()
186	rand.Seed(1)
187	s := NewExpDecaySample(100, 0.99)
188	for i := 1; i <= 10000; i++ {
189		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
190	}
191	testExpDecaySampleStatistics(t, s)
192}
193
194func TestUniformSample(t *testing.T) {
195	rand.Seed(1)
196	s := NewUniformSample(100)
197	for i := 0; i < 1000; i++ {
198		s.Update(int64(i))
199	}
200	if size := s.Count(); 1000 != size {
201		t.Errorf("s.Count(): 1000 != %v\n", size)
202	}
203	if size := s.Size(); 100 != size {
204		t.Errorf("s.Size(): 100 != %v\n", size)
205	}
206	if l := len(s.Values()); 100 != l {
207		t.Errorf("len(s.Values()): 100 != %v\n", l)
208	}
209	for _, v := range s.Values() {
210		if v > 1000 || v < 0 {
211			t.Errorf("out of range [0, 100): %v\n", v)
212		}
213	}
214}
215
216func TestUniformSampleIncludesTail(t *testing.T) {
217	rand.Seed(1)
218	s := NewUniformSample(100)
219	max := 100
220	for i := 0; i < max; i++ {
221		s.Update(int64(i))
222	}
223	v := s.Values()
224	sum := 0
225	exp := (max - 1) * max / 2
226	for i := 0; i < len(v); i++ {
227		sum += int(v[i])
228	}
229	if exp != sum {
230		t.Errorf("sum: %v != %v\n", exp, sum)
231	}
232}
233
234func TestUniformSampleSnapshot(t *testing.T) {
235	s := NewUniformSample(100)
236	for i := 1; i <= 10000; i++ {
237		s.Update(int64(i))
238	}
239	snapshot := s.Snapshot()
240	s.Update(1)
241	testUniformSampleStatistics(t, snapshot)
242}
243
244func TestUniformSampleStatistics(t *testing.T) {
245	rand.Seed(1)
246	s := NewUniformSample(100)
247	for i := 1; i <= 10000; i++ {
248		s.Update(int64(i))
249	}
250	testUniformSampleStatistics(t, s)
251}
252
// benchmarkSample measures the per-Update cost of the given Sample and logs
// an estimate of the GC pause time attributable to the benchmark run.
func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	// Cumulative GC pause time before the timed loop; used as a baseline.
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	// Force a collection so pauses caused by this run show up in the stats.
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
266
267func testExpDecaySampleStatistics(t *testing.T, s Sample) {
268	if count := s.Count(); 10000 != count {
269		t.Errorf("s.Count(): 10000 != %v\n", count)
270	}
271	if min := s.Min(); 107 != min {
272		t.Errorf("s.Min(): 107 != %v\n", min)
273	}
274	if max := s.Max(); 10000 != max {
275		t.Errorf("s.Max(): 10000 != %v\n", max)
276	}
277	if mean := s.Mean(); 4965.98 != mean {
278		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
279	}
280	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
281		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
282	}
283	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
284	if 4615 != ps[0] {
285		t.Errorf("median: 4615 != %v\n", ps[0])
286	}
287	if 7672 != ps[1] {
288		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
289	}
290	if 9998.99 != ps[2] {
291		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
292	}
293}
294
295func testUniformSampleStatistics(t *testing.T, s Sample) {
296	if count := s.Count(); 10000 != count {
297		t.Errorf("s.Count(): 10000 != %v\n", count)
298	}
299	if min := s.Min(); 9412 != min {
300		t.Errorf("s.Min(): 9412 != %v\n", min)
301	}
302	if max := s.Max(); 10000 != max {
303		t.Errorf("s.Max(): 10000 != %v\n", max)
304	}
305	if mean := s.Mean(); 9902.26 != mean {
306		t.Errorf("s.Mean(): 9902.26 != %v\n", mean)
307	}
308	if stdDev := s.StdDev(); 101.8667384380201 != stdDev {
309		t.Errorf("s.StdDev(): 101.8667384380201 != %v\n", stdDev)
310	}
311	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
312	if 9930.5 != ps[0] {
313		t.Errorf("median: 9930.5 != %v\n", ps[0])
314	}
315	if 9973.75 != ps[1] {
316		t.Errorf("75th percentile: 9973.75 != %v\n", ps[1])
317	}
318	if 9999.99 != ps[2] {
319		t.Errorf("99th percentile: 9999.99 != %v\n", ps[2])
320	}
321}
322
323// TestUniformSampleConcurrentUpdateCount would expose data race problems with
324// concurrent Update and Count calls on Sample when test is called with -race
325// argument
326func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
327	if testing.Short() {
328		t.Skip("skipping in short mode")
329	}
330	s := NewUniformSample(100)
331	for i := 0; i < 100; i++ {
332		s.Update(int64(i))
333	}
334	quit := make(chan struct{})
335	go func() {
336		t := time.NewTicker(10 * time.Millisecond)
337		for {
338			select {
339			case <-t.C:
340				s.Update(rand.Int63())
341			case <-quit:
342				t.Stop()
343				return
344			}
345		}
346	}()
347	for i := 0; i < 1000; i++ {
348		s.Count()
349		time.Sleep(5 * time.Millisecond)
350	}
351	quit <- struct{}{}
352}
353