1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4package runtime_test
5
6import (
7	"fmt"
8	"testing"
9)
10
// N is the element count used by the small append benchmarks below.
const N = 20

// Struct element types of increasing size (24, 32, and 40 bytes given
// int64 fields), used to benchmark make/append/copy for slices whose
// element type is larger than a machine word.
type (
	struct24 struct{ a, b, c int64 }
	struct32 struct{ a, b, c, d int64 }
	struct40 struct{ a, b, c, d, e int64 }
)
18
// BenchmarkMakeSlice measures a non-constant-folded make([]T, len, cap)
// for element types of various sizes, both with and without pointers.
// Each sub-benchmark assigns the result to a local and discards it; the
// `_ = x` silences the unused-variable check while still timing the
// allocation itself.
func BenchmarkMakeSlice(b *testing.B) {
	const length = 2
	b.Run("Byte", func(b *testing.B) {
		var x []byte
		for i := 0; i < b.N; i++ {
			x = make([]byte, length, 2*length)
			_ = x
		}
	})
	b.Run("Int16", func(b *testing.B) {
		var x []int16
		for i := 0; i < b.N; i++ {
			x = make([]int16, length, 2*length)
			_ = x
		}
	})
	b.Run("Int", func(b *testing.B) {
		var x []int
		for i := 0; i < b.N; i++ {
			x = make([]int, length, 2*length)
			_ = x
		}
	})
	// Pointer elements exercise the variant that must allocate
	// pointer-containing (GC-scanned) memory.
	b.Run("Ptr", func(b *testing.B) {
		var x []*byte
		for i := 0; i < b.N; i++ {
			x = make([]*byte, length, 2*length)
			_ = x
		}
	})
	// Struct elements larger than a word exercise the size-classed
	// allocation paths for 24-, 32-, and 40-byte elements.
	b.Run("Struct", func(b *testing.B) {
		b.Run("24", func(b *testing.B) {
			var x []struct24
			for i := 0; i < b.N; i++ {
				x = make([]struct24, length, 2*length)
				_ = x
			}
		})
		b.Run("32", func(b *testing.B) {
			var x []struct32
			for i := 0; i < b.N; i++ {
				x = make([]struct32, length, 2*length)
				_ = x
			}
		})
		b.Run("40", func(b *testing.B) {
			var x []struct40
			for i := 0; i < b.N; i++ {
				x = make([]struct40, length, 2*length)
				_ = x
			}
		})

	})
}
74
// BenchmarkGrowSlice measures appending a 9-element slice onto a nil
// slice in a single append call. Every iteration must allocate a fresh
// backing array and copy the source, exercising the runtime's grow
// path for various element types (with and without pointers).
func BenchmarkGrowSlice(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		x := make([]byte, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]byte(nil), x...)
		}
	})
	b.Run("Int16", func(b *testing.B) {
		x := make([]int16, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]int16(nil), x...)
		}
	})
	b.Run("Int", func(b *testing.B) {
		x := make([]int, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]int(nil), x...)
		}
	})
	// Pointer elements additionally exercise pointer-containing
	// (GC-scanned) allocation during growth.
	b.Run("Ptr", func(b *testing.B) {
		x := make([]*byte, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]*byte(nil), x...)
		}
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("24", func(b *testing.B) {
			x := make([]struct24, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct24(nil), x...)
			}
		})
		b.Run("32", func(b *testing.B) {
			x := make([]struct32, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct32(nil), x...)
			}
		})
		b.Run("40", func(b *testing.B) {
			x := make([]struct40, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct40(nil), x...)
			}
		})

	})
}
122
// Package-level sinks for BenchmarkExtendSlice. Storing results here
// keeps them reachable so the compiler cannot optimize the appends
// away or stack-allocate the slices.
var (
	SinkIntSlice        []int
	SinkIntPointerSlice []*int
)
127
// BenchmarkExtendSlice measures extending a slice with a whole other
// slice in one append call. The three-index re-slice s[:0:length/2]
// caps the destination below the size needed, so IntSlice and
// PointerSlice force a grow (allocate + copy) on every iteration;
// NoGrow keeps the full capacity so append never reallocates.
func BenchmarkExtendSlice(b *testing.B) {
	var length = 4 // Use a variable to prevent stack allocation of slices.
	b.Run("IntSlice", func(b *testing.B) {
		s := make([]int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length/2], make([]int, length)...)
		}
		SinkIntSlice = s
	})
	// Pointer elements additionally exercise write barriers / scanned
	// memory during the copy.
	b.Run("PointerSlice", func(b *testing.B) {
		s := make([]*int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length/2], make([]*int, length)...)
		}
		SinkIntPointerSlice = s
	})
	b.Run("NoGrow", func(b *testing.B) {
		s := make([]int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length], make([]int, length)...)
		}
		SinkIntSlice = s
	})
}
152
// BenchmarkAppend measures appending N ints one at a time into a slice
// that already has sufficient capacity, so no append ever grows the
// slice. The timer is stopped around the initial allocation so only
// the append loop itself is measured.
func BenchmarkAppend(b *testing.B) {
	b.StopTimer()
	x := make([]int, 0, N)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		x = x[0:0] // reset length, keep capacity
		for j := 0; j < N; j++ {
			x = append(x, j)
		}
	}
}
164
// BenchmarkAppendGrowByte measures building a 1 MiB byte slice one
// element at a time from nil, exercising the append growth policy
// (repeated reallocate-and-copy) across many size classes.
func BenchmarkAppendGrowByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x []byte
		for j := 0; j < 1<<20; j++ {
			x = append(x, byte(j))
		}
	}
}
173
// BenchmarkAppendGrowString is the pointer-containing counterpart of
// BenchmarkAppendGrowByte: it grows a slice of 1<<20 strings from nil,
// so growth must allocate GC-scanned memory. The appended string is
// the zero value, keeping the per-element work minimal.
func BenchmarkAppendGrowString(b *testing.B) {
	var s string
	for i := 0; i < b.N; i++ {
		var x []string
		for j := 0; j < 1<<20; j++ {
			x = append(x, s)
		}
	}
}
183
// BenchmarkAppendSlice measures append(x, y...) for small source
// lengths. x is reset to length 0 each iteration, so in steady state
// the append copies into existing capacity. (For length 32 > N the
// first iteration grows x once; x[0:0] then retains the larger
// capacity for the rest of the run.)
func BenchmarkAppendSlice(b *testing.B) {
	for _, length := range []int{1, 4, 7, 8, 15, 16, 32} {
		b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
			x := make([]byte, 0, N)
			y := make([]byte, length)
			for i := 0; i < b.N; i++ {
				x = x[0:0]
				x = append(x, y...)
			}
		})
	}
}
196
var (
	// blackhole is a package-level sink for BenchmarkAppendSliceLarge;
	// it keeps the appended result reachable so the compiler cannot
	// eliminate the work.
	blackhole []byte
)
200
// BenchmarkAppendSliceLarge measures append(nil, y...) for large
// (1 KiB - 1 MiB) byte sources. blackhole is reset to nil first so
// each iteration must allocate a fresh backing array and copy the full
// source, measuring bulk allocate+copy throughput.
func BenchmarkAppendSliceLarge(b *testing.B) {
	for _, length := range []int{1 << 10, 4 << 10, 16 << 10, 64 << 10, 256 << 10, 1024 << 10} {
		y := make([]byte, length)
		b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				blackhole = nil
				blackhole = append(blackhole, y...)
			}
		})
	}
}
212
// BenchmarkAppendStr measures appending a string onto a byte slice
// (append(x, str...)) for several string lengths. x is reset to
// length 0 each iteration; as in BenchmarkAppendSlice, the 32-byte
// case grows past the initial capacity N once and then reuses the
// larger backing array.
func BenchmarkAppendStr(b *testing.B) {
	for _, str := range []string{
		"1",
		"1234",
		"12345678",
		"1234567890123456",
		"12345678901234567890123456789012",
	} {
		b.Run(fmt.Sprint(len(str), "Bytes"), func(b *testing.B) {
			x := make([]byte, 0, N)
			for i := 0; i < b.N; i++ {
				x = x[0:0]
				x = append(x, str...)
			}
		})
	}
}
230
// BenchmarkAppendSpecialCase measures a hand-written version of the
// in-capacity append fast path: extend the length by re-slicing and
// store directly. With capacity N and exactly N elements appended per
// iteration, len(x) < cap(x) always holds, so the manual branch is
// taken every time and the append builtin in the else branch is never
// reached. Compare against BenchmarkAppend to see the overhead of the
// builtin's capacity check.
func BenchmarkAppendSpecialCase(b *testing.B) {
	b.StopTimer()
	x := make([]int, 0, N)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		x = x[0:0]
		for j := 0; j < N; j++ {
			if len(x) < cap(x) {
				x = x[:len(x)+1]
				x[len(x)-1] = j
			} else {
				x = append(x, j)
			}
		}
	}
}
247
// x is package-level so that f can mutate the same backing array that
// TestSideEffectOrder appends into.
var x []int

// f writes 3 into index 0 of x's backing array (via a re-slice within
// capacity) and returns 2. It is passed as an append argument to
// detect when append's stores happen relative to argument evaluation.
func f() int {
	x[:1][0] = 3
	return 2
}

// TestSideEffectOrder verifies that all append arguments — including
// the call to f, which writes 3 into the backing array — are evaluated
// before append stores its values. If the compiler stored 1 into x[0]
// before calling f, f's write would clobber it and x[0] would be 3.
func TestSideEffectOrder(t *testing.T) {
	x = make([]int, 0, 10)
	x = append(x, 1, f())
	if x[0] != 1 || x[1] != 2 {
		t.Error("append failed: ", x[0], x[1])
	}
}
262
// TestAppendOverlap checks that append copies correctly when the
// destination and source share a backing array and the destination
// starts at a higher address than the source.
func TestAppendOverlap(t *testing.T) {
	x := []byte("1234")
	x = append(x[1:], x...) // p > q in runtime·appendslice.
	const want = "2341234"
	if got := string(x); got != want {
		t.Errorf("overlap failed: got %q want %q", got, want)
	}
}
272
// BenchmarkCopy measures the copy builtin into a large byte buffer
// from both slice and string sources of various small lengths.
// SetBytes records the bytes copied per iteration so results are also
// reported as throughput.
func BenchmarkCopy(b *testing.B) {
	for _, l := range []int{1, 2, 4, 8, 12, 16, 32, 128, 1024} {
		buf := make([]byte, 4096)
		b.Run(fmt.Sprint(l, "Byte"), func(b *testing.B) {
			s := make([]byte, l)
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s)
			}
			b.SetBytes(int64(n)) // n == l: the whole source fits in buf
		})
		b.Run(fmt.Sprint(l, "String"), func(b *testing.B) {
			s := string(make([]byte, l))
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s) // string-source form of copy
			}
			b.SetBytes(int64(n))
		})
	}
}
294
// Package-level sinks for BenchmarkAppendInPlace. Appending through a
// global forces the slice header to be addressed in memory, which is
// what triggers the in-place append optimization being measured.
var (
	sByte []byte
	s1Ptr []uintptr
	s2Ptr [][2]uintptr
	s3Ptr [][3]uintptr
	s4Ptr [][4]uintptr
)
302
303// BenchmarkAppendInPlace tests the performance of append
304// when the result is being written back to the same slice.
305// In order for the in-place optimization to occur,
306// the slice must be referred to by address;
307// using a global is an easy way to trigger that.
308// We test the "grow" and "no grow" paths separately,
309// but not the "normal" (occasionally grow) path,
310// because it is a blend of the other two.
311// We use small numbers and small sizes in an attempt
312// to avoid benchmarking memory allocation and copying.
313// We use scalars instead of pointers in an attempt
314// to avoid benchmarking the write barriers.
315// We benchmark four common sizes (byte, pointer, string/interface, slice),
316// and one larger size.
func BenchmarkAppendInPlace(b *testing.B) {
	// NoGrow: start at length C (capacity C per the spec), so the first
	// append grows once and the remaining C-1 appends proceed within
	// the new capacity — the no-grow path dominates.
	b.Run("NoGrow", func(b *testing.B) {
		const C = 128

		b.Run("Byte", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sByte = make([]byte, C)
				for j := 0; j < C; j++ {
					sByte = append(sByte, 0x77)
				}
			}
		})

		b.Run("1Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s1Ptr = make([]uintptr, C)
				for j := 0; j < C; j++ {
					s1Ptr = append(s1Ptr, 0x77)
				}
			}
		})

		b.Run("2Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s2Ptr = make([][2]uintptr, C)
				for j := 0; j < C; j++ {
					s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
				}
			}
		})

		b.Run("3Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s3Ptr = make([][3]uintptr, C)
				for j := 0; j < C; j++ {
					s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
				}
			}
		})

		b.Run("4Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s4Ptr = make([][4]uintptr, C)
				for j := 0; j < C; j++ {
					s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
				}
			}
		})

	})

	// Grow: after each append the slice is re-sliced out to its full
	// capacity, so the next append always finds len == cap and must
	// grow — every append takes the grow path.
	b.Run("Grow", func(b *testing.B) {
		const C = 5

		b.Run("Byte", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sByte = make([]byte, 0)
				for j := 0; j < C; j++ {
					sByte = append(sByte, 0x77)
					sByte = sByte[:cap(sByte)]
				}
			}
		})

		b.Run("1Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s1Ptr = make([]uintptr, 0)
				for j := 0; j < C; j++ {
					s1Ptr = append(s1Ptr, 0x77)
					s1Ptr = s1Ptr[:cap(s1Ptr)]
				}
			}
		})

		b.Run("2Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s2Ptr = make([][2]uintptr, 0)
				for j := 0; j < C; j++ {
					s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
					s2Ptr = s2Ptr[:cap(s2Ptr)]
				}
			}
		})

		b.Run("3Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s3Ptr = make([][3]uintptr, 0)
				for j := 0; j < C; j++ {
					s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
					s3Ptr = s3Ptr[:cap(s3Ptr)]
				}
			}
		})

		b.Run("4Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s4Ptr = make([][4]uintptr, 0)
				for j := 0; j < C; j++ {
					s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
					s4Ptr = s4Ptr[:cap(s4Ptr)]
				}
			}
		})

	})
}
423