1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	"fmt"
9	"math"
10	"math/rand"
11	"reflect"
12	. "runtime"
13	"strings"
14	"testing"
15	"unsafe"
16)
17
18func TestMemHash32Equality(t *testing.T) {
19	if *UseAeshash {
20		t.Skip("skipping since AES hash implementation is used")
21	}
22	var b [4]byte
23	r := rand.New(rand.NewSource(1234))
24	seed := uintptr(r.Uint64())
25	for i := 0; i < 100; i++ {
26		randBytes(r, b[:])
27		got := MemHash32(unsafe.Pointer(&b), seed)
28		want := MemHash(unsafe.Pointer(&b), seed, 4)
29		if got != want {
30			t.Errorf("MemHash32(%x, %v) = %v; want %v", b, seed, got, want)
31		}
32	}
33}
34
35func TestMemHash64Equality(t *testing.T) {
36	if *UseAeshash {
37		t.Skip("skipping since AES hash implementation is used")
38	}
39	var b [8]byte
40	r := rand.New(rand.NewSource(1234))
41	seed := uintptr(r.Uint64())
42	for i := 0; i < 100; i++ {
43		randBytes(r, b[:])
44		got := MemHash64(unsafe.Pointer(&b), seed)
45		want := MemHash(unsafe.Pointer(&b), seed, 8)
46		if got != want {
47			t.Errorf("MemHash64(%x, %v) = %v; want %v", b, seed, got, want)
48		}
49	}
50}
51
// TestCompilerVsRuntimeHash makes sure the compiler's hash function and
// the runtime's hash function agree, for every key type that the
// compiler generates a static hash for. See issue 37716.
func TestCompilerVsRuntimeHash(t *testing.T) {
	// One empty map per key type of interest; only the key type matters.
	for _, m := range []interface{}{
		map[bool]int{},
		map[int8]int{},
		map[uint8]int{},
		map[int16]int{},
		map[uint16]int{},
		map[int32]int{},
		map[uint32]int{},
		map[int64]int{},
		map[uint64]int{},
		map[int]int{},
		map[uint]int{},
		map[uintptr]int{},
		map[*byte]int{},
		map[chan int]int{},
		map[unsafe.Pointer]int{},
		map[float32]int{},
		map[float64]int{},
		map[complex64]int{},
		map[complex128]int{},
		map[string]int{},
		//map[interface{}]int{},
		//map[interface{F()}]int{},
		map[[8]uint64]int{},
		map[[8]string]int{},
		map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128
		map[struct{ a, b, _, d int32 }]int{},
		map[struct {
			a, b int32
			c    float32
			d, e [8]byte
		}]int{},
		map[struct {
			a int16
			b int64
		}]int{},
	} {
		k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key
		// MapHashCheck returns the compiler's and the runtime's hash of k.
		x, y := MapHashCheck(m, k)
		if x != y {
			t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m)
		}
	}
}
99
// Smhasher is a torture test for hash functions.
// https://github.com/aappleby/smhasher (formerly code.google.com/p/smhasher)
// This code is a port of some of the Smhasher tests to Go.
103//
104// The current AES hash function passes Smhasher. Our fallback
105// hash functions don't, so we only enable the difficult tests when
106// we know the AES implementation is available.
107
108// Sanity checks.
109// hash should not depend on values outside key.
110// hash should not depend on alignment.
111func TestSmhasherSanity(t *testing.T) {
112	r := rand.New(rand.NewSource(1234))
113	const REP = 10
114	const KEYMAX = 128
115	const PAD = 16
116	const OFFMAX = 16
117	for k := 0; k < REP; k++ {
118		for n := 0; n < KEYMAX; n++ {
119			for i := 0; i < OFFMAX; i++ {
120				var b [KEYMAX + OFFMAX + 2*PAD]byte
121				var c [KEYMAX + OFFMAX + 2*PAD]byte
122				randBytes(r, b[:])
123				randBytes(r, c[:])
124				copy(c[PAD+i:PAD+i+n], b[PAD:PAD+n])
125				if BytesHash(b[PAD:PAD+n], 0) != BytesHash(c[PAD+i:PAD+i+n], 0) {
126					t.Errorf("hash depends on bytes outside key")
127				}
128			}
129		}
130	}
131}
132
// HashSet accumulates hash values and counts how many were added, so
// the observed collision rate can later be compared (in check) against
// the rate expected of a truly random function.
type HashSet struct {
	m map[uintptr]struct{} // set of hashes added
	n int                  // number of hashes added
}
137
138func newHashSet() *HashSet {
139	return &HashSet{make(map[uintptr]struct{}), 0}
140}
141func (s *HashSet) add(h uintptr) {
142	s.m[h] = struct{}{}
143	s.n++
144}
145func (s *HashSet) addS(x string) {
146	s.add(StringHash(x, 0))
147}
148func (s *HashSet) addB(x []byte) {
149	s.add(BytesHash(x, 0))
150}
151func (s *HashSet) addS_seed(x string, seed uintptr) {
152	s.add(StringHash(x, seed))
153}
// check reports a test error if the number of collisions seen so far is
// significantly larger than a truly random hash function would produce.
func (s *HashSet) check(t *testing.T) {
	const SLOP = 10.0
	// Each duplicate hash collapses into a single map entry, so the
	// shortfall of the map size versus the add count is the collision count.
	collisions := s.n - len(s.m)
	//fmt.Printf("%d/%d\n", len(s.m), s.n)
	pairs := int64(s.n) * int64(s.n-1) / 2
	// Expected collisions if each pair collides with probability 2^-hashSize.
	expected := float64(pairs) / math.Pow(2.0, float64(hashSize))
	stddev := math.Sqrt(expected)
	// Allow generous slack (SLOP * ~3 sigma, plus 1 to tolerate
	// near-zero expectations) since our hashes need not be perfect.
	if float64(collisions) > expected+SLOP*(3*stddev+1) {
		t.Errorf("unexpected number of collisions: got=%d mean=%f stddev=%f", collisions, expected, stddev)
	}
}
165
166// a string plus adding zeros must make distinct hashes
167func TestSmhasherAppendedZeros(t *testing.T) {
168	s := "hello" + strings.Repeat("\x00", 256)
169	h := newHashSet()
170	for i := 0; i <= len(s); i++ {
171		h.addS(s[:i])
172	}
173	h.check(t)
174}
175
176// All 0-3 byte strings have distinct hashes.
177func TestSmhasherSmallKeys(t *testing.T) {
178	h := newHashSet()
179	var b [3]byte
180	for i := 0; i < 256; i++ {
181		b[0] = byte(i)
182		h.addB(b[:1])
183		for j := 0; j < 256; j++ {
184			b[1] = byte(j)
185			h.addB(b[:2])
186			if !testing.Short() {
187				for k := 0; k < 256; k++ {
188					b[2] = byte(k)
189					h.addB(b[:3])
190				}
191			}
192		}
193	}
194	h.check(t)
195}
196
197// Different length strings of all zeros have distinct hashes.
198func TestSmhasherZeros(t *testing.T) {
199	N := 256 * 1024
200	if testing.Short() {
201		N = 1024
202	}
203	h := newHashSet()
204	b := make([]byte, N)
205	for i := 0; i <= N; i++ {
206		h.addB(b[:i])
207	}
208	h.check(t)
209}
210
211// Strings with up to two nonzero bytes all have distinct hashes.
212func TestSmhasherTwoNonzero(t *testing.T) {
213	if GOARCH == "wasm" {
214		t.Skip("Too slow on wasm")
215	}
216	if testing.Short() {
217		t.Skip("Skipping in short mode")
218	}
219	h := newHashSet()
220	for n := 2; n <= 16; n++ {
221		twoNonZero(h, n)
222	}
223	h.check(t)
224}
225func twoNonZero(h *HashSet, n int) {
226	b := make([]byte, n)
227
228	// all zero
229	h.addB(b)
230
231	// one non-zero byte
232	for i := 0; i < n; i++ {
233		for x := 1; x < 256; x++ {
234			b[i] = byte(x)
235			h.addB(b)
236			b[i] = 0
237		}
238	}
239
240	// two non-zero bytes
241	for i := 0; i < n; i++ {
242		for x := 1; x < 256; x++ {
243			b[i] = byte(x)
244			for j := i + 1; j < n; j++ {
245				for y := 1; y < 256; y++ {
246					b[j] = byte(y)
247					h.addB(b)
248					b[j] = 0
249				}
250			}
251			b[i] = 0
252		}
253	}
254}
255
256// Test strings with repeats, like "abcdabcdabcdabcd..."
257func TestSmhasherCyclic(t *testing.T) {
258	if testing.Short() {
259		t.Skip("Skipping in short mode")
260	}
261	r := rand.New(rand.NewSource(1234))
262	const REPEAT = 8
263	const N = 1000000
264	for n := 4; n <= 12; n++ {
265		h := newHashSet()
266		b := make([]byte, REPEAT*n)
267		for i := 0; i < N; i++ {
268			b[0] = byte(i * 79 % 97)
269			b[1] = byte(i * 43 % 137)
270			b[2] = byte(i * 151 % 197)
271			b[3] = byte(i * 199 % 251)
272			randBytes(r, b[4:n])
273			for j := n; j < n*REPEAT; j++ {
274				b[j] = b[j-n]
275			}
276			h.addB(b)
277		}
278		h.check(t)
279	}
280}
281
282// Test strings with only a few bits set
283func TestSmhasherSparse(t *testing.T) {
284	if GOARCH == "wasm" {
285		t.Skip("Too slow on wasm")
286	}
287	if testing.Short() {
288		t.Skip("Skipping in short mode")
289	}
290	sparse(t, 32, 6)
291	sparse(t, 40, 6)
292	sparse(t, 48, 5)
293	sparse(t, 56, 5)
294	sparse(t, 64, 5)
295	sparse(t, 96, 4)
296	sparse(t, 256, 3)
297	sparse(t, 2048, 2)
298}
299func sparse(t *testing.T, n int, k int) {
300	b := make([]byte, n/8)
301	h := newHashSet()
302	setbits(h, b, 0, k)
303	h.check(t)
304}
305
// setbits recursively adds to h the hash of b and of every key derived
// from b by setting up to k more bits at bit index i or greater.
// Restricting new bits to indexes >= i keeps the enumeration free of
// duplicates.
func setbits(h *HashSet, b []byte, i int, k int) {
	h.addB(b)
	if k == 0 {
		return
	}
	for j := i; j < len(b)*8; j++ {
		b[j/8] |= byte(1 << uint(j&7)) // set bit j
		setbits(h, b, j+1, k-1)
		b[j/8] &= byte(^(1 << uint(j&7))) // clear bit j again
	}
}
318
319// Test all possible combinations of n blocks from the set s.
320// "permutation" is a bad name here, but it is what Smhasher uses.
321func TestSmhasherPermutation(t *testing.T) {
322	if GOARCH == "wasm" {
323		t.Skip("Too slow on wasm")
324	}
325	if testing.Short() {
326		t.Skip("Skipping in short mode")
327	}
328	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
329	permutation(t, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
330	permutation(t, []uint32{0, 1}, 20)
331	permutation(t, []uint32{0, 1 << 31}, 20)
332	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 6)
333}
334func permutation(t *testing.T, s []uint32, n int) {
335	b := make([]byte, n*4)
336	h := newHashSet()
337	genPerm(h, b, s, 0)
338	h.check(t)
339}
340func genPerm(h *HashSet, b []byte, s []uint32, n int) {
341	h.addB(b[:n])
342	if n == len(b) {
343		return
344	}
345	for _, v := range s {
346		b[n] = byte(v)
347		b[n+1] = byte(v >> 8)
348		b[n+2] = byte(v >> 16)
349		b[n+3] = byte(v >> 24)
350		genPerm(h, b, s, n+4)
351	}
352}
353
// Key abstracts a hashable test key so the same statistical tests
// (avalanche, windowed) can run over byte-slice, integer, and
// interface-typed keys.
type Key interface {
	clear()              // set bits all to 0
	random(r *rand.Rand) // set key to something random
	bits() int           // how many bits key has
	flipBit(i int)       // flip bit i of the key
	hash() uintptr       // hash the key
	name() string        // for error reporting
}
362
363type BytesKey struct {
364	b []byte
365}
366
367func (k *BytesKey) clear() {
368	for i := range k.b {
369		k.b[i] = 0
370	}
371}
372func (k *BytesKey) random(r *rand.Rand) {
373	randBytes(r, k.b)
374}
375func (k *BytesKey) bits() int {
376	return len(k.b) * 8
377}
378func (k *BytesKey) flipBit(i int) {
379	k.b[i>>3] ^= byte(1 << uint(i&7))
380}
381func (k *BytesKey) hash() uintptr {
382	return BytesHash(k.b, 0)
383}
384func (k *BytesKey) name() string {
385	return fmt.Sprintf("bytes%d", len(k.b))
386}
387
388type Int32Key struct {
389	i uint32
390}
391
392func (k *Int32Key) clear() {
393	k.i = 0
394}
395func (k *Int32Key) random(r *rand.Rand) {
396	k.i = r.Uint32()
397}
398func (k *Int32Key) bits() int {
399	return 32
400}
401func (k *Int32Key) flipBit(i int) {
402	k.i ^= 1 << uint(i)
403}
404func (k *Int32Key) hash() uintptr {
405	return Int32Hash(k.i, 0)
406}
407func (k *Int32Key) name() string {
408	return "int32"
409}
410
411type Int64Key struct {
412	i uint64
413}
414
415func (k *Int64Key) clear() {
416	k.i = 0
417}
418func (k *Int64Key) random(r *rand.Rand) {
419	k.i = uint64(r.Uint32()) + uint64(r.Uint32())<<32
420}
421func (k *Int64Key) bits() int {
422	return 64
423}
424func (k *Int64Key) flipBit(i int) {
425	k.i ^= 1 << uint(i)
426}
427func (k *Int64Key) hash() uintptr {
428	return Int64Hash(k.i, 0)
429}
430func (k *Int64Key) name() string {
431	return "int64"
432}
433
434type EfaceKey struct {
435	i interface{}
436}
437
438func (k *EfaceKey) clear() {
439	k.i = nil
440}
441func (k *EfaceKey) random(r *rand.Rand) {
442	k.i = uint64(r.Int63())
443}
444func (k *EfaceKey) bits() int {
445	// use 64 bits. This tests inlined interfaces
446	// on 64-bit targets and indirect interfaces on
447	// 32-bit targets.
448	return 64
449}
450func (k *EfaceKey) flipBit(i int) {
451	k.i = k.i.(uint64) ^ uint64(1)<<uint(i)
452}
453func (k *EfaceKey) hash() uintptr {
454	return EfaceHash(k.i, 0)
455}
456func (k *EfaceKey) name() string {
457	return "Eface"
458}
459
460type IfaceKey struct {
461	i interface {
462		F()
463	}
464}
465type fInter uint64
466
467func (x fInter) F() {
468}
469
470func (k *IfaceKey) clear() {
471	k.i = nil
472}
473func (k *IfaceKey) random(r *rand.Rand) {
474	k.i = fInter(r.Int63())
475}
476func (k *IfaceKey) bits() int {
477	// use 64 bits. This tests inlined interfaces
478	// on 64-bit targets and indirect interfaces on
479	// 32-bit targets.
480	return 64
481}
482func (k *IfaceKey) flipBit(i int) {
483	k.i = k.i.(fInter) ^ fInter(1)<<uint(i)
484}
485func (k *IfaceKey) hash() uintptr {
486	return IfaceHash(k.i, 0)
487}
488func (k *IfaceKey) name() string {
489	return "Iface"
490}
491
492// Flipping a single bit of a key should flip each output bit with 50% probability.
493func TestSmhasherAvalanche(t *testing.T) {
494	if GOARCH == "wasm" {
495		t.Skip("Too slow on wasm")
496	}
497	if testing.Short() {
498		t.Skip("Skipping in short mode")
499	}
500	avalancheTest1(t, &BytesKey{make([]byte, 2)})
501	avalancheTest1(t, &BytesKey{make([]byte, 4)})
502	avalancheTest1(t, &BytesKey{make([]byte, 8)})
503	avalancheTest1(t, &BytesKey{make([]byte, 16)})
504	avalancheTest1(t, &BytesKey{make([]byte, 32)})
505	avalancheTest1(t, &BytesKey{make([]byte, 200)})
506	avalancheTest1(t, &Int32Key{})
507	avalancheTest1(t, &Int64Key{})
508	avalancheTest1(t, &EfaceKey{})
509	avalancheTest1(t, &IfaceKey{})
510}
// avalancheTest1 measures, over REP random keys, how often flipping
// each input bit of k flips each output bit of the hash, and reports an
// error for any input/output bit pair whose flip rate is statistically
// far from the ideal 50%.
func avalancheTest1(t *testing.T, k Key) {
	const REP = 100000
	r := rand.New(rand.NewSource(1234))
	n := k.bits()

	// grid[i][j] is a count of whether flipping
	// input bit i affects output bit j.
	grid := make([][hashSize]int, n)

	for z := 0; z < REP; z++ {
		// pick a random key, hash it
		k.random(r)
		h := k.hash()

		// flip each bit, hash & compare the results
		for i := 0; i < n; i++ {
			k.flipBit(i)
			d := h ^ k.hash()
			k.flipBit(i) // restore the key

			// record the effects of that bit flip
			g := &grid[i]
			for j := 0; j < hashSize; j++ {
				g[j] += int(d & 1)
				d >>= 1
			}
		}
	}

	// Each entry in the grid should be about REP/2.
	// More precisely, we did N = k.bits() * hashSize experiments where
	// each is the sum of REP coin flips. We want to find bounds on the
	// sum of coin flips such that a truly random experiment would have
	// all sums inside those bounds with 99% probability.
	N := n * hashSize
	var c float64
	// find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
	for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
	}
	c *= 4.0 // allowed slack - we don't need to be perfectly random
	// Binomial(REP, 1/2) has mean REP/2 and stddev sqrt(REP)/2.
	mean := .5 * REP
	stddev := .5 * math.Sqrt(REP)
	low := int(mean - c*stddev)
	high := int(mean + c*stddev)
	for i := 0; i < n; i++ {
		for j := 0; j < hashSize; j++ {
			x := grid[i][j]
			if x < low || x > high {
				t.Errorf("bad bias for %s bit %d -> bit %d: %d/%d\n", k.name(), i, j, x, REP)
			}
		}
	}
}
564
565// All bit rotations of a set of distinct keys
566func TestSmhasherWindowed(t *testing.T) {
567	windowed(t, &Int32Key{})
568	windowed(t, &Int64Key{})
569	windowed(t, &BytesKey{make([]byte, 128)})
570}
571func windowed(t *testing.T, k Key) {
572	if GOARCH == "wasm" {
573		t.Skip("Too slow on wasm")
574	}
575	if testing.Short() {
576		t.Skip("Skipping in short mode")
577	}
578	const BITS = 16
579
580	for r := 0; r < k.bits(); r++ {
581		h := newHashSet()
582		for i := 0; i < 1<<BITS; i++ {
583			k.clear()
584			for j := 0; j < BITS; j++ {
585				if i>>uint(j)&1 != 0 {
586					k.flipBit((j + r) % k.bits())
587				}
588			}
589			h.add(k.hash())
590		}
591		h.check(t)
592	}
593}
594
595// All keys of the form prefix + [A-Za-z0-9]*N + suffix.
596func TestSmhasherText(t *testing.T) {
597	if testing.Short() {
598		t.Skip("Skipping in short mode")
599	}
600	text(t, "Foo", "Bar")
601	text(t, "FooBar", "")
602	text(t, "", "FooBar")
603}
604func text(t *testing.T, prefix, suffix string) {
605	const N = 4
606	const S = "ABCDEFGHIJKLMNOPQRSTabcdefghijklmnopqrst0123456789"
607	const L = len(S)
608	b := make([]byte, len(prefix)+N+len(suffix))
609	copy(b, prefix)
610	copy(b[len(prefix)+N:], suffix)
611	h := newHashSet()
612	c := b[len(prefix):]
613	for i := 0; i < L; i++ {
614		c[0] = S[i]
615		for j := 0; j < L; j++ {
616			c[1] = S[j]
617			for k := 0; k < L; k++ {
618				c[2] = S[k]
619				for x := 0; x < L; x++ {
620					c[3] = S[x]
621					h.addB(b)
622				}
623			}
624		}
625	}
626	h.check(t)
627}
628
629// Make sure different seed values generate different hashes.
630func TestSmhasherSeed(t *testing.T) {
631	h := newHashSet()
632	const N = 100000
633	s := "hello"
634	for i := 0; i < N; i++ {
635		h.addS_seed(s, uintptr(i))
636	}
637	h.check(t)
638}
639
// hashSize is the size of the hash output in bits: 32 on 32-bit
// platforms, 64 on 64-bit platforms. ^uintptr(0)>>63 is 1 exactly when
// uintptr is 64 bits wide, and <<5 turns that 1 into the extra 32.
const hashSize = 32 + int(^uintptr(0)>>63<<5)
642
643func randBytes(r *rand.Rand, b []byte) {
644	for i := range b {
645		b[i] = byte(r.Uint32())
646	}
647}
648
649func benchmarkHash(b *testing.B, n int) {
650	s := strings.Repeat("A", n)
651
652	for i := 0; i < b.N; i++ {
653		StringHash(s, 0)
654	}
655	b.SetBytes(int64(n))
656}
657
// BenchmarkHash* measure string hashing across a range of key sizes.
func BenchmarkHash5(b *testing.B)     { benchmarkHash(b, 5) }
func BenchmarkHash16(b *testing.B)    { benchmarkHash(b, 16) }
func BenchmarkHash64(b *testing.B)    { benchmarkHash(b, 64) }
func BenchmarkHash1024(b *testing.B)  { benchmarkHash(b, 1024) }
func BenchmarkHash65536(b *testing.B) { benchmarkHash(b, 65536) }
663
// TestArrayHash checks that hashing of string arrays mixes the seed
// between elements, by observing map overflow-bucket allocations.
// NOTE(review): the allocation count this test relies on is sensitive
// to the exact statement forms used inside f; do not restructure f
// without re-checking AllocsPerRun.
func TestArrayHash(t *testing.T) {
	if Compiler == "gccgo" {
		t.Skip("does not work on gccgo without better escape analysis")
	}

	// Make sure that "" in arrays hash correctly. The hash
	// should at least scramble the input seed so that, e.g.,
	// {"","foo"} and {"foo",""} have different hashes.

	// If the hash is bad, then all (8 choose 4) = 70 keys
	// have the same hash. If so, we allocate 70/8 = 8
	// overflow buckets. If the hash is good we don't
	// normally allocate any overflow buckets, and the
	// probability of even one or two overflows goes down rapidly.
	// (There is always 1 allocation of the bucket array. The map
	// header is allocated on the stack.)
	f := func() {
		// Make the key type at most 128 bytes. Otherwise,
		// we get an allocation per key.
		type key [8]string
		m := make(map[key]bool, 70)

		// fill m with keys that have 4 "foo"s and 4 ""s.
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			// Bit j of i decides whether element j is "foo" or "".
			for j := uint(0); j < 8; j++ {
				if i>>j&1 != 0 {
					k[j] = "foo"
					cnt++
				}
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	// Allow a little slack over the 1 guaranteed bucket-array allocation.
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
// TestStructHash is the struct-keyed analogue of TestArrayHash: it
// checks that hashing of struct keys containing strings mixes the seed
// between fields, by observing map overflow-bucket allocations.
// NOTE(review): the explicit per-field ifs keep f allocation-free; do
// not restructure without re-checking AllocsPerRun.
func TestStructHash(t *testing.T) {
	// See the comment in TestArrayHash.
	f := func() {
		type key struct {
			a, b, c, d, e, f, g, h string
		}
		m := make(map[key]bool, 70)

		// fill m with keys that have 4 "foo"s and 4 ""s.
		// Bit j of i decides whether the j-th field is "foo" or "".
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			if i&1 != 0 {
				k.a = "foo"
				cnt++
			}
			if i&2 != 0 {
				k.b = "foo"
				cnt++
			}
			if i&4 != 0 {
				k.c = "foo"
				cnt++
			}
			if i&8 != 0 {
				k.d = "foo"
				cnt++
			}
			if i&16 != 0 {
				k.e = "foo"
				cnt++
			}
			if i&32 != 0 {
				k.f = "foo"
				cnt++
			}
			if i&64 != 0 {
				k.g = "foo"
				cnt++
			}
			if i&128 != 0 {
				k.h = "foo"
				cnt++
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	// Allow a little slack over the 1 guaranteed bucket-array allocation.
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
764
// sink is a package-level destination for benchmark results so the
// compiler cannot optimize the measured loads away.
var sink uint64
766
767func BenchmarkAlignedLoad(b *testing.B) {
768	var buf [16]byte
769	p := unsafe.Pointer(&buf[0])
770	var s uint64
771	for i := 0; i < b.N; i++ {
772		s += ReadUnaligned64(p)
773	}
774	sink = s
775}
776
777func BenchmarkUnalignedLoad(b *testing.B) {
778	var buf [16]byte
779	p := unsafe.Pointer(&buf[1])
780	var s uint64
781	for i := 0; i < b.N; i++ {
782		s += ReadUnaligned64(p)
783	}
784	sink = s
785}
786
787func TestCollisions(t *testing.T) {
788	if testing.Short() {
789		t.Skip("Skipping in short mode")
790	}
791	for i := 0; i < 16; i++ {
792		for j := 0; j < 16; j++ {
793			if j == i {
794				continue
795			}
796			var a [16]byte
797			m := make(map[uint16]struct{}, 1<<16)
798			for n := 0; n < 1<<16; n++ {
799				a[i] = byte(n)
800				a[j] = byte(n >> 8)
801				m[uint16(BytesHash(a[:], 0))] = struct{}{}
802			}
803			if len(m) <= 1<<15 {
804				t.Errorf("too many collisions i=%d j=%d outputs=%d out of 65536\n", i, j, len(m))
805			}
806		}
807	}
808}
809