1// Copyright 2014 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Hashing algorithm inspired by
6//   xxhash: https://code.google.com/p/xxhash/
7// cityhash: https://code.google.com/p/cityhash/
8
9// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm alpha amd64p32 arm64be ia64 mips64p32 mips64p32le sparc64
10
11package runtime
12
13import "unsafe"
14
15// For gccgo, use go:linkname to export compiler-called functions.
16//
17//go:linkname memhash
18
const (
	// Constants for multiplication: four random odd 64-bit numbers.
	// Being odd makes multiplication by each of them invertible mod 2^64,
	// so no key information is discarded by the mixing steps below.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)
26
// memhash computes a hash of the s bytes starting at p, seeded by seed.
// It is the fallback (non-AES) hash for 64-bit platforms, mixing the key
// through multiply-rotate-multiply rounds in the style of xxhash/cityhash.
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	// Prefer the hardware AES-based hash when the CPU supports it.
	if (GOARCH == "amd64" || GOARCH == "arm64") && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint64(seed + s*hashkey[0])
tail:
	// Dispatch on the (remaining) key length. The default case consumes
	// 32-byte chunks and then jumps back here to hash the final <=32 bytes.
	switch {
	case s == 0:
		// Empty key: hash is derived from seed and length alone.
	case s < 4:
		// 1-3 bytes: first, middle, and last byte. These reads overlap
		// for short keys, which is fine — each position still contributes.
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl_31(h*m1) * m2
	case s <= 8:
		// 4-8 bytes: two possibly-overlapping 32-bit reads cover the key.
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl_31(h*m1) * m2
	case s <= 16:
		// 9-16 bytes: two possibly-overlapping 64-bit reads.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	case s <= 32:
		// 17-32 bytes: four 64-bit reads; the last two are anchored to the
		// end of the key and may overlap the first two.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	default:
		// Long keys: run four independent accumulator lanes over 32-byte
		// chunks (better instruction-level parallelism), then fold the
		// lanes together and jump back to handle the remaining <=32 bytes.
		v1 := h
		v2 := uint64(seed * hashkey[1])
		v3 := uint64(seed * hashkey[2])
		v4 := uint64(seed * hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl_31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl_31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl_31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl_31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}

	// Finalizer: xorshift/multiply avalanche so every input bit
	// influences every output bit.
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
87
88func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
89	h := uint64(seed + 4*hashkey[0])
90	v := uint64(readUnaligned32(p))
91	h ^= v
92	h ^= v << 32
93	h = rotl_31(h*m1) * m2
94	h ^= h >> 29
95	h *= m3
96	h ^= h >> 32
97	return uintptr(h)
98}
99
100func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
101	h := uint64(seed + 8*hashkey[0])
102	h ^= uint64(readUnaligned32(p)) | uint64(readUnaligned32(add(p, 4)))<<32
103	h = rotl_31(h*m1) * m2
104	h ^= h >> 29
105	h *= m3
106	h ^= h >> 32
107	return uintptr(h)
108}
109
110// Note: in order to get the compiler to issue rotl instructions, we
111// need to constant fold the shift amount by hand.
112// TODO: convince the compiler to issue rotl instructions after inlining.
113func rotl_31(x uint64) uint64 {
114	return (x << 31) | (x >> (64 - 31))
115}
116