1 /*
2  * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
4  * details.
5  */
6 /******************************************************************************/
7 #ifdef JEMALLOC_H_TYPES
8 
9 #endif /* JEMALLOC_H_TYPES */
10 /******************************************************************************/
11 #ifdef JEMALLOC_H_STRUCTS
12 
13 #endif /* JEMALLOC_H_STRUCTS */
14 /******************************************************************************/
15 #ifdef JEMALLOC_H_EXTERNS
16 
17 #endif /* JEMALLOC_H_EXTERNS */
18 /******************************************************************************/
19 #ifdef JEMALLOC_H_INLINES
20 
21 #ifndef JEMALLOC_ENABLE_INLINE
22 uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
23 void	hash_x86_128(const void *key, const int len, uint32_t seed,
24     uint64_t r_out[2]);
25 void	hash_x64_128(const void *key, const int len, const uint32_t seed,
26     uint64_t r_out[2]);
27 void	hash(const void *key, size_t len, const uint32_t seed,
28     size_t r_hash[2]);
29 #endif
30 
31 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
32 /******************************************************************************/
33 /* Internal implementation. */
34 JEMALLOC_INLINE uint32_t
35 hash_rotl_32(uint32_t x, int8_t r)
36 {
37 
38 	return ((x << r) | (x >> (32 - r)));
39 }
40 
41 JEMALLOC_INLINE uint64_t
42 hash_rotl_64(uint64_t x, int8_t r)
43 {
44 
45 	return ((x << r) | (x >> (64 - r)));
46 }
47 
48 JEMALLOC_INLINE uint32_t
49 hash_get_block_32(const uint32_t *p, int i)
50 {
51 
52 	return (p[i]);
53 }
54 
55 JEMALLOC_INLINE uint64_t
56 hash_get_block_64(const uint64_t *p, int i)
57 {
58 
59 	return (p[i]);
60 }
61 
62 JEMALLOC_INLINE uint32_t
63 hash_fmix_32(uint32_t h)
64 {
65 
66 	h ^= h >> 16;
67 	h *= 0x85ebca6b;
68 	h ^= h >> 13;
69 	h *= 0xc2b2ae35;
70 	h ^= h >> 16;
71 
72 	return (h);
73 }
74 
75 JEMALLOC_INLINE uint64_t
76 hash_fmix_64(uint64_t k)
77 {
78 
79 	k ^= k >> 33;
80 	k *= KQU(0xff51afd7ed558ccd);
81 	k ^= k >> 33;
82 	k *= KQU(0xc4ceb9fe1a85ec53);
83 	k ^= k >> 33;
84 
85 	return (k);
86 }
87 
/*
 * MurmurHash3, x86 32-bit variant: hash len bytes at key with the given
 * seed and return the 32-bit result.  Input is consumed in native-endian
 * 32-bit blocks, so output differs between little- and big-endian systems.
 */
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	/* body */
	{
		/*
		 * blocks points just past the last full 4-byte block; the
		 * loop walks it with negative indices so it ends at i == 0.
		 */
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i);

			k1 *= c1;
			k1 = hash_rotl_32(k1, 15);
			k1 *= c2;

			h1 ^= k1;
			h1 = hash_rotl_32(h1, 13);
			h1 = h1*5 + 0xe6546b64;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

		uint32_t k1 = 0;

		/* Fold in the 0-3 trailing bytes; cases fall through. */
		switch (len & 3) {
		case 3: k1 ^= tail[2] << 16;	/* fall through */
		case 2: k1 ^= tail[1] << 8;	/* fall through */
		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
			k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len;

	h1 = hash_fmix_32(h1);

	return (h1);
}
138 
/*
 * MurmurHash3, x86 128-bit variant: hash len bytes at key with the given
 * seed and store the 128-bit result in r_out as two 64-bit words.  Four
 * 32-bit lanes (h1..h4) are mixed in parallel over 16-byte blocks; input is
 * read in native byte order, so output is endian-dependent.
 */
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2])
{
	const uint8_t * data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint32_t h1 = seed;
	uint32_t h2 = seed;
	uint32_t h3 = seed;
	uint32_t h4 = seed;

	const uint32_t c1 = 0x239b961b;
	const uint32_t c2 = 0xab0e9789;
	const uint32_t c3 = 0x38b34ae5;
	const uint32_t c4 = 0xa1e38b93;

	/* body */
	{
		/* As in hash_x86_32(): end pointer plus negative indexing. */
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_32(h1, 19); h1 += h2;
			h1 = h1*5 + 0x561ccd1b;

			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

			h2 = hash_rotl_32(h2, 17); h2 += h3;
			h2 = h2*5 + 0x0bcaa747;

			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

			h3 = hash_rotl_32(h3, 15); h3 += h4;
			h3 = h3*5 + 0x96cd1c35;

			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

			h4 = hash_rotl_32(h4, 13); h4 += h1;
			h4 = h4*5 + 0x32ac3b17;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
		uint32_t k1 = 0;
		uint32_t k2 = 0;
		uint32_t k3 = 0;
		uint32_t k4 = 0;

		/* Fold in the 0-15 trailing bytes; all cases fall through. */
		switch (len & 15) {
		case 15: k4 ^= tail[14] << 16;	/* fall through */
		case 14: k4 ^= tail[13] << 8;	/* fall through */
		case 13: k4 ^= tail[12] << 0;
			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
			/* fall through */
		case 12: k3 ^= tail[11] << 24;	/* fall through */
		case 11: k3 ^= tail[10] << 16;	/* fall through */
		case 10: k3 ^= tail[ 9] << 8;	/* fall through */
		case  9: k3 ^= tail[ 8] << 0;
		     k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
			/* fall through */
		case  8: k2 ^= tail[ 7] << 24;	/* fall through */
		case  7: k2 ^= tail[ 6] << 16;	/* fall through */
		case  6: k2 ^= tail[ 5] << 8;	/* fall through */
		case  5: k2 ^= tail[ 4] << 0;
			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
			/* fall through */
		case  4: k1 ^= tail[ 3] << 24;	/* fall through */
		case  3: k1 ^= tail[ 2] << 16;	/* fall through */
		case  2: k1 ^= tail[ 1] << 8;	/* fall through */
		case  1: k1 ^= tail[ 0] << 0;
			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	h1 = hash_fmix_32(h1);
	h2 = hash_fmix_32(h2);
	h3 = hash_fmix_32(h3);
	h4 = hash_fmix_32(h4);

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	/* Pack the four 32-bit lanes into two 64-bit output words. */
	r_out[0] = (((uint64_t) h2) << 32) | h1;
	r_out[1] = (((uint64_t) h4) << 32) | h3;
}
240 
/*
 * MurmurHash3, x64 128-bit variant: hash len bytes at key with the given
 * seed and store the 128-bit result in r_out.  Two 64-bit lanes (h1, h2)
 * are mixed over 16-byte blocks; input is read in native byte order, so
 * output is endian-dependent.
 */
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2])
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint64_t h1 = seed;
	uint64_t h2 = seed;

	const uint64_t c1 = KQU(0x87c37b91114253d5);
	const uint64_t c2 = KQU(0x4cf5ad432745937f);

	/* body */
	{
		const uint64_t *blocks = (const uint64_t *) (data);
		int i;

		for (i = 0; i < nblocks; i++) {
			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_64(h1, 27); h1 += h2;
			h1 = h1*5 + 0x52dce729;

			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

			h2 = hash_rotl_64(h2, 31); h2 += h1;
			h2 = h2*5 + 0x38495ab5;
		}
	}

	/* tail */
	{
		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
		uint64_t k1 = 0;
		uint64_t k2 = 0;

		/* Fold in the 0-15 trailing bytes; all cases fall through. */
		switch (len & 15) {
		case 15: k2 ^= ((uint64_t)(tail[14])) << 48;	/* fall through */
		case 14: k2 ^= ((uint64_t)(tail[13])) << 40;	/* fall through */
		case 13: k2 ^= ((uint64_t)(tail[12])) << 32;	/* fall through */
		case 12: k2 ^= ((uint64_t)(tail[11])) << 24;	/* fall through */
		case 11: k2 ^= ((uint64_t)(tail[10])) << 16;	/* fall through */
		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;	/* fall through */
		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
			/* fall through */
		case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56;	/* fall through */
		case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48;	/* fall through */
		case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40;	/* fall through */
		case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32;	/* fall through */
		case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24;	/* fall through */
		case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16;	/* fall through */
		case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;	/* fall through */
		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = hash_fmix_64(h1);
	h2 = hash_fmix_64(h2);

	h1 += h2;
	h2 += h1;

	r_out[0] = h1;
	r_out[1] = h2;
}
318 
319 /******************************************************************************/
320 /* API. */
/*
 * Public entry point: hash len bytes at key into the two size_t words of
 * r_hash.  On 64-bit little-endian builds the x64 variant writes its two
 * 64-bit results directly into r_hash (size_t is 64 bits there, so the
 * cast is safe); otherwise the x86 128-bit variant runs and each 64-bit
 * half is truncated to size_t.
 */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
	uint64_t hashes[2];
	hash_x86_128(key, len, seed, hashes);
	/* Truncate to size_t (may be 32 bits on this configuration). */
	r_hash[0] = (size_t)hashes[0];
	r_hash[1] = (size_t)hashes[1];
#endif
}
333 #endif
334 
335 #endif /* JEMALLOC_H_INLINES */
336 /******************************************************************************/
337