//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

#include "MurmurHash3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE	__forceinline

#include <stdlib.h>

#define ROTL32(x,y)	_rotl(x,y)
#define ROTL64(x,y)	_rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else	// defined(_MSC_VER)

// Note: on GCC/Clang, always_inline must be paired with the inline keyword,
// otherwise some GCC versions reject the attribute outright.
#define FORCE_INLINE inline __attribute__((always_inline))

inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y)	rotl32(x,y)
#define ROTL64(x,y)	rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
{
  return p[i];
}

FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
{
  return p[i];
}
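
// A hedged, illustrative alternative (ours, not part of the original, and not
// called below): assembling each word byte-by-byte makes the read independent
// of host endianness and alignment, at some cost in speed. The name
// getblock_le32 is our own.
FORCE_INLINE uint32_t getblock_le32 ( const uint8_t * p, int i )
{
  return  (uint32_t)p[i*4+0]
       | ((uint32_t)p[i*4+1] << 8)
       | ((uint32_t)p[i*4+2] << 16)
       | ((uint32_t)p[i*4+3] << 24);
}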

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}
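
// "Avalanche" here means every input bit affects, on average, half of the
// output bits. A rough spot-check of that property (our sketch, using a
// GCC/Clang builtin; not part of the original):
//
//   int flipped = __builtin_popcount(fmix(h) ^ fmix(h ^ 1u)); // expect ~16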

//----------

FORCE_INLINE uint64_t fmix ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

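  // blocks points one past the last full block, so the loop below walks the
  // body with negative indices, from -nblocks up to zero.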
  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

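  // Each case falls through deliberately, accumulating the remaining bytes.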
  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t*)out = h1;
}
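
// Example usage (an illustrative sketch of ours): out must point to at least
// 4 writable bytes.
//
//   uint32_t hash;
//   MurmurHash3_x86_32("foo", 3 /* len */, 0 /* seed */, &hash);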

//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

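  // As in the 32-bit version, blocks points past the last full 16-byte block
  // and the loop indexes it with negative offsets.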
  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

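  // Again, every case falls through on purpose.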
  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
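
// Example usage (an illustrative sketch of ours; buf and buflen are
// placeholder names): out must point to at least 16 writable bytes.
//
//   uint32_t hash[4];
//   MurmurHash3_x86_128(buf, buflen, 0 /* seed */, hash);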

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

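  // The cases fall through intentionally, gathering up to 15 trailing bytes.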
  switch(len & 15)
  {
  case 15: k2 ^= uint64_t(tail[14]) << 48;
  case 14: k2 ^= uint64_t(tail[13]) << 40;
  case 13: k2 ^= uint64_t(tail[12]) << 32;
  case 12: k2 ^= uint64_t(tail[11]) << 24;
  case 11: k2 ^= uint64_t(tail[10]) << 16;
  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
           k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
           k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}

//-----------------------------------------------------------------------------
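
// Minimal self-test driver (ours, illustrative; build with -DMURMURHASH3_DEMO
// to enable - the macro name is our own invention, not part of the original).
#ifdef MURMURHASH3_DEMO

#include <stdio.h>

int main ( void )
{
  const char key[] = "The quick brown fox";
  const int  len   = (int)(sizeof(key) - 1);

  uint32_t h32;
  uint32_t h128_x86[4];
  uint64_t h128_x64[2];

  MurmurHash3_x86_32 (key, len, 0, &h32);
  MurmurHash3_x86_128(key, len, 0, h128_x86);
  MurmurHash3_x64_128(key, len, 0, h128_x64);

  // As noted at the top of the file, the x86 and x64 variants are expected
  // to produce different 128-bit results for the same input.
  printf("x86_32 : %08x\n", h32);
  printf("x86_128: %08x%08x%08x%08x\n",
         h128_x86[0], h128_x86[1], h128_x86[2], h128_x86[3]);
  printf("x64_128: %016llx%016llx\n",
         (unsigned long long)h128_x64[0], (unsigned long long)h128_x64[1]);
  return 0;
}

#endif // MURMURHASH3_DEMO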