#include <stdint.h>
#include <cstring>
#include "hash.hpp"

// 64-bit left rotate. Callers pass nonzero amounts; a rotate by 0 (or 64)
// would shift by the full word width, which is undefined behavior.
inline uint64_t _rotl64 ( uint64_t value, int8_t amount ) {
  return (value << amount) | (value >> (64 - amount));
}

/* get16bits reads two bytes of input as a little-endian 16-bit value. It may
   already be provided by hash.hpp; this guarded fallback is the usual
   byte-wise definition (an assumption, not part of the original file). */
#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                      + (uint32_t)(((const uint8_t *)(d))[0]))
#endif

uint32_t SuperFastHash (const char *data, int len) {
  uint32_t hash = len, tmp;
  int rem;

  if (len <= 0 || data == NULL) { return 0; }

  rem = len & 3;
  len >>= 2;

  /* Main loop */
  for (; len > 0; len--) {
    hash  += get16bits (data);
    tmp    = (get16bits (data+2) << 11) ^ hash;
    hash   = (hash << 16) ^ tmp;
    data  += 2*sizeof (uint16_t);
    hash  += hash >> 11;
  }

  /* Handle end cases */
  switch (rem) {
  case 3: hash += get16bits (data);
    hash ^= hash << 16;
    hash ^= data[sizeof (uint16_t)] << 18;
    hash += hash >> 11;
    break;
  case 2: hash += get16bits (data);
    hash ^= hash << 11;
    hash += hash >> 17;
    break;
  case 1: hash += *data;
    hash ^= hash << 10;
    hash += hash >> 1;
  }

  /* Force "avalanching" of the final bits */
  hash ^= hash << 3;
  hash += hash >> 5;
  hash ^= hash << 4;
  hash += hash >> 17;
  hash ^= hash << 25;
  hash += hash >> 6;

  return hash;
}
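
// Example usage (an illustrative sketch; the message and length are arbitrary):
//
//   const char msg[] = "hello world";
//   uint32_t h = SuperFastHash(msg, (int)(sizeof(msg) - 1));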

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

inline uint64_t getblock ( const uint64_t *p, int i ) {
  return p[i];
}
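
// On big-endian or strict-alignment targets, a byte-wise load along these
// lines would reproduce the little-endian result (a sketch, not part of the
// original file):
//
//   inline uint64_t getblock_portable ( const uint8_t *p, int i ) {
//     const uint8_t *b = p + i*8;
//     uint64_t k = 0;
//     for (int j = 7; j >= 0; j--) { k = (k << 8) | b[j]; } // little-endian
//     return k;
//   }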

//----------
// Block mix - combine the key bits with the hash bits and scramble everything

inline void bmix64 ( uint64_t& h1, uint64_t& h2, uint64_t& k1, uint64_t& k2, uint64_t& c1, uint64_t& c2 ) {
  k1 *= c1;
  k1  = _rotl64(k1,23);
  k1 *= c2;
  h1 ^= k1;
  h1 += h2;

  h2 = _rotl64(h2,41);

  k2 *= c2;
  k2  = _rotl64(k2,23);
  k2 *= c1;
  h2 ^= k2;
  h2 += h1;

  h1 = h1*3+0x52dce729;
  h2 = h2*3+0x38495ab5;

  c1 = c1*5+0x7b7d159c;
  c2 = c2*5+0x6bce6396;
}

//----------
// Finalization mix - avalanches all bits to within 0.05% bias

inline uint64_t fmix64 ( uint64_t k ) {
  k ^= k >> 33;
  k *= 0xff51afd7ed558ccdULL;
  k ^= k >> 33;
  k *= 0xc4ceb9fe1a85ec53ULL;
  k ^= k >> 33;

  return k;
}
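
// fmix64 also works on its own as a finalizer for 64-bit integer keys,
// e.g. (illustrative): uint64_t h = fmix64(key ^ seed);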

void MurmurHash3_x64_128 ( const void *key, const int len, const uint32_t seed, void *out ) {
  const uint8_t *data = (const uint8_t *)key;
  const int nblocks = len / 16;

  uint64_t h1 = 0x9368e53c2f6af274ULL ^ seed;
  uint64_t h2 = 0x586dcd208f7cd3fdULL ^ seed;

  uint64_t c1 = 0x87c37b91114253d5ULL;
  uint64_t c2 = 0x4cf5ad432745937fULL;

  //----------
  // body

  const uint64_t *blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++) {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    bmix64(h1,h2,k1,k2,c1,c2);
  }

  //----------
  // tail

  const uint8_t *tail = (const uint8_t *)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15) { // cases fall through intentionally
  case 15: k2 ^= uint64_t(tail[14]) << 48;
  case 14: k2 ^= uint64_t(tail[13]) << 40;
  case 13: k2 ^= uint64_t(tail[12]) << 32;
  case 12: k2 ^= uint64_t(tail[11]) << 24;
  case 11: k2 ^= uint64_t(tail[10]) << 16;
  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
  case  9: k2 ^= uint64_t(tail[ 8]) << 0;

  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
    bmix64(h1,h2,k1,k2,c1,c2);
  }

  //----------
  // finalization

  h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t *)out)[0] = h1;
  ((uint64_t *)out)[1] = h2;
}
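
// Example usage (an illustrative sketch; key, seed, and buffer are arbitrary):
//
//   uint64_t out[2];
//   const char key[] = "hello";
//   MurmurHash3_x64_128(key, (int)(sizeof(key) - 1), 0, out);
//   // out[0] and out[1] now hold the 128-bit hash.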

//-----------------------------------------------------------------------------
// If we need a smaller hash value, it's faster to just use a portion of the
// 128-bit hash

void MurmurHash3_x64_32 ( const void *key, int len, uint32_t seed, void *out ) {
  uint32_t temp[4];

  MurmurHash3_x64_128(key,len,seed,temp);

  *(uint32_t *)out = temp[0];
}

//----------

void MurmurHash3_x64_64 ( const void *key, int len, uint32_t seed, void *out ) {
  uint64_t temp[2];

  MurmurHash3_x64_128(key,len,seed,temp);

  *(uint64_t *)out = temp[0];
}

//-----------------------------------------------------------------------------