1 /*
2 * virhashcode.c: hash code generation
3 *
4 * Copyright (C) 2012 Red Hat, Inc.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library. If not, see
18 * <http://www.gnu.org/licenses/>.
19 *
20 * The hash code generation is based on the public domain MurmurHash3 from Austin Appleby:
21 * https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
22 *
 * We use only the 32-bit variant, because the 32-bit and 64-bit variants
 * produce different results and we need the hash to be identical on every
 * architecture: 32-bit and 64-bit clients may be talking to each other at
 * the same time.
26 */
27
28 #include <config.h>
29
30 #include "virhashcode.h"
31
/* Rotate the 32-bit value @x left by @r bits.
 *
 * Both shift amounts are masked to 0..31 so that a rotation by 0 (or a
 * multiple of 32) is well-defined: without the mask, "x >> (32 - r)"
 * would shift by the full type width, which is undefined behaviour in C.
 * The current callers only pass 13 and 15, so their results are
 * unchanged; the helper is simply safe for any rotate amount now.
 */
static uint32_t rotl32(uint32_t x, int8_t r)
{
    return (x << (r & 31)) | (x >> ((32 - r) & 31));
}
36
/* Read the i'th 32-bit block (in host byte order) from @p.
 *
 * Slower than the original MurmurHash3 pointer-cast read, but memcpy
 * keeps the access legal on platforms that only do aligned reads and
 * avoids strict-aliasing violations.
 *
 * The index is size_t rather than int: the caller iterates with a
 * size_t block counter, and an int parameter would silently truncate
 * it for keys larger than INT_MAX blocks.
 */
static inline uint32_t getblock(const uint8_t *p, size_t i)
{
    uint32_t r;
    size_t size = sizeof(r);

    memcpy(&r, &p[i * size], size);

    return r;
}
47
/*
 * Finalization mix - force all bits of a hash block to avalanche
 */
static inline uint32_t fmix(uint32_t h)
{
    uint32_t mixed = h;

    /* same xor-shift / multiply ladder as upstream MurmurHash3 fmix32 */
    mixed = (mixed ^ (mixed >> 16)) * 0x85ebca6b;
    mixed = (mixed ^ (mixed >> 13)) * 0xc2b2ae35;

    return mixed ^ (mixed >> 16);
}
61
62
virHashCodeGen(const void * key,size_t len,uint32_t seed)63 uint32_t virHashCodeGen(const void *key, size_t len, uint32_t seed)
64 {
65 const uint8_t *blocks;
66 const uint8_t *tail;
67 size_t nblocks;
68 uint32_t h1;
69 uint32_t k1;
70 uint32_t c1;
71 uint32_t c2;
72 size_t i;
73
74 blocks = (const uint8_t *)key;
75 nblocks = len / 4;
76 h1 = seed;
77 c1 = 0xcc9e2d51;
78 c2 = 0x1b873593;
79
80 /* body */
81
82 for (i = 0; i < nblocks; i++) {
83
84 k1 = getblock(blocks, i);
85
86 k1 *= c1;
87 k1 = rotl32(k1, 15);
88 k1 *= c2;
89
90 h1 ^= k1;
91 h1 = rotl32(h1, 13);
92 h1 = h1 * 5 + 0xe6546b64;
93 }
94
95 /* tail */
96
97 tail = (const uint8_t *)key + nblocks * 4;
98
99 k1 = 0;
100
101 switch (len & 3) {
102 case 3:
103 k1 ^= tail[2] << 16;
104 G_GNUC_FALLTHROUGH;
105 case 2:
106 k1 ^= tail[1] << 8;
107 G_GNUC_FALLTHROUGH;
108 case 1:
109 k1 ^= tail[0];
110 k1 *= c1;
111 k1 = rotl32(k1, 15);
112 k1 *= c2;
113 h1 ^= k1;
114 G_GNUC_FALLTHROUGH;
115 default:
116 break;
117 }
118
119 /* finalization */
120
121 h1 ^= len;
122 h1 = fmix(h1);
123
124 return h1;
125 }
126