//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided here comprise one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to implement
//     hash tables, checksumming, and other common uses of hashes. It is not an
//     integer type (although it can be converted to one) because it is risky
//     to assume much about the internals of a hash_code. In particular, each
//     execution of the program has a high probability of producing a different
//     hash_code for a given input. Thus hash_code values are not stable and
//     should not be saved or persisted; they should only be used during a
//     single execution to build hashing data structures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type which wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//      programmers in easily and intuitively combining a set of data into
//      a single hash_code for their object. They should only logically be used
//      within the implementation of a 'hash_value' routine or similar context.
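//
// For example, the pieces above are typically used together as follows (a
// sketch only; the 'Widget' type and its fields are hypothetical):
//
// \code
//   namespace widgets {
//   struct Widget {
//     int ID;
//     int Kind;
//   };
//   // Overloaded in the type's own namespace so it is found via ADL.
//   llvm::hash_code hash_value(const Widget &W) {
//     return llvm::hash_combine(W.ID, W.Kind);
//   }
//   } // namespace widgets
// \endcode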
//
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
// benchmarked at over 6.5 GiB/s for large keys, and under 20 cycles/hash for
// keys under 32 bytes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string>
#include <utility>

namespace llvm {

/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;

  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};

/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicitly promote to a common
/// type without changing the value.
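///
/// For example, the following is intended to hold (a sketch of the intended
/// behavior, not a test):
/// \code
///   short S = 42;
///   long L = 42;
///   assert(llvm::hash_value(S) == llvm::hash_value(L)); // Same value, same hash_code.
/// \endcode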
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value);

/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
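///
/// For example (a sketch; the variables are hypothetical):
/// \code
///   int A = 1, B = 1;
///   // Hashes the addresses &A and &B; the two hash_codes will almost
///   // certainly differ even though A == B.
///   llvm::hash_code HA = llvm::hash_value(&A);
///   llvm::hash_code HB = llvm::hash_value(&B);
/// \endcode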
template <typename T> hash_code hash_value(const T *ptr);

/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);


/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use except when
/// called immediately at the start of a simple program designed for
/// reproducible behavior.
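///
/// A minimal usage sketch (the seed value shown is arbitrary; note that a
/// value of zero is indistinguishable from "no override" in the current
/// implementation):
/// \code
///   int main() {
///     llvm::set_fixed_execution_hash_seed(42); // Before any hash is computed.
///     // ... the rest of the program ...
///   }
/// \endcode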
void set_fixed_execution_hash_seed(uint64_t fixed_value);


// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

/// Load 64 bits from a (potentially unaligned) address, interpreting the
/// bytes as a little-endian value and byte-swapping on big-endian hosts.
inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Load 32 bits from a (potentially unaligned) address, interpreting the
/// bytes as a little-endian value and byte-swapping on big-endian hosts.
inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;

/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
                       a + rotate(b ^ k3, 20) - c + len + seed);
}

inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = rotate(a + z, 52);
  uint64_t c = rotate(a, 37);
  a += fetch64(s + 8);
  c += rotate(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + rotate(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = rotate(a + z, 52);
  c = rotate(a, 37);
  a += fetch64(s + len - 24);
  c += rotate(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + rotate(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

/// Hash a short key (callers guarantee at most 64 bytes) by dispatching on
/// its length to one of the specialized routines above.
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;

  /// Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {
      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
      seed * k1, shift_mix(seed), 0 };
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// Mix 32 bytes from the input sequence into the 16 bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = rotate(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += rotate(a, 44) + d;
    a += c;
  }

  /// Mix in a 64-byte buffer of data.
  /// We always mix all 64 bytes, even when the remaining input is shorter;
  /// callers separately track the actual number of bytes hashed.
  void mix(const char *s) {
    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = rotate(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};


/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable directly.
extern uint64_t fixed_seed_override;

inline uint64_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
  return seed;
}


/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
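///
/// For illustration, the intended behavior is (a sketch, not a test):
/// \code
///   static_assert(is_hashable_data<int>::value,
///                 "integers hash directly from their bytes");
///   static_assert(!is_hashable_data<std::string>::value,
///                 "strings must first go through hash_value");
/// \endcode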
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                   std::is_pointer<T>::value) &&
                                  64 % sizeof(T) == 0)> {};

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                  is_hashable_data<U>::value &&
                                  (sizeof(T) + sizeof(U)) ==
                                   sizeof(std::pair<T, U>))> {};

/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
  return value;
}
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}

/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

/// Implement the combining of a range of values into a hash_code.
///
/// This overload handles arbitrary input iterators. Rather than computing a
/// hash_code for each element and then combining them, it (as an optimization)
/// buffers the elements' hashable data -- the raw bytes for integer-like
/// types, or the result of hash_value otherwise -- into 64-byte chunks and
/// mixes each chunk into the hash state.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = state.create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64 bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}

/// Implement the combining of a contiguous range of hashable data into a
/// hash_code.
///
/// This overload is selected when the iterator is a pointer and the pointee's
/// underlying bytes can be hashed directly (see is_hashable_data). Rather than
/// computing a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the underlying data. Also, because the
/// values are stored in contiguous memory, this routine avoids copying each
/// value and directly reads from the underlying memory.
template <typename ValueT>
typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = state.create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}

} // namespace detail
} // namespace hashing


/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
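///
/// For example (a sketch; 'Values' is a hypothetical container):
/// \code
///   std::vector<int> Values = {1, 2, 3};
///   // Produces the same hash_code as hash_combine(1, 2, 3), but works for
///   // sequences whose size is only known at run time.
///   llvm::hash_code Code =
///       llvm::hash_combine_range(Values.begin(), Values.end());
/// \endcode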
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}


// Implementation details for hash_combine.
namespace hashing {
namespace detail {

/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful at minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  char buffer[64] = {};
  hash_state state;
  const uint64_t seed;

public:
  /// Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
    : seed(get_execution_seed()) {}

  /// Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                     T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = state.create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        llvm_unreachable("buffer smaller than stored type");
    }
    return buffer_ptr;
  }

  /// Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

  /// Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fits in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64 bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing

/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine; they should instead call 'hash_value'.
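///
/// A typical hash_value implementation built on this routine might look like
/// the following (a sketch; the 'Symbol' type is hypothetical):
/// \code
///   struct Symbol { std::string Name; unsigned Size; };
///   llvm::hash_code hash_value(const Symbol &S) {
///     // The integer is combined directly; the std::string member is routed
///     // through its own hash_value overload.
///     return llvm::hash_combine(S.Name, S.Size);
///   }
/// \endcode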
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}

// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {

/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}

} // namespace detail
} // namespace hashing

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(
      static_cast<uint64_t>(value));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
    reinterpret_cast<uintptr_t>(ptr));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

} // namespace llvm

#endif