/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64    13.8 GB/s            1.9 GB/s
XXH32     6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such
    * as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
#  ifdef XXH_NAMESPACE
#    error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported"
     /*
      * Note: Alternative: #undef all symbols (it's a pretty large list).
      * Without #error: it compiles, but functions are actually not inlined.
      */
#  endif
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols, but they must
    * still be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and is a more dispersed action.
    * Meanwhile, renaming can be achieved in a single block.
    */
#  define XXH_IPREF(Id)   XXH_INLINE_ ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from the xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  0
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
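
/*
 * For illustration: with the values above, XXH_VERSION_NUMBER evaluates to
 * 0*10000 + 8*100 + 0 = 800 for v0.8.0, so version checks can be written as
 * plain integer comparisons, e.g. `#if XXH_VERSION_NUMBER >= 800`.
 */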

/*!
 * @brief Obtains the xxHash version.
 *
 * This is only useful when xxHash is compiled as a shared library, as it is
 * independent of the version defined in the header.
 *
 * @return `XXH_VERSION_NUMBER` as of when the function was compiled.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/* ****************************
 *  Definitions
 ******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint32_t XXH32_hash_t;
#else
#  include <limits.h>
#  if UINT_MAX == 0xFFFFFFFFUL
     typedef unsigned int XXH32_hash_t;
#  else
#    if ULONG_MAX == 0xFFFFFFFFUL
       typedef unsigned long XXH32_hash_t;
#    else
#      error "unsupported platform: need a 32-bit type"
#    endif
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is considered rather weak by today's standards.
 *   The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
 *   systems, and offers true 64/128 bit hash results. It provides a superior
 *   level of dispersion, and greatly reduces the risks of collisions.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
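
/*!
 * A minimal one-shot usage sketch (the function name and seed value are
 * illustrative, not part of the library):
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * XXH32_hash_t hashString(const char* str)
 * {
 *     XXH32_hash_t const seed = 0;   // any fixed seed works
 *     return XXH32(str, strlen(str), seed);
 * }
 * @endcode
 */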

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 * #include <assert.h>
 * #include <stdio.h>
 * #include <xxhash.h>
 * #define BUFFER_SIZE 256
 *
 * // Note: XXH64 and XXH3 use the same interface.
 * XXH32_hash_t
 * hashFile(FILE* stream)
 * {
 *     XXH32_state_t* state;
 *     unsigned char buf[BUFFER_SIZE];
 *     size_t amt;
 *     XXH32_hash_t hash;
 *
 *     state = XXH32_createState();       // Create a state
 *     assert(state != NULL);             // Error check here
 *     XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *     while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *         XXH32_update(state, buf, amt); // Hash the file in chunks
 *     }
 *     hash = XXH32_digest(state);        // Finalize the hash
 *     XXH32_freeState(state);            // Clean up
 *     return hash;
 * }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering, since
 * little-endian and big-endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
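
/*!
 * Illustrative sketch of serializing a hash in canonical (big endian) form
 * and reading it back; the file I/O and function names here are assumptions
 * for the example:
 * @code{.c}
 * #include <stdio.h>
 * #include "xxhash.h"
 *
 * void writeHash(FILE* out, XXH32_hash_t hash)
 * {
 *     XXH32_canonical_t canonical;
 *     XXH32_canonicalFromHash(&canonical, hash);  // fixed byte order
 *     fwrite(canonical.digest, 1, sizeof(canonical.digest), out);
 * }
 *
 * XXH32_hash_t readHash(FILE* in)
 * {
 *     XXH32_canonical_t canonical;
 *     fread(canonical.digest, 1, sizeof(canonical.digest), in);
 *     return XXH32_hashFromCanonical(&canonical); // back to native integer
 * }
 * @endcode
 */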


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results. It provides a superior level of
 *   dispersion, and greatly reduces the risks of collisions.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
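
/*!
 * One-shot usage follows the same pattern as XXH32(); a minimal sketch
 * (the wrapper name is illustrative):
 * @code{.c}
 * #include "xxhash.h"
 *
 * XXH64_hash_t hashBuffer(const void* data, size_t size)
 * {
 *     return XXH64(data, size, 0);   // seed 0: default behavior
 * }
 * @endcode
 */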

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on the platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but it does not require either.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * The XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
 * XXH3 64-bit variant
 ************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly,
 * based on the default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the produced hash values depends on the secret's entropy.
 * Technically, the secret must look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever unsure about the "randomness" of the blob of bytes,
 * consider relabelling it as a "custom seed" instead,
 * and employ "XXH3_generateSecret()" (see below)
 * to generate a high-entropy secret derived from the custom seed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
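
/*!
 * A hedged sketch of hashing with a custom secret. The secret buffer below is
 * a placeholder: in real usage it must be filled with high-entropy bytes,
 * e.g. produced by XXH3_generateSecret() from the advanced API (see below).
 * @code{.c}
 * #include "xxhash.h"
 *
 * XXH64_hash_t hashWithSecret(const void* data, size_t len)
 * {
 *     // Assumption: mySecret has been filled with random-looking bytes
 *     // before this call; zero-filled content would make a poor secret.
 *     static unsigned char mySecret[XXH3_SECRET_SIZE_MIN];
 *     return XXH3_64bits_withSecret(data, len, mySecret, sizeof(mySecret));
 * }
 * @endcode
 */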


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * The digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * The digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, so it _must outlive_ the hash streaming session.
 * As with the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
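
/*!
 * A minimal streaming sketch, assuming the input arrives in two chunks
 * (names are illustrative):
 * @code{.c}
 * #include <assert.h>
 * #include "xxhash.h"
 *
 * XXH64_hash_t hashTwoChunks(const void* a, size_t lenA,
 *                            const void* b, size_t lenB)
 * {
 *     XXH3_state_t* const state = XXH3_createState();
 *     XXH64_hash_t hash;
 *     assert(state != NULL);
 *     XXH3_64bits_reset(state);            // same result as XXH3_64bits()
 *     XXH3_64bits_update(state, a, lenA);
 *     XXH3_64bits_update(state, b, lenB);
 *     hash = XXH3_64bits_digest(state);
 *     XXH3_freeState(state);
 *     return hash;
 * }
 * @endcode
 */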

/* note: the canonical representation of XXH3 is the same as XXH64,
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
 * XXH3 128-bit variant
 ************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;
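
/*!
 * For illustration: the struct represents a single 128-bit value, so printing
 * @ref high64 followed by @ref low64 yields the number in conventional
 * (large digits first) hexadecimal notation. A sketch:
 * @code{.c}
 * #include <stdio.h>
 * #include "xxhash.h"
 *
 * void printHash128(XXH128_hash_t h)
 * {
 *     printf("%016llx%016llx\n",
 *            (unsigned long long)h.high64,
 *            (unsigned long long)h.low64);
 * }
 * @endcode
 */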

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use the already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
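
/*!
 * A sketch of sorting an array of hashes with `qsort()`, using XXH128_cmp()
 * directly as the comparator (the wrapper function is illustrative):
 * @code{.c}
 * #include <stdlib.h>
 * #include "xxhash.h"
 *
 * void sortHashes(XXH128_hash_t* hashes, size_t count)
 * {
 *     qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 * }
 * @endcode
 */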


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v1;           /*!< First accumulator lane */
   XXH32_hash_t v2;           /*!< Second accumulator lane */
   XXH32_hash_t v3;           /*!< Third accumulator lane */
   XXH32_hash_t v4;           /*!< Fourth accumulator lane */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v1;           /*!< First accumulator lane */
   XXH64_hash_t v2;           /*!< Second accumulator lane */
   XXH64_hash_t v3;           /*!< Third accumulator lane */
   XXH64_hash_t v4;           /*!< Fourth accumulator lane */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH64_state_t */

#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11+ */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && defined(__GNUC__)
#  define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#  define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64bits_update(), XXH3_128bits_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * @note **This structure has a strict alignment requirement of 64 bytes.** Do
 * not allocate this with `malloc()` or `new`, it will not be sufficiently
 * aligned. Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack
 * allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Do not access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t reserved32;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on the stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses the default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation;
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
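
/*!
 * Usage sketch for a stack-allocated state (hedged; names are illustrative).
 * Stack allocation is permitted here because the struct definition above
 * guarantees correct alignment:
 * @code{.c}
 * #include "xxhash.h"
 *
 * XXH64_hash_t hashOnStack(const void* data, size_t len, XXH64_hash_t seed)
 * {
 *     XXH3_state_t state;          // no XXH3_createState() needed
 *     XXH3_INITSTATE(&state);      // required before a _withSeed reset
 *     XXH3_64bits_reset_withSeed(&state, seed);
 *     XXH3_64bits_update(&state, data, len);
 *     return XXH3_64bits_digest(&state);
 * }
 * @endcode
 */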


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection
 * than a 64-bit seed, as it becomes much more difficult for an external actor to
 * guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
 * into an already allocated buffer secretBuffer.
 * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE bytes long.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * this function can be used to generate a secret of proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even a "low entropy" source such as a bunch
 * of zeroes. The resulting `secret` will nonetheless provide all expected qualities.
 *
 * Supplying NULL as the customSeed copies the default secret into `secretBuffer`.
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize);
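
/*!
 * A sketch of deriving a reusable secret from an application-specific seed
 * phrase; the seed string, wrapper name, and lazy-init scheme are assumptions
 * for this example (and not thread-safe as written):
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * XXH64_hash_t hashWithDerivedSecret(const void* data, size_t len)
 * {
 *     static unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *     static int generated = 0;
 *     const char* const customSeed = "my application seed";  // any content works
 *     if (!generated) {
 *         XXH3_generateSecret(secret, customSeed, strlen(customSeed));
 *         generated = 1;
 *     }
 *     return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 * }
 * @endcode
 */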


/* simple short-cut to pre-selected XXH3_128bits variant */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence it must be included alongside the header.
 * Previously, the implementation in xxhash.c was #included whenever inlining
 * was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
 *  Tuning parameters
 ***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG    /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The switch below allows selecting a different access method
 * in search of improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *   @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *   @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See https://stackoverflow.com/a/32095106/646947 for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
/*!
 * @def XXH_ACCEPT_NULL_INPUT_POINTER
 * @brief Whether to add explicit `NULL` checks.
 *
 * If the input pointer is `NULL` and the length is non-zero, xxHash's default
 * behavior is to dereference it, triggering a segfault.
 *
 * When this macro is enabled, xxHash actively checks the input for a null pointer.
 * If it is `NULL`, the result is the same as for a zero-length input.
 */
#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * The alignment check is automatically disabled on x86, x64, and arm64,
 * which are platforms known to offer good unaligned memory access performance.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary, which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
 * -fno-inline with GCC or Clang, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH_REROLL
 * @brief Whether to reroll `XXH32_finalize` and `XXH64_finalize`.
 *
 * For performance, `XXH32_finalize` and `XXH64_finalize` use an unrolled loop
 * in the form of a switch statement.
 *
 * This is not always desirable, as it generates larger code, and depending on
 * the architecture, may even be slower.
 *
 * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
 */
#  define XXH_REROLL 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
/* prefer __packed__ structures (method 1) for gcc on armv7 and armv8 */
#  if !defined(__clang__) && ( \
    (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
    (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)) )
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
#endif

#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if defined(__OPTIMIZE_SIZE__)  /* -Os, -Oz */ \
   || defined(__NO_INLINE__)      /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH_REROLL
#  if defined(__OPTIMIZE_SIZE__)
#    define XXH_REROLL 1
#  else
#    define XXH_REROLL 0
#  endif
#endif

/*!
 * @defgroup impl Implementation
 * @{
 */


/* *************************************
 *  Includes & Memory related functions
 ***************************************/
/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
#include <stdlib.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 */
static void* XXH_malloc(size_t s) { return malloc(s); }

/*!
 * @internal
 * @brief Modify this function to use a different routine than free().
 */
static void XXH_free(void* p) { free(p); }

#include <string.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than memcpy().
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */


/* *************************************
 *  Compiler Specific Options
 ***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined(__GNUC__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))  /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif



/* *************************************
 *  Debug
 ***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif

/* note: use after variable declarations */
#define XXH_STATIC_ASSERT(c)  do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
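
/*
 * Usage sketch (illustrative): XXH_STATIC_ASSERT expands to a statement, so it
 * must appear inside a function body, after variable declarations, e.g.:
 *     XXH_STATIC_ASSERT(sizeof(xxh_u32) == 4);
 * A false condition triggers a compile-time division by zero.
 */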

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#ifdef __GNUC__
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif
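
/*
 * Illustrative effect (a sketch, assuming a GCC-compatible compiler;
 * expensive_computation() is a hypothetical helper):
 *
 *     xxh_u32 acc = expensive_computation();
 *     XXH_COMPILER_GUARD(acc);
 *     // From here on, the optimizer must assume acc may have been modified,
 *     // so it cannot constant-fold acc or fold this code path into a
 *     // vectorized loop across the barrier. The statement emits no code.
 */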

/* *************************************
*  Basic Types
***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://stackoverflow.com/a/32095106/646947
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */


/* ***   Endianness   *** */
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined, a runtime check (which is usually constant folded)
 * is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
   || defined(__LITTLE_ENDIAN__) \
   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif




/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
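
/*
 * Worked example (illustrative, not part of the library): rotating left by r
 * moves the top r bits around to the bottom, e.g.
 *
 *     XXH_rotl32(0xF000000Fu, 4) == 0x000000FFu
 *
 * The precondition 0 < r < 32 matters for the portable fallback: with r == 0,
 * the expression would shift by 32 bits, which is undefined behavior in C.
 */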

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
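
/*
 * Worked example (illustrative): byteswapping reverses the byte order, so
 *
 *     XXH_swap32(0x12345678) == 0x78563412
 *
 * This is how values read from memory are converted between little and big
 * endian layouts.
 */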


/* ***************************
*  Memory reads
*****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
*  Misc
***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
/*!
 * @}
 * @defgroup xxh32_impl XXH32 implementation
 * @ingroup impl
 * @{
 */
/* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

/*!
 * @internal
 * @brief Normal stripe processing routine.
 *
 * This shuffles the bits so that any bit from @p input impacts several bits in
 * @p acc.
 *
 * @param acc The accumulator lane.
 * @param input The stripe of input to mix.
 * @return The mixed accumulator lane.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is the only thing that prevents GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
     * reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
     *   newer chips!) making it slightly slower to multiply four integers at
     *   once compared to four integers independently. Even on Sandy/Ivy Bridge,
     *   where pmulld was fastest, it is still not worth it to go into SSE
     *   just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movdqa tmp,  v    // not required with VEX encoding
     *      pslld  tmp, 13    // tmp <<= 13
     *      psrld  v,   19    // x >>= 19
     *      por    v,  tmp    // x |= tmp
     *   compared to one for scalar:
     *      roll   v, 13      // reliably fast across the board
     *      shldl  v, v, 13   // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
     *   can load data, while v3 can multiply. SSE forces them to operate
     *   together.
     *
     * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
     * and it is pointless writing a NEON implementation that is basically the
     * same speed as scalar for XXH32.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}

/*!
 * @internal
 * @brief Mixes all bits to finalize the hash.
 *
 * The final mix ensures that all input bits have a chance to impact any bit in
 * the output digest, resulting in an unbiased distribution.
 *
 * @param h32 The hash to avalanche.
 * @return The avalanched hash.
 */
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= XXH_PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= XXH_PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

/*!
 * @internal
 * @brief Processes the last 0-15 bytes of @p ptr.
 *
 * There may be up to 15 bytes remaining to consume from the input.
 * This final stage will digest them to ensure that all input bytes are present
 * in the final mix.
 *
 * @param h32 The hash to finalize.
 * @param ptr The pointer to the remaining input.
 * @param len The remaining length, modulo 16.
 * @param align Whether @p ptr is aligned.
 * @return The finalized hash.
 */
static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                             \
    h32 += (*ptr++) * XXH_PRIME32_5;                  \
    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;        \
} while (0)

#define XXH_PROCESS4 do {                             \
    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;        \
    ptr += 4;                                         \
    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;       \
} while (0)

    /* Compact rerolled version */
    if (XXH_REROLL) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(h32);
    } else {
        switch(len&15) /* or switch(bEnd - p) */ {
          case 12:      XXH_PROCESS4;
                        /* fallthrough */
          case 8:       XXH_PROCESS4;
                        /* fallthrough */
          case 4:       XXH_PROCESS4;
                        return XXH32_avalanche(h32);

          case 13:      XXH_PROCESS4;
                        /* fallthrough */
          case 9:       XXH_PROCESS4;
                        /* fallthrough */
          case 5:       XXH_PROCESS4;
                        XXH_PROCESS1;
                        return XXH32_avalanche(h32);

          case 14:      XXH_PROCESS4;
                        /* fallthrough */
          case 10:      XXH_PROCESS4;
                        /* fallthrough */
          case 6:       XXH_PROCESS4;
                        XXH_PROCESS1;
                        XXH_PROCESS1;
                        return XXH32_avalanche(h32);

          case 15:      XXH_PROCESS4;
                        /* fallthrough */
          case 11:      XXH_PROCESS4;
                        /* fallthrough */
          case 7:       XXH_PROCESS4;
                        /* fallthrough */
          case 3:       XXH_PROCESS1;
                        /* fallthrough */
          case 2:       XXH_PROCESS1;
                        /* fallthrough */
          case 1:       XXH_PROCESS1;
                        /* fallthrough */
          case 0:       return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif

/*!
 * @internal
 * @brief The implementation for @ref XXH32().
 *
 * @param input, len, seed Directly passed from @ref XXH32().
 * @param align Whether @p input is aligned.
 * @return The calculated hash.
 */
XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    const xxh_u8* bEnd = input + len;
    xxh_u32 h32;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (input==NULL) {
        len=0;
        bEnd=input=(const xxh_u8*)(size_t)16;
    }
#endif

    if (len>=16) {
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
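
/*
 * Illustrative one-shot usage (a sketch, not part of the library):
 *
 *     const char data[] = "hello";
 *     XXH32_hash_t const h = XXH32(data, sizeof(data) - 1, 0);   // seed = 0
 *
 * The same input, length, and seed always produce the same 32-bit hash,
 * regardless of platform endianness.
 */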



/*******   Hash streaming   *******/
/*!
 * @ingroup xxh32_family
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    state.v2 = seed + XXH_PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - XXH_PRIME32_1;
    /* do not write into reserved, planned to be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}


/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
                state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
                state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
                state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;
            xxh_u32 v1 = state->v1;
            xxh_u32 v2 = state->v2;
            xxh_u32 v3 = state->v3;
            xxh_u32 v4 = state->v4;

            do {
                v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
                v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
                v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
                v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1)
            + XXH_rotl32(state->v2, 7)
            + XXH_rotl32(state->v3, 12)
            + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
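
/*
 * Illustrative streaming usage (a sketch, not part of the library): hash data
 * that arrives in chunks, without holding it all in memory at once.
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     if (st != NULL) {
 *         XXH32_reset(st, 0);                    // seed = 0
 *         XXH32_update(st, "hello ", 6);         // feed the data piecewise
 *         XXH32_update(st, "world", 5);
 *         {   XXH32_hash_t const h = XXH32_digest(st);
 *             (void)h;  // by design, equals XXH32("hello world", 11, 0)
 *         }
 *         XXH32_freeState(st);
 *     }
 */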


/*******   Canonical representation   *******/

/*!
 * @ingroup xxh32_family
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 *
 * The canonical representation uses big endian convention, the same convention
 * as human-readable numbers (large digits first).
 *
 * This way, hash values can be written into a file or buffer, remaining
 * comparable across different systems.
 *
 * The following functions allow transformation of hash values to and from
 * their canonical format.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
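
/*
 * Illustrative round trip (a sketch, not part of the library): serialize a
 * hash in its portable big endian form, then recover the native value.
 *
 *     XXH32_hash_t const h = XXH32("abc", 3, 0);
 *     XXH32_canonical_t canon;
 *     XXH32_canonicalFromHash(&canon, h);
 *     // The bytes of canon can now be written to a file or sent over the
 *     // network; their order is the same on every platform.
 *     {   XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canon);
 *         XXH_ASSERT(h2 == h);
 *     }
 */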


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
*  64-bit hash functions
*********************************************************************/
/*!
 * @}
 * @ingroup impl
 * @{
 */
/*******   Memory access   *******/

typedef XXH64_hash_t xxh_u64;

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
    return ((const xxh_unalign64*)ptr)->u64;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://stackoverflow.com/a/32095106/646947
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}


/*******   xxh64   *******/
/*!
 * @}
 * @defgroup xxh64_impl XXH64 implementation
 * @ingroup impl
 * @{
 */
/* #define rather than static const, to be used as initializers */
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif

static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}


#define XXH_get64bits(p) XXH_readLE64_align(p, align)

static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        h64 ^= (*ptr++) * XXH_PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
        --len;
    }
    return  XXH64_avalanche(h64);
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif

XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    const xxh_u8* bEnd = input + len;
    xxh_u64 h64;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (input==NULL) {
        len=0;
        bEnd=input=(const xxh_u8*)(size_t)32;
    }
#endif

    if (len>=32) {
        const xxh_u8* const limit = bEnd - 32;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}


/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);

#endif
}
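
/*
 * Illustrative usage (a sketch, not part of the library): XXH64 has the same
 * call shape as XXH32, but returns a 64-bit hash. A different seed produces an
 * unrelated hash over the same input.
 *
 *     const char data[] = "hello";
 *     XXH64_hash_t const h  = XXH64(data, sizeof(data) - 1, 0);
 *     XXH64_hash_t const h2 = XXH64(data, sizeof(data) - 1, 42);  // arbitrary seed
 *     // h and h2 are independent 64-bit values.
 */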

/*******   Hash Streaming   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
    state.v2 = seed + XXH_PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - XXH_PRIME64_1;
    /* do not write into reserved64, might be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
    return XXH_OK;
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
            state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;
            xxh_u64 v1 = state->v1;
            xxh_u64 v2 = state->v2;
            xxh_u64 v3 = state->v3;
            xxh_u64 v4 = state->v4;

            do {
                v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
                v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
                v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
                v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
{
    xxh_u64 h64;

    if (state->total_len >= 32) {
        xxh_u64 const v1 = state->v1;
        xxh_u64 const v2 = state->v2;
        xxh_u64 const v3 = state->v3;
        xxh_u64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64  = state->v3 /*seed*/ + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}


/*******   Canonical representation   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

#ifndef XXH_NO_XXH3

/* *********************************************************************
*  XXH3
*  New generation hash designed for speed on small keys and vectorization
************************************************************************ */
/*!
 * @}
 * @defgroup xxh3_impl XXH3 implementation
 * @ingroup impl
 * @{
 */

/* ===   Compiler specifics   === */

#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#else
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
#  define XXH_RESTRICT   /* disable */
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  || defined(__clang__)
#    define XXH_likely(x) __builtin_expect(x, 1)
#    define XXH_unlikely(x) __builtin_expect(x, 0)
#else
#    define XXH_likely(x) (x)
#    define XXH_unlikely(x) (x)
#endif
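
/*
 * Illustrative use (a sketch): XXH_likely()/XXH_unlikely() tell the compiler
 * which branch to lay out as the hot path; the program's semantics are
 * unchanged either way.
 *
 *     if (XXH_unlikely(input == NULL)) return XXH_ERROR;  // cold error path
 *     // ... fast path continues here ...
 */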

#if defined(__GNUC__)
#  if defined(__AVX2__)
#    include <immintrin.h>
#  elif defined(__SSE2__)
#    include <emmintrin.h>
#  elif defined(__ARM_NEON__) || defined(__ARM_NEON)
#    define inline __inline__  /* circumvent a clang bug */
#    include <arm_neon.h>
#    undef inline
#  endif
#elif defined(_MSC_VER)
#  include <intrin.h>
#endif

/*
 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
 * remaining a true 64-bit/128-bit hash function.
 *
 * This is done by prioritizing a subset of 64-bit operations that can be
 * emulated without too many steps on the average 32-bit machine.
 *
 * For example, these two lines seem similar, and run equally fast on 64-bit:
 *
 *   xxh_u64 x;
 *   x ^= (x >> 47); // good
 *   x ^= (x >> 13); // bad
 *
 * However, to a 32-bit machine, there is a major difference.
 *
 * x ^= (x >> 47) looks like this:
 *
 *   x.lo ^= (x.hi >> (47 - 32));
 *
 * while x ^= (x >> 13) looks like this:
 *
 *   // note: funnel shifts are not usually cheap.
 *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
 *   x.hi ^= (x.hi >> 13);
 *
 * The first one is significantly faster than the second, simply because the
 * shift is larger than 32. This means:
 *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
 *    32 bits in the shift.
 *  - The shift result will always fit in the lower 32 bits, and therefore,
 *    we can ignore the upper 32 bits in the xor.
 *
 * Thanks to this optimization, XXH3 only requires these features to be efficient:
 *
 *  - Usable unaligned access
 *  - A 32-bit or 64-bit ALU
 *      - If 32-bit, a decent ADC instruction
 *  - A 32 or 64-bit multiply with a 64-bit result
 *  - For the 128-bit variant, a decent byteswap helps short inputs.
 *
 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
 * platforms which can run XXH32 can run XXH3 efficiently.
 *
 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
 * notable exception.
 *
 * First of all, Thumb-1 lacks support for the UMULL instruction which
 * performs the important long multiply. This means numerous __aeabi_lmul
 * calls.
 *
 * Second of all, the 8 functional registers are just not enough.
 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
 * Lo registers, and this shuffling results in thousands more MOVs than A32.
 *
 * A32 and T32 don't have this limitation. They can access all 14 registers,
 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
 * shifts is helpful, too.
 *
 * Therefore, we do a quick sanity check.
 *
 * If compiling Thumb-1 for a target which supports ARM instructions, we will
 * emit a warning, as it is not a "sane" platform to compile for.
 *
 * Usually, if this happens, it is because of an accident and you probably need
 * to specify -march, as you likely meant to compile for a newer architecture.
 *
 * Credit: large sections of the vectorial and asm source code paths
 *         have been contributed by @easyaspi314
 */
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
#  warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif

/* ==========================================
 * Vectorization detection
 * ========================================== */

#ifdef XXH_DOXYGEN
/*!
 * @ingroup tuning
 * @brief Overrides the vectorization implementation chosen for XXH3.
 *
 * Can be defined to 0 to disable SIMD or any of the values mentioned in
 * @ref XXH_VECTOR_TYPE.
 *
 * If this is not defined, it uses predefined macros to determine the best
 * implementation.
 */
#  define XXH_VECTOR XXH_SCALAR
/*!
 * @ingroup tuning
 * @brief Possible values for @ref XXH_VECTOR.
 *
 * Note that these are actually implemented as macros.
 *
 * If this is not defined, it is detected automatically.
 * @ref XXH_X86DISPATCH overrides this.
 */
enum XXH_VECTOR_TYPE /* fake enum */ {
    XXH_SCALAR = 0,  /*!< Portable scalar version */
    XXH_SSE2   = 1,  /*!<
                      * SSE2 for Pentium 4, Opteron, all x86_64.
                      *
                      * @note SSE2 is also guaranteed on Windows 10, macOS, and
                      * Android x86.
                      */
    XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
    XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
    XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
    XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};
/*!
 * @ingroup tuning
 * @brief Selects the minimum alignment for XXH3's accumulators.
 *
 * When using SIMD, this should match the alignment required for said vector
 * type, so, for example, 32 for AVX2.
 *
 * Default: Auto detected.
 */
#  define XXH_ACC_ALIGN 8
#endif

/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#endif

#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif defined(__GNUC__) /* msvc support maybe later */ \
    && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \
    && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
#    define XXH_VECTOR XXH_NEON
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif

/*
 * Controls the alignment of the accumulator,
 * for compatibility with aligned vector loads, which are usually faster.
 */
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#    define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#    define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#    define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#    define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif

/*
 * UGLY HACK:
 * GCC usually generates the best code with -O3 for xxHash.
 *
 * However, when targeting AVX2, it is overzealous in its unrolling resulting
 * in code roughly 3/4 the speed of Clang.
 *
 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
 *
 * That is why when compiling the AVX2 version, it is recommended to use either
 *   -O2 -mavx2 -march=haswell
 * or
 *   -O2 -mavx2 -mno-avx256-split-unaligned-load
 * for decent performance, or to use Clang instead.
 *
 * Fortunately, we can control the first one with a pragma that forces GCC into
 * -O2, but the other one we can't control without "failed to inline always
 * inline function due to target mismatch" warnings.
 */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif


#if XXH_VECTOR == XXH_NEON
/*
 * NEON's setup for vmlal_u32 is a little more complicated than it is on
 * SSE2, AVX2, and VSX.
 *
 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
 *
 * To do the same operation, the 128-bit 'Q' register needs to be split into
 * two 64-bit 'D' registers, performing this operation:
 *
 *   [                a                 |                 b                ]
 *            |  '---------.  .--------'  |
 *            |             \/            |
 *            |  .---------'  '--------.  |
 *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[     a >> 32     |     b >> 32   ]
 *
 * Due to significant changes in aarch64, the fastest method for aarch64 is
 * completely different than the fastest method for ARMv7-A.
 *
 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
 * D11 will modify the high half of Q5. This is similar to how modifying AH
 * will only affect bits 8-15 of AX on x86.
 *
 * VZIP takes two registers, and puts even lanes in one register and odd lanes
 * in the other.
 *
 * On ARMv7-A, this strangely modifies both parameters in place instead of
 * taking the usual 3-operand form.
 *
 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
 * lower and upper halves of the Q register to end up with the high and low
 * halves where we want - all in one instruction.
 *
 *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
 *
 * Unfortunately we need inline assembly for this: modifying two registers at
 * once is not possible in GCC or Clang's IR, so they would have to create a
 * copy.
 *
 * aarch64 requires a different approach.
 *
 * In order to make it easier to write a decent compiler for aarch64, many
 * quirks were removed, such as conditional execution.
 *
 * NEON was also affected by this.
 *
 * aarch64 cannot access the high bits of a Q-form register, and writes to a
 * D-form register zero the high bits, similar to how writes to W-form scalar
 * registers (or DWORD registers on x86_64) work.
 *
 * The formerly free vget_high intrinsics now require a vext (with a few
 * exceptions).
 *
 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
 * operand.
 *
 * The equivalent of the VZIP.32 on the lower and upper halves would be this
 * mess:
 *
 *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
 *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
 *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
 *
 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
 *
 *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
 *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
 *
 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
 */

/*!
 * Function-like macro:
 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
 * {
 *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
 *     outHi = (uint32x2_t)(in >> 32);
 *     in = UNDEFINED;
 * }
 */
#  if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
   && defined(__GNUC__) \
   && !defined(__aarch64__) && !defined(__arm64__)
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
        do {                                                                                \
          /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
          /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
          /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
          __asm__("vzip.32  %e0, %f0" : "+w" (in));                                         \
          (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                               \
          (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                               \
        } while (0)
#  else
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
        do {                                                                                \
          (outLo) = vmovn_u64    (in);                                                      \
          (outHi) = vshrn_n_u64  ((in), 32);                                                \
        } while (0)
#  endif
#endif  /* XXH_VECTOR == XXH_NEON */
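
/*
 * Illustrative use of XXH_SPLIT_IN_PLACE (a sketch, assuming NEON intrinsics
 * are available):
 *
 *     uint64x2_t acc = vdupq_n_u64(0x0000000100000002ULL);
 *     uint32x2_t lo, hi;
 *     XXH_SPLIT_IN_PLACE(acc, lo, hi);
 *     // lo now holds the low 32 bits of each lane  { 2, 2 },
 *     // hi holds the high 32 bits of each lane     { 1, 1 },
 *     // and acc must be treated as clobbered afterwards.
 */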
2910
2911 /*
2912 * VSX and Z Vector helpers.
2913 *
2914 * This is very messy, and any pull requests to clean this up are welcome.
2915 *
2916 * There are a lot of problems with supporting VSX and s390x, due to
2917 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
2918 */
2919 #if XXH_VECTOR == XXH_VSX
2920 # if defined(__s390x__)
2921 # include <s390intrin.h>
2922 # else
2923 /* gcc's altivec.h can have the unwanted consequence to unconditionally
2924 * #define bool, vector, and pixel keywords,
2925 * with bad consequences for programs already using these keywords for other purposes.
2926 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
2927 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
2928 * but it seems that, in some cases, it isn't.
2929 * Force the build macro to be defined, so that keywords are not altered.
2930 */
2931 # if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
2932 # define __APPLE_ALTIVEC__
2933 # endif
2934 # include <altivec.h>
2935 # endif
2936
2937 typedef __vector unsigned long long xxh_u64x2;
2938 typedef __vector unsigned char xxh_u8x16;
2939 typedef __vector unsigned xxh_u32x4;
2940
2941 # ifndef XXH_VSX_BE
2942 # if defined(__BIG_ENDIAN__) \
2943 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2944 # define XXH_VSX_BE 1
2945 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
2946 # warning "-maltivec=be is not recommended. Please use native endianness."
2947 # define XXH_VSX_BE 1
2948 # else
2949 # define XXH_VSX_BE 0
2950 # endif
2951 # endif /* !defined(XXH_VSX_BE) */
2952
2953 # if XXH_VSX_BE
2954 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
2955 # define XXH_vec_revb vec_revb
2956 # else
2957 /*!
2958 * A polyfill for POWER9's vec_revb().
2959 */
XXH_vec_revb(xxh_u64x2 val)2960 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
2961 {
2962 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
2963 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
2964 return vec_perm(val, val, vByteSwap);
2965 }
2966 # endif
2967 # endif /* XXH_VSX_BE */
2968
2969 /*!
2970 * Performs an unaligned vector load and byte swaps it on big endian.
2971 */
XXH_vec_loadu(const void * ptr)2972 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
2973 {
2974 xxh_u64x2 ret;
2975 memcpy(&ret, ptr, sizeof(xxh_u64x2));
2976 # if XXH_VSX_BE
2977 ret = XXH_vec_revb(ret);
2978 # endif
2979 return ret;
2980 }
2981
2982 /*
2983 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
2984 *
2985 * These intrinsics weren't added until GCC 8, despite existing for a while,
2986 * and they are endian dependent. Also, their meaning swap depending on version.
2987 * */
2988 # if defined(__s390x__)
2989 /* s390x is always big endian, no issue on this platform */
2990 # define XXH_vec_mulo vec_mulo
2991 # define XXH_vec_mule vec_mule
2992 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
2993 /* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
2994 # define XXH_vec_mulo __builtin_altivec_vmulouw
2995 # define XXH_vec_mule __builtin_altivec_vmuleuw
2996 # else
2997 /* gcc needs inline assembly */
2998 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_vec_mulo(xxh_u32x4 a,xxh_u32x4 b)2999 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3000 {
3001 xxh_u64x2 result;
3002 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3003 return result;
3004 }
XXH_vec_mule(xxh_u32x4 a,xxh_u32x4 b)3005 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3006 {
3007 xxh_u64x2 result;
3008 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3009 return result;
3010 }
3011 # endif /* XXH_vec_mulo, XXH_vec_mule */
3012 #endif /* XXH_VECTOR == XXH_VSX */
3013
3014
3015 /* prefetch
3016 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
3017 #if defined(XXH_NO_PREFETCH)
3018 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3019 #else
3020 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
3021 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3022 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3023 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3024 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3025 # else
3026 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3027 # endif
3028 #endif /* XXH_NO_PREFETCH */
3029
3030
3031 /* ==========================================
3032 * XXH3 default settings
3033 * ========================================== */
3034
3035 #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
3036
3037 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3038 # error "default keyset is not large enough"
3039 #endif
3040
3041 /*! Pseudorandom secret taken directly from FARSH. */
3042 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3043 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3044 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3045 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3046 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3047 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3048 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3049 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3050 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3051 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3052 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3053 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3054 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3055 };
3056
3057
3058 #ifdef XXH_OLD_NAMES
3059 # define kSecret XXH3_kSecret
3060 #endif
3061
3062 #ifdef XXH_DOXYGEN
3063 /*!
3064 * @brief Calculates a 32-bit to 64-bit long multiply.
3065 *
3066 * Implemented as a macro.
3067 *
3068 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
3069  * need to (even though it shouldn't need to anyway; it is about 7 instructions to do
3070 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
3071 * use that instead of the normal method.
3072 *
3073 * If you are compiling for platforms like Thumb-1 and don't have a better option,
3074 * you may also want to write your own long multiply routine here.
3075 *
3076 * @param x, y Numbers to be multiplied
3077 * @return 64-bit product of the low 32 bits of @p x and @p y.
3078 */
3079 XXH_FORCE_INLINE xxh_u64
3080 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3081 {
3082 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3083 }
3084 #elif defined(_MSC_VER) && defined(_M_IX86)
3085 # include <intrin.h>
3086 # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3087 #else
3088 /*
3089 * Downcast + upcast is usually better than masking on older compilers like
3090 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3091 *
3092 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3093 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3094 */
3095 # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3096 #endif
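/*
 * Illustration only (not part of the library): a minimal standalone check
 * that the cast form and the masking form of a 32x32->64 multiply agree;
 * only the code generated on older 32-bit compilers differs. Assumes
 * nothing beyond <stdint.h> and <stdio.h>.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    uint64_t const x = 0xDEADBEEFCAFEBABEULL; /* upper 32 bits are ignored */
    uint64_t const y = 0x0123456789ABCDEFULL;
    uint64_t const cast_form = (uint64_t)(uint32_t)x * (uint64_t)(uint32_t)y;
    uint64_t const mask_form = (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
    printf("%d\n", cast_form == mask_form); /* prints 1 */
    return 0;
}
#endif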
3097
3098 /*!
3099 * @brief Calculates a 64->128-bit long multiply.
3100 *
3101 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
3102 * version.
3103 *
3104 * @param lhs, rhs The 64-bit integers to be multiplied
3105 * @return The 128-bit result represented in an @ref XXH128_hash_t.
3106 */
3107 static XXH128_hash_t
3108 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3109 {
3110 /*
3111 * GCC/Clang __uint128_t method.
3112 *
3113 * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3114 * This is usually the best way as it usually uses a native long 64-bit
3115 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3116 *
3117 * Usually.
3118 *
3119  * On some 32-bit platforms, Clang (and Emscripten) define this type despite
3120  * not having the native arithmetic for it. This results in a slow call to a
3121  * compiler builtin which calculates a full 128-bit multiply.
3122 * In that case it is best to use the portable one.
3123 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3124 */
3125 #if defined(__GNUC__) && !defined(__wasm__) \
3126 && defined(__SIZEOF_INT128__) \
3127 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3128
3129 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3130 XXH128_hash_t r128;
3131 r128.low64 = (xxh_u64)(product);
3132 r128.high64 = (xxh_u64)(product >> 64);
3133 return r128;
3134
3135 /*
3136 * MSVC for x64's _umul128 method.
3137 *
3138 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3139 *
3140 * This compiles to single operand MUL on x64.
3141 */
3142 #elif defined(_M_X64) || defined(_M_IA64)
3143
3144 #ifndef _MSC_VER
3145 # pragma intrinsic(_umul128)
3146 #endif
3147 xxh_u64 product_high;
3148 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3149 XXH128_hash_t r128;
3150 r128.low64 = product_low;
3151 r128.high64 = product_high;
3152 return r128;
3153
3154 #else
3155 /*
3156 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3157 *
3158 * This is a fast and simple grade school multiply, which is shown below
3159 * with base 10 arithmetic instead of base 0x100000000.
3160 *
3161 * 9 3 // D2 lhs = 93
3162 * x 7 5 // D2 rhs = 75
3163 * ----------
3164 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3165 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3166 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3167 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3168 * ---------
3169 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3170 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3171 * ---------
3172 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3173 *
3174 * The reasons for adding the products like this are:
3175 * 1. It avoids manual carry tracking. Just like how
3176 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3177 * This avoids a lot of complexity.
3178 *
3179 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
3180 * instruction available in ARM's Digital Signal Processing extension
3181 * in 32-bit ARMv6 and later, which is shown below:
3182 *
3183 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3184 * {
3185  *          xxh_u64 product = (xxh_u64)Rn * (xxh_u64)Rm + *RdLo + *RdHi;
3186 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3187 * *RdHi = (xxh_u32)(product >> 32);
3188 * }
3189 *
3190 * This instruction was designed for efficient long multiplication, and
3191 * allows this to be calculated in only 4 instructions at speeds
3192 * comparable to some 64-bit ALUs.
3193 *
3194 * 3. It isn't terrible on other platforms. Usually this will be a couple
3195 * of 32-bit ADD/ADCs.
3196 */
3197
3198 /* First calculate all of the cross products. */
3199 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3200 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
3201 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3202 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
3203
3204 /* Now add the products together. These will never overflow. */
3205 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3206 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
3207 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3208
3209 XXH128_hash_t r128;
3210 r128.low64 = lower;
3211 r128.high64 = upper;
3212 return r128;
3213 #endif
3214 }
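/*
 * Illustration only (not part of the library): a sketch cross-checking the
 * portable grade school multiply above against __uint128_t. It assumes a
 * GCC/Clang target where __SIZEOF_INT128__ is defined.
 */
#if 0
#include <stdint.h>
#include <assert.h>
static void check(uint64_t lhs, uint64_t rhs)
{
    /* portable path: same math as the scalar branch above */
    uint64_t const lo_lo = (uint64_t)(uint32_t)lhs * (uint32_t)rhs;
    uint64_t const hi_lo = (uint64_t)(uint32_t)(lhs >> 32) * (uint32_t)rhs;
    uint64_t const lo_hi = (uint64_t)(uint32_t)lhs * (uint32_t)(rhs >> 32);
    uint64_t const hi_hi = (uint64_t)(uint32_t)(lhs >> 32) * (uint32_t)(rhs >> 32);
    uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
    uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
    /* reference path */
    __uint128_t const product = (__uint128_t)lhs * rhs;
    assert(lower == (uint64_t)product);
    assert(upper == (uint64_t)(product >> 64));
}
int main(void)
{
    check(93, 75);                 /* matches the base-10 walkthrough: 6975 */
    check(UINT64_MAX, UINT64_MAX); /* worst-case carries */
    return 0;
}
#endif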
3215
3216 /*!
3217 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
3218 *
3219 * The reason for the separate function is to prevent passing too many structs
3220 * around by value. This will hopefully inline the multiply, but we don't force it.
3221 *
3222 * @param lhs, rhs The 64-bit integers to multiply
3223 * @return The low 64 bits of the product XOR'd by the high 64 bits.
3224 * @see XXH_mult64to128()
3225 */
3226 static xxh_u64
3227 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3228 {
3229 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3230 return product.low64 ^ product.high64;
3231 }
3232
3233 /*! Seems to produce slightly better code on GCC for some reason. */
3234 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3235 {
3236 XXH_ASSERT(0 <= shift && shift < 64);
3237 return v64 ^ (v64 >> shift);
3238 }
3239
3240 /*
3241 * This is a fast avalanche stage,
3242 * suitable when input bits are already partially mixed
3243 */
3244 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3245 {
3246 h64 = XXH_xorshift64(h64, 37);
3247 h64 *= 0x165667919E3779F9ULL;
3248 h64 = XXH_xorshift64(h64, 32);
3249 return h64;
3250 }
3251
3252 /*
3253 * This is a stronger avalanche,
3254 * inspired by Pelle Evensen's rrmxmx
3255 * preferable when input has not been previously mixed
3256 */
3257 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3258 {
3259 /* this mix is inspired by Pelle Evensen's rrmxmx */
3260 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3261 h64 *= 0x9FB21C651E98DF25ULL;
3262 h64 ^= (h64 >> 35) + len ;
3263 h64 *= 0x9FB21C651E98DF25ULL;
3264 return XXH_xorshift64(h64, 28);
3265 }
3266
3267
3268 /* ==========================================
3269 * Short keys
3270 * ==========================================
3271 * One of the shortcomings of XXH32 and XXH64 was that their performance was
3272  * sub-optimal on short lengths. They used an iterative algorithm which strongly
3273 * favored lengths that were a multiple of 4 or 8.
3274 *
3275 * Instead of iterating over individual inputs, we use a set of single shot
3276 * functions which piece together a range of lengths and operate in constant time.
3277 *
3278 * Additionally, the number of multiplies has been significantly reduced. This
3279 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3280 *
3281 * Depending on the platform, this may or may not be faster than XXH32, but it
3282 * is almost guaranteed to be faster than XXH64.
3283 */
3284
3285 /*
3286 * At very short lengths, there isn't enough input to fully hide secrets, or use
3287 * the entire secret.
3288 *
3289 * There is also only a limited amount of mixing we can do before significantly
3290 * impacting performance.
3291 *
3292 * Therefore, we use different sections of the secret and always mix two secret
3293 * samples with an XOR. This should have no effect on performance on the
3294 * seedless or withSeed variants because everything _should_ be constant folded
3295 * by modern compilers.
3296 *
3297 * The XOR mixing hides individual parts of the secret and increases entropy.
3298 *
3299 * This adds an extra layer of strength for custom secrets.
3300 */
3301 XXH_FORCE_INLINE XXH64_hash_t
3302 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3303 {
3304 XXH_ASSERT(input != NULL);
3305 XXH_ASSERT(1 <= len && len <= 3);
3306 XXH_ASSERT(secret != NULL);
3307 /*
3308 * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3309 * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3310 * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3311 */
3312 { xxh_u8 const c1 = input[0];
3313 xxh_u8 const c2 = input[len >> 1];
3314 xxh_u8 const c3 = input[len - 1];
3315 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
3316 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
3317 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3318 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3319 return XXH64_avalanche(keyed);
3320 }
3321 }
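/*
 * Worked example (illustration only): for the 2-byte input { 0x61, 0x62 },
 * c1 = input[0] = 0x61, c2 = input[len >> 1] = 0x62, c3 = input[len - 1] = 0x62, so
 *     combined = (0x61 << 16) | (0x62 << 24) | (0x62 << 0) | (2 << 8)
 *              = 0x62610262.
 * Every length in 1..3 thus yields a distinct, length-tagged 32-bit word
 * from at most three byte reads.
 */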
3322
3323 XXH_FORCE_INLINE XXH64_hash_t
3324 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3325 {
3326 XXH_ASSERT(input != NULL);
3327 XXH_ASSERT(secret != NULL);
3328 XXH_ASSERT(4 <= len && len <= 8);
3329 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3330 { xxh_u32 const input1 = XXH_readLE32(input);
3331 xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3332 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3333 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3334 xxh_u64 const keyed = input64 ^ bitflip;
3335 return XXH3_rrmxmx(keyed, len);
3336 }
3337 }
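/*
 * Note (illustration only): the two 32-bit reads deliberately overlap when
 * len < 8. For len == 5, input1 covers bytes [0..3] and input2 covers
 * bytes [1..4], so all five bytes influence input64, while the length
 * itself is mixed in separately via XXH3_rrmxmx(keyed, len).
 */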
3338
3339 XXH_FORCE_INLINE XXH64_hash_t
3340 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3341 {
3342 XXH_ASSERT(input != NULL);
3343 XXH_ASSERT(secret != NULL);
3344 XXH_ASSERT(8 <= len && len <= 16);
3345 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3346 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3347 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
3348 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3349 xxh_u64 const acc = len
3350 + XXH_swap64(input_lo) + input_hi
3351 + XXH3_mul128_fold64(input_lo, input_hi);
3352 return XXH3_avalanche(acc);
3353 }
3354 }
3355
3356 XXH_FORCE_INLINE XXH64_hash_t
3357 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3358 {
3359 XXH_ASSERT(len <= 16);
3360 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3361 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3362 if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3363 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3364 }
3365 }
3366
3367 /*
3368 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3369 * multiplication by zero, affecting hashes of lengths 17 to 240.
3370 *
3371 * However, they are very unlikely.
3372 *
3373 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3374 * unseeded non-cryptographic hashes, it does not attempt to defend itself
3375 * against specially crafted inputs, only random inputs.
3376 *
3377 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
3378 * cancelling out the secret is taken an arbitrary number of times (addressed
3379 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
3380 * and/or proper seeding:
3381 *
3382 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
3383 * function that is only called up to 16 times per hash with up to 240 bytes of
3384 * input.
3385 *
3386 * This is not too bad for a non-cryptographic hash function, especially with
3387 * only 64 bit outputs.
3388 *
3389 * The 128-bit variant (which trades some speed for strength) is NOT affected
3390 * by this, although it is always a good idea to use a proper seed if you care
3391 * about strength.
3392 */
3393 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3394 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3395 {
3396 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3397 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
3398 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
3399 /*
3400 * UGLY HACK:
3401 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
3402 * slower code.
3403 *
3404 * By forcing seed64 into a register, we disrupt the cost model and
3405 * cause it to scalarize. See `XXH32_round()`
3406 *
3407 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
3408 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
3409 * GCC 9.2, despite both emitting scalar code.
3410 *
3411 * GCC generates much better scalar code than Clang for the rest of XXH3,
3412 * which is why finding a more optimal codepath is an interest.
3413 */
3414 XXH_COMPILER_GUARD(seed64);
3415 #endif
3416 { xxh_u64 const input_lo = XXH_readLE64(input);
3417 xxh_u64 const input_hi = XXH_readLE64(input+8);
3418 return XXH3_mul128_fold64(
3419 input_lo ^ (XXH_readLE64(secret) + seed64),
3420 input_hi ^ (XXH_readLE64(secret+8) - seed64)
3421 );
3422 }
3423 }
3424
3425 /* For mid range keys, XXH3 uses a Mum-hash variant. */
3426 XXH_FORCE_INLINE XXH64_hash_t
3427 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3428 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3429 XXH64_hash_t seed)
3430 {
3431 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3432 XXH_ASSERT(16 < len && len <= 128);
3433
3434 { xxh_u64 acc = len * XXH_PRIME64_1;
3435 if (len > 32) {
3436 if (len > 64) {
3437 if (len > 96) {
3438 acc += XXH3_mix16B(input+48, secret+96, seed);
3439 acc += XXH3_mix16B(input+len-64, secret+112, seed);
3440 }
3441 acc += XXH3_mix16B(input+32, secret+64, seed);
3442 acc += XXH3_mix16B(input+len-48, secret+80, seed);
3443 }
3444 acc += XXH3_mix16B(input+16, secret+32, seed);
3445 acc += XXH3_mix16B(input+len-32, secret+48, seed);
3446 }
3447 acc += XXH3_mix16B(input+0, secret+0, seed);
3448 acc += XXH3_mix16B(input+len-16, secret+16, seed);
3449
3450 return XXH3_avalanche(acc);
3451 }
3452 }
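/*
 * Worked example (illustration only): for len == 100, all three branches
 * above execute, so the mix16B pairs run from the middle outward:
 * input bytes [48..63]/[36..51], then [32..47]/[52..67], then
 * [16..31]/[68..83], and finally [0..15]/[84..99]. The pairs may overlap
 * for such lengths, which is harmless: each call uses a different 16-byte
 * slice of the secret.
 */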
3453
3454 #define XXH3_MIDSIZE_MAX 240
3455
3456 XXH_NO_INLINE XXH64_hash_t
3457 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3458 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3459 XXH64_hash_t seed)
3460 {
3461 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3462 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
3463
3464 #define XXH3_MIDSIZE_STARTOFFSET 3
3465 #define XXH3_MIDSIZE_LASTOFFSET 17
3466
3467 { xxh_u64 acc = len * XXH_PRIME64_1;
3468 int const nbRounds = (int)len / 16;
3469 int i;
3470 for (i=0; i<8; i++) {
3471 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
3472 }
3473 acc = XXH3_avalanche(acc);
3474 XXH_ASSERT(nbRounds >= 8);
3475 #if defined(__clang__) /* Clang */ \
3476 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
3477 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
3478 /*
3479 * UGLY HACK:
3480 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
3481  * Everywhere else, it uses scalar code.
3482 *
3483 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
3484 * would still be slower than UMAAL (see XXH_mult64to128).
3485 *
3486 * Unfortunately, Clang doesn't handle the long multiplies properly and
3487 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
3488 * scalarized into an ugly mess of VMOV.32 instructions.
3489 *
3490 * This mess is difficult to avoid without turning autovectorization
3491  * off completely, but the resulting slowdowns are usually relatively minor
3492  * and/or not worth fixing.
3493 *
3494 * This loop is the easiest to fix, as unlike XXH32, this pragma
3495 * _actually works_ because it is a loop vectorization instead of an
3496 * SLP vectorization.
3497 */
3498 #pragma clang loop vectorize(disable)
3499 #endif
3500 for (i=8 ; i < nbRounds; i++) {
3501 acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
3502 }
3503 /* last bytes */
3504 acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
3505 return XXH3_avalanche(acc);
3506 }
3507 }
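/*
 * Worked example (illustration only): for len == 200, nbRounds == 12. The
 * first 8 rounds consume input[0..127] with secret[0..127]; rounds 8..11
 * consume input[128..191] with the secret shifted by
 * XXH3_MIDSIZE_STARTOFFSET, i.e. secret[3..66]; the closing mix16B covers
 * input[184..199], the final 16 bytes.
 */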
3508
3509
3510 /* ======= Long Keys ======= */
3511
3512 #define XXH_STRIPE_LEN 64
3513 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
3514 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
3515
3516 #ifdef XXH_OLD_NAMES
3517 # define STRIPE_LEN XXH_STRIPE_LEN
3518 # define ACC_NB XXH_ACC_NB
3519 #endif
3520
3521 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
3522 {
3523 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
3524 memcpy(dst, &v64, sizeof(v64));
3525 }
3526
3527 /* Several intrinsic functions below are supposed to accept __int64 as an argument,
3528 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
3529 * However, several environments do not define __int64 type,
3530 * requiring a workaround.
3531 */
3532 #if !defined (__VMS) \
3533 && (defined (__cplusplus) \
3534 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
3535 typedef int64_t xxh_i64;
3536 #else
3537 /* the following type must have a width of 64-bit */
3538 typedef long long xxh_i64;
3539 #endif
3540
3541 /*
3542 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
3543 *
3544 * It is a hardened version of UMAC, based off of FARSH's implementation.
3545 *
3546 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
3547 * implementations, and it is ridiculously fast.
3548 *
3549 * We harden it by mixing the original input to the accumulators as well as the product.
3550 *
3551 * This means that in the (relatively likely) case of a multiply by zero, the
3552 * original input is preserved.
3553 *
3554 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
3555 * cross-pollination, as otherwise the upper and lower halves would be
3556 * essentially independent.
3557 *
3558 * This doesn't matter on 64-bit hashes since they all get merged together in
3559 * the end, so we skip the extra step.
3560 *
3561 * Both XXH3_64bits and XXH3_128bits use this subroutine.
3562 */
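/*
 * Illustration only: in scalar terms, one 8-byte lane of this round is
 * (see XXH3_accumulate_512_scalar below):
 *
 *     data_key    = data ^ key;
 *     acc[i ^ 1] += data;                        (input mixing, lane swap)
 *     acc[i]     += (data_key & 0xFFFFFFFF)
 *                 * (data_key >> 32);            (UMAC-style 32x32->64)
 */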
3563
3564 #if (XXH_VECTOR == XXH_AVX512) \
3565 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3566
3567 #ifndef XXH_TARGET_AVX512
3568 # define XXH_TARGET_AVX512 /* disable attribute target */
3569 #endif
3570
3571 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3572 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
3573 const void* XXH_RESTRICT input,
3574 const void* XXH_RESTRICT secret)
3575 {
3576 XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc;
3577 XXH_ASSERT((((size_t)acc) & 63) == 0);
3578 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3579
3580 {
3581 /* data_vec = input[0]; */
3582 __m512i const data_vec = _mm512_loadu_si512 (input);
3583 /* key_vec = secret[0]; */
3584 __m512i const key_vec = _mm512_loadu_si512 (secret);
3585 /* data_key = data_vec ^ key_vec; */
3586 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3587 /* data_key_lo = data_key >> 32; */
3588 __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3589 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3590 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
3591 /* xacc[0] += swap(data_vec); */
3592 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3593 __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
3594 /* xacc[0] += product; */
3595 *xacc = _mm512_add_epi64(product, sum);
3596 }
3597 }
3598
3599 /*
3600 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
3601 *
3602 * Multiplication isn't perfect, as explained by Google in HighwayHash:
3603 *
3604 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
3605 * // varying degrees. In descending order of goodness, bytes
3606 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
3607 * // As expected, the upper and lower bytes are much worse.
3608 *
3609 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
3610 *
3611 * Since our algorithm uses a pseudorandom secret to add some variance into the
3612 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
3613 *
3614 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
3615 * extraction.
3616 *
3617 * Both XXH3_64bits and XXH3_128bits use this subroutine.
3618 */
3619
3620 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3621 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3622 {
3623 XXH_ASSERT((((size_t)acc) & 63) == 0);
3624 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3625 { XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc;
3626 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
3627
3628 /* xacc[0] ^= (xacc[0] >> 47) */
3629 __m512i const acc_vec = *xacc;
3630 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
3631 __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted);
3632 /* xacc[0] ^= secret; */
3633 __m512i const key_vec = _mm512_loadu_si512 (secret);
3634 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3635
3636 /* xacc[0] *= XXH_PRIME32_1; */
3637 __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3638 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
3639 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
3640 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
3641 }
3642 }
3643
3644 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3645 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3646 {
3647 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
3648 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
3649 XXH_ASSERT(((size_t)customSecret & 63) == 0);
3650 (void)(&XXH_writeLE64);
3651 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
3652 __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64);
3653
3654 XXH_ALIGN(64) const __m512i* const src = (const __m512i*) XXH3_kSecret;
3655 XXH_ALIGN(64) __m512i* const dest = ( __m512i*) customSecret;
3656 int i;
3657 for (i=0; i < nbRounds; ++i) {
3658 /* GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not 'void const*',
3659  * so passing 'src' directly would warn "discards ‘const’ qualifier". */
3660 union {
3661 XXH_ALIGN(64) const __m512i* cp;
3662 XXH_ALIGN(64) void* p;
3663 } remote_const_void;
3664 remote_const_void.cp = src + i;
3665 dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
3666 } }
3667 }
3668
3669 #endif
3670
3671 #if (XXH_VECTOR == XXH_AVX2) \
3672 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3673
3674 #ifndef XXH_TARGET_AVX2
3675 # define XXH_TARGET_AVX2 /* disable attribute target */
3676 #endif
3677
3678 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3679 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3680 const void* XXH_RESTRICT input,
3681 const void* XXH_RESTRICT secret)
3682 {
3683 XXH_ASSERT((((size_t)acc) & 31) == 0);
3684 { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc;
3685 /* Unaligned. This is mainly for pointer arithmetic, and because
3686 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3687 const __m256i* const xinput = (const __m256i *) input;
3688 /* Unaligned. This is mainly for pointer arithmetic, and because
3689 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3690 const __m256i* const xsecret = (const __m256i *) secret;
3691
3692 size_t i;
3693 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3694 /* data_vec = xinput[i]; */
3695 __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
3696 /* key_vec = xsecret[i]; */
3697 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3698 /* data_key = data_vec ^ key_vec; */
3699 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3700 /* data_key_lo = data_key >> 32; */
3701 __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3702 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3703 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
3704 /* xacc[i] += swap(data_vec); */
3705 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3706 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
3707 /* xacc[i] += product; */
3708 xacc[i] = _mm256_add_epi64(product, sum);
3709 } }
3710 }
3711
3712 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3713 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3714 {
3715 XXH_ASSERT((((size_t)acc) & 31) == 0);
3716 { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc;
3717 /* Unaligned. This is mainly for pointer arithmetic, and because
3718 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3719 const __m256i* const xsecret = (const __m256i *) secret;
3720 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3721
3722 size_t i;
3723 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3724 /* xacc[i] ^= (xacc[i] >> 47) */
3725 __m256i const acc_vec = xacc[i];
3726 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
3727 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
3728 /* xacc[i] ^= xsecret; */
3729 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3730 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3731
3732 /* xacc[i] *= XXH_PRIME32_1; */
3733 __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3734 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
3735 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
3736 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3737 }
3738 }
3739 }
3740
3741 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3742 {
3743 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3744 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3745 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3746 (void)(&XXH_writeLE64);
3747 XXH_PREFETCH(customSecret);
3748 { __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64, -(xxh_i64)seed64, (xxh_i64)seed64);
3749
3750 XXH_ALIGN(64) const __m256i* const src = (const __m256i*) XXH3_kSecret;
3751 XXH_ALIGN(64) __m256i* dest = ( __m256i*) customSecret;
3752
3753 # if defined(__GNUC__) || defined(__clang__)
3754 /*
3755  * On GCC & Clang, marking 'dest' as modified causes the compiler to:
3756  * - not extract the secret from SSE registers in the internal loop
3757  * - use fewer registers, and avoid pushing them onto the stack
3758 */
3759 XXH_COMPILER_GUARD(dest);
3760 # endif
3761
3762 /* GCC -O2 needs the loop unrolled manually */
3763 dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3764 dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3765 dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3766 dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3767 dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3768 dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3769 }
3770 }
3771
3772 #endif
3773
3774 /* x86dispatch always generates SSE2 */
3775 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3776
3777 #ifndef XXH_TARGET_SSE2
3778 # define XXH_TARGET_SSE2 /* disable attribute target */
3779 #endif
3780
3781 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3782 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3783 const void* XXH_RESTRICT input,
3784 const void* XXH_RESTRICT secret)
3785 {
3786 /* SSE2 is just a half-scale version of the AVX2 version. */
3787 XXH_ASSERT((((size_t)acc) & 15) == 0);
3788 { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc;
3789 /* Unaligned. This is mainly for pointer arithmetic, and because
3790 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3791 const __m128i* const xinput = (const __m128i *) input;
3792 /* Unaligned. This is mainly for pointer arithmetic, and because
3793 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3794 const __m128i* const xsecret = (const __m128i *) secret;
3795
3796 size_t i;
3797 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3798 /* data_vec = xinput[i]; */
3799 __m128i const data_vec = _mm_loadu_si128 (xinput+i);
3800 /* key_vec = xsecret[i]; */
3801 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3802 /* data_key = data_vec ^ key_vec; */
3803 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
3804 /* data_key_lo = data_key >> 32; */
3805 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3806 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3807 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
3808 /* xacc[i] += swap(data_vec); */
3809 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3810 __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
3811 /* xacc[i] += product; */
3812 xacc[i] = _mm_add_epi64(product, sum);
3813 } }
3814 }
3815
3816 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3817 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3818 {
3819 XXH_ASSERT((((size_t)acc) & 15) == 0);
3820 { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc;
3821 /* Unaligned. This is mainly for pointer arithmetic, and because
3822 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3823 const __m128i* const xsecret = (const __m128i *) secret;
3824 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
3825
3826 size_t i;
3827 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3828 /* xacc[i] ^= (xacc[i] >> 47) */
3829 __m128i const acc_vec = xacc[i];
3830 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
3831 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
3832 /* xacc[i] ^= xsecret[i]; */
3833 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3834 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
3835
3836 /* xacc[i] *= XXH_PRIME32_1; */
3837 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3838 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
3839 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
3840 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
3841 }
3842 }
3843 }
3844
3845 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3846 {
3847 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
3848 (void)(&XXH_writeLE64);
3849 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
3850
3851 # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
3852 /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
3853 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, -(xxh_i64)seed64 };
3854 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
3855 # else
3856 __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64);
3857 # endif
3858 int i;
3859
3860 XXH_ALIGN(64) const float* const src = (float const*) XXH3_kSecret;
3861 XXH_ALIGN(XXH_SEC_ALIGN) __m128i* dest = (__m128i*) customSecret;
3862 # if defined(__GNUC__) || defined(__clang__)
3863 /*
3864  * On GCC & Clang, marking 'dest' as modified causes the compiler to:
3865  * - not extract the secret from SSE registers in the internal loop
3866  * - use fewer registers, and avoid pushing them onto the stack
3867 */
3868 XXH_COMPILER_GUARD(dest);
3869 # endif
3870
3871 for (i=0; i < nbRounds; ++i) {
3872 dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed);
3873 } }
3874 }
3875
3876 #endif
3877
3878 #if (XXH_VECTOR == XXH_NEON)
3879
3880 XXH_FORCE_INLINE void
3881 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
3882 const void* XXH_RESTRICT input,
3883 const void* XXH_RESTRICT secret)
3884 {
3885 XXH_ASSERT((((size_t)acc) & 15) == 0);
3886 {
3887 XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc;
3888 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
3889 uint8_t const* const xinput = (const uint8_t *) input;
3890 uint8_t const* const xsecret = (const uint8_t *) secret;
3891
3892 size_t i;
3893 for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
3894 /* data_vec = xinput[i]; */
3895 uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
3896 /* key_vec = xsecret[i]; */
3897 uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
3898 uint64x2_t data_key;
3899 uint32x2_t data_key_lo, data_key_hi;
3900 /* xacc[i] += swap(data_vec); */
3901 uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
3902 uint64x2_t const swapped = vextq_u64(data64, data64, 1);
3903 xacc[i] = vaddq_u64 (xacc[i], swapped);
3904 /* data_key = data_vec ^ key_vec; */
3905 data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
3906 /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
3907 * data_key_hi = (uint32x2_t) (data_key >> 32);
3908 * data_key = UNDEFINED; */
3909 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
3910 /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
3911 xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
3912
3913 }
3914 }
3915 }
3916
3917 XXH_FORCE_INLINE void
3918 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3919 {
3920 XXH_ASSERT((((size_t)acc) & 15) == 0);
3921
3922 { uint64x2_t* xacc = (uint64x2_t*) acc;
3923 uint8_t const* xsecret = (uint8_t const*) secret;
3924 uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
3925
3926 size_t i;
3927 for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
3928 /* xacc[i] ^= (xacc[i] >> 47); */
3929 uint64x2_t acc_vec = xacc[i];
3930 uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
3931 uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
3932
3933 /* xacc[i] ^= xsecret[i]; */
3934 uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
3935 uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));
3936
3937 /* xacc[i] *= XXH_PRIME32_1 */
3938 uint32x2_t data_key_lo, data_key_hi;
3939 /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
3940 * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
3941 * xacc[i] = UNDEFINED; */
3942 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
3943 { /*
3944 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
3945 *
3946 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
3947 * incorrectly "optimize" this:
3948 * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
3949 * shifted = vshll_n_u32(tmp, 32);
3950 * to this:
3951 * tmp = "vmulq_u64"(a, b); // no such thing!
3952 * shifted = vshlq_n_u64(tmp, 32);
3953 *
3954 * However, unlike SSE, Clang lacks a 64-bit multiply routine
3955 * for NEON, and it scalarizes two 64-bit multiplies instead.
3956 *
3957 * vmull_u32 has the same timing as vmul_u32, and it avoids
3958 * this bug completely.
3959 * See https://bugs.llvm.org/show_bug.cgi?id=39967
3960 */
3961 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
3962 /* xacc[i] = prod_hi << 32; */
3963 xacc[i] = vshlq_n_u64(prod_hi, 32);
3964 /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
3965 xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
3966 }
3967 } }
3968 }
3969
3970 #endif
3971
3972 #if (XXH_VECTOR == XXH_VSX)
3973
3974 XXH_FORCE_INLINE void
3975 XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
3976 const void* XXH_RESTRICT input,
3977 const void* XXH_RESTRICT secret)
3978 {
3979 xxh_u64x2* const xacc = (xxh_u64x2*) acc; /* presumed aligned */
3980 xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
3981 xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
3982 xxh_u64x2 const v32 = { 32, 32 };
3983 size_t i;
3984 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
3985 /* data_vec = xinput[i]; */
3986 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
3987 /* key_vec = xsecret[i]; */
3988 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
3989 xxh_u64x2 const data_key = data_vec ^ key_vec;
3990 /* shuffled = (data_key << 32) | (data_key >> 32); */
3991 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
3992 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
3993 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
3994 xacc[i] += product;
3995
3996 /* swap high and low halves */
3997 #ifdef __s390x__
3998 xacc[i] += vec_permi(data_vec, data_vec, 2);
3999 #else
4000 xacc[i] += vec_xxpermdi(data_vec, data_vec, 2);
4001 #endif
4002 }
4003 }
4004
4005 XXH_FORCE_INLINE void
4006 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4007 {
4008 XXH_ASSERT((((size_t)acc) & 15) == 0);
4009
4010 { xxh_u64x2* const xacc = (xxh_u64x2*) acc;
4011 const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4012 /* constants */
4013 xxh_u64x2 const v32 = { 32, 32 };
4014 xxh_u64x2 const v47 = { 47, 47 };
4015 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4016 size_t i;
4017 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4018 /* xacc[i] ^= (xacc[i] >> 47); */
4019 xxh_u64x2 const acc_vec = xacc[i];
4020 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4021
4022 /* xacc[i] ^= xsecret[i]; */
4023 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4024 xxh_u64x2 const data_key = data_vec ^ key_vec;
4025
4026 /* xacc[i] *= XXH_PRIME32_1 */
4027 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
4028 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
4029 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
4030 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4031 xacc[i] = prod_odd + (prod_even << v32);
4032 } }
4033 }
4034
4035 #endif
4036
4037 /* scalar variants - universal */
4038
4039 XXH_FORCE_INLINE void
4040 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4041 const void* XXH_RESTRICT input,
4042 const void* XXH_RESTRICT secret)
4043 {
4044 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4045 const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */
4046 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4047 size_t i;
4048 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4049 for (i=0; i < XXH_ACC_NB; i++) {
4050 xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
4051 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
4052 xacc[i ^ 1] += data_val; /* swap adjacent lanes */
4053 xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4054 }
4055 }
4056
4057 XXH_FORCE_INLINE void
4058 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4059 {
4060 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4061 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4062 size_t i;
4063 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4064 for (i=0; i < XXH_ACC_NB; i++) {
4065 xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
4066 xxh_u64 acc64 = xacc[i];
4067 acc64 = XXH_xorshift64(acc64, 47);
4068 acc64 ^= key64;
4069 acc64 *= XXH_PRIME32_1;
4070 xacc[i] = acc64;
4071 }
4072 }
4073
4074 XXH_FORCE_INLINE void
4075 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4076 {
4077 /*
4078 * We need a separate pointer for the hack below,
4079 * which requires a non-const pointer.
4080 * Any decent compiler will optimize this out otherwise.
4081 */
4082 const xxh_u8* kSecretPtr = XXH3_kSecret;
4083 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4084
4085 #if defined(__clang__) && defined(__aarch64__)
4086 /*
4087 * UGLY HACK:
4088 * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4089 * placed sequentially, in order, at the top of the unrolled loop.
4090 *
4091 * While MOVK is great for generating constants (2 cycles for a 64-bit
4092 * constant compared to 4 cycles for LDR), long MOVK chains stall the
4093 * integer pipelines:
4094 * I L S
4095 * MOVK
4096 * MOVK
4097 * MOVK
4098 * MOVK
4099 * ADD
4100 * SUB STR
4101 * STR
4102 * By forcing loads from memory (as the asm line causes Clang to assume
4103  * that kSecretPtr has been changed), the pipelines are used more
4104 * efficiently:
4105 * I L S
4106 * LDR
4107 * ADD LDR
4108 * SUB STR
4109 * STR
4110 * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4111 * without hack: 2654.4 MB/s
4112 * with hack: 3202.9 MB/s
4113 */
4114 XXH_COMPILER_GUARD(kSecretPtr);
4115 #endif
4116 /*
4117 * Note: in debug mode, this overrides the asm optimization
4118 * and Clang will emit MOVK chains again.
4119 */
4120 XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4121
4122 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4123 int i;
4124 for (i=0; i < nbRounds; i++) {
4125 /*
4126 * The asm hack causes Clang to assume that kSecretPtr aliases with
4127 * customSecret, and on aarch64, this prevented LDP from merging two
4128 * loads together for free. Putting the loads together before the stores
4129 * properly generates LDP.
4130 */
4131 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
4132 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4133 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
4134 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4135 } }
4136 }
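/*
 * Note (illustration only): each 16-byte chunk of the custom secret is the
 * corresponding chunk of XXH3_kSecret with seed64 added to the low 8 bytes
 * and subtracted from the high 8 bytes, so seed64 == 0 reproduces
 * XXH3_kSecret exactly. XXH3_hashLong_64b_withSeed_internal relies on this
 * when it short-circuits the seed == 0 case to the default secret.
 */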
4137
4138
4139 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4140 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4141 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4142
4143
4144 #if (XXH_VECTOR == XXH_AVX512)
4145
4146 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4147 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
4148 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4149
4150 #elif (XXH_VECTOR == XXH_AVX2)
4151
4152 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4153 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
4154 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4155
4156 #elif (XXH_VECTOR == XXH_SSE2)
4157
4158 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4159 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
4160 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4161
4162 #elif (XXH_VECTOR == XXH_NEON)
4163
4164 #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4165 #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
4166 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4167
4168 #elif (XXH_VECTOR == XXH_VSX)
4169
4170 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4171 #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
4172 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4173
4174 #else /* scalar */
4175
4176 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4177 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
4178 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4179
4180 #endif
4181
4182
4183
4184 #ifndef XXH_PREFETCH_DIST
4185 # ifdef __clang__
4186 # define XXH_PREFETCH_DIST 320
4187 # else
4188 # if (XXH_VECTOR == XXH_AVX512)
4189 # define XXH_PREFETCH_DIST 512
4190 # else
4191 # define XXH_PREFETCH_DIST 384
4192 # endif
4193 # endif /* __clang__ */
4194 #endif /* XXH_PREFETCH_DIST */
4195
4196 /*
4197 * XXH3_accumulate()
4198 * Loops over XXH3_accumulate_512().
4199 * Assumption: nbStripes will not overflow the secret size
4200 */
4201 XXH_FORCE_INLINE void
4202 XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
4203 const xxh_u8* XXH_RESTRICT input,
4204 const xxh_u8* XXH_RESTRICT secret,
4205 size_t nbStripes,
4206 XXH3_f_accumulate_512 f_acc512)
4207 {
4208 size_t n;
4209 for (n = 0; n < nbStripes; n++ ) {
4210 const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4211 XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4212 f_acc512(acc,
4213 in,
4214 secret + n*XXH_SECRET_CONSUME_RATE);
4215 }
4216 }
4217
4218 XXH_FORCE_INLINE void
4219 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4220 const xxh_u8* XXH_RESTRICT input, size_t len,
4221 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4222 XXH3_f_accumulate_512 f_acc512,
4223 XXH3_f_scrambleAcc f_scramble)
4224 {
4225 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4226 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4227 size_t const nb_blocks = (len - 1) / block_len;
4228
4229 size_t n;
4230
4231 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4232
4233 for (n = 0; n < nb_blocks; n++) {
4234 XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4235 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4236 }
4237
4238 /* last partial block */
4239 XXH_ASSERT(len > XXH_STRIPE_LEN);
4240 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4241 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4242 XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4243
4244 /* last stripe */
4245 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4246 #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
4247 f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4248 } }
4249 }
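/*
 * Worked example (illustration only): with the default 192-byte secret,
 * nbStripesPerBlock = (192 - 64) / 8 = 16, so block_len = 64 * 16 = 1024.
 * For a 4096-byte input, that is 3 full blocks (each followed by a
 * scramble), then 15 stripes of the partial block, then the last stripe,
 * which always ends exactly at input + len and may overlap the previous
 * stripe when len is not a multiple of 64.
 */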
4250
4251 XXH_FORCE_INLINE xxh_u64
4252 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4253 {
4254 return XXH3_mul128_fold64(
4255 acc[0] ^ XXH_readLE64(secret),
4256 acc[1] ^ XXH_readLE64(secret+8) );
4257 }
4258
4259 static XXH64_hash_t
4260 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4261 {
4262 xxh_u64 result64 = start;
4263 size_t i = 0;
4264
4265 for (i = 0; i < 4; i++) {
4266 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4267 #if defined(__clang__) /* Clang */ \
4268 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
4269 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4270 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4271 /*
4272 * UGLY HACK:
4273 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4274 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4275 * XXH3_64bits, len == 256, Snapdragon 835:
4276 * without hack: 2063.7 MB/s
4277 * with hack: 2560.7 MB/s
4278 */
4279 XXH_COMPILER_GUARD(result64);
4280 #endif
4281 }
4282
4283 return XXH3_avalanche(result64);
4284 }
4285
4286 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4287 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4288
4289 XXH_FORCE_INLINE XXH64_hash_t
4290 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4291 const void* XXH_RESTRICT secret, size_t secretSize,
4292 XXH3_f_accumulate_512 f_acc512,
4293 XXH3_f_scrambleAcc f_scramble)
4294 {
4295 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4296
4297 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4298
4299 /* converge into final hash */
4300 XXH_STATIC_ASSERT(sizeof(acc) == 64);
4301 /* do not align on 8, so that the secret is different from the accumulator */
4302 #define XXH_SECRET_MERGEACCS_START 11
4303 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4304 return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4305 }
4306
4307 /*
4308 * It's important for performance that XXH3_hashLong is not inlined.
4309 */
4310 XXH_NO_INLINE XXH64_hash_t
4311 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4312 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4313 {
4314 (void)seed64;
4315 return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4316 }
4317
4318 /*
4319 * It's important for performance that XXH3_hashLong is not inlined.
4320 * Since the function is not inlined, the compiler may not be able to understand that,
4321 * in some scenarios, its `secret` argument is actually a compile time constant.
4322 * This variant enforces that the compiler can detect that,
4323 * and uses this opportunity to streamline the generated code for better performance.
4324 */
4325 XXH_NO_INLINE XXH64_hash_t
4326 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4327 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4328 {
4329 (void)seed64; (void)secret; (void)secretLen;
4330 return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4331 }
4332
4333 /*
4334 * XXH3_hashLong_64b_withSeed():
4335 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
4336 * and then use this key for long mode hashing.
4337 *
4338 * This operation is decently fast but nonetheless costs a little bit of time.
4339 * Try to avoid it whenever possible (typically when seed==0).
4340 *
4341 * It's important for performance that XXH3_hashLong is not inlined. Not sure
4342 * why (uop cache maybe?), but the difference is large and easily measurable.
4343 */
4344 XXH_FORCE_INLINE XXH64_hash_t
4345 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4346 XXH64_hash_t seed,
4347 XXH3_f_accumulate_512 f_acc512,
4348 XXH3_f_scrambleAcc f_scramble,
4349 XXH3_f_initCustomSecret f_initSec)
4350 {
4351 if (seed == 0)
4352 return XXH3_hashLong_64b_internal(input, len,
4353 XXH3_kSecret, sizeof(XXH3_kSecret),
4354 f_acc512, f_scramble);
4355 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4356 f_initSec(secret, seed);
4357 return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4358 f_acc512, f_scramble);
4359 }
4360 }
4361
4362 /*
4363 * It's important for performance that XXH3_hashLong is not inlined.
4364 */
4365 XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void * input,size_t len,XXH64_hash_t seed,const xxh_u8 * secret,size_t secretLen)4366 XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4367 XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4368 {
4369 (void)secret; (void)secretLen;
4370 return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4371 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4372 }
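
/*
 * Illustrative note (follows from the seed==0 shortcut above, not additional
 * library behavior): a zero seed routes through the default secret, so for
 * any buffer `p` of length `n`:
 *
 *     XXH3_64bits_withSeed(p, n, 0) == XXH3_64bits(p, n)
 */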


typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken when the `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function's signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}


/* === Public entry point === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}
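
/*
 * Illustrative usage sketch (not part of the library): one-shot hashing with
 * the three entry points above. `data`, `size` and `mySecret` are hypothetical
 * caller variables; `mySecret` must hold at least XXH3_SECRET_SIZE_MIN bytes.
 *
 *     XXH64_hash_t const h_default = XXH3_64bits(data, size);
 *     XXH64_hash_t const h_seeded  = XXH3_64bits_withSeed(data, size, 20230101ULL);
 *     XXH64_hash_t const h_secret  = XXH3_64bits_withSecret(data, size,
 *                                                           mySecret, sizeof(mySecret));
 */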


/* === XXH3 streaming === */

/*
 * Allocates memory that is always aligned to `align`.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
 * or, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: functions like posix_memalign or _mm_malloc
 * are avoided: to maintain portability, we would have to write a fallback
 * like this anyway, and besides, testing for the existence of library
 * functions without relying on external build tools is impossible.
 *
 * The method is simple: overallocate, manually align, and store the offset
 * to the original pointer behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
static void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * Even if the original pointer is already aligned, the offset is
             * `align` rather than 0, so there is always at least one byte
             * before the returned pointer to store the offset.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
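/*
 * Worked example of the scheme above (hypothetical addresses): with align == 8
 * and XXH_malloc returning base == 0x1003, offset = 8 - (0x1003 & 7) = 5, so
 * the returned pointer is 0x1008 and the byte at 0x1007 stores 5.
 * XXH_alignedFree(0x1008) then reads that byte and frees 0x1008 - 5 == 0x1003.
 */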
/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    memcpy(dst_state, src_state, sizeof(*dst_state));
}

static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/* Note: when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte will be consumed from input
 * so that the function can blindly consume all stripes using the "normal" secret segment */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}
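
/*
 * Worked example of the block accounting above (hypothetical numbers): with
 * nbStripesPerBlock == 16 and *nbStripesSoFarPtr == 14, a call with
 * nbStripes == 5 accumulates 2 stripes to finish the block, scrambles the
 * accumulators once, then accumulates the remaining 3 stripes from the start
 * of the secret, leaving *nbStripesSoFarPtr == 3.
 */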

/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* state,
            const xxh_u8* input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;

        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {  /* fill in tmp buffer */
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }
        /* total input is now > XXH3_INTERNALBUFFER_SIZE */

#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(state->acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* Consume input by a multiple of internal buffer size */
        if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) {
            const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
            do {
                XXH3_consumeStripes(state->acc,
                                   &state->nbStripesSoFar, state->nbStripesPerBlock,
                                    input, XXH3_INTERNALBUFFER_STRIPES,
                                    secret, state->secretLimit,
                                    f_acc512, f_scramble);
                input += XXH3_INTERNALBUFFER_SIZE;
            } while (input<limit);
            /* for last partial stripe */
            memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
        }
        XXH_ASSERT(input < bEnd);

        /* Some remaining input (always): buffer it */
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
    }

    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}


XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->seed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
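
/*
 * Illustrative streaming sketch (not part of the library): feed input in
 * chunks, take an intermediate digest, then keep updating. Because
 * XXH3_64bits_digest() works on a local copy of the accumulators, the state
 * can keep ingesting input afterwards. `chunk1`/`chunk2` and their sizes are
 * hypothetical caller variables; error handling is reduced to early returns.
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st == NULL) return;
 *     if (XXH3_64bits_reset(st) == XXH_ERROR) { XXH3_freeState(st); return; }
 *     XXH3_64bits_update(st, chunk1, chunk1Size);
 *     XXH64_hash_t const partial = XXH3_64bits_digest(st);  // hash of chunk1
 *     XXH3_64bits_update(st, chunk2, chunk2Size);
 *     XXH64_hash_t const full = XXH3_64bits_digest(st);     // hash of chunk1 + chunk2
 *     XXH3_freeState(st);
 */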


#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize)
{
    XXH_ASSERT(secretBuffer != NULL);
    if (customSeedSize == 0) {
        memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
        return;
    }
    XXH_ASSERT(customSeed != NULL);

    {   size_t const segmentSize = sizeof(XXH128_hash_t);
        size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
        XXH128_canonical_t scrambler;
        XXH64_hash_t seeds[12];
        size_t segnb;
        XXH_ASSERT(nbSegments == 12);
        XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE);   /* exact multiple */
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));

        /*
         * Copy customSeed to seeds[], truncating or repeating as necessary.
         */
        {   size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
            size_t filled = toFill;
            memcpy(seeds, customSeed, toFill);
            while (filled < sizeof(seeds)) {
                toFill = XXH_MIN(filled, sizeof(seeds) - filled);
                memcpy((char*)seeds + filled, seeds, toFill);
                filled += toFill;
        }   }

        /* generate secret */
        memcpy(secretBuffer, &scrambler, sizeof(scrambler));
        for (segnb=1; segnb < nbSegments; segnb++) {
            size_t const segmentStart = segnb * segmentSize;
            XXH128_canonical_t segment;
            XXH128_canonicalFromHash(&segment,
                XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) );
            memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment));
    }   }
}
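
/*
 * Illustrative sketch (not part of the library): derive a full-size secret
 * from arbitrary seed material, then hash with it. XXH_SECRET_DEFAULT_SIZE is
 * the size this version of XXH3_generateSecret() fills; `data`/`size` are
 * hypothetical caller variables.
 *
 *     unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
 *     const char seedMaterial[] = "any content, of any length";
 *     XXH3_generateSecret(secret, seedMaterial, sizeof(seedMaterial));
 *     XXH64_hash_t const h = XXH3_64bits_withSecret(data, size,
 *                                                   secret, sizeof(secret));
 */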


/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that long inputs are hashed about as fast as with the 64-bit
 * version, since the long-input loop is only a slight modification of the
 * 64-bit one.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len <<  8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len left so the added term is even and the multiplier
         * XXH_PRIME64_1 + (len << 2) stays odd: multiplying by an even
         * number would discard low-order bits. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64  ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64   = XXH3_avalanche(h128.low64);
            h128.high64  = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64 = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche( seed ^ bitfliph);
            return h128;
    }   }
}

/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}


XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate_512 f_acc512,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken when the `secret` conditions are not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* === Public XXH128 API === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
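
/*
 * Illustrative sketch (not part of the library): one-shot 128-bit hashing.
 * XXH128() is a convenience wrapper over XXH3_128bits_withSeed(); a seed of 0
 * produces the same result as XXH3_128bits(). `data`/`size` are hypothetical
 * caller variables.
 *
 *     XXH128_hash_t const h = XXH128(data, size, 0);
 *     printf("%016llx%016llx\n",
 *            (unsigned long long)h.high64,
 *            (unsigned long long)h.low64);
 */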


/* === XXH3 128-bit streaming === */

/*
 * All the functions are actually the same as for the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_128bits_reset(statePtr);
    if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
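
/*
 * Illustrative streaming sketch (not part of the library): the 128-bit
 * streaming API mirrors the 64-bit one shown earlier; only the digest type
 * changes. `chunk`/`chunkSize` are hypothetical caller variables. Note that
 * XXH3_freeState(NULL) is safe, so the cleanup is unconditional.
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st != NULL && XXH3_128bits_reset_withSeed(st, 7ULL) == XXH_OK) {
 *         XXH3_128bits_update(st, chunk, chunkSize);
 *         XXH128_hash_t const h = XXH3_128bits_digest(st);
 *         (void)h;
 *     }
 *     XXH3_freeState(st);
 */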

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return: 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note: XXH128_hash_t is compact, it has no padding bytes */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * return: >0 if *h128_1  > *h128_2
 *         <0 if *h128_1  < *h128_2
 *         =0 if *h128_1 == *h128_2 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note: this bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
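
/*
 * Illustrative sketch (not part of the library): since XXH128_cmp() matches
 * qsort()'s comparator signature, an array of hashes can be sorted directly.
 * `hashes` and `count` are hypothetical caller variables.
 *
 *     #include <stdlib.h>
 *     qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 */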


/*====== Canonical representation ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    memcpy(dst, &hash.high64, sizeof(hash.high64));
    memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
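
/*
 * Illustrative sketch (not part of the library): the canonical form stores the
 * hash big-endian (high64 first), so it can be written to disk or sent over a
 * network and read back identically on any platform. `h` is a hypothetical
 * previously computed XXH128_hash_t; assert() comes from <assert.h>.
 *
 *     XXH128_canonical_t canon;
 *     XXH128_canonicalFromHash(&canon, h);    // serialize, e.g. before fwrite()
 *     XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *     assert(XXH128_isEqual(h, back));        // round-trip preserves the value
 */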

/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif
