// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

/* BEGIN RocksDB customizations */
#ifndef XXH_STATIC_LINKING_ONLY
#define XXH_STATIC_LINKING_ONLY 1 /* using xxhash.cc */
#endif // !defined(XXH_STATIC_LINKING_ONLY)
#ifndef XXH_NAMESPACE
#define XXH_NAMESPACE ROCKSDB_
#endif // !defined(XXH_NAMESPACE)
#include "port/lang.h" // for FALLTHROUGH_INTENDED, inserted as appropriate
/* END RocksDB customizations */

/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 * INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *   https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
/* this section should be traversed only once */
# define XXH_INLINE_ALL_31684351384
/* give access to the advanced API, required to compile implementations */
# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
# define XXH_STATIC_LINKING_ONLY
/* make all functions private */
# undef XXH_PUBLIC_API
# if defined(__GNUC__)
#  define XXH_PUBLIC_API static __inline __attribute__((unused))
# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define XXH_PUBLIC_API static inline
# elif defined(_MSC_VER)
#  define XXH_PUBLIC_API static __inline
# else
/* note: this version may generate warnings for unused static functions */
#  define XXH_PUBLIC_API static
# endif

/*
 * This part deals with the special case where a unit wants to inline xxHash,
 * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such
 * as part of some previously included *.h header file.
 * Without further action, the new include would just be ignored,
 * and functions would effectively _not_ be inlined (silent failure).
 * The following macros solve this situation by prefixing all inlined names,
 * avoiding naming collision with previous inclusions.
 */
# ifdef XXH_NAMESPACE
#  error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported"
/*
 * Note: Alternative: #undef all symbols (it's a pretty large list).
 * Without #error: it compiles, but functions are actually not inlined.
 */
# endif
# define XXH_NAMESPACE XXH_INLINE_
/*
 * Some identifiers (enums, type names) are not symbols, but they must
 * still be renamed to avoid redeclaration.
 * Alternative solution: do not redeclare them.
 * However, this requires some #ifdefs, and is a more dispersed action.
 * Meanwhile, renaming can be achieved in a single block.
 */
# define XXH_IPREF(Id) XXH_INLINE_ ## Id
# define XXH_OK XXH_IPREF(XXH_OK)
# define XXH_ERROR XXH_IPREF(XXH_ERROR)
# define XXH_errorcode XXH_IPREF(XXH_errorcode)
# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
# define XXH32_state_s XXH_IPREF(XXH32_state_s)
# define XXH32_state_t XXH_IPREF(XXH32_state_t)
# define XXH64_state_s XXH_IPREF(XXH64_state_s)
# define XXH64_state_t XXH_IPREF(XXH64_state_t)
# define XXH3_state_s XXH_IPREF(XXH3_state_s)
# define XXH3_state_t XXH_IPREF(XXH3_state_t)
# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
/* Ensure the header is parsed again, even if it was previously included */
# undef XXHASH_H_5627135585666179
# undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 * Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#  ifdef XXH_EXPORT
#   define XXH_PUBLIC_API __declspec(dllexport)
#  elif XXH_IMPORT
#   define XXH_PUBLIC_API __declspec(dllimport)
#  endif
# else
#  define XXH_PUBLIC_API /* do nothing */
# endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from the xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
# define XXH_NAMESPACE /* YOUR NAME HERE */
# undef XXH_NAMESPACE
#endif
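
/*
 * Example (a minimal sketch; the library name `MYLIB_` and the wrapper
 * `mylib_checksum` are illustrative, not part of xxHash). With
 * XXH_NAMESPACE defined before inclusion, the call below links against
 * `MYLIB_XXH64` while the source still spells it `XXH64`:
 *
 *     #define XXH_NAMESPACE MYLIB_
 *     #include "xxhash.h"
 *
 *     unsigned long long mylib_checksum(const void* data, size_t len)
 *     {
 *         return XXH64(data, len, 0);
 *     }
 */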

#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
/* XXH3_128bits */
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
 * Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is only useful when xxHash is compiled as a shared library, as it is
 * independent of the version defined in the header.
 *
 * @return `XXH_VERSION_NUMBER` as of when the library was compiled.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
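
/*
 * Sketch: detecting a header/library version mismatch at runtime when
 * xxHash is linked as a shared library (the function name below is
 * illustrative, not part of the API):
 *
 *     #include <stdio.h>
 *     #include "xxhash.h"
 *
 *     void check_xxhash_version(void)
 *     {
 *         if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *             fprintf(stderr, "warning: compiled against xxHash %u, running with %u\n",
 *                     (unsigned)XXH_VERSION_NUMBER, XXH_versionNumber());
 *         }
 *     }
 */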


/* ****************************
 * Definitions
 ******************************/
#include <stddef.h> /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
 * 32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
   && (defined (__cplusplus) \
   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef uint32_t XXH32_hash_t;
#else
# include <limits.h>
# if UINT_MAX == 0xFFFFFFFFUL
    typedef unsigned int XXH32_hash_t;
# else
#  if ULONG_MAX == 0xFFFFFFFFUL
     typedef unsigned long XXH32_hash_t;
#  else
#   error "unsupported platform: need a 32-bit type"
#  endif
# endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is considered rather weak by today's standards.
 *   The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
 *   systems, and offers true 64/128 bit hash results. It provides a superior
 *   level of dispersion, and greatly reduces the risks of collisions.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *   XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *   Direct equivalents for the other variants of xxHash.
 * @see
 *   XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
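
/*
 * One-shot usage sketch: hashing a string in a single call. The wrapper
 * name is illustrative; the seed value is arbitrary, and any fixed seed
 * gives reproducible results:
 *
 *     #include <string.h>
 *     #include "xxhash.h"
 *
 *     XXH32_hash_t hash_string(const char* s)
 *     {
 *         return XXH32(s, strlen(s), 0);
 *     }
 */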

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash value as an unsigned 32- or 64-bit integer.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *   #include <stdio.h>
 *   #include <assert.h>
 *   #include <xxhash.h>
 *   #define BUFFER_SIZE 256
 *
 *   // Note: XXH64 and XXH3 use the same interface.
 *   XXH32_hash_t
 *   hashFile(FILE* stream)
 *   {
 *       XXH32_state_t* state;
 *       unsigned char buf[BUFFER_SIZE];
 *       size_t amt;
 *       XXH32_hash_t hash;
 *
 *       state = XXH32_createState();      // Create a state
 *       assert(state != NULL);            // Error check here
 *       XXH32_reset(state, 0xbaad5eed);   // Reset state with our seed
 *       while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *           XXH32_update(state, buf, amt); // Hash the file in chunks
 *       }
 *       hash = XXH32_digest(state);       // Finalize the hash
 *       XXH32_freeState(state);           // Clean up
 *       return hash;
 *   }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);

/******* Canonical representation *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
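
/*
 * Round-trip sketch: serializing a hash in canonical (big endian) form and
 * reading it back. The byte array can safely be written to disk or sent
 * over a network, regardless of host endianness:
 *
 *     #include <assert.h>
 *     #include "xxhash.h"
 *
 *     void canonical_roundtrip(XXH32_hash_t h)
 *     {
 *         XXH32_canonical_t c;
 *         XXH32_canonicalFromHash(&c, h);  // portable, big endian bytes
 *         assert(XXH32_hashFromCanonical(&c) == h);
 *     }
 */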


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 * 64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
   && (defined (__cplusplus) \
   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef uint64_t XXH64_hash_t;
#else
# include <limits.h>
# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
    /* LP64 ABI says uint64_t is unsigned long */
    typedef unsigned long XXH64_hash_t;
# else
    /* the following type must have a width of 64-bit */
    typedef unsigned long long XXH64_hash_t;
# endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results. It provides a superior level of
 *   dispersion, and greatly reduces the risks of collisions.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *   XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *   Direct equivalents for the other variants of xxHash.
 * @see
 *   XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);

/******* Streaming *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);

/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *   https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on the platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
 * XXH3 64-bit variant
 ************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on the default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *   XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *   XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the produced hash values depends on the secret's entropy.
 * Technically, the secret must look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever unsure about the "randomness" of the blob of bytes,
 * consider relabelling it as a "custom seed" instead,
 * and employ "XXH3_generateSecret()" (see below)
 * to generate a high entropy secret derived from the custom seed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);


/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * The digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * The digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of the produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr);
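
/*
 * Streaming sketch for XXH3_64bits, mirroring the XXH32 file-hashing
 * example earlier in this header. Error handling is abbreviated; real code
 * should check each XXH_errorcode. The function name is illustrative:
 *
 *     #include <stdio.h>
 *     #include <assert.h>
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashFile_xxh3(FILE* stream)
 *     {
 *         XXH3_state_t* state = XXH3_createState();
 *         unsigned char buf[4096];
 *         size_t amt;
 *         XXH64_hash_t hash;
 *
 *         assert(state != NULL);
 *         XXH3_64bits_reset(state);           // default secret, seed of 0
 *         while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *             XXH3_64bits_update(state, buf, amt);
 *         }
 *         hash = XXH3_64bits_digest(state);   // finalize
 *         XXH3_freeState(state);
 *         return hash;
 *     }
 */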

/* note: the canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
 * XXH3 128-bit variant
 ************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;  /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use the already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
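
/*
 * Sorting sketch: because XXH128_cmp() follows the qsort()/bsearch()
 * comparator contract, an array of XXH128_hash_t can be ordered directly
 * (the wrapper name is illustrative):
 *
 *     #include <stdlib.h>
 *     #include "xxhash.h"
 *
 *     void sort_hashes(XXH128_hash_t* hashes, size_t count)
 *     {
 *         qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 *     }
 */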


/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v1;           /*!< First accumulator lane */
   XXH32_hash_t v2;           /*!< Second accumulator lane */
   XXH32_hash_t v3;           /*!< Third accumulator lane */
   XXH32_hash_t v4;           /*!< Fourth accumulator lane */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
}; /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v1;           /*!< First accumulator lane */
   XXH64_hash_t v2;           /*!< Second accumulator lane */
   XXH64_hash_t v3;           /*!< Third accumulator lane */
   XXH64_hash_t v4;           /*!< Fourth accumulator lane */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
}; /* typedef'd to XXH64_state_t */

#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */
# include <stdalign.h>
# define XXH_ALIGN(n) alignas(n)
#elif defined(__GNUC__)
# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
# define XXH_ALIGN(n) __declspec(align(n))
#else
# define XXH_ALIGN(n) /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
    && defined(__GNUC__)
# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * @note **This structure has a strict alignment requirement of 64 bytes.** Do
 * not allocate this with `malloc()` or `new`, it will not be sufficiently
 * aligned. Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack
 * allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Do not access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t reserved32;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
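
/*
 * Stack-allocation sketch: a state placed on the stack (possible because
 * this static-linking section exposes the full struct definition) must be
 * initialized with XXH3_INITSTATE() before a seeded reset. The function
 * name and seed value are illustrative:
 *
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hash_with_stack_state(const void* data, size_t len)
 *     {
 *         XXH3_state_t state;
 *         XXH3_INITSTATE(&state);
 *         XXH3_64bits_reset_withSeed(&state, 42);
 *         XXH3_64bits_update(&state, data, len);
 *         return XXH3_64bits_digest(&state);
 *     }
 */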


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
 * into an already allocated buffer secretBuffer.
 * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE bytes long.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * this function can be used to generate a secret of proper quality.
 *
 * customSeed can be anything. It can have any size, even a small one,
 * and its content can be anything, even a stupidly "low entropy" source such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all expected qualities.
 *
 * Supplying NULL as the customSeed copies the default secret into `secretBuffer`.
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize);
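
/*
 * Secret-derivation sketch: turning an arbitrary, possibly low-entropy seed
 * blob into a full-quality secret, then hashing with it. The function name
 * and seed string are illustrative:
 *
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hash_with_custom_secret(const void* data, size_t len)
 *     {
 *         unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *         const char seed[] = "any content works, even this";
 *         XXH3_generateSecret(secret, seed, sizeof(seed));
 *         return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *     }
 */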


/* simple short-cut to pre-selected XXH3_128bits variant */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


#endif /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# define XXH_IMPLEMENTATION
#endif

#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence to be included alongside the header.
 * Previously, the implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
# define XXH_IMPLEM_13a8737387

/* *************************************
 * Tuning parameters
 ***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
# define XXH_NO_LONG_LONG
# undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The switch below allows selection of a different access method
 * in the search for improved performance.
1142 *
1143 * @par Possible options:
1144 *
1145 * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
1146 * @par
1147 * Use `memcpy()`. Safe and portable. Note that most modern compilers will
1148 * eliminate the function call and treat it as an unaligned access.
1149 *
1150 * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
1151 * @par
1152 * Depends on compiler extensions and is therefore not portable.
1153 * This method is safe _if_ your compiler supports it,
1154 * and *generally* as fast or faster than `memcpy`.
1155 *
1156 * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
1157 * @par
1158 * Casts directly and dereferences. This method doesn't depend on the
1159 * compiler, but it violates the C standard as it directly dereferences an
1160 * unaligned pointer. It can generate buggy code on targets which do not
1161 * support unaligned memory accesses, but in some circumstances, it's the
1162 * only known way to get the most performance.
1163 *
1164 * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
1165 * @par
1166 * Also portable. This can generate the best code on old compilers which don't
1167 * inline small `memcpy()` calls, and it might also be faster on big-endian
1168 * systems which lack a native byteswap instruction. However, some compilers
1169 * will emit literal byteshifts even if the target supports unaligned access.
1170 * .
1171 *
1172 * @warning
1173 * Methods 1 and 2 rely on implementation-defined behavior. Use these with
1174 * care, as what works on one compiler/platform/optimization level may cause
1175 * another to read garbage data or even crash.
1176 *
1177 * See https://stackoverflow.com/a/32095106/646947 for details.
1178 *
1179 * Prefer these methods in priority order (0 > 3 > 1 > 2)
1180 */
1181 # define XXH_FORCE_MEMORY_ACCESS 0
1182 /*!
1183 * @def XXH_ACCEPT_NULL_INPUT_POINTER
1184 * @brief Whether to add explicit `NULL` checks.
1185 *
1186 * If the input pointer is `NULL` and the length is non-zero, xxHash's default
1187 * behavior is to dereference it, triggering a segfault.
1188 *
1189 * When this macro is enabled, xxHash actively checks the input for a null pointer.
1190 * If it is, the result for null input pointers is the same as a zero-length input.
1191 */
1192 # define XXH_ACCEPT_NULL_INPUT_POINTER 0
1193 /*!
1194 * @def XXH_FORCE_ALIGN_CHECK
1195 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
1196 * and XXH64() only).
1197 *
1198 * This is an important performance trick for architectures without decent
1199 * unaligned memory access performance.
1200 *
1201 * It checks for input alignment, and when conditions are met, uses a "fast
1202 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
1203 * faster_ read speed.
1204 *
1205 * The check costs one initial branch per hash, which is generally negligible,
1206 * but not zero.
1207 *
1208 * Moreover, it's not useful to generate an additional code path if memory
1209 * access uses the same instruction for both aligned and unaligned
1210 * addresses (e.g. x86 and aarch64).
1211 *
1212 * In these cases, the alignment check can be removed by setting this macro to 0.
1213 * Then the code will always use unaligned memory access.
1214 * Align check is automatically disabled on x86, x64 & arm64,
1215 * which are platforms known to offer good unaligned memory accesses performance.
1216 *
1217 * This option does not affect XXH3 (only XXH32 and XXH64).
1218 */
1219 # define XXH_FORCE_ALIGN_CHECK 0
1220
1221 /*!
1222 * @def XXH_NO_INLINE_HINTS
1223 * @brief When non-zero, sets all functions to `static`.
1224 *
1225 * By default, xxHash tries to force the compiler to inline almost all internal
1226 * functions.
1227 *
1228 * This can usually improve performance due to reduced jumping and improved
1229 * constant folding, but significantly increases the size of the binary which
1230 * might not be favorable.
1231 *
1232 * Additionally, sometimes the forced inlining can be detrimental to performance,
1233 * depending on the architecture.
1234 *
1235 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
1236 * compiler full control on whether to inline or not.
1237 *
1238 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
1239 * -fno-inline with GCC or Clang, this will automatically be defined.
1240 */
1241 # define XXH_NO_INLINE_HINTS 0
1242
1243 /*!
1244 * @def XXH_REROLL
1245 * @brief Whether to reroll `XXH32_finalize` and `XXH64_finalize`.
1246 *
1247 * For performance, `XXH32_finalize` and `XXH64_finalize` use an unrolled loop
1248 * in the form of a switch statement.
1249 *
1250 * This is not always desirable, as it generates larger code, and depending on
1251 * the architecture, may even be slower
1252 *
1253 * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
1254 */
1255 # define XXH_REROLL 0
1256
1257 /*!
1258 * @internal
1259 * @brief Redefines old internal names.
1260 *
1261 * For compatibility with code that uses xxHash's internals before the names
1262 * were changed to improve namespacing. There is no other reason to use this.
1263 */
1264 # define XXH_OLD_NAMES
1265 # undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
1266 #endif /* XXH_DOXYGEN */
1267 /*!
1268 * @}
1269 */
1270
1271 #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
1272 /* prefer __packed__ structures (method 1) for gcc on armv7 and armv8 */
1273 # if !defined(__clang__) && ( \
1274 (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1275 (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)) )
1276 # define XXH_FORCE_MEMORY_ACCESS 1
1277 # endif
1278 #endif
1279
1280 #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
1281 # define XXH_ACCEPT_NULL_INPUT_POINTER 0
1282 #endif
1283
1284 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
1285 # if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \
1286 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */
1287 # define XXH_FORCE_ALIGN_CHECK 0
1288 # else
1289 # define XXH_FORCE_ALIGN_CHECK 1
1290 # endif
1291 #endif
1292
1293 #ifndef XXH_NO_INLINE_HINTS
1294 # if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
1295 || defined(__NO_INLINE__) /* -O0, -fno-inline */
1296 # define XXH_NO_INLINE_HINTS 1
1297 # else
1298 # define XXH_NO_INLINE_HINTS 0
1299 # endif
1300 #endif
1301
1302 #ifndef XXH_REROLL
1303 # if defined(__OPTIMIZE_SIZE__)
1304 # define XXH_REROLL 1
1305 # else
1306 # define XXH_REROLL 0
1307 # endif
1308 #endif
1309
1310 /*!
1311 * @defgroup impl Implementation
1312 * @{
1313 */
1314
1315
1316 /* *************************************
1317 * Includes & Memory related functions
1318 ***************************************/
1319 /*
1320 * Modify the local functions below should you wish to use
1321 * different memory routines for malloc() and free()
1322 */
1323 #include <stdlib.h>
1324
1325 /*!
1326 * @internal
1327 * @brief Modify this function to use a different routine than malloc().
1328 */
XXH_malloc(size_t s)1329 static void* XXH_malloc(size_t s) { return malloc(s); }
1330
1331 /*!
1332 * @internal
1333 * @brief Modify this function to use a different routine than free().
1334 */
XXH_free(void * p)1335 static void XXH_free(void* p) { free(p); }
1336
1337 #include <string.h>
1338
1339 /*!
1340 * @internal
1341 * @brief Modify this function to use a different routine than memcpy().
1342 */
XXH_memcpy(void * dest,const void * src,size_t size)1343 static void* XXH_memcpy(void* dest, const void* src, size_t size)
1344 {
1345 return memcpy(dest,src,size);
1346 }
1347
1348 #include <limits.h> /* ULLONG_MAX */
1349
1350
1351 /* *************************************
1352 * Compiler Specific Options
1353 ***************************************/
1354 #ifdef _MSC_VER /* Visual Studio warning fix */
1355 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1356 #endif
1357
1358 #if XXH_NO_INLINE_HINTS /* disable inlining hints */
1359 # if defined(__GNUC__)
1360 # define XXH_FORCE_INLINE static __attribute__((unused))
1361 # else
1362 # define XXH_FORCE_INLINE static
1363 # endif
1364 # define XXH_NO_INLINE static
1365 /* enable inlining hints */
1366 #elif defined(_MSC_VER) /* Visual Studio */
1367 # define XXH_FORCE_INLINE static __forceinline
1368 # define XXH_NO_INLINE static __declspec(noinline)
1369 #elif defined(__GNUC__)
1370 # define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1371 # define XXH_NO_INLINE static __attribute__((noinline))
1372 #elif defined (__cplusplus) \
1373 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
1374 # define XXH_FORCE_INLINE static inline
1375 # define XXH_NO_INLINE static
1376 #else
1377 # define XXH_FORCE_INLINE static
1378 # define XXH_NO_INLINE static
1379 #endif
1380
1381
1382
1383 /* *************************************
1384 * Debug
1385 ***************************************/
1386 /*!
1387 * @ingroup tuning
1388 * @def XXH_DEBUGLEVEL
1389 * @brief Sets the debugging level.
1390 *
1391 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1392 * compiler's command line options. The value must be a number.
1393 */
1394 #ifndef XXH_DEBUGLEVEL
1395 # ifdef DEBUGLEVEL /* backwards compat */
1396 # define XXH_DEBUGLEVEL DEBUGLEVEL
1397 # else
1398 # define XXH_DEBUGLEVEL 0
1399 # endif
1400 #endif
1401
1402 #if (XXH_DEBUGLEVEL>=1)
1403 # include <assert.h> /* note: can still be disabled with NDEBUG */
1404 # define XXH_ASSERT(c) assert(c)
1405 #else
1406 # define XXH_ASSERT(c) ((void)0)
1407 #endif
1408
1409 /* note: use after variable declarations */
1410 #define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
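/*
 * Illustrative use (a sketch): when the condition is false, the
 * 1/(int)(!!(c)) term divides by zero, making the enum initializer a
 * compile-time error; when it is true, the statement compiles to nothing.
 * For example:
 *
 *     XXH_STATIC_ASSERT(sizeof(xxh_u32) == 4);
 */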
1411
1412 /*!
1413 * @internal
1414 * @def XXH_COMPILER_GUARD(var)
1415 * @brief Used to prevent unwanted optimizations for @p var.
1416 *
1417 * It uses an empty GCC inline assembly statement with a register constraint
1418  * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
1419 * on x86) and marks it as modified.
1420 *
1421 * This is used in a few places to avoid unwanted autovectorization (e.g.
1422 * XXH32_round()). All vectorization we want is explicit via intrinsics,
1423 * and _usually_ isn't wanted elsewhere.
1424 *
1425 * We also use it to prevent unwanted constant folding for AArch64 in
1426 * XXH3_initCustomSecret_scalar().
1427 */
1428 #ifdef __GNUC__
1429 # define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1430 #else
1431 # define XXH_COMPILER_GUARD(var) ((void)0)
1432 #endif
1433
1434 /* *************************************
1435 * Basic Types
1436 ***************************************/
1437 #if !defined (__VMS) \
1438 && (defined (__cplusplus) \
1439 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
1440 # include <stdint.h>
1441 typedef uint8_t xxh_u8;
1442 #else
1443 typedef unsigned char xxh_u8;
1444 #endif
1445 typedef XXH32_hash_t xxh_u32;
1446
1447 #ifdef XXH_OLD_NAMES
1448 # define BYTE xxh_u8
1449 # define U8 xxh_u8
1450 # define U32 xxh_u32
1451 #endif
1452
1453 /* *** Memory access *** */
1454
1455 /*!
1456 * @internal
1457 * @fn xxh_u32 XXH_read32(const void* ptr)
1458 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
1459 *
1460 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1461 *
1462 * @param ptr The pointer to read from.
1463 * @return The 32-bit native endian integer from the bytes at @p ptr.
1464 */
1465
1466 /*!
1467 * @internal
1468 * @fn xxh_u32 XXH_readLE32(const void* ptr)
1469 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
1470 *
1471 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1472 *
1473 * @param ptr The pointer to read from.
1474 * @return The 32-bit little endian integer from the bytes at @p ptr.
1475 */
1476
1477 /*!
1478 * @internal
1479 * @fn xxh_u32 XXH_readBE32(const void* ptr)
1480 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
1481 *
1482 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1483 *
1484 * @param ptr The pointer to read from.
1485 * @return The 32-bit big endian integer from the bytes at @p ptr.
1486 */
1487
1488 /*!
1489 * @internal
1490 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1491 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
1492 *
1493 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1494 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
1495 * always @ref XXH_alignment::XXH_unaligned.
1496 *
1497 * @param ptr The pointer to read from.
1498 * @param align Whether @p ptr is aligned.
1499 * @pre
1500 * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
1501 * aligned.
1502 * @return The 32-bit little endian integer from the bytes at @p ptr.
1503 */
1504
1505 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1506 /*
1507 * Manual byteshift. Best for old compilers which don't inline memcpy.
1508 * We actually directly use XXH_readLE32 and XXH_readBE32.
1509 */
1510 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1511
1512 /*
1513  * Force direct memory access. Only works on CPUs which support unaligned memory
1514 * access in hardware.
1515 */
1516 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
1517
1518 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1519
1520 /*
1521 * __pack instructions are safer but compiler specific, hence potentially
1522 * problematic for some compilers.
1523 *
1524 * Currently only defined for GCC and ICC.
1525 */
1526 #ifdef XXH_OLD_NAMES
1527 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
1528 #endif
1529 static xxh_u32 XXH_read32(const void* ptr)
1530 {
1531 typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
1532 return ((const xxh_unalign*)ptr)->u32;
1533 }
1534
1535 #else
1536
1537 /*
1538 * Portable and safe solution. Generally efficient.
1539 * see: https://stackoverflow.com/a/32095106/646947
1540 */
1541 static xxh_u32 XXH_read32(const void* memPtr)
1542 {
1543 xxh_u32 val;
1544 memcpy(&val, memPtr, sizeof(val));
1545 return val;
1546 }
1547
1548 #endif /* XXH_FORCE_MEMORY_ACCESS */
1549
1550
1551 /* *** Endianness *** */
1552 /*!
1553 * @ingroup tuning
1554 * @def XXH_CPU_LITTLE_ENDIAN
1555 * @brief Whether the target is little endian.
1556 *
1557 * Defined to 1 if the target is little endian, or 0 if it is big endian.
1558 * It can be defined externally, for example on the compiler command line.
1559 *
1560 * If it is not defined, a runtime check (which is usually constant folded)
1561 * is used instead.
1562 *
1563 * @note
1564 * This is not necessarily defined to an integer constant.
1565 *
1566 * @see XXH_isLittleEndian() for the runtime check.
1567 */
1568 #ifndef XXH_CPU_LITTLE_ENDIAN
1569 /*
1570 * Try to detect endianness automatically, to avoid the nonstandard behavior
1571 * in `XXH_isLittleEndian()`
1572 */
1573 # if defined(_WIN32) /* Windows is always little endian */ \
1574 || defined(__LITTLE_ENDIAN__) \
1575 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1576 # define XXH_CPU_LITTLE_ENDIAN 1
1577 # elif defined(__BIG_ENDIAN__) \
1578 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
1579 # define XXH_CPU_LITTLE_ENDIAN 0
1580 # else
1581 /*!
1582 * @internal
1583 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
1584 *
1585 * Most compilers will constant fold this.
1586 */
1587 static int XXH_isLittleEndian(void)
1588 {
1589 /*
1590 * Portable and well-defined behavior.
1591 * Don't use static: it is detrimental to performance.
1592 */
1593 const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
1594 return one.c[0];
1595 }
1596 # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
1597 # endif
1598 #endif
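/*
 * Illustrative override (a sketch): when the target endianness is known at
 * build time, the runtime check above can be skipped entirely by defining the
 * macro externally, e.g.
 *
 *     cc -DXXH_CPU_LITTLE_ENDIAN=1 ...
 *
 * or, before including this header:
 *
 *     #define XXH_CPU_LITTLE_ENDIAN 1
 */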
1599
1600
1601
1602
1603 /* ****************************************
1604 * Compiler-specific Functions and Macros
1605 ******************************************/
1606 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
1607
1608 #ifdef __has_builtin
1609 # define XXH_HAS_BUILTIN(x) __has_builtin(x)
1610 #else
1611 # define XXH_HAS_BUILTIN(x) 0
1612 #endif
1613
1614 /*!
1615 * @internal
1616 * @def XXH_rotl32(x,r)
1617 * @brief 32-bit rotate left.
1618 *
1619 * @param x The 32-bit integer to be rotated.
1620 * @param r The number of bits to rotate.
1621 * @pre
1622 * @p r > 0 && @p r < 32
1623 * @note
1624 * @p x and @p r may be evaluated multiple times.
1625 * @return The rotated result.
1626 */
1627 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
1628 && XXH_HAS_BUILTIN(__builtin_rotateleft64)
1629 # define XXH_rotl32 __builtin_rotateleft32
1630 # define XXH_rotl64 __builtin_rotateleft64
1631 /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
1632 #elif defined(_MSC_VER)
1633 # define XXH_rotl32(x,r) _rotl(x,r)
1634 # define XXH_rotl64(x,r) _rotl64(x,r)
1635 #else
1636 # define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
1637 # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
1638 #endif
1639
1640 /*!
1641 * @internal
1642 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
1643 * @brief A 32-bit byteswap.
1644 *
1645 * @param x The 32-bit integer to byteswap.
1646 * @return @p x, byteswapped.
1647 */
1648 #if defined(_MSC_VER) /* Visual Studio */
1649 # define XXH_swap32 _byteswap_ulong
1650 #elif XXH_GCC_VERSION >= 403
1651 # define XXH_swap32 __builtin_bswap32
1652 #else
1653 static xxh_u32 XXH_swap32 (xxh_u32 x)
1654 {
1655 return ((x << 24) & 0xff000000 ) |
1656 ((x << 8) & 0x00ff0000 ) |
1657 ((x >> 8) & 0x0000ff00 ) |
1658 ((x >> 24) & 0x000000ff );
1659 }
1660 #endif
1661
1662
1663 /* ***************************
1664 * Memory reads
1665 *****************************/
1666
1667 /*!
1668 * @internal
1669 * @brief Enum to indicate whether a pointer is aligned.
1670 */
1671 typedef enum {
1672 XXH_aligned, /*!< Aligned */
1673 XXH_unaligned /*!< Possibly unaligned */
1674 } XXH_alignment;
1675
1676 /*
1677 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
1678 *
1679 * This is ideal for older compilers which don't inline memcpy.
1680 */
1681 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1682
1683 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
1684 {
1685 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1686 return bytePtr[0]
1687 | ((xxh_u32)bytePtr[1] << 8)
1688 | ((xxh_u32)bytePtr[2] << 16)
1689 | ((xxh_u32)bytePtr[3] << 24);
1690 }
1691
1692 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
1693 {
1694 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1695 return bytePtr[3]
1696 | ((xxh_u32)bytePtr[2] << 8)
1697 | ((xxh_u32)bytePtr[1] << 16)
1698 | ((xxh_u32)bytePtr[0] << 24);
1699 }
1700
1701 #else
1702 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
1703 {
1704 return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
1705 }
1706
1707 static xxh_u32 XXH_readBE32(const void* ptr)
1708 {
1709 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
1710 }
1711 #endif
1712
1713 XXH_FORCE_INLINE xxh_u32
1714 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1715 {
1716 if (align==XXH_unaligned) {
1717 return XXH_readLE32(ptr);
1718 } else {
1719 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
1720 }
1721 }
1722
1723
1724 /* *************************************
1725 * Misc
1726 ***************************************/
1727 /*! @ingroup public */
1728 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1729
1730
1731 /* *******************************************************************
1732 * 32-bit hash functions
1733 *********************************************************************/
1734 /*!
1735 * @}
1736 * @defgroup xxh32_impl XXH32 implementation
1737 * @ingroup impl
1738 * @{
1739 */
1740 /* #define instead of static const, to be used as initializers */
1741 #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
1742 #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
1743 #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
1744 #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
1745 #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
1746
1747 #ifdef XXH_OLD_NAMES
1748 # define PRIME32_1 XXH_PRIME32_1
1749 # define PRIME32_2 XXH_PRIME32_2
1750 # define PRIME32_3 XXH_PRIME32_3
1751 # define PRIME32_4 XXH_PRIME32_4
1752 # define PRIME32_5 XXH_PRIME32_5
1753 #endif
1754
1755 /*!
1756 * @internal
1757 * @brief Normal stripe processing routine.
1758 *
1759 * This shuffles the bits so that any bit from @p input impacts several bits in
1760 * @p acc.
1761 *
1762 * @param acc The accumulator lane.
1763 * @param input The stripe of input to mix.
1764 * @return The mixed accumulator lane.
1765 */
1766 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1767 {
1768 acc += input * XXH_PRIME32_2;
1769 acc = XXH_rotl32(acc, 13);
1770 acc *= XXH_PRIME32_1;
1771 #if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1772 /*
1773 * UGLY HACK:
1774 * A compiler fence is the only thing that prevents GCC and Clang from
1775 * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
1776 * reason) without globally disabling SSE4.1.
1777 *
1778 * The reason we want to avoid vectorization is because despite working on
1779 * 4 integers at a time, there are multiple factors slowing XXH32 down on
1780 * SSE4:
1781 * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
1782 * newer chips!) making it slightly slower to multiply four integers at
1783 * once compared to four integers independently. Even when pmulld was
1784 * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
1785 * just to multiply unless doing a long operation.
1786 *
1787 * - Four instructions are required to rotate,
1788  *       movdqa  tmp, v   // not required with VEX encoding
1789  *       pslld   tmp, 13  // tmp <<= 13
1790  *       psrld   v,   19  // v >>= 19
1791  *       por     v,   tmp // v |= tmp
1792 * compared to one for scalar:
1793 * roll v, 13 // reliably fast across the board
1794 * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
1795 *
1796 * - Instruction level parallelism is actually more beneficial here because
1797 * the SIMD actually serializes this operation: While v1 is rotating, v2
1798 * can load data, while v3 can multiply. SSE forces them to operate
1799 * together.
1800 *
1801 * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
1802 * and it is pointless writing a NEON implementation that is basically the
1803 * same speed as scalar for XXH32.
1804 */
1805 XXH_COMPILER_GUARD(acc);
1806 #endif
1807 return acc;
1808 }
1809
1810 /*!
1811 * @internal
1812 * @brief Mixes all bits to finalize the hash.
1813 *
1814 * The final mix ensures that all input bits have a chance to impact any bit in
1815 * the output digest, resulting in an unbiased distribution.
1816 *
1817 * @param h32 The hash to avalanche.
1818 * @return The avalanched hash.
1819 */
1820 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1821 {
1822 h32 ^= h32 >> 15;
1823 h32 *= XXH_PRIME32_2;
1824 h32 ^= h32 >> 13;
1825 h32 *= XXH_PRIME32_3;
1826 h32 ^= h32 >> 16;
1827 return(h32);
1828 }
1829
1830 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
1831
1832 /*!
1833 * @internal
1834 * @brief Processes the last 0-15 bytes of @p ptr.
1835 *
1836 * There may be up to 15 bytes remaining to consume from the input.
1837 * This final stage will digest them to ensure that all input bytes are present
1838 * in the final mix.
1839 *
1840 * @param h32 The hash to finalize.
1841 * @param ptr The pointer to the remaining input.
1842 * @param len The remaining length, modulo 16.
1843 * @param align Whether @p ptr is aligned.
1844 * @return The finalized hash.
1845 */
1846 static xxh_u32
1847 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1848 {
1849 #define XXH_PROCESS1 do { \
1850 h32 += (*ptr++) * XXH_PRIME32_5; \
1851 h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \
1852 } while (0)
1853
1854 #define XXH_PROCESS4 do { \
1855 h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \
1856 ptr += 4; \
1857 h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \
1858 } while (0)
1859
1860 /* Compact rerolled version */
1861 if (XXH_REROLL) {
1862 len &= 15;
1863 while (len >= 4) {
1864 XXH_PROCESS4;
1865 len -= 4;
1866 }
1867 while (len > 0) {
1868 XXH_PROCESS1;
1869 --len;
1870 }
1871 return XXH32_avalanche(h32);
1872 } else {
1873 switch(len&15) /* or switch(bEnd - p) */ {
1874 case 12: XXH_PROCESS4;
1875 FALLTHROUGH_INTENDED;
1876 case 8: XXH_PROCESS4;
1877 FALLTHROUGH_INTENDED;
1878 case 4: XXH_PROCESS4;
1879 return XXH32_avalanche(h32);
1880
1881 case 13: XXH_PROCESS4;
1882 FALLTHROUGH_INTENDED;
1883 case 9: XXH_PROCESS4;
1884 FALLTHROUGH_INTENDED;
1885 case 5: XXH_PROCESS4;
1886 XXH_PROCESS1;
1887 return XXH32_avalanche(h32);
1888
1889 case 14: XXH_PROCESS4;
1890 FALLTHROUGH_INTENDED;
1891 case 10: XXH_PROCESS4;
1892 FALLTHROUGH_INTENDED;
1893 case 6: XXH_PROCESS4;
1894 XXH_PROCESS1;
1895 XXH_PROCESS1;
1896 return XXH32_avalanche(h32);
1897
1898 case 15: XXH_PROCESS4;
1899 FALLTHROUGH_INTENDED;
1900 case 11: XXH_PROCESS4;
1901 FALLTHROUGH_INTENDED;
1902 case 7: XXH_PROCESS4;
1903 FALLTHROUGH_INTENDED;
1904 case 3: XXH_PROCESS1;
1905 FALLTHROUGH_INTENDED;
1906 case 2: XXH_PROCESS1;
1907 FALLTHROUGH_INTENDED;
1908 case 1: XXH_PROCESS1;
1909 FALLTHROUGH_INTENDED;
1910 case 0: return XXH32_avalanche(h32);
1911 }
1912 XXH_ASSERT(0);
1913 return h32; /* reaching this point is deemed impossible */
1914 }
1915 }
1916
1917 #ifdef XXH_OLD_NAMES
1918 # define PROCESS1 XXH_PROCESS1
1919 # define PROCESS4 XXH_PROCESS4
1920 #else
1921 # undef XXH_PROCESS1
1922 # undef XXH_PROCESS4
1923 #endif
1924
1925 /*!
1926 * @internal
1927 * @brief The implementation for @ref XXH32().
1928 *
1929 * @param input, len, seed Directly passed from @ref XXH32().
1930 * @param align Whether @p input is aligned.
1931 * @return The calculated hash.
1932 */
1933 XXH_FORCE_INLINE xxh_u32
1934 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
1935 {
1936 const xxh_u8* bEnd = input ? input + len : NULL;
1937 xxh_u32 h32;
1938
1939 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1940 if (input==NULL) {
1941 len=0;
1942 bEnd=input=(const xxh_u8*)(size_t)16;
1943 }
1944 #endif
1945
1946 if (len>=16) {
1947 const xxh_u8* const limit = bEnd - 15;
1948 xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
1949 xxh_u32 v2 = seed + XXH_PRIME32_2;
1950 xxh_u32 v3 = seed + 0;
1951 xxh_u32 v4 = seed - XXH_PRIME32_1;
1952
1953 do {
1954 v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
1955 v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
1956 v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
1957 v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
1958 } while (input < limit);
1959
1960 h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
1961 + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
1962 } else {
1963 h32 = seed + XXH_PRIME32_5;
1964 }
1965
1966 h32 += (xxh_u32)len;
1967
1968 return XXH32_finalize(h32, input, len&15, align);
1969 }
1970
1971 /*! @ingroup xxh32_family */
1972 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
1973 {
1974 #if 0
1975 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
1976 XXH32_state_t state;
1977 XXH32_reset(&state, seed);
1978 XXH32_update(&state, (const xxh_u8*)input, len);
1979 return XXH32_digest(&state);
1980 #else
1981 if (XXH_FORCE_ALIGN_CHECK) {
1982 if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
1983 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
1984 } }
1985
1986 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
1987 #endif
1988 }
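/*
 * Illustrative one-shot usage (a minimal sketch; the input string and seed
 * below are hypothetical, not part of this file):
 */
#if 0
#include <stdio.h>
int main(void)
{
    static const char data[] = "hello world";
    XXH32_hash_t const h = XXH32(data, sizeof(data)-1, 0 /* seed */);
    printf("%08x\n", (unsigned)h);   /* same input + same seed => same hash */
    return 0;
}
#endif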
1989
1990
1991
1992 /******* Hash streaming *******/
1993 /*!
1994 * @ingroup xxh32_family
1995 */
1996 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
1997 {
1998 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
1999 }
2000 /*! @ingroup xxh32_family */
2001 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
2002 {
2003 XXH_free(statePtr);
2004 return XXH_OK;
2005 }
2006
2007 /*! @ingroup xxh32_family */
2008 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
2009 {
2010 memcpy(dstState, srcState, sizeof(*dstState));
2011 }
2012
2013 /*! @ingroup xxh32_family */
2014 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
2015 {
2016 XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
2017 memset(&state, 0, sizeof(state));
2018 state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2019 state.v2 = seed + XXH_PRIME32_2;
2020 state.v3 = seed + 0;
2021 state.v4 = seed - XXH_PRIME32_1;
2022 /* do not write into reserved, planned to be removed in a future version */
2023 memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
2024 return XXH_OK;
2025 }
2026
2027
2028 /*! @ingroup xxh32_family */
2029 XXH_PUBLIC_API XXH_errorcode
2030 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2031 {
2032 if (input==NULL)
2033 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2034 return XXH_OK;
2035 #else
2036 return XXH_ERROR;
2037 #endif
2038
2039 { const xxh_u8* p = (const xxh_u8*)input;
2040 const xxh_u8* const bEnd = p + len;
2041
2042 state->total_len_32 += (XXH32_hash_t)len;
2043 state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2044
2045 if (state->memsize + len < 16) { /* fill in tmp buffer */
2046 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2047 state->memsize += (XXH32_hash_t)len;
2048 return XXH_OK;
2049 }
2050
2051 if (state->memsize) { /* some data left from previous update */
2052 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2053 { const xxh_u32* p32 = state->mem32;
2054 state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
2055 state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
2056 state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
2057 state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
2058 }
2059 p += 16-state->memsize;
2060 state->memsize = 0;
2061 }
2062
2063 /* uintptr_t casts avoid UB or compiler warning on out-of-bounds
2064 * pointer arithmetic */
2065 if ((uintptr_t)p <= (uintptr_t)bEnd - 16) {
2066 const uintptr_t limit = (uintptr_t)bEnd - 16;
2067 xxh_u32 v1 = state->v1;
2068 xxh_u32 v2 = state->v2;
2069 xxh_u32 v3 = state->v3;
2070 xxh_u32 v4 = state->v4;
2071
2072 do {
2073 v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
2074 v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
2075 v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
2076 v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
2077 } while ((uintptr_t)p<=limit);
2078
2079 state->v1 = v1;
2080 state->v2 = v2;
2081 state->v3 = v3;
2082 state->v4 = v4;
2083 }
2084
2085 if (p < bEnd) {
2086 XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2087 state->memsize = (unsigned)(bEnd-p);
2088 }
2089 }
2090
2091 return XXH_OK;
2092 }
2093
2094
2095 /*! @ingroup xxh32_family */
2096 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2097 {
2098 xxh_u32 h32;
2099
2100 if (state->large_len) {
2101 h32 = XXH_rotl32(state->v1, 1)
2102 + XXH_rotl32(state->v2, 7)
2103 + XXH_rotl32(state->v3, 12)
2104 + XXH_rotl32(state->v4, 18);
2105 } else {
2106 h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
2107 }
2108
2109 h32 += state->total_len_32;
2110
2111 return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2112 }
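/*
 * Illustrative streaming usage (a minimal sketch; the helper name and the
 * two-part chunking are hypothetical). Feeding the input in pieces produces
 * the same result as a single call to XXH32():
 */
#if 0
static XXH32_hash_t hash_two_parts(const void* p1, size_t n1,
                                   const void* p2, size_t n2,
                                   XXH32_hash_t seed)
{
    XXH32_hash_t h = 0;
    XXH32_state_t* const state = XXH32_createState();
    if (state != NULL) {
        XXH32_reset(state, seed);
        XXH32_update(state, p1, n1);
        XXH32_update(state, p2, n2);
        h = XXH32_digest(state);   /* digest() does not modify the state */
        XXH32_freeState(state);
    }
    return h;
}
#endif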
2113
2114
2115 /******* Canonical representation *******/
2116
2117 /*!
2118 * @ingroup xxh32_family
2119  * The default return values from XXH functions are unsigned 32- and 64-bit
2120  * integers.
2121 *
2122 * The canonical representation uses big endian convention, the same convention
2123 * as human-readable numbers (large digits first).
2124 *
2125 * This way, hash values can be written into a file or buffer, remaining
2126 * comparable across different systems.
2127 *
2128 * The following functions allow transformation of hash values to and from their
2129 * canonical format.
2130 */
2131 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2132 {
2133 XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
2134 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2135 memcpy(dst, &hash, sizeof(*dst));
2136 }
2137 /*! @ingroup xxh32_family */
2138 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2139 {
2140 return XXH_readBE32(src);
2141 }
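/*
 * Illustrative canonical round trip (a sketch; the helper name is
 * hypothetical). The canonical form is what should be written to disk or sent
 * over the network, since it is byte-order independent:
 */
#if 0
static void canonical_round_trip(XXH32_hash_t h)
{
    XXH32_canonical_t canon;   /* 4 bytes, big endian */
    XXH32_canonicalFromHash(&canon, h);
    /* ... canon could be written to a file and read back here ... */
    {   XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canon);
        XXH_ASSERT(h2 == h);
    }
}
#endif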
2142
2143
2144 #ifndef XXH_NO_LONG_LONG
2145
2146 /* *******************************************************************
2147 * 64-bit hash functions
2148 *********************************************************************/
2149 /*!
2150 * @}
2151 * @ingroup impl
2152 * @{
2153 */
2154 /******* Memory access *******/
2155
2156 typedef XXH64_hash_t xxh_u64;
2157
2158 #ifdef XXH_OLD_NAMES
2159 # define U64 xxh_u64
2160 #endif
2161
2162 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2163 /*
2164 * Manual byteshift. Best for old compilers which don't inline memcpy.
2165 * We actually directly use XXH_readLE64 and XXH_readBE64.
2166 */
2167 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2168
2169 /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
2170 static xxh_u64 XXH_read64(const void* memPtr)
2171 {
2172 return *(const xxh_u64*) memPtr;
2173 }
2174
2175 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2176
2177 /*
2178 * __pack instructions are safer, but compiler specific, hence potentially
2179 * problematic for some compilers.
2180 *
2181 * Currently only defined for GCC and ICC.
2182 */
2183 #ifdef XXH_OLD_NAMES
2184 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2185 #endif
2186 static xxh_u64 XXH_read64(const void* ptr)
2187 {
2188 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
2189 return ((const xxh_unalign64*)ptr)->u64;
2190 }
2191
2192 #else
2193
2194 /*
2195 * Portable and safe solution. Generally efficient.
2196 * see: https://stackoverflow.com/a/32095106/646947
2197 */
2198 static xxh_u64 XXH_read64(const void* memPtr)
2199 {
2200 xxh_u64 val;
2201 memcpy(&val, memPtr, sizeof(val));
2202 return val;
2203 }
2204
2205 #endif /* XXH_FORCE_MEMORY_ACCESS */
2206
2207 #if defined(_MSC_VER) /* Visual Studio */
2208 # define XXH_swap64 _byteswap_uint64
2209 #elif XXH_GCC_VERSION >= 403
2210 # define XXH_swap64 __builtin_bswap64
2211 #else
2212 static xxh_u64 XXH_swap64(xxh_u64 x)
2213 {
2214 return ((x << 56) & 0xff00000000000000ULL) |
2215 ((x << 40) & 0x00ff000000000000ULL) |
2216 ((x << 24) & 0x0000ff0000000000ULL) |
2217 ((x << 8) & 0x000000ff00000000ULL) |
2218 ((x >> 8) & 0x00000000ff000000ULL) |
2219 ((x >> 24) & 0x0000000000ff0000ULL) |
2220 ((x >> 40) & 0x000000000000ff00ULL) |
2221 ((x >> 56) & 0x00000000000000ffULL);
2222 }
2223 #endif
2224
2225
2226 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2227 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2228
2229 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2230 {
2231 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2232 return bytePtr[0]
2233 | ((xxh_u64)bytePtr[1] << 8)
2234 | ((xxh_u64)bytePtr[2] << 16)
2235 | ((xxh_u64)bytePtr[3] << 24)
2236 | ((xxh_u64)bytePtr[4] << 32)
2237 | ((xxh_u64)bytePtr[5] << 40)
2238 | ((xxh_u64)bytePtr[6] << 48)
2239 | ((xxh_u64)bytePtr[7] << 56);
2240 }
2241
2242 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2243 {
2244 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2245 return bytePtr[7]
2246 | ((xxh_u64)bytePtr[6] << 8)
2247 | ((xxh_u64)bytePtr[5] << 16)
2248 | ((xxh_u64)bytePtr[4] << 24)
2249 | ((xxh_u64)bytePtr[3] << 32)
2250 | ((xxh_u64)bytePtr[2] << 40)
2251 | ((xxh_u64)bytePtr[1] << 48)
2252 | ((xxh_u64)bytePtr[0] << 56);
2253 }
2254
2255 #else
2256 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2257 {
2258 return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2259 }
2260
2261 static xxh_u64 XXH_readBE64(const void* ptr)
2262 {
2263 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2264 }
2265 #endif
2266
2267 XXH_FORCE_INLINE xxh_u64
2268 XXH_readLE64_align(const void* ptr, XXH_alignment align)
2269 {
2270 if (align==XXH_unaligned)
2271 return XXH_readLE64(ptr);
2272 else
2273 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2274 }
2275
2276
2277 /******* xxh64 *******/
2278 /*!
2279 * @}
2280 * @defgroup xxh64_impl XXH64 implementation
2281 * @ingroup impl
2282 * @{
2283 */
2284 /* #define rather than static const, to be used as initializers */
2285 #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2286 #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2287 #define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2288 #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2289 #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2290
2291 #ifdef XXH_OLD_NAMES
2292 # define PRIME64_1 XXH_PRIME64_1
2293 # define PRIME64_2 XXH_PRIME64_2
2294 # define PRIME64_3 XXH_PRIME64_3
2295 # define PRIME64_4 XXH_PRIME64_4
2296 # define PRIME64_5 XXH_PRIME64_5
2297 #endif
2298
2299 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2300 {
2301 acc += input * XXH_PRIME64_2;
2302 acc = XXH_rotl64(acc, 31);
2303 acc *= XXH_PRIME64_1;
2304 return acc;
2305 }
2306
2307 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2308 {
2309 val = XXH64_round(0, val);
2310 acc ^= val;
2311 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2312 return acc;
2313 }
2314
2315 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2316 {
2317 h64 ^= h64 >> 33;
2318 h64 *= XXH_PRIME64_2;
2319 h64 ^= h64 >> 29;
2320 h64 *= XXH_PRIME64_3;
2321 h64 ^= h64 >> 32;
2322 return h64;
2323 }
2324
2325
2326 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2327
2328 static xxh_u64
2329 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2330 {
2331 len &= 31;
2332 while (len >= 8) {
2333 xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2334 ptr += 8;
2335 h64 ^= k1;
2336 h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2337 len -= 8;
2338 }
2339 if (len >= 4) {
2340 h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2341 ptr += 4;
2342 h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2343 len -= 4;
2344 }
2345 while (len > 0) {
2346 h64 ^= (*ptr++) * XXH_PRIME64_5;
2347 h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2348 --len;
2349 }
2350 return XXH64_avalanche(h64);
2351 }
2352
2353 #ifdef XXH_OLD_NAMES
2354 # define PROCESS1_64 XXH_PROCESS1_64
2355 # define PROCESS4_64 XXH_PROCESS4_64
2356 # define PROCESS8_64 XXH_PROCESS8_64
2357 #else
2358 # undef XXH_PROCESS1_64
2359 # undef XXH_PROCESS4_64
2360 # undef XXH_PROCESS8_64
2361 #endif
2362
2363 XXH_FORCE_INLINE xxh_u64
2364 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2365 {
2366 const xxh_u8* bEnd = input ? input + len : NULL;
2367 xxh_u64 h64;
2368
2369 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2370 if (input==NULL) {
2371 len=0;
2372 bEnd=input=(const xxh_u8*)(size_t)32;
2373 }
2374 #endif
2375
2376 if (len>=32) {
2377 const xxh_u8* const limit = bEnd - 32;
2378 xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2379 xxh_u64 v2 = seed + XXH_PRIME64_2;
2380 xxh_u64 v3 = seed + 0;
2381 xxh_u64 v4 = seed - XXH_PRIME64_1;
2382
2383 do {
2384 v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2385 v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2386 v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2387 v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2388 } while (input<=limit);
2389
2390 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2391 h64 = XXH64_mergeRound(h64, v1);
2392 h64 = XXH64_mergeRound(h64, v2);
2393 h64 = XXH64_mergeRound(h64, v3);
2394 h64 = XXH64_mergeRound(h64, v4);
2395
2396 } else {
2397 h64 = seed + XXH_PRIME64_5;
2398 }
2399
2400 h64 += (xxh_u64) len;
2401
2402 return XXH64_finalize(h64, input, len, align);
2403 }
2404
2405
2406 /*! @ingroup xxh64_family */
2407 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
2408 {
2409 #if 0
2410 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2411 XXH64_state_t state;
2412 XXH64_reset(&state, seed);
2413 XXH64_update(&state, (const xxh_u8*)input, len);
2414 return XXH64_digest(&state);
2415 #else
2416 if (XXH_FORCE_ALIGN_CHECK) {
2417 if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
2418 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2419 } }
2420
2421 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2422
2423 #endif
2424 }
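/*
 * Illustrative usage, mirroring XXH32() (a sketch; the helper name is
 * hypothetical):
 */
#if 0
#include <string.h>
static XXH64_hash_t hash_cstring(const char* s, XXH64_hash_t seed)
{
    return XXH64(s, strlen(s), seed);
}
#endif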
2425
2426 /******* Hash Streaming *******/
2427
2428 /*! @ingroup xxh64_family*/
2429 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2430 {
2431 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2432 }
2433 /*! @ingroup xxh64_family */
2434 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2435 {
2436 XXH_free(statePtr);
2437 return XXH_OK;
2438 }
2439
2440 /*! @ingroup xxh64_family */
2441 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2442 {
2443 memcpy(dstState, srcState, sizeof(*dstState));
2444 }
2445
2446 /*! @ingroup xxh64_family */
2447 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2448 {
2449 XXH64_state_t state; /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
2450 memset(&state, 0, sizeof(state));
2451 state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2452 state.v2 = seed + XXH_PRIME64_2;
2453 state.v3 = seed + 0;
2454 state.v4 = seed - XXH_PRIME64_1;
2455 /* do not write into reserved64, might be removed in a future version */
2456 memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
2457 return XXH_OK;
2458 }
2459
2460 /*! @ingroup xxh64_family */
2461 XXH_PUBLIC_API XXH_errorcode
2462 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2463 {
2464 if (input==NULL)
2465 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
2466 return XXH_OK;
2467 #else
2468 return XXH_ERROR;
2469 #endif
2470
2471 { const xxh_u8* p = (const xxh_u8*)input;
2472 const xxh_u8* const bEnd = p + len;
2473
2474 state->total_len += len;
2475
2476 if (state->memsize + len < 32) { /* fill in tmp buffer */
2477 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2478 state->memsize += (xxh_u32)len;
2479 return XXH_OK;
2480 }
2481
2482 if (state->memsize) { /* tmp buffer is full */
2483 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2484 state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
2485 state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
2486 state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
2487 state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
2488 p += 32 - state->memsize;
2489 state->memsize = 0;
2490 }
2491
2492 /* uintptr_t casts avoid UB or compiler warning on out-of-bounds
2493 * pointer arithmetic */
2494 if ((uintptr_t)p + 32 <= (uintptr_t)bEnd) {
2495 const uintptr_t limit = (uintptr_t)bEnd - 32;
2496 xxh_u64 v1 = state->v1;
2497 xxh_u64 v2 = state->v2;
2498 xxh_u64 v3 = state->v3;
2499 xxh_u64 v4 = state->v4;
2500
2501 do {
2502 v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
2503 v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
2504 v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
2505 v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
2506 } while ((uintptr_t)p<=limit);
2507
2508 state->v1 = v1;
2509 state->v2 = v2;
2510 state->v3 = v3;
2511 state->v4 = v4;
2512 }
2513
2514 if (p < bEnd) {
2515 XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2516 state->memsize = (unsigned)(bEnd-p);
2517 }
2518 }
2519
2520 return XXH_OK;
2521 }
2522
2523
2524 /*! @ingroup xxh64_family */
2525 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2526 {
2527 xxh_u64 h64;
2528
2529 if (state->total_len >= 32) {
2530 xxh_u64 const v1 = state->v1;
2531 xxh_u64 const v2 = state->v2;
2532 xxh_u64 const v3 = state->v3;
2533 xxh_u64 const v4 = state->v4;
2534
2535 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2536 h64 = XXH64_mergeRound(h64, v1);
2537 h64 = XXH64_mergeRound(h64, v2);
2538 h64 = XXH64_mergeRound(h64, v3);
2539 h64 = XXH64_mergeRound(h64, v4);
2540 } else {
2541 h64 = state->v3 /*seed*/ + XXH_PRIME64_5;
2542 }
2543
2544 h64 += (xxh_u64) state->total_len;
2545
2546 return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2547 }
2548
2549
2550 /******* Canonical representation *******/
2551
2552 /*! @ingroup xxh64_family */
2553 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2554 {
2555 XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
2556 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2557 memcpy(dst, &hash, sizeof(*dst));
2558 }
2559
2560 /*! @ingroup xxh64_family */
2561 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2562 {
2563 return XXH_readBE64(src);
2564 }
2565
2566 #ifndef XXH_NO_XXH3
2567
2568 /* *********************************************************************
2569 * XXH3
2570 * New generation hash designed for speed on small keys and vectorization
2571 ************************************************************************ */
2572 /*!
2573 * @}
2574 * @defgroup xxh3_impl XXH3 implementation
2575 * @ingroup impl
2576 * @{
2577 */
2578
2579 /* === Compiler specifics === */
2580
2581 #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2582 # define XXH_RESTRICT /* disable */
2583 #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
2584 # define XXH_RESTRICT restrict
2585 #else
2586 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2587 # define XXH_RESTRICT /* disable */
2588 #endif
2589
2590 #if (defined(__GNUC__) && (__GNUC__ >= 3)) \
2591 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2592 || defined(__clang__)
2593 # define XXH_likely(x) __builtin_expect(x, 1)
2594 # define XXH_unlikely(x) __builtin_expect(x, 0)
2595 #else
2596 # define XXH_likely(x) (x)
2597 # define XXH_unlikely(x) (x)
2598 #endif
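/*
 * Illustrative use (a sketch): these wrap __builtin_expect() where available
 * to bias code layout toward the expected path, and compile to a plain
 * condition elsewhere:
 *
 *     if (XXH_unlikely(input == NULL)) { return XXH_ERROR; }
 */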
2599
2600 #if defined(__GNUC__)
2601 # if defined(__AVX2__)
2602 # include <immintrin.h>
2603 # elif defined(__SSE2__)
2604 # include <emmintrin.h>
2605 # elif defined(__ARM_NEON__) || defined(__ARM_NEON)
2606 # define inline __inline__ /* circumvent a clang bug */
2607 # include <arm_neon.h>
2608 # undef inline
2609 # endif
2610 #elif defined(_MSC_VER)
2611 # include <intrin.h>
2612 #endif
2613
2614 /*
2615 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2616 * remaining a true 64-bit/128-bit hash function.
2617 *
2618 * This is done by prioritizing a subset of 64-bit operations that can be
2619 * emulated without too many steps on the average 32-bit machine.
2620 *
2621 * For example, these two lines seem similar, and run equally fast on 64-bit:
2622 *
2623 * xxh_u64 x;
2624 * x ^= (x >> 47); // good
2625 * x ^= (x >> 13); // bad
2626 *
2627 * However, to a 32-bit machine, there is a major difference.
2628 *
2629 * x ^= (x >> 47) looks like this:
2630 *
2631 * x.lo ^= (x.hi >> (47 - 32));
2632 *
2633 * while x ^= (x >> 13) looks like this:
2634 *
2635 * // note: funnel shifts are not usually cheap.
2636 * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2637 * x.hi ^= (x.hi >> 13);
2638 *
2639 * The first one is significantly faster than the second, simply because the
2640 * shift is larger than 32. This means:
2641 * - All the bits we need are in the upper 32 bits, so we can ignore the lower
2642 * 32 bits in the shift.
2643 * - The shift result will always fit in the lower 32 bits, and therefore,
2644 * we can ignore the upper 32 bits in the xor.
2645 *
2646 * Thanks to this optimization, XXH3 only requires these features to be efficient:
2647 *
2648 * - Usable unaligned access
2649 * - A 32-bit or 64-bit ALU
2650 * - If 32-bit, a decent ADC instruction
2651 * - A 32 or 64-bit multiply with a 64-bit result
2652 * - For the 128-bit variant, a decent byteswap helps short inputs.
2653 *
2654 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2655 * platforms which can run XXH32 can run XXH3 efficiently.
2656 *
2657 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2658 * notable exception.
2659 *
2660 * First of all, Thumb-1 lacks support for the UMULL instruction which
2661 * performs the important long multiply. This means numerous __aeabi_lmul
2662 * calls.
2663 *
2664 * Second of all, the 8 functional registers are just not enough.
2665 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2666 * Lo registers, and this shuffling results in thousands more MOVs than A32.
2667 *
2668 * A32 and T32 don't have this limitation. They can access all 14 registers,
2669 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2670 * shifts is helpful, too.
2671 *
2672 * Therefore, we do a quick sanity check.
2673 *
2674 * If compiling Thumb-1 for a target which supports ARM instructions, we will
2675 * emit a warning, as it is not a "sane" platform to compile for.
2676 *
2677 * Usually, if this happens, it is because of an accident and you probably need
2678 * to specify -march, as you likely meant to compile for a newer architecture.
2679 *
2680 * Credit: large sections of the vectorial and asm source code paths
2681 * have been contributed by @easyaspi314
2682 */
2683 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2684 # warning "XXH3 is highly inefficient without ARM or Thumb-2."
2685 #endif
2686
2687 /* ==========================================
2688 * Vectorization detection
2689 * ========================================== */
2690
2691 #ifdef XXH_DOXYGEN
2692 /*!
2693 * @ingroup tuning
2694 * @brief Overrides the vectorization implementation chosen for XXH3.
2695 *
2696 * Can be defined to 0 to disable SIMD or any of the values mentioned in
2697 * @ref XXH_VECTOR_TYPE.
2698 *
2699 * If this is not defined, it uses predefined macros to determine the best
2700 * implementation.
2701 */
2702 # define XXH_VECTOR XXH_SCALAR
2703 /*!
2704 * @ingroup tuning
2705 * @brief Possible values for @ref XXH_VECTOR.
2706 *
2707 * Note that these are actually implemented as macros.
2708 *
2709 * If this is not defined, it is detected automatically.
2710 * @ref XXH_X86DISPATCH overrides this.
2711 */
2712 enum XXH_VECTOR_TYPE /* fake enum */ {
2713 XXH_SCALAR = 0, /*!< Portable scalar version */
2714 XXH_SSE2 = 1, /*!<
2715 * SSE2 for Pentium 4, Opteron, all x86_64.
2716 *
2717 * @note SSE2 is also guaranteed on Windows 10, macOS, and
2718 * Android x86.
2719 */
2720 XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
2721 XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
2722 XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */
2723 XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
2724 };
2725 /*!
2726 * @ingroup tuning
2727 * @brief Selects the minimum alignment for XXH3's accumulators.
2728 *
2729  * When using SIMD, this should match the alignment required for said vector
2730 * type, so, for example, 32 for AVX2.
2731 *
2732 * Default: Auto detected.
2733 */
2734 # define XXH_ACC_ALIGN 8
2735 #endif
2736
2737 /* Actual definition */
2738 #ifndef XXH_DOXYGEN
2739 # define XXH_SCALAR 0
2740 # define XXH_SSE2 1
2741 # define XXH_AVX2 2
2742 # define XXH_AVX512 3
2743 # define XXH_NEON 4
2744 # define XXH_VSX 5
2745 #endif
2746
2747 #ifndef XXH_VECTOR /* can be defined on command line */
2748 # if defined(__AVX512F__)
2749 # define XXH_VECTOR XXH_AVX512
2750 # elif defined(__AVX2__)
2751 # define XXH_VECTOR XXH_AVX2
2752 # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2753 # define XXH_VECTOR XXH_SSE2
2754 # elif defined(__GNUC__) /* msvc support maybe later */ \
2755 && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \
2756 && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \
2757 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
2758 # define XXH_VECTOR XXH_NEON
2759 # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2760 || (defined(__s390x__) && defined(__VEC__)) \
2761 && defined(__GNUC__) /* TODO: IBM XL */
2762 # define XXH_VECTOR XXH_VSX
2763 # else
2764 # define XXH_VECTOR XXH_SCALAR
2765 # endif
2766 #endif
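/*
 * Illustrative override (a sketch): the detection above can be bypassed by
 * defining XXH_VECTOR on the command line, e.g. to force the portable scalar
 * code path for debugging or benchmarking:
 *
 *     cc -DXXH_VECTOR=XXH_SCALAR ...   (or, equivalently, -DXXH_VECTOR=0)
 */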
2767
2768 /*
2769 * Controls the alignment of the accumulator,
2770 * for compatibility with aligned vector loads, which are usually faster.
2771 */
2772 #ifndef XXH_ACC_ALIGN
2773 # if defined(XXH_X86DISPATCH)
2774 # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
2775 # elif XXH_VECTOR == XXH_SCALAR /* scalar */
2776 # define XXH_ACC_ALIGN 8
2777 # elif XXH_VECTOR == XXH_SSE2 /* sse2 */
2778 # define XXH_ACC_ALIGN 16
2779 # elif XXH_VECTOR == XXH_AVX2 /* avx2 */
2780 # define XXH_ACC_ALIGN 32
2781 # elif XXH_VECTOR == XXH_NEON /* neon */
2782 # define XXH_ACC_ALIGN 16
2783 # elif XXH_VECTOR == XXH_VSX /* vsx */
2784 # define XXH_ACC_ALIGN 16
2785 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */
2786 # define XXH_ACC_ALIGN 64
2787 # endif
2788 #endif
2789
2790 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2791 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2792 # define XXH_SEC_ALIGN XXH_ACC_ALIGN
2793 #else
2794 # define XXH_SEC_ALIGN 8
2795 #endif
2796
2797 /*
2798 * UGLY HACK:
2799 * GCC usually generates the best code with -O3 for xxHash.
2800 *
2801 * However, when targeting AVX2, it is overzealous in its unrolling resulting
2802 * in code roughly 3/4 the speed of Clang.
2803 *
2804 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2805 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2806 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2807 *
2808 * That is why when compiling the AVX2 version, it is recommended to use either
2809 * -O2 -mavx2 -march=haswell
2810 * or
2811 * -O2 -mavx2 -mno-avx256-split-unaligned-load
2812 * for decent performance, or to use Clang instead.
2813 *
2814 * Fortunately, we can control the first one with a pragma that forces GCC into
2815 * -O2, but the other one we can't control without "failed to inline always
2816 * inline function due to target mismatch" warnings.
2817 */
2818 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2819 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2820 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2821 # pragma GCC push_options
2822 # pragma GCC optimize("-O2")
2823 #endif
2824
2825
2826 #if XXH_VECTOR == XXH_NEON
2827 /*
2828 * NEON's setup for vmlal_u32 is a little more complicated than it is on
2829 * SSE2, AVX2, and VSX.
2830 *
2831 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2832 *
2833 * To do the same operation, the 128-bit 'Q' register needs to be split into
2834  * two 64-bit 'D' registers, performing this operation:
2835 *
2836 * [ a | b ]
2837 * | '---------. .--------' |
2838 * | x |
2839 * | .---------' '--------. |
2840 * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
2841 *
2842  * Due to significant changes in aarch64, the fastest method for aarch64 is
2843  * completely different from the fastest method for ARMv7-A.
2844 *
2845 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2846 * D11 will modify the high half of Q5. This is similar to how modifying AH
2847 * will only affect bits 8-15 of AX on x86.
2848 *
2849 * VZIP takes two registers, and puts even lanes in one register and odd lanes
2850 * in the other.
2851 *
2852 * On ARMv7-A, this strangely modifies both parameters in place instead of
2853 * taking the usual 3-operand form.
2854 *
2855 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
2856 * lower and upper halves of the Q register to end up with the high and low
2857 * halves where we want - all in one instruction.
2858 *
2859 * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
2860 *
2861  * Unfortunately we need inline assembly for this: instructions modifying two
2862  * registers at once are not expressible in GCC or Clang's IR, so without asm
2863  * they have to create a copy.
2864 *
2865 * aarch64 requires a different approach.
2866 *
2867 * In order to make it easier to write a decent compiler for aarch64, many
2868 * quirks were removed, such as conditional execution.
2869 *
2870 * NEON was also affected by this.
2871 *
2872 * aarch64 cannot access the high bits of a Q-form register, and writes to a
2873 * D-form register zero the high bits, similar to how writes to W-form scalar
2874 * registers (or DWORD registers on x86_64) work.
2875 *
2876 * The formerly free vget_high intrinsics now require a vext (with a few
2877  * exceptions).
2878 *
2879 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
2880 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
2881 * operand.
2882 *
2883 * The equivalent of the VZIP.32 on the lower and upper halves would be this
2884 * mess:
2885 *
2886 * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
2887 * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
2888 * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
2889 *
2890 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
2891 *
2892 * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
2893 * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
2894 *
2895 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
2896 */
2897
2898 /*!
2899 * Function-like macro:
2900 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
2901 * {
2902 * outLo = (uint32x2_t)(in & 0xFFFFFFFF);
2903 * outHi = (uint32x2_t)(in >> 32);
2904 * in = UNDEFINED;
2905 * }
2906 */
2907 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
2908 && defined(__GNUC__) \
2909 && !defined(__aarch64__) && !defined(__arm64__)
2910 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
2911 do { \
2912 /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
2913 /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
2914 /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
2915 __asm__("vzip.32 %e0, %f0" : "+w" (in)); \
2916 (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
2917 (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
2918 } while (0)
2919 # else
2920 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
2921 do { \
2922 (outLo) = vmovn_u64 (in); \
2923 (outHi) = vshrn_n_u64 ((in), 32); \
2924 } while (0)
2925 # endif
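/*
 * Illustrative use (a sketch; the variable names are hypothetical): split each
 * 64-bit lane into its 32-bit halves ahead of a widening multiply such as
 * vmlal_u32():
 *
 *     uint64x2_t data = vreinterpretq_u64_u8(vld1q_u8(ptr));
 *     uint32x2_t dataLo, dataHi;
 *     XXH_SPLIT_IN_PLACE(data, dataLo, dataHi);  // note: 'data' is clobbered
 */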
2926 #endif /* XXH_VECTOR == XXH_NEON */
2927
2928 /*
2929 * VSX and Z Vector helpers.
2930 *
2931 * This is very messy, and any pull requests to clean this up are welcome.
2932 *
2933 * There are a lot of problems with supporting VSX and s390x, due to
2934 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
2935 */
2936 #if XXH_VECTOR == XXH_VSX
2937 # if defined(__s390x__)
2938 # include <s390intrin.h>
2939 # else
2940 /* gcc's altivec.h can have the unwanted consequence of unconditionally
2941  *   #defining the bool, vector, and pixel keywords,
2942 * with bad consequences for programs already using these keywords for other purposes.
2943 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
2944 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
2945 * but it seems that, in some cases, it isn't.
2946 * Force the build macro to be defined, so that keywords are not altered.
2947 */
2948 # if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
2949 # define __APPLE_ALTIVEC__
2950 # endif
2951 # include <altivec.h>
2952 # endif
2953
2954 typedef __vector unsigned long long xxh_u64x2;
2955 typedef __vector unsigned char xxh_u8x16;
2956 typedef __vector unsigned xxh_u32x4;
2957
2958 # ifndef XXH_VSX_BE
2959 # if defined(__BIG_ENDIAN__) \
2960 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2961 # define XXH_VSX_BE 1
2962 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
2963 # warning "-maltivec=be is not recommended. Please use native endianness."
2964 # define XXH_VSX_BE 1
2965 # else
2966 # define XXH_VSX_BE 0
2967 # endif
2968 # endif /* !defined(XXH_VSX_BE) */
2969
2970 # if XXH_VSX_BE
2971 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
2972 # define XXH_vec_revb vec_revb
2973 # else
2974 /*!
2975 * A polyfill for POWER9's vec_revb().
2976 */
2977 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
2978 {
2979 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
2980 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
2981 return vec_perm(val, val, vByteSwap);
2982 }
2983 # endif
2984 # endif /* XXH_VSX_BE */
2985
2986 /*!
2987 * Performs an unaligned vector load and byte swaps it on big endian.
2988 */
2989 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
2990 {
2991 xxh_u64x2 ret;
2992 memcpy(&ret, ptr, sizeof(xxh_u64x2));
2993 # if XXH_VSX_BE
2994 ret = XXH_vec_revb(ret);
2995 # endif
2996 return ret;
2997 }
2998
2999 /*
3000  * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
3001  *
3002  * These intrinsics weren't added until GCC 8, despite existing for a while,
3003  * and they are endian dependent. Also, their meaning swaps depending on the version.
3004  */
3005 # if defined(__s390x__)
3006 /* s390x is always big endian, no issue on this platform */
3007 # define XXH_vec_mulo vec_mulo
3008 # define XXH_vec_mule vec_mule
3009 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3010 /* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
3011 # define XXH_vec_mulo __builtin_altivec_vmulouw
3012 # define XXH_vec_mule __builtin_altivec_vmuleuw
3013 # else
3014 /* gcc needs inline assembly */
3015 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3016 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3017 {
3018 xxh_u64x2 result;
3019 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3020 return result;
3021 }
XXH_vec_mule(xxh_u32x4 a,xxh_u32x4 b)3022 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3023 {
3024 xxh_u64x2 result;
3025 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3026 return result;
3027 }
3028 # endif /* XXH_vec_mulo, XXH_vec_mule */
3029 #endif /* XXH_VECTOR == XXH_VSX */
3030
3031
3032 /* prefetch
3033 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
3034 #if defined(XXH_NO_PREFETCH)
3035 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3036 #else
3037 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
3038 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3039 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3040 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3041 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3042 # else
3043 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3044 # endif
3045 #endif /* XXH_NO_PREFETCH */
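
/*
 * Usage sketch (illustrative, not part of the public API): inside a stripe
 * loop, prefetch data a fixed distance ahead of the current read position,
 * mirroring what XXH3_accumulate() does further below:
 *
 *     XXH_PREFETCH(in + XXH_PREFETCH_DIST);
 *     f_acc512(acc, in, secret);
 *
 * On compilers without prefetch support, XXH_PREFETCH expands to a no-op,
 * so callers need no fallback of their own.
 */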


/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* must be >= XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};


#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 * need to (but it shouldn't need to anyway, it is about 7 instructions to do
 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
 * use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#  include <intrin.h>
#  define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#  define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
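
/*
 * Worked example (illustrative): XXH_mult32to64(0x100000002ULL, 3) keeps
 * only the low 32 bits of each operand, so it computes 2 * 3 == 6,
 * not 0x300000006.
 */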

/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs, rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Even on 32-bit platforms, Clang (and emscripten) define this type
     * although they lack the native arithmetic for it. This results in a
     * laggy compiler builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if defined(__GNUC__) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to a single-operand MUL on x64.
     */
#elif defined(_M_X64) || defined(_M_IA64)

#ifndef _MSC_VER
#  pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *       + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *       + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
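
/*
 * Worked example (illustrative): XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL, 2)
 * is (2^64 - 1) * 2 == 2^65 - 2, so it returns
 * high64 == 0x0000000000000001 and low64 == 0xFFFFFFFFFFFFFFFE.
 */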

/*!
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
 *
 * The reason for the separate function is to prevent passing too many structs
 * around by value. This will hopefully inline the multiply, but we don't force it.
 *
 * @param lhs, rhs The 64-bit integers to multiply
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
 * @see XXH_mult64to128()
 */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
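
/*
 * Continuing the example above (illustrative):
 * XXH3_mul128_fold64(0xFFFFFFFFFFFFFFFFULL, 2)
 * == 0xFFFFFFFFFFFFFFFEULL ^ 0x1ULL == 0xFFFFFFFFFFFFFFFFULL.
 */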

/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}

/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed.
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= 0x165667919E3779F9ULL;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche,
 * inspired by Pelle Evensen's rrmxmx,
 * preferable when input has not been previously mixed.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= 0x9FB21C651E98DF25ULL;
    h64 ^= (h64 >> 35) + len;
    h64 *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(h64, 28);
}


/* ==========================================
 * Short keys
 * ==========================================
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
 * sub-optimal on short lengths. They used an iterative algorithm which strongly
 * favored lengths that were a multiple of 4 or 8.
 *
 * Instead of iterating over individual inputs, we use a set of single shot
 * functions which piece together a range of lengths and operate in constant time.
 *
 * Additionally, the number of multiplies has been significantly reduced. This
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
 *
 * Depending on the platform, this may or may not be faster than XXH32, but it
 * is almost guaranteed to be faster than XXH64.
 */

/*
 * At very short lengths, there isn't enough input to fully hide secrets, or use
 * the entire secret.
 *
 * There is also only a limited amount of mixing we can do before significantly
 * impacting performance.
 *
 * Therefore, we use different sections of the secret and always mix two secret
 * samples with an XOR. This should have no effect on performance on the
 * seedless or withSeed variants because everything _should_ be constant folded
 * by modern compilers.
 *
 * The XOR mixing hides individual parts of the secret and increases entropy.
 *
 * This adds an extra layer of strength for custom secrets.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
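
/*
 * Worked example (illustrative): input = { 0xAB, 0xCD }, len = 2 gives
 * c1 = 0xAB, c2 = c3 = 0xCD, so combined == 0xCDAB02CD, whose
 * little-endian byte order { 0xCD, 0x02, 0xAB, 0xCD } matches the
 * len = 2 row in the table above.
 */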

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}

/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 *
 * However, they are very unlikely.
 *
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
 * against specially crafted inputs, only random inputs.
 *
 * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
 * cancelling out the secret can be hit an arbitrary number of times (addressed
 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
 * and/or proper seeding:
 *
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
 * function that is only called up to 16 times per hash with up to 240 bytes of
 * input.
 *
 * This is not too bad for a non-cryptographic hash function, especially with
 * only 64 bit outputs.
 *
 * The 128-bit variant (which trades some speed for strength) is NOT affected
 * by this, although it is always a good idea to use a proper seed if you care
 * about strength.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable, like the XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is of interest.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}
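
/*
 * In effect (illustrative restatement), XXH3_mix16B computes
 *
 *     mul128_fold64(input[0..7]  ^ (secret[0..7]  + seed),
 *                   input[8..15] ^ (secret[8..15] - seed))
 *
 * where each 8-byte range is read as a little-endian 64-bit word.
 */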

/* For mid-range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);

        return XXH3_avalanche(acc);
    }
}
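
/*
 * Illustrative example: at len == 100, all three inner branches are taken,
 * so 8 overlapping 16-byte windows are mixed: input[0..15] with
 * input[84..99], input[16..31] with input[68..83], input[32..47] with
 * input[52..67], and input[48..63] with input[36..51], each pair keyed
 * by a fresh 32-byte slice of the secret.
 */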

#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        int const nbRounds = (int)len / 16;
        int i;
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        acc = XXH3_avalanche(acc);
        XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * Everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but the cases are usually relatively minor and/or
         * not worth fixing.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        /* last bytes */
        acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        return XXH3_avalanche(acc);
    }
}
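
/*
 * Illustrative example: at len == 240 (the maximum), nbRounds == 15:
 * 8 rounds mix input[0..127] with secret[0..127], then after an avalanche,
 * 7 more rounds reuse secret[3..114], and the closing XXH3_mix16B reads
 * secret[119..134].
 */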


/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    memcpy(dst, &v64, sizeof(v64));
}

/* Several intrinsic functions below are supposed to accept __int64 as argument,
 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
 * However, several environments do not define the __int64 type,
 * requiring a workaround.
 */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64-bit */
    typedef long long xxh_i64;
#endif

/*
 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
 *
 * It is a hardened version of UMAC, based on FARSH's implementation.
 *
 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
 * implementations, and it is ridiculously fast.
 *
 * We harden it by mixing the original input into the accumulators as well as the product.
 *
 * This means that in the (relatively likely) case of a multiply by zero, the
 * original input is preserved.
 *
 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
 * cross-pollination, as otherwise the upper and lower halves would be
 * essentially independent.
 *
 * This doesn't matter on 64-bit hashes since they all get merged together in
 * the end, so we skip the extra step.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */
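
/*
 * Conceptual per-lane sketch (illustrative; what every variant below
 * implements, lane by lane):
 *
 *     data_key    = input64 ^ secret64;
 *     acc[i]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);
 *     acc[i ^ 1] += input64;   // the "mix the original input" hardening
 *
 * In the vector versions the last line becomes a 64-bit lane swap added
 * to the accumulator.
 */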

#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}

/*
 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
 *
 * Multiplication isn't perfect, as explained by Google in HighwayHash:
 *
 *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
 *  // varying degrees. In descending order of goodness, bytes
 *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
 *  // As expected, the upper and lower bytes are much worse.
 *
 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
 *
 * Since our algorithm uses a pseudorandom secret to add some variance into the
 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
 *
 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
 * extraction.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));

        XXH_ALIGN(64) const __m512i* const src  = (const __m512i*) XXH3_kSecret;
        XXH_ALIGN(64)       __m512i* const dest = (      __m512i*) customSecret;
        int i;
        for (i=0; i < nbRounds; ++i) {
            /* GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not
             * 'void const*'; passing a const pointer directly would warn
             * "discards 'const' qualifier". */
            union {
                XXH_ALIGN(64) const __m512i* cp;
                XXH_ALIGN(64) void* p;
            } remote_const_void;
            remote_const_void.cp = src + i;
            dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
    }   }
}

#endif

#if (XXH_VECTOR == XXH_AVX2) \
    || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)

#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   XXH_ALIGN(32) __m256i* const xacc    =       (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xinput  = (const __m256i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec    = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const         __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
            /* xacc[i] ^= xsecret; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64);
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);

        XXH_ALIGN(64) const __m256i* const src  = (const __m256i*) XXH3_kSecret;
        XXH_ALIGN(64)       __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
         *   - not extract the secret from SSE registers in the internal loop
         *   - use fewer common registers, avoiding pushing them onto the stack
         */
        XXH_COMPILER_GUARD(dest);
#       endif

        /* GCC -O2 needs the loop unrolled manually */
        dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
    }
}

#endif

/* x86dispatch always generates SSE2 */
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)

#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   XXH_ALIGN(16) __m128i* const xacc    =       (__m128i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xinput  = (const __m128i *) input;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec    = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
        const         __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
#       endif
        int i;

        XXH_ALIGN(64)            const float* const src  = (float const*) XXH3_kSecret;
        XXH_ALIGN(XXH_SEC_ALIGN) __m128i*           dest = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
         *   - not extract the secret from SSE registers in the internal loop
         *   - use fewer common registers, avoiding pushing them onto the stack
         */
        XXH_COMPILER_GUARD(dest);
#       endif

        for (i=0; i < nbRounds; ++i) {
            dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed);
    }   }
}

#endif

#if (XXH_VECTOR == XXH_NEON)

XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {
        XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* const xinput  = (const uint8_t *) input;
        uint8_t const* const xsecret = (const uint8_t *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
            /* data_vec = xinput[i]; */
            uint8x16_t data_vec    = vld1q_u8(xinput  + (i * 16));
            /* key_vec  = xsecret[i]; */
            uint8x16_t key_vec     = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key;
            uint32x2_t data_key_lo, data_key_hi;
            /* xacc[i] += swap(data_vec); */
            uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
            uint64x2_t const swapped = vextq_u64(data64, data64, 1);
            xacc[i] = vaddq_u64 (xacc[i], swapped);
            /* data_key = data_vec ^ key_vec; */
            data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
            /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (data_key >> 32);
             * data_key = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
            xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);

        }
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   uint64x2_t* xacc       = (uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;
        uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
            uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint8x16_t key_vec  = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));

            /* xacc[i] *= XXH_PRIME32_1 */
            uint32x2_t data_key_lo, data_key_hi;
            /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
             * xacc[i] = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            {   /*
                 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
                 *
                 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
                 * incorrectly "optimize" this:
                 *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
                 *   shifted = vshll_n_u32(tmp, 32);
                 * to this:
                 *   tmp     = "vmulq_u64"(a, b); // no such thing!
                 *   shifted = vshlq_n_u64(tmp, 32);
                 *
                 * However, unlike SSE, Clang lacks a 64-bit multiply routine
                 * for NEON, and it scalarizes two 64-bit multiplies instead.
                 *
                 * vmull_u32 has the same timing as vmul_u32, and it avoids
                 * this bug completely.
                 * See https://bugs.llvm.org/show_bug.cgi?id=39967
                 */
                uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
                /* xacc[i] = prod_hi << 32; */
                xacc[i] = vshlq_n_u64(prod_hi, 32);
                /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
                xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
            }
    }   }
}

#endif

#if (XXH_VECTOR == XXH_VSX)

XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    xxh_u64x2* const xacc          =       (xxh_u64x2*) acc;    /* presumed aligned */
    xxh_u64x2 const* const xinput  = (xxh_u64x2 const*) input;  /* no alignment restriction */
    xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        xacc[i] += product;

        /* swap high and low halves */
#ifdef __s390x__
        xacc[i] += vec_permi(data_vec, data_vec, 2);
#else
        xacc[i] += vec_xxpermdi(data_vec, data_vec, 2);
#endif
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   xxh_u64x2* const xacc = (xxh_u64x2*) acc;
        const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
        /* constants */
        xxh_u64x2 const v32 = { 32, 32 };
        xxh_u64x2 const v47 = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 */
            /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
            xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
            /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}

#endif

/* scalar variants - universal */

XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
    const xxh_u8* const xinput  = (const xxh_u8*) input;  /* no alignment restriction */
    const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
    size_t i;
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
        xacc[i ^ 1] += data_val; /* swap adjacent lanes */
        xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
    const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
    size_t i;
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
        xxh_u64 acc64 = xacc[i];
        acc64 = XXH_xorshift64(acc64, 47);
        acc64 ^= key64;
        acc64 *= XXH_PRIME32_1;
        xacc[i] = acc64;
    }
}

XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * We need a separate pointer for the hack below,
     * which requires a non-const pointer.
     * Any decent compiler will optimize this out otherwise.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__clang__) && defined(__aarch64__)
    /*
     * UGLY HACK:
     * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
     * placed sequentially, in order, at the top of the unrolled loop.
     *
     * While MOVK is great for generating constants (2 cycles for a 64-bit
     * constant compared to 4 cycles for LDR), long MOVK chains stall the
     * integer pipelines:
     *   I   L   S
     * MOVK
     * MOVK
     * MOVK
     * MOVK
     * ADD
     * SUB      STR
     *          STR
     * By forcing loads from memory (as the asm line causes Clang to assume
     * that kSecretPtr has been changed), the pipelines are used more
     * efficiently:
     *   I   L   S
     *      LDR
     *  ADD LDR
     *  SUB     STR
     *          STR
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
     *   without hack: 2654.4 MB/s
     *   with hack:    3202.9 MB/s
     */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    /*
     * Note: in debug mode, this overrides the asm optimization
     * and Clang will emit MOVK chains again.
     */
    XXH_ASSERT(kSecretPtr == XXH3_kSecret);

    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /*
             * The asm hack causes Clang to assume that kSecretPtr aliases with
             * customSecret, and on aarch64, this prevented LDP from merging two
             * loads together for free. Putting the loads together before the stores
             * properly generates LDP.
             */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
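
/*
 * Net effect (illustrative): for each 16-byte block k of the secret,
 *
 *     customSecret[16k   .. 16k+7 ] = kSecret[16k   .. 16k+7 ] + seed64
 *     customSecret[16k+8 .. 16k+15] = kSecret[16k+8 .. 16k+15] - seed64
 *
 * with each 8-byte half read and written as a little-endian 64-bit word.
 * The SIMD variants above compute the same result.
 */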


typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);


#if (XXH_VECTOR == XXH_AVX512)

#define XXH3_accumulate_512   XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc      XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)

#define XXH3_accumulate_512   XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc      XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)

#define XXH3_accumulate_512   XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc      XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)

#define XXH3_accumulate_512   XXH3_accumulate_512_neon
#define XXH3_scrambleAcc      XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)

#define XXH3_accumulate_512   XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc      XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */

#define XXH3_accumulate_512   XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc      XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#endif


#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */

/*
 * XXH3_accumulate()
 * Loops over XXH3_accumulate_512().
 * Assumption: nbStripes will not overflow the secret size.
 */
XXH_FORCE_INLINE void
XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
                const xxh_u8* XXH_RESTRICT input,
                const xxh_u8* XXH_RESTRICT secret,
                      size_t nbStripes,
                      XXH3_f_accumulate_512 f_acc512)
{
    size_t n;
    for (n = 0; n < nbStripes; n++ ) {
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        f_acc512(acc,
                 in,
                 secret + n*XXH_SECRET_CONSUME_RATE);
    }
}

XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;

    size_t n;

    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);

    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }

    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);

        /* last stripe */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
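
/*
 * Illustrative sizing with the default 192-byte secret:
 * nbStripesPerBlock == (192 - 64) / 8 == 16, so block_len == 1024 bytes,
 * and the accumulators are scrambled once every 16 stripes.
 */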

XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    return XXH3_mul128_fold64(
               acc[0] ^ XXH_readLE64(secret),
               acc[1] ^ XXH_readLE64(secret+8) );
}

static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i = 0;

    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
         * XXH3_64bits, len == 256, Snapdragon 835:
         *   without hack: 2063.7 MB/s
         *   with hack:    2560.7 MB/s
         */
        XXH_COMPILER_GUARD(result64);
#endif
    }

    return XXH3_avalanche(result64);
}

#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }

XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate_512 f_acc512,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 * Since the function is not inlined, the compiler may not be able to understand that,
 * in some scenarios, its `secret` argument is actually a compile time constant.
 * This variant enforces that the compiler can detect that,
 * and uses this opportunity to streamline the generated code for better performance.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate_512 f_acc512,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}


typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken should the `secretLen` condition not be
     * respected, it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}


/* === Public entry point === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4424 }
4425
4426 /*! @ingroup xxh3_family */
4427 XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void * input,size_t len,const void * secret,size_t secretSize)4428 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4429 {
4430 return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4431 }
4432
4433 /*! @ingroup xxh3_family */
4434 XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void * input,size_t len,XXH64_hash_t seed)4435 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4436 {
4437 return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4438 }
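
/*
 * Illustrative usage sketch (not part of the library): one-shot hashing with
 * the entry points above. The buffer name and seed value are hypothetical;
 * only the XXH3_64bits* functions defined in this file are assumed. Note that
 * with XXH_NAMESPACE defined (as in the RocksDB build), callers reach these
 * through the namespaced aliases.
 *
 *     #include <stdio.h>
 *
 *     static void example_oneshot_64(void)
 *     {
 *         const char data[] = "xxhash example";
 *         size_t const len = sizeof(data) - 1;
 *         // Default secret, seed 0: the fastest path.
 *         XXH64_hash_t const h0 = XXH3_64bits(data, len);
 *         // Seeded variant: derives a custom secret when seed != 0.
 *         XXH64_hash_t const h1 = XXH3_64bits_withSeed(data, len, 42);
 *         printf("%llx %llx\n", (unsigned long long)h0, (unsigned long long)h1);
 *     }
 */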


/* === XXH3 streaming === */

/*
 * Allocates a pointer that is always aligned to `align`.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
 * or, on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: functions like posix_memalign or _mm_malloc
 * are avoided because, to maintain portability, we would have to write a
 * fallback like this anyways, and besides, testing for the existence of
 * library functions without relying on external build tools is impossible.
 *
 * The method is simple: overallocate, manually align, and store the offset
 * to the original allocation behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
static void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
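
/*
 * Worked example of the offset arithmetic above (illustrative numbers only):
 * with align == 64 and a base pointer of 0x1030 returned by XXH_malloc():
 *
 *     offset = 64 - (0x1030 & 63) = 64 - 0x30 = 0x10   // 1 <= offset <= 64
 *     ptr    = 0x1030 + 0x10      = 0x1040             // 64-byte aligned
 *     ptr[-1] = 0x10                                   // saved for the free path
 *
 * XXH_alignedFree(ptr) then recomputes base = ptr - ptr[-1] = 0x1030 and
 * hands it back to XXH_free().
 */
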
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    memcpy(dst_state, src_state, sizeof(*dst_state));
}

static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/* Note : when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte will be consumed from input
 * so that the function can blindly consume all stripes using the "normal" secret segment */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}

/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* state,
            const xxh_u8* input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;

        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {  /* fill in tmp buffer */
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }
        /* total input is now > XXH3_INTERNALBUFFER_SIZE */

#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);  /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(state->acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* Consume input by a multiple of internal buffer size */
        if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) {
            const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
            do {
                XXH3_consumeStripes(state->acc,
                                   &state->nbStripesSoFar, state->nbStripesPerBlock,
                                    input, XXH3_INTERNALBUFFER_STRIPES,
                                    secret, state->secretLimit,
                                    f_acc512, f_scramble);
                input += XXH3_INTERNALBUFFER_SIZE;
            } while (input<limit);
            /* for last partial stripe */
            memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
        }
        XXH_ASSERT(input < bEnd);

        /* Some remaining input (always) : buffer it */
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
    }

    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}


XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->seed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
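
/*
 * Illustrative streaming sketch (not part of the library): feeding input in
 * chunks, then extracting a digest. The chunk parameters are hypothetical;
 * only the XXH3 streaming API defined above is assumed.
 *
 *     static XXH64_hash_t example_stream_64(const void* p1, size_t n1,
 *                                           const void* p2, size_t n2)
 *     {
 *         XXH64_hash_t result = 0;
 *         XXH3_state_t* const state = XXH3_createState();
 *         if (state == NULL) return 0;   // allocation failure
 *         if (XXH3_64bits_reset(state) == XXH_OK
 *          && XXH3_64bits_update(state, p1, n1) == XXH_OK
 *          && XXH3_64bits_update(state, p2, n2) == XXH_OK) {
 *             // digest() works on a local copy of the accumulators, so the
 *             // state is not consumed: more updates may follow.
 *             result = XXH3_64bits_digest(state);
 *         }
 *         XXH3_freeState(state);
 *         return result;
 *     }
 */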


#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize)
{
    XXH_ASSERT(secretBuffer != NULL);
    if (customSeedSize == 0) {
        memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
        return;
    }
    XXH_ASSERT(customSeed != NULL);

    {   size_t const segmentSize = sizeof(XXH128_hash_t);
        size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
        XXH128_canonical_t scrambler;
        XXH64_hash_t seeds[12];
        size_t segnb;
        XXH_ASSERT(nbSegments == 12);
        XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE); /* exact multiple */
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));

        /*
         * Copy customSeed to seeds[], truncating or repeating as necessary.
         */
        {   size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
            size_t filled = toFill;
            memcpy(seeds, customSeed, toFill);
            while (filled < sizeof(seeds)) {
                toFill = XXH_MIN(filled, sizeof(seeds) - filled);
                memcpy((char*)seeds + filled, seeds, toFill);
                filled += toFill;
        }   }

        /* generate secret */
        memcpy(secretBuffer, &scrambler, sizeof(scrambler));
        for (segnb=1; segnb < nbSegments; segnb++) {
            size_t const segmentStart = segnb * segmentSize;
            XXH128_canonical_t segment;
            XXH128_canonicalFromHash(&segment,
                XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) );
            memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment));
    }   }
}
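
/*
 * Illustrative sketch (not part of the library): deriving a full-size secret
 * from arbitrary seed material, then hashing with it. The seed string is
 * hypothetical; XXH3_generateSecret() and XXH3_64bits_withSecret() are the
 * functions defined in this file, and the buffer must hold
 * XXH_SECRET_DEFAULT_SIZE bytes.
 *
 *     static XXH64_hash_t example_custom_secret(const void* data, size_t len)
 *     {
 *         unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
 *         const char seedMaterial[] = "application-specific seed";
 *         XXH3_generateSecret(secret, seedMaterial, sizeof(seedMaterial) - 1);
 *         return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *     }
 */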


/* ==========================================
 * XXH3 128 bits (a.k.a. XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that longer hashes are about as fast as the 64-bit version
 * due to it using only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len left: the shifted value is always even, so adding it to the
         * odd constant XXH_PRIME64_1 keeps the multiplier odd, avoiding even
         * multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}

/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}


XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate_512 f_acc512,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken if `secret` conditions are not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* === Public XXH128 API === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
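
/*
 * Illustrative sketch (not part of the library): one-shot 128-bit hashing and
 * equality testing. Buffer parameters are hypothetical; XXH128() is defined
 * just above, and XXH128_isEqual() in the utility section below.
 *
 *     static int example_same_hash_128(const void* a, size_t na,
 *                                      const void* b, size_t nb)
 *     {
 *         XXH128_hash_t const ha = XXH128(a, na, 0);
 *         XXH128_hash_t const hb = XXH128(b, nb, 0);
 *         return XXH128_isEqual(ha, hb);   // 1 if equal, 0 otherwise
 *     }
 */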


/* === XXH3 128-bit streaming === */

/*
 * All the functions are actually the same as for 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_128bits_reset(statePtr);
    if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
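
/*
 * Illustrative sketch (not part of the library): since XXH128_cmp() matches
 * the qsort() comparator contract, an array of hashes can be sorted directly.
 * The array name and length are hypothetical.
 *
 *     #include <stdlib.h>   // qsort
 *
 *     static void example_sort_hashes(XXH128_hash_t* hashes, size_t count)
 *     {
 *         qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 *     }
 */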


/*====== Canonical representation ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    memcpy(dst, &hash.high64, sizeof(hash.high64));
    memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
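
/*
 * Illustrative sketch (not part of the library): the canonical form is a
 * fixed big-endian byte layout, suitable for storage or transmission, and
 * the round trip below restores the original hash on any endianness.
 *
 *     static XXH128_hash_t example_canonical_roundtrip(XXH128_hash_t h)
 *     {
 *         XXH128_canonical_t canon;
 *         XXH128_canonicalFromHash(&canon, h);      // serialize, big-endian
 *         return XXH128_hashFromCanonical(&canon);  // equals h
 *     }
 */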

/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2                      /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__)   /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif