/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e. not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            In some circumstances, however, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 *            See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
  defined(__ICCARM__)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
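
/* Example (illustrative only, not part of the original sources) : rather than editing
 * this file, the access method can be forced from the build command line, e.g. with a
 * hypothetical invocation such as `cc -DXXH_FORCE_MEMORY_ACCESS=1 -c xxhash.c` ;
 * adjust the flag and compiler driver to your own toolchain. */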
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash's output for a null input pointer will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance to your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif
/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set it to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif
/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
#include <stddef.h>     /* size_t */
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
#include <string.h>     /* memcpy */
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"
/* *************************************
*  Compiler Specific Options
***************************************/
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif
#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR

#ifdef _MSC_VER
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#endif
/* *************************************
*  Basic Types
***************************************/
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */) )
#  include <stdint.h>
   typedef uint8_t  BYTE;
   typedef uint16_t U16;
   typedef uint32_t U32;
   typedef uint64_t U64;
# else
   typedef unsigned char      BYTE;
   typedef unsigned short     U16;
   typedef unsigned int       U32;
   typedef   signed int       S32;
   typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace it with another 64-bit type here. Note that xxhash.h will also need to be updated. */
# endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed statement is safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
/* Note : although _rotl exists for minGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
#  include <intrinsics.h>
#  define XXH_rotl32(x,r) __ROR(x,(32 - r))
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}

static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x <<  8) & 0x000000ff00000000ULL) |
            ((x >>  8) & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif
/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}
FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}
FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}
/* ***************************
*  Simple Hash Functions
*****************************/
static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}
FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;

#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
        bEnd=p=(const BYTE*)(size_t)16;
#endif

        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v4 = seed - PRIME32_1;

            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);

        h32 = seed + PRIME32_5;

        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;

        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
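
/*
 * Usage sketch (illustrative only, not part of the library) : the one-shot entry points
 * hash a complete buffer in a single call. `myBuffer` and `myBufferSize` below are
 * hypothetical placeholders for the caller's data.
 *
 *     #include "xxhash.h"
 *
 *     unsigned int const h32 = XXH32(myBuffer, myBufferSize, 0);   // seed = 0
 *
 * XXH64() below works the same way and returns an unsigned long long.
 */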
static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}
static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}
FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
        bEnd=p=(const BYTE*)(size_t)32;
#endif

        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v4 = seed - PRIME64_1;

            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

        h64 = seed + PRIME64_5;

        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;

        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;

        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}
FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;

        const BYTE* const limit = bEnd - 16;

            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;

        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
        h32 = state->v3 /* == seed */ + PRIME32_5;

    h32 += state->total_len_32;

        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;

        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {   /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;

        const BYTE* const limit = bEnd - 32;

            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;

        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

        h64 = state->v3 + PRIME64_5;

    h64 += (U64) state->total_len;

        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;

        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;

        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}
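
/*
 * Usage sketch for the streaming API (illustrative only, not part of the library) :
 * a state object is created once, reset with a seed, fed with any number of update()
 * calls, then digested. `chunk1`, `chunk1Size`, etc. are hypothetical placeholders.
 *
 *     XXH64_state_t* const state = XXH64_createState();
 *     XXH64_reset(state, 0);                      // seed = 0
 *     XXH64_update(state, chunk1, chunk1Size);    // may be called as many times as needed
 *     XXH64_update(state, chunk2, chunk2Size);
 *     {   unsigned long long const h64 = XXH64_digest(state);
 *         XXH64_freeState(state);
 *     }
 *
 * The XXH32_* functions follow the same pattern for the 32-bit variant.
 */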
/* **************************
*  Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
 *  The canonical representation follows human-readable write convention, aka big-endian (large digits first).
 *  These functions allow transformation of a hash result into and from its canonical format.
 *  This way, hash values can be written into a file or a buffer, and remain comparable across different systems and programs.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
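
/*
 * Usage sketch for the canonical representation (illustrative only, not part of the
 * library) : convert a hash to its big-endian canonical form before storing or
 * transmitting it, and convert it back with the hashFromCanonical functions.
 * `h64` and `outFile` below are hypothetical placeholders.
 *
 *     XXH64_canonical_t canonical;
 *     XXH64_canonicalFromHash(&canonical, h64);             // h64 : previously computed hash
 *     fwrite(&canonical, sizeof(canonical), 1, outFile);    // outFile : an open FILE*
 *
 *     // later, possibly on a different machine :
 *     XXH64_hash_t const restored = XXH64_hashFromCanonical(&canonical);
 */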