/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * + Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * + Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */

#ifndef QEMU_XXHASH_H
#define QEMU_XXHASH_H

#include "qemu/bitops.h"

#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U

#define QEMU_XXHASH_SEED 1
/*
 * xxhash32, customized for input variables that are not guaranteed to be
 * contiguous in memory.
 */
static inline uint32_t qemu_xxhash8(uint64_t ab, uint64_t cd, uint64_t ef,
                                    uint32_t g, uint32_t h)
{
    uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2;
    uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2;
    uint32_t v3 = QEMU_XXHASH_SEED + 0;
    uint32_t v4 = QEMU_XXHASH_SEED - PRIME32_1;
    uint32_t a = ab;
    uint32_t b = ab >> 32;
    uint32_t c = cd;
    uint32_t d = cd >> 32;
    uint32_t e = ef;
    uint32_t f = ef >> 32;
    uint32_t h32;

    /* One xxh32 round per accumulator, consuming one 32-bit word each.  */
    v1 += a * PRIME32_2;
    v1 = rol32(v1, 13);
    v1 *= PRIME32_1;

    v2 += b * PRIME32_2;
    v2 = rol32(v2, 13);
    v2 *= PRIME32_1;

    v3 += c * PRIME32_2;
    v3 = rol32(v3, 13);
    v3 *= PRIME32_1;

    v4 += d * PRIME32_2;
    v4 = rol32(v4, 13);
    v4 *= PRIME32_1;

    /*
     * Merge the accumulators and mix in a nominal input length, as
     * xxh32 does with "h32 += len"; 28 corresponds to the seven
     * 32-bit words hashed by qemu_xxhash7.
     */
    h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18);
    h32 += 28;

    /* Fold in the remaining words, as the xxh32 4-byte tail loop does.  */
    h32 += e * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 += f * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 += g * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 += h * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    /* Final avalanche.  */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}

static inline uint32_t qemu_xxhash2(uint64_t ab)
{
    return qemu_xxhash8(ab, 0, 0, 0, 0);
}

static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd)
{
    return qemu_xxhash8(ab, cd, 0, 0, 0);
}

static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e)
{
    return qemu_xxhash8(ab, cd, 0, e, 0);
}

static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
                                    uint32_t f)
{
    return qemu_xxhash8(ab, cd, 0, e, f);
}

static inline uint32_t qemu_xxhash7(uint64_t ab, uint64_t cd, uint64_t ef,
                                    uint32_t g)
{
    return qemu_xxhash8(ab, cd, ef, g, 0);
}

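/*
 * Usage sketch (hypothetical helper, not part of the QEMU API): pick
 * the narrowest wrapper that covers the fields being hashed and let
 * the unused inputs default to zero.  Here a 64-bit address and a
 * 32-bit flags word are reduced to an index into a table of nbuckets
 * entries.
 */
static inline uint32_t qemu_xxhash_example_bucket(uint64_t addr,
                                                  uint32_t flags,
                                                  uint32_t nbuckets)
{
    return qemu_xxhash5(addr, 0, flags) % nbuckets;
}
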
/*
 * Component parts of the XXH64 algorithm from
 * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
 *
 * The complete algorithm looks like
 *
 *   i = 0;
 *   if (len >= 32) {
 *       v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
 *       v2 = seed + XXH_PRIME64_2;
 *       v3 = seed + 0;
 *       v4 = seed - XXH_PRIME64_1;
 *       do {
 *           v1 = XXH64_round(v1, get64bits(input + i));
 *           v2 = XXH64_round(v2, get64bits(input + i + 8));
 *           v3 = XXH64_round(v3, get64bits(input + i + 16));
 *           v4 = XXH64_round(v4, get64bits(input + i + 24));
 *       } while ((i += 32) + 32 <= len);
 *       h64 = XXH64_mergerounds(v1, v2, v3, v4);
 *   } else {
 *       h64 = seed + XXH_PRIME64_5;
 *   }
 *   h64 += len;
 *
 *   for (; i + 8 <= len; i += 8) {
 *       h64 ^= XXH64_round(0, get64bits(input + i));
 *       h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
 *   }
 *   for (; i + 4 <= len; i += 4) {
 *       h64 ^= get32bits(input + i) * XXH_PRIME64_1;
 *       h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
 *   }
 *   for (; i < len; i += 1) {
 *       h64 ^= get8bits(input + i) * XXH_PRIME64_5;
 *       h64 = rol64(h64, 11) * XXH_PRIME64_1;
 *   }
 *
 *   return XXH64_avalanche(h64);
 *
 * Exposing the pieces instead allows for simplified usage when
 * the length is a known constant and the inputs are in registers.
 */
#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
#define XXH_PRIME64_3 0x165667B19E3779F9ULL
#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL

/* One XXH64 round: mix one 64-bit input word into an accumulator.  */
static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
{
    return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
}

/* Fold one stripe accumulator into the merged hash.  */
static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
{
    return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
}

/* Combine the four accumulators of the 32-byte stripe loop.  */
static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
                                         uint64_t v3, uint64_t v4)
{
    uint64_t h64;

    h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
    h64 = XXH64_mergeround(h64, v1);
    h64 = XXH64_mergeround(h64, v2);
    h64 = XXH64_mergeround(h64, v3);
    h64 = XXH64_mergeround(h64, v4);

    return h64;
}

/* Final mixing step, scrambling the bits of the accumulated hash.  */
static inline uint64_t XXH64_avalanche(uint64_t h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}
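
/*
 * Sketch of how the pieces compose (hypothetical helper, not QEMU
 * API): XXH64 of a constant 12-byte input, one uint64_t plus one
 * uint32_t, following the len < 32 path of the algorithm above with
 * the length and tail steps folded in at compile time.
 */
static inline uint64_t qemu_xxhash64_example_12(uint64_t a, uint32_t b)
{
    uint64_t h64 = QEMU_XXHASH_SEED + XXH_PRIME64_5 + 12;

    /* 8-byte tail step.  */
    h64 ^= XXH64_round(0, a);
    h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;

    /* 4-byte tail step.  */
    h64 ^= b * XXH_PRIME64_1;
    h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;

    return XXH64_avalanche(h64);
}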

/*
 * XXH64 of four 64-bit words already in registers: one round per word,
 * then merge and avalanche.  Unlike the full algorithm, the input
 * length is not added in.
 */
static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
                                       uint64_t c, uint64_t d)
{
    uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    uint64_t v3 = QEMU_XXHASH_SEED + 0;
    uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;

    v1 = XXH64_round(v1, a);
    v2 = XXH64_round(v2, b);
    v3 = XXH64_round(v3, c);
    v4 = XXH64_round(v4, d);

    return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
}
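
/*
 * Usage sketch (hypothetical helper): reduce a two-word key, e.g. a
 * pointer and a tag, to an index into a table of 1 << bits entries by
 * keeping the high bits of the 64-bit hash; assumes bits <= 32.
 */
static inline uint32_t qemu_xxhash64_example_index(uint64_t ptr, uint64_t tag,
                                                   unsigned bits)
{
    return qemu_xxhash64_4(ptr, tag, 0, 0) >> (64 - bits);
}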

#endif /* QEMU_XXHASH_H */