use crate::convert::*;
use crate::operations::folded_multiply;
use crate::operations::read_small;
use crate::random_state::PI;
use crate::RandomState;
use core::hash::Hasher;

/// This constant comes from Knuth's PRNG (empirically it works better than those from splitmix32).
pub(crate) const MULTIPLE: u64 = 6364136223846793005;
const ROT: u32 = 23; //17

/// A `Hasher` for hashing an arbitrary stream of bytes.
///
/// Instances of [`AHasher`] represent state that is updated while hashing data.
///
/// Each method updates the internal state based on the new data provided. Once
/// all of the data has been provided, the resulting hash can be obtained by calling
/// `finish()`.
///
/// [Clone] is also provided in case you wish to calculate hashes for two different items that
/// start with the same data.
///
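/// # Example
///
/// A minimal usage sketch through the standard [`Hasher`] interface (this assumes the
/// fallback `AHasher` and its `new_with_keys` constructor are reachable from the calling
/// code, which depends on how the crate is configured):
///
/// ```ignore
/// use core::hash::Hasher;
///
/// let mut hasher = AHasher::new_with_keys(123, 456);
/// hasher.write_u32(1989);
/// hasher.write(b"Huh?");
/// println!("Hash is {:x}!", hasher.finish());
/// ```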
#[derive(Debug, Clone)]
pub struct AHasher {
    buffer: u64,
    pad: u64,
    extra_keys: [u64; 2],
}

impl AHasher {
    /// Creates a new hasher keyed to the provided keys.
    #[inline]
    #[allow(dead_code)] // Is not called if non-fallback hash is used.
    pub fn new_with_keys(key1: u128, key2: u128) -> AHasher {
        let pi: [u128; 2] = PI.convert();
        let key1: [u64; 2] = (key1 ^ pi[0]).convert();
        let key2: [u64; 2] = (key2 ^ pi[1]).convert();
        AHasher {
            buffer: key1[0],
            pad: key1[1],
            extra_keys: key2,
        }
    }

    #[allow(unused)] // False positive
    pub(crate) fn test_with_keys(key1: u128, key2: u128) -> Self {
        let key1: [u64; 2] = key1.convert();
        let key2: [u64; 2] = key2.convert();
        Self {
            buffer: key1[0],
            pad: key1[1],
            extra_keys: key2,
        }
    }

    #[inline]
    #[allow(dead_code)] // Is not called if non-fallback hash is used.
    pub(crate) fn from_random_state(rand_state: &RandomState) -> AHasher {
        AHasher {
            buffer: rand_state.k0,
            pad: rand_state.k1,
            extra_keys: [rand_state.k2, rand_state.k3],
        }
    }

    /// This update function has the goal of updating the buffer with a single multiply.
    /// FxHash does this but is vulnerable to attack. To avoid this, the input needs to be masked with an
    /// unpredictable value. Other hashes such as MurmurHash have taken this approach but were found vulnerable
    /// to attack. The attack was based on the idea of reversing the pre-mixing (which is necessarily
    /// reversible, otherwise bits would be lost), then placing a difference in the highest bit before the
    /// multiply used to mix the data. Because a multiply can never affect the bits to the right of it, a
    /// subsequent update that also differed in this bit could result in a predictable collision.
    ///
    /// This version avoids that vulnerability while still using only a single multiply. It takes advantage
    /// of the fact that when a 64 bit multiply is performed the upper 64 bits are usually computed and thrown
    /// away. Instead it creates two 128 bit values where the upper 64 bits are zeros and multiplies them.
    /// (The compiler is smart enough to turn this into a 64 bit multiplication in the assembly.)
    /// Then the upper bits are xored with the lower bits to produce a single 64 bit result.
    ///
    /// To understand why this is a good scrambling function it helps to understand multiply-with-carry PRNGs:
    /// https://en.wikipedia.org/wiki/Multiply-with-carry_pseudorandom_number_generator
    /// If the multiple is chosen well, this creates a long-period, decent-quality PRNG.
    /// Notice that this function is equivalent to such a PRNG except that the `buffer`/`state` is xored with each
    /// new block of data. In the event that the data is all zeros, it is exactly equivalent to a MWC PRNG.
    ///
    /// This is impervious to attack because every bit of the buffer at the end is dependent on every bit in
    /// `new_data ^ buffer`. For example, suppose two inputs differed in only the 5th bit. Then when the
    /// multiplication is performed the `result` will differ in bits 5-69. More specifically it will differ by
    /// 2^5 * MULTIPLE. In the next step, bits 64-127 are turned into a separate 64 bit value, so the
    /// differing bits will be in the lower 6 bits of that value. The two intermediate values, which differ in
    /// bits 5-63 and in bits 0-5 respectively, get xored together, producing an output in which every bit
    /// can be affected. The carries in the multiplication additionally mean that even if an
    /// attacker somehow knew part of (but not all of) the contents of the buffer beforehand,
    /// they would not be able to predict any of the bits in the buffer at the end.
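    ///
    /// As a sketch, the folded multiply described above can be written like this (the
    /// real helper lives in `crate::operations`; its exact body is assumed here):
    ///
    /// ```ignore
    /// fn folded_multiply(s: u64, by: u64) -> u64 {
    ///     // Widen to 128 bits so the upper half of the product is kept...
    ///     let result = (s as u128).wrapping_mul(by as u128);
    ///     // ...then fold the two halves together, so every input bit can
    ///     // influence every output bit.
    ///     ((result & 0xffff_ffff_ffff_ffff) as u64) ^ ((result >> 64) as u64)
    /// }
    /// ```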
    #[inline(always)]
    #[cfg(feature = "folded_multiply")]
    fn update(&mut self, new_data: u64) {
        self.buffer = folded_multiply(new_data ^ self.buffer, MULTIPLE);
    }

    #[inline(always)]
    #[cfg(not(feature = "folded_multiply"))]
    fn update(&mut self, new_data: u64) {
        let d1 = (new_data ^ self.buffer).wrapping_mul(MULTIPLE);
        self.pad = (self.pad ^ d1).rotate_left(8).wrapping_mul(MULTIPLE);
        self.buffer = (self.buffer ^ self.pad).rotate_left(24);
    }

    /// Similar to the above, this function performs an update using a "folded multiply".
    /// However it takes in 128 bits of data instead of 64. Both halves must be masked.
    ///
    /// This makes it impossible for an attacker to place a single bit difference between
    /// two blocks so as to cancel each other.
    ///
    /// However this alone is not sufficient. To prevent (a, b) from hashing the same as (b, a), the buffer
    /// itself must be updated between calls in a way that does not commute. To achieve this, add, xor, and
    /// rotate are used. Add followed by xor is not the same as xor followed by add, and the rotate ensures
    /// that the same output bits can't be affected by the same set of input bits on successive calls. To
    /// cancel this sequence with subsequent input would require knowing the keys.
    #[inline(always)]
    #[cfg(feature = "folded_multiply")]
    fn large_update(&mut self, new_data: u128) {
        let block: [u64; 2] = new_data.convert();
        let combined = folded_multiply(block[0] ^ self.extra_keys[0], block[1] ^ self.extra_keys[1]);
        self.buffer = (self.buffer.wrapping_add(self.pad) ^ combined).rotate_left(ROT);
    }

    #[inline(always)]
    #[cfg(not(feature = "folded_multiply"))]
    fn large_update(&mut self, new_data: u128) {
        let block: [u64; 2] = new_data.convert();
        self.update(block[0] ^ self.extra_keys[0]);
        self.update(block[1] ^ self.extra_keys[1]);
    }

    #[inline]
    #[cfg(feature = "specialize")]
    fn short_finish(&self) -> u64 {
        self.buffer.wrapping_add(self.pad)
    }
}

/// Provides [Hasher] methods to hash all of the primitive types.
///
/// [Hasher]: core::hash::Hasher
impl Hasher for AHasher {
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.update(i);
    }

    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.large_update(i);
    }

    #[inline]
    #[cfg(any(target_pointer_width = "64", target_pointer_width = "32", target_pointer_width = "16"))]
    fn write_usize(&mut self, i: usize) {
        self.write_u64(i as u64);
    }

    #[inline]
    #[cfg(target_pointer_width = "128")]
    fn write_usize(&mut self, i: usize) {
        self.write_u128(i as u128);
    }

    #[inline]
    #[allow(clippy::collapsible_if)]
    fn write(&mut self, input: &[u8]) {
        let mut data = input;
        let length = data.len() as u64;
        // Needs to be an add rather than an xor, because otherwise it could be canceled with carefully formed input.
        self.buffer = self.buffer.wrapping_add(length).wrapping_mul(MULTIPLE);
        // A 'binary search' on sizes reduces the number of comparisons.
        if data.len() > 8 {
            if data.len() > 16 {
                let tail = data.read_last_u128();
                self.large_update(tail);
                while data.len() > 16 {
                    let (block, rest) = data.read_u128();
                    self.large_update(block);
                    data = rest;
                }
            } else {
                self.large_update([data.read_u64().0, data.read_last_u64()].convert());
            }
        } else {
            let value = read_small(data);
            self.large_update(value.convert());
        }
    }

    #[inline]
    #[cfg(feature = "folded_multiply")]
    fn finish(&self) -> u64 {
        let rot = (self.buffer & 63) as u32;
        folded_multiply(self.buffer, self.pad).rotate_left(rot)
    }

    #[inline]
    #[cfg(not(feature = "folded_multiply"))]
    fn finish(&self) -> u64 {
        let rot = (self.buffer & 63) as u32;
        (self.buffer.wrapping_mul(MULTIPLE) ^ self.pad).rotate_left(rot)
    }
}

#[cfg(feature = "specialize")]
pub(crate) struct AHasherU64 {
    pub(crate) buffer: u64,
    pub(crate) pad: u64,
}

/// A specialized hasher for only primitives under 64 bits.
#[cfg(feature = "specialize")]
impl Hasher for AHasherU64 {
    #[inline]
    fn finish(&self) -> u64 {
        let rot = (self.pad & 63) as u32;
        self.buffer.rotate_left(rot)
    }

    #[inline]
    fn write(&mut self, _bytes: &[u8]) {
        unreachable!("Specialized hasher was called with a different type of object")
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.buffer = folded_multiply(i ^ self.buffer, MULTIPLE);
    }

    #[inline]
    fn write_u128(&mut self, _i: u128) {
        unreachable!("Specialized hasher was called with a different type of object")
    }

    #[inline]
    fn write_usize(&mut self, _i: usize) {
        unreachable!("Specialized hasher was called with a different type of object")
    }
}

#[cfg(feature = "specialize")]
pub(crate) struct AHasherFixed(pub AHasher);

/// A specialized hasher for fixed size primitives larger than 64 bits.
#[cfg(feature = "specialize")]
impl Hasher for AHasherFixed {
    #[inline]
    fn finish(&self) -> u64 {
        self.0.short_finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.0.write_u64(i);
    }

    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.0.write_u128(i);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.0.write_usize(i);
    }
}

#[cfg(feature = "specialize")]
pub(crate) struct AHasherStr(pub AHasher);

/// A specialized hasher for a single string.
/// Note that the other `write_*` methods are no-ops rather than panics, because the `Hash`
/// impl for `String` tacks on an unneeded extra call (as does the one for `Vec`).
#[cfg(feature = "specialize")]
impl Hasher for AHasherStr {
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        if bytes.len() > 8 {
            self.0.write(bytes)
        } else {
            let value = read_small(bytes);
            self.0.buffer = folded_multiply(value[0] ^ self.0.buffer, value[1] ^ self.0.extra_keys[1]);
            self.0.pad = self.0.pad.wrapping_add(bytes.len() as u64);
        }
    }

    #[inline]
    fn write_u8(&mut self, _i: u8) {}

    #[inline]
    fn write_u16(&mut self, _i: u16) {}

    #[inline]
    fn write_u32(&mut self, _i: u32) {}

    #[inline]
    fn write_u64(&mut self, _i: u64) {}

    #[inline]
    fn write_u128(&mut self, _i: u128) {}

    #[inline]
    fn write_usize(&mut self, _i: usize) {}
}

#[cfg(test)]
mod tests {
    use crate::convert::Convert;
    use crate::fallback_hash::*;

    #[test]
    fn test_hash() {
        let mut hasher = AHasher::new_with_keys(0, 0);
        let value: u64 = 1 << 32;
        hasher.update(value);
        let result = hasher.buffer;
        let mut hasher = AHasher::new_with_keys(0, 0);
        let value2: u64 = 1;
        hasher.update(value2);
        let result2 = hasher.buffer;
        let result: [u8; 8] = result.convert();
        let result2: [u8; 8] = result2.convert();
        assert_ne!(hex::encode(result), hex::encode(result2));
    }
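
    // A sketch exercising the non-commutative combine documented on `large_update`:
    // swapping two blocks should (with overwhelming probability) change the buffer.
    // The key values below are arbitrary; `key2` is chosen so both `extra_keys` are nonzero.
    #[test]
    fn test_large_update_is_order_dependent() {
        let key2: u128 = 5 + (7u128 << 64);
        let mut a = AHasher::test_with_keys(3, key2);
        a.large_update(1);
        a.large_update(2);
        let mut b = AHasher::test_with_keys(3, key2);
        b.large_update(2);
        b.large_update(1);
        assert_ne!(a.buffer, b.buffer);
    }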
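
    // A sketch of keyed hashing through the public `Hasher` interface: the same keys
    // and input must agree, while different keys should (with overwhelming probability)
    // produce a different hash. The key and input values are arbitrary.
    #[test]
    fn test_keys_affect_hash() {
        use core::hash::Hasher;
        let mut a = AHasher::new_with_keys(7, 8);
        a.write(b"hello world");
        let mut b = AHasher::new_with_keys(7, 8);
        b.write(b"hello world");
        assert_eq!(a.finish(), b.finish());
        let mut c = AHasher::new_with_keys(9, 10);
        c.write(b"hello world");
        assert_ne!(a.finish(), c.finish());
    }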

    #[test]
    fn test_conversion() {
        let input: &[u8] = "dddddddd".as_bytes();
        let bytes: u64 = as_array!(input, 8).convert();
        assert_eq!(bytes, 0x6464646464646464);
    }
}
392 | } |