//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.

use std::hash::Hasher;
use std::mem::{self, MaybeUninit};
use std::ptr;

// The SipHash algorithm operates on 8-byte chunks.
const ELEM_SIZE: usize = mem::size_of::<u64>();

// Size of the buffer in number of elements, not including the spill.
//
// The selection of this size was guided by rustc-perf benchmark comparisons of
// different buffer sizes. It should be periodically reevaluated as the compiler
// implementation and input characteristics change.
//
// Using the same-sized buffer for everything we hash is a performance versus
// complexity tradeoff. The ideal buffer size, and whether buffering should even
// be used, depends on what is being hashed. It may be worth it to size the
// buffer appropriately (perhaps by making SipHasher128 generic over the buffer
// size) or disable buffering depending on what is being hashed. But at this
// time, we use the same buffer size for everything.
const BUFFER_CAPACITY: usize = 8;

// Size of the buffer in bytes, not including the spill.
const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE;

// Size of the buffer in number of elements, including the spill.
const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1;

// Size of the buffer in bytes, including the spill.
const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE;

// Index of the spill element in the buffer.
const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1;

#[derive(Debug, Clone)]
pub struct SipHasher128 {
    // The access pattern during hashing consists of accesses to `nbuf` and
    // `buf` until the buffer is full, followed by accesses to `state` and
    // `processed`, and then repetition of that pattern until hashing is done.
    // This is the basis for the ordering of fields below. However, in practice
    // the cache miss-rate for data access is extremely low regardless of order.
    nbuf: usize,                                         // how many bytes in buf are valid
    buf: [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes le
    state: State,                                        // hash State
    processed: usize,                                    // how many bytes we've processed
}

#[derive(Debug, Clone, Copy)]
struct State {
    // v0, v2 and v1, v3 show up in pairs in the algorithm,
    // and simd implementations of SipHash will use vectors
    // of v02 and v13. By placing them in this order in the struct,
    // the compiler can pick up on just a few simd optimizations by itself.
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}

macro_rules! compress {
    ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
        $v0 = $v0.wrapping_add($v1);
        $v1 = $v1.rotate_left(13);
        $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        $v2 = $v2.wrapping_add($v3);
        $v3 = $v3.rotate_left(16);
        $v3 ^= $v2;
        $v0 = $v0.wrapping_add($v3);
        $v3 = $v3.rotate_left(21);
        $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1);
        $v1 = $v1.rotate_left(17);
        $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    }};
}

// Copies up to 8 bytes from source to destination. This performs better than
// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
// workloads since all of the copies have fixed sizes and avoid calling memcpy.
//
// This is specifically designed for copies of up to 8 bytes, because that's the
// maximum number of bytes needed to fill an 8-byte-sized element on which
// SipHash operates. Note that for variable-sized copies which are known to be
// less than 8 bytes, this function will perform more work than necessary unless
// the compiler is able to optimize the extra work away.
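//
// For example, a 7-byte copy is performed as a 4-byte copy, then a 2-byte
// copy, then a single-byte copy, each with a statically known size.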
unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
    debug_assert!(count <= 8);

    if count == 8 {
        ptr::copy_nonoverlapping(src, dst, 8);
        return;
    }

    let mut i = 0;
    if i + 3 < count {
        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
        i += 4;
    }

    if i + 1 < count {
        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
        i += 2;
    }

    if i < count {
        *dst.add(i) = *src.add(i);
        i += 1;
    }

    debug_assert_eq!(i, count);
}

// This implementation uses buffering to reduce the hashing cost for inputs
// consisting of many small integers. Buffering simplifies the integration of
// integer input--the integer write function typically just appends to the
// buffer with a statically sized write, updates metadata, and returns.
//
// Buffering also prevents alternating between writes that do and do not trigger
// the hashing process. Only when the entire buffer is full do we transition
// into hashing. This allows us to keep the hash state in registers for longer,
// instead of loading and storing it before and after processing each element.
//
// When a write fills the buffer, a buffer processing function is invoked to
// hash all of the buffered input. The buffer processing functions are marked
// `#[inline(never)]` so that they aren't inlined into the append functions,
// which ensures the more frequently called append functions remain inlineable
// and don't include register pushing/popping that would only be made necessary
// by inclusion of the complex buffer processing path, which uses those
// registers.
//
// The buffer includes a "spill"--an extra element at the end--which simplifies
// the integer write buffer processing path. The value that fills the buffer can
// be written with a statically sized write that may spill over into the spill.
// After the buffer is processed, the part of the value that spilled over can be
// written from the spill to the beginning of the buffer with another statically
// sized write. This write may copy more bytes than actually spilled over, but
// we maintain the metadata such that any extra copied bytes will be ignored by
// subsequent processing. Due to the static sizes, this scheme performs better
// than copying the exact number of bytes needed into the end and beginning of
// the buffer.
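//
// As a concrete sketch of the numbers involved: with `nbuf == 60`, an 8-byte
// write lands at byte offsets 60..68, so its last 4 bytes fall into the spill
// element. After the 64 buffered bytes are hashed, a statically sized 7-byte
// (`LEN - 1`) copy moves the spill back to the start of the buffer and `nbuf`
// becomes 4, so the 3 extra copied bytes are ignored by later processing.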
//
// The buffer is uninitialized, which improves performance, but may preclude
// efficient implementation of alternative approaches. The improvement is not so
// large that an alternative approach should be disregarded because it cannot be
// efficiently implemented with an uninitialized buffer. On the other hand, an
// uninitialized buffer may become more important should a larger one be used.
//
// # Platform Dependence
//
// The SipHash algorithm operates on byte sequences. It parses the input stream
// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
// produces the same result on big- and little-endian hardware.
//
// However, the Hasher trait has methods which operate on multi-byte integers.
// How they are converted into byte sequences can be endian-dependent (by using
// native byte order) or independent (by consistently using either LE or BE byte
// order). It can also be `isize` and `usize` size dependent (by using the
// native size), or independent (by converting to a common size), supposing the
// values can be represented in 32 bits.
//
// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
// choose to do the integer to byte sequence conversion in the platform-
// dependent way. Clients can achieve platform-independent hashing by widening
// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping
// integers on big-endian systems before passing them to the writing functions.
// This causes the input byte sequence to look identical on big- and little-
// endian systems (supposing `isize` and `usize` values can be represented in 32
// bits), which ensures platform-independent results.
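//
// A minimal sketch of such a client-side wrapper (illustrative only, not part
// of this module):
//
//     fn write_u64_platform_independent(hasher: &mut SipHasher128, x: u64) {
//         // Byte-swap on big-endian hosts first, so the byte sequence fed to
//         // the hasher is the same on every platform.
//         hasher.write_u64(x.to_le());
//     }
//
// with `usize` and `isize` values widened to 64 bits before being written.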
impl SipHasher128 {
    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
        let mut hasher = SipHasher128 {
            nbuf: 0,
            buf: MaybeUninit::uninit_array(),
            state: State {
                v0: key0 ^ 0x736f6d6570736575,
                // The XOR with 0xee is only done on 128-bit algorithm version.
                v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
                v2: key0 ^ 0x6c7967656e657261,
                v3: key1 ^ 0x7465646279746573,
            },
            processed: 0,
        };

        unsafe {
            // Initialize spill because we read from it in `short_write_process_buffer`.
            *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
        }

        hasher
    }

    pub fn short_write<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
        let nbuf = self.nbuf;
        debug_assert!(LEN <= 8);
        debug_assert!(nbuf < BUFFER_SIZE);
        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

        if nbuf + LEN < BUFFER_SIZE {
            unsafe {
                // The memcpy call is optimized away because the size is known.
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
            }

            self.nbuf = nbuf + LEN;

            return;
        }

        unsafe { self.short_write_process_buffer(bytes) }
    }

    // A specialized write function for values with size <= 8 that should only
    // be called when the write would cause the buffer to fill.
    //
    // SAFETY: the write of `x` into `self.buf` starting at byte offset
    // `self.nbuf` must cause `self.buf` to become fully initialized (and not
    // overflow) if it wasn't already.
    #[inline(never)]
    unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
        let nbuf = self.nbuf;
        debug_assert!(LEN <= 8);
        debug_assert!(nbuf < BUFFER_SIZE);
        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

        // Copy first part of input into end of buffer, possibly into spill
        // element. The memcpy call is optimized away because the size is known.
        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);

        // Process buffer.
        for i in 0..BUFFER_CAPACITY {
            let elem = self.buf.get_unchecked(i).assume_init().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
        }

        // Copy remaining input into start of buffer by copying LEN - 1
        // bytes from the spill (at most LEN - 1 bytes could have overflowed
        // into the spill). The memcpy call is optimized away because the size
        // is known. And the whole copy is optimized away for LEN == 1.
        let dst = self.buf.as_mut_ptr() as *mut u8;
        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
        ptr::copy_nonoverlapping(src, dst, LEN - 1);

        // This function should only be called when the write fills the buffer.
        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
        // LEN is statically known, so the branch is optimized away.
        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
        self.processed += BUFFER_SIZE;
    }

    // A write function for byte slices.
    fn slice_write(&mut self, msg: &[u8]) {
        let length = msg.len();
        let nbuf = self.nbuf;
        debug_assert!(nbuf < BUFFER_SIZE);

        if nbuf + length < BUFFER_SIZE {
            unsafe {
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);

                if length <= 8 {
                    copy_nonoverlapping_small(msg.as_ptr(), dst, length);
                } else {
                    // This memcpy is *not* optimized away.
                    ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
                }
            }

            self.nbuf = nbuf + length;

            return;
        }

        unsafe { self.slice_write_process_buffer(msg) }
    }

    // A write function for byte slices that should only be called when the
    // write would cause the buffer to fill.
    //
    // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
    // and `msg` must contain enough bytes to initialize the rest of the element
    // containing the byte offset `self.nbuf`.
    #[inline(never)]
    unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
        let length = msg.len();
        let nbuf = self.nbuf;
        debug_assert!(nbuf < BUFFER_SIZE);
        debug_assert!(nbuf + length >= BUFFER_SIZE);

        // Always copy first part of input into current element of buffer.
        // This function should only be called when the write fills the buffer,
        // so we know that there is enough input to fill the current element.
        let valid_in_elem = nbuf % ELEM_SIZE;
        let needed_in_elem = ELEM_SIZE - valid_in_elem;

        let src = msg.as_ptr();
        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
        copy_nonoverlapping_small(src, dst, needed_in_elem);

        // Process buffer.

        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
        // We know that is true, because the last step ensured we have a full
        // element in the buffer.
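        //
        // For example, with `nbuf == 13`, the partial element is element 1
        // (bytes 8..16), `valid_in_elem == 5`, `needed_in_elem == 3`, and
        // `last == 13 / 8 + 1 == 2`, so elements 0 and 1 are hashed below.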
        let last = nbuf / ELEM_SIZE + 1;

        for i in 0..last {
            let elem = self.buf.get_unchecked(i).assume_init().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
        }

        // Process the remaining element-sized chunks of input.
        let mut processed = needed_in_elem;
        let input_left = length - processed;
        let elems_left = input_left / ELEM_SIZE;
        let extra_bytes_left = input_left % ELEM_SIZE;

        for _ in 0..elems_left {
            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
            processed += ELEM_SIZE;
        }

        // Copy remaining input into start of buffer.
        let src = msg.as_ptr().add(processed);
        let dst = self.buf.as_mut_ptr() as *mut u8;
        copy_nonoverlapping_small(src, dst, extra_bytes_left);

        self.nbuf = extra_bytes_left;
        self.processed += nbuf + processed;
    }

    pub fn finish128(mut self) -> (u64, u64) {
        debug_assert!(self.nbuf < BUFFER_SIZE);

        // Process full elements in buffer.
        let last = self.nbuf / ELEM_SIZE;

        // Since we're consuming self, avoid updating members for a potential
        // performance gain.
        let mut state = self.state;

        for i in 0..last {
            let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
            state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut state);
            state.v0 ^= elem;
        }

        // Get remaining partial element.
        let elem = if self.nbuf % ELEM_SIZE != 0 {
            unsafe {
                // Ensure element is initialized by writing zero bytes. At most
                // `ELEM_SIZE - 1` are required given the above check. It's safe
                // to write this many because we have the spill and we maintain
                // `self.nbuf` such that this write will start before the spill.
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
                ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
                self.buf.get_unchecked(last).assume_init().to_le()
            }
        } else {
            0
        };

        // Finalize the hash.
        let length = self.processed + self.nbuf;
        let b: u64 = ((length as u64 & 0xff) << 56) | elem;

        state.v3 ^= b;
        Sip24Rounds::c_rounds(&mut state);
        state.v0 ^= b;

        state.v2 ^= 0xee;
        Sip24Rounds::d_rounds(&mut state);
        let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

        state.v1 ^= 0xdd;
        Sip24Rounds::d_rounds(&mut state);
        let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

        (_0, _1)
    }
}

impl Hasher for SipHasher128 {
    fn write_u8(&mut self, i: u8) {
        self.short_write(i.to_ne_bytes());
    }

    fn write_u16(&mut self, i: u16) {
        self.short_write(i.to_ne_bytes());
    }

    fn write_u32(&mut self, i: u32) {
        self.short_write(i.to_ne_bytes());
    }

    fn write_u64(&mut self, i: u64) {
        self.short_write(i.to_ne_bytes());
    }

    fn write_usize(&mut self, i: usize) {
        self.short_write(i.to_ne_bytes());
    }

    fn write_i8(&mut self, i: i8) {
        self.short_write((i as u8).to_ne_bytes());
    }

    fn write_i16(&mut self, i: i16) {
        self.short_write((i as u16).to_ne_bytes());
    }

    fn write_i32(&mut self, i: i32) {
        self.short_write((i as u32).to_ne_bytes());
    }

    fn write_i64(&mut self, i: i64) {
        self.short_write((i as u64).to_ne_bytes());
    }

    fn write_isize(&mut self, i: isize) {
        self.short_write((i as usize).to_ne_bytes());
    }

    fn write(&mut self, msg: &[u8]) {
        self.slice_write(msg);
    }

    fn write_str(&mut self, s: &str) {
        // This hasher works byte-wise, and `0xFF` cannot show up in a `str`,
        // so just hashing the one extra byte is enough to be prefix-free.
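        //
        // For example, hashing ("ab", "c") feeds the bytes `a b FF c FF`,
        // while ("a", "bc") feeds `a FF b c FF`, so the two inputs cannot
        // collide by mere concatenation.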
        self.write(s.as_bytes());
        self.write_u8(0xFF);
    }

    fn finish(&self) -> u64 {
        panic!("SipHasher128 cannot provide valid 64 bit hashes")
    }
}

#[derive(Debug, Clone, Default)]
struct Sip24Rounds;

impl Sip24Rounds {
    fn c_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
    }

    fn d_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
        compress!(state);
        compress!(state);
    }
}