pub extern crate block_padding;
pub extern crate byteorder;
pub extern crate generic_array;
extern crate byte_tools;

use block_padding::{PadError, Padding};
use byte_tools::zero;
use byteorder::{ByteOrder, BE};
use core::slice;
use generic_array::{ArrayLength, GenericArray};
13 /// Buffer for block processing of data
14 #[derive(Clone, Default)]
15 pub struct BlockBuffer
<BlockSize
: ArrayLength
<u8>> {
16 buffer
: GenericArray
<u8, BlockSize
>,
21 unsafe fn cast
<N
: ArrayLength
<u8>>(block
: &[u8]) -> &GenericArray
<u8, N
> {
22 debug_assert_eq
!(block
.len(), N
::to_usize());
23 &*(block
.as_ptr() as *const GenericArray
<u8, N
>)
impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
    /// Process data in `input` in blocks of size `BlockSize` using function `f`.
    ///
    /// Previously buffered data is completed into a block first, full blocks
    /// are then fed to `f` directly from `input` without copying, and any
    /// tail shorter than a block is buffered for the next call.
    #[inline]
    pub fn input<F>(&mut self, mut input: &[u8], mut f: F)
        where F: FnMut(&GenericArray<u8, BlockSize>)
    {
        // If there is already data in the buffer, process it if we have
        // enough to complete the chunk.
        let rem = self.remaining();
        if self.pos != 0 && input.len() >= rem {
            let (l, r) = input.split_at(rem);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            self.pos = 0;
            f(&self.buffer);
        }

        // While we have at least a full buffer size chunks's worth of data,
        // process that data without copying it into the buffer
        while input.len() >= self.size() {
            let (block, r) = input.split_at(self.size());
            input = r;
            // SAFETY: `block` is exactly `self.size() == BlockSize` bytes long.
            f(unsafe { cast(block) });
        }

        // Copy any remaining data into the buffer.
        self.buffer[self.pos..self.pos + input.len()].copy_from_slice(input);
        self.pos += input.len();
    }
59 /// Process data in `input` in blocks of size `BlockSize` using function `f`, which accepts
62 pub fn input2<F>(&mut self, mut input: &[u8], mut f: F)
63 where F: FnMut(&[GenericArray<u8, BlockSize>])
65 // If there is already data in the buffer, process it if we have
66 // enough to complete the chunk.
67 let rem = self.remaining();
68 if self.pos != 0 && input.len() >= rem {
69 let (l, r) = input.split_at(rem);
71 self.buffer[self.pos..].copy_from_slice(l);
73 f(slice::from_ref(&self.buffer));
76 // While we have at least a full buffer size chunks's worth of data,
77 // process it data without copying into the buffer
78 let n_blocks = input.len()/self.size();
79 let (left, right) = input.split_at(n_blocks*self.size());
80 // safe because we guarantee that `blocks` does not point outside of `input`
82 slice::from_raw_parts(
83 left.as_ptr() as *const GenericArray<u8, BlockSize>,
89 // Copy remaining data into the buffer.
90 self.buffer[self.pos..self.pos+right.len()].copy_from_slice(right);
91 self.pos += right.len();
95 /// Variant that doesn't flush the buffer until there's additional
96 /// data to be processed. Suitable for tweakable block ciphers
97 /// like Threefish that need to know whether a block is the *last*
98 /// data block before processing it.
100 pub fn input_lazy
<F
>(&mut self, mut input
: &[u8], mut f
: F
)
101 where F
: FnMut(&GenericArray
<u8, BlockSize
>)
103 let rem
= self.remaining();
104 if self.pos
!= 0 && input
.len() > rem
{
105 let (l
, r
) = input
.split_at(rem
);
107 self.buffer
[self.pos
..].copy_from_slice(l
);
112 while input
.len() > self.size() {
113 let (block
, r
) = input
.split_at(self.size());
115 f(unsafe { cast(block) }
);
118 self.buffer
[self.pos
..self.pos
+input
.len()].copy_from_slice(input
);
119 self.pos
+= input
.len();
122 /// Pad buffer with `prefix` and make sure that internall buffer
123 /// has at least `up_to` free bytes. All remaining bytes get
126 fn digest_pad
<F
>(&mut self, up_to
: usize, f
: &mut F
)
127 where F
: FnMut(&GenericArray
<u8, BlockSize
>)
129 if self.pos
== self.size() {
133 self.buffer
[self.pos
] = 0x80;
136 zero(&mut self.buffer
[self.pos
..]);
138 if self.remaining() < up_to
{
140 zero(&mut self.buffer
[..self.pos
]);
144 /// Pad message with 0x80, zeros and 64-bit message length
145 /// in a byte order specified by `B`
147 pub fn len64_padding
<B
, F
>(&mut self, data_len
: u64, mut f
: F
)
148 where B
: ByteOrder
, F
: FnMut(&GenericArray
<u8, BlockSize
>)
150 // TODO: replace `F` with `impl Trait` on MSRV bump
151 self.digest_pad(8, &mut f
);
153 B
::write_u64(&mut self.buffer
[s
-8..], data_len
);
159 /// Pad message with 0x80, zeros and 128-bit message length
160 /// in the big-endian byte order
162 pub fn len128_padding_be
<F
>(&mut self, hi
: u64, lo
: u64, mut f
: F
)
163 where F
: FnMut(&GenericArray
<u8, BlockSize
>)
165 // TODO: on MSRV bump replace `F` with `impl Trait`, use `u128`, add `B`
166 self.digest_pad(16, &mut f
);
168 BE
::write_u64(&mut self.buffer
[s
-16..s
-8], hi
);
169 BE
::write_u64(&mut self.buffer
[s
-8..], lo
);
174 /// Pad message with a given padding `P`
176 /// Returns `PadError` if internall buffer is full, which can only happen if
177 /// `input_lazy` was used.
179 pub fn pad_with
<P
: Padding
>(&mut self)
180 -> Result
<&mut GenericArray
<u8, BlockSize
>, PadError
>
182 P
::pad_block(&mut self.buffer
[..], self.pos
)?
;
187 /// Return size of the internall buffer in bytes
189 pub fn size(&self) -> usize {
190 BlockSize
::to_usize()
193 /// Return current cursor position
195 pub fn position(&self) -> usize {
199 /// Return number of remaining bytes in the internall buffer
201 pub fn remaining(&self) -> usize {
202 self.size() - self.pos
205 /// Reset buffer by setting cursor position to zero
207 pub fn reset(&mut self) {