#![no_std]
pub extern crate byteorder;
pub extern crate block_padding;
pub extern crate generic_array;
extern crate byte_tools;

use byteorder::{ByteOrder, BE};
use byte_tools::zero;
use block_padding::{Padding, PadError};
use generic_array::{GenericArray, ArrayLength};
use core::slice;

/// Buffer for block processing of data
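///
/// A minimal sketch of the buffering behaviour; the `U4` block size below is
/// an arbitrary illustration value, not anything mandated by the crate:
///
/// ```
/// use block_buffer::BlockBuffer;
/// use block_buffer::generic_array::typenum::U4;
///
/// let mut buf = BlockBuffer::<U4>::default();
/// assert_eq!((buf.size(), buf.position(), buf.remaining()), (4, 0, 4));
/// // Three bytes are not enough for a full block, so they are only buffered.
/// buf.input(&[0u8; 3], |_block| {});
/// assert_eq!((buf.position(), buf.remaining()), (3, 1));
/// buf.reset();
/// assert_eq!(buf.position(), 0);
/// ```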
#[derive(Clone, Default)]
pub struct BlockBuffer<BlockSize: ArrayLength<u8>> {
    buffer: GenericArray<u8, BlockSize>,
    pos: usize,
}

/// Cast a byte slice to a block reference without copying.
///
/// Safety: the caller must guarantee that `block.len() == N::to_usize()`.
#[inline(always)]
unsafe fn cast<N: ArrayLength<u8>>(block: &[u8]) -> &GenericArray<u8, N> {
    debug_assert_eq!(block.len(), N::to_usize());
    &*(block.as_ptr() as *const GenericArray<u8, N>)
}

impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
    /// Process data in `input` in blocks of size `BlockSize` using function `f`.
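    ///
    /// A minimal usage sketch; the `U8` block size and the 20-byte input are
    /// arbitrary illustration values:
    ///
    /// ```
    /// use block_buffer::BlockBuffer;
    /// use block_buffer::generic_array::typenum::U8;
    ///
    /// let mut buf = BlockBuffer::<U8>::default();
    /// let mut blocks = 0;
    /// // 20 bytes = two full 8-byte blocks plus 4 bytes left in the buffer.
    /// buf.input(&[0u8; 20], |_block| blocks += 1);
    /// assert_eq!(blocks, 2);
    /// assert_eq!(buf.position(), 4);
    /// ```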
    #[inline]
    pub fn input<F>(&mut self, mut input: &[u8], mut f: F)
        where F: FnMut(&GenericArray<u8, BlockSize>)
    {
        // If there is already data in the buffer, process it if we have
        // enough to complete the chunk.
        let rem = self.remaining();
        if self.pos != 0 && input.len() >= rem {
            let (l, r) = input.split_at(rem);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            self.pos = 0;
            f(&self.buffer);
        }

        // While we have at least a full block's worth of data, process that
        // data without copying it into the buffer.
        while input.len() >= self.size() {
            let (block, r) = input.split_at(self.size());
            input = r;
            f(unsafe { cast(block) });
        }

        // Copy any remaining data into the buffer.
        self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
        self.pos += input.len();
    }

    /*
    /// Process data in `input` in blocks of size `BlockSize` using function `f`,
    /// which accepts a slice of blocks.
    #[inline]
    pub fn input2<F>(&mut self, mut input: &[u8], mut f: F)
        where F: FnMut(&[GenericArray<u8, BlockSize>])
    {
        // If there is already data in the buffer, process it if we have
        // enough to complete the chunk.
        let rem = self.remaining();
        if self.pos != 0 && input.len() >= rem {
            let (l, r) = input.split_at(rem);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            self.pos = 0;
            f(slice::from_ref(&self.buffer));
        }

        // While we have at least a full block's worth of data, process that
        // data without copying it into the buffer.
        let n_blocks = input.len()/self.size();
        let (left, right) = input.split_at(n_blocks*self.size());
        // safe because we guarantee that `blocks` does not point outside of `input`
        let blocks = unsafe {
            slice::from_raw_parts(
                left.as_ptr() as *const GenericArray<u8, BlockSize>,
                n_blocks,
            )
        };
        f(blocks);

        // Copy remaining data into the buffer.
        self.buffer[self.pos..self.pos+right.len()].copy_from_slice(right);
        self.pos += right.len();
    }
    */

    /// Variant that doesn't flush the buffer until there's additional
    /// data to be processed. Suitable for tweakable block ciphers
    /// like Threefish that need to know whether a block is the *last*
    /// data block before processing it.
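    ///
    /// A rough sketch of the difference from `input`; the `U8` block size is
    /// an arbitrary illustration value. A lone full block stays buffered until
    /// more data arrives:
    ///
    /// ```
    /// use block_buffer::BlockBuffer;
    /// use block_buffer::generic_array::typenum::U8;
    ///
    /// let mut buf = BlockBuffer::<U8>::default();
    /// let mut blocks = 0;
    /// // Exactly one block: it is buffered, `f` is not called yet.
    /// buf.input_lazy(&[0u8; 8], |_block| blocks += 1);
    /// assert_eq!(blocks, 0);
    /// assert_eq!(buf.position(), 8);
    /// // More data arrives, so the buffered block cannot be the last one.
    /// buf.input_lazy(&[0u8; 1], |_block| blocks += 1);
    /// assert_eq!(blocks, 1);
    /// assert_eq!(buf.position(), 1);
    /// ```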
    #[inline]
    pub fn input_lazy<F>(&mut self, mut input: &[u8], mut f: F)
        where F: FnMut(&GenericArray<u8, BlockSize>)
    {
        let rem = self.remaining();
        if self.pos != 0 && input.len() > rem {
            let (l, r) = input.split_at(rem);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            self.pos = 0;
            f(&self.buffer);
        }

        while input.len() > self.size() {
            let (block, r) = input.split_at(self.size());
            input = r;
            f(unsafe { cast(block) });
        }

        self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
        self.pos += input.len();
    }

    /// Pad the buffer with a `0x80` marker byte and make sure that the
    /// internal buffer has at least `up_to` free bytes left at its end.
    /// All remaining bytes get zeroed out.
    #[inline]
    fn digest_pad<F>(&mut self, up_to: usize, f: &mut F)
        where F: FnMut(&GenericArray<u8, BlockSize>)
    {
        // Flush a completely full buffer before writing the padding byte.
        if self.pos == self.size() {
            f(&self.buffer);
            self.pos = 0;
        }
        self.buffer[self.pos] = 0x80;
        self.pos += 1;

        zero(&mut self.buffer[self.pos..]);

        // Not enough room left for the length field: process this block and
        // continue in a fully zeroed buffer.
        if self.remaining() < up_to {
            f(&self.buffer);
            zero(&mut self.buffer[..self.pos]);
        }
    }

    /// Pad message with 0x80, zeros and 64-bit message length
    /// in a byte order specified by `B`
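    ///
    /// A sketch of a SHA-256-style finalization step; the 64-byte block size,
    /// the `b"abc"` message and its 24-bit length are illustrative values only:
    ///
    /// ```
    /// use block_buffer::BlockBuffer;
    /// use block_buffer::byteorder::BE;
    /// use block_buffer::generic_array::typenum::U64;
    ///
    /// let mut buf = BlockBuffer::<U64>::default();
    /// buf.input(b"abc", |_block| unreachable!("no full block yet"));
    /// let mut last = [0u8; 64];
    /// // Pad with 0x80, zeros and the 64-bit big-endian bit length (3 * 8 = 24).
    /// buf.len64_padding::<BE, _>(24, |block| last.copy_from_slice(block.as_slice()));
    /// assert_eq!(last[3], 0x80);
    /// assert_eq!(&last[56..], &[0, 0, 0, 0, 0, 0, 0, 24]);
    /// ```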
    #[inline]
    pub fn len64_padding<B, F>(&mut self, data_len: u64, mut f: F)
        where B: ByteOrder, F: FnMut(&GenericArray<u8, BlockSize>)
    {
        // TODO: replace `F` with `impl Trait` on MSRV bump
        self.digest_pad(8, &mut f);
        let s = self.size();
        B::write_u64(&mut self.buffer[s-8..], data_len);
        f(&self.buffer);
        self.pos = 0;
    }

    /// Pad message with 0x80, zeros and 128-bit message length
    /// in the big-endian byte order
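    ///
    /// A sketch mirroring SHA-512-style finalization; the 128-byte block size
    /// and the empty message (`hi = 0`, `lo = 0`) are illustrative values only:
    ///
    /// ```
    /// use block_buffer::BlockBuffer;
    /// use block_buffer::generic_array::typenum::U128;
    ///
    /// let mut buf = BlockBuffer::<U128>::default();
    /// let mut last = [0u8; 128];
    /// buf.len128_padding_be(0, 0, |block| last.copy_from_slice(block.as_slice()));
    /// // Only the padding byte is set; the length of an empty message is zero.
    /// assert_eq!(last[0], 0x80);
    /// assert!(last[1..].iter().all(|&b| b == 0));
    /// ```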
    #[inline]
    pub fn len128_padding_be<F>(&mut self, hi: u64, lo: u64, mut f: F)
        where F: FnMut(&GenericArray<u8, BlockSize>)
    {
        // TODO: on MSRV bump replace `F` with `impl Trait`, use `u128`, add `B`
        self.digest_pad(16, &mut f);
        let s = self.size();
        BE::write_u64(&mut self.buffer[s-16..s-8], hi);
        BE::write_u64(&mut self.buffer[s-8..], lo);
        f(&self.buffer);
        self.pos = 0;
    }

    /// Pad message with a given padding `P`
    ///
    /// Returns `PadError` if the internal buffer is full, which can only
    /// happen if `input_lazy` was used.
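    ///
    /// A sketch using PKCS#7 padding from the re-exported `block_padding`
    /// crate; the `U8` block size and the 3-byte input are illustrative only:
    ///
    /// ```
    /// use block_buffer::BlockBuffer;
    /// use block_buffer::block_padding::Pkcs7;
    /// use block_buffer::generic_array::typenum::U8;
    ///
    /// let mut buf = BlockBuffer::<U8>::default();
    /// buf.input(&[1, 2, 3], |_block| {});
    /// // Five bytes are missing from the block, so PKCS#7 fills them with 0x05.
    /// let block = buf.pad_with::<Pkcs7>().unwrap();
    /// assert_eq!(block.as_slice(), &[1, 2, 3, 5, 5, 5, 5, 5]);
    /// ```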
    #[inline]
    pub fn pad_with<P: Padding>(&mut self)
        -> Result<&mut GenericArray<u8, BlockSize>, PadError>
    {
        P::pad_block(&mut self.buffer[..], self.pos)?;
        self.pos = 0;
        Ok(&mut self.buffer)
    }

    /// Return the size of the internal buffer in bytes
    #[inline]
    pub fn size(&self) -> usize {
        BlockSize::to_usize()
    }

    /// Return current cursor position
    #[inline]
    pub fn position(&self) -> usize {
        self.pos
    }

    /// Return the number of remaining bytes in the internal buffer
    #[inline]
    pub fn remaining(&self) -> usize {
        self.size() - self.pos
    }

    /// Reset buffer by setting cursor position to zero
    #[inline]
    pub fn reset(&mut self) {
        self.pos = 0
    }
}