// vendor/miniz_oxide/src/inflate/core.rs (vendored in rustc 1.63.0+dfsg1 sources)
1 //! Streaming decompression functionality.
2
3 use super::*;
4 use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER};
5
6 use ::core::convert::TryInto;
7 use ::core::{cmp, slice};
8
9 use self::output_buffer::OutputBuffer;
10
/// Size of the decompression dictionary/window (32 KiB), the maximum
/// look-behind distance permitted by the zlib/DEFLATE formats.
pub const TINFL_LZ_DICT_SIZE: usize = 32_768;
12
/// A struct containing huffman code lengths and the huffman code tree used by the decompressor.
struct HuffmanTable {
    /// Length of the code at each index.
    pub code_size: [u8; MAX_HUFF_SYMBOLS_0],
    /// Fast lookup table for shorter huffman codes.
    ///
    /// Indexed by the low `FAST_LOOKUP_BITS` of the bit buffer.
    /// See `HuffmanTable::fast_lookup`.
    pub look_up: [i16; FAST_LOOKUP_SIZE as usize],
    /// Full huffman tree, stored as a flat array.
    ///
    /// Positive values are edge nodes/symbols, negative values are
    /// parent nodes/references to other nodes.
    pub tree: [i16; MAX_HUFF_TREE_SIZE],
}
27
28 impl HuffmanTable {
29 const fn new() -> HuffmanTable {
30 HuffmanTable {
31 code_size: [0; MAX_HUFF_SYMBOLS_0],
32 look_up: [0; FAST_LOOKUP_SIZE as usize],
33 tree: [0; MAX_HUFF_TREE_SIZE],
34 }
35 }
36
37 /// Look for a symbol in the fast lookup table.
38 /// The symbol is stored in the lower 9 bits, the length in the next 6.
39 /// If the returned value is negative, the code wasn't found in the
40 /// fast lookup table and the full tree has to be traversed to find the code.
41 #[inline]
42 fn fast_lookup(&self, bit_buf: BitBuffer) -> i16 {
43 self.look_up[(bit_buf & BitBuffer::from(FAST_LOOKUP_SIZE - 1)) as usize]
44 }
45
46 /// Get the symbol and the code length from the huffman tree.
47 #[inline]
48 fn tree_lookup(&self, fast_symbol: i32, bit_buf: BitBuffer, mut code_len: u32) -> (i32, u32) {
49 let mut symbol = fast_symbol;
50 // We step through the tree until we encounter a positive value, which indicates a
51 // symbol.
52 loop {
53 // symbol here indicates the position of the left (0) node, if the next bit is 1
54 // we add 1 to the lookup position to get the right node.
55 symbol = i32::from(self.tree[(!symbol + ((bit_buf >> code_len) & 1) as i32) as usize]);
56 code_len += 1;
57 if symbol >= 0 {
58 break;
59 }
60 }
61 (symbol, code_len)
62 }
63
64 #[inline]
65 /// Look up a symbol and code length from the bits in the provided bit buffer.
66 ///
67 /// Returns Some(symbol, length) on success,
68 /// None if the length is 0.
69 ///
70 /// It's possible we could avoid checking for 0 if we can guarantee a sane table.
71 /// TODO: Check if a smaller type for code_len helps performance.
72 fn lookup(&self, bit_buf: BitBuffer) -> Option<(i32, u32)> {
73 let symbol = self.fast_lookup(bit_buf).into();
74 if symbol >= 0 {
75 if (symbol >> 9) as u32 != 0 {
76 Some((symbol, (symbol >> 9) as u32))
77 } else {
78 // Zero-length code.
79 None
80 }
81 } else {
82 // We didn't get a symbol from the fast lookup table, so check the tree instead.
83 Some(self.tree_lookup(symbol, bit_buf, FAST_LOOKUP_BITS.into()))
84 }
85 }
86 }
87
/// The number of huffman tables used.
const MAX_HUFF_TABLES: usize = 3;
/// The length of the first (literal/length) huffman table.
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// The length of the second (distance) huffman table.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// The length of the last (huffman code length) huffman table.
const _MAX_HUFF_SYMBOLS_2: usize = 19;
/// The maximum length of a code that can be looked up in the fast lookup table.
const FAST_LOOKUP_BITS: u8 = 10;
/// The size of the fast lookup table.
const FAST_LOOKUP_SIZE: u32 = 1 << FAST_LOOKUP_BITS;
/// Size of the flattened huffman tree: room for two entries (a node pair)
/// per litlen symbol.
const MAX_HUFF_TREE_SIZE: usize = MAX_HUFF_SYMBOLS_0 * 2;
/// Index of the literal/length table in `DecompressorOxide::tables`.
const LITLEN_TABLE: usize = 0;
/// Index of the distance table in `DecompressorOxide::tables`.
const DIST_TABLE: usize = 1;
/// Index of the huffman code length table in `DecompressorOxide::tables`.
const HUFFLEN_TABLE: usize = 2;
104
/// Flags to [`decompress()`] to control how inflation works.
///
/// These define bits for a bitmask argument.
pub mod inflate_flags {
    /// Should we try to parse a zlib header?
    ///
    /// If unset, [`decompress()`] will expect an RFC1951 deflate stream.  If set, it will expect an
    /// RFC1950 zlib wrapper around the deflate stream.
    pub const TINFL_FLAG_PARSE_ZLIB_HEADER: u32 = 1;

    /// There will be more input that hasn't been given to the decompressor yet.
    ///
    /// This is useful when you want to decompress what you have so far,
    /// even if you know there is probably more input that hasn't gotten here yet (_e.g._, over a
    /// network connection).  When [`decompress()`][super::decompress] reaches the end of the input
    /// without finding the end of the compressed stream, it will return
    /// [`TINFLStatus::NeedsMoreInput`][super::TINFLStatus::NeedsMoreInput] if this is set,
    /// indicating that you should get more data before calling again.  If not set, it will return
    /// [`TINFLStatus::FailedCannotMakeProgress`][super::TINFLStatus::FailedCannotMakeProgress]
    /// suggesting the stream is corrupt, since you claimed it was all there.
    pub const TINFL_FLAG_HAS_MORE_INPUT: u32 = 2;

    /// The output buffer should not wrap around.
    pub const TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: u32 = 4;

    /// Calculate the adler32 checksum of the output data even if we're not inflating a zlib stream.
    ///
    /// If [`TINFL_FLAG_IGNORE_ADLER32`] is specified, it will override this.
    ///
    /// NOTE: Enabling/disabling this between calls to decompress will result in an incorrect
    /// checksum.
    pub const TINFL_FLAG_COMPUTE_ADLER32: u32 = 8;

    /// Ignore adler32 checksum even if we are inflating a zlib stream.
    ///
    /// Overrides [`TINFL_FLAG_COMPUTE_ADLER32`] if both are enabled.
    ///
    /// NOTE: This flag does not exist in miniz as it does not support this and is a
    /// custom addition for miniz_oxide.
    ///
    /// NOTE: Should not be changed from enabled to disabled after decompression has started,
    /// this will result in checksum failure (outside the unlikely event where the checksum happens
    /// to match anyway).
    pub const TINFL_FLAG_IGNORE_ADLER32: u32 = 64;
}
150
use self::inflate_flags::*;

/// Minimum number of entries in each of the three huffman tables
/// (litlen, dist, hufflen respectively).
const MIN_TABLE_SIZES: [u16; 3] = [257, 1, 4];

/// On 64-bit targets a 64-bit bit buffer is used, which can be refilled
/// 32 bits (4 bytes) at a time.
#[cfg(target_pointer_width = "64")]
type BitBuffer = u64;

/// On other targets a 32-bit bit buffer is used, refilled 16 bits at a time.
#[cfg(not(target_pointer_width = "64"))]
type BitBuffer = u32;
160
/// Main decompression struct.
///
pub struct DecompressorOxide {
    /// Current state of the decompressor.
    state: core::State,
    /// Number of bits in the bit buffer.
    num_bits: u32,
    /// Zlib CMF (compression method and flags) header byte.
    z_header0: u32,
    /// Zlib FLG (flags) header byte.
    z_header1: u32,
    /// Adler32 checksum from the zlib header.
    z_adler32: u32,
    /// 1 if the current block is the last block, 0 otherwise.
    finish: u32,
    /// The type of the current block.
    block_type: u32,
    /// 1 if the adler32 value should be checked.
    check_adler32: u32,
    /// Last match distance.
    dist: u32,
    /// Variable used for match length, symbols, and a number of other things.
    counter: u32,
    /// Number of extra bits for the last length or distance code.
    num_extra: u32,
    /// Number of entries in each huffman table.
    table_sizes: [u32; MAX_HUFF_TABLES],
    /// Buffer of input data.
    bit_buf: BitBuffer,
    /// Huffman tables.
    tables: [HuffmanTable; MAX_HUFF_TABLES],
    /// Raw block header.
    raw_header: [u8; 4],
    /// Huffman length codes.
    // NOTE(review): the extra 137 bytes of slack presumably absorbs overshoot
    // from run-length decoding of the code lengths — confirm against the
    // ReadExtraBitsCodeSize state before relying on it.
    len_codes: [u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137],
}
197
198 impl DecompressorOxide {
199 /// Create a new tinfl_decompressor with all fields set to 0.
200 pub fn new() -> DecompressorOxide {
201 DecompressorOxide::default()
202 }
203
204 /// Set the current state to `Start`.
205 #[inline]
206 pub fn init(&mut self) {
207 // The rest of the data is reset or overwritten when used.
208 self.state = core::State::Start;
209 }
210
211 /// Returns the adler32 checksum of the currently decompressed data.
212 /// Note: Will return Some(1) if decompressing zlib but ignoring adler32.
213 #[inline]
214 pub fn adler32(&self) -> Option<u32> {
215 if self.state != State::Start && !self.state.is_failure() && self.z_header0 != 0 {
216 Some(self.check_adler32)
217 } else {
218 None
219 }
220 }
221
222 /// Returns the adler32 that was read from the zlib header if it exists.
223 #[inline]
224 pub fn adler32_header(&self) -> Option<u32> {
225 if self.state != State::Start && self.state != State::BadZlibHeader && self.z_header0 != 0 {
226 Some(self.z_adler32)
227 } else {
228 None
229 }
230 }
231 }
232
impl Default for DecompressorOxide {
    /// Create a new tinfl_decompressor with all fields set to 0.
    #[inline(always)]
    fn default() -> Self {
        DecompressorOxide {
            state: core::State::Start,
            num_bits: 0,
            z_header0: 0,
            z_header1: 0,
            z_adler32: 0,
            finish: 0,
            block_type: 0,
            check_adler32: 0,
            dist: 0,
            counter: 0,
            num_extra: 0,
            table_sizes: [0; MAX_HUFF_TABLES],
            bit_buf: 0,
            // TODO:(oyvindln) Check that copies here are optimized out in release mode.
            // HuffmanTable is large, so an unoptimized copy per element would hurt.
            tables: [
                HuffmanTable::new(),
                HuffmanTable::new(),
                HuffmanTable::new(),
            ],
            raw_header: [0; 4],
            len_codes: [0; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137],
        }
    }
}
262
/// States of the decompressor state machine.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum State {
    Start = 0,
    ReadZlibCmf,
    ReadZlibFlg,
    ReadBlockHeader,
    BlockTypeNoCompression,
    RawHeader,
    RawMemcpy1,
    RawMemcpy2,
    ReadTableSizes,
    ReadHufflenTableCodeSize,
    ReadLitlenDistTablesCodeSize,
    ReadExtraBitsCodeSize,
    DecodeLitlen,
    WriteSymbol,
    ReadExtraBitsLitlen,
    DecodeDistance,
    ReadExtraBitsDistance,
    RawReadFirstByte,
    RawStoreFirstByte,
    WriteLenBytesToEnd,
    BlockDone,
    HuffDecodeOuterLoop1,
    HuffDecodeOuterLoop2,
    ReadAdler32,

    // Terminal success state.
    DoneForever,

    // Failure states.
    BlockTypeUnexpected,
    BadCodeSizeSum,
    BadTotalSymbols,
    BadZlibHeader,
    DistanceOutOfBounds,
    BadRawLength,
    BadCodeSizeDistPrevLookup,
    InvalidLitlen,
    InvalidDist,
    InvalidCodeLen,
}
304
305 impl State {
306 fn is_failure(self) -> bool {
307 match self {
308 BlockTypeUnexpected => true,
309 BadCodeSizeSum => true,
310 BadTotalSymbols => true,
311 BadZlibHeader => true,
312 DistanceOutOfBounds => true,
313 BadRawLength => true,
314 BadCodeSizeDistPrevLookup => true,
315 InvalidLitlen => true,
316 InvalidDist => true,
317 _ => false,
318 }
319 }
320
321 #[inline]
322 fn begin(&mut self, new_state: State) {
323 *self = new_state;
324 }
325 }
326
use self::State::*;

// Not sure why miniz uses 32-bit values for these, maybe alignment/cache again?
// # Optimization
// We add a extra value at the end and make the tables 32 elements long
// so we can use a mask to avoid bounds checks.
// The invalid values are set to something high enough to avoid underflowing
// the match length.
/// Base length for each length code.
///
/// The base is used together with the value of the extra bits to decode the actual
/// length/distance values in a match.
#[rustfmt::skip]
const LENGTH_BASE: [u16; 32] = [
    3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23,  27,  31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 512, 512, 512
];

/// Number of extra bits for each length code.
#[rustfmt::skip]
const LENGTH_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0
];

/// Base length for each distance code.
#[rustfmt::skip]
const DIST_BASE: [u16; 32] = [
    1,    2,    3,    4,    5,    7,      9,      13,     17,     25,    33,
    49,   65,   97,   129,  193,  257,    385,    513,    769,    1025,  1537,
    2049, 3073, 4097, 6145, 8193, 12_289, 16_385, 24_577, 32_768, 32_768
];

/// Number of extra bits for each distance code.
#[rustfmt::skip]
const DIST_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
    7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13
];

/// The mask used when indexing the base/extra arrays above (tables are 32
/// entries long, so `index & BASE_EXTRA_MASK` can never go out of bounds).
const BASE_EXTRA_MASK: usize = 32 - 1;
369
/// Sets the value of all the elements of the slice to `val`.
#[inline]
fn memset<T: Copy>(slice: &mut [T], val: T) {
    slice.iter_mut().for_each(|x| *x = val);
}
377
/// Read an le u16 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than two bytes left.
#[inline]
fn read_u16_le(iter: &mut slice::Iter<u8>) -> u16 {
    let two_bytes: [u8; 2] = iter.as_slice()[..2].try_into().unwrap();
    // Advance past the two bytes we just peeked at.
    iter.nth(1);
    u16::from_le_bytes(two_bytes)
}
391
/// Read an le u32 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than four bytes left.
#[inline(always)]
#[cfg(target_pointer_width = "64")]
fn read_u32_le(iter: &mut slice::Iter<u8>) -> u32 {
    let four_bytes: [u8; 4] = iter.as_slice()[..4].try_into().unwrap();
    // Advance past the four bytes we just peeked at.
    iter.nth(3);
    u32::from_le_bytes(four_bytes)
}
406
407 /// Ensure that there is data in the bit buffer.
408 ///
409 /// On 64-bit platform, we use a 64-bit value so this will
410 /// result in there being at least 32 bits in the bit buffer.
411 /// This function assumes that there is at least 4 bytes left in the input buffer.
412 #[inline(always)]
413 #[cfg(target_pointer_width = "64")]
414 fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) {
415 // Read four bytes into the buffer at once.
416 if l.num_bits < 30 {
417 l.bit_buf |= BitBuffer::from(read_u32_le(in_iter)) << l.num_bits;
418 l.num_bits += 32;
419 }
420 }
421
/// Same as previous, but for non-64-bit platforms.
/// Ensures at least 16 bits are present, requires at least 2 bytes in the in buffer.
#[inline(always)]
#[cfg(not(target_pointer_width = "64"))]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) {
    // Only refill if there is room for another 16-bit read.
    if l.num_bits >= 15 {
        return;
    }
    // If the buffer is 32-bit wide, read 2 bytes instead.
    l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
    l.num_bits += 16;
}
433
434 /// Check that the zlib header is correct and that there is enough space in the buffer
435 /// for the window size specified in the header.
436 ///
437 /// See https://tools.ietf.org/html/rfc1950
438 #[inline]
439 fn validate_zlib_header(cmf: u32, flg: u32, flags: u32, mask: usize) -> Action {
440 let mut failed =
441 // cmf + flg should be divisible by 31.
442 (((cmf * 256) + flg) % 31 != 0) ||
443 // If this flag is set, a dictionary was used for this zlib compressed data.
444 // This is currently not supported by miniz or miniz-oxide
445 ((flg & 0b0010_0000) != 0) ||
446 // Compression method. Only 8(DEFLATE) is defined by the standard.
447 ((cmf & 15) != 8);
448
449 let window_size = 1 << ((cmf >> 4) + 8);
450 if (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) == 0 {
451 // Bail if the buffer is wrapping and the window size is larger than the buffer.
452 failed |= (mask + 1) < window_size;
453 }
454
455 // Zlib doesn't allow window sizes above 32 * 1024.
456 failed |= window_size > 32_768;
457
458 if failed {
459 Action::Jump(BadZlibHeader)
460 } else {
461 Action::Jump(ReadBlockHeader)
462 }
463 }
464
/// The outcome of one step of the decompression state machine.
enum Action {
    /// Stay in the current state.
    None,
    /// Move to the given state.
    Jump(State),
    /// Stop and return the given status.
    End(TINFLStatus),
}
470
/// Try to decode the next huffman code, and puts it in the counter field of the decompressor
/// if successful.
///
/// # Returns
/// The specified action returned from `f` on success,
/// `Action::End` if there are not enough data left to decode a symbol.
fn decode_huffman_code<F>(
    r: &mut DecompressorOxide,
    l: &mut LocalVars,
    table: usize,
    flags: u32,
    in_iter: &mut slice::Iter<u8>,
    f: F,
) -> Action
where
    F: FnOnce(&mut DecompressorOxide, &mut LocalVars, i32) -> Action,
{
    // As the huffman codes can be up to 15 bits long we need at least 15 bits
    // ready in the bit buffer to start decoding the next huffman code.
    if l.num_bits < 15 {
        // First, make sure there is enough data in the bit buffer to decode a huffman code.
        if in_iter.len() < 2 {
            // If there is less than 2 bytes left in the input buffer, we try to look up
            // the huffman code with what's available, and return if that doesn't succeed.
            // Original explanation in miniz:
            // /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
            //  * remaining in the input buffer falls below 2. */
            // /* It reads just enough bytes from the input stream that are needed to decode
            //  * the next Huffman code (and absolutely no more). It works by trying to fully
            //  * decode a */
            // /* Huffman code by using whatever bits are currently present in the bit buffer.
            //  * If this fails, it reads another byte, and tries again until it succeeds or
            //  * until the */
            // /* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
            loop {
                let mut temp = i32::from(r.tables[table].fast_lookup(l.bit_buf));

                if temp >= 0 {
                    // Found in the fast table; only done if the code length is
                    // valid and we actually have that many bits buffered.
                    let code_len = (temp >> 9) as u32;
                    if (code_len != 0) && (l.num_bits >= code_len) {
                        break;
                    }
                } else if l.num_bits > FAST_LOOKUP_BITS.into() {
                    // Walk the tree as far as the buffered bits allow.
                    let mut code_len = u32::from(FAST_LOOKUP_BITS);
                    loop {
                        temp = i32::from(
                            r.tables[table].tree
                                [(!temp + ((l.bit_buf >> code_len) & 1) as i32) as usize],
                        );
                        code_len += 1;
                        if temp >= 0 || l.num_bits < code_len + 1 {
                            break;
                        }
                    }
                    if temp >= 0 {
                        break;
                    }
                }

                // TODO: miniz jumps straight to here after getting here again after failing to read
                // a byte.
                // Doing that lets miniz avoid re-doing the lookup that that was done in the
                // previous call.
                let mut byte = 0;
                if let a @ Action::End(_) = read_byte(in_iter, flags, |b| {
                    byte = b;
                    Action::None
                }) {
                    return a;
                };

                // Do this outside closure for now to avoid borrowing r.
                l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
                l.num_bits += 8;

                // 15 bits is the longest possible deflate huffman code, so at
                // this point a lookup is guaranteed to terminate.
                if l.num_bits >= 15 {
                    break;
                }
            }
        } else {
            // There is enough data in the input buffer, so read the next two bytes
            // and add them to the bit buffer.
            // Unwrapping here is fine since we just checked that there are at least two
            // bytes left.
            l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
            l.num_bits += 16;
        }
    }

    // We now have at least 15 bits in the input buffer.
    let mut symbol = i32::from(r.tables[table].fast_lookup(l.bit_buf));
    let code_len;
    // If the symbol was found in the fast lookup table.
    if symbol >= 0 {
        // Get the length value from the top bits.
        // As we shift down the sign bit, converting to an unsigned value
        // shouldn't overflow.
        code_len = (symbol >> 9) as u32;
        // Mask out the length value.
        symbol &= 511;
    } else {
        let res = r.tables[table].tree_lookup(symbol, l.bit_buf, u32::from(FAST_LOOKUP_BITS));
        symbol = res.0;
        code_len = res.1 as u32;
    };

    if code_len == 0 {
        return Action::Jump(InvalidCodeLen);
    }

    // Consume the bits of the decoded code before invoking the continuation.
    l.bit_buf >>= code_len as u32;
    l.num_bits -= code_len;
    f(r, l, symbol)
}
585
586 /// Try to read one byte from `in_iter` and call `f` with the read byte as an argument,
587 /// returning the result.
588 /// If reading fails, `Action::End is returned`
589 #[inline]
590 fn read_byte<F>(in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action
591 where
592 F: FnOnce(u8) -> Action,
593 {
594 match in_iter.next() {
595 None => end_of_input(flags),
596 Some(&byte) => f(byte),
597 }
598 }
599
600 // TODO: `l: &mut LocalVars` may be slow similar to decompress_fast (even with inline(always))
601 /// Try to read `amount` number of bits from `in_iter` and call the function `f` with the bits as an
602 /// an argument after reading, returning the result of that function, or `Action::End` if there are
603 /// not enough bytes left.
604 #[inline]
605 #[allow(clippy::while_immutable_condition)]
606 fn read_bits<F>(
607 l: &mut LocalVars,
608 amount: u32,
609 in_iter: &mut slice::Iter<u8>,
610 flags: u32,
611 f: F,
612 ) -> Action
613 where
614 F: FnOnce(&mut LocalVars, BitBuffer) -> Action,
615 {
616 // Clippy gives a false positive warning here due to the closure.
617 // Read enough bytes from the input iterator to cover the number of bits we want.
618 while l.num_bits < amount {
619 match read_byte(in_iter, flags, |byte| {
620 l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
621 l.num_bits += 8;
622 Action::None
623 }) {
624 Action::None => (),
625 // If there are not enough bytes in the input iterator, return and signal that we need
626 // more.
627 action => return action,
628 }
629 }
630
631 let bits = l.bit_buf & ((1 << amount) - 1);
632 l.bit_buf >>= amount;
633 l.num_bits -= amount;
634 f(l, bits)
635 }
636
637 #[inline]
638 fn pad_to_bytes<F>(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action
639 where
640 F: FnOnce(&mut LocalVars) -> Action,
641 {
642 let num_bits = l.num_bits & 7;
643 read_bits(l, num_bits, in_iter, flags, |l, _| f(l))
644 }
645
646 #[inline]
647 fn end_of_input(flags: u32) -> Action {
648 Action::End(if flags & TINFL_FLAG_HAS_MORE_INPUT != 0 {
649 TINFLStatus::NeedsMoreInput
650 } else {
651 TINFLStatus::FailedCannotMakeProgress
652 })
653 }
654
655 #[inline]
656 fn undo_bytes(l: &mut LocalVars, max: u32) -> u32 {
657 let res = cmp::min(l.num_bits >> 3, max);
658 l.num_bits -= res << 3;
659 res
660 }
661
662 fn start_static_table(r: &mut DecompressorOxide) {
663 r.table_sizes[LITLEN_TABLE] = 288;
664 r.table_sizes[DIST_TABLE] = 32;
665 memset(&mut r.tables[LITLEN_TABLE].code_size[0..144], 8);
666 memset(&mut r.tables[LITLEN_TABLE].code_size[144..256], 9);
667 memset(&mut r.tables[LITLEN_TABLE].code_size[256..280], 7);
668 memset(&mut r.tables[LITLEN_TABLE].code_size[280..288], 8);
669 memset(&mut r.tables[DIST_TABLE].code_size[0..32], 5);
670 }
671
/// Build the fast lookup table and huffman tree for the table selected by
/// `r.block_type`, then repeat for each lower-numbered table down to the
/// litlen table (0), or jump out early for the hufflen table (2).
fn init_tree(r: &mut DecompressorOxide, l: &mut LocalVars) -> Action {
    loop {
        let table = &mut r.tables[r.block_type as usize];
        let table_size = r.table_sizes[r.block_type as usize] as usize;
        let mut total_symbols = [0u32; 16];
        let mut next_code = [0u32; 17];
        memset(&mut table.look_up[..], 0);
        memset(&mut table.tree[..], 0);

        // Count how many codes there are of each length.
        for &code_size in &table.code_size[..table_size] {
            total_symbols[code_size as usize] += 1;
        }

        // Compute the first canonical code of each length.
        let mut used_symbols = 0;
        let mut total = 0;
        for i in 1..16 {
            used_symbols += total_symbols[i];
            total += total_symbols[i];
            total <<= 1;
            next_code[i + 1] = total;
        }

        // An over- or under-subscribed code length set is invalid, except in
        // the degenerate case of at most one used symbol.
        if total != 65_536 && used_symbols > 1 {
            return Action::Jump(BadTotalSymbols);
        }

        let mut tree_next = -1;
        for symbol_index in 0..table_size {
            let mut rev_code = 0;
            let code_size = table.code_size[symbol_index];
            if code_size == 0 {
                continue;
            }

            // Assign the next canonical code of this length to the symbol.
            let mut cur_code = next_code[code_size as usize];
            next_code[code_size as usize] += 1;

            // Reverse the code's bits, as codes are matched LSB-first.
            for _ in 0..code_size {
                rev_code = (rev_code << 1) | (cur_code & 1);
                cur_code >>= 1;
            }

            // Short codes fill every fast-table slot whose low bits match,
            // storing the code length in the high bits and the symbol below.
            if code_size <= FAST_LOOKUP_BITS {
                let k = (i16::from(code_size) << 9) | symbol_index as i16;
                while rev_code < FAST_LOOKUP_SIZE {
                    table.look_up[rev_code as usize] = k;
                    rev_code += 1 << code_size;
                }
                continue;
            }

            // Longer codes: the fast-table slot holds a negative reference
            // into `tree`, which is grown a node pair (-2) at a time.
            let mut tree_cur = table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize];
            if tree_cur == 0 {
                table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize] = tree_next as i16;
                tree_cur = tree_next;
                tree_next -= 2;
            }

            rev_code >>= FAST_LOOKUP_BITS - 1;
            for _ in FAST_LOOKUP_BITS + 1..code_size {
                rev_code >>= 1;
                tree_cur -= (rev_code & 1) as i16;
                if table.tree[(-tree_cur - 1) as usize] == 0 {
                    // Unvisited branch: allocate a fresh node pair.
                    table.tree[(-tree_cur - 1) as usize] = tree_next as i16;
                    tree_cur = tree_next;
                    tree_next -= 2;
                } else {
                    tree_cur = table.tree[(-tree_cur - 1) as usize];
                }
            }

            // The final bit selects the leaf that holds the symbol itself.
            rev_code >>= 1;
            tree_cur -= (rev_code & 1) as i16;
            table.tree[(-tree_cur - 1) as usize] = symbol_index as i16;
        }

        if r.block_type == 2 {
            l.counter = 0;
            return Action::Jump(ReadLitlenDistTablesCodeSize);
        }

        if r.block_type == 0 {
            break;
        }
        r.block_type -= 1;
    }

    l.counter = 0;
    Action::Jump(DecodeLitlen)
}
762
// A helper macro for generating the state machine.
//
// As Rust doesn't have fallthrough on matches, we have to return to the match statement
// and jump for each state change. (Which would ideally be optimized away, but often isn't.)
macro_rules! generate_state {
    ($state: ident, $state_machine: tt, $f: expr) => {
        loop {
            match $f {
                // Stay in the current state and evaluate `$f` again.
                Action::None => continue,
                // Switch states and restart the outer state-machine match.
                Action::Jump(new_state) => {
                    $state = new_state;
                    continue $state_machine;
                },
                // Stop the state machine entirely with the given result.
                Action::End(result) => break $state_machine result,
            }
        }
    };
}
781
/// Local copies of the decompressor's hottest fields, kept in a plain struct
/// to avoid repeatedly reading/writing them through `&mut DecompressorOxide`
/// and to encourage register use in the main loops.
#[derive(Copy, Clone)]
struct LocalVars {
    /// Buffered input bits.
    pub bit_buf: BitBuffer,
    /// Number of valid bits in `bit_buf`.
    pub num_bits: u32,
    /// Last match distance.
    pub dist: u32,
    /// Scratch value (match length, symbol, etc.).
    pub counter: u32,
    /// Number of extra bits for the last length or distance code.
    pub num_extra: u32,
}
790
/// Copy `match_len` bytes from `source_pos` to `out_pos` within `out_slice`,
/// byte by byte in order, so overlapping regions repeat already-written data
/// correctly. Source indices are wrapped with `out_buf_size_mask`.
#[inline]
fn transfer(
    out_slice: &mut [u8],
    mut src: usize,
    mut dst: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    // Bulk of the match: four byte copies per iteration.
    for _ in 0..match_len >> 2 {
        out_slice[dst] = out_slice[src & out_buf_size_mask];
        out_slice[dst + 1] = out_slice[(src + 1) & out_buf_size_mask];
        out_slice[dst + 2] = out_slice[(src + 2) & out_buf_size_mask];
        out_slice[dst + 3] = out_slice[(src + 3) & out_buf_size_mask];
        src += 4;
        dst += 4;
    }

    // Remaining 0..=3 bytes, copied one at a time in the same order.
    for _ in 0..(match_len & 3) {
        out_slice[dst] = out_slice[src & out_buf_size_mask];
        src += 1;
        dst += 1;
    }
}
823
/// Copy a `match_len`-byte match located `dist` bytes back in the output to
/// the current output position.
///
/// Presumes that there is at least match_len bytes in output left.
#[inline]
fn apply_match(
    out_slice: &mut [u8],
    out_pos: usize,
    dist: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    debug_assert!(out_pos + match_len <= out_slice.len());

    // Wrap the source position in case the output buffer is circular.
    let source_pos = out_pos.wrapping_sub(dist) & out_buf_size_mask;

    if match_len == 3 {
        // Fast path for match len 3.
        out_slice[out_pos] = out_slice[source_pos];
        out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
        out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask];
        return;
    }

    if cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) {
        // We are not on x86 so copy manually.
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
        return;
    }

    if source_pos >= out_pos && (source_pos - out_pos) < match_len {
        // Source starts at or after the destination and the ranges overlap,
        // so a sequential byte copy is required for correct repetition.
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    } else if match_len <= dist && source_pos + match_len < out_slice.len() {
        // Destination and source segments does not intersect and source does not wrap,
        // so a bulk slice copy is safe.
        if source_pos < out_pos {
            let (from_slice, to_slice) = out_slice.split_at_mut(out_pos);
            to_slice[..match_len].copy_from_slice(&from_slice[source_pos..source_pos + match_len]);
        } else {
            let (to_slice, from_slice) = out_slice.split_at_mut(source_pos);
            to_slice[out_pos..out_pos + match_len].copy_from_slice(&from_slice[..match_len]);
        }
    } else {
        // Overlapping or wrapping source: fall back to the safe byte copy.
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    }
}
866
/// Fast inner decompression loop which is run while there is at least
/// 259 bytes left in the output buffer, and at least 6 bytes left in the input buffer
/// (The maximum one match would need + 1).
///
/// This was inspired by a similar optimization in zlib, which uses this info to do
/// faster unchecked copies of multiple bytes at a time.
/// Currently we don't do this here, but this function does avoid having to jump through the
/// big match loop on each state change(as rust does not have fallthrough or gotos at the moment),
/// and already improves decompression speed a fair bit.
fn decompress_fast(
    r: &mut DecompressorOxide,
    in_iter: &mut slice::Iter<u8>,
    out_buf: &mut OutputBuffer,
    flags: u32,
    local_vars: &mut LocalVars,
    out_buf_size_mask: usize,
) -> (TINFLStatus, State) {
    // Make a local copy of the most used variables, to avoid having to update and read from values
    // in a random memory location and to encourage more register use.
    let mut l = *local_vars;
    let mut state;

    let status: TINFLStatus = 'o: loop {
        state = State::DecodeLitlen;
        loop {
            // This function assumes that there is at least 259 bytes left in the output buffer,
            // and that there is at least 14 bytes left in the input buffer. 14 input bytes:
            // 15 (prev lit) + 15 (length) + 5 (length extra) + 15 (dist)
            // + 29 + 32 (left in bit buf, including last 13 dist extra) = 111 bits < 14 bytes
            // We need the one extra byte as we may write one length and one full match
            // before checking again.
            if out_buf.bytes_left() < 259 || in_iter.len() < 14 {
                // Not enough headroom: hand control back to the slow,
                // fully-checked state machine.
                state = State::DecodeLitlen;
                break 'o TINFLStatus::Done;
            }

            fill_bit_buffer(&mut l, in_iter);

            if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {
                l.counter = symbol as u32;
                l.bit_buf >>= code_len;
                l.num_bits -= code_len;

                if (l.counter & 256) != 0 {
                    // The symbol is not a literal.
                    break;
                } else {
                    // If we have a 32-bit buffer we need to read another two bytes now
                    // to have enough bits to keep going.
                    if cfg!(not(target_pointer_width = "64")) {
                        fill_bit_buffer(&mut l, in_iter);
                    }

                    if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {
                        l.bit_buf >>= code_len;
                        l.num_bits -= code_len;
                        // The previous symbol was a literal, so write it directly and check
                        // the next one.
                        out_buf.write_byte(l.counter as u8);
                        if (symbol & 256) != 0 {
                            l.counter = symbol as u32;
                            // The symbol is a length value.
                            break;
                        } else {
                            // The symbol is a literal, so write it directly and continue.
                            out_buf.write_byte(symbol as u8);
                        }
                    } else {
                        state.begin(InvalidCodeLen);
                        break 'o TINFLStatus::Failed;
                    }
                }
            } else {
                state.begin(InvalidCodeLen);
                break 'o TINFLStatus::Failed;
            }
        }

        // Mask the top bits since they may contain length info.
        l.counter &= 511;
        if l.counter == 256 {
            // We hit the end of block symbol.
            state.begin(BlockDone);
            break 'o TINFLStatus::Done;
        } else if l.counter > 285 {
            // Invalid code.
            // We already verified earlier that the code is > 256.
            state.begin(InvalidLitlen);
            break 'o TINFLStatus::Failed;
        } else {
            // The symbol was a length code.
            // # Optimization
            // Mask the value to avoid bounds checks
            // We could use get_unchecked later if can statically verify that
            // this will never go out of bounds.
            l.num_extra = u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
            l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
            // Length and distance codes have a number of extra bits depending on
            // the base, which together with the base gives us the exact value.

            fill_bit_buffer(&mut l, in_iter);
            if l.num_extra != 0 {
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= l.num_extra;
                l.counter += extra_bits as u32;
            }

            // We found a length code, so a distance code should follow.

            if cfg!(not(target_pointer_width = "64")) {
                fill_bit_buffer(&mut l, in_iter);
            }

            if let Some((mut symbol, code_len)) = r.tables[DIST_TABLE].lookup(l.bit_buf) {
                symbol &= 511;
                l.bit_buf >>= code_len;
                l.num_bits -= code_len;
                if symbol > 29 {
                    state.begin(InvalidDist);
                    break 'o TINFLStatus::Failed;
                }

                l.num_extra = u32::from(DIST_EXTRA[symbol as usize]);
                l.dist = u32::from(DIST_BASE[symbol as usize]);
            } else {
                state.begin(InvalidCodeLen);
                break 'o TINFLStatus::Failed;
            }

            if l.num_extra != 0 {
                fill_bit_buffer(&mut l, in_iter);
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= l.num_extra;
                l.dist += extra_bits as u32;
            }

            let position = out_buf.position();
            if l.dist as usize > out_buf.position()
                && (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0)
            {
                // We encountered a distance that refers a position before
                // the start of the decoded data, so we can't continue.
                state.begin(DistanceOutOfBounds);
                break TINFLStatus::Failed;
            }

            apply_match(
                out_buf.get_mut(),
                position,
                l.dist as usize,
                l.counter as usize,
                out_buf_size_mask,
            );

            out_buf.set_position(position + l.counter as usize);
        }
    };

    // Write the local copies of the hot state back to the caller.
    *local_vars = l;
    (status, state)
}
1030
1031 /// Main decompression function. Keeps decompressing data from `in_buf` until the `in_buf` is
1032 /// empty, `out` is full, the end of the deflate stream is hit, or there is an error in the
1033 /// deflate stream.
1034 ///
1035 /// # Arguments
1036 ///
1037 /// `r` is a [`DecompressorOxide`] struct with the state of this stream.
1038 ///
1039 /// `in_buf` is a reference to the compressed data that is to be decompressed. The decompressor will
1040 /// start at the first byte of this buffer.
1041 ///
1042 /// `out` is a reference to the buffer that will store the decompressed data, and that
1043 /// stores previously decompressed data if any.
1044 ///
1045 /// * The offset given by `out_pos` indicates where in the output buffer slice writing should start.
1046 /// * If [`TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF`] is not set, the output buffer is used in a
1047 /// wrapping manner, and it's size is required to be a power of 2.
1048 /// * The decompression function normally needs access to 32KiB of the previously decompressed data
/// (or to the beginning of the decompressed data if less than 32KiB has been decompressed.)
1050 /// - If this data is not available, decompression may fail.
1051 /// - Some deflate compressors allow specifying a window size which limits match distances to
1052 /// less than this, or alternatively an RLE mode where matches will only refer to the previous byte
1053 /// and thus allows a smaller output buffer. The window size can be specified in the zlib
1054 /// header structure, however, the header data should not be relied on to be correct.
1055 ///
1056 /// `flags` indicates settings and status to the decompression function.
1057 /// * The [`TINFL_FLAG_HAS_MORE_INPUT`] has to be specified if more compressed data is to be provided
1058 /// in a subsequent call to this function.
/// * See the [`inflate_flags`] module for details on other flags.
1060 ///
1061 /// # Returns
1062 ///
1063 /// Returns a tuple containing the status of the compressor, the number of input bytes read, and the
1064 /// number of bytes output to `out`.
1065 ///
1066 /// This function shouldn't panic pending any bugs.
pub fn decompress(
    r: &mut DecompressorOxide,
    in_buf: &[u8],
    out: &mut [u8],
    out_pos: usize,
    flags: u32,
) -> (TINFLStatus, usize, usize) {
    // With a non-wrapping buffer the mask is a no-op (all ones); otherwise the
    // mask implements wrap-around addressing into a power-of-2 sized buffer.
    let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 {
        usize::max_value()
    } else {
        // In the case of zero len, any attempt to write would produce HasMoreOutput,
        // so to gracefully process the case of there really being no output,
        // set the mask to all zeros.
        out.len().saturating_sub(1)
    };

    // Ensure the output buffer's size is a power of 2, unless the output buffer
    // is large enough to hold the entire output file (in which case it doesn't
    // matter).
    // Also make sure that the output buffer position is not past the end of the output buffer.
    if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() {
        return (TINFLStatus::BadParam, 0, 0);
    }

    let mut in_iter = in_buf.iter();

    // Resume from whatever state the previous call left off in.
    let mut state = r.state;

    let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos);

    // Make a local copy of the important variables here so we can work with them on the stack.
    let mut l = LocalVars {
        bit_buf: r.bit_buf,
        num_bits: r.num_bits,
        dist: r.dist,
        counter: r.counter,
        num_extra: r.num_extra,
    };

    // The decompressor is a state machine: each arm runs one state via the
    // `generate_state!` macro and produces an `Action` (continue, jump to a
    // new state, or end with a status), which makes the function resumable
    // when input or output runs out mid-stream.
    let mut status = 'state_machine: loop {
        match state {
            Start => generate_state!(state, 'state_machine, {
                // Reset all decompression state before starting a new stream.
                l.bit_buf = 0;
                l.num_bits = 0;
                l.dist = 0;
                l.counter = 0;
                l.num_extra = 0;
                r.z_header0 = 0;
                r.z_header1 = 0;
                r.z_adler32 = 1;
                r.check_adler32 = 1;
                if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
                    Action::Jump(State::ReadZlibCmf)
                } else {
                    Action::Jump(State::ReadBlockHeader)
                }
            }),

            // Read the zlib CMF (compression method and flags) header byte.
            ReadZlibCmf => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |cmf| {
                    r.z_header0 = u32::from(cmf);
                    Action::Jump(State::ReadZlibFlg)
                })
            }),

            // Read the zlib FLG header byte and validate the full header.
            ReadZlibFlg => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |flg| {
                    r.z_header1 = u32::from(flg);
                    validate_zlib_header(r.z_header0, r.z_header1, flags, out_buf_size_mask)
                })
            }),

            // Read the block header and jump to the relevant section depending on the block type.
            ReadBlockHeader => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
                    // Bit 0: final-block flag; bits 1-2: block type.
                    r.finish = (bits & 1) as u32;
                    r.block_type = (bits >> 1) as u32 & 3;
                    match r.block_type {
                        0 => Action::Jump(BlockTypeNoCompression),
                        1 => {
                            start_static_table(r);
                            init_tree(r, l)
                        },
                        2 => {
                            l.counter = 0;
                            Action::Jump(ReadTableSizes)
                        },
                        3 => Action::Jump(BlockTypeUnexpected),
                        _ => unreachable!()
                    }
                })
            }),

            // Raw/Stored/uncompressed block.
            BlockTypeNoCompression => generate_state!(state, 'state_machine, {
                // Raw blocks are byte-aligned, so discard any remaining bits first.
                pad_to_bytes(&mut l, &mut in_iter, flags, |l| {
                    l.counter = 0;
                    Action::Jump(RawHeader)
                })
            }),

            // Check that the raw block header is correct.
            RawHeader => generate_state!(state, 'state_machine, {
                if l.counter < 4 {
                    // Read block length and block length check.
                    if l.num_bits != 0 {
                        read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                            r.raw_header[l.counter as usize] = bits as u8;
                            l.counter += 1;
                            Action::None
                        })
                    } else {
                        read_byte(&mut in_iter, flags, |byte| {
                            r.raw_header[l.counter as usize] = byte;
                            l.counter += 1;
                            Action::None
                        })
                    }
                } else {
                    // Check if the length value of a raw block is correct.
                    // The 2 first (2-byte) words in a raw header are the length and the
                    // ones complement of the length.
                    let length = u16::from(r.raw_header[0]) | (u16::from(r.raw_header[1]) << 8);
                    let check = u16::from(r.raw_header[2]) | (u16::from(r.raw_header[3]) << 8);
                    let valid = length == !check;
                    l.counter = length.into();

                    if !valid {
                        Action::Jump(BadRawLength)
                    } else if l.counter == 0 {
                        // Empty raw block. Sometimes used for synchronization.
                        Action::Jump(BlockDone)
                    } else if l.num_bits != 0 {
                        // There is some data in the bit buffer, so we need to write that first.
                        Action::Jump(RawReadFirstByte)
                    } else {
                        // The bit buffer is empty, so memcpy the rest of the uncompressed data from
                        // the block.
                        Action::Jump(RawMemcpy1)
                    }
                }
            }),

            // Read the byte from the bit buffer.
            RawReadFirstByte => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                    l.dist = bits as u32;
                    Action::Jump(RawStoreFirstByte)
                })
            }),

            // Write the byte we just read to the output buffer.
            RawStoreFirstByte => generate_state!(state, 'state_machine, {
                if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    out_buf.write_byte(l.dist as u8);
                    l.counter -= 1;
                    if l.counter == 0 || l.num_bits == 0 {
                        Action::Jump(RawMemcpy1)
                    } else {
                        // There is still some data left in the bit buffer that needs to be output.
                        // TODO: Changed this to jump to `RawReadfirstbyte` rather than
                        // `RawStoreFirstByte` as that seemed to be the correct path, but this
                        // needs testing.
                        Action::Jump(RawReadFirstByte)
                    }
                }
            }),

            // Check whether the raw block copy is done, or whether we need more
            // output space or more input.
            RawMemcpy1 => generate_state!(state, 'state_machine, {
                if l.counter == 0 {
                    Action::Jump(BlockDone)
                } else if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    Action::Jump(RawMemcpy2)
                }
            }),

            RawMemcpy2 => generate_state!(state, 'state_machine, {
                if in_iter.len() > 0 {
                    // Copy as many raw bytes as possible from the input to the output using memcpy.
                    // Raw block lengths are limited to 64 * 1024, so casting through usize and u32
                    // is not an issue.
                    let space_left = out_buf.bytes_left();
                    let bytes_to_copy = cmp::min(cmp::min(
                        space_left,
                        in_iter.len()),
                        l.counter as usize
                    );

                    out_buf.write_slice(&in_iter.as_slice()[..bytes_to_copy]);

                    // Advance the input iterator past the bytes we just copied.
                    (&mut in_iter).nth(bytes_to_copy - 1);
                    l.counter -= bytes_to_copy as u32;
                    Action::Jump(RawMemcpy1)
                } else {
                    end_of_input(flags)
                }
            }),

            // Read how many huffman codes/symbols are used for each table.
            ReadTableSizes => generate_state!(state, 'state_machine, {
                if l.counter < 3 {
                    let num_bits = [5, 5, 4][l.counter as usize];
                    read_bits(&mut l, num_bits, &mut in_iter, flags, |l, bits| {
                        r.table_sizes[l.counter as usize] =
                            bits as u32 + u32::from(MIN_TABLE_SIZES[l.counter as usize]);
                        l.counter += 1;
                        Action::None
                    })
                } else {
                    memset(&mut r.tables[HUFFLEN_TABLE].code_size[..], 0);
                    l.counter = 0;
                    Action::Jump(ReadHufflenTableCodeSize)
                }
            }),

            // Read the 3-bit lengths of the huffman codes describing the huffman code lengths used
            // to decode the lengths of the main tables.
            ReadHufflenTableCodeSize => generate_state!(state, 'state_machine, {
                if l.counter < r.table_sizes[HUFFLEN_TABLE] {
                    read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
                        // These lengths are not stored in a normal ascending order, but rather one
                        // specified by the deflate specification intended to put the most used
                        // values at the front as trailing zero lengths do not have to be stored.
                        r.tables[HUFFLEN_TABLE]
                            .code_size[HUFFMAN_LENGTH_ORDER[l.counter as usize] as usize] =
                                bits as u8;
                        l.counter += 1;
                        Action::None
                    })
                } else {
                    r.table_sizes[HUFFLEN_TABLE] = 19;
                    init_tree(r, &mut l)
                }
            }),

            // Decode the litlen and dist table code lengths, which are themselves
            // huffman-coded (with run-length codes 16/17/18 handled below).
            ReadLitlenDistTablesCodeSize => generate_state!(state, 'state_machine, {
                if l.counter < r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] {
                    decode_huffman_code(
                        r, &mut l, HUFFLEN_TABLE,
                        flags, &mut in_iter, |r, l, symbol| {
                            l.dist = symbol as u32;
                            if l.dist < 16 {
                                // Symbols 0-15 are literal code lengths.
                                r.len_codes[l.counter as usize] = l.dist as u8;
                                l.counter += 1;
                                Action::None
                            } else if l.dist == 16 && l.counter == 0 {
                                // Code 16 repeats the previous length, which doesn't
                                // exist at position 0.
                                Action::Jump(BadCodeSizeDistPrevLookup)
                            } else {
                                // Codes 16/17/18 take 2/3/7 extra bits respectively.
                                l.num_extra = [2, 3, 7][l.dist as usize - 16];
                                Action::Jump(ReadExtraBitsCodeSize)
                            }
                        }
                    )
                } else if l.counter != r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] {
                    Action::Jump(BadCodeSizeSum)
                } else {
                    // Split the decoded lengths into the litlen and dist tables.
                    r.tables[LITLEN_TABLE].code_size[..r.table_sizes[LITLEN_TABLE] as usize]
                        .copy_from_slice(&r.len_codes[..r.table_sizes[LITLEN_TABLE] as usize]);

                    let dist_table_start = r.table_sizes[LITLEN_TABLE] as usize;
                    let dist_table_end = (r.table_sizes[LITLEN_TABLE] +
                                          r.table_sizes[DIST_TABLE]) as usize;
                    r.tables[DIST_TABLE].code_size[..r.table_sizes[DIST_TABLE] as usize]
                        .copy_from_slice(&r.len_codes[dist_table_start..dist_table_end]);

                    r.block_type -= 1;
                    init_tree(r, &mut l)
                }
            }),

            // Read the extra bits for a 16/17/18 run-length code and expand the run.
            ReadExtraBitsCodeSize => generate_state!(state, 'state_machine, {
                let num_extra = l.num_extra;
                read_bits(&mut l, num_extra, &mut in_iter, flags, |l, mut extra_bits| {
                    // Mask to avoid a bounds check.
                    extra_bits += [3, 3, 11][(l.dist as usize - 16) & 3];
                    let val = if l.dist == 16 {
                        r.len_codes[l.counter as usize - 1]
                    } else {
                        0
                    };

                    memset(
                        &mut r.len_codes[
                            l.counter as usize..l.counter as usize + extra_bits as usize
                        ],
                        val,
                    );
                    l.counter += extra_bits as u32;
                    Action::Jump(ReadLitlenDistTablesCodeSize)
                })
            }),

            DecodeLitlen => generate_state!(state, 'state_machine, {
                if in_iter.len() < 4 || out_buf.bytes_left() < 2 {
                    // See if we can decode a literal with the data we have left.
                    // Jumps to next state (WriteSymbol) if successful.
                    decode_huffman_code(
                        r,
                        &mut l,
                        LITLEN_TABLE,
                        flags,
                        &mut in_iter,
                        |_r, l, symbol| {
                            l.counter = symbol as u32;
                            Action::Jump(WriteSymbol)
                        },
                    )
                } else if
                // If there is enough space, use the fast inner decompression
                // function.
                    out_buf.bytes_left() >= 259 &&
                    in_iter.len() >= 14
                {
                    let (status, new_state) = decompress_fast(
                        r,
                        &mut in_iter,
                        &mut out_buf,
                        flags,
                        &mut l,
                        out_buf_size_mask,
                    );

                    state = new_state;
                    if status == TINFLStatus::Done {
                        Action::Jump(new_state)
                    } else {
                        Action::End(status)
                    }
                } else {
                    fill_bit_buffer(&mut l, &mut in_iter);

                    if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {

                        l.counter = symbol as u32;
                        l.bit_buf >>= code_len;
                        l.num_bits -= code_len;

                        if (l.counter & 256) != 0 {
                            // The symbol is not a literal.
                            Action::Jump(HuffDecodeOuterLoop1)
                        } else {
                            // If we have a 32-bit buffer we need to read another two bytes now
                            // to have enough bits to keep going.
                            if cfg!(not(target_pointer_width = "64")) {
                                fill_bit_buffer(&mut l, &mut in_iter);
                            }

                            if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {

                                l.bit_buf >>= code_len;
                                l.num_bits -= code_len;
                                // The previous symbol was a literal, so write it directly and check
                                // the next one.
                                out_buf.write_byte(l.counter as u8);
                                if (symbol & 256) != 0 {
                                    l.counter = symbol as u32;
                                    // The symbol is a length value.
                                    Action::Jump(HuffDecodeOuterLoop1)
                                } else {
                                    // The symbol is a literal, so write it directly and continue.
                                    out_buf.write_byte(symbol as u8);
                                    Action::None
                                }
                            } else {
                                Action::Jump(InvalidCodeLen)
                            }
                        }
                    } else {
                        Action::Jump(InvalidCodeLen)
                    }
                }
            }),

            // Write a single decoded literal to the output buffer, or jump onwards
            // if the decoded symbol was a length/end-of-block code.
            WriteSymbol => generate_state!(state, 'state_machine, {
                if l.counter >= 256 {
                    Action::Jump(HuffDecodeOuterLoop1)
                } else if out_buf.bytes_left() > 0 {
                    out_buf.write_byte(l.counter as u8);
                    Action::Jump(DecodeLitlen)
                } else {
                    Action::End(TINFLStatus::HasMoreOutput)
                }
            }),

            HuffDecodeOuterLoop1 => generate_state!(state, 'state_machine, {
                // Mask the top bits since they may contain length info.
                l.counter &= 511;

                if l.counter == 256 {
                    // We hit the end of block symbol.
                    Action::Jump(BlockDone)
                } else if l.counter > 285 {
                    // Invalid code.
                    // We already verified earlier that the code is > 256.
                    Action::Jump(InvalidLitlen)
                } else {
                    // # Optimization
                    // Mask the value to avoid bounds checks
                    // We could use get_unchecked later if can statically verify that
                    // this will never go out of bounds.
                    l.num_extra =
                        u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
                    l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
                    // Length and distance codes have a number of extra bits depending on
                    // the base, which together with the base gives us the exact value.
                    if l.num_extra != 0 {
                        Action::Jump(ReadExtraBitsLitlen)
                    } else {
                        Action::Jump(DecodeDistance)
                    }
                }
            }),

            // Read the extra bits of a length code and add them to the length base.
            ReadExtraBitsLitlen => generate_state!(state, 'state_machine, {
                let num_extra = l.num_extra;
                read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
                    l.counter += extra_bits as u32;
                    Action::Jump(DecodeDistance)
                })
            }),

            DecodeDistance => generate_state!(state, 'state_machine, {
                // Try to read a huffman code from the input buffer and look up what
                // length code the decoded symbol refers to.
                decode_huffman_code(r, &mut l, DIST_TABLE, flags, &mut in_iter, |_r, l, symbol| {
                    if symbol > 29 {
                        // Invalid distance code.
                        return Action::Jump(InvalidDist)
                    }
                    // # Optimization
                    // Mask the value to avoid bounds checks
                    // We could use get_unchecked later if can statically verify that
                    // this will never go out of bounds.
                    l.num_extra = u32::from(DIST_EXTRA[symbol as usize & BASE_EXTRA_MASK]);
                    l.dist = u32::from(DIST_BASE[symbol as usize & BASE_EXTRA_MASK]);
                    if l.num_extra != 0 {
                        // Jump to ReadExtraBitsDistance to add the extra distance bits.
                        Action::Jump(ReadExtraBitsDistance)
                    } else {
                        Action::Jump(HuffDecodeOuterLoop2)
                    }
                })
            }),

            // Read the extra bits of a distance code and add them to the distance base.
            ReadExtraBitsDistance => generate_state!(state, 'state_machine, {
                let num_extra = l.num_extra;
                read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
                    l.dist += extra_bits as u32;
                    Action::Jump(HuffDecodeOuterLoop2)
                })
            }),

            // We have a complete (length, distance) pair; copy the match.
            HuffDecodeOuterLoop2 => generate_state!(state, 'state_machine, {
                if l.dist as usize > out_buf.position() &&
                    (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0)
                {
                    // We encountered a distance that refers a position before
                    // the start of the decoded data, so we can't continue.
                    Action::Jump(DistanceOutOfBounds)
                } else {
                    let out_pos = out_buf.position();
                    let source_pos = out_buf.position()
                        .wrapping_sub(l.dist as usize) & out_buf_size_mask;

                    let out_len = out_buf.get_ref().len() as usize;
                    let match_end_pos = out_buf.position() + l.counter as usize;

                    if match_end_pos > out_len ||
                        // miniz doesn't do this check here. Not sure how it makes sure
                        // that this case doesn't happen.
                        (source_pos >= out_pos && (source_pos - out_pos) < l.counter as usize)
                    {
                        // Not enough space for all of the data in the output buffer,
                        // so copy what we have space for.
                        if l.counter == 0 {
                            Action::Jump(DecodeLitlen)
                        } else {
                            Action::Jump(WriteLenBytesToEnd)
                        }
                    } else {
                        apply_match(
                            out_buf.get_mut(),
                            out_pos,
                            l.dist as usize,
                            l.counter as usize,
                            out_buf_size_mask
                        );
                        out_buf.set_position(out_pos + l.counter as usize);
                        Action::Jump(DecodeLitlen)
                    }
                }
            }),

            // Slow path: copy the match byte ranges up to the end of the output
            // buffer, possibly over multiple calls.
            WriteLenBytesToEnd => generate_state!(state, 'state_machine, {
                if out_buf.bytes_left() > 0 {
                    let out_pos = out_buf.position();
                    let source_pos = out_buf.position()
                        .wrapping_sub(l.dist as usize) & out_buf_size_mask;


                    let len = cmp::min(out_buf.bytes_left(), l.counter as usize);

                    transfer(out_buf.get_mut(), source_pos, out_pos, len, out_buf_size_mask);

                    out_buf.set_position(out_pos + len);
                    l.counter -= len as u32;
                    if l.counter == 0 {
                        Action::Jump(DecodeLitlen)
                    } else {
                        Action::None
                    }
                } else {
                    Action::End(TINFLStatus::HasMoreOutput)
                }
            }),

            BlockDone => generate_state!(state, 'state_machine, {
                // End once we've read the last block.
                if r.finish != 0 {
                    pad_to_bytes(&mut l, &mut in_iter, flags, |_| Action::None);

                    // Return any whole bytes still held in the bit buffer back
                    // to the input before reading the trailer.
                    let in_consumed = in_buf.len() - in_iter.len();
                    let undo = undo_bytes(&mut l, in_consumed as u32) as usize;
                    in_iter = in_buf[in_consumed - undo..].iter();

                    l.bit_buf &= ((1 as BitBuffer) << l.num_bits) - 1;
                    debug_assert_eq!(l.num_bits, 0);

                    if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
                        l.counter = 0;
                        Action::Jump(ReadAdler32)
                    } else {
                        Action::Jump(DoneForever)
                    }
                } else {
                    Action::Jump(ReadBlockHeader)
                }
            }),

            // Read the 4 big-endian bytes of the zlib adler32 trailer.
            ReadAdler32 => generate_state!(state, 'state_machine, {
                if l.counter < 4 {
                    if l.num_bits != 0 {
                        read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                            r.z_adler32 <<= 8;
                            r.z_adler32 |= bits as u32;
                            l.counter += 1;
                            Action::None
                        })
                    } else {
                        read_byte(&mut in_iter, flags, |byte| {
                            r.z_adler32 <<= 8;
                            r.z_adler32 |= u32::from(byte);
                            l.counter += 1;
                            Action::None
                        })
                    }
                } else {
                    Action::Jump(DoneForever)
                }
            }),

            // We are done.
            DoneForever => break TINFLStatus::Done,

            // Anything else indicates failure.
            // BadZlibHeader | BadRawLength | BlockTypeUnexpected | DistanceOutOfBounds |
            // BadTotalSymbols | BadCodeSizeDistPrevLookup | BadCodeSizeSum | InvalidLitlen |
            // InvalidDist | InvalidCodeLen
            _ => break TINFLStatus::Failed,
        };
    };

    // Report back any bytes that were pulled into the bit buffer but not yet
    // consumed, so the returned input count reflects only fully-used bytes.
    let in_undo = if status != TINFLStatus::NeedsMoreInput
        && status != TINFLStatus::FailedCannotMakeProgress
    {
        undo_bytes(&mut l, (in_buf.len() - in_iter.len()) as u32) as usize
    } else {
        0
    };

    // Make sure HasMoreOutput overrides NeedsMoreInput if the output buffer is full.
    // (Unless the missing input is the adler32 value in which case we don't need to write anything.)
    // TODO: May want to see if we can do this in a better way.
    if status == TINFLStatus::NeedsMoreInput
        && out_buf.bytes_left() == 0
        && state != State::ReadAdler32
    {
        status = TINFLStatus::HasMoreOutput
    }

    // Persist the local working copies back into the decompressor so the next
    // call can resume where this one stopped.
    r.state = state;
    r.bit_buf = l.bit_buf;
    r.num_bits = l.num_bits;
    r.dist = l.dist;
    r.counter = l.counter;
    r.num_extra = l.num_extra;

    // Mask off any bits above `num_bits` so stale high bits can't leak into
    // the next call.
    r.bit_buf &= ((1 as BitBuffer) << r.num_bits) - 1;

    // If this is a zlib stream, and update the adler32 checksum with the decompressed bytes if
    // requested.
    let need_adler = if (flags & TINFL_FLAG_IGNORE_ADLER32) == 0 {
        flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0
    } else {
        // If TINFL_FLAG_IGNORE_ADLER32 is enabled, ignore the checksum.
        false
    };
    if need_adler && status as i32 >= 0 {
        let out_buf_pos = out_buf.position();
        r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]);

        // disabled so that random input from fuzzer would not be rejected early,
        // before it has a chance to reach interesting parts of code
        if !cfg!(fuzzing) {
            // Once we are done, check if the checksum matches with the one provided in the zlib header.
            if status == TINFLStatus::Done
                && flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0
                && r.check_adler32 != r.z_adler32
            {
                status = TINFLStatus::Adler32Mismatch;
            }
        }
    }

    (
        status,
        in_buf.len() - in_iter.len() - in_undo,
        out_buf.position() - out_pos,
    )
}
1701
#[cfg(test)]
mod test {
    use super::*;

    //TODO: Fix these.

    /// Test helper wrapping `decompress`: returns the status, the *remaining*
    /// (unconsumed) input slice instead of a byte count, and the number of
    /// bytes written to `output_buffer`.
    fn tinfl_decompress_oxide<'i>(
        r: &mut DecompressorOxide,
        input_buffer: &'i [u8],
        output_buffer: &mut [u8],
        flags: u32,
    ) -> (TINFLStatus, &'i [u8], usize) {
        let (status, in_pos, out_pos) = decompress(r, input_buffer, output_buffer, 0, flags);
        (status, &input_buffer[in_pos..], out_pos)
    }

    #[test]
    fn decompress_zlib() {
        // zlib-wrapped deflate stream encoding "Hello, zlib!".
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER;

        let mut b = DecompressorOxide::new();
        const LEN: usize = 32;
        let mut b_buf = vec![0; LEN];

        // This should fail with the out buffer being too small.
        // (Without the non-wrapping flag, a 32-byte buffer is not a valid
        // power-of-2 window for this stream.)
        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags);

        assert_eq!(b_status.0, TINFLStatus::Failed);

        let flags = flags | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        b = DecompressorOxide::new();

        // With TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF set this should no longer fail.
        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags);

        assert_eq!(b_buf[..b_status.2], b"Hello, zlib!"[..]);
        assert_eq!(b_status.0, TINFLStatus::Done);
    }

    #[test]
    fn raw_block() {
        const LEN: usize = 64;

        let text = b"Hello, zlib!";
        // Build a stored (uncompressed) deflate block by hand:
        // final-block/type byte, LEN (little-endian), NLEN (ones complement), data.
        let encoded = {
            let len = text.len();
            let notlen = !len;
            let mut encoded = vec![
                1,
                len as u8,
                (len >> 8) as u8,
                notlen as u8,
                (notlen >> 8) as u8,
            ];
            encoded.extend_from_slice(&text[..]);
            encoded
        };

        //let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER |
        let flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        let mut b = DecompressorOxide::new();

        let mut b_buf = vec![0; LEN];

        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags);
        assert_eq!(b_buf[..b_status.2], text[..]);
        assert_eq!(b_status.0, TINFLStatus::Done);
    }

    /// Look up a code and mask off the length-info bits from the returned
    /// symbol (the table packs extra data in the high bits).
    fn masked_lookup(table: &HuffmanTable, bit_buf: BitBuffer) -> (i32, u32) {
        let ret = table.lookup(bit_buf).unwrap();
        (ret.0 & 511, ret.1)
    }

    #[test]
    fn fixed_table_lookup() {
        let mut d = DecompressorOxide::new();
        d.block_type = 1;
        start_static_table(&mut d);
        let mut l = LocalVars {
            bit_buf: d.bit_buf,
            num_bits: d.num_bits,
            dist: d.dist,
            counter: d.counter,
            num_extra: d.num_extra,
        };
        init_tree(&mut d, &mut l);
        let llt = &d.tables[LITLEN_TABLE];
        let dt = &d.tables[DIST_TABLE];
        // Expected (symbol, code length) pairs from the fixed huffman tables
        // defined in the deflate specification (RFC 1951, section 3.2.6).
        assert_eq!(masked_lookup(llt, 0b00001100), (0, 8));
        assert_eq!(masked_lookup(llt, 0b00011110), (72, 8));
        assert_eq!(masked_lookup(llt, 0b01011110), (74, 8));
        assert_eq!(masked_lookup(llt, 0b11111101), (143, 8));
        assert_eq!(masked_lookup(llt, 0b000010011), (144, 9));
        assert_eq!(masked_lookup(llt, 0b111111111), (255, 9));
        assert_eq!(masked_lookup(llt, 0b00000000), (256, 7));
        assert_eq!(masked_lookup(llt, 0b1110100), (279, 7));
        assert_eq!(masked_lookup(llt, 0b00000011), (280, 8));
        assert_eq!(masked_lookup(llt, 0b11100011), (287, 8));

        assert_eq!(masked_lookup(dt, 0), (0, 5));
        assert_eq!(masked_lookup(dt, 20), (5, 5));
    }

    /// Decompress `input` into a fresh 32KiB buffer and assert both the
    /// returned status and the final decompressor state match expectations.
    fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) {
        let mut r = DecompressorOxide::default();
        let mut output_buf = vec![0; 1024 * 32];
        let flags = if zlib {
            inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
        } else {
            0
        } | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF
            | TINFL_FLAG_HAS_MORE_INPUT;
        let (d_status, _in_bytes, _out_bytes) =
            decompress(&mut r, input, &mut output_buf, 0, flags);
        assert_eq!(expected_status, d_status);
        assert_eq!(expected_state, r.state);
    }

    #[test]
    fn bogus_input() {
        use self::check_result as cr;
        const F: TINFLStatus = TINFLStatus::Failed;
        const OK: TINFLStatus = TINFLStatus::Done;
        // Bad CM.
        cr(&[0x77, 0x85], F, State::BadZlibHeader, true);
        // Bad window size (but check is correct).
        cr(&[0x88, 0x98], F, State::BadZlibHeader, true);
        // Bad check bits.
        cr(&[0x78, 0x98], F, State::BadZlibHeader, true);

        // Too many code lengths. (From inflate library issues)
        cr(
            b"M\xff\xffM*\xad\xad\xad\xad\xad\xad\xad\xcd\xcd\xcdM",
            F,
            State::BadTotalSymbols,
            false,
        );
        // Bad CLEN (also from inflate library issues)
        cr(
            b"\xdd\xff\xff*M\x94ffffffffff",
            F,
            State::BadTotalSymbols,
            false,
        );

        // Port of inflate coverage tests from zlib-ng
        // https://github.com/Dead2/zlib-ng/blob/develop/test/infcover.c
        let c = |a, b, c| cr(a, b, c, false);

        // Invalid uncompressed/raw block length.
        c(&[0, 0, 0, 0, 0], F, State::BadRawLength);
        // Ok empty uncompressed block.
        c(&[3, 0], OK, State::DoneForever);
        // Invalid block type.
        c(&[6], F, State::BlockTypeUnexpected);
        // Ok uncompressed block.
        c(&[1, 1, 0, 0xfe, 0xff, 0], OK, State::DoneForever);
        // Too many litlens, we handle this later than zlib, so this test won't
        // give the same result.
        // c(&[0xfc, 0, 0], F, State::BadTotalSymbols);
        // Invalid set of code lengths - TODO Check if this is the correct error for this.
        c(&[4, 0, 0xfe, 0xff], F, State::BadTotalSymbols);
        // Invalid repeat in list of code lengths.
        // (Try to repeat a non-existent code.)
        c(&[4, 0, 0x24, 0x49, 0], F, State::BadCodeSizeDistPrevLookup);
        // Missing end of block code (should we have a separate error for this?) - fails on further input
        // c(&[4, 0, 0x24, 0xe9, 0xff, 0x6d], F, State::BadTotalSymbols);
        // Invalid set of literals/lengths
        c(
            &[
                4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x71, 0xff, 0xff, 0x93, 0x11, 0,
            ],
            F,
            State::BadTotalSymbols,
        );
        // Invalid set of distances _ needsmoreinput
        // c(&[4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x0f, 0xb4, 0xff, 0xff, 0xc3, 0x84], F, State::BadTotalSymbols);
        // Invalid distance code
        c(&[2, 0x7e, 0xff, 0xff], F, State::InvalidDist);

        // Distance refers to position before the start
        c(
            &[0x0c, 0xc0, 0x81, 0, 0, 0, 0, 0, 0x90, 0xff, 0x6b, 0x4, 0],
            F,
            State::DistanceOutOfBounds,
        );

        // Trailer
        // Bad gzip trailer checksum GZip header not handled by miniz_oxide
        //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
        // Bad gzip trailer length
        //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
    }

    #[test]
    fn empty_output_buffer_non_wrapping() {
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        let mut r = DecompressorOxide::new();
        let mut output_buf = vec![];
        // Check that we handle an empty buffer properly and not panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 4, 0));
    }

    #[test]
    fn empty_output_buffer_wrapping() {
        let encoded = [
            0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32;
        let mut r = DecompressorOxide::new();
        let mut output_buf = vec![];
        // Check that we handle an empty buffer properly and not panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 2, 0));
    }
}