miniz_oxide/inflate/core.rs
//! Streaming decompression functionality.

use super::*;
use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER};
use ::core::cell::Cell;

use ::core::cmp;
use ::core::convert::TryInto;

use self::output_buffer::{InputWrapper, OutputBuffer};

#[cfg(feature = "serde")]
use crate::serde::big_array::BigArray;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

pub const TINFL_LZ_DICT_SIZE: usize = 32_768;

/// A struct containing huffman code lengths and the huffman code tree used by the decompressor.
#[cfg_attr(not(feature = "rustc-dep-of-std"), derive(Clone))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
struct HuffmanTable {
    /// Fast lookup table for shorter huffman codes.
    ///
    /// See `HuffmanTable::fast_lookup`.
    #[cfg_attr(feature = "serde", serde(with = "BigArray"))]
    pub look_up: [i16; FAST_LOOKUP_SIZE as usize],
    /// Full huffman tree.
    ///
    /// Positive values are edge nodes/symbols, negative values are
    /// parent nodes/references to other nodes.
    #[cfg_attr(feature = "serde", serde(with = "BigArray"))]
    pub tree: [i16; MAX_HUFF_TREE_SIZE],
}

impl HuffmanTable {
    const fn new() -> HuffmanTable {
        HuffmanTable {
            look_up: [0; FAST_LOOKUP_SIZE as usize],
            tree: [0; MAX_HUFF_TREE_SIZE],
        }
    }

    /// Look for a symbol in the fast lookup table.
    /// The symbol is stored in the lower 9 bits, the length in the next 6.
    /// If the returned value is negative, the code wasn't found in the
    /// fast lookup table and the full tree has to be traversed to find the code.
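    ///
    /// For example (an illustrative value, not taken from a real table): a stored
    /// entry of `0x1234` (`9 << 9 | 0x34`) decodes to symbol `0x34` with a code
    /// length of 9 bits.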
    #[inline]
    fn fast_lookup(&self, bit_buf: BitBuffer) -> i16 {
        self.look_up[(bit_buf & BitBuffer::from(FAST_LOOKUP_SIZE - 1)) as usize]
    }

    /// Get the symbol and the code length from the huffman tree.
    #[inline]
    fn tree_lookup(&self, fast_symbol: i32, bit_buf: BitBuffer, mut code_len: u8) -> (i32, u32) {
        let mut symbol = fast_symbol;
        // We step through the tree until we encounter a positive value, which indicates a
        // symbol.
        loop {
            // symbol here indicates the position of the left (0) node, if the next bit is 1
            // we add 1 to the lookup position to get the right node.
            let tree_index = (!symbol + ((bit_buf >> code_len) & 1) as i32) as usize;

            // Use get here to avoid generating panic code.
            // The init_tree code should prevent this from actually going out of bounds,
            // but if there were somehow a bug with that
            // we would at worst end up with corrupted output in release mode.
            debug_assert!(tree_index < self.tree.len());
            symbol = i32::from(self.tree.get(tree_index).copied().unwrap_or(i16::MAX));
            code_len += 1;
            if symbol >= 0 {
                break;
            }
        }
        // Note: Using a u8 for code_len inside this function seems to improve performance,
        // but changing it in `LocalVars` seems to worsen things, so we convert it to a u32 here.
        (symbol, u32::from(code_len))
    }

    #[inline]
    /// Look up a symbol and code length from the bits in the provided bit buffer.
    ///
    /// Returns a `(symbol, code_length)` tuple, falling back to a traversal of the
    /// full tree if the code is too long for the fast lookup table.
    ///
    /// It's possible we could avoid checking for a zero length if we can guarantee a sane table.
    /// TODO: Check if a smaller type for code_len helps performance.
    fn lookup(&self, bit_buf: BitBuffer) -> (i32, u32) {
        let symbol = self.fast_lookup(bit_buf).into();
        if symbol >= 0 {
            let length = (symbol >> 9) as u32;
            (symbol, length)
        } else {
            // We didn't get a symbol from the fast lookup table, so check the tree instead.
            self.tree_lookup(symbol, bit_buf, FAST_LOOKUP_BITS)
        }
    }
}

/// The number of huffman tables used.
const MAX_HUFF_TABLES: usize = 3;
/// The length of the first (literal/length) huffman table.
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// The length of the second (distance) huffman table.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// The length of the last (huffman code length) huffman table.
const MAX_HUFF_SYMBOLS_2: usize = 19;
/// The maximum length of a code that can be looked up in the fast lookup table.
const FAST_LOOKUP_BITS: u8 = 10;
/// The size of the fast lookup table.
const FAST_LOOKUP_SIZE: u16 = 1 << FAST_LOOKUP_BITS;
const MAX_HUFF_TREE_SIZE: usize = MAX_HUFF_SYMBOLS_0 * 2;
const LITLEN_TABLE: usize = 0;
const DIST_TABLE: usize = 1;
const HUFFLEN_TABLE: usize = 2;
const LEN_CODES_SIZE: usize = 512;
const LEN_CODES_MASK: usize = LEN_CODES_SIZE - 1;

/// Flags to [`decompress()`] to control how inflation works.
///
/// These define bits for a bitmask argument.
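///
/// For example, a typical flag combination for inflating a complete zlib stream
/// into one large preallocated buffer might look like this (a usage sketch, not
/// the only valid combination):
///
/// ```
/// use miniz_oxide::inflate::core::inflate_flags::*;
///
/// let flags = TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
/// # let _ = flags;
/// ```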
pub mod inflate_flags {
    /// Should we try to parse a zlib header?
    ///
    /// If unset, the function will expect an RFC1951 deflate stream. If set, it will expect a
    /// RFC1950 zlib wrapper around the deflate stream.
    pub const TINFL_FLAG_PARSE_ZLIB_HEADER: u32 = 1;

    /// There will be more input that hasn't been given to the decompressor yet.
    ///
    /// This is useful when you want to decompress what you have so far,
    /// even if you know there is probably more input that hasn't gotten here yet (_e.g._, over a
    /// network connection). When [`decompress()`][super::decompress] reaches the end of the input
    /// without finding the end of the compressed stream, it will return
    /// [`TINFLStatus::NeedsMoreInput`][super::TINFLStatus::NeedsMoreInput] if this is set,
    /// indicating that you should get more data before calling again. If not set, it will return
    /// [`TINFLStatus::FailedCannotMakeProgress`][super::TINFLStatus::FailedCannotMakeProgress]
    /// suggesting the stream is corrupt, since you claimed it was all there.
    pub const TINFL_FLAG_HAS_MORE_INPUT: u32 = 2;

    /// The output buffer should not wrap around.
    pub const TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: u32 = 4;

    /// Calculate the adler32 checksum of the output data even if we're not inflating a zlib stream.
    ///
    /// If [`TINFL_FLAG_IGNORE_ADLER32`] is specified, it will override this.
    ///
    /// NOTE: Enabling/disabling this between calls to decompress will result in an incorrect
    /// checksum.
    pub const TINFL_FLAG_COMPUTE_ADLER32: u32 = 8;

    /// Ignore adler32 checksum even if we are inflating a zlib stream.
    ///
    /// Overrides [`TINFL_FLAG_COMPUTE_ADLER32`] if both are enabled.
    ///
    /// NOTE: This flag does not exist in miniz as it does not support this and is a
    /// custom addition for miniz_oxide.
    ///
    /// NOTE: Should not be changed from enabled to disabled after decompression has started,
    /// this will result in checksum failure (outside the unlikely event where the checksum happens
    /// to match anyway).
    pub const TINFL_FLAG_IGNORE_ADLER32: u32 = 64;

    /// Return [`TINFLStatus::BlockBoundary`][super::TINFLStatus::BlockBoundary]
    /// on reaching the boundary between deflate blocks. Calling [`decompress()`][super::decompress]
    /// again will resume decompression of the next block.
    #[cfg(feature = "block-boundary")]
    pub const TINFL_FLAG_STOP_ON_BLOCK_BOUNDARY: u32 = 128;
}

use self::inflate_flags::*;

const MIN_TABLE_SIZES: [u16; 3] = [257, 1, 4];

#[cfg(target_pointer_width = "64")]
type BitBuffer = u64;

#[cfg(not(target_pointer_width = "64"))]
type BitBuffer = u32;

/*
enum HuffmanTableType {
    LiteralLength = 0,
    Dist = 1,
    Huffman = 2,
}*/

/// Minimal data representing the [`DecompressorOxide`] state when it is between deflate blocks
/// (i.e. [`decompress()`] has returned [`TINFLStatus::BlockBoundary`]).
/// This can be serialized along with the last 32KiB of the output buffer, then passed to
/// [`DecompressorOxide::from_block_boundary_state()`] to resume decompression from the same point.
///
/// The Zlib/Adler32 fields can be ignored if you aren't using those features
/// ([`TINFL_FLAG_PARSE_ZLIB_HEADER`], [`TINFL_FLAG_COMPUTE_ADLER32`]).
/// When deserializing, you can reconstruct `bit_buf` from the previous byte in the input file
/// (if you still have access to it), so `num_bits` is the only field that is always required.
#[derive(Clone)]
#[cfg(feature = "block-boundary")]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BlockBoundaryState {
    /// The number of bits from the last byte of input consumed,
    /// that are needed for decoding the next deflate block.
    /// Value is in range `0..=7`
    pub num_bits: u8,

    /// The `num_bits` MSBs from the last byte of input consumed,
    /// that are needed for decoding the next deflate block.
    /// Stored in the LSBs of this field.
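    /// (Assuming the previous input byte is available, this should correspond to
    /// `prev_byte >> (8 - num_bits)` when `num_bits` is non-zero.)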
    pub bit_buf: u8,

    /// Zlib CMF
    pub z_header0: u32,
    /// Zlib FLG
    pub z_header1: u32,
    /// Adler32 checksum of the data decompressed so far
    pub check_adler32: u32,
}

#[cfg(feature = "block-boundary")]
impl Default for BlockBoundaryState {
    fn default() -> Self {
        BlockBoundaryState {
            num_bits: 0,
            bit_buf: 0,
            z_header0: 0,
            z_header1: 0,
            check_adler32: 1,
        }
    }
}

/// Main decompression struct.
#[cfg_attr(not(feature = "rustc-dep-of-std"), derive(Clone))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct DecompressorOxide {
    /// Current state of the decompressor.
    state: core::State,
    /// Number of bits in the bit buffer.
    num_bits: u32,
    /// Zlib CMF
    z_header0: u32,
    /// Zlib FLG
    z_header1: u32,
    /// Adler32 checksum from the zlib header.
    z_adler32: u32,
    /// 1 if the current block is the last block, 0 otherwise.
    finish: u8,
    /// The type of the current block,
    /// or if in a dynamic block, which huffman table we are currently
    /// initializing.
    block_type: u8,
    /// Running adler32 checksum of the decompressed data.
    check_adler32: u32,
    /// Last match distance.
    dist: u32,
    /// Variable used for match length, symbols, and a number of other things.
    counter: u32,
    /// Number of extra bits for the last length or distance code.
    num_extra: u8,
    /// Number of entries in each huffman table.
    table_sizes: [u16; MAX_HUFF_TABLES],
    /// Buffer of input bits that have been read in but not yet consumed.
    bit_buf: BitBuffer,
    /// Huffman tables.
    tables: [HuffmanTable; MAX_HUFF_TABLES],

    #[cfg_attr(feature = "serde", serde(with = "BigArray"))]
    code_size_literal: [u8; MAX_HUFF_SYMBOLS_0],
    code_size_dist: [u8; MAX_HUFF_SYMBOLS_1],
    code_size_huffman: [u8; MAX_HUFF_SYMBOLS_2],
    /// Raw block header.
    raw_header: [u8; 4],
    /// Huffman length codes.
    #[cfg_attr(feature = "serde", serde(with = "BigArray"))]
    // MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137
    // Extended to 512 to allow masking to help evade bounds checks.
    len_codes: [u8; LEN_CODES_SIZE],
}

impl DecompressorOxide {
    /// Create a new tinfl_decompressor with all fields set to 0.
    pub fn new() -> DecompressorOxide {
        DecompressorOxide::default()
    }

    /// Set the current state to `Start`.
    #[inline]
    pub fn init(&mut self) {
        // The rest of the data is reset or overwritten when used.
        self.state = core::State::Start;
    }

    /// Returns the adler32 checksum of the currently decompressed data.
    /// Note: Will return Some(1) if decompressing zlib but ignoring adler32.
    #[inline]
    #[cfg(not(feature = "rustc-dep-of-std"))]
    pub fn adler32(&self) -> Option<u32> {
        if self.state != State::Start && !self.state.is_failure() && self.z_header0 != 0 {
            Some(self.check_adler32)
        } else {
            None
        }
    }

    /// Returns the adler32 that was read from the zlib header if it exists.
    #[inline]
    #[cfg(not(feature = "rustc-dep-of-std"))]
    pub fn adler32_header(&self) -> Option<u32> {
        if self.state != State::Start && self.state != State::BadZlibHeader && self.z_header0 != 0 {
            Some(self.z_adler32)
        } else {
            None
        }
    }

    // Get zlib header for tests.
    // Only used for tests for now; we may provide a proper function for this later.
    #[cfg(all(test, feature = "with-alloc"))]
    pub(crate) const fn zlib_header(&self) -> (u32, u32) {
        (self.z_header0, self.z_header1)
    }

    /*fn code_size_table(&mut self, table_num: u8) -> &mut [u8] {
        match table_num {
            0 => &mut self.code_size_literal,
            1 => &mut self.code_size_dist,
            _ => &mut self.code_size_huffman,
        }
    }*/

    /// Returns the current [`BlockBoundaryState`]. Should only be called when
    /// [`decompress()`] has returned [`TINFLStatus::BlockBoundary`];
    /// otherwise this will return `None`.
    #[cfg(feature = "block-boundary")]
    pub fn block_boundary_state(&self) -> Option<BlockBoundaryState> {
        if self.state == core::State::ReadBlockHeader {
            // If we're in this state, undo_bytes should have emptied
            // bit_buf of any whole bytes.
            assert!(self.num_bits < 8);

            Some(BlockBoundaryState {
                num_bits: self.num_bits as u8,
                bit_buf: self.bit_buf as u8,
                z_header0: self.z_header0,
                z_header1: self.z_header1,
                check_adler32: self.check_adler32,
            })
        } else {
            None
        }
    }

    /// Creates a new `DecompressorOxide` from the state returned by
    /// `block_boundary_state()`.
    ///
    /// When calling [`decompress()`], the 32KiB of `out` preceding `out_pos` must be
    /// initialized with the same data that it contained when `block_boundary_state()`
    /// was called.
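    ///
    /// A minimal resume sketch (assuming `st`, the compressed `input`, the byte
    /// offset `resume_offset` it was saved at, the trailing 32KiB of output
    /// `history`, and the `flags` in use were all saved earlier; the names here
    /// are illustrative only):
    ///
    /// ```ignore
    /// let mut r = DecompressorOxide::from_block_boundary_state(&st);
    /// let mut out = vec![0u8; 64 * 1024];
    /// out[..history.len()].copy_from_slice(&history);
    /// let (_status, _in_read, _out_written) =
    ///     decompress(&mut r, &input[resume_offset..], &mut out, history.len(), flags);
    /// ```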
    #[cfg(feature = "block-boundary")]
    pub fn from_block_boundary_state(st: &BlockBoundaryState) -> Self {
        DecompressorOxide {
            state: core::State::ReadBlockHeader,
            num_bits: st.num_bits as u32,
            bit_buf: st.bit_buf as BitBuffer,
            z_header0: st.z_header0,
            z_header1: st.z_header1,
            z_adler32: 1,
            check_adler32: st.check_adler32,
            ..DecompressorOxide::default()
        }
    }
}

impl Default for DecompressorOxide {
    /// Create a new tinfl_decompressor with all fields set to 0.
    #[inline(always)]
    fn default() -> Self {
        DecompressorOxide {
            state: core::State::Start,
            num_bits: 0,
            z_header0: 0,
            z_header1: 0,
            z_adler32: 0,
            finish: 0,
            block_type: 0,
            check_adler32: 0,
            dist: 0,
            counter: 0,
            num_extra: 0,
            table_sizes: [0; MAX_HUFF_TABLES],
            bit_buf: 0,
            // TODO:(oyvindln) Check that copies here are optimized out in release mode.
            tables: [
                HuffmanTable::new(),
                HuffmanTable::new(),
                HuffmanTable::new(),
            ],
            code_size_literal: [0; MAX_HUFF_SYMBOLS_0],
            code_size_dist: [0; MAX_HUFF_SYMBOLS_1],
            code_size_huffman: [0; MAX_HUFF_SYMBOLS_2],
            raw_header: [0; 4],
            len_codes: [0; LEN_CODES_SIZE],
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[non_exhaustive]
enum State {
    Start = 0,
    ReadZlibCmf,
    ReadZlibFlg,
    ReadBlockHeader,
    BlockTypeNoCompression,
    RawHeader,
    RawMemcpy1,
    RawMemcpy2,
    ReadTableSizes,
    ReadHufflenTableCodeSize,
    ReadLitlenDistTablesCodeSize,
    ReadExtraBitsCodeSize,
    DecodeLitlen,
    WriteSymbol,
    ReadExtraBitsLitlen,
    DecodeDistance,
    ReadExtraBitsDistance,
    RawReadFirstByte,
    RawStoreFirstByte,
    WriteLenBytesToEnd,
    BlockDone,
    HuffDecodeOuterLoop1,
    HuffDecodeOuterLoop2,
    ReadAdler32,

    DoneForever,

    // Failure states.
    BlockTypeUnexpected,
    BadCodeSizeSum,
    BadDistOrLiteralTableLength,
    BadTotalSymbols,
    BadZlibHeader,
    DistanceOutOfBounds,
    BadRawLength,
    BadCodeSizeDistPrevLookup,
    InvalidLitlen,
    InvalidDist,
}

impl State {
    #[cfg(not(feature = "rustc-dep-of-std"))]
    const fn is_failure(self) -> bool {
        matches!(
            self,
            BlockTypeUnexpected
                | BadCodeSizeSum
                | BadDistOrLiteralTableLength
                | BadTotalSymbols
                | BadZlibHeader
                | DistanceOutOfBounds
                | BadRawLength
                | BadCodeSizeDistPrevLookup
                | InvalidLitlen
                | InvalidDist
        )
    }

    #[inline]
    fn begin(&mut self, new_state: State) {
        *self = new_state;
    }
}

use self::State::*;

// # Optimization
// We add an extra value at the end and make the tables 32 elements long
// so we can use a mask to avoid bounds checks.
// The invalid values are set to something high enough to avoid underflowing
// the match length.
/// Base length for each length code.
///
/// The base is used together with the value of the extra bits to decode the actual
/// length/distance values in a match.
#[rustfmt::skip]
const LENGTH_BASE: [u16; 32] = [
    3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 512, 512, 512
];

/// Number of extra bits for each length code.
#[rustfmt::skip]
const LENGTH_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0
];

/// Base length for each distance code.
#[rustfmt::skip]
const DIST_BASE: [u16; 30] = [
    1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
    49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
    2049, 3073, 4097, 6145, 8193, 12_289, 16_385, 24_577
];

/// Get the number of extra bits used for a distance code.
/// (Code numbers above `NUM_DISTANCE_CODES` will give some garbage
/// value.)
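///
/// For example, distance codes 0..=3 have 0 extra bits, codes 4..=5 have 1,
/// and the largest valid codes 28..=29 have 13, matching the table in RFC 1951.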
#[inline(always)]
const fn num_extra_bits_for_distance_code(code: u8) -> u8 {
    // TODO: Need to verify that this is faster on all platforms.
    // This can be easily calculated without a lookup.
    let c = code >> 1;
    c.saturating_sub(1)
}

/// The mask used when indexing the base/extra arrays.
const BASE_EXTRA_MASK: usize = 32 - 1;

/// Read a little-endian u16 value from the slice iterator.
///
/// # Panics
/// Panics if there are fewer than two bytes left.
#[inline]
fn read_u16_le(iter: &mut InputWrapper) -> u16 {
    let ret = {
        let two_bytes = iter.as_slice()[..2].try_into().unwrap_or_default();
        u16::from_le_bytes(two_bytes)
    };
    iter.advance(2);
    ret
}

/// Ensure that there is data in the bit buffer.
///
/// On 64-bit platforms, we use a 64-bit value so this will
/// result in there being at least 32 bits in the bit buffer.
/// This function assumes that there are at least 4 bytes left in the input buffer.
#[inline(always)]
#[cfg(target_pointer_width = "64")]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut InputWrapper) {
    // Read four bytes into the buffer at once.
    if l.num_bits < 30 {
        l.bit_buf |= BitBuffer::from(in_iter.read_u32_le()) << l.num_bits;
        l.num_bits += 32;
    }
}

/// Same as previous, but for non-64-bit platforms.
/// Ensures at least 16 bits are present; requires at least 2 bytes in the input buffer.
#[inline(always)]
#[cfg(not(target_pointer_width = "64"))]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut InputWrapper) {
    // If the buffer is 32-bit wide, read 2 bytes instead.
    if l.num_bits < 15 {
        l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
        l.num_bits += 16;
    }
}

/// Check that the zlib header is correct and that there is enough space in the buffer
/// for the window size specified in the header.
///
/// See <https://tools.ietf.org/html/rfc1950>
#[inline]
const fn validate_zlib_header(cmf: u32, flg: u32, flags: u32, mask: usize) -> Action {
    let mut failed =
    // cmf + flg should be divisible by 31.
        (((cmf * 256) + flg) % 31 != 0) ||
    // If this flag is set, a dictionary was used for this zlib compressed data.
    // This is currently not supported by miniz or miniz-oxide.
        ((flg & 0b0010_0000) != 0) ||
    // Compression method. Only 8 (DEFLATE) is defined by the standard.
        ((cmf & 15) != 8);

    let window_size = 1 << ((cmf >> 4) + 8);
    if (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) == 0 {
        // Bail if the buffer is wrapping and the window size is larger than the buffer.
        failed |= (mask + 1) < window_size;
    }

    // Zlib doesn't allow window sizes above 32 * 1024.
    failed |= window_size > 32_768;

    if failed {
        Action::Jump(BadZlibHeader)
    } else {
        Action::Jump(ReadBlockHeader)
    }
}

enum Action {
    None,
    Jump(State),
    End(TINFLStatus),
}

/// Try to decode the next huffman code, putting it in the counter field of the decompressor
/// if successful.
///
/// # Returns
/// The specified action returned from `f` on success,
/// `Action::End` if there is not enough data left to decode a symbol.
fn decode_huffman_code<F>(
    r: &mut DecompressorOxide,
    l: &mut LocalVars,
    table: usize,
    flags: u32,
    in_iter: &mut InputWrapper,
    f: F,
) -> Action
where
    F: FnOnce(&mut DecompressorOxide, &mut LocalVars, i32) -> Action,
{
    // As the huffman codes can be up to 15 bits long we need at least 15 bits
    // ready in the bit buffer to start decoding the next huffman code.
    if l.num_bits < 15 {
        // First, make sure there is enough data in the bit buffer to decode a huffman code.
        if in_iter.bytes_left() < 2 {
            // If there are fewer than 2 bytes left in the input buffer, we try to look up
            // the huffman code with what's available, and return if that doesn't succeed.
            // Original explanation in miniz:
            // /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
            // * remaining in the input buffer falls below 2. */
            // /* It reads just enough bytes from the input stream that are needed to decode
            // * the next Huffman code (and absolutely no more). It works by trying to fully
            // * decode a */
            // /* Huffman code by using whatever bits are currently present in the bit buffer.
            // * If this fails, it reads another byte, and tries again until it succeeds or
            // * until the */
            // /* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
            loop {
                let mut temp = i32::from(r.tables[table].fast_lookup(l.bit_buf));
                if temp >= 0 {
                    let code_len = (temp >> 9) as u32;
                    // TODO: Is there any point to check for code_len != 0 here still?
                    if (code_len != 0) && (l.num_bits >= code_len) {
                        break;
                    }
                } else if l.num_bits > FAST_LOOKUP_BITS.into() {
                    let mut code_len = u32::from(FAST_LOOKUP_BITS);
                    loop {
                        temp = i32::from(
                            r.tables[table].tree
                                [(!temp + ((l.bit_buf >> code_len) & 1) as i32) as usize],
                        );
                        code_len += 1;
                        if temp >= 0 || l.num_bits < code_len + 1 {
                            break;
                        }
                    }
                    if temp >= 0 {
                        break;
                    }
                }

                // TODO: miniz jumps straight to here after getting here again after failing to read
                // a byte.
                // Doing that lets miniz avoid re-doing the lookup that was done in the
                // previous call.
                let mut byte = 0;
                if let a @ Action::End(_) = read_byte(in_iter, flags, |b| {
                    byte = b;
                    Action::None
                }) {
                    return a;
                };

                // Do this outside the closure for now to avoid borrowing r.
                l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
                l.num_bits += 8;

                if l.num_bits >= 15 {
                    break;
                }
            }
        } else {
            // There is enough data in the input buffer, so read the next two bytes
            // and add them to the bit buffer.
            // Unwrapping here is fine since we just checked that there are at least two
            // bytes left.
            l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
            l.num_bits += 16;
        }
    }

    // We now have at least 15 bits in the input buffer.
    let mut symbol = i32::from(r.tables[table].fast_lookup(l.bit_buf));
    let code_len;
    // If the symbol was found in the fast lookup table.
    if symbol >= 0 {
        // Get the length value from the top bits.
        // As we shift down the sign bit, converting to an unsigned value
        // shouldn't overflow.
        code_len = (symbol >> 9) as u32;
        // Mask out the length value.
        symbol &= 511;
    } else {
        let res = r.tables[table].tree_lookup(symbol, l.bit_buf, FAST_LOOKUP_BITS);
        symbol = res.0;
        code_len = res.1;
    };

    l.bit_buf >>= code_len;
    l.num_bits -= code_len;
    f(r, l, symbol)
}

/// Try to read one byte from `in_iter` and call `f` with the read byte as an argument,
/// returning the result.
/// If reading fails, `Action::End` is returned.
#[inline]
fn read_byte<F>(in_iter: &mut InputWrapper, flags: u32, f: F) -> Action
where
    F: FnOnce(u8) -> Action,
{
    match in_iter.read_byte() {
        None => end_of_input(flags),
        Some(byte) => f(byte),
    }
}

// TODO: `l: &mut LocalVars` may be slow similar to decompress_fast (even with inline(always))
/// Try to read `amount` number of bits from `in_iter` and call the function `f` with the bits as
/// an argument after reading, returning the result of that function, or `Action::End` if there are
/// not enough bytes left.
#[inline]
#[allow(clippy::while_immutable_condition)]
fn read_bits<F>(
    l: &mut LocalVars,
    amount: u32,
    in_iter: &mut InputWrapper,
    flags: u32,
    f: F,
) -> Action
where
    F: FnOnce(&mut LocalVars, BitBuffer) -> Action,
{
    // Clippy gives a false positive warning here due to the closure.
    // Read enough bytes from the input iterator to cover the number of bits we want.
    while l.num_bits < amount {
        let action = read_byte(in_iter, flags, |byte| {
            l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
            l.num_bits += 8;
            Action::None
        });

        // If there are not enough bytes in the input iterator, return and signal that we need more.
        if !matches!(action, Action::None) {
            return action;
        }
    }

    let bits = l.bit_buf & ((1 << amount) - 1);
    l.bit_buf >>= amount;
    l.num_bits -= amount;
    f(l, bits)
}

#[inline]
fn pad_to_bytes<F>(l: &mut LocalVars, in_iter: &mut InputWrapper, flags: u32, f: F) -> Action
where
    F: FnOnce(&mut LocalVars) -> Action,
{
    let num_bits = l.num_bits & 7;
    read_bits(l, num_bits, in_iter, flags, |l, _| f(l))
}

#[inline]
const fn end_of_input(flags: u32) -> Action {
    Action::End(if flags & TINFL_FLAG_HAS_MORE_INPUT != 0 {
        TINFLStatus::NeedsMoreInput
    } else {
        TINFLStatus::FailedCannotMakeProgress
    })
}

#[inline]
fn undo_bytes(l: &mut LocalVars, max: u32) -> u32 {
    let res = cmp::min(l.num_bits >> 3, max);
    l.num_bits -= res << 3;
    res
}

fn start_static_table(r: &mut DecompressorOxide) {
    r.table_sizes[LITLEN_TABLE] = 288;
    r.table_sizes[DIST_TABLE] = 32;
    r.code_size_literal[0..144].fill(8);
    r.code_size_literal[144..256].fill(9);
    r.code_size_literal[256..280].fill(7);
    r.code_size_literal[280..288].fill(8);
    r.code_size_dist[0..32].fill(5);
}

#[cfg(any(
    feature = "rustc-dep-of-std",
    not(feature = "with-alloc"),
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "loongarch64"
))]
#[inline]
const fn reverse_bits(n: u16) -> u16 {
    // The lookup table is not used when building as part of std to avoid wasting space
    // for it in every rust binary,
    // as it's only used for backtraces in the cold path
    // - see #152

    // armv7 and newer, and loongarch have a cpu instruction for bit reversal so
    // it's preferable to just use that on those architectures.

    // Also disable the lookup table when not using the alloc feature, as
    // we probably don't want to waste space for a lookup table in an environment
    // without an allocator.
    n.reverse_bits()
}

#[cfg(all(
    not(any(
        feature = "rustc-dep-of-std",
        target_arch = "aarch64",
        target_arch = "arm64ec",
        target_arch = "loongarch64"
    )),
    feature = "with-alloc"
))]
fn reverse_bits(n: u16) -> u16 {
    static REVERSED_BITS_LOOKUP: [u16; 512] = {
        let mut table = [0; 512];

        let mut i = 0;
        while i < 512 {
            table[i] = (i as u16).reverse_bits();
            i += 1;
        }

        table
    };

    REVERSED_BITS_LOOKUP[n as usize]
}

fn init_tree(r: &mut DecompressorOxide, l: &mut LocalVars) -> Option<Action> {
    loop {
        let bt = r.block_type as usize;

        let code_sizes = match bt {
            LITLEN_TABLE => &mut r.code_size_literal[..],
            DIST_TABLE => &mut r.code_size_dist,
            HUFFLEN_TABLE => &mut r.code_size_huffman,
            _ => return None,
        };
        let table = &mut r.tables[bt];

        let mut total_symbols = [0u16; 16];
        // Next code - we use the odd length here to simplify a loop later.
        let mut next_code = [0u32; 17];
        const INVALID_CODE: i16 = (1 << 9) | 286;
        // Set the values in the fast table to return a
        // non-zero length and an invalid symbol instead of zero,
        // so that we do not have to have a check for a zero
        // code length in the hot code path later
        // and can instead error out on the invalid symbol check
        // on bogus input.
        table.look_up.fill(INVALID_CODE);
        // If we are initializing the huffman code length table we can skip
        // this, since those code lengths are sent in 3 bits (so the codes are at
        // most 7 bits long) and thus only use the fast table; the tree table
        // won't be accessed, so there is no point clearing it.
        // TODO: Avoid creating this table at all.
        if bt != HUFFLEN_TABLE {
            table.tree.fill(0);
        }

        let table_size = r.table_sizes[bt] as usize;
        if table_size > code_sizes.len() {
            return None;
        }

        for &code_size in &code_sizes[..table_size] {
            let cs = code_size as usize;
            // Code sizes are limited to max 15 according to the
            // deflate spec.
            // If it is larger than this, something has gone wrong...
            if cs >= total_symbols.len() {
                return None;
            }
            total_symbols[cs] += 1;
        }

        let mut used_symbols = 0;
        let mut total = 0u32;
        // Count up the total number of used lengths and check that the table is not under or over-subscribed.
        for (&ts, next) in total_symbols.iter().zip(next_code[1..].iter_mut()).skip(1) {
            used_symbols += ts;
            total += u32::from(ts);
            total <<= 1;
            *next = total;
        }

        //
        // While it's not explicitly stated in the spec, a hufflen table
        // with a single length (or none) would be invalid, as there needs to be
        // at minimum a length for both a non-zero length huffman code for the end of block symbol
        // and one of the codes to represent 0 to make sense - so just reject that here as well.
        //
        // The distance table is allowed to have a single distance code, though according to the spec it is
        // supposed to be accompanied by a second dummy code. It can also be empty, indicating no used codes.
        //
        // The literal/length table can not be empty as there has to be an end of block symbol.
        // The standard doesn't specify that there should be a dummy code in case of a single
        // symbol (i.e. an empty block). Normally that's not an issue, though the code will have
        // to take that into account later on in case of malformed input.
        if total != 65_536 && (used_symbols > 1 || bt == HUFFLEN_TABLE) {
            return Some(Action::Jump(BadTotalSymbols));
        }

        let mut tree_next = -1;
        for symbol_index in 0..table_size {
            // Code sizes are limited to 15 according to the spec.
            // It's already checked earlier but the compiler might not be smart enough to know that.
            let code_size = code_sizes[symbol_index] & 15;
            if code_size == 0 {
                continue;
            }

            let cur_code = next_code[code_size as usize];
            next_code[code_size as usize] += 1;

            let n = (cur_code & (u32::MAX >> (32 - code_size))) as u16;

            let mut rev_code = if n < 512 {
                // Use a lookup table for a small speedup here;
                // it seems to only really make a difference on very short
                // inputs, however.
                // 512 seems to be around a sweet spot.
                reverse_bits(n)
            } else {
                n.reverse_bits()
            } >> (16 - code_size);

            if code_size <= FAST_LOOKUP_BITS {
                let k = (i16::from(code_size) << 9) | symbol_index as i16;
                while rev_code < FAST_LOOKUP_SIZE {
                    table.look_up[rev_code as usize] = k;
                    rev_code += 1 << code_size;
                }
                continue;
            }

            let mut tree_cur = table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize];
            if tree_cur == INVALID_CODE {
                table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize] = tree_next;
                tree_cur = tree_next;
                tree_next -= 2;
            }

            rev_code >>= FAST_LOOKUP_BITS - 1;
            for _ in FAST_LOOKUP_BITS + 1..code_size {
                rev_code >>= 1;
                tree_cur -= (rev_code & 1) as i16;
                let tree_index = (-tree_cur - 1) as usize;
                if tree_index >= table.tree.len() {
                    return None;
                }
                if table.tree[tree_index] == 0 {
                    table.tree[tree_index] = tree_next;
                    tree_cur = tree_next;
                    tree_next -= 2;
                } else {
                    tree_cur = table.tree[tree_index];
                }
            }

            rev_code >>= 1;
            tree_cur -= (rev_code & 1) as i16;
            let tree_index = (-tree_cur - 1) as usize;
            if tree_index >= table.tree.len() {
                return None;
            }
            table.tree[tree_index] = symbol_index as i16;
        }

        if r.block_type == HUFFLEN_TABLE as u8 {
            l.counter = 0;
            return Some(Action::Jump(ReadLitlenDistTablesCodeSize));
        }

        if r.block_type == LITLEN_TABLE as u8 {
            break;
        }
        r.block_type -= 1;
    }

    l.counter = 0;

    Some(Action::Jump(DecodeLitlen))
}

// A helper macro for generating the state machine.
//
// As Rust doesn't have fallthrough on matches, we have to return to the match statement
// and jump for each state change. (Which would ideally be optimized away, but often isn't.)
macro_rules! generate_state {
    ($state: ident, $state_machine: tt, $f: expr) => {
        loop {
            match $f {
                Action::None => continue,
                Action::Jump(new_state) => {
                    $state = new_state;
                    continue $state_machine;
                },
                Action::End(result) => break $state_machine result,
            }
        }
    };
}

#[derive(Copy, Clone)]
struct LocalVars {
    pub bit_buf: BitBuffer,
    pub num_bits: u32,
    pub dist: u32,
    pub counter: u32,
    pub num_extra: u8,
}

#[inline]
fn transfer(
    out_slice: &mut [u8],
    mut source_pos: usize,
    mut out_pos: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    // Special case that comes up surprisingly often: in the case that `source_pos`
    // is 1 less than `out_pos`, we can say that the entire range will be the same
    // value and optimize this to be a simple `memset`.
    let source_diff = if source_pos > out_pos {
        source_pos - out_pos
    } else {
        out_pos - source_pos
    };

    // The last 3 bytes can wrap as those are dealt with separately at the end.
    // Use wrapping_sub rather than saturating for performance reasons here, as
    // if source_pos + match_len is < 3 we just want to jump to the end
    // condition anyhow.
    let not_wrapping = (out_buf_size_mask == usize::MAX)
        || ((source_pos + match_len).wrapping_sub(3) < out_slice.len());

    let end_pos = ((match_len >> 2) * 4) + out_pos;
    if not_wrapping && source_diff == 1 && out_pos > source_pos {
        let end = (match_len >> 2) * 4 + out_pos;
        let init = out_slice[out_pos - 1];
        out_slice[out_pos..end].fill(init);
        out_pos = end;
        source_pos = end - 1;
        // If the difference between `source_pos` and `out_pos` is greater than 3,
        // and we are not wrapping, we
        // can do slightly better than the naive case by copying everything at once.
    } else if not_wrapping && out_pos > source_pos && (out_pos - source_pos >= 4) {
        let end_pos = cmp::min(end_pos, out_slice.len().saturating_sub(3));
        while out_pos < end_pos {
            out_slice.copy_within(source_pos..=source_pos + 3, out_pos);
            source_pos += 4;
            out_pos += 4;
        }
    } else {
        let end_pos = cmp::min(end_pos, out_slice.len().saturating_sub(3));
        while out_pos < end_pos {
            // Placing these assertions moves some bounds checks before the accesses, which
            // makes the compiler able to optimize better.
            // Ideally we would find a safe way to remove them entirely.
            assert!(out_pos + 3 < out_slice.len());
            assert!((source_pos + 3) & out_buf_size_mask < out_slice.len());

            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
            out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask];
            out_slice[out_pos + 3] = out_slice[(source_pos + 3) & out_buf_size_mask];
            source_pos += 4;
            out_pos += 4;
        }
    }

    match match_len & 3 {
        0 => (),
        1 => out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask],
        2 => {
            assert!(out_pos + 1 < out_slice.len());
            assert!((source_pos + 1) & out_buf_size_mask < out_slice.len());
            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
        }
        3 => {
            assert!(out_pos + 2 < out_slice.len());
            assert!((source_pos + 2) & out_buf_size_mask < out_slice.len());
            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
            out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask];
        }
        _ => unreachable!(),
    }
}

/// Presumes that there are at least `match_len` bytes left in the output.
#[inline]
fn apply_match(
    out_slice: &mut [u8],
    out_pos: usize,
    dist: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    debug_assert!(out_pos.checked_add(match_len).unwrap() <= out_slice.len());

    let source_pos = out_pos.wrapping_sub(dist) & out_buf_size_mask;

    if match_len == 3 {
        let out_slice = Cell::from_mut(out_slice).as_slice_of_cells();
        if let Some(dst) = out_slice.get(out_pos..out_pos + 3) {
            // Moving bounds checks before any memory mutation allows the optimizer
            // to combine them together.
            let src = out_slice
                .get(source_pos)
                .zip(out_slice.get((source_pos + 1) & out_buf_size_mask))
                .zip(out_slice.get((source_pos + 2) & out_buf_size_mask));
            if let Some(((a, b), c)) = src {
                // For correctness, the memory reads and writes have to be interleaved.
                // Cells make it possible for read and write references to overlap.
                dst[0].set(a.get());
                dst[1].set(b.get());
                dst[2].set(c.get());
            }
        }
        return;
    }

    if cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) {
        // The copy from slice code seems to not give any added performance, at least on
        // armv7, so transfer manually.
        // Need to test on other platforms.
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
        return;
    }

    if source_pos >= out_pos && (source_pos - out_pos) < match_len {
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    } else if match_len <= dist && source_pos + match_len < out_slice.len() {
        // Destination and source segments do not intersect and source does not wrap.
        // TODO: An invalid match wrapping before the start of the data reached here before
        // it was fixed (it wrapped around and ended up overlapping again) - need
        // to check that we are not wrapping here.
        if source_pos < out_pos {
            let (from_slice, to_slice) = out_slice.split_at_mut(out_pos);
            to_slice[..match_len].copy_from_slice(&from_slice[source_pos..source_pos + match_len]);
        } else {
            let (to_slice, from_slice) = out_slice.split_at_mut(source_pos);
            to_slice[out_pos..out_pos + match_len].copy_from_slice(&from_slice[..match_len]);
        }
    } else {
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    }
}

/// Fast inner decompression loop which is run while there are at least
/// 259 bytes left in the output buffer, and at least 14 bytes left in the input buffer
/// (the maximum amount one decoded match could need, plus one).
///
/// This was inspired by a similar optimization in zlib, which uses this info to do
/// faster unchecked copies of multiple bytes at a time.
/// Currently we don't do this here, but this function does avoid having to jump through the
/// big match loop on each state change (as Rust does not have fallthrough or gotos at the moment),
/// and already improves decompression speed a fair bit.
fn decompress_fast(
    r: &mut DecompressorOxide,
    in_iter: &mut InputWrapper,
    out_buf: &mut OutputBuffer,
    flags: u32,
    local_vars: &mut LocalVars,
    out_buf_size_mask: usize,
) -> (TINFLStatus, State) {
    // Make a local copy of the most used variables, to avoid having to update and read from values
    // in a random memory location and to encourage more register use.
    let mut l = *local_vars;
    let mut state;

    let status: TINFLStatus = 'o: loop {
        state = State::DecodeLitlen;
        loop {
            // This function assumes that there is at least 259 bytes left in the output buffer,
            // and that there is at least 14 bytes left in the input buffer. 14 input bytes:
            // 15 (prev lit) + 15 (length) + 5 (length extra) + 15 (dist)
            // + 29 + 32 (left in bit buf, including last 13 dist extra) = 111 bits < 14 bytes
            // We need the one extra byte as we may write one length and one full match
            // before checking again.
            if out_buf.bytes_left() < 259 || in_iter.bytes_left() < 14 {
                state = State::DecodeLitlen;
                break 'o TINFLStatus::Done;
            }

            fill_bit_buffer(&mut l, in_iter);

            let (symbol, code_len) = r.tables[LITLEN_TABLE].lookup(l.bit_buf);
            l.counter = symbol as u32;
            l.bit_buf >>= code_len;
            l.num_bits -= code_len;

            if (l.counter & 256) != 0 {
                // The symbol is not a literal.
                break;
            } else {
                // If we have a 32-bit buffer we need to read another two bytes now
                // to have enough bits to keep going.
                if cfg!(not(target_pointer_width = "64")) {
                    fill_bit_buffer(&mut l, in_iter);
                }

                let (symbol, code_len) = r.tables[LITLEN_TABLE].lookup(l.bit_buf);
                l.bit_buf >>= code_len;
                l.num_bits -= code_len;
                // The previous symbol was a literal, so write it directly and check
                // the next one.
                out_buf.write_byte(l.counter as u8);
                if (symbol & 256) != 0 {
                    l.counter = symbol as u32;
                    // The symbol is a length value.
                    break;
                } else {
                    // The symbol is a literal, so write it directly and continue.
                    out_buf.write_byte(symbol as u8);
                }
            }
        }

        // Mask the top bits since they may contain length info.
        l.counter &= 511;
        if l.counter == 256 {
            // We hit the end of block symbol.
            state.begin(BlockDone);
            break 'o TINFLStatus::Done;
        } else if l.counter > 285 {
            // Invalid code.
            // We already verified earlier that the code is > 256.
            state.begin(InvalidLitlen);
            break 'o TINFLStatus::Failed;
        } else {
            // The symbol was a length code.
            // # Optimization
            // Mask the value to avoid bounds checks.
            // While the maximum is checked, the compiler isn't able to know that the
            // value won't wrap around here.
            l.num_extra = LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK];
            l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
            // Length and distance codes have a number of extra bits depending on
            // the base, which together with the base gives us the exact value.

            // We need to make sure we have at least 33 (so min 5 bytes) bits in the buffer at this spot.
            fill_bit_buffer(&mut l, in_iter);
            if l.num_extra != 0 {
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= u32::from(l.num_extra);
                l.counter += extra_bits as u32;
            }

            // We found a length code, so a distance code should follow.

            if cfg!(not(target_pointer_width = "64")) {
                fill_bit_buffer(&mut l, in_iter);
            }

            let (mut symbol, code_len) = r.tables[DIST_TABLE].lookup(l.bit_buf);
            symbol &= 511;
            l.bit_buf >>= code_len;
            l.num_bits -= code_len;
            if symbol > 29 {
                state.begin(InvalidDist);
                break 'o TINFLStatus::Failed;
            }

            l.num_extra = num_extra_bits_for_distance_code(symbol as u8);
            l.dist = u32::from(DIST_BASE[symbol as usize]);

            if l.num_extra != 0 {
                fill_bit_buffer(&mut l, in_iter);
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= u32::from(l.num_extra);
                l.dist += extra_bits as u32;
            }

            let position = out_buf.position();
            if (l.dist as usize > out_buf.position()
                && (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0))
                || (l.dist as usize > out_buf.get_ref().len())
            {
                // We encountered a distance that refers to a position before
                // the start of the decoded data, so we can't continue.
                state.begin(DistanceOutOfBounds);
                break TINFLStatus::Failed;
            }

            apply_match(
                out_buf.get_mut(),
                position,
                l.dist as usize,
                l.counter as usize,
                out_buf_size_mask,
            );

            out_buf.set_position(position + l.counter as usize);
        }
    };

    *local_vars = l;
    (status, state)
}

/// Main decompression function. Keeps decompressing data from `in_buf` until the `in_buf` is
/// empty, `out` is full, the end of the deflate stream is hit, or there is an error in the
/// deflate stream.
///
/// # Arguments
///
/// `r` is a [`DecompressorOxide`] struct with the state of this stream.
///
/// `in_buf` is a reference to the compressed data that is to be decompressed. The decompressor will
/// start at the first byte of this buffer.
///
/// `out` is a reference to the buffer that will store the decompressed data, and that
/// stores previously decompressed data if any.
///
/// * The offset given by `out_pos` indicates where in the output buffer slice writing should start.
/// * If [`TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF`] is not set, the output buffer is used in a
///   wrapping manner, and its size is required to be a power of 2.
/// * The decompression function normally needs access to 32KiB of the previously decompressed data
///   (or to the beginning of the decompressed data if less than 32KiB has been decompressed.)
///     - If this data is not available, decompression may fail.
///     - Some deflate compressors allow specifying a window size which limits match distances to
///       less than this, or alternatively an RLE mode where matches will only refer to the previous byte
///       and thus allows a smaller output buffer. The window size can be specified in the zlib
///       header structure, however, the header data should not be relied on to be correct.
///
/// `flags` indicates settings and status to the decompression function.
/// * The [`TINFL_FLAG_HAS_MORE_INPUT`] has to be specified if more compressed data is to be provided
///   in a subsequent call to this function.
/// * See the [`inflate_flags`] module for details on other flags.
///
/// # Returns
///
/// Returns a tuple containing the status of the decompressor, the number of input bytes read, and the
/// number of bytes output to `out`.
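///
/// # Example
///
/// A minimal single-shot sketch, assuming the output buffer is preallocated large
/// enough for the whole stream (error handling elided; `input` here is an empty
/// placeholder):
///
/// ```no_run
/// use miniz_oxide::inflate::core::{decompress, inflate_flags, DecompressorOxide};
///
/// let input: &[u8] = &[];
/// let mut out = vec![0u8; 1024 * 1024];
/// let mut r = DecompressorOxide::new();
/// let flags = inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
///     | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
/// let (status, bytes_read, bytes_written) = decompress(&mut r, input, &mut out, 0, flags);
/// ```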
pub fn decompress(
    r: &mut DecompressorOxide,
    in_buf: &[u8],
    out: &mut [u8],
    out_pos: usize,
    flags: u32,
) -> (TINFLStatus, usize, usize) {
    let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 {
        usize::MAX
    } else {
        // In the case of zero len, any attempt to write would produce HasMoreOutput,
        // so to gracefully process the case of there really being no output,
        // set the mask to all zeros.
        out.len().saturating_sub(1)
    };

    // Ensure the output buffer's size is a power of 2, unless the output buffer
    // is large enough to hold the entire output file (in which case it doesn't
    // matter).
    // Also make sure that the output buffer position is not past the end of the output buffer.
    if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() {
        return (TINFLStatus::BadParam, 0, 0);
    }

    let mut in_iter = InputWrapper::from_slice(in_buf);

    let mut state = r.state;

    let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos);

    // Make a local copy of the important variables here so we can work with them on the stack.
    let mut l = LocalVars {
        bit_buf: r.bit_buf,
        num_bits: r.num_bits,
        dist: r.dist,
        counter: r.counter,
        num_extra: r.num_extra,
    };

    let mut status = 'state_machine: loop {
        match state {
            Start => generate_state!(state, 'state_machine, {
                l.bit_buf = 0;
                l.num_bits = 0;
                l.dist = 0;
                l.counter = 0;
                l.num_extra = 0;
                r.z_header0 = 0;
                r.z_header1 = 0;
                r.z_adler32 = 1;
                r.check_adler32 = 1;
                if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
                    Action::Jump(State::ReadZlibCmf)
                } else {
                    Action::Jump(State::ReadBlockHeader)
                }
            }),

            ReadZlibCmf => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |cmf| {
                    r.z_header0 = u32::from(cmf);
                    Action::Jump(State::ReadZlibFlg)
                })
            }),

            ReadZlibFlg => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |flg| {
                    r.z_header1 = u32::from(flg);
                    validate_zlib_header(r.z_header0, r.z_header1, flags, out_buf_size_mask)
                })
            }),

            // Read the block header and jump to the relevant section depending on the block type.
            ReadBlockHeader => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
                    r.finish = (bits & 1) as u8;
                    r.block_type = ((bits >> 1) & 3) as u8;
                    match r.block_type {
                        0 => Action::Jump(BlockTypeNoCompression),
                        1 => {
                            start_static_table(r);
                            init_tree(r, l).unwrap_or(Action::End(TINFLStatus::Failed))
                        },
                        2 => {
                            l.counter = 0;
                            Action::Jump(ReadTableSizes)
                        },
                        3 => Action::Jump(BlockTypeUnexpected),
                        _ => unreachable!()
                    }
                })
            }),

            // Raw/Stored/uncompressed block.
            BlockTypeNoCompression => generate_state!(state, 'state_machine, {
                pad_to_bytes(&mut l, &mut in_iter, flags, |l| {
                    l.counter = 0;
                    Action::Jump(RawHeader)
                })
            }),

            // Check that the raw block header is correct.
            RawHeader => generate_state!(state, 'state_machine, {
                if l.counter < 4 {
                    // Read block length and block length check.
                    if l.num_bits != 0 {
                        read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                            r.raw_header[l.counter as usize] = bits as u8;
                            l.counter += 1;
                            Action::None
                        })
                    } else {
                        read_byte(&mut in_iter, flags, |byte| {
                            r.raw_header[l.counter as usize] = byte;
                            l.counter += 1;
                            Action::None
                        })
                    }
                } else {
                    // Check if the length value of a raw block is correct.
                    // The first two (2-byte) words in a raw header are the length and the
                    // one's complement of the length.
                    let length = u16::from(r.raw_header[0]) | (u16::from(r.raw_header[1]) << 8);
                    let check = u16::from(r.raw_header[2]) | (u16::from(r.raw_header[3]) << 8);
                    let valid = length == !check;
                    l.counter = length.into();

                    if !valid {
                        Action::Jump(BadRawLength)
                    } else if l.counter == 0 {
                        // Empty raw block. Sometimes used for synchronization.
                        Action::Jump(BlockDone)
                    } else if l.num_bits != 0 {
                        // There is some data in the bit buffer, so we need to write that first.
                        Action::Jump(RawReadFirstByte)
                    } else {
                        // The bit buffer is empty, so memcpy the rest of the uncompressed data from
                        // the block.
                        Action::Jump(RawMemcpy1)
                    }
                }
            }),

            // Read the byte from the bit buffer.
            RawReadFirstByte => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                    l.dist = bits as u32;
                    Action::Jump(RawStoreFirstByte)
                })
            }),

            // Write the byte we just read to the output buffer.
            RawStoreFirstByte => generate_state!(state, 'state_machine, {
                if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    out_buf.write_byte(l.dist as u8);
                    l.counter -= 1;
                    if l.counter == 0 || l.num_bits == 0 {
                        Action::Jump(RawMemcpy1)
                    } else {
                        // There is still some data left in the bit buffer that needs to be output.
                        // TODO: Changed this to jump to `RawReadFirstByte` rather than
                        // `RawStoreFirstByte` as that seemed to be the correct path, but this
                        // needs testing.
                        Action::Jump(RawReadFirstByte)
                    }
                }
            }),

            RawMemcpy1 => generate_state!(state, 'state_machine, {
                if l.counter == 0 {
                    Action::Jump(BlockDone)
                } else if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    Action::Jump(RawMemcpy2)
                }
            }),

            RawMemcpy2 => generate_state!(state, 'state_machine, {
                if in_iter.bytes_left() > 0 {
                    // Copy as many raw bytes as possible from the input to the output using memcpy.
                    // Raw block lengths are limited to 64 * 1024, so casting through usize and u32
                    // is not an issue.
                    let space_left = out_buf.bytes_left();
                    let bytes_to_copy = cmp::min(cmp::min(
                        space_left,
                        in_iter.bytes_left()),
                        l.counter as usize
                    );

                    out_buf.write_slice(&in_iter.as_slice()[..bytes_to_copy]);

                    in_iter.advance(bytes_to_copy);
                    l.counter -= bytes_to_copy as u32;
                    Action::Jump(RawMemcpy1)
                } else {
                    end_of_input(flags)
                }
            }),

            // Read how many huffman codes/symbols are used for each table.
            ReadTableSizes => generate_state!(state, 'state_machine, {
                if l.counter < 3 {
                    let num_bits = [5, 5, 4][l.counter as usize];
                    read_bits(&mut l, num_bits, &mut in_iter, flags, |l, bits| {
                        r.table_sizes[l.counter as usize] =
                            bits as u16 + MIN_TABLE_SIZES[l.counter as usize];
                        l.counter += 1;
                        Action::None
                    })
                } else {
                    r.code_size_huffman.fill(0);
                    l.counter = 0;
                    // Check that the litlen and distance table sizes are within spec.
                    // The litlen table size should be <= 286 according to the RFC, and
                    // additionally zlib rejects dist table sizes larger than 30.
                    // NOTE: These are the final sizes after adding back the predefined values,
                    // not the raw values in the data.
                    // See miniz_oxide issue #130 and https://github.com/madler/zlib/issues/82.
                    if r.table_sizes[LITLEN_TABLE] <= 286 && r.table_sizes[DIST_TABLE] <= 30 {
                        Action::Jump(ReadHufflenTableCodeSize)
                    }
                    else {
                        Action::Jump(BadDistOrLiteralTableLength)
                    }
                }
            }),
1587
1588 // Read the 3-bit lengths of the huffman codes describing the huffman code lengths used
1589 // to decode the lengths of the main tables.
1590 ReadHufflenTableCodeSize => generate_state!(state, 'state_machine, {
1591 if l.counter < r.table_sizes[HUFFLEN_TABLE].into() {
1592 read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
1593 // These lengths are not stored in a normal ascending order, but rather one
1594 // specified by the deflate specification intended to put the most used
1595 // values at the front as trailing zero lengths do not have to be stored.
1596 r.code_size_huffman[HUFFMAN_LENGTH_ORDER[l.counter as usize] as usize] =
1597 bits as u8;
1598 l.counter += 1;
1599 Action::None
1600 })
1601 } else {
1602 r.table_sizes[HUFFLEN_TABLE] = MAX_HUFF_SYMBOLS_2 as u16;
1603 init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed))
1604 }
1605 }),
1606
1607 ReadLitlenDistTablesCodeSize => generate_state!(state, 'state_machine, {
1608 if l.counter < u32::from(r.table_sizes[LITLEN_TABLE]) + u32::from(r.table_sizes[DIST_TABLE]) {
1609 decode_huffman_code(
1610 r, &mut l, HUFFLEN_TABLE,
1611 flags, &mut in_iter, |r, l, symbol| {
1612 l.dist = symbol as u32;
1613 if l.dist < 16 {
1614 r.len_codes[l.counter as usize & LEN_CODES_MASK] = l.dist as u8;
1615 l.counter += 1;
1616 Action::None
1617 } else if l.dist == 16 && l.counter == 0 {
1618 Action::Jump(BadCodeSizeDistPrevLookup)
1619 } else {
1620                            // The last value is a dummy so that masking with 3 stays in bounds.
1621 l.num_extra = [2, 3, 7, 0][(l.dist as usize - 16) & 3];
1622 Action::Jump(ReadExtraBitsCodeSize)
1623 }
1624 }
1625 )
1626 } else if l.counter != u32::from(r.table_sizes[LITLEN_TABLE]) + u32::from(r.table_sizes[DIST_TABLE]) {
1627 Action::Jump(BadCodeSizeSum)
1628 } else {
1630 r.code_size_literal[..r.table_sizes[LITLEN_TABLE] as usize]
1631 .copy_from_slice(&r.len_codes[..r.table_sizes[LITLEN_TABLE] as usize & LEN_CODES_MASK]);
1632
1633 let dist_table_start = r.table_sizes[LITLEN_TABLE] as usize;
1634 debug_assert!(dist_table_start < r.len_codes.len());
1635 let dist_table_end = (r.table_sizes[LITLEN_TABLE] +
1636 r.table_sizes[DIST_TABLE]) as usize;
1637 let code_size_dist_end = r.table_sizes[DIST_TABLE] as usize;
1638 debug_assert!(dist_table_end < r.len_codes.len());
1639 debug_assert!(code_size_dist_end < r.code_size_dist.len());
1640 let dist_table_start = dist_table_start & LEN_CODES_MASK;
1641 let dist_table_end = dist_table_end & LEN_CODES_MASK;
1642 r.code_size_dist[..code_size_dist_end & (MAX_HUFF_SYMBOLS_1 - 1)]
1643 .copy_from_slice(&r.len_codes[dist_table_start..dist_table_end]);
1644
1645 r.block_type -= 1;
1646 init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed))
1647 }
1648 }),
1649
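            // Handles the RFC 1951 repeat codes: code 16 repeats the previous code
            // length 3-6 times (2 extra bits), code 17 repeats a zero length 3-10
            // times (3 extra bits) and code 18 repeats a zero length 11-138 times
            // (7 extra bits). The [2, 3, 7, 0] table above holds the extra-bit
            // counts and the [3, 3, 11] table below holds the repeat base values.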
1650 ReadExtraBitsCodeSize => generate_state!(state, 'state_machine, {
1651 let num_extra = l.num_extra.into();
1652 read_bits(&mut l, num_extra, &mut in_iter, flags, |l, mut extra_bits| {
1653                    // Mask to avoid a bounds check.
1654                    // Masking with 2 works since the first two values are the same.
1655 extra_bits += [3, 3, 11][(l.dist as usize - 16) & 2];
1656 let val = if l.dist == 16 {
1657 debug_assert!(l.counter as usize - 1 < r.len_codes.len());
1658 r.len_codes[(l.counter as usize - 1) & LEN_CODES_MASK]
1659 } else {
1660 0
1661 };
1662
1663 let fill_start = l.counter as usize;
1664 let fill_end = l.counter as usize + extra_bits as usize;
1665 debug_assert!(fill_start < r.len_codes.len());
1666 debug_assert!(fill_end < r.len_codes.len());
1667
1668 r.len_codes[
1669 fill_start & LEN_CODES_MASK..fill_end & LEN_CODES_MASK
1670 ].fill(val);
1671 l.counter += extra_bits as u32;
1672 Action::Jump(ReadLitlenDistTablesCodeSize)
1673 })
1674 }),
1675
1676 DecodeLitlen => generate_state!(state, 'state_machine, {
1677 if in_iter.bytes_left() < 4 || out_buf.bytes_left() < 2 {
1678 // See if we can decode a literal with the data we have left.
1679 // Jumps to next state (WriteSymbol) if successful.
1680 decode_huffman_code(
1681 r,
1682 &mut l,
1683 LITLEN_TABLE,
1684 flags,
1685 &mut in_iter,
1686 |_r, l, symbol| {
1687 l.counter = symbol as u32;
1688 Action::Jump(WriteSymbol)
1689 },
1690 )
1691                } else if out_buf.bytes_left() >= 259 && in_iter.bytes_left() >= 14 {
1692                    // There is enough space and input available, so use the fast
1693                    // inner decompression function.
1697 let (status, new_state) = decompress_fast(
1698 r,
1699 &mut in_iter,
1700 &mut out_buf,
1701 flags,
1702 &mut l,
1703 out_buf_size_mask,
1704 );
1705
1706 state = new_state;
1707 if status == TINFLStatus::Done {
1708 Action::Jump(new_state)
1709 } else {
1710 Action::End(status)
1711 }
1712 } else {
1713 fill_bit_buffer(&mut l, &mut in_iter);
1714
1715 let (symbol, code_len) = r.tables[LITLEN_TABLE].lookup(l.bit_buf);
1716
1717 l.counter = symbol as u32;
1718 l.bit_buf >>= code_len;
1719 l.num_bits -= code_len;
1720
1721 if (l.counter & 256) != 0 {
1722 // The symbol is not a literal.
1723 Action::Jump(HuffDecodeOuterLoop1)
1724 } else {
1725 // If we have a 32-bit buffer we need to read another two bytes now
1726 // to have enough bits to keep going.
1727 if cfg!(not(target_pointer_width = "64")) {
1728 fill_bit_buffer(&mut l, &mut in_iter);
1729 }
1730
1731 let (symbol, code_len) = r.tables[LITLEN_TABLE].lookup(l.bit_buf);
1732
1733 l.bit_buf >>= code_len;
1734 l.num_bits -= code_len;
1735 // The previous symbol was a literal, so write it directly and check
1736 // the next one.
1737 out_buf.write_byte(l.counter as u8);
1738 if (symbol & 256) != 0 {
1739 l.counter = symbol as u32;
1740 // The symbol is a length value.
1741 Action::Jump(HuffDecodeOuterLoop1)
1742 } else {
1743 // The symbol is a literal, so write it directly and continue.
1744 out_buf.write_byte(symbol as u8);
1745 Action::None
1746 }
1747                    }
1748                }
1751 }),
1752
1753 WriteSymbol => generate_state!(state, 'state_machine, {
1754 if l.counter >= 256 {
1755 Action::Jump(HuffDecodeOuterLoop1)
1756 } else if out_buf.bytes_left() > 0 {
1757 out_buf.write_byte(l.counter as u8);
1758 Action::Jump(DecodeLitlen)
1759 } else {
1760 Action::End(TINFLStatus::HasMoreOutput)
1761 }
1762 }),
1763
1764 HuffDecodeOuterLoop1 => generate_state!(state, 'state_machine, {
1765 // Mask the top bits since they may contain length info.
1766 l.counter &= 511;
1767
1768                if l.counter == 256 {
1770 // We hit the end of block symbol.
1771 Action::Jump(BlockDone)
1772 } else if l.counter > 285 {
1773 // Invalid code.
1774 // We already verified earlier that the code is > 256.
1775 Action::Jump(InvalidLitlen)
1776 } else {
1777                    // # Optimization
1778                    // Mask the value to avoid bounds checks.
1779                    // We could use get_unchecked later if we can statically verify
1780                    // that this will never go out of bounds.
1781 l.num_extra =
1782 LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK];
1783 l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
1784 // Length and distance codes have a number of extra bits depending on
1785 // the base, which together with the base gives us the exact value.
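                    // For example, length code 266 has base length 13 and one extra
                    // bit, so it covers match lengths 13 and 14.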
1786 if l.num_extra != 0 {
1787 Action::Jump(ReadExtraBitsLitlen)
1788 } else {
1789 Action::Jump(DecodeDistance)
1790 }
1791 }
1792 }),
1793
1794 ReadExtraBitsLitlen => generate_state!(state, 'state_machine, {
1795 let num_extra = l.num_extra.into();
1796 read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
1797 l.counter += extra_bits as u32;
1798 Action::Jump(DecodeDistance)
1799 })
1800 }),
1801
1802 DecodeDistance => generate_state!(state, 'state_machine, {
1803 // Try to read a huffman code from the input buffer and look up what
1804 // length code the decoded symbol refers to.
1805 decode_huffman_code(r, &mut l, DIST_TABLE, flags, &mut in_iter, |_r, l, symbol| {
1806                    // # Optimization - transform the value into usize here before the
1807                    // check so the compiler can optimize the bounds check later.
1808                    // Ideally it should know from earlier in decode_huffman_code that
1809                    // the value can't be negative, but it seems it can't make that
1810                    // assumption, and the conversion could thus overflow if done
1811                    // after the check.
1812 let symbol = symbol as usize;
1813 if symbol > 29 {
1814 // Invalid distance code.
1815 return Action::Jump(InvalidDist)
1816 }
1817 l.num_extra = num_extra_bits_for_distance_code(symbol as u8);
1818 l.dist = u32::from(DIST_BASE[symbol]);
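                    // For example, distance code 4 has base distance 5 and one
                    // extra bit, so it covers distances 5 and 6.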
1819 if l.num_extra != 0 {
1820                        // Read the extra bits for this distance code next.
1821 Action::Jump(ReadExtraBitsDistance)
1822 } else {
1823 Action::Jump(HuffDecodeOuterLoop2)
1824 }
1825 })
1826 }),
1827
1828 ReadExtraBitsDistance => generate_state!(state, 'state_machine, {
1829 let num_extra = l.num_extra.into();
1830 read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
1831 l.dist += extra_bits as u32;
1832 Action::Jump(HuffDecodeOuterLoop2)
1833 })
1834 }),
1835
1836 HuffDecodeOuterLoop2 => generate_state!(state, 'state_machine, {
1837 if (l.dist as usize > out_buf.position() &&
1838 (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0)) || (l.dist as usize > out_buf.get_ref().len())
1839 {
1840                    // We encountered a distance that refers to a position before
1841                    // the start of the decoded data, so we can't continue.
1842 Action::Jump(DistanceOutOfBounds)
1843 } else {
1844 let out_pos = out_buf.position();
1845 let source_pos = out_buf.position()
1846 .wrapping_sub(l.dist as usize) & out_buf_size_mask;
1847
1848 let out_len = out_buf.get_ref().len();
1849 let match_end_pos = out_buf.position() + l.counter as usize;
1850
1851 if match_end_pos > out_len ||
1852 // miniz doesn't do this check here. Not sure how it makes sure
1853 // that this case doesn't happen.
1854 (source_pos >= out_pos && (source_pos - out_pos) < l.counter as usize)
1855 {
1856 // Not enough space for all of the data in the output buffer,
1857 // so copy what we have space for.
1858 if l.counter == 0 {
1859 Action::Jump(DecodeLitlen)
1860 } else {
1861 Action::Jump(WriteLenBytesToEnd)
1862 }
1863 } else {
1864 apply_match(
1865 out_buf.get_mut(),
1866 out_pos,
1867 l.dist as usize,
1868 l.counter as usize,
1869 out_buf_size_mask
1870 );
1871 out_buf.set_position(out_pos + l.counter as usize);
1872 Action::Jump(DecodeLitlen)
1873 }
1874 }
1875 }),
1876
1877 WriteLenBytesToEnd => generate_state!(state, 'state_machine, {
1878 if out_buf.bytes_left() > 0 {
1879 let out_pos = out_buf.position();
1880 let source_pos = out_buf.position()
1881 .wrapping_sub(l.dist as usize) & out_buf_size_mask;
1882
1884 let len = cmp::min(out_buf.bytes_left(), l.counter as usize);
1885
1886 transfer(out_buf.get_mut(), source_pos, out_pos, len, out_buf_size_mask);
1887
1888 out_buf.set_position(out_pos + len);
1889 l.counter -= len as u32;
1890 if l.counter == 0 {
1891 Action::Jump(DecodeLitlen)
1892 } else {
1893 Action::None
1894 }
1895 } else {
1896 Action::End(TINFLStatus::HasMoreOutput)
1897 }
1898 }),
1899
1900 BlockDone => generate_state!(state, 'state_machine, {
1901 // End once we've read the last block.
1902 if r.finish != 0 {
1903 pad_to_bytes(&mut l, &mut in_iter, flags, |_| Action::None);
1904
1905 let in_consumed = in_buf.len() - in_iter.bytes_left();
1906 let undo = undo_bytes(&mut l, in_consumed as u32) as usize;
1907 in_iter = InputWrapper::from_slice(in_buf[in_consumed - undo..].iter().as_slice());
1908
1909 l.bit_buf &= ((1 as BitBuffer) << l.num_bits) - 1;
1910 debug_assert_eq!(l.num_bits, 0);
1911
1912 if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
1913 l.counter = 0;
1914 Action::Jump(ReadAdler32)
1915 } else {
1916 Action::Jump(DoneForever)
1917 }
1918 } else {
1919 #[cfg(feature = "block-boundary")]
1920 if flags & TINFL_FLAG_STOP_ON_BLOCK_BOUNDARY != 0 {
1921 Action::End(TINFLStatus::BlockBoundary)
1922 } else {
1923 Action::Jump(ReadBlockHeader)
1924 }
1925 #[cfg(not(feature = "block-boundary"))]
1926 {
1927 Action::Jump(ReadBlockHeader)
1928 }
1929 }
1930 }),
1931
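            // Read the four bytes of the adler32 checksum stored big-endian in the
            // zlib trailer (RFC 1950) so it can be checked against the computed one.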
1932 ReadAdler32 => generate_state!(state, 'state_machine, {
1933 if l.counter < 4 {
1934 if l.num_bits != 0 {
1935 read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
1936 r.z_adler32 <<= 8;
1937 r.z_adler32 |= bits as u32;
1938 l.counter += 1;
1939 Action::None
1940 })
1941 } else {
1942 read_byte(&mut in_iter, flags, |byte| {
1943 r.z_adler32 <<= 8;
1944 r.z_adler32 |= u32::from(byte);
1945 l.counter += 1;
1946 Action::None
1947 })
1948 }
1949 } else {
1950 Action::Jump(DoneForever)
1951 }
1952 }),
1953
1954 // We are done.
1955 DoneForever => break TINFLStatus::Done,
1956
1957 // Anything else indicates failure.
1958 // BadZlibHeader | BadRawLength | BadDistOrLiteralTableLength | BlockTypeUnexpected |
1959 // DistanceOutOfBounds |
1960 // BadTotalSymbols | BadCodeSizeDistPrevLookup | BadCodeSizeSum | InvalidLitlen |
1961 // InvalidDist | InvalidCodeLen
1962 _ => break TINFLStatus::Failed,
1963 };
1964 };
1965
1966 let in_undo = if status != TINFLStatus::NeedsMoreInput
1967 && status != TINFLStatus::FailedCannotMakeProgress
1968 {
1969 undo_bytes(&mut l, (in_buf.len() - in_iter.bytes_left()) as u32) as usize
1970 } else {
1971 0
1972 };
1973
1974 // If we're returning after completing a block, prepare for the next block when called again.
1975 #[cfg(feature = "block-boundary")]
1976 if status == TINFLStatus::BlockBoundary {
1977 state = State::ReadBlockHeader;
1978 }
1979
1980 // Make sure HasMoreOutput overrides NeedsMoreInput if the output buffer is full.
1981 // (Unless the missing input is the adler32 value in which case we don't need to write anything.)
1982 // TODO: May want to see if we can do this in a better way.
1983 if status == TINFLStatus::NeedsMoreInput
1984 && out_buf.bytes_left() == 0
1985 && state != State::ReadAdler32
1986 {
1987 status = TINFLStatus::HasMoreOutput
1988 }
1989
1990 r.state = state;
1991 r.bit_buf = l.bit_buf;
1992 r.num_bits = l.num_bits;
1993 r.dist = l.dist;
1994 r.counter = l.counter;
1995 r.num_extra = l.num_extra;
1996
1997 r.bit_buf &= ((1 as BitBuffer) << r.num_bits) - 1;
1998
1999    // If this is a zlib stream, update the adler32 checksum with the decompressed bytes if
2000    // requested.
2001 let need_adler = if (flags & TINFL_FLAG_IGNORE_ADLER32) == 0 {
2002 flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0
2003 } else {
2004 // If TINFL_FLAG_IGNORE_ADLER32 is enabled, ignore the checksum.
2005 false
2006 };
2007 if need_adler && status as i32 >= 0 {
2008 let out_buf_pos = out_buf.position();
2009 r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]);
2010
2011        // Disabled when fuzzing so that random input from the fuzzer is not rejected
2012        // early, before it has a chance to reach the interesting parts of the code.
2013 if !cfg!(fuzzing) {
2014 // Once we are done, check if the checksum matches with the one provided in the zlib header.
2015 if status == TINFLStatus::Done
2016 && flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0
2017 && r.check_adler32 != r.z_adler32
2018 {
2019 status = TINFLStatus::Adler32Mismatch;
2020 }
2021 }
2022 }
2023
2024 (
2025 status,
2026 in_buf.len() - in_iter.bytes_left() - in_undo,
2027 out_buf.position() - out_pos,
2028 )
2029}
2030
2031#[cfg(test)]
2032mod test {
2033 use super::*;
2034
2035 //TODO: Fix these.
2036
2037 fn tinfl_decompress_oxide<'i>(
2038 r: &mut DecompressorOxide,
2039 input_buffer: &'i [u8],
2040 output_buffer: &mut [u8],
2041 flags: u32,
2042 ) -> (TINFLStatus, &'i [u8], usize) {
2043 let (status, in_pos, out_pos) = decompress(r, input_buffer, output_buffer, 0, flags);
2044 (status, &input_buffer[in_pos..], out_pos)
2045 }
2046
2047 #[test]
2048 fn decompress_zlib() {
2049 let encoded = [
2050 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
2051 ];
2052 let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER;
2053
2054 let mut b = DecompressorOxide::new();
2055 const LEN: usize = 32;
2056 let mut b_buf = [0; LEN];
2057
2058        // This should fail since the out buffer is too small.
2059 let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);
2060
2061 assert!(b_status.0 == TINFLStatus::Failed);
2062
2063 let flags = flags | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
2064
2065 b = DecompressorOxide::new();
2066
2067 // With TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF set this should no longer fail.
2068 let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);
2069
2070 assert_eq!(b_buf[..b_status.2], b"Hello, zlib!"[..]);
2071 assert!(b_status.0 == TINFLStatus::Done);
2072 }
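
    #[test]
    fn decompress_zlib_streaming() {
        // A minimal sketch of streaming use (not ported from an existing test):
        // feed the stream in two pieces and let the state machine resume.
        // This assumes splitting after 10 bytes leaves the stream incomplete,
        // so the first call reports NeedsMoreInput.
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        let mut r = DecompressorOxide::new();
        let mut buf = [0; 32];

        // First half: signal that more input will follow.
        let (status, in_consumed, out_written) =
            decompress(&mut r, &encoded[..10], &mut buf, 0, flags | TINFL_FLAG_HAS_MORE_INPUT);
        assert_eq!(status, TINFLStatus::NeedsMoreInput);

        // Second half: resume from the first unconsumed byte and the current
        // output position.
        let (status, _, out_written_2) =
            decompress(&mut r, &encoded[in_consumed..], &mut buf, out_written, flags);
        assert_eq!(status, TINFLStatus::Done);
        assert_eq!(buf[..out_written + out_written_2], b"Hello, zlib!"[..]);
    }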
2073
2074 #[cfg(feature = "with-alloc")]
2075 #[test]
2076 fn raw_block() {
2077 const LEN: usize = 64;
2078
2079 let text = b"Hello, zlib!";
2080 let encoded = {
2081 let len = text.len();
2082 let notlen = !len;
2083 let mut encoded = vec![
2084 1,
2085 len as u8,
2086 (len >> 8) as u8,
2087 notlen as u8,
2088 (notlen >> 8) as u8,
2089 ];
2090 encoded.extend_from_slice(&text[..]);
2091 encoded
2092 };
2093
2094 //let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER |
2095 let flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
2096
2097 let mut b = DecompressorOxide::new();
2098
2099 let mut b_buf = [0; LEN];
2100
2101 let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);
2102 assert_eq!(b_buf[..b_status.2], text[..]);
2103 assert_eq!(b_status.0, TINFLStatus::Done);
2104 }
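
    // A sketch of wrapping-buffer use (assumed supported configuration): without
    // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF the output buffer acts as a circular
    // dictionary, and is assumed here to need a power-of-two size of at least
    // TINFL_LZ_DICT_SIZE, which is what the streaming wrappers use.
    #[cfg(feature = "with-alloc")]
    #[test]
    fn wrapping_output_buffer() {
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER;

        let mut r = DecompressorOxide::new();
        let mut buf = vec![0; TINFL_LZ_DICT_SIZE];

        let (status, _, out_written) = decompress(&mut r, &encoded, &mut buf, 0, flags);
        assert_eq!(status, TINFLStatus::Done);
        assert_eq!(buf[..out_written], b"Hello, zlib!"[..]);
    }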
2105
2106 fn masked_lookup(table: &HuffmanTable, bit_buf: BitBuffer) -> (i32, u32) {
2107 let ret = table.lookup(bit_buf);
2108 (ret.0 & 511, ret.1)
2109 }
2110
2111 #[test]
2112 fn fixed_table_lookup() {
2113 let mut d = DecompressorOxide::new();
2114 d.block_type = 1;
2115 start_static_table(&mut d);
2116 let mut l = LocalVars {
2117 bit_buf: d.bit_buf,
2118 num_bits: d.num_bits,
2119 dist: d.dist,
2120 counter: d.counter,
2121 num_extra: d.num_extra,
2122 };
2123 init_tree(&mut d, &mut l).unwrap();
2124 let llt = &d.tables[LITLEN_TABLE];
2125 let dt = &d.tables[DIST_TABLE];
2126 assert_eq!(masked_lookup(llt, 0b00001100), (0, 8));
2127 assert_eq!(masked_lookup(llt, 0b00011110), (72, 8));
2128 assert_eq!(masked_lookup(llt, 0b01011110), (74, 8));
2129 assert_eq!(masked_lookup(llt, 0b11111101), (143, 8));
2130 assert_eq!(masked_lookup(llt, 0b000010011), (144, 9));
2131 assert_eq!(masked_lookup(llt, 0b111111111), (255, 9));
2132 assert_eq!(masked_lookup(llt, 0b00000000), (256, 7));
2133 assert_eq!(masked_lookup(llt, 0b1110100), (279, 7));
2134 assert_eq!(masked_lookup(llt, 0b00000011), (280, 8));
2135 assert_eq!(masked_lookup(llt, 0b11100011), (287, 8));
2136
2137 assert_eq!(masked_lookup(dt, 0), (0, 5));
2138 assert_eq!(masked_lookup(dt, 20), (5, 5));
2139 }
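
    // Sketch illustrating the fast-lookup packing: for the fixed tables every
    // code fits in the fast lookup table, so the value returned by `lookup`
    // carries the code length above the low 9 symbol bits, which is why
    // `masked_lookup` above masks with 511.
    #[test]
    fn fast_lookup_packing() {
        let mut d = DecompressorOxide::new();
        d.block_type = 1;
        start_static_table(&mut d);
        let mut l = LocalVars {
            bit_buf: d.bit_buf,
            num_bits: d.num_bits,
            dist: d.dist,
            counter: d.counter,
            num_extra: d.num_extra,
        };
        init_tree(&mut d, &mut l).unwrap();

        // Seven zero bits decode to the end-of-block symbol (256) in the fixed table.
        let (val, len) = d.tables[LITLEN_TABLE].lookup(0);
        assert_eq!(val & 511, 256);
        assert_eq!((val >> 9) as u32, len);
        assert_eq!(len, 7);
    }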
2140
2141 // Only run this test with alloc enabled as it uses a larger buffer.
2142 #[cfg(feature = "with-alloc")]
2143 fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) {
2144 let mut r = DecompressorOxide::default();
2145 let mut output_buf = vec![0; 1024 * 32];
2146 let flags = if zlib {
2147 inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
2148 } else {
2149 0
2150 } | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF
2151 | TINFL_FLAG_HAS_MORE_INPUT;
2152 let (d_status, _in_bytes, _out_bytes) =
2153 decompress(&mut r, input, &mut output_buf, 0, flags);
2154 assert_eq!(expected_status, d_status);
2155 assert_eq!(expected_state, r.state);
2156 }
2157
2158 #[cfg(feature = "with-alloc")]
2159 #[test]
2160 fn bogus_input() {
2161 use self::check_result as cr;
2162 const F: TINFLStatus = TINFLStatus::Failed;
2163 const OK: TINFLStatus = TINFLStatus::Done;
2164 // Bad CM.
2165 cr(&[0x77, 0x85], F, State::BadZlibHeader, true);
2166 // Bad window size (but check is correct).
2167 cr(&[0x88, 0x98], F, State::BadZlibHeader, true);
2168 // Bad check bits.
2169 cr(&[0x78, 0x98], F, State::BadZlibHeader, true);
2170
2171 // Too many code lengths. (From inflate library issues)
2172 cr(
2173 b"M\xff\xffM*\xad\xad\xad\xad\xad\xad\xad\xcd\xcd\xcdM",
2174 F,
2175 State::BadDistOrLiteralTableLength,
2176 false,
2177 );
2178
2179 // Bad CLEN (also from inflate library issues)
2180 cr(
2181 b"\xdd\xff\xff*M\x94ffffffffff",
2182 F,
2183 State::BadDistOrLiteralTableLength,
2184 false,
2185 );
2186
2187 // Port of inflate coverage tests from zlib-ng
2188 // https://github.com/Dead2/zlib-ng/blob/develop/test/infcover.c
2189 let c = |a, b, c| cr(a, b, c, false);
2190
2191 // Invalid uncompressed/raw block length.
2192 c(&[0, 0, 0, 0, 0], F, State::BadRawLength);
2193 // Ok empty uncompressed block.
2194 c(&[3, 0], OK, State::DoneForever);
2195 // Invalid block type.
2196 c(&[6], F, State::BlockTypeUnexpected);
2197 // Ok uncompressed block.
2198 c(&[1, 1, 0, 0xfe, 0xff, 0], OK, State::DoneForever);
2199 // Too many litlens, we handle this later than zlib, so this test won't
2200 // give the same result.
2201 // c(&[0xfc, 0, 0], F, State::BadTotalSymbols);
2202 // Invalid set of code lengths - TODO Check if this is the correct error for this.
2203 c(&[4, 0, 0xfe, 0xff], F, State::BadTotalSymbols);
2204 // Invalid repeat in list of code lengths.
2205 // (Try to repeat a non-existent code.)
2206 c(&[4, 0, 0x24, 0x49, 0], F, State::BadCodeSizeDistPrevLookup);
2207 // Missing end of block code (should we have a separate error for this?) - fails on further input
2208 // c(&[4, 0, 0x24, 0xe9, 0xff, 0x6d], F, State::BadTotalSymbols);
2209 // Invalid set of literals/lengths
2210 c(
2211 &[
2212 4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x71, 0xff, 0xff, 0x93, 0x11, 0,
2213 ],
2214 F,
2215 State::BadTotalSymbols,
2216 );
2217        // Invalid set of distances - currently returns NeedsMoreInput rather than failing.
2218 // c(&[4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x0f, 0xb4, 0xff, 0xff, 0xc3, 0x84], F, State::BadTotalSymbols);
2219 // Invalid distance code
2220 c(&[2, 0x7e, 0xff, 0xff], F, State::InvalidDist);
2221
2222 // Distance refers to position before the start
2223 c(
2224 &[0x0c, 0xc0, 0x81, 0, 0, 0, 0, 0, 0x90, 0xff, 0x6b, 0x4, 0],
2225 F,
2226 State::DistanceOutOfBounds,
2227 );
2228
2229 // Trailer
2230        // Bad gzip trailer checksum - gzip headers are not handled by miniz_oxide.
2231 //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
2232 // Bad gzip trailer length
2233 //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
2234 }
2235
2236 #[test]
2237 fn empty_output_buffer_non_wrapping() {
2238 let encoded = [
2239 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
2240 ];
2241 let flags = TINFL_FLAG_COMPUTE_ADLER32
2242 | TINFL_FLAG_PARSE_ZLIB_HEADER
2243 | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
2244 let mut r = DecompressorOxide::new();
2245 let mut output_buf: [u8; 0] = [];
2246        // Check that we handle an empty buffer properly without panicking.
2247 // https://github.com/Frommi/miniz_oxide/issues/23
2248 let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
2249 assert!(res == (TINFLStatus::HasMoreOutput, 4, 0));
2250 }
2251
2252 #[test]
2253 fn empty_output_buffer_wrapping() {
2254 let encoded = [
2255 0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
2256 ];
2257 let flags = TINFL_FLAG_COMPUTE_ADLER32;
2258 let mut r = DecompressorOxide::new();
2259 let mut output_buf: [u8; 0] = [];
2260        // Check that we handle an empty buffer properly without panicking.
2261 // https://github.com/Frommi/miniz_oxide/issues/23
2262 let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
2263 assert!(res == (TINFLStatus::HasMoreOutput, 2, 0));
2264 }
2265
2266 #[test]
2267 fn dist_extra_bits() {
2268 use self::num_extra_bits_for_distance_code;
2269 // Number of extra bits for each distance code.
2270 const DIST_EXTRA: [u8; 29] = [
2271 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12,
2272 12, 13,
2273 ];
2274
2275 for (i, &dist) in DIST_EXTRA.iter().enumerate() {
2276 assert_eq!(dist, num_extra_bits_for_distance_code(i as u8));
2277 }
2278 }
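
    // Spot-check the length/distance base and extra-bit tables used by
    // HuffDecodeOuterLoop1 and DecodeDistance against RFC 1951 section 3.2.5.
    #[test]
    fn length_and_dist_base_tables() {
        // Length code 257 -> length 3, no extra bits.
        assert_eq!(LENGTH_BASE[0], 3);
        assert_eq!(LENGTH_EXTRA[0], 0);
        // Length code 266 -> base 13 with 1 extra bit (lengths 13..=14).
        assert_eq!(LENGTH_BASE[266 - 257], 13);
        assert_eq!(LENGTH_EXTRA[266 - 257], 1);
        // Length code 285 -> length 258, no extra bits.
        assert_eq!(LENGTH_BASE[285 - 257], 258);
        assert_eq!(LENGTH_EXTRA[285 - 257], 0);
        // Distance code 4 -> base 5 (with 1 extra bit, distances 5..=6).
        assert_eq!(DIST_BASE[4], 5);
        // Distance code 29 -> base 24577, the largest base distance.
        assert_eq!(DIST_BASE[29], 24577);
    }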
2279
2280 #[test]
2281 fn check_tree() {
2282 let mut r = DecompressorOxide::new();
2283 let mut l = LocalVars {
2284 bit_buf: 0,
2285 num_bits: 0,
2286 dist: 0,
2287 counter: 0,
2288 num_extra: 0,
2289 };
2290
2291 r.code_size_huffman[0] = 1;
2292 r.code_size_huffman[1] = 1;
2293 //r.code_size_huffman[2] = 3;
2294 //r.code_size_huffman[3] = 3;
2295 //r.code_size_huffman[1] = 4;
2296 r.block_type = HUFFLEN_TABLE as u8;
2297 r.table_sizes[HUFFLEN_TABLE] = 4;
2298 let res = init_tree(&mut r, &mut l).unwrap();
2299
2300 let status = match res {
2301 Action::Jump(s) => s,
2302 _ => {
2303 //println!("issue");
2304 return;
2305 }
2306 };
2307 //println!("status {:?}", status);
2308 assert!(status != BadTotalSymbols);
2309 }
2310
2311 #[test]
2312 fn reverse_bits_lookup() {
2313 use super::reverse_bits;
2314 for i in 0..512 {
2315 assert_eq!(reverse_bits(i), i.reverse_bits());
2316 }
2317 }
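
    // Sketch of the adler32 verification path: corrupting the stored checksum
    // in an otherwise valid zlib stream should yield Adler32Mismatch.
    #[test]
    fn zlib_adler32_mismatch() {
        let mut encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        // Flip a bit in the last byte of the stored adler32 trailer.
        encoded[19] ^= 1;
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        let mut r = DecompressorOxide::new();
        let mut buf = [0; 32];
        let (status, _, _) = decompress(&mut r, &encoded, &mut buf, 0, flags);
        assert_eq!(status, TINFLStatus::Adler32Mismatch);
    }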
2318}