#ifndef DEFUTIL_H
#define DEFUTIL_H

#include <linux/zutil.h>

#define Assert(err, str)
#define Trace(dummy)
#define Tracev(dummy)
#define Tracecv(err, dummy)
#define Tracevv(dummy)

#define LENGTH_CODES 29
/* number of length codes, not counting the special END_BLOCK code */

#define LITERALS  256
/* number of literal bytes 0..255 */

#define L_CODES (LITERALS+1+LENGTH_CODES)
/* number of Literal or Length codes, including the END_BLOCK code */

#define D_CODES   30
/* number of distance codes */

#define BL_CODES  19
/* number of codes used to transfer the bit lengths */

#define HEAP_SIZE (2*L_CODES+1)
/* maximum heap size */

#define MAX_BITS 15
/* All codes must not exceed MAX_BITS bits */

#define INIT_STATE    42
#define BUSY_STATE   113
#define FINISH_STATE 666
/* Stream status */


/* Data structure describing a single value and its code string. */
typedef struct ct_data_s {
    union {
        ush freq;        /* frequency count */
        ush code;        /* bit string */
    } fc;
    union {
        ush dad;         /* father node in Huffman tree */
        ush len;         /* length of bit string */
    } dl;
} ct_data;

#define Freq fc.freq
#define Code fc.code
#define Dad  dl.dad
#define Len  dl.len
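
/* Usage sketch (illustration only): the Freq and Code members overlay each
 * other, so a tree slot is written through the Freq view while frequencies
 * are being tallied and read through the Code/Len view once trees.c has
 * assigned the Huffman codes, e.g.
 *
 *     s->dyn_ltree[c].Freq++;        (while tallying literal c)
 *     code = s->dyn_ltree[c].Code;   (after the tree is built)
 *     bits = s->dyn_ltree[c].Len;
 */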

typedef struct static_tree_desc_s  static_tree_desc;

typedef struct tree_desc_s {
    ct_data *dyn_tree;           /* the dynamic tree */
    int     max_code;            /* largest code with non zero frequency */
    static_tree_desc *stat_desc; /* the corresponding static tree */
} tree_desc;

typedef ush Pos;
typedef unsigned IPos;

/* A Pos is an index in the character window. We use short instead of int to
 * save space in the various tables. IPos is used only for parameter passing.
 */

typedef struct deflate_state {
    z_streamp strm;         /* pointer back to this zlib stream */
    int   status;           /* as the name implies */
    Byte *pending_buf;      /* output still pending */
    ulg   pending_buf_size; /* size of pending_buf */
    Byte *pending_out;      /* next pending byte to output to the stream */
    int   pending;          /* nb of bytes in the pending buffer */
    int   noheader;         /* suppress zlib header and adler32 */
    Byte  data_type;        /* UNKNOWN, BINARY or ASCII */
    Byte  method;           /* STORED (for zip only) or DEFLATED */
    int   last_flush;       /* value of flush param for previous deflate call */

    /* used by deflate.c: */

    uInt  w_size;           /* LZ77 window size (32K by default) */
    uInt  w_bits;           /* log2(w_size)  (8..16) */
    uInt  w_mask;           /* w_size - 1 */

    Byte *window;
    /* Sliding window. Input bytes are read into the second half of the window,
     * and move to the first half later to keep a dictionary of at least wSize
     * bytes. With this organization, matches are limited to a distance of
     * wSize-MAX_MATCH bytes, but this ensures that IO is always
     * performed with a length multiple of the block size. Also, it limits
     * the window size to 64K, which is quite useful on MSDOS.
     * To do: use the user input buffer as sliding window.
     */
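
    /* Worked example (illustration only, assuming the default w_size of
     * 32768): window_size is 2 * 32768 = 65536 bytes. New input is read into
     * the upper half, and when the write position nears the end of the buffer
     * the upper 32K is copied down to the lower half so earlier data stays
     * available as a dictionary; back-references are then limited to
     * MAX_DIST(s) = 32768 - MIN_LOOKAHEAD = 32506 bytes behind the current
     * position (see the macros below).
     */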

    ulg window_size;
    /* Actual size of window: 2*wSize, except when the user input buffer
     * is directly used as sliding window.
     */

    Pos *prev;
    /* Link to older string with same hash index. To limit the size of this
     * array to 64K, this link is maintained only for the last 32K strings.
     * An index in this array is thus a window index modulo 32K.
     */

    Pos *head;              /* Heads of the hash chains or NIL. */

    uInt  ins_h;            /* hash index of string to be inserted */
    uInt  hash_size;        /* number of elements in hash table */
    uInt  hash_bits;        /* log2(hash_size) */
    uInt  hash_mask;        /* hash_size-1 */

    uInt  hash_shift;
    /* Number of bits by which ins_h must be shifted at each input
     * step. It must be such that after MIN_MATCH steps, the oldest
     * byte no longer takes part in the hash key, that is:
     *   hash_shift * MIN_MATCH >= hash_bits
     */
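
    /* Worked example (illustration only, assuming the default memLevel of 8,
     * so hash_bits = 8 + 7 = 15, and MIN_MATCH = 3): the deflate setup code
     * picks hash_shift = (hash_bits + MIN_MATCH - 1) / MIN_MATCH = 5, and
     * 5 * 3 = 15 >= hash_bits, so after three hash-update steps the oldest
     * byte has been shifted out of ins_h entirely.
     */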

    long block_start;
    /* Window position at the beginning of the current output block. Gets
     * negative when the window is moved backwards.
     */

    uInt match_length;      /* length of best match */
    IPos prev_match;        /* previous match */
    int  match_available;   /* set if previous match exists */
    uInt strstart;          /* start of string to insert */
    uInt match_start;       /* start of matching string */
    uInt lookahead;         /* number of valid bytes ahead in window */

    uInt prev_length;
    /* Length of the best match at previous step. Matches not greater than this
     * are discarded. This is used in the lazy match evaluation.
     */

    uInt max_chain_length;
    /* To speed up deflation, hash chains are never searched beyond this
     * length. A higher limit improves compression ratio but degrades the
     * speed.
     */

    uInt max_lazy_match;
    /* Attempt to find a better match only when the current match is strictly
     * smaller than this value. This mechanism is used only for compression
     * levels >= 4.
     */
#   define max_insert_length  max_lazy_match
    /* Insert new strings in the hash table only if the match length is not
     * greater than this length. This saves time but degrades compression.
     * max_insert_length is used only for compression levels <= 3.
     */

    int level;              /* compression level (1..9) */
    int strategy;           /* favor or force Huffman coding */

    uInt good_match;
    /* Use a faster search when the previous match is longer than this */

    int nice_match;         /* Stop searching when current match exceeds this */

    /* used by trees.c: */
    /* Didn't use ct_data typedef below to suppress compiler warning */
    struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
    struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

    struct tree_desc_s l_desc;   /* desc. for literal tree */
    struct tree_desc_s d_desc;   /* desc. for distance tree */
    struct tree_desc_s bl_desc;  /* desc. for bit length tree */

    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    int heap[2*L_CODES+1];  /* heap used to build the Huffman trees */
    int heap_len;           /* number of elements in the heap */
    int heap_max;           /* element of largest frequency */
    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
     * The same heap array is used to build all trees.
     */

    uch depth[2*L_CODES+1];
    /* Depth of each subtree used as tie breaker for trees of equal frequency
     */

    uch *l_buf;             /* buffer for literals or lengths */

    uInt lit_bufsize;
    /* Size of match buffer for literals/lengths. There are 4 reasons for
     * limiting lit_bufsize to 64K:
     *   - frequencies can be kept in 16 bit counters
     *   - if compression is not successful for the first block, all input
     *     data is still in the window so we can still emit a stored block even
     *     when input comes from standard input. (This can also be done for
     *     all blocks if lit_bufsize is not greater than 32K.)
     *   - if compression is not successful for a file smaller than 64K, we can
     *     even emit a stored file instead of a stored block (saving 5 bytes).
     *     This is applicable only for zip (not gzip or zlib).
     *   - creating new Huffman trees less frequently may not provide fast
     *     adaptation to changes in the input data statistics. (Take for
     *     example a binary file with poorly compressible code followed by
     *     a highly compressible string table.) Smaller buffer sizes give
     *     fast adaptation but have of course the overhead of transmitting
     *     trees more frequently.
     *   - I can't count above 4
     */
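
    /* Worked example (illustration only, assuming the default memLevel of 8):
     * the buffer is sized from memLevel, giving lit_bufsize = 1 << (8 + 6) =
     * 16384 entries, well below the 64K limit discussed above; d_buf holds
     * the same number of ush distances, which matches the
     * zlib_deflate_overlay_memsize() arithmetic further down.
     */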

    uInt last_lit;          /* running index in l_buf */

    ush *d_buf;
    /* Buffer for distances. To simplify the code, d_buf and l_buf have
     * the same number of elements. To use different lengths, an extra flag
     * array would be necessary.
     */

    ulg opt_len;            /* bit length of current block with optimal trees */
    ulg static_len;         /* bit length of current block with static trees */
    ulg compressed_len;     /* total bit length of compressed file */
    uInt matches;           /* number of string matches in current block */
    int last_eob_len;       /* bit length of EOB code for last block */

#ifdef DEBUG_ZLIB
    ulg bits_sent;          /* bit length of the compressed data */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf. All bits above the last valid bit
     * are always zero.
     */

} deflate_state;

#ifdef CONFIG_ZLIB_DFLTCC
#define zlib_deflate_window_memsize(windowBits) \
    (2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
#else
#define zlib_deflate_window_memsize(windowBits) \
    (2 * (1 << (windowBits)) * sizeof(Byte))
#endif
#define zlib_deflate_prev_memsize(windowBits) \
    ((1 << (windowBits)) * sizeof(Pos))
#define zlib_deflate_head_memsize(memLevel) \
    ((1 << ((memLevel)+7)) * sizeof(Pos))
#define zlib_deflate_overlay_memsize(memLevel) \
    ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
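
/* Worked example (illustration only, assuming the defaults windowBits = 15,
 * memLevel = 8 and a 2-byte ush/Pos): the window needs 2 * 32768 = 64 KiB
 * (plus PAGE_SIZE of slack when CONFIG_ZLIB_DFLTCC is enabled), prev needs
 * 32768 * 2 = 64 KiB, head needs (1 << 15) * 2 = 64 KiB, and the overlay
 * buffer needs (1 << 14) * (2 + 2) = 64 KiB. These sizes let callers
 * preallocate the whole deflate workspace instead of allocating memory
 * from inside the compressor.
 */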

/* Output a byte on the stream.
 * IN assertion: there is enough room in pending_buf.
 */
#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}


#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
 * distances are limited to MAX_DIST instead of WSIZE.
 */
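
/* Worked example (illustration only, using the zlib constants MAX_MATCH = 258
 * and MIN_MATCH = 3): MIN_LOOKAHEAD = 258 + 3 + 1 = 262, so with the default
 * 32K window MAX_DIST(s) = 32768 - 262 = 32506.
 */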

/* in trees.c */
void zlib_tr_init (deflate_state *s);
int  zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
ulg  zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
                          int eof);
void zlib_tr_align (deflate_state *s);
void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
                           int eof);
void zlib_tr_stored_type_only (deflate_state *);


/* ===========================================================================
 * Output a short LSB first on the stream.
 * IN assertion: there is enough room in pendingBuf.
 */
#define put_short(s, w) { \
    put_byte(s, (uch)((w) & 0xff)); \
    put_byte(s, (uch)((ush)(w) >> 8)); \
}
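
/* Byte-order example (illustration only): put_short(s, 0x1234) appends the
 * low byte first, so the pending buffer receives 0x34 followed by 0x12,
 * matching the little-endian layout of DEFLATE's 16-bit fields such as the
 * LEN/NLEN header of a stored block.
 */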

/* ===========================================================================
 * Reverse the first len bits of a code, using straightforward code (a faster
 * method would use a table)
 * IN assertion: 1 <= len <= 15
 */
static inline unsigned bi_reverse(
    unsigned code, /* the value to invert */
    int len        /* its bit length */
)
{
    register unsigned res = 0;
    do {
        res |= code & 1;
        code >>= 1, res <<= 1;
    } while (--len > 0);
    return res >> 1;
}
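
/* Worked example (illustration only): bi_reverse(0x05, 5) turns the bit
 * string 00101 into 10100 and returns 0x14. Huffman codes are stored
 * bit-reversed because the bit buffer below emits least significant bits
 * first.
 */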

/* ===========================================================================
 * Flush the bit buffer, keeping at most 7 bits in it.
 */
static inline void bi_flush(deflate_state *s)
{
    if (s->bi_valid == 16) {
        put_short(s, s->bi_buf);
        s->bi_buf = 0;
        s->bi_valid = 0;
    } else if (s->bi_valid >= 8) {
        put_byte(s, (Byte)s->bi_buf);
        s->bi_buf >>= 8;
        s->bi_valid -= 8;
    }
}
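
/* Example (illustration only): with bi_valid == 13, the low 8 bits of bi_buf
 * are written with put_byte() and the remaining 5 bits are shifted down, so
 * at most 7 bits ever stay buffered between calls.
 */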

/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
static inline void bi_windup(deflate_state *s)
{
    if (s->bi_valid > 8) {
        put_short(s, s->bi_buf);
    } else if (s->bi_valid > 0) {
        put_byte(s, (Byte)s->bi_buf);
    }
    s->bi_buf = 0;
    s->bi_valid = 0;
#ifdef DEBUG_ZLIB
    s->bits_sent = (s->bits_sent+7) & ~7;
#endif
}

typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

#define Buf_size (8 * 2*sizeof(char))
/* Number of bits used within bi_buf. (bi_buf might be implemented on
 * more than 16 bits on some systems.)
 */

/* ===========================================================================
 * Send a value on a given number of bits.
 * IN assertion: length <= 16 and value fits in length bits.
 */
#ifdef DEBUG_ZLIB
static void send_bits (deflate_state *s, int value, int length);

static void send_bits(
    deflate_state *s,
    int value,  /* value to send */
    int length  /* number of bits */
)
{
    Tracevv((stderr," l %2d v %4x ", length, value));
    Assert(length > 0 && length <= 15, "invalid length");
    s->bits_sent += (ulg)length;

    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
     * unused bits in value.
     */
    if (s->bi_valid > (int)Buf_size - length) {
        s->bi_buf |= (value << s->bi_valid);
        put_short(s, s->bi_buf);
        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
        s->bi_valid += length - Buf_size;
    } else {
        s->bi_buf |= value << s->bi_valid;
        s->bi_valid += length;
    }
}
#else /* !DEBUG_ZLIB */

#define send_bits(s, value, length) \
{ int len = length;\
  if (s->bi_valid > (int)Buf_size - len) {\
    int val = value;\
    s->bi_buf |= (val << s->bi_valid);\
    put_short(s, s->bi_buf);\
    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
    s->bi_valid += len - Buf_size;\
  } else {\
    s->bi_buf |= (value) << s->bi_valid;\
    s->bi_valid += len;\
  }\
}
#endif /* DEBUG_ZLIB */
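
/* Worked example (illustration only): with Buf_size = 16, bi_valid = 10 and
 * a 9-bit value, 10 > 16 - 9, so the low 6 bits of the value complete bi_buf,
 * put_short() flushes those 16 bits, the remaining 3 high bits become the new
 * bi_buf, and bi_valid ends up at 10 + 9 - 16 = 3.
 */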

static inline void zlib_tr_send_bits(
    deflate_state *s,
    int value,
    int length
)
{
    send_bits(s, value, length);
}

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output goes
 * through this function so some applications may wish to modify it
 * to avoid allocating a large strm->next_out buffer and copying into it.
 * (See also read_buf()).
 */
static inline void flush_pending(
    z_streamp strm
)
{
    deflate_state *s = (deflate_state *) strm->state;
    unsigned len = s->pending;

    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    if (strm->next_out != NULL) {
        memcpy(strm->next_out, s->pending_out, len);
        strm->next_out += len;
    }
    s->pending_out += len;
    strm->total_out += len;
    strm->avail_out -= len;
    s->pending -= len;
    if (s->pending == 0) {
        s->pending_out = s->pending_buf;
    }
}
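
/* Example (illustration only): if 300 bytes are pending but the caller only
 * provided avail_out == 256, flush_pending() copies 256 bytes, advances
 * pending_out, and leaves pending == 44 for a later call once the caller
 * supplies a fresh output buffer. The NULL check on next_out lets the
 * pending counters still be drained when no user output buffer is attached.
 */
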
#endif /* DEFUTIL_H */