FreeBSD/Linux Kernel Cross Reference
sys/kern/inflate.c
1 /*
2 * Most parts of this file are not covered by:
3 * ----------------------------------------------------------------------------
4 * "THE BEER-WARE LICENSE" (Revision 42):
5 * <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
6 * can do whatever you want with this stuff. If we meet some day, and you think
7 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
8 * ----------------------------------------------------------------------------
9 *
10 * $FreeBSD: src/sys/kern/inflate.c,v 1.6.4.1 1999/09/05 08:14:47 peter Exp $
11 *
12 *
13 */
14
15 #include <sys/param.h>
16 #include <sys/inflate.h>
17 #ifdef KERNEL
18 #include <sys/systm.h>
19 #endif
20 #include <sys/mman.h>
21 #include <sys/malloc.h>
22
23 /* needed to make inflate() work */
24 #define uch u_char
25 #define ush u_short
26 #define ulg u_long
27
28 /* Stuff to make inflate() work */
29 #ifdef KERNEL
30 #define memzero(dest,len) bzero(dest,len)
31 #endif
32 #define NOMEMCPY
33 #ifdef KERNEL
34 #define FPRINTF printf
35 #else
36 extern void putstr (char *);
37 #define FPRINTF putstr
38 #endif
39
/*
 * Push the first y bytes of the sliding window out through the caller's
 * gz_output routine.  If the output routine reports failure (non-zero),
 * this macro RETURNS that error code from the enclosing function --
 * callers must be aware of the hidden control flow.
 * NOTE(review): not wrapped in do { } while (0); only safe where a bare
 * compound statement followed by ';' is syntactically acceptable, which
 * holds for every use site in this file.
 */
#define FLUSH(x,y) { \
	int foo = (*x->gz_output)(x->gz_private,x->gz_slide,y); \
	if (foo) \
		return foo; \
}
45
46 static const int qflag = 0;
47
48 #ifndef KERNEL /* want to use this file in kzip also */
49 extern unsigned char *malloc (int, int, int);
50 extern void free (void*, int);
51 #endif
52
53 /*
54 * This came from unzip-5.12. I have changed the flow to pass
55 * a structure pointer around, thus hopefully making it re-entrant.
56 * Poul-Henning
57 */
58
59 /* inflate.c -- put in the public domain by Mark Adler
60 version c14o, 23 August 1994 */
61
62 /* You can do whatever you like with this source file, though I would
63 prefer that if you modify it and redistribute it that you include
64 comments to that effect with your name and the date. Thank you.
65
66 History:
67 vers date who what
68 ---- --------- -------------- ------------------------------------
69 a ~~ Feb 92 M. Adler used full (large, one-step) lookup table
70 b1 21 Mar 92 M. Adler first version with partial lookup tables
71 b2 21 Mar 92 M. Adler fixed bug in fixed-code blocks
72 b3 22 Mar 92 M. Adler sped up match copies, cleaned up some
73 b4 25 Mar 92 M. Adler added prototypes; removed window[] (now
74 is the responsibility of unzip.h--also
75 changed name to slide[]), so needs diffs
76 for unzip.c and unzip.h (this allows
77 compiling in the small model on MSDOS);
78 fixed cast of q in huft_build();
79 b5 26 Mar 92 M. Adler got rid of unintended macro recursion.
80 b6 27 Mar 92 M. Adler got rid of nextbyte() routine. fixed
81 bug in inflate_fixed().
82 c1 30 Mar 92 M. Adler removed lbits, dbits environment variables.
83 changed BMAX to 16 for explode. Removed
84 OUTB usage, and replaced it with flush()--
85 this was a 20% speed improvement! Added
86 an explode.c (to replace unimplod.c) that
87 uses the huft routines here. Removed
88 register union.
89 c2 4 Apr 92 M. Adler fixed bug for file sizes a multiple of 32k.
90 c3 10 Apr 92 M. Adler reduced memory of code tables made by
91 huft_build significantly (factor of two to
92 three).
93 c4 15 Apr 92 M. Adler added NOMEMCPY do kill use of memcpy().
94 worked around a Turbo C optimization bug.
95 c5 21 Apr 92 M. Adler added the GZ_WSIZE #define to allow reducing
96 the 32K window size for specialized
97 applications.
98 c6 31 May 92 M. Adler added some typecasts to eliminate warnings
99 c7 27 Jun 92 G. Roelofs added some more typecasts (444: MSC bug).
100 c8 5 Oct 92 J-l. Gailly added ifdef'd code to deal with PKZIP bug.
101 c9 9 Oct 92 M. Adler removed a memory error message (~line 416).
102 c10 17 Oct 92 G. Roelofs changed ULONG/UWORD/byte to ulg/ush/uch,
103 removed old inflate, renamed inflate_entry
104 to inflate, added Mark's fix to a comment.
105 c10.5 14 Dec 92 M. Adler fix up error messages for incomplete trees.
106 c11 2 Jan 93 M. Adler fixed bug in detection of incomplete
107 tables, and removed assumption that EOB is
108 the longest code (bad assumption).
109 c12 3 Jan 93 M. Adler make tables for fixed blocks only once.
110 c13 5 Jan 93 M. Adler allow all zero length codes (pkzip 2.04c
111 outputs one zero length code for an empty
112 distance tree).
113 c14 12 Mar 93 M. Adler made inflate.c standalone with the
114 introduction of inflate.h.
115 c14b 16 Jul 93 G. Roelofs added (unsigned) typecast to w at 470.
116 c14c 19 Jul 93 J. Bush changed v[N_MAX], l[288], ll[28x+3x] arrays
117 to static for Amiga.
118 c14d 13 Aug 93 J-l. Gailly de-complicatified Mark's c[*p++]++ thing.
119 c14e 8 Oct 93 G. Roelofs changed memset() to memzero().
120 c14f 22 Oct 93 G. Roelofs renamed quietflg to qflag; made Trace()
121 conditional; added inflate_free().
122 c14g 28 Oct 93 G. Roelofs changed l/(lx+1) macro to pointer (Cray bug)
123 c14h 7 Dec 93 C. Ghisler huft_build() optimizations.
124 c14i 9 Jan 94 A. Verheijen set fixed_t{d,l} to NULL after freeing;
125 G. Roelofs check NEXTBYTE macro for GZ_EOF.
126 c14j 23 Jan 94 G. Roelofs removed Ghisler "optimizations"; ifdef'd
127 GZ_EOF check.
128 c14k 27 Feb 94 G. Roelofs added some typecasts to avoid warnings.
129 c14l 9 Apr 94 G. Roelofs fixed split comments on preprocessor lines
130 to avoid bug in Encore compiler.
131 c14m 7 Jul 94 P. Kienitz modified to allow assembler version of
132 inflate_codes() (define ASM_INFLATECODES)
133 c14n 22 Jul 94 G. Roelofs changed fprintf to FPRINTF for DLL versions
134 c14o 23 Aug 94 C. Spieler added a newline to a debug statement;
135 G. Roelofs added another typecast to avoid MSC warning
136 */
137
138
139 /*
140 Inflate deflated (PKZIP's method 8 compressed) data. The compression
141 method searches for as much of the current string of bytes (up to a
142 length of 258) in the previous 32K bytes. If it doesn't find any
143 matches (of at least length 3), it codes the next byte. Otherwise, it
144 codes the length of the matched string and its distance backwards from
145 the current position. There is a single Huffman code that codes both
146 single bytes (called "literals") and match lengths. A second Huffman
147 code codes the distance information, which follows a length code. Each
148 length or distance code actually represents a base value and a number
149 of "extra" (sometimes zero) bits to get to add to the base value. At
150 the end of each deflated block is a special end-of-block (EOB) literal/
151 length code. The decoding process is basically: get a literal/length
152 code; if EOB then done; if a literal, emit the decoded byte; if a
153 length then get the distance and emit the referred-to bytes from the
154 sliding window of previously emitted data.
155
156 There are (currently) three kinds of inflate blocks: stored, fixed, and
157 dynamic. The compressor outputs a chunk of data at a time and decides
158 which method to use on a chunk-by-chunk basis. A chunk might typically
159 be 32K to 64K, uncompressed. If the chunk is uncompressible, then the
160 "stored" method is used. In this case, the bytes are simply stored as
161 is, eight bits per byte, with none of the above coding. The bytes are
162 preceded by a count, since there is no longer an EOB code.
163
164 If the data is compressible, then either the fixed or dynamic methods
165 are used. In the dynamic method, the compressed data is preceded by
166 an encoding of the literal/length and distance Huffman codes that are
167 to be used to decode this block. The representation is itself Huffman
168 coded, and so is preceded by a description of that code. These code
169 descriptions take up a little space, and so for small blocks, there is
170 a predefined set of codes, called the fixed codes. The fixed method is
171 used if the block ends up smaller that way (usually for quite small
172 chunks); otherwise the dynamic method is used. In the latter case, the
173 codes are customized to the probabilities in the current block and so
174 can code it much better than the pre-determined fixed codes can.
175
176 The Huffman codes themselves are decoded using a multi-level table
177 lookup, in order to maximize the speed of decoding plus the speed of
178 building the decoding tables. See the comments below that precede the
179 lbits and dbits tuning parameters.
180 */
181
182
183 /*
184 Notes beyond the 1.93a appnote.txt:
185
186 1. Distance pointers never point before the beginning of the output
187 stream.
188 2. Distance pointers can point back across blocks, up to 32k away.
189 3. There is an implied maximum of 7 bits for the bit length table and
190 15 bits for the actual data.
191 4. If only one code exists, then it is encoded using one bit. (Zero
192 would be more efficient, but perhaps a little confusing.) If two
193 codes exist, they are coded using one bit each (0 and 1).
194 5. There is no way of sending zero distance codes--a dummy must be
195 sent if there are none. (History: a pre 2.0 version of PKZIP would
196 store blocks with no distance codes, but this was discovered to be
197 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
198 zero distance codes, which is sent as one code of zero bits in
199 length.
200 6. There are up to 286 literal/length codes. Code 256 represents the
201 end-of-block. Note however that the static length tree defines
202 288 codes just to fill out the Huffman codes. Codes 286 and 287
203 cannot be used though, since there is no length base or extra bits
204 defined for them. Similarly, there are up to 30 distance codes.
205 However, static trees define 32 codes (all 5 bits) to fill out the
206 Huffman codes, but the last two had better not show up in the data.
207 7. Unzip can check dynamic Huffman blocks for complete code sets.
208 The exception is that a single code would not be complete (see #4).
209 8. The five bits following the block type is really the number of
210 literal codes sent minus 257.
211 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
212 (1+6+6). Therefore, to output three times the length, you output
213 three codes (1+1+1), whereas to output four times the same length,
214 you only need two codes (1+3). Hmm.
215 10. In the tree reconstruction algorithm, Code = Code + Increment
216 only if BitLength(i) is not zero. (Pretty obvious.)
217 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
218 12. Note: length code 284 can represent 227-258, but length code 285
219 really is 258. The last length deserves its own, short code
220 since it gets used a lot in very redundant files. The length
221 258 is special since 258 - 3 (the min match length) is 255.
222 13. The literal/length and distance code bit lengths are read as a
223 single stream of lengths. It is possible (and advantageous) for
224 a repeat code (16, 17, or 18) to go across the boundary between
225 the two sets of lengths.
226 */
227
228
229 #define PKZIP_BUG_WORKAROUND /* PKZIP 1.93a problem--live with it */
230
231 /*
232 inflate.h must supply the uch slide[GZ_WSIZE] array and the NEXTBYTE,
233 FLUSH() and memzero macros. If the window size is not 32K, it
234 should also define GZ_WSIZE. If INFMOD is defined, it can include
235 compiled functions to support the NEXTBYTE and/or FLUSH() macros.
236 There are defaults for NEXTBYTE and FLUSH() below for use as
237 examples of what those functions need to do. Normally, you would
238 also want FLUSH() to compute a crc on the data. inflate.h also
239 needs to provide these typedefs:
240
241 typedef unsigned char uch;
242 typedef unsigned short ush;
243 typedef unsigned long ulg;
244
245 This module uses the external functions malloc() and free() (and
246 probably memset() or bzero() in the memzero() macro). Their
247 prototypes are normally found in <string.h> and <stdlib.h>.
248 */
249 #define INFMOD /* tell inflate.h to include code to be
250 * compiled */
251
252 /* Huffman code lookup table entry--this entry is four bytes for machines
253 that have 16-bit pointers (e.g. PC's in the small or medium model).
254 Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
255 means that v is a literal, 16 < e < 32 means that v is a pointer to
256 the next table, which codes e - 16 bits, and lastly e == 99 indicates
257 an unused code. If a code with e == 99 is looked up, this implies an
258 error in the data. */
259 struct huft {
260 uch e; /* number of extra bits or operation */
261 uch b; /* number of bits in this code or subcode */
262 union {
263 ush n; /* literal, length base, or distance
264 * base */
265 struct huft *t; /* pointer to next level of table */
266 } v;
267 };
268
269
270 /* Function prototypes */
271 static int huft_build __P((struct inflate *, unsigned *, unsigned, unsigned, const ush *, const ush *, struct huft **, int *));
272 static int huft_free __P((struct inflate *, struct huft *));
273 static int inflate_codes __P((struct inflate *, struct huft *, struct huft *, int, int));
274 static int inflate_stored __P((struct inflate *));
275 static int xinflate __P((struct inflate *));
276 static int inflate_fixed __P((struct inflate *));
277 static int inflate_dynamic __P((struct inflate *));
278 static int inflate_block __P((struct inflate *, int *));
279
280 /* The inflate algorithm uses a sliding 32K byte window on the uncompressed
281 stream to find repeated byte strings. This is implemented here as a
282 circular buffer. The index is updated simply by incrementing and then
283 and'ing with 0x7fff (32K-1). */
284 /* It is left to other modules to supply the 32K area. It is assumed
285 to be usable as if it were declared "uch slide[32768];" or as just
286 "uch *slide;" and then malloc'ed in the latter case. The definition
287 must be in unzip.h, included above. */
288
289
290 /* Tables for deflate from PKZIP's appnote.txt. */
291
292 /* Order of the bit length code lengths */
293 static const unsigned border[] = {
294 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
295
296 static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */
297 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
298 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
299 /* note: see note #13 above about the 258 in this list. */
300
301 static const ush cplext[] = { /* Extra bits for literal codes 257..285 */
302 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
303 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
304
305 static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
306 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
307 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
308 8193, 12289, 16385, 24577};
309
310 static const ush cpdext[] = { /* Extra bits for distance codes */
311 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
312 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
313 12, 12, 13, 13};
314
315 /* And'ing with mask[n] masks the lower n bits */
316 static const ush mask[] = {
317 0x0000,
318 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
319 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
320 };
321
322
323 /* Macros for inflate() bit peeking and grabbing.
324 The usage is:
325
326 NEEDBITS(glbl,j)
327 x = b & mask[j];
328 DUMPBITS(j)
329
330 where NEEDBITS makes sure that b has at least j bits in it, and
331 DUMPBITS removes the bits from b. The macros use the variable k
332 for the number of bits in b. Normally, b and k are register
333 variables for speed, and are initialized at the beginning of a
334 routine that uses these macros from a global bit buffer and count.
335
336 In order to not ask for more bits than there are in the compressed
337 stream, the Huffman tables are constructed to only ask for just
338 enough bits to make up the end-of-block code (value 256). Then no
339 bytes need to be "returned" to the buffer at the end of the last
340 block. See the huft_build() routine.
341 */
342
/*
 * The following 2 were global variables.
 * They are now fields of the inflate structure.
 */

/*
 * Ensure the local bit buffer holds at least n valid bits, fetching
 * further bytes from the caller's gz_input routine as required.
 * Relies on locals named b (bit buffer, ulg) and k (bit count) being
 * in scope at the expansion site.  On end of input this macro RETURNS
 * 1 from the enclosing function -- hidden control flow, same caveat
 * as FLUSH().
 */
#define NEEDBITS(glbl,n) { \
	while(k<(n)) { \
		int c=(*glbl->gz_input)(glbl->gz_private); \
		if(c==GZ_EOF) \
			return 1; \
		b|=((ulg)c)<<k; \
		k+=8; \
	} \
}

/* Discard n consumed bits from the bottom of the bit buffer b,
 * decrementing the bit count k to match. */
#define DUMPBITS(n) {b>>=(n);k-=(n);}
359
360 /*
361 Huffman code decoding is performed using a multi-level table lookup.
362 The fastest way to decode is to simply build a lookup table whose
363 size is determined by the longest code. However, the time it takes
364 to build this table can also be a factor if the data being decoded
365 is not very long. The most common codes are necessarily the
366 shortest codes, so those codes dominate the decoding time, and hence
367 the speed. The idea is you can have a shorter table that decodes the
368 shorter, more probable codes, and then point to subsidiary tables for
369 the longer codes. The time it costs to decode the longer codes is
370 then traded against the time it takes to make longer tables.
371
372 This results of this trade are in the variables lbits and dbits
373 below. lbits is the number of bits the first level table for literal/
374 length codes can decode in one step, and dbits is the same thing for
375 the distance codes. Subsequent tables are also less than or equal to
376 those sizes. These values may be adjusted either when all of the
377 codes are shorter than that, in which case the longest code length in
378 bits is used, or when the shortest code is *longer* than the requested
379 table size, in which case the length of the shortest code in bits is
380 used.
381
382 There are two different values for the two tables, since they code a
383 different number of possibilities each. The literal/length table
384 codes 286 possible values, or in a flat code, a little over eight
385 bits. The distance table codes 30 possible values, or a little less
386 than five bits, flat. The optimum values for speed end up being
387 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
388 The optimum values may differ though from machine to machine, and
389 possibly even between compilers. Your mileage may vary.
390 */
391
392 static const int lbits = 9; /* bits in base literal/length lookup table */
393 static const int dbits = 6; /* bits in base distance lookup table */
394
395
396 /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
397 #define BMAX 16 /* maximum bit length of any code (16 for
398 * explode) */
399 #define N_MAX 288 /* maximum number of codes in any set */
400
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return zero on success, one if
   the given code set is incomplete (the tables are still built in this
   case), two if the input is invalid (all zero length codes or an
   oversubscribed set of lengths), and three if not enough memory.
   The code with value 256 is special, and the tables are constructed
   so that no bits beyond that code are fetched when that code is
   decoded.

   The algorithm builds canonical Huffman codes from the length counts
   (codes of equal length are consecutive integers, shorter codes sort
   first), then materializes a root lookup table of at most *m bits with
   chained sub-tables for longer codes. */
static int
huft_build(glbl, b, n, s, d, e, t, m)
	struct inflate *glbl;
	unsigned *b;		/* code lengths in bits (all assumed <= BMAX) */
	unsigned n;		/* number of codes (assumed <= N_MAX) */
	unsigned s;		/* number of simple-valued codes (0..s-1) */
	const ush *d;		/* list of base values for non-simple codes */
	const ush *e;		/* list of extra bits for non-simple codes */
	struct huft **t;	/* result: starting table */
	int *m;			/* maximum lookup bits, returns actual */
{
	unsigned a;		/* counter for codes of length k */
	unsigned c[BMAX + 1];	/* bit length count table */
	unsigned el;		/* length of EOB code (value 256) */
	unsigned f;		/* i repeats in table every f entries */
	int g;			/* maximum code length */
	int h;			/* table level */
	register unsigned i;	/* counter, current code */
	register unsigned j;	/* counter */
	register int k;		/* number of bits in current code */
	int lx[BMAX + 1];	/* memory for l[-1..BMAX-1] */
	int *l = lx + 1;	/* stack of bits per table; l[-1] is valid */
	register unsigned *p;	/* pointer into c[], b[], or v[] */
	register struct huft *q;/* points to current table */
	struct huft r;		/* table entry for structure assignment */
	struct huft *u[BMAX];	/* table stack */
	unsigned v[N_MAX];	/* values in order of bit length */
	register int w;		/* bits before this table == (l * h) */
	unsigned x[BMAX + 1];	/* bit offsets, then code stack */
	unsigned *xp;		/* pointer into x */
	int y;			/* number of dummy codes added */
	unsigned z;		/* number of entries in current table */

	/* Generate counts for each bit length */
	el = n > 256 ? b[256] : BMAX;	/* set length of EOB code, if any */
#ifdef KERNEL
	memzero((char *) c, sizeof(c));
#else
	for (i = 0; i < BMAX+1; i++)
		c [i] = 0;
#endif
	p = b;
	i = n;
	do {
		c[*p]++;
		p++;		/* assume all entries <= BMAX */
	} while (--i);
	if (c[0] == n) {	/* null input--all zero length codes */
		*t = (struct huft *) NULL;
		*m = 0;
		return 0;
	}
	/* Find minimum and maximum length, bound *m by those */
	for (j = 1; j <= BMAX; j++)
		if (c[j])
			break;
	k = j;			/* minimum code length */
	if ((unsigned) *m < j)
		*m = j;
	for (i = BMAX; i; i--)
		if (c[i])
			break;
	g = i;			/* maximum code length */
	if ((unsigned) *m > i)
		*m = i;

	/* Adjust last length count to fill out codes, if needed.
	   y tracks the number of unused code patterns; going negative
	   means the length counts oversubscribe the code space. */
	for (y = 1 << j; j < i; j++, y <<= 1)
		if ((y -= c[j]) < 0)
			return 2;	/* bad input: more codes than bits */
	if ((y -= c[i]) < 0)
		return 2;
	c[i] += y;		/* add dummy codes to complete the set */

	/* Generate starting offsets into the value table for each length */
	x[1] = j = 0;
	p = c + 1;
	xp = x + 2;
	while (--i) {		/* note that i == g from above */
		*xp++ = (j += *p++);
	}

	/* Make a table of values in order of bit lengths */
	p = b;
	i = 0;
	do {
		if ((j = *p++) != 0)
			v[x[j]++] = i;
	} while (++i < n);

	/* Generate the Huffman codes and for each, make the table entries */
	x[0] = i = 0;		/* first Huffman code is zero */
	p = v;			/* grab values in bit order */
	h = -1;			/* no tables yet--level -1 */
	w = l[-1] = 0;		/* no bits decoded yet */
	u[0] = (struct huft *) NULL;	/* just to keep compilers happy */
	q = (struct huft *) NULL;	/* ditto */
	z = 0;			/* ditto */

	/* go through the bit lengths (k already is bits in shortest code) */
	for (; k <= g; k++) {
		a = c[k];
		while (a--) {
			/*
			 * here i is the Huffman code of length k bits for
			 * value *p
			 */
			/* make tables up to required level */
			while (k > w + l[h]) {
				w += l[h++];	/* add bits already decoded */

				/*
				 * compute minimum size table less than or
				 * equal to *m bits
				 */
				z = (z = g - w) > (unsigned) *m ? *m : z;	/* upper limit */
				if ((f = 1 << (j = k - w)) > a + 1) {	/* try a k-w bit table */
					/* too few codes for k-w bit table */
					f -= a + 1;	/* deduct codes from
							 * patterns left */
					xp = c + k;
					while (++j < z) {	/* try smaller tables up
								 * to z bits */
						if ((f <<= 1) <= *++xp)
							break;	/* enough codes to use
								 * up j bits */
						f -= *xp;	/* else deduct codes
								 * from patterns */
					}
				}
				if ((unsigned) w + j > el && (unsigned) w < el)
					j = el - w;	/* make EOB code end at
							 * table */
				z = 1 << j;	/* table entries for j-bit
						 * table */
				l[h] = j;	/* set table size in stack */

				/* allocate and link in new table; entry 0 is a
				 * dummy used as the huft_free() chain link */
				if ((q = (struct huft *) malloc((z + 1) * sizeof(struct huft), M_GZIP, M_WAITOK)) ==
				    (struct huft *) NULL) {
					if (h)
						huft_free(glbl, u[0]);
					return 3;	/* not enough memory */
				}
				glbl->gz_hufts += z + 1;	/* track memory usage */
				*t = q + 1;	/* link to list for
						 * huft_free() */
				*(t = &(q->v.t)) = (struct huft *) NULL;
				u[h] = ++q;	/* table starts after link */

				/* connect to last table, if there is one */
				if (h) {
					x[h] = i;	/* save pattern for
							 * backing up */
					r.b = (uch) l[h - 1];	/* bits to dump before
								 * this table */
					r.e = (uch) (16 + j);	/* bits in this table */
					r.v.t = q;	/* pointer to this table */
					j = (i & ((1 << w) - 1)) >> (w - l[h - 1]);
					u[h - 1][j] = r;	/* connect to last table */
				}
			}

			/* set up table entry in r */
			r.b = (uch) (k - w);
			if (p >= v + n)
				r.e = 99;	/* out of values--invalid
						 * code */
			else if (*p < s) {
				r.e = (uch) (*p < 256 ? 16 : 15);	/* 256 is end-of-block
									 * code */
				r.v.n = *p++;	/* simple code is just the
						 * value */
			} else {
				r.e = (uch) e[*p - s];	/* non-simple--look up
							 * in lists */
				r.v.n = d[*p++ - s];
			}

			/* fill code-like entries with r: every table slot whose
			 * low k-w bits match this code gets a copy */
			f = 1 << (k - w);
			for (j = i >> w; j < z; j += f)
				q[j] = r;

			/* backwards increment the k-bit code i (codes are kept
			 * bit-reversed, so increment from the top bit down) */
			for (j = 1 << (k - 1); i & j; j >>= 1)
				i ^= j;
			i ^= j;

			/* backup over finished tables */
			while ((i & ((1 << w) - 1)) != x[h])
				w -= l[--h];	/* don't need to update q */
		}
	}

	/* return actual size of base table */
	*m = l[0];

	/* Return true (1) if we were given an incomplete table */
	return y != 0 && g != 1;
}
611
612 static int
613 huft_free(glbl, t)
614 struct inflate *glbl;
615 struct huft *t; /* table to free */
616 /* Free the malloc'ed tables built by huft_build(), which makes a linked
617 list of the tables it made, with the links in a dummy first entry of
618 each table. */
619 {
620 register struct huft *p, *q;
621
622 /* Go through linked list, freeing from the malloced (t[-1]) address. */
623 p = t;
624 while (p != (struct huft *) NULL) {
625 q = (--p)->v.t;
626 free(p, M_GZIP);
627 p = q;
628 }
629 return 0;
630 }
631
/* inflate (decompress) the codes in a deflated (compressed) block.
   Return an error code or zero if it all goes ok.

   Core decode loop shared by the fixed and dynamic block types: reads
   literal/length codes from tl[], and for each length code reads a
   distance code from td[], then copies n bytes from d bytes back in
   the circular window.  NEEDBITS/FLUSH may return 1 or the output
   routine's error code directly from within this function. */
static int
inflate_codes(glbl, tl, td, bl, bd)
	struct inflate *glbl;
	struct huft *tl, *td;/* literal/length and distance decoder tables */
	int bl, bd;		/* number of bits decoded by tl[] and td[] */
{
	register unsigned e;	/* table entry flag/number of extra bits */
	unsigned n, d;		/* length and index for copy */
	unsigned w;		/* current window position */
	struct huft *t;		/* pointer to table entry */
	unsigned ml, md;	/* masks for bl and bd bits */
	register ulg b;		/* bit buffer */
	register unsigned k;	/* number of bits in bit buffer */

	/* make local copies of globals */
	b = glbl->gz_bb;	/* initialize bit buffer */
	k = glbl->gz_bk;
	w = glbl->gz_wp;	/* initialize window position */

	/* inflate the coded data */
	ml = mask[bl];		/* precompute masks for speed */
	md = mask[bd];
	while (1) {		/* do until end of block */
		NEEDBITS(glbl, (unsigned) bl)
		/* e > 16 means "follow pointer to next-level table";
		 * e == 99 marks an invalid code in the stream */
		if ((e = (t = tl + ((unsigned) b & ml))->e) > 16)
			do {
				if (e == 99)
					return 1;
				DUMPBITS(t->b)
				e -= 16;
				NEEDBITS(glbl, e)
			} while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
		DUMPBITS(t->b)
		if (e == 16) {	/* then it's a literal */
			glbl->gz_slide[w++] = (uch) t->v.n;
			if (w == GZ_WSIZE) {
				FLUSH(glbl, w);
				w = 0;
			}
		} else {	/* it's an EOB or a length */
			/* exit if end of block */
			if (e == 15)
				break;

			/* get length of block to copy */
			NEEDBITS(glbl, e)
			n = t->v.n + ((unsigned) b & mask[e]);
			DUMPBITS(e);

			/* decode distance of block to copy */
			NEEDBITS(glbl, (unsigned) bd)
			if ((e = (t = td + ((unsigned) b & md))->e) > 16)
				do {
					if (e == 99)
						return 1;
					DUMPBITS(t->b)
					e -= 16;
					NEEDBITS(glbl, e)
				} while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16);
			DUMPBITS(t->b)
			NEEDBITS(glbl, e)
			d = w - t->v.n - ((unsigned) b & mask[e]);
			DUMPBITS(e)
			/* do the copy; e becomes the size of the largest
			 * chunk that stays within the circular window */
			do {
				n -= (e = (e = GZ_WSIZE - ((d &= GZ_WSIZE - 1) > w ? d : w)) > n ? n : e);
#ifndef NOMEMCPY
				if (w - d >= e) {	/* (this test assumes
							 * unsigned comparison) */
					memcpy(glbl->gz_slide + w, glbl->gz_slide + d, e);
					w += e;
					d += e;
				} else	/* do it slow to avoid memcpy()
					 * overlap */
#endif /* !NOMEMCPY */
					do {
						glbl->gz_slide[w++] = glbl->gz_slide[d++];
					} while (--e);
				if (w == GZ_WSIZE) {
					FLUSH(glbl, w);
					w = 0;
				}
			} while (n);
		}
	}

	/* restore the globals from the locals */
	glbl->gz_wp = w;	/* restore global window pointer */
	glbl->gz_bb = b;	/* restore global bit buffer */
	glbl->gz_bk = k;

	/* done */
	return 0;
}
728
/* "decompress" an inflated type 0 (stored) block.

   Stored blocks are byte-aligned: skip to the next byte boundary, read
   a 16-bit length followed by its one's complement (a consistency
   check), then pass the bytes straight through the sliding window.
   Returns 0 on success, 1 on a corrupt length field or EOF (via
   NEEDBITS), or the output routine's error (via FLUSH). */
static int
inflate_stored(glbl)
	struct inflate *glbl;
{
	unsigned n;		/* number of bytes in block */
	unsigned w;		/* current window position */
	register ulg b;		/* bit buffer */
	register unsigned k;	/* number of bits in bit buffer */

	/* make local copies of globals */
	b = glbl->gz_bb;	/* initialize bit buffer */
	k = glbl->gz_bk;
	w = glbl->gz_wp;	/* initialize window position */

	/* go to byte boundary */
	n = k & 7;
	DUMPBITS(n);

	/* get the length and its complement */
	NEEDBITS(glbl, 16)
	n = ((unsigned) b & 0xffff);
	DUMPBITS(16)
	NEEDBITS(glbl, 16)
	if (n != (unsigned) ((~b) & 0xffff))
		return 1;	/* error in compressed data */
	DUMPBITS(16)
	/* read and output the compressed data */
	while (n--) {
		NEEDBITS(glbl, 8)
		glbl->gz_slide[w++] = (uch) b;
		if (w == GZ_WSIZE) {
			FLUSH(glbl, w);
			w = 0;
		}
		DUMPBITS(8)
	}

	/* restore the globals from the locals */
	glbl->gz_wp = w;	/* restore global window pointer */
	glbl->gz_bb = b;	/* restore global bit buffer */
	glbl->gz_bk = k;
	return 0;
}
773
/* decompress an inflated type 1 (fixed Huffman codes) block. We should
   either replace this with a custom decoder, or at least precompute the
   Huffman tables.

   The fixed tables are built once and cached in the inflate structure
   (gz_fixed_tl/td); only the first call per structure pays the
   huft_build() cost. */
static int
inflate_fixed(glbl)
	struct inflate *glbl;
{
	/* if first time, set up tables for fixed blocks */
	if (glbl->gz_fixed_tl == (struct huft *) NULL) {
		int i;		/* temporary variable */
		static unsigned l[288];	/* length list for huft_build */

		/* literal table: the fixed lengths from the deflate spec */
		for (i = 0; i < 144; i++)
			l[i] = 8;
		for (; i < 256; i++)
			l[i] = 9;
		for (; i < 280; i++)
			l[i] = 7;
		for (; i < 288; i++)	/* make a complete, but wrong code
					 * set */
			l[i] = 8;
		glbl->gz_fixed_bl = 7;
		/* NOTE(review): this rejects any non-zero return, and if
		 * huft_build() returned 1 (incomplete) a partially built
		 * table would be dropped without huft_free() -- possible
		 * leak; confirm against later revisions of this file. */
		if ((i = huft_build(glbl, l, 288, 257, cplens, cplext,
			    &glbl->gz_fixed_tl, &glbl->gz_fixed_bl)) != 0) {
			glbl->gz_fixed_tl = (struct huft *) NULL;
			return i;
		}
		/* distance table */
		for (i = 0; i < 30; i++)	/* make an incomplete code
						 * set */
			l[i] = 5;
		glbl->gz_fixed_bd = 5;
		/* > 1 here (not != 0): the distance set is deliberately
		 * incomplete, so a return of 1 is expected and accepted */
		if ((i = huft_build(glbl, l, 30, 0, cpdist, cpdext,
			    &glbl->gz_fixed_td, &glbl->gz_fixed_bd)) > 1) {
			huft_free(glbl, glbl->gz_fixed_tl);
			glbl->gz_fixed_tl = (struct huft *) NULL;
			return i;
		}
	}
	/* decompress until an end-of-block code */
	return inflate_codes(glbl, glbl->gz_fixed_tl, glbl->gz_fixed_td, glbl->gz_fixed_bl, glbl->gz_fixed_bd) != 0;
}
817
/* decompress an inflated type 2 (dynamic Huffman codes) block. */
/*
 * Reads the code-length, literal/length and distance code descriptions
 * from the input stream (RFC 1951, section 3.2.7), builds temporary
 * Huffman tables with huft_build(), then decodes the block's data via
 * inflate_codes().  Returns 0 on success, non-zero on malformed input
 * or an unusable (incomplete) code set.
 */
static int
inflate_dynamic(glbl)
	struct inflate *glbl;
{
	int i;			/* temporary variables */
	unsigned j;
	unsigned l;		/* last length */
	unsigned m;		/* mask for bit lengths table */
	unsigned n;		/* number of lengths to get */
	struct huft *tl;	/* literal/length code table */
	struct huft *td;	/* distance code table */
	int bl;			/* lookup bits for tl */
	int bd;			/* lookup bits for td */
	unsigned nb;		/* number of bit length codes */
	unsigned nl;		/* number of literal/length codes */
	unsigned nd;		/* number of distance codes */
#ifdef PKZIP_BUG_WORKAROUND
	unsigned ll[288 + 32];	/* literal/length and distance code
				 * lengths */
#else
	unsigned ll[286 + 30];	/* literal/length and distance code
				 * lengths */
#endif
	register ulg b;		/* bit buffer (name required by NEEDBITS) */
	register unsigned k;	/* number of bits in bit buffer */

	/* make local bit buffer */
	b = glbl->gz_bb;
	k = glbl->gz_bk;

	/* read in table lengths (HLIT, HDIST, HCLEN header fields) */
	NEEDBITS(glbl, 5)
	nl = 257 + ((unsigned) b & 0x1f);	/* number of
						 * literal/length codes */
	DUMPBITS(5)
	NEEDBITS(glbl, 5)
	nd = 1 + ((unsigned) b & 0x1f);	/* number of distance codes */
	DUMPBITS(5)
	NEEDBITS(glbl, 4)
	nb = 4 + ((unsigned) b & 0xf);	/* number of bit length codes */
	DUMPBITS(4)
#ifdef PKZIP_BUG_WORKAROUND
	if (nl > 288 || nd > 32)
#else
	if (nl > 286 || nd > 30)
#endif
		return 1;	/* bad lengths */
	/* read in bit-length-code lengths, stored in the "border" order */
	for (j = 0; j < nb; j++) {
		NEEDBITS(glbl, 3)
		ll[border[j]] = (unsigned) b & 7;
		DUMPBITS(3)
	}
	for (; j < 19; j++)	/* unsent code lengths default to zero */
		ll[border[j]] = 0;

	/* build decoding table for trees--single level, 7 bit lookup */
	bl = 7;
	if ((i = huft_build(glbl, ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) {
		if (i == 1)	/* incomplete set still built a table */
			huft_free(glbl, tl);
		return i;	/* incomplete code set */
	}
	/* read in literal and distance code lengths */
	n = nl + nd;
	m = mask[bl];
	i = l = 0;
	while ((unsigned) i < n) {
		NEEDBITS(glbl, (unsigned) bl)
		j = (td = tl + ((unsigned) b & m))->b;
		DUMPBITS(j)
		j = td->v.n;
		if (j < 16)	/* length of code in bits (0..15) */
			ll[i++] = l = j;	/* save last length in l */
		else if (j == 16) {	/* repeat last length 3 to 6 times */
			NEEDBITS(glbl, 2)
			j = 3 + ((unsigned) b & 3);
			DUMPBITS(2)
			if ((unsigned) i + j > n)	/* repeat overruns table */
				return 1;
			while (j--)
				ll[i++] = l;
		} else if (j == 17) {	/* 3 to 10 zero length codes */
			NEEDBITS(glbl, 3)
			j = 3 + ((unsigned) b & 7);
			DUMPBITS(3)
			if ((unsigned) i + j > n)
				return 1;
			while (j--)
				ll[i++] = 0;
			l = 0;
		} else {	/* j == 18: 11 to 138 zero length codes */
			NEEDBITS(glbl, 7)
			j = 11 + ((unsigned) b & 0x7f);
			DUMPBITS(7)
			if ((unsigned) i + j > n)
				return 1;
			while (j--)
				ll[i++] = 0;
			l = 0;
		}
	}

	/* free decoding table for trees */
	huft_free(glbl, tl);

	/* restore the global bit buffer */
	glbl->gz_bb = b;
	glbl->gz_bk = k;

	/* build the decoding tables for literal/length and distance codes */
	bl = lbits;
	i = huft_build(glbl, ll, nl, 257, cplens, cplext, &tl, &bl);
	if (i != 0) {
		if (i == 1 && !qflag) {
			FPRINTF("(incomplete l-tree) ");
			huft_free(glbl, tl);
		}
		return i;	/* incomplete code set */
	}
	bd = dbits;
	i = huft_build(glbl, ll + nl, nd, 0, cpdist, cpdext, &td, &bd);
	if (i != 0) {
		if (i == 1 && !qflag) {
			FPRINTF("(incomplete d-tree) ");
			/*
			 * NOTE: the braces below are deliberately unbalanced
			 * across the two preprocessor arms -- each arm closes
			 * the blocks opened above in its own way.  With the
			 * PKZIP workaround an incomplete d-tree is forgiven
			 * (i forced to 0) and decoding continues.
			 */
#ifdef PKZIP_BUG_WORKAROUND
			i = 0;
		}
#else
			huft_free(glbl, td);
		}
		huft_free(glbl, tl);
		return i;	/* incomplete code set */
#endif
	}
	/* decompress until an end-of-block code */
	if (inflate_codes(glbl, tl, td, bl, bd))
		return 1;

	/* free the decoding tables, return */
	huft_free(glbl, tl);
	huft_free(glbl, td);
	return 0;
}
963
964 /* decompress an inflated block */
965 static int
966 inflate_block(glbl, e)
967 struct inflate *glbl;
968 int *e; /* last block flag */
969 {
970 unsigned t; /* block type */
971 register ulg b; /* bit buffer */
972 register unsigned k; /* number of bits in bit buffer */
973
974 /* make local bit buffer */
975 b = glbl->gz_bb;
976 k = glbl->gz_bk;
977
978 /* read in last block bit */
979 NEEDBITS(glbl, 1)
980 * e = (int) b & 1;
981 DUMPBITS(1)
982 /* read in block type */
983 NEEDBITS(glbl, 2)
984 t = (unsigned) b & 3;
985 DUMPBITS(2)
986 /* restore the global bit buffer */
987 glbl->gz_bb = b;
988 glbl->gz_bk = k;
989
990 /* inflate that block type */
991 if (t == 2)
992 return inflate_dynamic(glbl);
993 if (t == 0)
994 return inflate_stored(glbl);
995 if (t == 1)
996 return inflate_fixed(glbl);
997 /* bad block type */
998 return 2;
999 }
1000
1001
1002
1003 /* decompress an inflated entry */
1004 static int
1005 xinflate(glbl)
1006 struct inflate *glbl;
1007 {
1008 int e; /* last block flag */
1009 int r; /* result code */
1010 unsigned h; /* maximum struct huft's malloc'ed */
1011
1012 glbl->gz_fixed_tl = (struct huft *) NULL;
1013
1014 /* initialize window, bit buffer */
1015 glbl->gz_wp = 0;
1016 glbl->gz_bk = 0;
1017 glbl->gz_bb = 0;
1018
1019 /* decompress until the last block */
1020 h = 0;
1021 do {
1022 glbl->gz_hufts = 0;
1023 if ((r = inflate_block(glbl, &e)) != 0)
1024 return r;
1025 if (glbl->gz_hufts > h)
1026 h = glbl->gz_hufts;
1027 } while (!e);
1028
1029 /* flush out slide */
1030 FLUSH(glbl, glbl->gz_wp);
1031
1032 /* return success */
1033 return 0;
1034 }
1035
/* Nobody uses this - why not? */
/*
 * Public entry point: decompress the stream described by *glbl.  In the
 * kernel, a GZ_WSIZE sliding window is allocated here (and freed again
 * on the way out) if the caller did not supply one; userland callers
 * (kzip) must provide glbl->gz_slide themselves.  Returns 0 on success,
 * ENOMEM / 3 when no window is available, or xinflate()'s error code.
 */
int
inflate(glbl)
	struct inflate *glbl;
{
	int i;			/* result from xinflate() */
#ifdef KERNEL
	u_char *p = NULL;	/* non-NULL only if we allocate the window */

	if (!glbl->gz_slide)
		p = glbl->gz_slide = malloc(GZ_WSIZE, M_GZIP, M_WAITOK);
#endif
	if (!glbl->gz_slide)
#ifdef KERNEL
		return(ENOMEM);
#else
		return 3;	/* kzip expects 3 */
#endif
	i = xinflate(glbl);

	/* release the cached fixed-block Huffman tables, if any were built */
	if (glbl->gz_fixed_td != (struct huft *) NULL) {
		huft_free(glbl, glbl->gz_fixed_td);
		glbl->gz_fixed_td = (struct huft *) NULL;
	}
	if (glbl->gz_fixed_tl != (struct huft *) NULL) {
		huft_free(glbl, glbl->gz_fixed_tl);
		glbl->gz_fixed_tl = (struct huft *) NULL;
	}
#ifdef KERNEL
	/* free the window only if we allocated it above */
	if (p == glbl->gz_slide) {
		free(glbl->gz_slide, M_GZIP);
		glbl->gz_slide = NULL;
	}
#endif
	return i;
}
1072 /* ----------------------- END INFLATE.C */
Cache object: 7525aaee2a2c1918ec69edf056bdc5d3
|