1 /*
2 * pts_defl.c -- C source file ZIP compression ripped from linux-2.6.8.1
3 * by pts@fazekas.hu at Tue Jan 18 15:19:06 CET 2005
4 *
5 * This ZIP compression (ZIP == PostScript /FlateEncode compression filter
6 * (ZLIB RFC 1950)) routine has been ripped from the Linux kernel 2.6.8.1
7 * (directory lib/zlib_deflate), which has been ripped from ZLIB 1.1.3
8 *
9 * To have a UNIX filter program (stdin -> stdout), compile zipfilt.c
10 *
11 * Dat: the exported symbols are: zlib_deflate zlib_deflateEnd
12 * zlib_deflateInit_ zlib_deflateInit2_ zlib_deflateParams
13 * zlib_deflate_workspacesize
14 *
15 */
16
17 /* ---- <rip> by pts */
18
19 /* +++ deflate.c */
20 /* deflate.c -- compress data using the deflation algorithm
21 * Copyright (C) 1995-1996 Jean-loup Gailly.
22 * For conditions of distribution and use, see copyright notice in zlib.h
23 */
24
25 /*
26 * ALGORITHM
27 *
28 * The "deflation" process depends on being able to identify portions
29 * of the input text which are identical to earlier input (within a
30 * sliding window trailing behind the input currently being processed).
31 *
32 * The most straightforward technique turns out to be the fastest for
33 * most input files: try all possible matches and select the longest.
34 * The key feature of this algorithm is that insertions into the string
35 * dictionary are very simple and thus fast, and deletions are avoided
36 * completely. Insertions are performed at each input character, whereas
37 * string matches are performed only when the previous match ends. So it
38 * is preferable to spend more time in matches to allow very fast string
39 * insertions and avoid deletions. The matching algorithm for small
40 * strings is inspired from that of Rabin & Karp. A brute force approach
41 * is used to find longer strings when a small match has been found.
42 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
43 * (by Leonid Broukhis).
44 * A previous version of this file used a more sophisticated algorithm
45 * (by Fiala and Greene) which is guaranteed to run in linear amortized
46 * time, but has a larger average cost, uses more memory and is patented.
47 * However the F&G algorithm may be faster for some highly redundant
48 * files if the parameter max_chain_length (described below) is too large.
49 *
50 * ACKNOWLEDGEMENTS
51 *
52 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
53 * I found it in 'freeze' written by Leonid Broukhis.
54 * Thanks to many people for bug reports and testing.
55 *
56 * REFERENCES
57 *
58 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
59 * Available in ftp://ds.internic.net/rfc/rfc1951.txt
60 *
61 * A description of the Rabin and Karp algorithm is given in the book
62 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
63 *
64 * Fiala,E.R., and Greene,D.H.
65 * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
66 *
67 */
68
69 #if 0 /**** pts ****/
70 # include <linux/module.h>
71 #endif
72
73 /**** pts ****/
74 #if OBJDEP /* sam2p */
75 # warning PROVIDES: pts_defl
76 #endif
77 #if HAVE_CONFIG2_H /* sam2p */
78 # include "config2.h"
79 #endif
80
81 /* #include <linux/zutil.h> */
82 /* zutil.h -- internal interface and configuration of the compression library
83 * Copyright (C) 1995-1998 Jean-loup Gailly.
84 * For conditions of distribution and use, see copyright notice in zlib.h
85 */
86
87 /* WARNING: this file should *not* be used by applications. It is
88 part of the implementation of the compression library and is
89 subject to change. Applications should only use zlib.h.
90 */
91
92 /* @(#) $Id: pts_defl.c,v 1.6 2008/08/28 20:16:47 pts Exp $ */
93
94 #ifndef _Z_UTIL_H
95 #define _Z_UTIL_H
96
97 #if 0 /**** pts ****/
98 # include <linux/zlib.h>
99 #else
100 # include "pts_defl.h"
101 #endif
102 #if 0 /**** pts ****/
103 # include <linux/string.h>
104 # include <linux/errno.h>
105 # include <linux/kernel.h>
106 #endif
107
108 /* zlib.h -- interface of the 'zlib' general purpose compression library
109 version 1.1.3, July 9th, 1998
110
111 Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
112
113 This software is provided 'as-is', without any express or implied
114 warranty. In no event will the authors be held liable for any damages
115 arising from the use of this software.
116
117 Permission is granted to anyone to use this software for any purpose,
118 including commercial applications, and to alter it and redistribute it
119 freely, subject to the following restrictions:
120
121 1. The origin of this software must not be misrepresented; you must not
122 claim that you wrote the original software. If you use this software
123 in a product, an acknowledgment in the product documentation would be
124 appreciated but is not required.
125 2. Altered source versions must be plainly marked as such, and must not be
126 misrepresented as being the original software.
127 3. This notice may not be removed or altered from any source distribution.
128
129 Jean-loup Gailly Mark Adler
130 jloup@gzip.org madler@alumni.caltech.edu
131
132
133 The data format used by the zlib library is described by RFCs (Request for
134 Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
135 (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
136 */
137
138 /*#include <linux/zconf.h>*/
139 /* zconf.h -- configuration of the zlib compression library
140 * Copyright (C) 1995-1998 Jean-loup Gailly.
141 * For conditions of distribution and use, see copyright notice in zlib.h
142 */
143
144 /* @(#) $Id: pts_defl.c,v 1.6 2008/08/28 20:16:47 pts Exp $ */
145
146 /*#ifndef _ZCONF_H*/
147 /*#define _ZCONF_H*/
148
149 #ifndef NULLP /**** pts ****/
150 #define NULLP ((void*)0) /* Imp: g++, with const */
151 #endif
152
153 #if 0 /**** pts ****/ /* Dat: `inline' is not ANSI C */
154 # define ZINLINE inline
155 #else
156 # define ZINLINE
157 #endif
158 #define ZSTATIC static /**** pts ****/
159
160 /* The memory requirements for deflate are (in bytes):
161 (1 << (windowBits+2)) + (1 << (memLevel+9))
162 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
163 plus a few kilobytes for small objects. For example, if you want to reduce
164 the default memory requirements from 256K to 128K, compile with
165 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
166 Of course this will generally degrade compression (there's no free lunch).
167
168 The memory requirements for inflate are (in bytes) 1 << windowBits
169 that is, 32K for windowBits=15 (default value) plus a few kilobytes
170 for small objects.
171 */
172
173 /* Maximum value for memLevel in deflateInit2 */
174 #ifndef MAX_MEM_LEVEL
175 # define MAX_MEM_LEVEL 8
176 #endif
177
178 /* Maximum value for windowBits in deflateInit2 and inflateInit2.
179 * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
180 * created by gzip. (Files created by minigzip can still be extracted by
181 * gzip.)
182 */
183 #ifndef MAX_WBITS
184 # define MAX_WBITS 15 /* 32K LZ77 window */
185 #endif
186
187 /* Type declarations */
188
189 typedef unsigned char Byte; /* 8 bits */
190 typedef unsigned int uInt; /* 16 bits or more */
191 typedef unsigned long uLong; /* 32 bits or more */
192 typedef void *voidp; /* generic untyped pointer */
193
194 /*#endif*/ /* _ZCONF_H */
195
196 /* end of linux/zconf.h */
197
198 #if 0
199 #define ZLIB_VERSION "1.1.3"
200 #endif
201
202 /*
203 The 'zlib' compression library provides in-memory compression and
204 decompression functions, including integrity checks of the uncompressed
205 data. This version of the library supports only one compression method
206 (deflation) but other algorithms will be added later and will have the same
207 stream interface.
208
209 Compression can be done in a single step if the buffers are large
210 enough (for example if an input file is mmap'ed), or can be done by
211 repeated calls of the compression function. In the latter case, the
212 application must provide more input and/or consume the output
213 (providing more output space) before each call.
214
215 The library also supports reading and writing files in gzip (.gz) format
216 with an interface similar to that of stdio.
217
218 The library does not install any signal handler. The decoder checks
219 the consistency of the compressed data, so the library should never
220 crash even in case of corrupted input.
221 */
222
223 #if 0
224 struct zlib_internal_state; /**** pts ****/ /* Dat: was: internal_state */
225
226 typedef struct z_stream_s {
227 Byte *next_in; /* next input byte */
228 uInt avail_in; /* number of bytes available at next_in */
229 uLong total_in; /* total nb of input bytes read so far */
230
231 Byte *next_out; /* next output byte should be put there */
232 uInt avail_out; /* remaining free space at next_out */
233 uLong total_out; /* total nb of bytes output so far */
234
235 char *msg; /* last error message, NULLP if no error */
236 struct zlib_internal_state *state; /* not visible by applications */
237
238 void *workspace; /* memory allocated for this stream */
239
240 int data_type; /* best guess about the data type: ascii or binary */
241 uLong adler; /* adler32 value of the uncompressed data */
242 uLong reserved; /* reserved for future use */
243 } z_stream;
244
245 /*
246 The application must update next_in and avail_in when avail_in has
247 dropped to zero. It must update next_out and avail_out when avail_out
248 has dropped to zero. The application must initialize zalloc, zfree and
249 opaque before calling the init function. All other fields are set by the
250 compression library and must not be updated by the application.
251
252 The opaque value provided by the application will be passed as the first
253 parameter for calls of zalloc and zfree. This can be useful for custom
254 memory management. The compression library attaches no meaning to the
255 opaque value.
256
257 zalloc must return NULLP if there is not enough memory for the object.
258 If zlib is used in a multi-threaded application, zalloc and zfree must be
259 thread safe.
260
261 On 16-bit systems, the functions zalloc and zfree must be able to allocate
262 exactly 65536 bytes, but will not be required to allocate more than this
263 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
264 pointers returned by zalloc for objects of exactly 65536 bytes *must*
265 have their offset normalized to zero. The default allocation function
266 provided by this library ensures this (see zutil.c). To reduce memory
267 requirements and avoid any allocation of 64K objects, at the expense of
268 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
269
270 The fields total_in and total_out can be used for statistics or
271 progress reports. After compression, total_in holds the total size of
272 the uncompressed data and may be saved for use in the decompressor
273 (particularly if the decompressor wants to decompress everything in
274 a single step).
275 */
276 #endif
277 typedef z_stream *z_streamp;
278
279 /* constants */
280
281 #if 0
282 #define Z_NO_FLUSH 0
283 #define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
284 #define Z_PACKET_FLUSH 2
285 #define Z_SYNC_FLUSH 3
286 #define Z_FULL_FLUSH 4
287 #define Z_FINISH 5
288 /* Allowed flush values; see deflate() below for details */
289
290 #define Z_OK 0
291 #define Z_STREAM_END 1
292 #define Z_NEED_DICT 2
293 #define Z_ERRNO (-1)
294 #define Z_STREAM_ERROR (-2)
295 #define Z_DATA_ERROR (-3)
296 #define Z_MEM_ERROR (-4)
297 #define Z_BUF_ERROR (-5)
298 #define Z_VERSION_ERROR (-6)
299 /* Return codes for the compression/decompression functions. Negative
300 * values are errors, positive values are used for special but normal events.
301 */
302
303 #define Z_NO_COMPRESSION 0
304 #define Z_BEST_SPEED 1
305 #define Z_BEST_COMPRESSION 9
306 #define Z_DEFAULT_COMPRESSION (-1)
307 /* compression levels */
308 #endif
309
310 #define Z_FILTERED 1
311 #define Z_HUFFMAN_ONLY 2
312 #define Z_DEFAULT_STRATEGY 0
313 /* compression strategy; see deflateInit2() below for details */
314
315 #if 0
316 #define Z_BINARY 0
317 #define Z_ASCII 1
318 #define Z_UNKNOWN 2
319 /* Possible values of the data_type field */
320 #endif
321
322 #define Z_DEFLATED 8
323 /* The deflate compression method (the only one supported in this version) */
324
325 /* basic functions */
326
327 extern const char * zlib_zlibVersion (void);
328 /* The application can compare zlibVersion and ZLIB_VERSION for consistency.
329 If the first character differs, the library code actually used is
330 not compatible with the zlib.h header file used by the application.
331 This check is automatically made by deflateInit and inflateInit.
332 */
333
334 extern int zlib_deflate_workspacesize (void);
335 /*
336 Returns the number of bytes that needs to be allocated for a per-
337 stream workspace. A pointer to this number of bytes should be
338 returned in stream->workspace before calling zlib_deflateInit().
339 */
340
341 /*
342 extern int deflateInit (z_streamp strm, int level);
343
344 Initializes the internal stream state for compression. The fields
345 zalloc, zfree and opaque must be initialized before by the caller.
346 If zalloc and zfree are set to NULLP, deflateInit updates them to
347 use default allocation functions.
348
349 The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
350 1 gives best speed, 9 gives best compression, 0 gives no compression at
351 all (the input data is simply copied a block at a time).
352 Z_DEFAULT_COMPRESSION requests a default compromise between speed and
353 compression (currently equivalent to level 6).
354
355 deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
356 enough memory, Z_STREAM_ERROR if level is not a valid compression level,
357 Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
358 with the version assumed by the caller (ZLIB_VERSION).
359 msg is set to null if there is no error message. deflateInit does not
360 perform any compression: this will be done by deflate().
361 */
362
363
364 extern int zlib_deflate (z_streamp strm, int flush);
365 /*
366 deflate compresses as much data as possible, and stops when the input
367 buffer becomes empty or the output buffer becomes full. It may introduce some
368 output latency (reading input without producing any output) except when
369 forced to flush.
370
371 The detailed semantics are as follows. deflate performs one or both of the
372 following actions:
373
374 - Compress more input starting at next_in and update next_in and avail_in
375 accordingly. If not all input can be processed (because there is not
376 enough room in the output buffer), next_in and avail_in are updated and
377 processing will resume at this point for the next call of deflate().
378
379 - Provide more output starting at next_out and update next_out and avail_out
380 accordingly. This action is forced if the parameter flush is non zero.
381 Forcing flush frequently degrades the compression ratio, so this parameter
382 should be set only when necessary (in interactive applications).
383 Some output may be provided even if flush is not set.
384
385 Before the call of deflate(), the application should ensure that at least
386 one of the actions is possible, by providing more input and/or consuming
387 more output, and updating avail_in or avail_out accordingly; avail_out
388 should never be zero before the call. The application can consume the
389 compressed output when it wants, for example when the output buffer is full
390 (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
391 and with zero avail_out, it must be called again after making room in the
392 output buffer because there might be more output pending.
393
394 If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
395 flushed to the output buffer and the output is aligned on a byte boundary, so
396 that the decompressor can get all input data available so far. (In particular
397 avail_in is zero after the call if enough output space has been provided
398 before the call.) Flushing may degrade compression for some compression
399 algorithms and so it should be used only when necessary.
400
401 If flush is set to Z_FULL_FLUSH, all output is flushed as with
402 Z_SYNC_FLUSH, and the compression state is reset so that decompression can
403 restart from this point if previous compressed data has been damaged or if
404 random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
405 the compression.
406
407 If deflate returns with avail_out == 0, this function must be called again
408 with the same value of the flush parameter and more output space (updated
409 avail_out), until the flush is complete (deflate returns with non-zero
410 avail_out).
411
412 If the parameter flush is set to Z_FINISH, pending input is processed,
413 pending output is flushed and deflate returns with Z_STREAM_END if there
414 was enough output space; if deflate returns with Z_OK, this function must be
415 called again with Z_FINISH and more output space (updated avail_out) but no
416 more input data, until it returns with Z_STREAM_END or an error. After
417 deflate has returned Z_STREAM_END, the only possible operations on the
418 stream are deflateReset or deflateEnd.
419
420 Z_FINISH can be used immediately after deflateInit if all the compression
421 is to be done in a single step. In this case, avail_out must be at least
422 0.1% larger than avail_in plus 12 bytes. If deflate does not return
423 Z_STREAM_END, then it must be called again as described above.
424
425 deflate() sets strm->adler to the adler32 checksum of all input read
426 so far (that is, total_in bytes).
427
428 deflate() may update data_type if it can make a good guess about
429 the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
430 binary. This field is only for information purposes and does not affect
431 the compression algorithm in any manner.
432
433 deflate() returns Z_OK if some progress has been made (more input
434 processed or more output produced), Z_STREAM_END if all input has been
435 consumed and all output has been produced (only when flush is set to
436 Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
437 if next_in or next_out was NULLP), Z_BUF_ERROR if no progress is possible
438 (for example avail_in or avail_out was zero).
439 */
440
441
442 extern int zlib_deflateEnd (z_streamp strm);
443 /*
444 All dynamically allocated data structures for this stream are freed.
445 This function discards any unprocessed input and does not flush any
446 pending output.
447
448 deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
449 stream state was inconsistent, Z_DATA_ERROR if the stream was freed
450 prematurely (some input or output was discarded). In the error case,
451 msg may be set but then points to a static string (which must not be
452 deallocated).
453 */
454
455
456 extern int zlib_inflate_workspacesize (void);
457 /*
458 Returns the number of bytes that needs to be allocated for a per-
459 stream workspace. A pointer to this number of bytes should be
460 returned in stream->workspace before calling zlib_inflateInit().
461 */
462
463 /*
464 extern int zlib_inflateInit (z_streamp strm);
465
466 Initializes the internal stream state for decompression. The fields
467 next_in, avail_in, and workspace must be initialized before by
468 the caller. If next_in is not NULLP and avail_in is large enough (the exact
469 value depends on the compression method), inflateInit determines the
470 compression method from the zlib header and allocates all data structures
471 accordingly; otherwise the allocation will be deferred to the first call of
472 inflate. If zalloc and zfree are set to NULLP, inflateInit updates them to
473 use default allocation functions.
474
475 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
476 memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
477 version assumed by the caller. msg is set to null if there is no error
478 message. inflateInit does not perform any decompression apart from reading
479 the zlib header if present: this will be done by inflate(). (So next_in and
480 avail_in may be modified, but next_out and avail_out are unchanged.)
481 */
482
483
484 extern int zlib_inflate (z_streamp strm, int flush);
485 /*
486 inflate decompresses as much data as possible, and stops when the input
487   buffer becomes empty or the output buffer becomes full. It may
488   introduce some output latency (reading input without producing any output)
489 except when forced to flush.
490
491 The detailed semantics are as follows. inflate performs one or both of the
492 following actions:
493
494 - Decompress more input starting at next_in and update next_in and avail_in
495 accordingly. If not all input can be processed (because there is not
496 enough room in the output buffer), next_in is updated and processing
497 will resume at this point for the next call of inflate().
498
499 - Provide more output starting at next_out and update next_out and avail_out
500 accordingly. inflate() provides as much output as possible, until there
501 is no more input data or no more space in the output buffer (see below
502 about the flush parameter).
503
504 Before the call of inflate(), the application should ensure that at least
505 one of the actions is possible, by providing more input and/or consuming
506 more output, and updating the next_* and avail_* values accordingly.
507 The application can consume the uncompressed output when it wants, for
508 example when the output buffer is full (avail_out == 0), or after each
509 call of inflate(). If inflate returns Z_OK and with zero avail_out, it
510 must be called again after making room in the output buffer because there
511 might be more output pending.
512
513 If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
514 output as possible to the output buffer. The flushing behavior of inflate is
515 not specified for values of the flush parameter other than Z_SYNC_FLUSH
516 and Z_FINISH, but the current implementation actually flushes as much output
517 as possible anyway.
518
519 inflate() should normally be called until it returns Z_STREAM_END or an
520 error. However if all decompression is to be performed in a single step
521 (a single call of inflate), the parameter flush should be set to
522 Z_FINISH. In this case all pending input is processed and all pending
523 output is flushed; avail_out must be large enough to hold all the
524 uncompressed data. (The size of the uncompressed data may have been saved
525 by the compressor for this purpose.) The next operation on this stream must
526 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
527 is never required, but can be used to inform inflate that a faster routine
528 may be used for the single inflate() call.
529
530 If a preset dictionary is needed at this point (see inflateSetDictionary
531   below), inflate sets strm->adler to the adler32 checksum of the
532 dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
533 it sets strm->adler to the adler32 checksum of all output produced
534 so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
535 an error code as described below. At the end of the stream, inflate()
536 checks that its computed adler32 checksum is equal to that saved by the
537 compressor and returns Z_STREAM_END only if the checksum is correct.
538
539 inflate() returns Z_OK if some progress has been made (more input processed
540 or more output produced), Z_STREAM_END if the end of the compressed data has
541 been reached and all uncompressed output has been produced, Z_NEED_DICT if a
542 preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
543 corrupted (input stream not conforming to the zlib format or incorrect
544 adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
545 (for example if next_in or next_out was NULLP), Z_MEM_ERROR if there was not
546 enough memory, Z_BUF_ERROR if no progress is possible or if there was not
547 enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
548 case, the application may then call inflateSync to look for a good
549 compression block.
550 */
551
552
553 extern int zlib_inflateEnd (z_streamp strm);
554 /*
555 All dynamically allocated data structures for this stream are freed.
556 This function discards any unprocessed input and does not flush any
557 pending output.
558
559 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
560 was inconsistent. In the error case, msg may be set but then points to a
561 static string (which must not be deallocated).
562 */
563
564 /* Advanced functions */
565
566 /*
567 The following functions are needed only in some special applications.
568 */
569
570 /*
571 extern int deflateInit2 (z_streamp strm,
572 int level,
573 int method,
574 int windowBits,
575 int memLevel,
576 int strategy);
577
578 This is another version of deflateInit with more compression options. The
579 fields next_in, zalloc, zfree and opaque must be initialized before by
580 the caller.
581
582 The method parameter is the compression method. It must be Z_DEFLATED in
583 this version of the library.
584
585 The windowBits parameter is the base two logarithm of the window size
586 (the size of the history buffer). It should be in the range 8..15 for this
587 version of the library. Larger values of this parameter result in better
588 compression at the expense of memory usage. The default value is 15 if
589 deflateInit is used instead.
590
591 The memLevel parameter specifies how much memory should be allocated
592 for the internal compression state. memLevel=1 uses minimum memory but
593 is slow and reduces compression ratio; memLevel=9 uses maximum memory
594 for optimal speed. The default value is 8. See zconf.h for total memory
595 usage as a function of windowBits and memLevel.
596
597 The strategy parameter is used to tune the compression algorithm. Use the
598 value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
599 filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
600 string match). Filtered data consists mostly of small values with a
601 somewhat random distribution. In this case, the compression algorithm is
602 tuned to compress them better. The effect of Z_FILTERED is to force more
603 Huffman coding and less string matching; it is somewhat intermediate
604 between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
605 the compression ratio but not the correctness of the compressed output even
606 if it is not set appropriately.
607
608 deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
609 memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
610 method). msg is set to null if there is no error message. deflateInit2 does
611 not perform any compression: this will be done by deflate().
612 */
613
614 extern int zlib_deflateSetDictionary (z_streamp strm,
615 const Byte *dictionary,
616 uInt dictLength);
617 /*
618 Initializes the compression dictionary from the given byte sequence
619 without producing any compressed output. This function must be called
620 immediately after deflateInit, deflateInit2 or deflateReset, before any
621 call of deflate. The compressor and decompressor must use exactly the same
622 dictionary (see inflateSetDictionary).
623
624 The dictionary should consist of strings (byte sequences) that are likely
625 to be encountered later in the data to be compressed, with the most commonly
626 used strings preferably put towards the end of the dictionary. Using a
627 dictionary is most useful when the data to be compressed is short and can be
628 predicted with good accuracy; the data can then be compressed better than
629 with the default empty dictionary.
630
631 Depending on the size of the compression data structures selected by
632 deflateInit or deflateInit2, a part of the dictionary may in effect be
633 discarded, for example if the dictionary is larger than the window size in
634 deflate or deflate2. Thus the strings most likely to be useful should be
635 put at the end of the dictionary, not at the front.
636
637 Upon return of this function, strm->adler is set to the Adler32 value
638 of the dictionary; the decompressor may later use this value to determine
639 which dictionary has been used by the compressor. (The Adler32 value
640 applies to the whole dictionary even if only a subset of the dictionary is
641 actually used by the compressor.)
642
643 deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
644 parameter is invalid (such as NULLP dictionary) or the stream state is
645 inconsistent (for example if deflate has already been called for this stream
646 or if the compression method is bsort). deflateSetDictionary does not
647 perform any compression: this will be done by deflate().
648 */
649
650 extern int zlib_deflateCopy (z_streamp dest, z_streamp source);
651 /*
652 Sets the destination stream as a complete copy of the source stream.
653
654 This function can be useful when several compression strategies will be
655 tried, for example when there are several ways of pre-processing the input
656 data with a filter. The streams that will be discarded should then be freed
657 by calling deflateEnd. Note that deflateCopy duplicates the internal
658 compression state which can be quite large, so this strategy is slow and
659 can consume lots of memory.
660
661 deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
662 enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
663 (such as zalloc being NULLP). msg is left unchanged in both source and
664 destination.
665 */
666
667 extern int zlib_deflateReset (z_streamp strm);
668 /*
669 This function is equivalent to deflateEnd followed by deflateInit,
670 but does not free and reallocate all the internal compression state.
671 The stream will keep the same compression level and any other attributes
672 that may have been set by deflateInit2.
673
674 deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
675 stream state was inconsistent (such as zalloc or state being NULLP).
676 */
677
678 extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
679 /*
680 Dynamically update the compression level and compression strategy. The
681 interpretation of level and strategy is as in deflateInit2. This can be
682 used to switch between compression and straight copy of the input data, or
683 to switch to a different kind of input data requiring a different
684 strategy. If the compression level is changed, the input available so far
685 is compressed with the old level (and may be flushed); the new level will
686 take effect only at the next call of deflate().
687
688 Before the call of deflateParams, the stream state must be set as for
689 a call of deflate(), since the currently available input may have to
690 be compressed and flushed. In particular, strm->avail_out must be non-zero.
691
692 deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
693 stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
694 if strm->avail_out was zero.
695 */
696
697 /*
698 extern int inflateInit2 (z_streamp strm, int windowBits);
699
700 This is another version of inflateInit with an extra parameter. The
701 fields next_in, avail_in, zalloc, zfree and opaque must be initialized
702 before by the caller.
703
704 The windowBits parameter is the base two logarithm of the maximum window
705 size (the size of the history buffer). It should be in the range 8..15 for
706 this version of the library. The default value is 15 if inflateInit is used
707 instead. If a compressed stream with a larger window size is given as
708 input, inflate() will return with the error code Z_DATA_ERROR instead of
709 trying to allocate a larger window.
710
711 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
712 memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
713 memLevel). msg is set to null if there is no error message. inflateInit2
714 does not perform any decompression apart from reading the zlib header if
715 present: this will be done by inflate(). (So next_in and avail_in may be
716 modified, but next_out and avail_out are unchanged.)
717 */
718
719 extern int zlib_inflateSetDictionary (z_streamp strm,
720 const Byte *dictionary,
721 uInt dictLength);
722 /*
723 Initializes the decompression dictionary from the given uncompressed byte
724 sequence. This function must be called immediately after a call of inflate
725 if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
726 can be determined from the Adler32 value returned by this call of
727 inflate. The compressor and decompressor must use exactly the same
728 dictionary (see deflateSetDictionary).
729
730 inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
731 parameter is invalid (such as NULLP dictionary) or the stream state is
732 inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
733 expected one (incorrect Adler32 value). inflateSetDictionary does not
734 perform any decompression: this will be done by subsequent calls of
735 inflate().
736 */
737
738 extern int zlib_inflateSync (z_streamp strm);
739 /*
740 Skips invalid compressed data until a full flush point (see above the
741 description of deflate with Z_FULL_FLUSH) can be found, or until all
742 available input is skipped. No output is provided.
743
744 inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
745 if no more input was provided, Z_DATA_ERROR if no flush point has been found,
746 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
case, the application may save the current value of total_in which
748 indicates where valid compressed data was found. In the error case, the
749 application may repeatedly call inflateSync, providing more input each time,
750 until success or end of the input data.
751 */
752
753 extern int zlib_inflateReset (z_streamp strm);
754 /*
755 This function is equivalent to inflateEnd followed by inflateInit,
756 but does not free and reallocate all the internal decompression state.
757 The stream will keep attributes that may have been set by inflateInit2.
758
759 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
760 stream state was inconsistent (such as zalloc or state being NULLP).
761 */
762
763 extern int zlib_inflateIncomp (z_stream *strm);
764 /*
765 This function adds the data at next_in (avail_in bytes) to the output
766 history without performing any output. There must be no pending output,
767 and the decompressor must be expecting to see the start of a block.
768 Calling this function is equivalent to decompressing a stored block
769 containing the data at next_in (except that the data is not output).
770 */
771
772 /* various hacks, don't look :) */
773
774 /* deflateInit and inflateInit are macros to allow checking the zlib version
775 * and the compiler's view of z_stream:
776 */
777 extern int zlib_deflateInit_ (z_streamp strm, int level,
778 const char *version, int stream_size);
779 extern int zlib_inflateInit_ (z_streamp strm,
780 const char *version, int stream_size);
781 extern int zlib_deflateInit2_ (z_streamp strm, int level, int method,
782 int windowBits, int memLevel,
783 int strategy, const char *version,
784 int stream_size);
785 extern int zlib_inflateInit2_ (z_streamp strm, int windowBits,
786 const char *version, int stream_size);
787 #if 0 /**** pts ****/
788 #define zlib_deflateInit(strm, level) \
789 zlib_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
790 #define zlib_inflateInit(strm) \
791 zlib_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
792 #define zlib_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
793 zlib_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
794 (strategy), ZLIB_VERSION, sizeof(z_stream))
795 #define zlib_inflateInit2(strm, windowBits) \
796 zlib_inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
797 #endif
798
799 #if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
800 struct zlib_internal_state {int dummy;}; /* hack for buggy compilers */
801 #endif
802
803 extern const char * zlib_zError (int err);
804 extern int zlib_inflateSyncPoint (z_streamp z);
805 extern const uLong * zlib_get_crc_table (void);
806
807 /**** pts ****/
808 #define max(a,b) ((a)<(b) ? (b) : (a))
809
810
811 typedef unsigned char uch;
812 typedef unsigned short ush;
813 typedef unsigned long ulg;
814
815 /* common constants */
816
817 #ifndef DEF_WBITS
818 # define DEF_WBITS MAX_WBITS
819 #endif
820 /* default windowBits for decompression. MAX_WBITS is for compression only */
821
822 #if MAX_MEM_LEVEL >= 8
823 # define DEF_MEM_LEVEL 8
824 #else
825 # define DEF_MEM_LEVEL MAX_MEM_LEVEL
826 #endif
827 /* default memLevel */
828
829 #define STORED_BLOCK 0
830 #define STATIC_TREES 1
831 #define DYN_TREES 2
832 /* The three kinds of block type */
833
834 #define MIN_MATCH 3
835 #define MAX_MATCH 258
836 /* The minimum and maximum match lengths */
837
838 #define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
839
840 /* target dependencies */
841
842 /* Common defaults */
843
844 #ifndef OS_CODE
845 # define OS_CODE 0x03 /* assume Unix */
846 #endif
847
848 /* functions */
849
850 typedef uLong (*check_func) (uLong check, const Byte *buf,
851 uInt len);
852
853
854 /* checksum functions */
855
856 #define BASE 65521L /* largest prime smaller than 65536 */
857 #define NMAX 5552
858 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
859
860 #define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
861 #define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
862 #define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
863 #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
864 #define DO16(buf) DO8(buf,0); DO8(buf,8);
865
866 /* ========================================================================= */
867 /*
868 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
869 return the updated checksum. If buf is NULLP, this function returns
870 the required initial value for the checksum.
871 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
872 much faster. Usage example:
873
874 uLong adler = adler32(0L, NULLP, 0);
875
876 while (read_buffer(buffer, length) != EOF) {
877 adler = adler32(adler, buffer, length);
878 }
879 if (adler != original_adler) error();
880 */
zlib_adler32(uLong adler,const Byte * buf,uInt len)881 static ZINLINE uLong zlib_adler32(uLong adler,
882 const Byte *buf,
883 uInt len)
884 {
885 unsigned long s1 = adler & 0xffff;
886 unsigned long s2 = (adler >> 16) & 0xffff;
887 int k;
888
889 if (buf == NULLP) return 1L;
890
891 while (len > 0) {
892 k = len < NMAX ? len : NMAX;
893 len -= k;
894 while (k >= 16) {
895 DO16(buf);
896 buf += 16;
897 k -= 16;
898 }
899 if (k != 0) do {
900 s1 += *buf++;
901 s2 += s1;
902 } while (--k);
903 s1 %= BASE;
904 s2 %= BASE;
905 }
906 return (s2 << 16) | s1;
907 }
908
909 #endif /* _Z_UTIL_H */
910
911 /* end of zutil.h */
912
913 /* #include "defutil.h" */
914
915 #define Assert(err, str)
916 #define Trace(dummy)
917 #define Tracev(dummy)
918 #define Tracecv(err, dummy)
919 #define Tracevv(dummy)
920
921 #define LENGTH_CODES 29
922 /* number of length codes, not counting the special END_BLOCK code */
923
924 #define LITERALS 256
925 /* number of literal bytes 0..255 */
926
927 #define L_CODES (LITERALS+1+LENGTH_CODES)
928 /* number of Literal or Length codes, including the END_BLOCK code */
929
930 #define D_CODES 30
931 /* number of distance codes */
932
933 #define BL_CODES 19
934 /* number of codes used to transfer the bit lengths */
935
936 #define HEAP_SIZE (2*L_CODES+1)
937 /* maximum heap size */
938
939 #define MAX_BITS 15
940 /* All codes must not exceed MAX_BITS bits */
941
942 #define INIT_STATE 42
943 #define BUSY_STATE 113
944 #define FINISH_STATE 666
945 /* Stream status */
946
947
/* Data structure describing a single value and its code string.
 * While the Huffman tree is being built each node holds a frequency and a
 * parent link; once codes are assigned the same storage holds the bit
 * string and its length -- hence the two overlapping unions.
 */
typedef struct ct_data_s {
    union {
        ush freq; /* frequency count */
        ush code; /* bit string */
    } fc;
    union {
        ush dad; /* father node in Huffman tree */
        ush len; /* length of bit string */
    } dl;
} ct_data;

/* Shorthand accessors for the union members above. */
#define Freq fc.freq
#define Code fc.code
#define Dad dl.dad
#define Len dl.len

typedef struct static_tree_desc_s static_tree_desc;

/* Pairs a dynamic (per-block) tree with its static counterpart. */
typedef struct tree_desc_s {
    ct_data *dyn_tree; /* the dynamic tree */
    int max_code; /* largest code with non zero frequency */
    static_tree_desc *stat_desc; /* the corresponding static tree */
} tree_desc;

typedef ush Pos;
typedef unsigned IPos;

/* A Pos is an index in the character window. We use short instead of int to
 * save space in the various tables. IPos is used only for parameter passing.
 */
979
/* Complete internal state of one deflate stream (window, hash chains,
 * Huffman trees and the bit/byte output buffers).
 */
typedef struct deflate_state {
    z_streamp strm; /* pointer back to this zlib stream */
    int status; /* as the name implies */
    Byte *pending_buf; /* output still pending */
    ulg pending_buf_size; /* size of pending_buf */
    Byte *pending_out; /* next pending byte to output to the stream */
    int pending; /* nb of bytes in the pending buffer */
    int noheader; /* suppress zlib header and adler32 */
    Byte data_type; /* UNKNOWN, BINARY or ASCII */
    Byte method; /* STORED (for zip only) or DEFLATED */
    int last_flush; /* value of flush param for previous deflate call */

    /* used by deflate.c: */

    uInt w_size; /* LZ77 window size (32K by default) */
    uInt w_bits; /* log2(w_size) (8..16) */
    uInt w_mask; /* w_size - 1 */

    Byte *window;
    /* Sliding window. Input bytes are read into the second half of the window,
     * and move to the first half later to keep a dictionary of at least wSize
     * bytes. With this organization, matches are limited to a distance of
     * wSize-MAX_MATCH bytes, but this ensures that IO is always
     * performed with a length multiple of the block size. Also, it limits
     * the window size to 64K, which is quite useful on MSDOS.
     * To do: use the user input buffer as sliding window.
     */

    ulg window_size;
    /* Actual size of window: 2*wSize, except when the user input buffer
     * is directly used as sliding window.
     */

    Pos *prev;
    /* Link to older string with same hash index. To limit the size of this
     * array to 64K, this link is maintained only for the last 32K strings.
     * An index in this array is thus a window index modulo 32K.
     */

    Pos *head; /* Heads of the hash chains or NIL. */

    uInt ins_h; /* hash index of string to be inserted */
    uInt hash_size; /* number of elements in hash table */
    uInt hash_bits; /* log2(hash_size) */
    uInt hash_mask; /* hash_size-1 */

    uInt hash_shift;
    /* Number of bits by which ins_h must be shifted at each input
     * step. It must be such that after MIN_MATCH steps, the oldest
     * byte no longer takes part in the hash key, that is:
     * hash_shift * MIN_MATCH >= hash_bits
     */

    long block_start;
    /* Window position at the beginning of the current output block. Gets
     * negative when the window is moved backwards.
     */

    uInt match_length; /* length of best match */
    IPos prev_match; /* previous match */
    int match_available; /* set if previous match exists */
    uInt strstart; /* start of string to insert */
    uInt match_start; /* start of matching string */
    uInt lookahead; /* number of valid bytes ahead in window */

    uInt prev_length;
    /* Length of the best match at previous step. Matches not greater than this
     * are discarded. This is used in the lazy match evaluation.
     */

    uInt max_chain_length;
    /* To speed up deflation, hash chains are never searched beyond this
     * length. A higher limit improves compression ratio but degrades the
     * speed.
     */

    uInt max_lazy_match;
    /* Attempt to find a better match only when the current match is strictly
     * smaller than this value. This mechanism is used only for compression
     * levels >= 4.
     */
#   define max_insert_length max_lazy_match
    /* Insert new strings in the hash table only if the match length is not
     * greater than this length. This saves time but degrades compression.
     * max_insert_length is used only for compression levels <= 3.
     */

    int level; /* compression level (1..9) */
    int strategy; /* favor or force Huffman coding */

    uInt good_match;
    /* Use a faster search when the previous match is longer than this */

    int nice_match; /* Stop searching when current match exceeds this */

    /* used by trees.c: */
    /* Didn't use ct_data typedef below to suppress compiler warning */
    struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
    struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */

    struct tree_desc_s l_desc; /* desc. for literal tree */
    struct tree_desc_s d_desc; /* desc. for distance tree */
    struct tree_desc_s bl_desc; /* desc. for bit length tree */

    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
    int heap_len; /* number of elements in the heap */
    int heap_max; /* element of largest frequency */
    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
     * The same heap array is used to build all trees.
     */

    uch depth[2*L_CODES+1];
    /* Depth of each subtree used as tie breaker for trees of equal frequency
     */

    uch *l_buf; /* buffer for literals or lengths */

    uInt lit_bufsize;
    /* Size of match buffer for literals/lengths. There are 4 reasons for
     * limiting lit_bufsize to 64K:
     * - frequencies can be kept in 16 bit counters
     * - if compression is not successful for the first block, all input
     * data is still in the window so we can still emit a stored block even
     * when input comes from standard input. (This can also be done for
     * all blocks if lit_bufsize is not greater than 32K.)
     * - if compression is not successful for a file smaller than 64K, we can
     * even emit a stored file instead of a stored block (saving 5 bytes).
     * This is applicable only for zip (not gzip or zlib).
     * - creating new Huffman trees less frequently may not provide fast
     * adaptation to changes in the input data statistics. (Take for
     * example a binary file with poorly compressible code followed by
     * a highly compressible string table.) Smaller buffer sizes give
     * fast adaptation but have of course the overhead of transmitting
     * trees more frequently.
     * - I can't count above 4
     */

    uInt last_lit; /* running index in l_buf */

    ush *d_buf;
    /* Buffer for distances. To simplify the code, d_buf and l_buf have
     * the same number of elements. To use different lengths, an extra flag
     * array would be necessary.
     */

    ulg opt_len; /* bit length of current block with optimal trees */
    ulg static_len; /* bit length of current block with static trees */
    ulg compressed_len; /* total bit length of compressed file */
    uInt matches; /* number of string matches in current block */
    int last_eob_len; /* bit length of EOB code for last block */

#ifdef DEBUG_ZLIB
    ulg bits_sent; /* bit length of the compressed data */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf. All bits above the last valid bit
     * are always zero.
     */

} deflate_state;
1149
/* All memory the deflator needs, as one contiguous caller-supplied block
 * (reached via strm->workspace in zlib_deflateInit2_), so the compressor
 * itself performs no dynamic allocation. Arrays are sized for the
 * worst-case MAX_WBITS / MAX_MEM_LEVEL settings.
 */
typedef struct deflate_workspace {
    /* State memory for the deflator */
    deflate_state deflate_memory;
    Byte window_memory[2 * (1 << MAX_WBITS)]; /* sliding window (2*w_size) */
    Pos prev_memory[1 << MAX_WBITS]; /* hash chain links (s->prev) */
    Pos head_memory[1 << (MAX_MEM_LEVEL + 7)]; /* hash chain heads (s->head) */
    char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)]; /* pending_buf / d_buf+l_buf overlay */
} deflate_workspace;
1158
1159 /* Output a byte on the stream.
1160 * IN assertion: there is enough room in pending_buf.
1161 */
1162 #define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
1163
1164
1165 #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
1166 /* Minimum amount of lookahead, except at the end of the input file.
1167 * See deflate.c for comments about the MIN_MATCH+1.
1168 */
1169
1170 #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
1171 /* In order to simplify the code, particularly on 16 bit machines, match
1172 * distances are limited to MAX_DIST instead of WSIZE.
1173 */
1174
1175 /* in trees.c */
1176 ZSTATIC void zlib_tr_init (deflate_state *s);
1177 ZSTATIC int zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
1178 ZSTATIC ulg zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
1179 int eof);
1180 ZSTATIC void zlib_tr_align (deflate_state *s);
1181 ZSTATIC void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
1182 int eof);
1183 ZSTATIC void zlib_tr_stored_type_only (deflate_state *);
1184
1185
1186 /* ===========================================================================
1187 * Output a short LSB first on the stream.
1188 * IN assertion: there is enough room in pendingBuf.
1189 */
1190 #define put_short(s, w) { \
1191 put_byte(s, (uch)((w) & 0xff)); \
1192 put_byte(s, (uch)((ush)(w) >> 8)); \
1193 }
1194
1195 /* ===========================================================================
1196 * Reverse the first len bits of a code, using straightforward code (a faster
1197 * method would use a table)
1198 * IN assertion: 1 <= len <= 15
1199 */
bi_reverse(unsigned code,int len)1200 static ZINLINE unsigned bi_reverse(unsigned code, /* the value to invert */
1201 int len) /* its bit length */
1202 {
1203 register unsigned res = 0;
1204 do {
1205 res |= code & 1;
1206 code >>= 1, res <<= 1;
1207 } while (--len > 0);
1208 return res >> 1;
1209 }
1210
1211 /* ===========================================================================
1212 * Flush the bit buffer, keeping at most 7 bits in it.
1213 */
bi_flush(deflate_state * s)1214 static ZINLINE void bi_flush(deflate_state *s)
1215 {
1216 if (s->bi_valid == 16) {
1217 put_short(s, s->bi_buf);
1218 s->bi_buf = 0;
1219 s->bi_valid = 0;
1220 } else if (s->bi_valid >= 8) {
1221 put_byte(s, (Byte)s->bi_buf);
1222 s->bi_buf >>= 8;
1223 s->bi_valid -= 8;
1224 }
1225 }
1226
1227 /* ===========================================================================
1228 * Flush the bit buffer and align the output on a byte boundary
1229 */
bi_windup(deflate_state * s)1230 static ZINLINE void bi_windup(deflate_state *s)
1231 {
1232 if (s->bi_valid > 8) {
1233 put_short(s, s->bi_buf);
1234 } else if (s->bi_valid > 0) {
1235 put_byte(s, (Byte)s->bi_buf);
1236 }
1237 s->bi_buf = 0;
1238 s->bi_valid = 0;
1239 #ifdef DEBUG_ZLIB
1240 s->bits_sent = (s->bits_sent+7) & ~7;
1241 #endif
1242 }
1243
1244
1245 /* end of defutil.h */
1246
1247 #if USE_ZLIB_MEM
1248 # include <string.h> /* memcpy(), memset() */
1249 # define zmemcpy(a,b,c) memcpy(a,b,c)
1250 # define zmemset(a,b,c) memset(a,b,c)
1251 #else
/* Minimal memcpy substitute for builds without <string.h>.
 * Regions must not overlap. Returns dest, like memcpy.
 */
static void *zmemcpy(void *dest, const void *src, unsigned count) {
    char *to = (char *) dest;
    const char *from = (const char *) src;
    unsigned i;
    for (i = 0; i < count; i++) {
        to[i] = from[i];
    }
    return dest;
}
/* Minimal memset substitute for builds without <string.h>.
 * Fills count bytes of s with (char)c and returns s, like memset.
 */
static void *zmemset(void *s, int c, unsigned count) {
    char *fill = (char *) s;
    while (count-- > 0) {
        *fill++ = c;
    }
    return s;
}
1263 #endif
1264
1265
1266 /* ===========================================================================
1267 * Function prototypes.
1268 */
1269 typedef enum {
1270 need_more, /* block not completed, need more input or more output */
1271 block_done, /* block flush performed */
1272 finish_started, /* finish started, need only more output at next deflate */
1273 finish_done /* finish done, accept no more input or output */
1274 } block_state;
1275
1276 typedef block_state (*compress_func) (deflate_state *s, int flush);
1277 /* Compression function. Returns the block state after the call. */
1278
1279 static void fill_window (deflate_state *s);
1280 static block_state deflate_stored (deflate_state *s, int flush);
1281 static block_state deflate_fast (deflate_state *s, int flush);
1282 static block_state deflate_slow (deflate_state *s, int flush);
1283 static void lm_init (deflate_state *s);
1284 static void putShortMSB (deflate_state *s, uInt b);
1285 static void flush_pending (z_streamp strm);
1286 static int read_buf (z_streamp strm, Byte *buf, unsigned size);
1287 static uInt longest_match (deflate_state *s, IPos cur_match);
1288
1289 #ifdef DEBUG_ZLIB
1290 static void check_match (deflate_state *s, IPos start, IPos match,
1291 int length);
1292 #endif
1293
1294 /* ===========================================================================
1295 * Local data
1296 */
1297
1298 #define NIL 0
1299 /* Tail of hash chains */
1300
1301 #ifndef TOO_FAR
1302 # define TOO_FAR 4096
1303 #endif
1304 /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
1305
1306 #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
1307 /* Minimum amount of lookahead, except at the end of the input file.
1308 * See deflate.c for comments about the MIN_MATCH+1.
1309 */
1310
1311 /* Values for max_lazy_match, good_match and max_chain_length, depending on
1312 * the desired pack level (0..9). The values given below have been tuned to
1313 * exclude worst case performance for pathological files. Better values may be
1314 * found for specific files.
1315 */
1316 typedef struct config_s {
1317 ush good_length; /* reduce lazy search above this match length */
1318 ush max_lazy; /* do not perform lazy search above this match length */
1319 ush nice_length; /* quit search above this match length */
1320 ush max_chain;
1321 compress_func func;
1322 } config;
1323
1324 static const config configuration_table[10] = {
1325 /* good lazy nice chain */
1326 /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
1327 /* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
1328 /* 2 */ {4, 5, 16, 8, deflate_fast},
1329 /* 3 */ {4, 6, 32, 32, deflate_fast},
1330
1331 /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
1332 /* 5 */ {8, 16, 32, 32, deflate_slow},
1333 /* 6 */ {8, 16, 128, 128, deflate_slow},
1334 /* 7 */ {8, 32, 128, 256, deflate_slow},
1335 /* 8 */ {32, 128, 258, 1024, deflate_slow},
1336 /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
1337
1338 /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
1339 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
1340 * meaning.
1341 */
1342
1343 #define EQUAL 0
1344 /* result of memcmp for equal strings */
1345
1346 /* ===========================================================================
1347 * Update a hash value with the given input byte
 * IN assertion: all calls to UPDATE_HASH are made with consecutive
1349 * input characters, so that a running hash key can be computed from the
1350 * previous key instead of complete recalculation each time.
1351 */
1352 #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
1353
1354
1355 /* ===========================================================================
1356 * Insert string str in the dictionary and set match_head to the previous head
1357 * of the hash chain (the most recent string with same hash key). Return
1358 * the previous length of the hash chain.
 * IN assertion: all calls to INSERT_STRING are made with consecutive
1360 * input characters and the first MIN_MATCH bytes of str are valid
1361 * (except for the last MIN_MATCH-1 bytes of the input file).
1362 */
1363 #define INSERT_STRING(s, str, match_head) \
1364 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
1365 s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
1366 s->head[s->ins_h] = (Pos)(str))
1367
1368 /* ===========================================================================
1369 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
1370 * prev[] will be initialized on the fly.
1371 */
1372 #define CLEAR_HASH(s) \
1373 s->head[s->hash_size-1] = NIL; \
1374 zmemset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
1375
1376 /* ========================================================================= */
/* ========================================================================= */
/* Plain deflateInit: deflateInit2 with every extra knob at its default
 * (zlib method, full window, default memory level and strategy).
 */
int zlib_deflateInit_(
    z_streamp strm,
    int level,
    const char *version,
    int stream_size
)
{
    return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS,
                              DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY,
                              version, stream_size);
    /* To do: ignore strm->next_in if we use it as window */
}
1389
1390 /* ========================================================================= */
/* Initialize a deflate stream with explicit method/window/memory/strategy
 * parameters. All state lives in the caller-supplied strm->workspace
 * (a deflate_workspace); no allocation is performed here.
 * Returns Z_OK, or Z_VERSION_ERROR / Z_STREAM_ERROR on bad arguments.
 */
int zlib_deflateInit2_(
    z_streamp strm,
    int level,
    int method,
    int windowBits,
    int memLevel,
    int strategy,
    const char *version,
    int stream_size
)
{
    deflate_state *s;
    int noheader = 0;
    static const char my_version[] = ZLIB_VERSION; /* const: never written */
    deflate_workspace *mem;

    ush *overlay;
    /* We overlay pending_buf and d_buf+l_buf. This works since the average
     * output size for (length,distance) codes is <= 24 bits.
     */

    /* Guard against a caller compiled against a different zlib. */
    if (version == NULLP || version[0] != my_version[0] ||
        stream_size != sizeof(z_stream)) {
        return Z_VERSION_ERROR;
    }
    if (strm == NULLP) return Z_STREAM_ERROR;

    strm->msg = (char*)NULLP; /**** pts ****/

    if (level == Z_DEFAULT_COMPRESSION) level = 6;

    mem = (deflate_workspace *) strm->workspace;

    if (windowBits < 0) { /* undocumented feature: suppress zlib header */
        noheader = 1;
        windowBits = -windowBits;
    }
    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
        windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
        strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
        return Z_STREAM_ERROR;
    }
    s = (deflate_state *) &(mem->deflate_memory);
    strm->state = (struct zlib_internal_state *)s;
    s->strm = strm;

    s->data_type=strm->data_type; /**** pts ****/ /* BUGFIX at Tue Jan 18 16:01:08 CET 2005 */ /* Imp: report this bugfix to the Linux kernel */
    s->noheader = noheader;
    s->w_bits = windowBits;
    s->w_size = 1 << s->w_bits;
    s->w_mask = s->w_size - 1;

    s->hash_bits = memLevel + 7;
    s->hash_size = 1 << s->hash_bits;
    s->hash_mask = s->hash_size - 1;
    s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);

    s->window = (Byte *) mem->window_memory;
    /* BUGFIX at Mon Sep 4 17:12:23 CEST 2017, otherwise valgrind reports
     * uninitialized errors. Use zmemset (not bare memset) so this also
     * compiles when USE_ZLIB_MEM is unset and <string.h> is never included;
     * with USE_ZLIB_MEM set, zmemset is #defined to memset anyway.
     */
    zmemset(mem->window_memory, 0, s->w_size << 1);
    s->prev = (Pos *) mem->prev_memory;
    s->head = (Pos *) mem->head_memory;

    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

    overlay = (ush *) mem->overlay_memory;
    s->pending_buf = (uch *) overlay;
    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);

    /* d_buf and l_buf share the tail of the overlay with pending_buf. */
    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;

    s->level = level;
    s->strategy = strategy;
    s->method = (Byte)method;

    return zlib_deflateReset(strm);
}
1468
1469 #if 0 /**** pts ****/
1470 /* ========================================================================= */
/* Prime the sliding window with a user-supplied dictionary before the
 * first deflate() call (only valid in INIT_STATE). The dictionary's
 * Adler-32 is folded into strm->adler so the decompressor can identify it.
 * NOTE(review): this function is inside an #if 0 block in this rip
 * (disabled by pts); kept verbatim from the original zlib source.
 */
int zlib_deflateSetDictionary(
    z_streamp strm,
    const Byte *dictionary,
    uInt dictLength
)
{
    deflate_state *s;
    uInt length = dictLength;
    uInt n;
    IPos hash_head = 0;

    if (strm == NULLP || strm->state == NULLP || dictionary == NULLP)
        return Z_STREAM_ERROR;

    s = (deflate_state *) strm->state;
    if (s->status != INIT_STATE) return Z_STREAM_ERROR;

    strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);

    /* Shorter than a match: nothing useful to insert. */
    if (length < MIN_MATCH) return Z_OK;
    if (length > MAX_DIST(s)) {
        length = MAX_DIST(s);
#ifndef USE_DICT_HEAD
        dictionary += dictLength - length; /* use the tail of the dictionary */
#endif
    }
    zmemcpy((char *)s->window, dictionary, length);
    s->strstart = length;
    s->block_start = (long)length;

    /* Insert all strings in the hash table (except for the last two bytes).
     * s->lookahead stays null, so s->ins_h will be recomputed at the next
     * call of fill_window.
     */
    s->ins_h = s->window[0];
    UPDATE_HASH(s, s->ins_h, s->window[1]);
    for (n = 0; n <= length - MIN_MATCH; n++) {
        INSERT_STRING(s, n, hash_head);
    }
    if (hash_head) hash_head = 0; /* to make compiler happy */
    return Z_OK;
}
1513 #endif
1514
1515 /* ========================================================================= */
/* Reset an initialized stream to its post-deflateInit state without
 * reallocating anything. Keeps level/strategy/window settings.
 * Returns Z_OK, or Z_STREAM_ERROR on an inconsistent stream.
 */
int zlib_deflateReset(
    z_streamp strm
)
{
    deflate_state *s;

    if (strm == NULLP || strm->state == NULLP)
        return Z_STREAM_ERROR;

    /* Reset the public stream counters and error message. */
    strm->total_in = 0;
    strm->total_out = 0;
    strm->msg = (char*)NULLP;
    strm->data_type = Z_UNKNOWN;

    /* Empty the pending-output buffer. */
    s = (deflate_state *)strm->state;
    s->pending = 0;
    s->pending_out = s->pending_buf;

    if (s->noheader < 0) {
        s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
    }
    s->status = s->noheader ? BUSY_STATE : INIT_STATE;
    strm->adler = 1;
    s->last_flush = Z_NO_FLUSH;

    /* Rebuild the Huffman-tree and LZ77 matcher state. */
    zlib_tr_init(s);
    lm_init(s);

    return Z_OK;
}
1545
1546 #if 0 /**** pts ****/
1547 /* ========================================================================= */
/* Change the compression level and strategy mid-stream (see the API
 * comment near the top of this file for the full contract).
 * NOTE(review): this function is inside an #if 0 block in this rip
 * (disabled by pts); kept verbatim from the original zlib source.
 */
int zlib_deflateParams(
    z_streamp strm,
    int level,
    int strategy
)
{
    deflate_state *s;
    compress_func func;
    int err = Z_OK;

    if (strm == NULLP || strm->state == NULLP) return Z_STREAM_ERROR;
    s = (deflate_state *) strm->state;

    if (level == Z_DEFAULT_COMPRESSION) {
        level = 6;
    }
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    /* Switching compress functions requires flushing output produced
     * under the old settings first. */
    if (func != configuration_table[level].func && strm->total_in != 0) {
        /* Flush the last buffer: */
        err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
    }
    if (s->level != level) {
        s->level = level;
        s->max_lazy_match = configuration_table[level].max_lazy;
        s->good_match = configuration_table[level].good_length;
        s->nice_match = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return err;
}
1583 #endif
1584
1585 /* =========================================================================
1586 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
1587 * IN assertion: the stream state is correct and there is enough room in
1588 * pending_buf.
1589 */
putShortMSB(deflate_state * s,uInt b)1590 static void putShortMSB(
1591 deflate_state *s,
1592 uInt b
1593 )
1594 {
1595 put_byte(s, (Byte)(b >> 8));
1596 put_byte(s, (Byte)(b & 0xff));
1597 }
1598
1599 /* =========================================================================
1600 * Flush as much pending output as possible. All deflate() output goes
1601 * through this function so some applications may wish to modify it
1602 * to avoid allocating a large strm->next_out buffer and copying into it.
1603 * (See also read_buf()).
1604 */
flush_pending(z_streamp strm)1605 static void flush_pending(
1606 z_streamp strm
1607 )
1608 {
1609 deflate_state *s = (deflate_state *) strm->state;
1610 unsigned len = s->pending;
1611
1612 if (len > strm->avail_out) len = strm->avail_out;
1613 if (len == 0) return;
1614
1615 if (strm->next_out != NULLP) {
1616 zmemcpy(strm->next_out, s->pending_out, len);
1617 strm->next_out += len;
1618 }
1619 s->pending_out += len;
1620 strm->total_out += len;
1621 strm->avail_out -= len;
1622 s->pending -= len;
1623 if (s->pending == 0) {
1624 s->pending_out = s->pending_buf;
1625 }
1626 }
1627
/* ========================================================================= */
/*
 * Main compression entry point: consume input from strm->next_in and emit
 * compressed data to strm->next_out, honoring the requested flush mode.
 * Returns Z_OK while more work remains, Z_STREAM_END when flush == Z_FINISH
 * and everything (including the adler32 trailer) has been written, or
 * Z_STREAM_ERROR / Z_BUF_ERROR on misuse.
 */
int zlib_deflate(
    z_streamp strm,
    int flush
)
{
    int old_flush; /* value of flush param for previous deflate call */
    deflate_state *s;

    if (strm == NULLP || strm->state == NULLP ||
	flush > Z_FINISH || flush < 0) {
	return Z_STREAM_ERROR;
    }
    s = (deflate_state *) strm->state;

    if ((strm->next_in == NULLP && strm->avail_in != 0) ||
	(s->status == FINISH_STATE && flush != Z_FINISH)) {
	return Z_STREAM_ERROR;
    }
    if (strm->avail_out == 0) return Z_BUF_ERROR;

    s->strm = strm; /* just in case */
    old_flush = s->last_flush;
    s->last_flush = flush;

    /* Write the zlib header (RFC 1950 CMF/FLG pair) */
    if (s->status == INIT_STATE) {

	uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
	uInt level_flags = (s->level-1) >> 1;

	if (level_flags > 3) level_flags = 3;
	header |= (level_flags << 6);
	if (s->strstart != 0) header |= PRESET_DICT;
	/* FCHECK bits: make the 16-bit header a multiple of 31 */
	header += 31 - (header % 31);

	s->status = BUSY_STATE;
	putShortMSB(s, header);

	/* Save the adler32 of the preset dictionary: */
	if (s->strstart != 0) {
	    putShortMSB(s, (uInt)(strm->adler >> 16));
	    putShortMSB(s, (uInt)(strm->adler & 0xffff));
	}
	strm->adler = 1L;
    }

    /* Flush as much pending output as possible */
    if (s->pending != 0) {
	flush_pending(strm);
	if (strm->avail_out == 0) {
	    /* Since avail_out is 0, deflate will be called again with
	     * more output space, but possibly with both pending and
	     * avail_in equal to zero. There won't be anything to do,
	     * but this is not an error situation so make sure we
	     * return OK instead of BUF_ERROR at next call of deflate:
	     */
	    s->last_flush = -1;
	    return Z_OK;
	}

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUFF_ERROR.
     */
    } else if (strm->avail_in == 0 && flush <= old_flush &&
	       flush != Z_FINISH) {
	return Z_BUF_ERROR;
    }

    /* User must not provide more input after the first FINISH: */
    if (s->status == FINISH_STATE && strm->avail_in != 0) {
	return Z_BUF_ERROR;
    }

    /* Start a new block or continue the current one.
     */
    if (strm->avail_in != 0 || s->lookahead != 0 ||
	(flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
	block_state bstate;

	/* Dispatch on compression level: stored / fast / slow strategy. */
	bstate = (*(configuration_table[s->level].func))(s, flush);

	if (bstate == finish_started || bstate == finish_done) {
	    s->status = FINISH_STATE;
	}
	if (bstate == need_more || bstate == finish_started) {
	    if (strm->avail_out == 0) {
		s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
	    }
	    return Z_OK;
	    /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
	     * of deflate should use the same flush parameter to make sure
	     * that the flush is complete. So we don't have to output an
	     * empty block here, this will be done at next call. This also
	     * ensures that for a very small output buffer, we emit at most
	     * one empty block.
	     */
	}
	if (bstate == block_done) {
	    if (flush == Z_PARTIAL_FLUSH) {
		zlib_tr_align(s);
	    } else if (flush == Z_PACKET_FLUSH) {
		/* Output just the 3-bit `stored' block type value,
		   but not a zero length. */
		zlib_tr_stored_type_only(s);
	    } else { /* FULL_FLUSH or SYNC_FLUSH */
		zlib_tr_stored_block(s, (char*)0, 0L, 0);
		/* For a full flush, this empty block will be recognized
		 * as a special marker by inflate_sync().
		 */
		if (flush == Z_FULL_FLUSH) {
		    CLEAR_HASH(s);             /* forget history */
		}
	    }
	    flush_pending(strm);
	    if (strm->avail_out == 0) {
		s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
		return Z_OK;
	    }
	}
    }
    Assert(strm->avail_out > 0, "bug2");

    if (flush != Z_FINISH) return Z_OK;
    if (s->noheader) return Z_STREAM_END;

    /* Write the zlib trailer (adler32) */
    putShortMSB(s, (uInt)(strm->adler >> 16));
    putShortMSB(s, (uInt)(strm->adler & 0xffff));
    flush_pending(strm);
    /* If avail_out is zero, the application will call deflate again
     * to flush the rest.
     */
    s->noheader = -1; /* write the trailer only once! */
    return s->pending != 0 ? Z_OK : Z_STREAM_END;
}
1765
1766 /* ========================================================================= */
zlib_deflateEnd(z_streamp strm)1767 int zlib_deflateEnd(
1768 z_streamp strm
1769 )
1770 {
1771 int status;
1772 deflate_state *s;
1773
1774 if (strm == NULLP || strm->state == NULLP) return Z_STREAM_ERROR;
1775 s = (deflate_state *) strm->state;
1776
1777 status = s->status;
1778 if (status != INIT_STATE && status != BUSY_STATE &&
1779 status != FINISH_STATE) {
1780 return Z_STREAM_ERROR;
1781 }
1782
1783 strm->state = (struct zlib_internal_state*)NULLP; /**** pts ****/
1784
1785 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1786 }
1787
#if 0 /**** pts ****/
/* =========================================================================
 * Copy the source state to the destination state.
 * NOTE: disabled by the ripper (pts) — stream duplication is not needed by
 * this build.  Kept for reference; do not rely on it compiling.
 */
int zlib_deflateCopy (
    z_streamp dest,
    z_streamp source
)
{
#ifdef MAXSEG_64K
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;
    ush *overlay;
    deflate_workspace *mem;


    if (source == NULLP || dest == NULLP || source->state == NULLP) {
	return Z_STREAM_ERROR;
    }

    ss = (deflate_state *) source->state;

    *dest = *source;

    mem = (deflate_workspace *) dest->workspace;

    ds = &(mem->deflate_memory);

    dest->state = (struct zlib_internal_state *) ds;
    *ds = *ss;
    ds->strm = dest;

    /* Re-point every internal buffer into the destination's workspace. */
    ds->window = (Byte *) mem->window_memory;
    ds->prev   = (Pos *)  mem->prev_memory;
    ds->head   = (Pos *)  mem->head_memory;
    overlay = (ush *) mem->overlay_memory;
    ds->pending_buf = (uch *) overlay;

    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif
}
#endif
1845
1846 /* ===========================================================================
1847 * Read a new buffer from the current input stream, update the adler32
1848 * and total number of bytes read. All deflate() input goes through
1849 * this function so some applications may wish to modify it to avoid
1850 * allocating a large strm->next_in buffer and copying from it.
1851 * (See also flush_pending()).
1852 */
read_buf(z_streamp strm,Byte * buf,unsigned size)1853 static int read_buf(
1854 z_streamp strm,
1855 Byte *buf,
1856 unsigned size
1857 )
1858 {
1859 unsigned len = strm->avail_in;
1860
1861 if (len > size) len = size;
1862 if (len == 0) return 0;
1863
1864 strm->avail_in -= len;
1865
1866 if (!((deflate_state *)(strm->state))->noheader) {
1867 strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
1868 }
1869 zmemcpy(buf, strm->next_in, len);
1870 strm->next_in += len;
1871 strm->total_in += len;
1872
1873 return (int)len;
1874 }
1875
1876 /* ===========================================================================
1877 * Initialize the "longest match" routines for a new zlib stream
1878 */
lm_init(deflate_state * s)1879 static void lm_init(
1880 deflate_state *s
1881 )
1882 {
1883 s->window_size = (ulg)2L*s->w_size;
1884
1885 CLEAR_HASH(s);
1886
1887 /* Set the default configuration parameters:
1888 */
1889 s->max_lazy_match = configuration_table[s->level].max_lazy;
1890 s->good_match = configuration_table[s->level].good_length;
1891 s->nice_match = configuration_table[s->level].nice_length;
1892 s->max_chain_length = configuration_table[s->level].max_chain;
1893
1894 s->strstart = 0;
1895 s->block_start = 0L;
1896 s->lookahead = 0;
1897 s->match_length = s->prev_length = MIN_MATCH-1;
1898 s->match_available = 0;
1899 s->ins_h = 0;
1900 }
1901
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
 * match.S. The code will be functionally equivalent.
 */
static uInt longest_match(
    deflate_state *s,
    IPos cur_match			/* current match */
)
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Byte *scan = s->window + s->strstart; /* current string */
    register Byte *match;                       /* matched string */
    register int len;                           /* length of current match */
    int best_len = s->prev_length;              /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
	s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Pos *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ush*)scan;
    register ush scan_end   = *(ush*)(scan+best_len-1);
#else
    register Byte *strend = s->window + s->strstart + MAX_MATCH;
    /* Last two bytes of the current best match, used as a fast filter
     * before the full byte-by-byte comparison below. */
    register Byte scan_end1  = scan[best_len-1];
    register Byte scan_end   = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
	chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    do {
	Assert(cur_match < s->strstart, "no future");
	match = s->window + cur_match;

	/* Skip to next match if the match length cannot increase
	 * or if the match length is less than 2:
	 */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
	/* This code assumes sizeof(unsigned short) == 2. Do not use
	 * UNALIGNED_OK if your compiler uses a different size.
	 */
	if (*(ush*)(match+best_len-1) != scan_end ||
	    *(ush*)match != scan_start) continue;

	/* It is not necessary to compare scan[2] and match[2] since they are
	 * always equal when the other bytes match, given that the hash keys
	 * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
	 * strstart+3, +5, ... up to strstart+257. We check for insufficient
	 * lookahead only every 4th comparison; the 128th check will be made
	 * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
	 * necessary to put more guard bytes at the end of the window, or
	 * to check more often for insufficient lookahead.
	 */
	Assert(scan[2] == match[2], "scan[2]?");
	scan++, match++;
	do {
	} while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
		 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
		 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
		 *(ush*)(scan+=2) == *(ush*)(match+=2) &&
		 scan < strend);
	/* The funny "do {}" generates better code on most compilers */

	/* Here, scan <= window+strstart+257 */
	Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
	if (*scan == *match) scan++;

	len = (MAX_MATCH - 1) - (int)(strend-scan);
	scan = strend - (MAX_MATCH-1);

#else /* UNALIGNED_OK */

	if (match[best_len]   != scan_end  ||
	    match[best_len-1] != scan_end1 ||
	    *match            != *scan     ||
	    *++match          != scan[1])      continue;

	/* The check at best_len-1 can be removed because it will be made
	 * again later. (This heuristic is not always a win.)
	 * It is not necessary to compare scan[2] and match[2] since they
	 * are always equal when the other bytes match, given that
	 * the hash keys are equal and that HASH_BITS >= 8.
	 */
	scan += 2, match++;
	Assert(*scan == *match, "match[2]?");

	/* We check for insufficient lookahead only every 8th comparison;
	 * the 256th check will be made at strstart+258.
	 */
	do {
	} while (*++scan == *++match && *++scan == *++match &&
		 *++scan == *++match && *++scan == *++match &&
		 *++scan == *++match && *++scan == *++match &&
		 *++scan == *++match && *++scan == *++match &&
		 scan < strend);

	Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

	len = MAX_MATCH - (int)(strend - scan);
	scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

	if (len > best_len) {
	    s->match_start = cur_match;
	    best_len = len;
	    if (len >= nice_match) break;   /* good enough; stop searching */
#ifdef UNALIGNED_OK
	    scan_end = *(ush*)(scan+best_len-1);
#else
	    scan_end1  = scan[best_len-1];
	    scan_end   = scan[best_len];
#endif
	}
    } while ((cur_match = prev[cur_match & wmask]) > limit
	     && --chain_length != 0);

    /* Never report a match longer than the available lookahead. */
    if ((uInt)best_len <= s->lookahead) return best_len;
    return s->lookahead;
}
2052
#ifdef DEBUG_ZLIB
/* ===========================================================================
 * Check that the match at match_start is indeed a match.
 * Debug-only sanity check: aborts via z_error() on a bogus match.
 */
static void check_match(
    deflate_state *s,
    IPos start,
    IPos match,
    int length
)
{
    /* check that the match is indeed a match */
    if (memcmp((char *)s->window + match,
		(char *)s->window + start, length) != EQUAL) {
	fprintf(stderr, " start %u, match %u, length %d\n",
		start, match, length);
	do {
	    fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
	} while (--length != 0);
	z_error("invalid match");
    }
    if (z_verbose > 1) {
	fprintf(stderr,"\\[%d,%d]", start-match, length);
	do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
#else
/* In non-debug builds the check compiles to nothing. */
#  define check_match(s, start, match, length)
#endif
2082
/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
static void fill_window(
    deflate_state *s
)
{
    register unsigned n, m;
    register Pos *p;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    do {
	more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

	/* Deal with !@#$% 64K limit: */
	if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
	    more = wsize;

	} else if (more == (unsigned)(-1)) {
	    /* Very unlikely, but possible on 16 bit machine if strstart == 0
	     * and lookahead == 1 (input done one byte at time)
	     */
	    more--;

	/* If the window is almost full and there is insufficient lookahead,
	 * move the upper half to the lower one to make room in the upper half.
	 */
	} else if (s->strstart >= wsize+MAX_DIST(s)) {

	    zmemcpy((char *)s->window, (char *)s->window+wsize,
		   (unsigned)wsize);
	    s->match_start -= wsize;
	    s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
	    s->block_start -= (long) wsize;

	    /* Slide the hash table (could be avoided with 32 bit values
	       at the expense of memory usage). We slide even when level == 0
	       to keep the hash table consistent if we switch back to level > 0
	       later. (Using level 0 permanently is not an optimal usage of
	       zlib, so we don't care about this pathological case.)
	     */
	    /* Rebase every head[] entry by wsize; entries that would go
	     * negative drop out of the window and become NIL. */
	    n = s->hash_size;
	    p = &s->head[n];
	    do {
		m = *--p;
		*p = (Pos)(m >= wsize ? m-wsize : NIL);
	    } while (--n);

	    n = wsize;
	    p = &s->prev[n];
	    do {
		m = *--p;
		*p = (Pos)(m >= wsize ? m-wsize : NIL);
		/* If n is not on any hash chain, prev[n] is garbage but
		 * its value will never be used.
		 */
	    } while (--n);
	    more += wsize;
	}
	if (s->strm->avail_in == 0) return;

	/* If there was no sliding:
	 *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
	 *    more == window_size - lookahead - strstart
	 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
	 * => more >= window_size - 2*WSIZE + 2
	 * In the BIG_MEM or MMAP case (not yet supported),
	 *   window_size == input_size + MIN_LOOKAHEAD &&
	 *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
	 * Otherwise, window_size == 2*WSIZE so more >= 2.
	 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
	 */
	Assert(more >= 2, "more < 2");

	n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
	s->lookahead += n;

	/* Initialize the hash value now that we have some input: */
	if (s->lookahead >= MIN_MATCH) {
	    s->ins_h = s->window[s->strstart];
	    UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
	    Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
	}
	/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
	 * but this is not important since only literal bytes will be emitted.
	 */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
}
2182
/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 * Emits the block [block_start, strstart) via the tree coder, advances
 * block_start, and pushes pending output to the application.
 */
#define FLUSH_BLOCK_ONLY(s, eof) { \
   zlib_tr_flush_block(s, (s->block_start >= 0L ? \
		   (char *)&s->window[(unsigned)s->block_start] : \
		   (char *)/****pts****/NULLP), \
		(ulg)((long)s->strstart - s->block_start), \
		(eof)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary. */
/* Returns from the *enclosing* compress function when the output buffer
 * fills up, so callers must only use it inside a block_state function. */
#define FLUSH_BLOCK(s, eof) { \
   FLUSH_BLOCK_ONLY(s, eof); \
   if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
}
2203
/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 */
static block_state deflate_stored(
    deflate_state *s,
    int flush
)
{
    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
     * to pending_buf_size, and each stored block has a 5 byte header:
     */
    ulg max_block_size = 0xffff;
    ulg max_start;

    if (max_block_size > s->pending_buf_size - 5) {
	max_block_size = s->pending_buf_size - 5;
    }

    /* Copy as much as possible from input to output: */
    for (;;) {
	/* Fill the window as much as possible: */
	if (s->lookahead <= 1) {

	    Assert(s->strstart < s->w_size+MAX_DIST(s) ||
		   s->block_start >= (long)s->w_size, "slide too late");

	    fill_window(s);
	    if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;

	    if (s->lookahead == 0) break; /* flush the current block */
	}
	Assert(s->block_start >= 0L, "block gone");

	/* Consume the whole lookahead; stored blocks need no matching. */
	s->strstart += s->lookahead;
	s->lookahead = 0;

	/* Emit a stored block if pending_buf will be full: */
	max_start = s->block_start + max_block_size;
	if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
	    /* strstart == 0 is possible when wraparound on 16-bit machine */
	    s->lookahead = (uInt)(s->strstart - max_start);
	    s->strstart = (uInt)max_start;
	    FLUSH_BLOCK(s, 0);
	}
	/* Flush if we may have to slide, otherwise block_start may become
	 * negative and the data will be gone:
	 */
	if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
	    FLUSH_BLOCK(s, 0);
	}
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
2264
/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
static block_state deflate_fast(
    deflate_state *s,
    int flush
)
{
    IPos hash_head = NIL; /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
	/* Make sure that we always have enough lookahead, except
	 * at the end of the input file. We need MAX_MATCH bytes
	 * for the next match, plus MIN_MATCH bytes to insert the
	 * string following the next match.
	 */
	if (s->lookahead < MIN_LOOKAHEAD) {
	    fill_window(s);
	    if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
		return need_more;
	    }
	    if (s->lookahead == 0) break; /* flush the current block */
	}

	/* Insert the string window[strstart .. strstart+2] in the
	 * dictionary, and set hash_head to the head of the hash chain:
	 */
	if (s->lookahead >= MIN_MATCH) {
	    INSERT_STRING(s, s->strstart, hash_head);
	}

	/* Find the longest match, discarding those <= prev_length.
	 * At this point we have always match_length < MIN_MATCH
	 */
	if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
	    /* To simplify the code, we prevent matches with the string
	     * of window index 0 (in particular we have to avoid a match
	     * of the string with itself at the start of the input file).
	     */
	    if (s->strategy != Z_HUFFMAN_ONLY) {
		s->match_length = longest_match (s, hash_head);
	    }
	    /* longest_match() sets match_start */
	}
	if (s->match_length >= MIN_MATCH) {
	    check_match(s, s->strstart, s->match_start, s->match_length);

	    /* Emit a (distance, length) pair for the match. */
	    bflush = zlib_tr_tally(s, s->strstart - s->match_start,
				   s->match_length - MIN_MATCH);

	    s->lookahead -= s->match_length;

	    /* Insert new strings in the hash table only if the match length
	     * is not too large. This saves time but degrades compression.
	     */
	    if (s->match_length <= s->max_insert_length &&
		s->lookahead >= MIN_MATCH) {
		s->match_length--; /* string at strstart already in hash table */
		do {
		    s->strstart++;
		    INSERT_STRING(s, s->strstart, hash_head);
		    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
		     * always MIN_MATCH bytes ahead.
		     */
		} while (--s->match_length != 0);
		s->strstart++;
	    } else {
		s->strstart += s->match_length;
		s->match_length = 0;
		s->ins_h = s->window[s->strstart];
		UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
		Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
		/* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
		 * matter since it will be recomputed at next deflate call.
		 */
	    }
	} else {
	    /* No match, output a literal byte */
	    Tracevv((stderr,"%c", s->window[s->strstart]));
	    bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
	    s->lookahead--;
	    s->strstart++;
	}
	if (bflush) FLUSH_BLOCK(s, 0);
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
2360
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
static block_state deflate_slow(
    deflate_state *s,
    int flush
)
{
    IPos hash_head = NIL;    /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
	/* Make sure that we always have enough lookahead, except
	 * at the end of the input file. We need MAX_MATCH bytes
	 * for the next match, plus MIN_MATCH bytes to insert the
	 * string following the next match.
	 */
	if (s->lookahead < MIN_LOOKAHEAD) {
	    fill_window(s);
	    if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
		return need_more;
	    }
	    if (s->lookahead == 0) break; /* flush the current block */
	}

	/* Insert the string window[strstart .. strstart+2] in the
	 * dictionary, and set hash_head to the head of the hash chain:
	 */
	if (s->lookahead >= MIN_MATCH) {
	    INSERT_STRING(s, s->strstart, hash_head);
	}

	/* Find the longest match, discarding those <= prev_length.
	 */
	s->prev_length = s->match_length, s->prev_match = s->match_start;
	s->match_length = MIN_MATCH-1;

	if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
	    s->strstart - hash_head <= MAX_DIST(s)) {
	    /* To simplify the code, we prevent matches with the string
	     * of window index 0 (in particular we have to avoid a match
	     * of the string with itself at the start of the input file).
	     */
	    if (s->strategy != Z_HUFFMAN_ONLY) {
		s->match_length = longest_match (s, hash_head);
	    }
	    /* longest_match() sets match_start */

	    /* Heuristic: drop short matches that are too far away, since
	     * the distance would cost more bits than the literals save. */
	    if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
		 (s->match_length == MIN_MATCH &&
		  s->strstart - s->match_start > TOO_FAR))) {

		/* If prev_match is also MIN_MATCH, match_start is garbage
		 * but we will ignore the current match anyway.
		 */
		s->match_length = MIN_MATCH-1;
	    }
	}
	/* If there was a match at the previous step and the current
	 * match is not better, output the previous match:
	 */
	if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
	    uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
	    /* Do not insert strings in hash table beyond this. */

	    check_match(s, s->strstart-1, s->prev_match, s->prev_length);

	    bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
				   s->prev_length - MIN_MATCH);

	    /* Insert in hash table all strings up to the end of the match.
	     * strstart-1 and strstart are already inserted. If there is not
	     * enough lookahead, the last two strings are not inserted in
	     * the hash table.
	     */
	    s->lookahead -= s->prev_length-1;
	    s->prev_length -= 2;
	    do {
		if (++s->strstart <= max_insert) {
		    INSERT_STRING(s, s->strstart, hash_head);
		}
	    } while (--s->prev_length != 0);
	    s->match_available = 0;
	    s->match_length = MIN_MATCH-1;
	    s->strstart++;

	    if (bflush) FLUSH_BLOCK(s, 0);

	} else if (s->match_available) {
	    /* If there was no match at the previous position, output a
	     * single literal. If there was a match but the current match
	     * is longer, truncate the previous match to a single literal.
	     */
	    Tracevv((stderr,"%c", s->window[s->strstart-1]));
	    if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
		FLUSH_BLOCK_ONLY(s, 0);
	    }
	    s->strstart++;
	    s->lookahead--;
	    if (s->strm->avail_out == 0) return need_more;
	} else {
	    /* There is no previous match to compare with, wait for
	     * the next step to decide.
	     */
	    s->match_available = 1;
	    s->strstart++;
	    s->lookahead--;
	}
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    /* Emit the deferred literal, if any, before closing the last block. */
    if (s->match_available) {
	Tracevv((stderr,"%c", s->window[s->strstart-1]));
	zlib_tr_tally (s, 0, s->window[s->strstart-1]);
	s->match_available = 0;
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
2482
/* Return the number of bytes of private workspace the caller must allocate
 * for one deflate stream (see deflate_workspace) before initialization. */
int zlib_deflate_workspacesize(void)
{
    return sizeof(deflate_workspace);
}
2487
2488 /* +++ trees.c */
2489 /* trees.c -- output deflated data using Huffman coding
2490 * Copyright (C) 1995-1996 Jean-loup Gailly
2491 * For conditions of distribution and use, see copyright notice in zlib.h
2492 */
2493
2494 /*
2495 * ALGORITHM
2496 *
2497 * The "deflation" process uses several Huffman trees. The more
2498 * common source values are represented by shorter bit sequences.
2499 *
2500 * Each code tree is stored in a compressed form which is itself
2501 * a Huffman encoding of the lengths of all the code strings (in
2502 * ascending order by source values). The actual code strings are
2503 * reconstructed from the lengths in the inflate process, as described
2504 * in the deflate specification.
2505 *
2506 * REFERENCES
2507 *
2508 * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
2509 * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
2510 *
2511 * Storer, James A.
2512 * Data Compression: Methods and Theory, pp. 49-50.
2513 * Computer Science Press, 1988. ISBN 0-7167-8156-5.
2514 *
2515 * Sedgewick, R.
2516 * Algorithms, p290.
2517 * Addison-Wesley, 1983. ISBN 0-201-06672-6.
2518 */
2519
2520 /* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
2521
2522 /* #include "deflate.h" */
2523 #if 0 /**** pts ****/
2524 # include <linux/zutil.h>
2525 # include "defutil.h"
2526 # ifdef DEBUG_ZLIB
2527 # include <ctype.h>
2528 # endif
2529 #endif
2530
2531
2532 /* ===========================================================================
2533 * Constants
2534 */
2535
#define MAX_BL_BITS 7
/* Bit length codes must not exceed MAX_BL_BITS bits */

#define END_BLOCK 256
/* end of block literal code */

/* The three run-length codes used when transmitting the bit-length tree
 * (RFC 1951, section 3.2.7):
 */
#define REP_3_6 16
/* repeat previous bit length 3-6 times (2 bits of repeat count) */

#define REPZ_3_10 17
/* repeat a zero length 3-10 times (3 bits of repeat count) */

#define REPZ_11_138 18
/* repeat a zero length 11-138 times (7 bits of repeat count) */

static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
   = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};

static const int extra_dbits[D_CODES] /* extra bits for each distance code */
   = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};

static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
   = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};

static const uch bl_order[BL_CODES]
   = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
/* The lengths of the bit length codes are sent in order of decreasing
 * probability, to avoid transmitting the lengths for unused bit length codes.
 */

#define Buf_size (8 * 2*sizeof(char))
/* Number of bits used within bi_buf. (bi_buf might be implemented on
 * more than 16 bits on some systems.)
 */
2570
/* ===========================================================================
 * Local data. These are initialized only once (see tr_static_init below).
 */

static ct_data static_ltree[L_CODES+2];
/* The static literal tree. Since the bit lengths are imposed, there is no
 * need for the L_CODES extra codes used during heap construction. However
 * the codes 286 and 287 are needed to build a canonical tree (see
 * zlib_tr_init below).
 */

static ct_data static_dtree[D_CODES];
/* The static distance tree. (Actually a trivial tree since all codes use
 * 5 bits.)
 */

static uch dist_code[512];
/* Distance codes. The first 256 values correspond to the distances
 * 3 .. 258, the last 256 values correspond to the top 8 bits of
 * the 15 bit distances.
 */

static uch length_code[MAX_MATCH-MIN_MATCH+1];
/* length code for each normalized match length (0 == MIN_MATCH) */

static int base_length[LENGTH_CODES];
/* First normalized length for each code (0 = MIN_MATCH) */

static int base_dist[D_CODES];
/* First normalized distance for each code (0 = distance of 1) */

/* Immutable description shared by all streams for one kind of tree
 * (literal/length, distance, or bit-length).
 */
struct static_tree_desc_s {
    const ct_data *static_tree;  /* static tree or NULLP */
    const int *extra_bits;       /* extra bits for each code or NULLP */
    int extra_base;              /* base index for extra_bits */
    int elems;                   /* max number of elements in the tree */
    int max_length;              /* max bit length for the codes */
};

static static_tree_desc static_l_desc =
{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};

static static_tree_desc static_d_desc =
{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};

static static_tree_desc static_bl_desc =
{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
2618
/* ===========================================================================
 * Local (static) routines in this file.
 */

static void tr_static_init (void);
static void init_block (deflate_state *s);
static void pqdownheap (deflate_state *s, ct_data *tree, int k);
static void gen_bitlen (deflate_state *s, tree_desc *desc);
static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
static void build_tree (deflate_state *s, tree_desc *desc);
static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
static void send_tree (deflate_state *s, ct_data *tree, int max_code);
static int build_bl_tree (deflate_state *s);
static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
			int blcodes);
static void compress_block (deflate_state *s, ct_data *ltree,
			ct_data *dtree);
static void set_data_type (deflate_state *s);
static unsigned bi_reverse (unsigned value, int length);
static void bi_windup (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
			int header);

#ifndef DEBUG_ZLIB
#  define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
/* Send a code of the given tree. c and tree must not have side effects
 * (both are evaluated twice by the expansion). */

#else /* DEBUG_ZLIB */
#  define send_code(s, c, tree) \
     { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
       send_bits(s, tree[c].Code, tree[c].Len); }
#endif

#define d_code(dist) \
   ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
/* Mapping from a distance to a distance code. dist is the distance - 1 and
 * must not have side effects. dist_code[256] and dist_code[257] are never
 * used.
 */
2659
/* ===========================================================================
 * Send a value on a given number of bits, buffering through s->bi_buf.
 * IN assertion: length <= 16 and value fits in length bits.
 * NOTE(review): the debug Assert below allows at most 15 bits, one less
 * than the 16 stated here -- confirm which bound is intended.
 */
#ifdef DEBUG_ZLIB
static void send_bits (deflate_state *s, int value, int length);

static void send_bits(
    deflate_state *s,
    int value,  /* value to send */
    int length  /* number of bits */
)
{
    Tracevv((stderr," l %2d v %4x ", length, value));
    Assert(length > 0 && length <= 15, "invalid length");
    s->bits_sent += (ulg)length;

    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
     * unused bits in value.
     */
    if (s->bi_valid > (int)Buf_size - length) {
        s->bi_buf |= (value << s->bi_valid);
        put_short(s, s->bi_buf);
        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
        s->bi_valid += length - Buf_size;
    } else {
        s->bi_buf |= value << s->bi_valid;
        s->bi_valid += length;
    }
}
#else /* !DEBUG_ZLIB */

/* Non-debug variant: same flushing logic as the function above.  `length`
 * is evaluated once (into len); `value` may be evaluated twice, so it must
 * not have side effects. */
#define send_bits(s, value, length) \
{ int len = length;\
  if (s->bi_valid > (int)Buf_size - len) {\
    int val = value;\
    s->bi_buf |= (val << s->bi_valid);\
    put_short(s, s->bi_buf);\
    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
    s->bi_valid += len - Buf_size;\
  } else {\
    s->bi_buf |= (value) << s->bi_valid;\
    s->bi_valid += len;\
  }\
}
#endif /* DEBUG_ZLIB */
2707
2708 /* ===========================================================================
2709 * Initialize the various 'constant' tables. In a multi-threaded environment,
2710 * this function may be called by two threads concurrently, but this is
2711 * harmless since both invocations do exactly the same thing.
2712 */
tr_static_init(void)2713 static void tr_static_init(void)
2714 {
2715 static int static_init_done;
2716 int n; /* iterates over tree elements */
2717 int bits; /* bit counter */
2718 int length; /* length value */
2719 int code; /* code value */
2720 int dist; /* distance index */
2721 ush bl_count[MAX_BITS+1];
2722 /* number of codes at each bit length for an optimal tree */
2723
2724 if (static_init_done) return;
2725
2726 /* Initialize the mapping length (0..255) -> length code (0..28) */
2727 length = 0;
2728 for (code = 0; code < LENGTH_CODES-1; code++) {
2729 base_length[code] = length;
2730 for (n = 0; n < (1<<extra_lbits[code]); n++) {
2731 length_code[length++] = (uch)code;
2732 }
2733 }
2734 Assert (length == 256, "tr_static_init: length != 256");
2735 /* Note that the length 255 (match length 258) can be represented
2736 * in two different ways: code 284 + 5 bits or code 285, so we
2737 * overwrite length_code[255] to use the best encoding:
2738 */
2739 length_code[length-1] = (uch)code;
2740
2741 /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
2742 dist = 0;
2743 for (code = 0 ; code < 16; code++) {
2744 base_dist[code] = dist;
2745 for (n = 0; n < (1<<extra_dbits[code]); n++) {
2746 dist_code[dist++] = (uch)code;
2747 }
2748 }
2749 Assert (dist == 256, "tr_static_init: dist != 256");
2750 dist >>= 7; /* from now on, all distances are divided by 128 */
2751 for ( ; code < D_CODES; code++) {
2752 base_dist[code] = dist << 7;
2753 for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
2754 dist_code[256 + dist++] = (uch)code;
2755 }
2756 }
2757 Assert (dist == 256, "tr_static_init: 256+dist != 512");
2758
2759 /* Construct the codes of the static literal tree */
2760 for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
2761 n = 0;
2762 while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
2763 while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
2764 while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
2765 while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
2766 /* Codes 286 and 287 do not exist, but we must include them in the
2767 * tree construction to get a canonical Huffman tree (longest code
2768 * all ones)
2769 */
2770 gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
2771
2772 /* The static distance tree is trivial: */
2773 for (n = 0; n < D_CODES; n++) {
2774 static_dtree[n].Len = 5;
2775 static_dtree[n].Code = bi_reverse((unsigned)n, 5);
2776 }
2777 static_init_done = 1;
2778 }
2779
2780 /* ===========================================================================
2781 * Initialize the tree data structures for a new zlib stream.
2782 */
zlib_tr_init(deflate_state * s)2783 ZSTATIC void zlib_tr_init(
2784 deflate_state *s
2785 )
2786 {
2787 tr_static_init();
2788
2789 s->compressed_len = 0L;
2790
2791 s->l_desc.dyn_tree = s->dyn_ltree;
2792 s->l_desc.stat_desc = &static_l_desc;
2793
2794 s->d_desc.dyn_tree = s->dyn_dtree;
2795 s->d_desc.stat_desc = &static_d_desc;
2796
2797 s->bl_desc.dyn_tree = s->bl_tree;
2798 s->bl_desc.stat_desc = &static_bl_desc;
2799
2800 s->bi_buf = 0;
2801 s->bi_valid = 0;
2802 s->last_eob_len = 8; /* enough lookahead for inflate */
2803 #ifdef DEBUG_ZLIB
2804 s->bits_sent = 0L;
2805 #endif
2806
2807 /* Initialize the first block of the first file: */
2808 init_block(s);
2809 }
2810
2811 /* ===========================================================================
2812 * Initialize a new block.
2813 */
init_block(deflate_state * s)2814 static void init_block(
2815 deflate_state *s
2816 )
2817 {
2818 int n; /* iterates over tree elements */
2819
2820 /* Initialize the trees. */
2821 for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
2822 for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
2823 for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
2824
2825 s->dyn_ltree[END_BLOCK].Freq = 1;
2826 s->opt_len = s->static_len = 0L;
2827 s->last_lit = s->matches = 0;
2828 }
2829
#define SMALLEST 1
/* Index within the heap array of least frequent node in the Huffman tree */


/* ===========================================================================
 * Remove the smallest element from the heap and recreate the heap with
 * one less element. Updates heap and heap_len.
 * (The last heap element replaces the root, then sifts down via
 * pqdownheap to restore the heap property.)
 */
#define pqremove(s, tree, top) \
{\
    top = s->heap[SMALLEST]; \
    s->heap[SMALLEST] = s->heap[s->heap_len--]; \
    pqdownheap(s, tree, SMALLEST); \
}
2844
/* ===========================================================================
 * Compares two subtrees, using the tree depth as tie breaker when
 * the subtrees have equal frequency. This minimizes the worst case length.
 * n and m are tree node indices; evaluates to true when n orders before m.
 */
#define smaller(tree, n, m, depth) \
   (tree[n].Freq < tree[m].Freq || \
   (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
2852
2853 /* ===========================================================================
2854 * Restore the heap property by moving down the tree starting at node k,
2855 * exchanging a node with the smallest of its two sons if necessary, stopping
2856 * when the heap property is re-established (each father smaller than its
2857 * two sons).
2858 */
pqdownheap(deflate_state * s,ct_data * tree,int k)2859 static void pqdownheap(
2860 deflate_state *s,
2861 ct_data *tree, /* the tree to restore */
2862 int k /* node to move down */
2863 )
2864 {
2865 int v = s->heap[k];
2866 int j = k << 1; /* left son of k */
2867 while (j <= s->heap_len) {
2868 /* Set j to the smallest of the two sons: */
2869 if (j < s->heap_len &&
2870 smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
2871 j++;
2872 }
2873 /* Exit if v is smaller than both sons */
2874 if (smaller(tree, v, s->heap[j], s->depth)) break;
2875
2876 /* Exchange v with the smallest son */
2877 s->heap[k] = s->heap[j]; k = j;
2878
2879 /* And continue down the tree, setting j to the left son of k */
2880 j <<= 1;
2881 }
2882 s->heap[k] = v;
2883 }
2884
/* ===========================================================================
 * Compute the optimal bit lengths for a tree and update the total bit length
 * for the current block.
 * IN assertion: the fields freq and dad are set, heap[heap_max] and
 *    above are the tree nodes sorted by increasing frequency.
 * OUT assertions: the field len is set to the optimal bit length, the
 *     array bl_count contains the frequencies for each bit length.
 *     The length opt_len is updated; static_len is also updated if stree is
 *     not null.
 */
static void gen_bitlen(
    deflate_state *s,
    tree_desc *desc    /* the tree descriptor */
)
{
    ct_data *tree = desc->dyn_tree;
    int max_code = desc->max_code;
    const ct_data *stree = desc->stat_desc->static_tree;
    const int *extra = desc->stat_desc->extra_bits;
    int base = desc->stat_desc->extra_base;
    int max_length = desc->stat_desc->max_length;
    int h;              /* heap index */
    int n, m;           /* iterate over the tree elements */
    int bits;           /* bit length */
    int xbits;          /* extra bits */
    ush f;              /* frequency */
    int overflow = 0;   /* number of elements with bit length too large */

    for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;

    /* In a first pass, compute the optimal bit lengths (which may
     * overflow in the case of the bit length tree).
     */
    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */

    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
        n = s->heap[h];
        bits = tree[tree[n].Dad].Len + 1;
        if (bits > max_length) bits = max_length, overflow++;
        tree[n].Len = (ush)bits;
        /* We overwrite tree[n].Dad which is no longer needed */

        if (n > max_code) continue; /* not a leaf node */

        s->bl_count[bits]++;
        xbits = 0;
        if (n >= base) xbits = extra[n-base];
        f = tree[n].Freq;
        s->opt_len += (ulg)f * (bits + xbits);
        if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
    }
    if (overflow == 0) return;

    Trace((stderr,"\nbit length overflow\n"));
    /* This happens for example on obj2 and pic of the Calgary corpus */

    /* Find the first bit length which could increase: */
    do {
        bits = max_length-1;
        while (s->bl_count[bits] == 0) bits--;
        s->bl_count[bits]--;      /* move one leaf down the tree */
        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
        s->bl_count[max_length]--;
        /* The brother of the overflow item also moves one step up,
         * but this does not affect bl_count[max_length]
         */
        overflow -= 2;
    } while (overflow > 0);

    /* Now recompute all bit lengths, scanning in increasing frequency.
     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
     * lengths instead of fixing only the wrong ones. This idea is taken
     * from 'ar' written by Haruhiko Okumura.)
     */
    for (bits = max_length; bits != 0; bits--) {
        n = s->bl_count[bits];
        while (n != 0) {
            m = s->heap[--h];
            if (m > max_code) continue; /* internal node: no length to fix */
            if (tree[m].Len != (unsigned) bits) {
                Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
                s->opt_len += ((long)bits - (long)tree[m].Len)
                                *(long)tree[m].Freq;
                tree[m].Len = (ush)bits;
            }
            n--;
        }
    }
}
2974
2975 /* ===========================================================================
2976 * Generate the codes for a given tree and bit counts (which need not be
2977 * optimal).
2978 * IN assertion: the array bl_count contains the bit length statistics for
2979 * the given tree and the field len is set for all tree elements.
2980 * OUT assertion: the field code is set for all tree elements of non
2981 * zero code length.
2982 */
gen_codes(ct_data * tree,int max_code,ush * bl_count)2983 static void gen_codes(
2984 ct_data *tree, /* the tree to decorate */
2985 int max_code, /* largest code with non zero frequency */
2986 ush *bl_count /* number of codes at each bit length */
2987 )
2988 {
2989 ush next_code[MAX_BITS+1]; /* next code value for each bit length */
2990 ush code = 0; /* running code value */
2991 int bits; /* bit index */
2992 int n; /* code index */
2993
2994 /* The distribution counts are first used to generate the code values
2995 * without bit reversal.
2996 */
2997 for (bits = 1; bits <= MAX_BITS; bits++) {
2998 next_code[bits] = code = (code + bl_count[bits-1]) << 1;
2999 }
3000 /* Check that the bit counts in bl_count are consistent. The last code
3001 * must be all ones.
3002 */
3003 Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
3004 "inconsistent bit counts");
3005 Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
3006
3007 for (n = 0; n <= max_code; n++) {
3008 int len = tree[n].Len;
3009 if (len == 0) continue;
3010 /* Now reverse the bits */
3011 tree[n].Code = bi_reverse(next_code[len]++, len);
3012
3013 Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
3014 n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
3015 }
3016 }
3017
/* ===========================================================================
 * Constructs one Huffman tree and assigns the code bit strings and lengths.
 * Updates the total bit length for the current block.
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 *     and corresponding code. The length opt_len is updated; static_len is
 *     also updated if stree is not null. The field max_code is set.
 */
static void build_tree(
    deflate_state *s,
    tree_desc *desc    /* the tree descriptor */
)
{
    ct_data *tree = desc->dyn_tree;
    const ct_data *stree = desc->stat_desc->static_tree;
    int elems = desc->stat_desc->elems;
    int n, m;          /* iterate over heap elements */
    int max_code = -1; /* largest code with non zero frequency */
    int node;          /* new node being created */

    /* Construct the initial heap, with least frequent element in
     * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
     * heap[0] is not used.
     */
    s->heap_len = 0, s->heap_max = HEAP_SIZE;

    for (n = 0; n < elems; n++) {
        if (tree[n].Freq != 0) {
            s->heap[++(s->heap_len)] = max_code = n;
            s->depth[n] = 0;
        } else {
            tree[n].Len = 0;
        }
    }

    /* The pkzip format requires that at least one distance code exists,
     * and that at least one bit should be sent even if there is only one
     * possible code. So to avoid special checks later on we force at least
     * two codes of non zero frequency.
     */
    while (s->heap_len < 2) {
        node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
        tree[node].Freq = 1;
        s->depth[node] = 0;
        s->opt_len--; if (stree) s->static_len -= stree[node].Len;
        /* node is 0 or 1 so it does not have extra bits */
    }
    desc->max_code = max_code;

    /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
     * establish sub-heaps of increasing lengths:
     */
    for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);

    /* Construct the Huffman tree by repeatedly combining the least two
     * frequent nodes.
     */
    node = elems; /* next internal node of the tree */
    do {
        pqremove(s, tree, n);  /* n = node of least frequency */
        m = s->heap[SMALLEST]; /* m = node of next least frequency */

        s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
        s->heap[--(s->heap_max)] = m;

        /* Create a new node father of n and m */
        tree[node].Freq = tree[n].Freq + tree[m].Freq;
        s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
        tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
        if (tree == s->bl_tree) {
            fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
                    node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
        }
#endif
        /* and insert the new node in the heap */
        s->heap[SMALLEST] = node++;
        pqdownheap(s, tree, SMALLEST);

    } while (s->heap_len >= 2);

    /* The single remaining heap entry is the root of the tree. */
    s->heap[--(s->heap_max)] = s->heap[SMALLEST];

    /* At this point, the fields freq and dad are set. We can now
     * generate the bit lengths.
     */
    gen_bitlen(s, (tree_desc *)desc);

    /* The field len is now set, we can generate the bit codes */
    gen_codes ((ct_data *)tree, max_code, s->bl_count);
}
3109
3110 /* ===========================================================================
3111 * Scan a literal or distance tree to determine the frequencies of the codes
3112 * in the bit length tree.
3113 */
scan_tree(deflate_state * s,ct_data * tree,int max_code)3114 static void scan_tree(
3115 deflate_state *s,
3116 ct_data *tree, /* the tree to be scanned */
3117 int max_code /* and its largest code of non zero frequency */
3118 )
3119 {
3120 int n; /* iterates over all tree elements */
3121 int prevlen = -1; /* last emitted length */
3122 int curlen; /* length of current code */
3123 int nextlen = tree[0].Len; /* length of next code */
3124 int count = 0; /* repeat count of the current code */
3125 int max_count = 7; /* max repeat count */
3126 int min_count = 4; /* min repeat count */
3127
3128 if (nextlen == 0) max_count = 138, min_count = 3;
3129 tree[max_code+1].Len = (ush)0xffff; /* guard */
3130
3131 for (n = 0; n <= max_code; n++) {
3132 curlen = nextlen; nextlen = tree[n+1].Len;
3133 if (++count < max_count && curlen == nextlen) {
3134 continue;
3135 } else if (count < min_count) {
3136 s->bl_tree[curlen].Freq += count;
3137 } else if (curlen != 0) {
3138 if (curlen != prevlen) s->bl_tree[curlen].Freq++;
3139 s->bl_tree[REP_3_6].Freq++;
3140 } else if (count <= 10) {
3141 s->bl_tree[REPZ_3_10].Freq++;
3142 } else {
3143 s->bl_tree[REPZ_11_138].Freq++;
3144 }
3145 count = 0; prevlen = curlen;
3146 if (nextlen == 0) {
3147 max_count = 138, min_count = 3;
3148 } else if (curlen == nextlen) {
3149 max_count = 6, min_count = 3;
3150 } else {
3151 max_count = 7, min_count = 4;
3152 }
3153 }
3154 }
3155
/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.  Emits the same runs that scan_tree counted, so the two must
 * stay in lockstep.
 */
static void send_tree(
    deflate_state *s,
    ct_data *tree, /* the tree to be scanned */
    int max_code   /* and its largest code of non zero frequency */
)
{
    int n;                     /* iterates over all tree elements */
    int prevlen = -1;          /* last emitted length */
    int curlen;                /* length of current code */
    int nextlen = tree[0].Len; /* length of next code */
    int count = 0;             /* repeat count of the current code */
    int max_count = 7;         /* max repeat count */
    int min_count = 4;         /* min repeat count */

    /* tree[max_code+1].Len = -1; */ /* guard already set by scan_tree */
    if (nextlen == 0) max_count = 138, min_count = 3;

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen; nextlen = tree[n+1].Len;
        if (++count < max_count && curlen == nextlen) {
            continue; /* still inside the current run */
        } else if (count < min_count) {
            /* run too short to encode as a repeat: send lengths literally */
            do { send_code(s, curlen, s->bl_tree); } while (--count != 0);

        } else if (curlen != 0) {
            if (curlen != prevlen) {
                send_code(s, curlen, s->bl_tree); count--;
            }
            Assert(count >= 3 && count <= 6, " 3_6?");
            send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);

        } else if (count <= 10) {
            send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);

        } else {
            send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
        }
        count = 0; prevlen = curlen;
        if (nextlen == 0) {
            max_count = 138, min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6, min_count = 3;
        } else {
            max_count = 7, min_count = 4;
        }
    }
}
3207
3208 /* ===========================================================================
3209 * Construct the Huffman tree for the bit lengths and return the index in
3210 * bl_order of the last bit length code to send.
3211 */
build_bl_tree(deflate_state * s)3212 static int build_bl_tree(
3213 deflate_state *s
3214 )
3215 {
3216 int max_blindex; /* index of last bit length code of non zero freq */
3217
3218 /* Determine the bit length frequencies for literal and distance trees */
3219 scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
3220 scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
3221
3222 /* Build the bit length tree: */
3223 build_tree(s, (tree_desc *)(&(s->bl_desc)));
3224 /* opt_len now includes the length of the tree representations, except
3225 * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
3226 */
3227
3228 /* Determine the number of bit length codes to send. The pkzip format
3229 * requires that at least 4 bit length codes be sent. (appnote.txt says
3230 * 3 but the actual value used is 4.)
3231 */
3232 for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
3233 if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
3234 }
3235 /* Update opt_len to include the bit length tree and counts */
3236 s->opt_len += 3*(max_blindex+1) + 5+5+4;
3237 Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
3238 s->opt_len, s->static_len));
3239
3240 return max_blindex;
3241 }
3242
/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
 */
static void send_all_trees(
    deflate_state *s,
    int lcodes,  /* number of literal/length codes to send (>= 257) */
    int dcodes,  /* number of distance codes to send (>= 1) */
    int blcodes  /* number of bit length codes to send (>= 4) */
)
{
    int rank; /* index in bl_order */

    Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
    Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
            "too many codes");
    Tracev((stderr, "\nbl counts: "));
    /* HLIT, HDIST, HCLEN header fields (RFC 1951, section 3.2.7): */
    send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
    send_bits(s, dcodes-1, 5);
    send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
    for (rank = 0; rank < blcodes; rank++) {
        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
    }
    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}
3276
/* ===========================================================================
 * Send a stored (uncompressed) block.
 */
ZSTATIC void zlib_tr_stored_block(
    deflate_state *s,
    char *buf,      /* input block */
    ulg stored_len, /* length of input block */
    int eof         /* true if this is the last block for a file */
)
{
    send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
    /* A stored block is byte aligned: round the 3 type bits up to a byte
     * boundary, then add 4 header bytes (LEN and NLEN) plus the payload.
     */
    s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
    s->compressed_len += (stored_len + 4) << 3;

    copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
}
3293
/* Send just the `stored block' type code without any length bytes or data,
 * then pad the output to a byte boundary.
 */
ZSTATIC void zlib_tr_stored_type_only(
    deflate_state *s
)
{
    send_bits(s, (STORED_BLOCK << 1), 3);
    bi_windup(s); /* flush and byte-align the bit buffer */
    s->compressed_len = (s->compressed_len + 3) & ~7L;
}
3304
3305
/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 * The current inflate code requires 9 bits of lookahead. If the
 * last two codes for the previous block (real code plus EOB) were coded
 * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
 * the last real code. In this case we send two empty static blocks instead
 * of one. (There are no problems if the previous block is stored or fixed.)
 * To simplify the code, we assume the worst case of last real code encoded
 * on one bit only.
 */
ZSTATIC void zlib_tr_align(
    deflate_state *s
)
{
    send_bits(s, STATIC_TREES<<1, 3); /* static block, not final */
    send_code(s, END_BLOCK, static_ltree);
    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
    bi_flush(s);
    /* Of the 10 bits for the empty block, we have already sent
     * (10 - bi_valid) bits. The lookahead for the last real code (before
     * the EOB of the previous block) was thus at least one plus the length
     * of the EOB plus what we have just sent of the empty static block.
     */
    if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
        /* Not enough lookahead yet: emit a second empty static block. */
        send_bits(s, STATIC_TREES<<1, 3);
        send_code(s, END_BLOCK, static_ltree);
        s->compressed_len += 10L;
        bi_flush(s);
    }
    s->last_eob_len = 7; /* length of the static EOB code just sent */
}
3338
/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and output the encoded block to the zip file. This function
 * returns the total compressed length for the file so far (in bytes).
 */
ZSTATIC ulg zlib_tr_flush_block(
    deflate_state *s,
    char *buf, /* input block, or NULLP if too old */
    ulg stored_len, /* length of input block */
    int eof /* true if this is the last block for a file */
)
{
    ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
    int max_blindex = 0; /* index of last bit length code of non zero freq */

    /* Build the Huffman trees unless a stored block is forced */
    if (s->level > 0) {

        /* Check if the file is ascii or binary */
        if (s->data_type == Z_UNKNOWN) set_data_type(s);

        /* Construct the literal and distance trees */
        build_tree(s, (tree_desc *)(&(s->l_desc)));
        Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
                s->static_len));

        build_tree(s, (tree_desc *)(&(s->d_desc)));
        Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
                s->static_len));
        /* At this point, opt_len and static_len are the total bit lengths of
         * the compressed block data, excluding the tree representations.
         */

        /* Build the bit length tree for the above two trees, and get the index
         * in bl_order of the last bit length code to send.
         */
        max_blindex = build_bl_tree(s);

        /* Determine the best encoding. Compute first the block length in
         * bytes: +3 for the block-type bits, +7 to round up to whole bytes.
         */
        opt_lenb = (s->opt_len+3+7)>>3;
        static_lenb = (s->static_len+3+7)>>3;

        Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
                opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
                s->last_lit));

        /* Ties go to the static trees: same size, cheaper to emit. */
        if (static_lenb <= opt_lenb) opt_lenb = static_lenb;

    } else {
        Assert(buf != (char*)0, "lost buf");
        /* level 0: force a stored block; 5 = worst-case header overhead */
        opt_lenb = static_lenb = stored_len + 5;
    }

    /* If compression failed and this is the first and last block,
     * and if the .zip file can be seeked (to rewrite the local header),
     * the whole file is transformed into a stored file:
     */
#ifdef STORED_FILE_OK
#  ifdef FORCE_STORED_FILE
    if (eof && s->compressed_len == 0L) { /* force stored file */
#  else
    if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
#  endif
        /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
        if (buf == (char*)0) error ("block vanished");

        copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
        s->compressed_len = stored_len << 3;
        s->method = STORED;
    } else
#endif /* STORED_FILE_OK */

#ifdef FORCE_STORED
    if (buf != (char*)0) { /* force stored block */
#else
    if (stored_len+4 <= opt_lenb && buf != (char*)0) {
        /* 4: two words for the lengths */
#endif
        /* The test buf != NULLP is only necessary if LIT_BUFSIZE > WSIZE.
         * Otherwise we can't have processed more than WSIZE input bytes since
         * the last block flush, because compression would have been
         * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
         * transform a block into a stored block.
         */
        zlib_tr_stored_block(s, buf, stored_len, eof);

#ifdef FORCE_STATIC
    } else if (static_lenb >= 0) { /* force static trees */
#else
    } else if (static_lenb == opt_lenb) {
#endif
        /* Static trees: no tree representation needs to be sent. */
        send_bits(s, (STATIC_TREES<<1)+eof, 3);
        compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
        s->compressed_len += 3 + s->static_len;
    } else {
        /* Dynamic trees: send the tree shapes first, then the data. */
        send_bits(s, (DYN_TREES<<1)+eof, 3);
        send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
                       max_blindex+1);
        compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
        s->compressed_len += 3 + s->opt_len;
    }
    Assert (s->compressed_len == s->bits_sent, "bad compressed size");
    /* Reset frequency counters etc. for the next block. */
    init_block(s);

    if (eof) {
        bi_windup(s);
        s->compressed_len += 7; /* align on byte boundary */
    }
    Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
           s->compressed_len-7*eof));

    return s->compressed_len >> 3;
}
3452
3453 /* ===========================================================================
3454 * Save the match info and tally the frequency counts. Return true if
3455 * the current block must be flushed.
3456 */
3457 ZSTATIC int zlib_tr_tally(
3458 deflate_state *s,
3459 unsigned dist, /* distance of matched string */
3460 unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */
3461 )
3462 {
3463 s->d_buf[s->last_lit] = (ush)dist;
3464 s->l_buf[s->last_lit++] = (uch)lc;
3465 if (dist == 0) {
3466 /* lc is the unmatched char */
3467 s->dyn_ltree[lc].Freq++;
3468 } else {
3469 s->matches++;
3470 /* Here, lc is the match length - MIN_MATCH */
3471 dist--; /* dist = match distance - 1 */
3472 Assert((ush)dist < (ush)MAX_DIST(s) &&
3473 (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
3474 (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
3475
3476 s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
3477 s->dyn_dtree[d_code(dist)].Freq++;
3478 }
3479
3480 /* Try to guess if it is profitable to stop the current block here */
3481 if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
3482 /* Compute an upper bound for the compressed length */
3483 ulg out_length = (ulg)s->last_lit*8L;
3484 ulg in_length = (ulg)((long)s->strstart - s->block_start);
3485 int dcode;
3486 for (dcode = 0; dcode < D_CODES; dcode++) {
3487 out_length += (ulg)s->dyn_dtree[dcode].Freq *
3488 (5L+extra_dbits[dcode]);
3489 }
3490 out_length >>= 3;
3491 Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
3492 s->last_lit, in_length, out_length,
3493 100L - out_length*100L/in_length));
3494 if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
3495 }
3496 return (s->last_lit == s->lit_bufsize-1);
3497 /* We avoid equality with lit_bufsize because of wraparound at 64K
3498 * on 16 bit machines and because stored blocks are restricted to
3499 * 64K-1 bytes.
3500 */
3501 }
3502
3503 /* ===========================================================================
3504 * Send the block data compressed using the given Huffman trees
3505 */
3506 static void compress_block(
3507 deflate_state *s,
3508 ct_data *ltree, /* literal tree */
3509 ct_data *dtree /* distance tree */
3510 )
3511 {
3512 unsigned dist; /* distance of matched string */
3513 int lc; /* match length or unmatched char (if dist == 0) */
3514 unsigned lx = 0; /* running index in l_buf */
3515 unsigned code; /* the code to send */
3516 int extra; /* number of extra bits to send */
3517
3518 if (s->last_lit != 0) do {
3519 dist = s->d_buf[lx];
3520 lc = s->l_buf[lx++];
3521 if (dist == 0) {
3522 send_code(s, lc, ltree); /* send a literal byte */
3523 Tracecv(isgraph(lc), (stderr," '%c' ", lc));
3524 } else {
3525 /* Here, lc is the match length - MIN_MATCH */
3526 code = length_code[lc];
3527 send_code(s, code+LITERALS+1, ltree); /* send the length code */
3528 extra = extra_lbits[code];
3529 if (extra != 0) {
3530 lc -= base_length[code];
3531 send_bits(s, lc, extra); /* send the extra length bits */
3532 }
3533 dist--; /* dist is now the match distance - 1 */
3534 code = d_code(dist);
3535 Assert (code < D_CODES, "bad d_code");
3536
3537 send_code(s, code, dtree); /* send the distance code */
3538 extra = extra_dbits[code];
3539 if (extra != 0) {
3540 dist -= base_dist[code];
3541 send_bits(s, dist, extra); /* send the extra distance bits */
3542 }
3543 } /* literal or match pair ? */
3544
3545 /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
3546 Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
3547
3548 } while (lx < s->last_lit);
3549
3550 send_code(s, END_BLOCK, ltree);
3551 s->last_eob_len = ltree[END_BLOCK].Len;
3552 }
3553
3554 /* ===========================================================================
3555 * Set the data type to ASCII or BINARY, using a crude approximation:
3556 * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
3557 * IN assertion: the fields freq of dyn_ltree are set and the total of all
3558 * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
3559 */
3560 static void set_data_type(
3561 deflate_state *s
3562 )
3563 {
3564 int n = 0;
3565 unsigned ascii_freq = 0;
3566 unsigned bin_freq = 0;
3567 while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
3568 while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
3569 while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
3570 s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
3571 }
3572
/* ===========================================================================
 * Copy a stored block into the pending output, storing first the length and
 * its one's complement (the RFC 1951 LEN/NLEN words) if requested.
 */
static void copy_block(
    deflate_state *s,
    char *buf, /* the input data */
    unsigned len, /* its length */
    int header /* true if block header must be written */
)
{
    bi_windup(s); /* align on byte boundary */
    s->last_eob_len = 8; /* enough lookahead for inflate */

    if (header) {
        /* LEN then NLEN (one's complement), each 16 bits little-endian. */
        put_short(s, (ush)len);
        put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
        s->bits_sent += 2*16;
#endif
    }
#ifdef DEBUG_ZLIB
    s->bits_sent += (ulg)len<<3;
#endif
    /* bundle up the put_byte(s, *buf++) calls into one memcpy */
    zmemcpy(&s->pending_buf[s->pending], buf, len);
    s->pending += len;
}
3601
3602 /* end of deftree.c */
3603
3604 /* ---- </rip> by pts */
3605
3606 #if PTS_DEFL_MAIN
3607
3608 /*
3609 * Usage: flateenc [-<level>] < <inputfile> > <outputfile>
3610 * <level> is one of: 0: no compression; 1: low & fast; 9: high & slow
3611 */
3612
3613 #include "pts_defl.h"
3614 #include <unistd.h> /* read(), write() */
3615 #include <stdio.h>
3616 #include <stdlib.h> /* abort() */
3617
3618 int main(int argc, char **argv) {
3619 char ibuf[4096], obuf[6000]; /* Dat: 4096->6000 should be enough */
3620 char workspace[sizeof(deflate_workspace)]; /* Dat: as returned by zlib_deflate_workspacesize in ZLIB 1.1.3 */
3621 int got, zgot;
3622 /** Compression level: 0..9 or Z_DEFAULT_COMPRESSION */
3623 int level=Z_DEFAULT_COMPRESSION;
3624 z_stream zs;
3625 (void)argc;
3626 if (argv && argv[0] && argv[1] && argv[1][0]=='-' && argv[1][1]>='0' && argv[1][1]<='9')
3627 level=argv[1][1]-'0';
3628 /* printf("ws=%d\n", zlib_deflate_workspacesize()); */
3629 if (zlib_deflate_workspacesize()+(unsigned)0<sizeof(workspace)) abort();
3630 zs.total_in=0;
3631 zs.total_out=0;
3632 zs.workspace=workspace;
3633 zs.msg=(char*)NULLP;
3634 zs.state=(struct zlib_internal_state*)NULLP;
3635 zs.data_type=Z_UNKNOWN; /* Imp: do we have to initialize it? */
3636 if (Z_OK!=zlib_deflateInit(&zs, level)) abort();
3637 while (0<(got=read(0, ibuf, sizeof(ibuf)))) {
3638 zs.next_in=ibuf; zs.avail_in=got;
3639 zs.next_out=obuf; zs.avail_out=sizeof(obuf);
3640 if (Z_OK!=zlib_deflate(&zs, 0)) abort();
3641 #ifdef DEBUG_PTS_DEFL
3642 fprintf(stderr, "ai=%d ao=%d no=%d\n", zs.avail_in, zs.avail_out, (char*)zs.next_out-obuf);
3643 #endif
3644 if (0!=zs.avail_in) abort();
3645 got=sizeof(obuf)-zs.avail_out;
3646 if (got>0 && got!=write(1, zs.next_out-got, got)) abort();
3647 }
3648 if (0!=got) abort();
3649 do { /* flush all output */
3650 zs.next_in=NULL; zs.avail_in=0;
3651 zs.next_out=obuf; zs.avail_out=sizeof(obuf);
3652 if (Z_STREAM_END!=(zgot=zlib_deflate(&zs, Z_FINISH)) && Z_OK!=zgot) abort();
3653 #ifdef DEBUG_PTS_DEFL
3654 fprintf(stderr, "ai=%d ao=%d flush\n", zs.avail_in, zs.avail_out);
3655 #endif
3656 got=sizeof(obuf)-zs.avail_out;
3657 if (got>0 && got!=write(1, zs.next_out-got, got)) abort();
3658 } while (zgot==Z_OK);
3659 if (Z_OK!=zlib_deflateEnd(&zs)) abort();
3660 return 0;
3661 }
3662
3663 #endif
3664