1 /* deflate.c -- compress data using the deflation algorithm
2  * Copyright (C) 1995-2023 Jean-loup Gailly and Mark Adler
3  * For conditions of distribution and use, see copyright notice in zlib.h
4  */
5 
6 /*
7  *  ALGORITHM
8  *
9  *      The "deflation" process depends on being able to identify portions
10  *      of the input text which are identical to earlier input (within a
11  *      sliding window trailing behind the input currently being processed).
12  *
13  *      The most straightforward technique turns out to be the fastest for
14  *      most input files: try all possible matches and select the longest.
15  *      The key feature of this algorithm is that insertions into the string
16  *      dictionary are very simple and thus fast, and deletions are avoided
17  *      completely. Insertions are performed at each input character, whereas
18  *      string matches are performed only when the previous match ends. So it
19  *      is preferable to spend more time in matches to allow very fast string
20  *      insertions and avoid deletions. The matching algorithm for small
21  *      strings is inspired by that of Rabin & Karp. A brute force approach
22  *      is used to find longer strings when a small match has been found.
23  *      A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24  *      (by Leonid Broukhis).
25  *         A previous version of this file used a more sophisticated algorithm
26  *      (by Fiala and Greene) which is guaranteed to run in linear amortized
27  *      time, but has a larger average cost, uses more memory and is patented.
28  *      However the F&G algorithm may be faster for some highly redundant
29  *      files if the parameter max_chain_length (described below) is too large.
30  *
31  *  ACKNOWLEDGEMENTS
32  *
33  *      The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34  *      I found it in 'freeze' written by Leonid Broukhis.
35  *      Thanks to many people for bug reports and testing.
36  *
37  *  REFERENCES
38  *
39  *      Deutsch, L.P., "DEFLATE Compressed Data Format Specification".
40  *      Available at http://tools.ietf.org/html/rfc1951
41  *
42  *      A description of the Rabin and Karp algorithm is given in the book
43  *         "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
44  *
45  *      Fiala, E.R., and Greene, D.H.
46  *         Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989), 490-505
47  *
48  */
49 
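/* ===========================================================================
 *  Editor's sketch (not part of the implementation): the insert/search scheme
 *  described above, in simplified form. head[] holds the most recent position
 *  whose first MIN_MATCH bytes hash to a given key, and prev[] links each
 *  position to the previous one on the same chain; see INSERT_STRING and
 *  longest_match() below for the real code.
 *
 *      insert(pos):
 *          h = hash(window[pos .. pos + MIN_MATCH - 1]);
 *          prev[pos & w_mask] = head[h];
 *          head[h] = pos;
 *
 *      search(pos):
 *          for (p = head[h]; p != NIL && pos - p <= MAX_DIST && chain-- > 0;
 *               p = prev[p & w_mask])
 *              keep the longest match of window[p..] against window[pos..];
 */
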
50 /* @(#) $Id$ */
51 
52 #include "deflate.h"
53 
54 const char deflate_copyright[] =
55    " deflate 1.3 Copyright 1995-2023 Jean-loup Gailly and Mark Adler ";
56 /*
57   If you use the zlib library in a product, an acknowledgment is welcome
58   in the documentation of your product. If for some reason you cannot
59   include such an acknowledgment, I would appreciate that you keep this
60   copyright string in the executable of your product.
61  */
62 
63 typedef enum {
64     need_more,      /* block not completed, need more input or more output */
65     block_done,     /* block flush performed */
66     finish_started, /* finish started, need only more output at next deflate */
67     finish_done     /* finish done, accept no more input or output */
68 } block_state;
69 
70 typedef block_state (*compress_func)(deflate_state *s, int flush);
71 /* Compression function. Returns the block state after the call. */
72 
73 local block_state deflate_stored(deflate_state *s, int flush);
74 local block_state deflate_fast(deflate_state *s, int flush);
75 #ifndef FASTEST
76 local block_state deflate_slow(deflate_state *s, int flush);
77 #endif
78 local block_state deflate_rle(deflate_state *s, int flush);
79 local block_state deflate_huff(deflate_state *s, int flush);
80 
81 /* ===========================================================================
82  * Local data
83  */
84 
85 #define NIL 0
86 /* Tail of hash chains */
87 
88 #ifndef TOO_FAR
89 #  define TOO_FAR 4096
90 #endif
91 /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
92 
93 /* Values for max_lazy_match, good_match and max_chain_length, depending on
94  * the desired pack level (0..9). The values given below have been tuned to
95  * exclude worst case performance for pathological files. Better values may be
96  * found for specific files.
97  */
98 typedef struct config_s {
99    ush good_length; /* reduce lazy search above this match length */
100    ush max_lazy;    /* do not perform lazy search above this match length */
101    ush nice_length; /* quit search above this match length */
102    ush max_chain;   /* maximum length of the hash chains to search */
103    compress_func func; /* compression function to use for this level */
104 } config;
105 
106 #ifdef FASTEST
107 local const config configuration_table[2] = {
108 /*      good lazy nice chain */
109 /* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
110 /* 1 */ {4,    4,  8,    4, deflate_fast}}; /* max speed, no lazy matches */
111 #else
112 local const config configuration_table[10] = {
113 /*      good lazy nice chain */
114 /* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
115 /* 1 */ {4,    4,  8,    4, deflate_fast}, /* max speed, no lazy matches */
116 /* 2 */ {4,    5, 16,    8, deflate_fast},
117 /* 3 */ {4,    6, 32,   32, deflate_fast},
118 
119 /* 4 */ {4,    4, 16,   16, deflate_slow},  /* lazy matches */
120 /* 5 */ {8,   16, 32,   32, deflate_slow},
121 /* 6 */ {8,   16, 128, 128, deflate_slow},
122 /* 7 */ {8,   32, 128, 256, deflate_slow},
123 /* 8 */ {32, 128, 258, 1024, deflate_slow},
124 /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
125 #endif
126 
127 /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
128  * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
129  * meaning.
130  */
131 
132 /* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
133 #define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))
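
/* Editor's note -- worked RANK() values, using the flush constants from
 * zlib.h (Z_NO_FLUSH=0, Z_PARTIAL_FLUSH=1, Z_SYNC_FLUSH=2, Z_FULL_FLUSH=3,
 * Z_FINISH=4, Z_BLOCK=5):
 *
 *     RANK(Z_NO_FLUSH)      = 0
 *     RANK(Z_BLOCK)         = 5*2 - 9 = 1
 *     RANK(Z_PARTIAL_FLUSH) = 2
 *     RANK(Z_SYNC_FLUSH)    = 4
 *     RANK(Z_FULL_FLUSH)    = 6
 *     RANK(Z_FINISH)        = 8
 *
 * which is what lets the duplicate-flush check in deflate() treat Z_BLOCK as
 * ranking between Z_NO_FLUSH and Z_PARTIAL_FLUSH.
 */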
134 
135 /* ===========================================================================
136  * Update a hash value with the given input byte
137  * IN  assertion: all calls to UPDATE_HASH are made with consecutive input
138  *    characters, so that a running hash key can be computed from the previous
139  *    key instead of complete recalculation each time.
140  */
141 #define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask)
142 
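/* Editor's note -- with the default memLevel of 8, hash_bits is 8 + 7 = 15,
 * hash_mask is 0x7fff, and hash_shift is (15 + MIN_MATCH-1) / MIN_MATCH = 5.
 * Since MIN_MATCH is 3, three consecutive updates shift a byte left by 15
 * bits, past hash_mask, so the running hash always covers exactly the last
 * MIN_MATCH input bytes. A sketch of priming the hash at position p:
 *
 *     h = 0;
 *     UPDATE_HASH(s, h, s->window[p]);       (h depends on byte p)
 *     UPDATE_HASH(s, h, s->window[p + 1]);   (h depends on bytes p, p+1)
 *     UPDATE_HASH(s, h, s->window[p + 2]);   (h covers p, p+1, p+2)
 */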
143 
144 /* ===========================================================================
145  * Insert string str in the dictionary and set match_head to the previous head
146  * of the hash chain (the most recent string with same hash key). Return
147  * the previous length of the hash chain.
148  * If this file is compiled with -DFASTEST, the compression level is forced
149  * to 1, and no hash chains are maintained.
150  * IN  assertion: all calls to INSERT_STRING are made with consecutive input
151  *    characters and the first MIN_MATCH bytes of str are valid (except for
152  *    the last MIN_MATCH-1 bytes of the input file).
153  */
154 #ifdef FASTEST
155 #define INSERT_STRING(s, str, match_head) \
156    (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
157     match_head = s->head[s->ins_h], \
158     s->head[s->ins_h] = (Pos)(str))
159 #else
160 #define INSERT_STRING(s, str, match_head) \
161    (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
162     match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
163     s->head[s->ins_h] = (Pos)(str))
164 #endif
165 
166 /* ===========================================================================
167  * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
168  * prev[] will be initialized on the fly.
169  */
170 #define CLEAR_HASH(s) \
171     do { \
172         s->head[s->hash_size - 1] = NIL; \
173         zmemzero((Bytef *)s->head, \
174                  (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
175     } while (0)
176 
177 /* ===========================================================================
178  * Slide the hash table when sliding the window down (could be avoided with 32
179  * bit values at the expense of memory usage). We slide even when level == 0 to
180  * keep the hash table consistent if we switch back to level > 0 later.
181  */
182 #if defined(__has_feature)
183 #  if __has_feature(memory_sanitizer)
184      __attribute__((no_sanitize("memory")))
185 #  endif
186 #endif
187 local void slide_hash(deflate_state *s) {
188     unsigned n, m;
189     Posf *p;
190     uInt wsize = s->w_size;
191 
192     n = s->hash_size;
193     p = &s->head[n];
194     do {
195         m = *--p;
196         *p = (Pos)(m >= wsize ? m - wsize : NIL);
197     } while (--n);
198     n = wsize;
199 #ifndef FASTEST
200     p = &s->prev[n];
201     do {
202         m = *--p;
203         *p = (Pos)(m >= wsize ? m - wsize : NIL);
204         /* If n is not on any hash chain, prev[n] is garbage but
205          * its value will never be used.
206          */
207     } while (--n);
208 #endif
209 }
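
/* Editor's note -- a worked example of the rebasing above, assuming the
 * default w_size of 32768: a head[] or prev[] entry of 40000 becomes
 * 40000 - 32768 = 7232 after the slide, while an entry of 1000 now refers to
 * data discarded from the window and becomes NIL, so it can no longer start
 * a match.
 */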
210 
211 /* ===========================================================================
212  * Read a new buffer from the current input stream, update the adler32
213  * and total number of bytes read.  All deflate() input goes through
214  * this function so some applications may wish to modify it to avoid
215  * allocating a large strm->next_in buffer and copying from it.
216  * (See also flush_pending()).
217  */
218 local unsigned read_buf(z_streamp strm, Bytef *buf, unsigned size) {
219     unsigned len = strm->avail_in;
220 
221     if (len > size) len = size;
222     if (len == 0) return 0;
223 
224     strm->avail_in  -= len;
225 
226     zmemcpy(buf, strm->next_in, len);
227     if (strm->state->wrap == 1) {
228         strm->adler = adler32(strm->adler, buf, len);
229     }
230 #ifdef GZIP
231     else if (strm->state->wrap == 2) {
232         strm->adler = crc32(strm->adler, buf, len);
233     }
234 #endif
235     strm->next_in  += len;
236     strm->total_in += len;
237 
238     return len;
239 }
240 
241 /* ===========================================================================
242  * Fill the window when the lookahead becomes insufficient.
243  * Updates strstart and lookahead.
244  *
245  * IN assertion: lookahead < MIN_LOOKAHEAD
246  * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
247  *    At least one byte has been read, or avail_in == 0; reads are
248  *    performed for at least two bytes (required for the zip translate_eol
249  *    option -- not supported here).
250  */
251 local void fill_window(deflate_state *s) {
252     unsigned n;
253     unsigned more;    /* Amount of free space at the end of the window. */
254     uInt wsize = s->w_size;
255 
256     Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
257 
258     do {
259         more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
260 
261         /* Deal with !@#$% 64K limit: */
262         if (sizeof(int) <= 2) {
263             if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
264                 more = wsize;
265 
266             } else if (more == (unsigned)(-1)) {
267                 /* Very unlikely, but possible on 16 bit machine if
268                  * strstart == 0 && lookahead == 1 (input done a byte at time)
269                  */
270                 more--;
271             }
272         }
273 
274         /* If the window is almost full and there is insufficient lookahead,
275          * move the upper half to the lower one to make room in the upper half.
276          */
277         if (s->strstart >= wsize + MAX_DIST(s)) {
278 
279             zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more);
280             s->match_start -= wsize;
281             s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
282             s->block_start -= (long) wsize;
283             if (s->insert > s->strstart)
284                 s->insert = s->strstart;
285             slide_hash(s);
286             more += wsize;
287         }
288         if (s->strm->avail_in == 0) break;
289 
290         /* If there was no sliding:
291          *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
292          *    more == window_size - lookahead - strstart
293          * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
294          * => more >= window_size - 2*WSIZE + 2
295          * In the BIG_MEM or MMAP case (not yet supported),
296          *   window_size == input_size + MIN_LOOKAHEAD  &&
297          *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
298          * Otherwise, window_size == 2*WSIZE so more >= 2.
299          * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
300          */
301         Assert(more >= 2, "more < 2");
302 
303         n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
304         s->lookahead += n;
305 
306         /* Initialize the hash value now that we have some input: */
307         if (s->lookahead + s->insert >= MIN_MATCH) {
308             uInt str = s->strstart - s->insert;
309             s->ins_h = s->window[str];
310             UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
311 #if MIN_MATCH != 3
312             Call UPDATE_HASH() MIN_MATCH-3 more times
313 #endif
314             while (s->insert) {
315                 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
316 #ifndef FASTEST
317                 s->prev[str & s->w_mask] = s->head[s->ins_h];
318 #endif
319                 s->head[s->ins_h] = (Pos)str;
320                 str++;
321                 s->insert--;
322                 if (s->lookahead + s->insert < MIN_MATCH)
323                     break;
324             }
325         }
326         /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
327          * but this is not important since only literal bytes will be emitted.
328          */
329 
330     } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
331 
332     /* If the WIN_INIT bytes after the end of the current data have never been
333      * written, then zero those bytes in order to avoid memory check reports of
334      * the use of uninitialized (or uninitialised as Julian writes) bytes by
335      * the longest match routines.  Update the high water mark for the next
336      * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
337      * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
338      */
339     if (s->high_water < s->window_size) {
340         ulg curr = s->strstart + (ulg)(s->lookahead);
341         ulg init;
342 
343         if (s->high_water < curr) {
344             /* Previous high water mark below current data -- zero WIN_INIT
345              * bytes or up to end of window, whichever is less.
346              */
347             init = s->window_size - curr;
348             if (init > WIN_INIT)
349                 init = WIN_INIT;
350             zmemzero(s->window + curr, (unsigned)init);
351             s->high_water = curr + init;
352         }
353         else if (s->high_water < (ulg)curr + WIN_INIT) {
354             /* High water mark at or above current data, but below current data
355              * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
356              * to end of window, whichever is less.
357              */
358             init = (ulg)curr + WIN_INIT - s->high_water;
359             if (init > s->window_size - s->high_water)
360                 init = s->window_size - s->high_water;
361             zmemzero(s->window + s->high_water, (unsigned)init);
362             s->high_water += init;
363         }
364     }
365 
366     Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
367            "not enough room for search");
368 }
369 
370 /* ========================================================================= */
371 int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version,
372                          int stream_size) {
373     return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
374                          Z_DEFAULT_STRATEGY, version, stream_size);
375     /* To do: ignore strm->next_in if we use it as window */
376 }
377 
378 /* ========================================================================= */
379 int ZEXPORT deflateInit2_(z_streamp strm, int level, int method,
380                           int windowBits, int memLevel, int strategy,
381                           const char *version, int stream_size) {
382     deflate_state *s;
383     int wrap = 1;
384     static const char my_version[] = ZLIB_VERSION;
385 
386     if (version == Z_NULL || version[0] != my_version[0] ||
387         stream_size != sizeof(z_stream)) {
388         return Z_VERSION_ERROR;
389     }
390     if (strm == Z_NULL) return Z_STREAM_ERROR;
391 
392     strm->msg = Z_NULL;
393     if (strm->zalloc == (alloc_func)0) {
394 #if defined(Z_SOLO) && !defined(_KERNEL)
395         return Z_STREAM_ERROR;
396 #else
397         strm->zalloc = zcalloc;
398         strm->opaque = (voidpf)0;
399 #endif
400     }
401     if (strm->zfree == (free_func)0)
402 #if defined(Z_SOLO) && !defined(_KERNEL)
403         return Z_STREAM_ERROR;
404 #else
405         strm->zfree = zcfree;
406 #endif
407 
408 #ifdef FASTEST
409     if (level != 0) level = 1;
410 #else
411     if (level == Z_DEFAULT_COMPRESSION) level = 6;
412 #endif
413 
414     if (windowBits < 0) { /* suppress zlib wrapper */
415         wrap = 0;
416         if (windowBits < -15)
417             return Z_STREAM_ERROR;
418         windowBits = -windowBits;
419     }
420 #ifdef GZIP
421     else if (windowBits > 15) {
422         wrap = 2;       /* write gzip wrapper instead */
423         windowBits -= 16;
424     }
425 #endif
426     if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
427         windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
428         strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) {
429         return Z_STREAM_ERROR;
430     }
431     if (windowBits == 8) windowBits = 9;  /* until 256-byte window bug fixed */
432     s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
433     if (s == Z_NULL) return Z_MEM_ERROR;
434     strm->state = (struct internal_state FAR *)s;
435     s->strm = strm;
436     s->status = INIT_STATE;     /* to pass state test in deflateReset() */
437 
438     s->wrap = wrap;
439     s->gzhead = Z_NULL;
440     s->w_bits = (uInt)windowBits;
441     s->w_size = 1 << s->w_bits;
442     s->w_mask = s->w_size - 1;
443 
444     s->hash_bits = (uInt)memLevel + 7;
445     s->hash_size = 1 << s->hash_bits;
446     s->hash_mask = s->hash_size - 1;
447     s->hash_shift =  ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH);
448 
449     s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
450     s->prev   = (Posf *)  ZALLOC(strm, s->w_size, sizeof(Pos));
451     s->head   = (Posf *)  ZALLOC(strm, s->hash_size, sizeof(Pos));
452 
453     s->high_water = 0;      /* nothing written to s->window yet */
454 
455     s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
456 
457     /* We overlay pending_buf and sym_buf. This works since the average size
458      * for length/distance pairs over any compressed block is assured to be 31
459      * bits or less.
460      *
461      * Analysis: The longest fixed codes are a length code of 8 bits plus 5
462      * extra bits, for lengths 131 to 257. The longest fixed distance codes are
463      * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
464      * possible fixed-codes length/distance pair is then 31 bits total.
465      *
466      * sym_buf starts one-fourth of the way into pending_buf. So there are
467      * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
468      * in sym_buf is three bytes -- two for the distance and one for the
469      * literal/length. As each symbol is consumed, the pointer to the next
470      * sym_buf value to read moves forward three bytes. From that symbol, up to
471      * 31 bits are written to pending_buf. The closest the written pending_buf
472      * bits gets to the next sym_buf symbol to read is just before the last
473      * code is written. At that time, 31*(n - 2) bits have been written, just
474      * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at
475      * 8*n bits into pending_buf. (Note that the symbol buffer fills when n - 1
476      * symbols are written.) The closest the writing gets to what is unread is
477      * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and
478      * can range from 128 to 32768.
479      *
480      * Therefore, at a minimum, there are 142 bits of space between what is
481      * written and what is read in the overlain buffers, so the symbols cannot
482      * be overwritten by the compressed data. That space is actually 139 bits,
483      * due to the three-bit fixed-code block header.
484      *
485      * That covers the case where either Z_FIXED is specified, forcing fixed
486      * codes, or when the use of fixed codes is chosen, because that choice
487      * results in a smaller compressed block than dynamic codes. That latter
488      * condition then assures that the above analysis also covers all dynamic
489      * blocks. A dynamic-code block will only be chosen to be emitted if it has
490      * fewer bits than a fixed-code block would for the same set of symbols.
491      * Therefore its average symbol length is assured to be less than 31. So
492      * the compressed data for a dynamic block also cannot overwrite the
493      * symbols from which it is being constructed.
494      */
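
    /* Editor's note -- plugging the limits into the analysis above: with the
     * default lit_bufsize n = 16384 the writer stays at least n + 14 = 16398
     * bits behind the reader, and with the minimum n = 128 the margin is
     * 142 bits, reduced to 139 by the three-bit fixed-code block header.
     */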
495 
496     s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
497     s->pending_buf_size = (ulg)s->lit_bufsize * 4;
498 
499     if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
500         s->pending_buf == Z_NULL) {
501         s->status = FINISH_STATE;
502         strm->msg = ERR_MSG(Z_MEM_ERROR);
503         deflateEnd (strm);
504         return Z_MEM_ERROR;
505     }
506     s->sym_buf = s->pending_buf + s->lit_bufsize;
507     s->sym_end = (s->lit_bufsize - 1) * 3;
508     /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
509      * on 16 bit machines and because stored blocks are restricted to
510      * 64K-1 bytes.
511      */
512 
513     s->level = level;
514     s->strategy = strategy;
515     s->method = (Byte)method;
516 
517     return deflateReset(strm);
518 }
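
/* Editor's note -- a minimal usage sketch for deflateInit2(), following the
 * windowBits conventions handled above: 8..15 selects a zlib wrapper, a
 * negative value selects raw deflate, and adding 16 selects a gzip wrapper.
 * Error handling is elided.
 *
 *     z_stream strm;
 *     int ret;
 *
 *     strm.zalloc = Z_NULL;
 *     strm.zfree  = Z_NULL;
 *     strm.opaque = Z_NULL;
 *     ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
 *                        15 + 16, 8, Z_DEFAULT_STRATEGY);
 *     if (ret != Z_OK)
 *         return ret;
 *
 * Here 15 + 16 requests a 32K window with a gzip wrapper; 15 alone would give
 * the zlib wrapper and -15 raw deflate. memLevel 8 is the default.
 */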
519 
520 /* =========================================================================
521  * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
522  */
523 local int deflateStateCheck(z_streamp strm) {
524     deflate_state *s;
525     if (strm == Z_NULL ||
526         strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
527         return 1;
528     s = strm->state;
529     if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
530 #ifdef GZIP
531                                            s->status != GZIP_STATE &&
532 #endif
533                                            s->status != EXTRA_STATE &&
534                                            s->status != NAME_STATE &&
535                                            s->status != COMMENT_STATE &&
536                                            s->status != HCRC_STATE &&
537                                            s->status != BUSY_STATE &&
538                                            s->status != FINISH_STATE))
539         return 1;
540     return 0;
541 }
542 
543 /* ========================================================================= */
544 int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary,
545                                  uInt  dictLength) {
546     deflate_state *s;
547     uInt str, n;
548     int wrap;
549     unsigned avail;
550     z_const unsigned char *next;
551 
552     if (deflateStateCheck(strm) || dictionary == Z_NULL)
553         return Z_STREAM_ERROR;
554     s = strm->state;
555     wrap = s->wrap;
556     if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
557         return Z_STREAM_ERROR;
558 
559     /* when using zlib wrappers, compute Adler-32 for provided dictionary */
560     if (wrap == 1)
561         strm->adler = adler32(strm->adler, dictionary, dictLength);
562     s->wrap = 0;                    /* avoid computing Adler-32 in read_buf */
563 
564     /* if dictionary would fill window, just replace the history */
565     if (dictLength >= s->w_size) {
566         if (wrap == 0) {            /* already empty otherwise */
567             CLEAR_HASH(s);
568             s->strstart = 0;
569             s->block_start = 0L;
570             s->insert = 0;
571         }
572         dictionary += dictLength - s->w_size;  /* use the tail */
573         dictLength = s->w_size;
574     }
575 
576     /* insert dictionary into window and hash */
577     avail = strm->avail_in;
578     next = strm->next_in;
579     strm->avail_in = dictLength;
580     strm->next_in = (z_const Bytef *)dictionary;
581     fill_window(s);
582     while (s->lookahead >= MIN_MATCH) {
583         str = s->strstart;
584         n = s->lookahead - (MIN_MATCH-1);
585         do {
586             UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
587 #ifndef FASTEST
588             s->prev[str & s->w_mask] = s->head[s->ins_h];
589 #endif
590             s->head[s->ins_h] = (Pos)str;
591             str++;
592         } while (--n);
593         s->strstart = str;
594         s->lookahead = MIN_MATCH-1;
595         fill_window(s);
596     }
597     s->strstart += s->lookahead;
598     s->block_start = (long)s->strstart;
599     s->insert = s->lookahead;
600     s->lookahead = 0;
601     s->match_length = s->prev_length = MIN_MATCH-1;
602     s->match_available = 0;
603     strm->next_in = next;
604     strm->avail_in = avail;
605     s->wrap = wrap;
606     return Z_OK;
607 }
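
/* Editor's note -- a usage sketch for deflateSetDictionary() on a
 * zlib-wrapped stream: as enforced above, it must be called after
 * deflateInit()/deflateReset() and before the first deflate() call. The
 * decompressor then supplies the same dictionary via inflateSetDictionary()
 * when inflate() returns Z_NEED_DICT.
 *
 *     static const Bytef dict[] = "a phrase that recurs in the input";
 *
 *     ret = deflateInit(&strm, Z_BEST_COMPRESSION);
 *     if (ret == Z_OK)
 *         ret = deflateSetDictionary(&strm, dict, sizeof(dict) - 1);
 */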
608 
609 /* ========================================================================= */
610 int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary,
611                                  uInt *dictLength) {
612     deflate_state *s;
613     uInt len;
614 
615     if (deflateStateCheck(strm))
616         return Z_STREAM_ERROR;
617     s = strm->state;
618     len = s->strstart + s->lookahead;
619     if (len > s->w_size)
620         len = s->w_size;
621     if (dictionary != Z_NULL && len)
622         zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
623     if (dictLength != Z_NULL)
624         *dictLength = len;
625     return Z_OK;
626 }
627 
628 /* ========================================================================= */
629 int ZEXPORT deflateResetKeep(z_streamp strm) {
630     deflate_state *s;
631 
632     if (deflateStateCheck(strm)) {
633         return Z_STREAM_ERROR;
634     }
635 
636     strm->total_in = strm->total_out = 0;
637     strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
638     strm->data_type = Z_UNKNOWN;
639 
640     s = (deflate_state *)strm->state;
641     s->pending = 0;
642     s->pending_out = s->pending_buf;
643 
644     if (s->wrap < 0) {
645         s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
646     }
647     s->status =
648 #ifdef GZIP
649         s->wrap == 2 ? GZIP_STATE :
650 #endif
651         INIT_STATE;
652     strm->adler =
653 #ifdef GZIP
654         s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
655 #endif
656         adler32(0L, Z_NULL, 0);
657     s->last_flush = -2;
658 
659     _tr_init(s);
660 
661     return Z_OK;
662 }
663 
664 /* ===========================================================================
665  * Initialize the "longest match" routines for a new zlib stream
666  */
667 local void lm_init(deflate_state *s) {
668     s->window_size = (ulg)2L*s->w_size;
669 
670     CLEAR_HASH(s);
671 
672     /* Set the default configuration parameters:
673      */
674     s->max_lazy_match   = configuration_table[s->level].max_lazy;
675     s->good_match       = configuration_table[s->level].good_length;
676     s->nice_match       = configuration_table[s->level].nice_length;
677     s->max_chain_length = configuration_table[s->level].max_chain;
678 
679     s->strstart = 0;
680     s->block_start = 0L;
681     s->lookahead = 0;
682     s->insert = 0;
683     s->match_length = s->prev_length = MIN_MATCH-1;
684     s->match_available = 0;
685     s->ins_h = 0;
686 }
687 
688 /* ========================================================================= */
689 int ZEXPORT deflateReset(z_streamp strm) {
690     int ret;
691 
692     ret = deflateResetKeep(strm);
693     if (ret == Z_OK)
694         lm_init(strm->state);
695     return ret;
696 }
697 
698 /* ========================================================================= */
699 int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
700     if (deflateStateCheck(strm) || strm->state->wrap != 2)
701         return Z_STREAM_ERROR;
702     strm->state->gzhead = head;
703     return Z_OK;
704 }
705 
706 /* ========================================================================= */
707 int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
708     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
709     if (pending != Z_NULL)
710         *pending = strm->state->pending;
711     if (bits != Z_NULL)
712         *bits = strm->state->bi_valid;
713     return Z_OK;
714 }
715 
716 /* ========================================================================= */
717 int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) {
718     deflate_state *s;
719     int put;
720 
721     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
722     s = strm->state;
723     if (bits < 0 || bits > 16 ||
724         s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
725         return Z_BUF_ERROR;
726     do {
727         put = Buf_size - s->bi_valid;
728         if (put > bits)
729             put = bits;
730         s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
731         s->bi_valid += put;
732         _tr_flush_bits(s);
733         value >>= put;
734         bits -= put;
735     } while (bits);
736     return Z_OK;
737 }
738 
739 /* ========================================================================= */
740 int ZEXPORT deflateParams(z_streamp strm, int level, int strategy) {
741     deflate_state *s;
742     compress_func func;
743 
744     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
745     s = strm->state;
746 
747 #ifdef FASTEST
748     if (level != 0) level = 1;
749 #else
750     if (level == Z_DEFAULT_COMPRESSION) level = 6;
751 #endif
752     if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
753         return Z_STREAM_ERROR;
754     }
755     func = configuration_table[s->level].func;
756 
757     if ((strategy != s->strategy || func != configuration_table[level].func) &&
758         s->last_flush != -2) {
759         /* Flush the last buffer: */
760         int err = deflate(strm, Z_BLOCK);
761         if (err == Z_STREAM_ERROR)
762             return err;
763         if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
764             return Z_BUF_ERROR;
765     }
766     if (s->level != level) {
767         if (s->level == 0 && s->matches != 0) {
768             if (s->matches == 1)
769                 slide_hash(s);
770             else
771                 CLEAR_HASH(s);
772             s->matches = 0;
773         }
774         s->level = level;
775         s->max_lazy_match   = configuration_table[level].max_lazy;
776         s->good_match       = configuration_table[level].good_length;
777         s->nice_match       = configuration_table[level].nice_length;
778         s->max_chain_length = configuration_table[level].max_chain;
779     }
780     s->strategy = strategy;
781     return Z_OK;
782 }
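
/* Editor's note -- a sketch of switching parameters mid-stream, assuming the
 * zlib.h contract that a Z_BUF_ERROR from deflateParams() is not fatal: it
 * means the pending input could not be compressed with the output space
 * currently available, and the call can be retried after draining the
 * output. out_buf stands in for the application's output buffer.
 *
 *     strm.next_out  = out_buf;
 *     strm.avail_out = sizeof(out_buf);
 *     ret = deflateParams(&strm, 1, Z_DEFAULT_STRATEGY);   (drop to level 1)
 *     if (ret == Z_BUF_ERROR) {
 *         consume the output produced so far, then retry deflateParams();
 *     }
 */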
783 
784 /* ========================================================================= */
785 int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
786                         int nice_length, int max_chain) {
787     deflate_state *s;
788 
789     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
790     s = strm->state;
791     s->good_match = (uInt)good_length;
792     s->max_lazy_match = (uInt)max_lazy;
793     s->nice_match = nice_length;
794     s->max_chain_length = (uInt)max_chain;
795     return Z_OK;
796 }
797 
798 /* =========================================================================
799  * For the default windowBits of 15 and memLevel of 8, this function returns
800  * an upper bound on the compressed size that is both small and close to
801  * exact. This is an expansion of ~0.03%, plus a small constant.
802  *
803  * For any setting other than those defaults for windowBits and memLevel, one
804  * of two worst case bounds is returned. This is at most an expansion of ~4% or
805  * ~13%, plus a small constant.
806  *
807  * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
808  * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
809  * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
810  * expansion results from five bytes of header for each stored block.
811  *
812  * The larger expansion of 13% results from a window size less than or equal to
813  * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
814  * the data being compressed may have slid out of the sliding window, impeding
815  * a stored block from being emitted. Then the only choice is a fixed or
816  * dynamic block, where a fixed block limits the maximum expansion to 9 bits
817  * per 8-bit byte, plus 10 bits for every block. The smallest block size for
818  * which this can occur is 255 (memLevel == 2).
819  *
820  * Shifts are used to approximate divisions, for speed.
821  */
822 uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
823     deflate_state *s;
824     uLong fixedlen, storelen, wraplen;
825 
826     /* upper bound for fixed blocks with 9-bit literals and length 255
827        (memLevel == 2, which is the lowest that may not use stored blocks) --
828        ~13% overhead plus a small constant */
829     fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
830                (sourceLen >> 9) + 4;
831 
832     /* upper bound for stored blocks with length 127 (memLevel == 1) --
833        ~4% overhead plus a small constant */
834     storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
835                (sourceLen >> 11) + 7;
836 
837     /* if can't get parameters, return larger bound plus a zlib wrapper */
838     if (deflateStateCheck(strm))
839         return (fixedlen > storelen ? fixedlen : storelen) + 6;
840 
841     /* compute wrapper length */
842     s = strm->state;
843     switch (s->wrap) {
844     case 0:                                 /* raw deflate */
845         wraplen = 0;
846         break;
847     case 1:                                 /* zlib wrapper */
848         wraplen = 6 + (s->strstart ? 4 : 0);
849         break;
850 #ifdef GZIP
851     case 2:                                 /* gzip wrapper */
852         wraplen = 18;
853         if (s->gzhead != Z_NULL) {          /* user-supplied gzip header */
854             Bytef *str;
855             if (s->gzhead->extra != Z_NULL)
856                 wraplen += 2 + s->gzhead->extra_len;
857             str = s->gzhead->name;
858             if (str != Z_NULL)
859                 do {
860                     wraplen++;
861                 } while (*str++);
862             str = s->gzhead->comment;
863             if (str != Z_NULL)
864                 do {
865                     wraplen++;
866                 } while (*str++);
867             if (s->gzhead->hcrc)
868                 wraplen += 2;
869         }
870         break;
871 #endif
872     default:                                /* for compiler happiness */
873         wraplen = 6;
874     }
875 
876     /* if not default parameters, return one of the conservative bounds */
877     if (s->w_bits != 15 || s->hash_bits != 8 + 7)
878         return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
879                wraplen;
880 
881     /* default settings: return tight bound for that case -- ~0.03% overhead
882        plus a small constant */
883     return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
884            (sourceLen >> 25) + 13 - 6 + wraplen;
885 }
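
/* Editor's note -- a typical use of deflateBound() (after deflateInit()) to
 * size the output buffer for a one-shot compression. source and sourceLen
 * stand in for the application's input; allocation failure and cleanup are
 * elided.
 *
 *     uLong bound = deflateBound(&strm, sourceLen);
 *     Bytef *out = (Bytef *)malloc(bound);
 *
 *     strm.next_in   = (z_const Bytef *)source;
 *     strm.avail_in  = (uInt)sourceLen;
 *     strm.next_out  = out;
 *     strm.avail_out = (uInt)bound;
 *     ret = deflate(&strm, Z_FINISH);   (expect Z_STREAM_END in one call)
 */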
886 
887 /* =========================================================================
888  * Put a short in the pending buffer. The 16-bit value is put in MSB order.
889  * IN assertion: the stream state is correct and there is enough room in
890  * pending_buf.
891  */
892 local void putShortMSB(deflate_state *s, uInt b) {
893     put_byte(s, (Byte)(b >> 8));
894     put_byte(s, (Byte)(b & 0xff));
895 }
896 
897 /* =========================================================================
898  * Flush as much pending output as possible. All deflate() output, except for
899  * some deflate_stored() output, goes through this function so some
900  * applications may wish to modify it to avoid allocating a large
901  * strm->next_out buffer and copying into it. (See also read_buf()).
902  */
903 local void flush_pending(z_streamp strm) {
904     unsigned len;
905     deflate_state *s = strm->state;
906 
907     _tr_flush_bits(s);
908     len = s->pending;
909     if (len > strm->avail_out) len = strm->avail_out;
910     if (len == 0) return;
911 
912     zmemcpy(strm->next_out, s->pending_out, len);
913     strm->next_out  += len;
914     s->pending_out  += len;
915     strm->total_out += len;
916     strm->avail_out -= len;
917     s->pending      -= len;
918     if (s->pending == 0) {
919         s->pending_out = s->pending_buf;
920     }
921 }
922 
923 /* ===========================================================================
924  * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
925  */
926 #define HCRC_UPDATE(beg) \
927     do { \
928         if (s->gzhead->hcrc && s->pending > (beg)) \
929             strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
930                                 s->pending - (beg)); \
931     } while (0)
932 
933 /* ========================================================================= */
934 int ZEXPORT deflate(z_streamp strm, int flush) {
935     int old_flush; /* value of flush param for previous deflate call */
936     deflate_state *s;
937 
938     if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
939         return Z_STREAM_ERROR;
940     }
941     s = strm->state;
942 
943     if (strm->next_out == Z_NULL ||
944         (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
945         (s->status == FINISH_STATE && flush != Z_FINISH)) {
946         ERR_RETURN(strm, Z_STREAM_ERROR);
947     }
948     if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
949 
950     old_flush = s->last_flush;
951     s->last_flush = flush;
952 
953     /* Flush as much pending output as possible */
954     if (s->pending != 0) {
955         flush_pending(strm);
956         if (strm->avail_out == 0) {
957             /* Since avail_out is 0, deflate will be called again with
958              * more output space, but possibly with both pending and
959              * avail_in equal to zero. There won't be anything to do,
960              * but this is not an error situation so make sure we
961              * return OK instead of BUF_ERROR at next call of deflate:
962              */
963             s->last_flush = -1;
964             return Z_OK;
965         }
966 
967     /* Make sure there is something to do and avoid duplicate consecutive
968      * flushes. For repeated and useless calls with Z_FINISH, we keep
969      * returning Z_STREAM_END instead of Z_BUF_ERROR.
970      */
971     } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
972                flush != Z_FINISH) {
973         ERR_RETURN(strm, Z_BUF_ERROR);
974     }
975 
976     /* User must not provide more input after the first FINISH: */
977     if (s->status == FINISH_STATE && strm->avail_in != 0) {
978         ERR_RETURN(strm, Z_BUF_ERROR);
979     }
980 
981     /* Write the header */
982     if (s->status == INIT_STATE && s->wrap == 0)
983         s->status = BUSY_STATE;
984     if (s->status == INIT_STATE) {
985         /* zlib header */
986         uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8;
987         uInt level_flags;
988 
989         if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
990             level_flags = 0;
991         else if (s->level < 6)
992             level_flags = 1;
993         else if (s->level == 6)
994             level_flags = 2;
995         else
996             level_flags = 3;
997         header |= (level_flags << 6);
998         if (s->strstart != 0) header |= PRESET_DICT;
999         header += 31 - (header % 31);
1000 
1001         putShortMSB(s, header);
1002 
1003         /* Save the adler32 of the preset dictionary: */
1004         if (s->strstart != 0) {
1005             putShortMSB(s, (uInt)(strm->adler >> 16));
1006             putShortMSB(s, (uInt)(strm->adler & 0xffff));
1007         }
1008         strm->adler = adler32(0L, Z_NULL, 0);
1009         s->status = BUSY_STATE;
1010 
1011         /* Compression must start with an empty pending buffer */
1012         flush_pending(strm);
1013         if (s->pending != 0) {
1014             s->last_flush = -1;
1015             return Z_OK;
1016         }
1017     }
1018 #ifdef GZIP
1019     if (s->status == GZIP_STATE) {
1020         /* gzip header */
1021         strm->adler = crc32(0L, Z_NULL, 0);
1022         put_byte(s, 31);
1023         put_byte(s, 139);
1024         put_byte(s, 8);
1025         if (s->gzhead == Z_NULL) {
1026             put_byte(s, 0);
1027             put_byte(s, 0);
1028             put_byte(s, 0);
1029             put_byte(s, 0);
1030             put_byte(s, 0);
1031             put_byte(s, s->level == 9 ? 2 :
1032                      (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
1033                       4 : 0));
1034             put_byte(s, OS_CODE);
1035             s->status = BUSY_STATE;
1036 
1037             /* Compression must start with an empty pending buffer */
1038             flush_pending(strm);
1039             if (s->pending != 0) {
1040                 s->last_flush = -1;
1041                 return Z_OK;
1042             }
1043         }
1044         else {
1045             put_byte(s, (s->gzhead->text ? 1 : 0) +
1046                      (s->gzhead->hcrc ? 2 : 0) +
1047                      (s->gzhead->extra == Z_NULL ? 0 : 4) +
1048                      (s->gzhead->name == Z_NULL ? 0 : 8) +
1049                      (s->gzhead->comment == Z_NULL ? 0 : 16)
1050                      );
1051             put_byte(s, (Byte)(s->gzhead->time & 0xff));
1052             put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
1053             put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
1054             put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
1055             put_byte(s, s->level == 9 ? 2 :
1056                      (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
1057                       4 : 0));
1058             put_byte(s, s->gzhead->os & 0xff);
1059             if (s->gzhead->extra != Z_NULL) {
1060                 put_byte(s, s->gzhead->extra_len & 0xff);
1061                 put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
1062             }
1063             if (s->gzhead->hcrc)
1064                 strm->adler = crc32(strm->adler, s->pending_buf,
1065                                     s->pending);
1066             s->gzindex = 0;
1067             s->status = EXTRA_STATE;
1068         }
1069     }
1070     if (s->status == EXTRA_STATE) {
1071         if (s->gzhead->extra != Z_NULL) {
1072             ulg beg = s->pending;   /* start of bytes to update crc */
1073             uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
1074             while (s->pending + left > s->pending_buf_size) {
1075                 uInt copy = s->pending_buf_size - s->pending;
1076                 zmemcpy(s->pending_buf + s->pending,
1077                         s->gzhead->extra + s->gzindex, copy);
1078                 s->pending = s->pending_buf_size;
1079                 HCRC_UPDATE(beg);
1080                 s->gzindex += copy;
1081                 flush_pending(strm);
1082                 if (s->pending != 0) {
1083                     s->last_flush = -1;
1084                     return Z_OK;
1085                 }
1086                 beg = 0;
1087                 left -= copy;
1088             }
1089             zmemcpy(s->pending_buf + s->pending,
1090                     s->gzhead->extra + s->gzindex, left);
1091             s->pending += left;
1092             HCRC_UPDATE(beg);
1093             s->gzindex = 0;
1094         }
1095         s->status = NAME_STATE;
1096     }
1097     if (s->status == NAME_STATE) {
1098         if (s->gzhead->name != Z_NULL) {
1099             ulg beg = s->pending;   /* start of bytes to update crc */
1100             int val;
1101             do {
1102                 if (s->pending == s->pending_buf_size) {
1103                     HCRC_UPDATE(beg);
1104                     flush_pending(strm);
1105                     if (s->pending != 0) {
1106                         s->last_flush = -1;
1107                         return Z_OK;
1108                     }
1109                     beg = 0;
1110                 }
1111                 val = s->gzhead->name[s->gzindex++];
1112                 put_byte(s, val);
1113             } while (val != 0);
1114             HCRC_UPDATE(beg);
1115             s->gzindex = 0;
1116         }
1117         s->status = COMMENT_STATE;
1118     }
1119     if (s->status == COMMENT_STATE) {
1120         if (s->gzhead->comment != Z_NULL) {
1121             ulg beg = s->pending;   /* start of bytes to update crc */
1122             int val;
1123             do {
1124                 if (s->pending == s->pending_buf_size) {
1125                     HCRC_UPDATE(beg);
1126                     flush_pending(strm);
1127                     if (s->pending != 0) {
1128                         s->last_flush = -1;
1129                         return Z_OK;
1130                     }
1131                     beg = 0;
1132                 }
1133                 val = s->gzhead->comment[s->gzindex++];
1134                 put_byte(s, val);
1135             } while (val != 0);
1136             HCRC_UPDATE(beg);
1137         }
1138         s->status = HCRC_STATE;
1139     }
1140     if (s->status == HCRC_STATE) {
1141         if (s->gzhead->hcrc) {
1142             if (s->pending + 2 > s->pending_buf_size) {
1143                 flush_pending(strm);
1144                 if (s->pending != 0) {
1145                     s->last_flush = -1;
1146                     return Z_OK;
1147                 }
1148             }
1149             put_byte(s, (Byte)(strm->adler & 0xff));
1150             put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1151             strm->adler = crc32(0L, Z_NULL, 0);
1152         }
1153         s->status = BUSY_STATE;
1154 
1155         /* Compression must start with an empty pending buffer */
1156         flush_pending(strm);
1157         if (s->pending != 0) {
1158             s->last_flush = -1;
1159             return Z_OK;
1160         }
1161     }
1162 #endif
1163 
1164     /* Start a new block or continue the current one.
1165      */
1166     if (strm->avail_in != 0 || s->lookahead != 0 ||
1167         (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
1168         block_state bstate;
1169 
1170         bstate = s->level == 0 ? deflate_stored(s, flush) :
1171                  s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
1172                  s->strategy == Z_RLE ? deflate_rle(s, flush) :
1173                  (*(configuration_table[s->level].func))(s, flush);
1174 
1175         if (bstate == finish_started || bstate == finish_done) {
1176             s->status = FINISH_STATE;
1177         }
1178         if (bstate == need_more || bstate == finish_started) {
1179             if (strm->avail_out == 0) {
1180                 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
1181             }
1182             return Z_OK;
1183             /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
1184              * of deflate should use the same flush parameter to make sure
1185              * that the flush is complete. So we don't have to output an
1186              * empty block here, this will be done at next call. This also
1187              * ensures that for a very small output buffer, we emit at most
1188              * one empty block.
1189              */
1190         }
1191         if (bstate == block_done) {
1192             if (flush == Z_PARTIAL_FLUSH) {
1193                 _tr_align(s);
1194             } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
1195                 _tr_stored_block(s, (char*)0, 0L, 0);
1196                 /* For a full flush, this empty block will be recognized
1197                  * as a special marker by inflate_sync().
1198                  */
1199                 if (flush == Z_FULL_FLUSH) {
1200                     CLEAR_HASH(s);             /* forget history */
1201                     if (s->lookahead == 0) {
1202                         s->strstart = 0;
1203                         s->block_start = 0L;
1204                         s->insert = 0;
1205                     }
1206                 }
1207             }
1208             flush_pending(strm);
1209             if (strm->avail_out == 0) {
1210               s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
1211               return Z_OK;
1212             }
1213         }
1214     }
1215 
1216     if (flush != Z_FINISH) return Z_OK;
1217     if (s->wrap <= 0) return Z_STREAM_END;
1218 
1219     /* Write the trailer */
1220 #ifdef GZIP
1221     if (s->wrap == 2) {
1222         put_byte(s, (Byte)(strm->adler & 0xff));
1223         put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1224         put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
1225         put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
1226         put_byte(s, (Byte)(strm->total_in & 0xff));
1227         put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
1228         put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
1229         put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
1230     }
1231     else
1232 #endif
1233     {
1234         putShortMSB(s, (uInt)(strm->adler >> 16));
1235         putShortMSB(s, (uInt)(strm->adler & 0xffff));
1236     }
1237     flush_pending(strm);
1238     /* If avail_out is zero, the application will call deflate again
1239      * to flush the rest.
1240      */
1241     if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
1242     return s->pending != 0 ? Z_OK : Z_STREAM_END;
1243 }
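
/* Editor's note -- the canonical calling loop for deflate(), condensed from
 * the pattern documented in zlib.h and zpipe.c. read_chunk() and
 * write_chunk() stand in for the application's own I/O; error handling is
 * elided.
 *
 *     do {
 *         strm.avail_in = read_chunk(in_buf, sizeof(in_buf), &last);
 *         strm.next_in  = in_buf;
 *         flush = last ? Z_FINISH : Z_NO_FLUSH;
 *         do {
 *             strm.next_out  = out_buf;
 *             strm.avail_out = sizeof(out_buf);
 *             ret = deflate(&strm, flush);
 *             write_chunk(out_buf, sizeof(out_buf) - strm.avail_out);
 *         } while (strm.avail_out == 0);
 *     } while (flush != Z_FINISH);      (ret == Z_STREAM_END here)
 *     deflateEnd(&strm);
 */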
1244 
1245 /* ========================================================================= */
1246 int ZEXPORT deflateEnd(z_streamp strm) {
1247     int status;
1248 
1249     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
1250 
1251     status = strm->state->status;
1252 
1253     /* Deallocate in reverse order of allocations: */
1254     TRY_FREE(strm, strm->state->pending_buf);
1255     TRY_FREE(strm, strm->state->head);
1256     TRY_FREE(strm, strm->state->prev);
1257     TRY_FREE(strm, strm->state->window);
1258 
1259     ZFREE(strm, strm->state);
1260     strm->state = Z_NULL;
1261 
1262     return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1263 }
1264 
1265 /* =========================================================================
1266  * Copy the source state to the destination state.
1267  * To simplify the source, this is not supported for 16-bit MSDOS (which
1268  * doesn't have enough memory anyway to duplicate compression states).
1269  */
1270 int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
1271 #ifdef MAXSEG_64K
1272     (void)dest;
1273     (void)source;
1274     return Z_STREAM_ERROR;
1275 #else
1276     deflate_state *ds;
1277     deflate_state *ss;
1278 
1279 
1280     if (deflateStateCheck(source) || dest == Z_NULL) {
1281         return Z_STREAM_ERROR;
1282     }
1283 
1284     ss = source->state;
1285 
1286     zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
1287 
1288     ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1289     if (ds == Z_NULL) return Z_MEM_ERROR;
1290     dest->state = (struct internal_state FAR *) ds;
1291     zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
1292     ds->strm = dest;
1293 
1294     ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1295     ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
1296     ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
1297     ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
1298 
1299     if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1300         ds->pending_buf == Z_NULL) {
1301         deflateEnd (dest);
1302         return Z_MEM_ERROR;
1303     }
1304     /* the following zmemcpy calls do not work for 16-bit MSDOS */
1305     zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1306     zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1307     zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1308     zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1309 
1310     ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1311     ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
1312 
1313     ds->l_desc.dyn_tree = ds->dyn_ltree;
1314     ds->d_desc.dyn_tree = ds->dyn_dtree;
1315     ds->bl_desc.dyn_tree = ds->bl_tree;
1316 
1317     return Z_OK;
1318 #endif /* MAXSEG_64K */
1319 }
1320 
1321 #ifndef FASTEST
1322 /* ===========================================================================
1323  * Set match_start to the longest match starting at the given string and
1324  * return its length. Matches shorter or equal to prev_length are discarded,
1325  * in which case the result is equal to prev_length and match_start is
1326  * garbage.
1327  * IN assertions: cur_match is the head of the hash chain for the current
1328  *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1329  * OUT assertion: the match length is not greater than s->lookahead.
1330  */
1331 local uInt longest_match(deflate_state *s, IPos cur_match) {
1332     unsigned chain_length = s->max_chain_length;/* max hash chain length */
1333     register Bytef *scan = s->window + s->strstart; /* current string */
1334     register Bytef *match;                      /* matched string */
1335     register int len;                           /* length of current match */
1336     int best_len = (int)s->prev_length;         /* best match length so far */
1337     int nice_match = s->nice_match;             /* stop if match long enough */
1338     IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1339         s->strstart - (IPos)MAX_DIST(s) : NIL;
1340     /* Stop when cur_match becomes <= limit. To simplify the code,
1341      * we prevent matches with the string of window index 0.
1342      */
1343     Posf *prev = s->prev;
1344     uInt wmask = s->w_mask;
1345 
1346 #ifdef UNALIGNED_OK
1347     /* Compare two bytes at a time. Note: this is not always beneficial.
1348      * Try with and without -DUNALIGNED_OK to check.
1349      */
1350     register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1351     register ush scan_start = *(ushf*)scan;
1352     register ush scan_end   = *(ushf*)(scan + best_len - 1);
1353 #else
1354     register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1355     register Byte scan_end1  = scan[best_len - 1];
1356     register Byte scan_end   = scan[best_len];
1357 #endif
1358 
1359     /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1360      * It is easy to get rid of this optimization if necessary.
1361      */
1362     Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1363 
1364     /* Do not waste too much time if we already have a good match: */
1365     if (s->prev_length >= s->good_match) {
1366         chain_length >>= 2;
1367     }
1368     /* Do not look for matches beyond the end of the input. This is necessary
1369      * to make deflate deterministic.
1370      */
1371     if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
1372 
1373     Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1374            "need lookahead");
1375 
1376     do {
1377         Assert(cur_match < s->strstart, "no future");
1378         match = s->window + cur_match;
1379 
1380         /* Skip to next match if the match length cannot increase
1381          * or if the match length is less than 2.  Note that the checks below
1382          * for insufficient lookahead only occur occasionally for performance
1383          * reasons.  Therefore uninitialized memory will be accessed, and
1384          * conditional jumps will be made that depend on those values.
1385          * However the length of the match is limited to the lookahead, so
1386          * the output of deflate is not affected by the uninitialized values.
1387          */
1388 #if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1389         /* This code assumes sizeof(unsigned short) == 2. Do not use
1390          * UNALIGNED_OK if your compiler uses a different size.
1391          */
1392         if (*(ushf*)(match + best_len - 1) != scan_end ||
1393             *(ushf*)match != scan_start) continue;
1394 
1395         /* It is not necessary to compare scan[2] and match[2] since they are
1396          * always equal when the other bytes match, given that the hash keys
1397          * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1398          * strstart + 3, + 5, up to strstart + 257. We check for insufficient
1399          * lookahead only every 4th comparison; the 128th check will be made
1400          * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
1401          * necessary to put more guard bytes at the end of the window, or
1402          * to check more often for insufficient lookahead.
1403          */
1404         Assert(scan[2] == match[2], "scan[2]?");
1405         scan++, match++;
1406         do {
1407         } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1408                  *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1409                  *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1410                  *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
1411                  scan < strend);
1412         /* The funny "do {}" generates better code on most compilers */
1413 
1414         /* Here, scan <= window + strstart + 257 */
1415         Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1416                "wild scan");
1417         if (*scan == *match) scan++;
1418 
1419         len = (MAX_MATCH - 1) - (int)(strend - scan);
1420         scan = strend - (MAX_MATCH-1);
1421 
1422 #else /* UNALIGNED_OK */
1423 
1424         if (match[best_len]     != scan_end  ||
1425             match[best_len - 1] != scan_end1 ||
1426             *match              != *scan     ||
1427             *++match            != scan[1])      continue;
1428 
1429         /* The check at best_len - 1 can be removed because it will be made
1430          * again later. (This heuristic is not always a win.)
1431          * It is not necessary to compare scan[2] and match[2] since they
1432          * are always equal when the other bytes match, given that
1433          * the hash keys are equal and that HASH_BITS >= 8.
1434          */
1435         scan += 2, match++;
1436         Assert(*scan == *match, "match[2]?");
1437 
1438         /* We check for insufficient lookahead only every 8th comparison;
1439          * the 256th check will be made at strstart + 258.
1440          */
1441         do {
1442         } while (*++scan == *++match && *++scan == *++match &&
1443                  *++scan == *++match && *++scan == *++match &&
1444                  *++scan == *++match && *++scan == *++match &&
1445                  *++scan == *++match && *++scan == *++match &&
1446                  scan < strend);
1447 
1448         Assert(scan <= s->window + (unsigned)(s->window_size - 1),
1449                "wild scan");
1450 
1451         len = MAX_MATCH - (int)(strend - scan);
1452         scan = strend - MAX_MATCH;
1453 
1454 #endif /* UNALIGNED_OK */
1455 
1456         if (len > best_len) {
1457             s->match_start = cur_match;
1458             best_len = len;
1459             if (len >= nice_match) break;
1460 #ifdef UNALIGNED_OK
1461             scan_end = *(ushf*)(scan + best_len - 1);
1462 #else
1463             scan_end1  = scan[best_len - 1];
1464             scan_end   = scan[best_len];
1465 #endif
1466         }
1467     } while ((cur_match = prev[cur_match & wmask]) > limit
1468              && --chain_length != 0);
1469 
1470     if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1471     return s->lookahead;
1472 }
1473 
1474 #else /* FASTEST */
1475 
1476 /* ---------------------------------------------------------------------------
1477  * Optimized version for FASTEST only
1478  */
1479 local uInt longest_match(deflate_state *s, IPos cur_match) {
1480     register Bytef *scan = s->window + s->strstart; /* current string */
1481     register Bytef *match;                       /* matched string */
1482     register int len;                           /* length of current match */
1483     register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1484 
1485     /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1486      * It is easy to get rid of this optimization if necessary.
1487      */
1488     Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1489 
1490     Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1491            "need lookahead");
1492 
1493     Assert(cur_match < s->strstart, "no future");
1494 
1495     match = s->window + cur_match;
1496 
1497     /* Return failure if the match length is less than 2:
1498      */
1499     if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1500 
1501     /* The check at best_len - 1 can be removed because it will be made
1502      * again later. (This heuristic is not always a win.)
1503      * It is not necessary to compare scan[2] and match[2] since they
1504      * are always equal when the other bytes match, given that
1505      * the hash keys are equal and that HASH_BITS >= 8.
1506      */
1507     scan += 2, match += 2;
1508     Assert(*scan == *match, "match[2]?");
1509 
1510     /* We check for insufficient lookahead only every 8th comparison;
1511      * the 256th check will be made at strstart + 258.
1512      */
1513     do {
1514     } while (*++scan == *++match && *++scan == *++match &&
1515              *++scan == *++match && *++scan == *++match &&
1516              *++scan == *++match && *++scan == *++match &&
1517              *++scan == *++match && *++scan == *++match &&
1518              scan < strend);
1519 
1520     Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan");
1521 
1522     len = MAX_MATCH - (int)(strend - scan);
1523 
1524     if (len < MIN_MATCH) return MIN_MATCH - 1;
1525 
1526     s->match_start = cur_match;
1527     return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1528 }
1529 
1530 #endif /* FASTEST */
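
#if 0
/* Illustrative sketch (not compiled into zlib): a straightforward version of
 * the search that longest_match() above performs.  It walks the prev[] hash
 * chain from the newest candidate toward older ones, keeps the longest match
 * found, and stops once nice_len is reached or the chain is exhausted.  It
 * drops the unrolled comparison loops, the scan_end fast-reject test, the
 * prev_length cutoff and the clamp to s->lookahead, so it is much slower, but
 * it illustrates the same contract.  The caller is assumed to pass chain >= 1
 * and a max_len for which bytes are readable at both positions.  The names
 * here are examples, not zlib APIs.
 */
static unsigned simple_longest_match(const unsigned char *window,
                                     const unsigned short *prev,
                                     unsigned wmask, unsigned strstart,
                                     unsigned cur_match, unsigned limit,
                                     unsigned max_len, unsigned chain,
                                     unsigned nice_len,
                                     unsigned *match_start) {
    unsigned best = 0;
    do {
        const unsigned char *scan  = window + strstart;
        const unsigned char *match = window + cur_match;
        unsigned len = 0;
        while (len < max_len && scan[len] == match[len])
            len++;
        if (len > best) {
            best = len;
            *match_start = cur_match;
            if (len >= nice_len)
                break;              /* good enough, stop searching */
        }
    } while ((cur_match = prev[cur_match & wmask]) > limit &&
             --chain != 0);
    return best;
}
#endif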
1531 
1532 #ifdef ZLIB_DEBUG
1533 
1534 #define EQUAL 0
1535 /* result of memcmp for equal strings */
1536 
1537 /* ===========================================================================
1538  * Check that the match at match_start is indeed a match.
1539  */
1540 local void check_match(deflate_state *s, IPos start, IPos match, int length) {
1541     /* check that the match is indeed a match */
1542     if (zmemcmp(s->window + match,
1543                 s->window + start, length) != EQUAL) {
1544         fprintf(stderr, " start %u, match %u, length %d\n",
1545                 start, match, length);
1546         do {
1547             fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1548         } while (--length != 0);
1549         z_error("invalid match");
1550     }
1551     if (z_verbose > 1) {
1552         fprintf(stderr,"\\[%d,%d]", start - match, length);
1553         do { putc(s->window[start++], stderr); } while (--length != 0);
1554     }
1555 }
1556 #else
1557 #  define check_match(s, start, match, length)
1558 #endif /* ZLIB_DEBUG */
1559 
1560 /* ===========================================================================
1561  * Flush the current block, with given end-of-file flag.
1562  * IN assertion: strstart is set to the end of the current match.
1563  */
1564 #define FLUSH_BLOCK_ONLY(s, last) { \
1565    _tr_flush_block(s, (s->block_start >= 0L ? \
1566                    (charf *)&s->window[(unsigned)s->block_start] : \
1567                    (charf *)Z_NULL), \
1568                 (ulg)((long)s->strstart - s->block_start), \
1569                 (last)); \
1570    s->block_start = s->strstart; \
1571    flush_pending(s->strm); \
1572    Tracev((stderr,"[FLUSH]")); \
1573 }
1574 
1575 /* Same but force premature exit if necessary. */
1576 #define FLUSH_BLOCK(s, last) { \
1577    FLUSH_BLOCK_ONLY(s, last); \
1578    if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
1579 }
1580 
1581 /* Maximum stored block length in deflate format (not including header). */
1582 #define MAX_STORED 65535
1583 
1584 #if !defined(MIN)
1585 /* Minimum of a and b. */
1586 #define MIN(a, b) ((a) > (b) ? (b) : (a))
1587 #endif
1588 
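#if 0
/* Illustrative sketch (not compiled into zlib): the stored-block header
 * layout defined by RFC 1951 and patched into pending_buf by deflate_stored()
 * below.  Assuming no bits are pending, so the header starts on a byte
 * boundary, it is one byte holding BFINAL and BTYPE == 00 (the rest is
 * padding), followed by LEN and NLEN (its one's complement), each 16 bits,
 * least significant byte first.  The function name is an example, not a
 * zlib API.
 */
static void put_stored_header(unsigned char *out, unsigned len, int last) {
    out[0] = last ? 1 : 0;                 /* BFINAL bit, BTYPE 00, padding */
    out[1] = (unsigned char)len;           /* LEN, low byte */
    out[2] = (unsigned char)(len >> 8);    /* LEN, high byte */
    out[3] = (unsigned char)~len;          /* NLEN == ~LEN, low byte */
    out[4] = (unsigned char)(~len >> 8);   /* NLEN, high byte */
}
#endif
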
1589 /* ===========================================================================
1590  * Copy without compression as much as possible from the input stream, return
1591  * the current block state.
1592  *
1593  * In case deflateParams() is used to later switch to a non-zero compression
1594  * level, s->matches (otherwise unused when storing) keeps track of the number
1595  * of hash table slides to perform. If s->matches is 1, then one hash table
1596  * slide will be done when switching. If s->matches is 2, the maximum value
1597  * allowed here, then the hash table will be cleared, since two or more slides
1598  * are the same as a clear.
1599  *
1600  * deflate_stored() is written to minimize the number of times an input byte is
1601  * copied. It is most efficient with large input and output buffers, which
1602  * maximizes the opportunities to have a single copy from next_in to next_out.
1603  */
1604 local block_state deflate_stored(deflate_state *s, int flush) {
1605     /* Smallest worthy block size when not flushing or finishing. By default
1606      * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
1607      * large input and output buffers, the stored block size will be larger.
1608      */
1609     unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
1610 
1611     /* Copy as many min_block or larger stored blocks directly to next_out as
1612      * possible. If flushing, copy the remaining available input to next_out as
1613      * stored blocks, if there is enough space.
1614      */
1615     unsigned len, left, have, last = 0;
1616     unsigned used = s->strm->avail_in;
1617     do {
1618         /* Set len to the maximum size block that we can copy directly with the
1619          * available input data and output space. Set left to how much of that
1620          * would be copied from what's left in the window.
1621          */
1622         len = MAX_STORED;       /* maximum deflate stored block length */
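        /* 42 = the 3-bit stored block header, plus up to 7 bits of padding to
         * the next byte boundary, plus the 32-bit LEN/NLEN fields, so the
         * header takes (s->bi_valid + 3 + 7)/8 + 4 == (s->bi_valid + 42) >> 3
         * bytes.
         */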
1623         have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
1624         if (s->strm->avail_out < have)          /* need room for header */
1625             break;
1626             /* maximum stored block length that will fit in avail_out: */
1627         have = s->strm->avail_out - have;
1628         left = s->strstart - s->block_start;    /* bytes left in window */
1629         if (len > (ulg)left + s->strm->avail_in)
1630             len = left + s->strm->avail_in;     /* limit len to the input */
1631         if (len > have)
1632             len = have;                         /* limit len to the output */
1633 
1634         /* If the stored block would be less than min_block in length, or if
1635          * unable to copy all of the available input when flushing, then try
1636          * copying to the window and the pending buffer instead. Also don't
1637          * write an empty block when flushing -- deflate() does that.
1638          */
1639         if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
1640                                 flush == Z_NO_FLUSH ||
1641                                 len != left + s->strm->avail_in))
1642             break;
1643 
1644         /* Make a dummy stored block in pending to get the header bytes,
1645          * including any pending bits. This also updates the debugging counts.
1646          */
1647         last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
1648         _tr_stored_block(s, (char *)0, 0L, last);
1649 
1650         /* Replace the lengths in the dummy stored block with len. */
1651         s->pending_buf[s->pending - 4] = len;
1652         s->pending_buf[s->pending - 3] = len >> 8;
1653         s->pending_buf[s->pending - 2] = ~len;
1654         s->pending_buf[s->pending - 1] = ~len >> 8;
1655 
1656         /* Write the stored block header bytes. */
1657         flush_pending(s->strm);
1658 
1659 #ifdef ZLIB_DEBUG
1660         /* Update debugging counts for the data about to be copied. */
1661         s->compressed_len += len << 3;
1662         s->bits_sent += len << 3;
1663 #endif
1664 
1665         /* Copy uncompressed bytes from the window to next_out. */
1666         if (left) {
1667             if (left > len)
1668                 left = len;
1669             zmemcpy(s->strm->next_out, s->window + s->block_start, left);
1670             s->strm->next_out += left;
1671             s->strm->avail_out -= left;
1672             s->strm->total_out += left;
1673             s->block_start += left;
1674             len -= left;
1675         }
1676 
1677         /* Copy uncompressed bytes directly from next_in to next_out, updating
1678          * the check value.
1679          */
1680         if (len) {
1681             read_buf(s->strm, s->strm->next_out, len);
1682             s->strm->next_out += len;
1683             s->strm->avail_out -= len;
1684             s->strm->total_out += len;
1685         }
1686     } while (last == 0);
1687 
1688     /* Update the sliding window with the last s->w_size bytes of the copied
1689      * data, or append all of the copied data to the existing window if less
1690      * than s->w_size bytes were copied. Also update the number of bytes to
1691      * insert in the hash tables, in the event that deflateParams() switches to
1692      * a non-zero compression level.
1693      */
1694     used -= s->strm->avail_in;      /* number of input bytes directly copied */
1695     if (used) {
1696         /* If any input was used, then no unused input remains in the window,
1697          * therefore s->block_start == s->strstart.
1698          */
1699         if (used >= s->w_size) {    /* supplant the previous history */
1700             s->matches = 2;         /* clear hash */
1701             zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
1702             s->strstart = s->w_size;
1703             s->insert = s->strstart;
1704         }
1705         else {
1706             if (s->window_size - s->strstart <= used) {
1707                 /* Slide the window down. */
1708                 s->strstart -= s->w_size;
1709                 zmemcpy(s->window, s->window + s->w_size, s->strstart);
1710                 if (s->matches < 2)
1711                     s->matches++;   /* add a pending slide_hash() */
1712                 if (s->insert > s->strstart)
1713                     s->insert = s->strstart;
1714             }
1715             zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
1716             s->strstart += used;
1717             s->insert += MIN(used, s->w_size - s->insert);
1718         }
1719         s->block_start = s->strstart;
1720     }
1721     if (s->high_water < s->strstart)
1722         s->high_water = s->strstart;
1723 
1724     /* If the last block was written to next_out, then done. */
1725     if (last)
1726         return finish_done;
1727 
1728     /* If flushing and all input has been consumed, then done. */
1729     if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
1730         s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
1731         return block_done;
1732 
1733     /* Fill the window with any remaining input. */
1734     have = s->window_size - s->strstart;
1735     if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
1736         /* Slide the window down. */
1737         s->block_start -= s->w_size;
1738         s->strstart -= s->w_size;
1739         zmemcpy(s->window, s->window + s->w_size, s->strstart);
1740         if (s->matches < 2)
1741             s->matches++;           /* add a pending slide_hash() */
1742         have += s->w_size;          /* more space now */
1743         if (s->insert > s->strstart)
1744             s->insert = s->strstart;
1745     }
1746     if (have > s->strm->avail_in)
1747         have = s->strm->avail_in;
1748     if (have) {
1749         read_buf(s->strm, s->window + s->strstart, have);
1750         s->strstart += have;
1751         s->insert += MIN(have, s->w_size - s->insert);
1752     }
1753     if (s->high_water < s->strstart)
1754         s->high_water = s->strstart;
1755 
1756     /* There was not enough avail_out to write a complete worthy or flushed
1757      * stored block to next_out. Write a stored block to pending instead, if we
1758      * have enough input for a worthy block, or if flushing and there is enough
1759      * room for the remaining input as a stored block in the pending buffer.
1760      */
1761     have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
1762         /* maximum stored block length that will fit in pending: */
1763     have = MIN(s->pending_buf_size - have, MAX_STORED);
1764     min_block = MIN(have, s->w_size);
1765     left = s->strstart - s->block_start;
1766     if (left >= min_block ||
1767         ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
1768          s->strm->avail_in == 0 && left <= have)) {
1769         len = MIN(left, have);
1770         last = flush == Z_FINISH && s->strm->avail_in == 0 &&
1771                len == left ? 1 : 0;
1772         _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
1773         s->block_start += len;
1774         flush_pending(s->strm);
1775     }
1776 
1777     /* We've done all we can with the available input and output. */
1778     return last ? finish_started : need_more;
1779 }
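
#if 0
/* Illustrative sketch (not compiled into zlib): the history-update rule that
 * deflate_stored() above applies after copying data straight to next_out.
 * Only the last w_size bytes that went out need to be kept as match history;
 * anything older can never be referenced by a later match.  The window is
 * assumed to be 2 * w_size bytes, as in deflate, and the names here are
 * examples, not zlib APIs.  Hash-table bookkeeping (s->matches, s->insert)
 * is omitted.
 */
#include <string.h>

static void keep_last_history(unsigned char *window, unsigned w_size,
                              unsigned *strstart,
                              const unsigned char *copied, unsigned used) {
    if (used >= w_size) {
        /* the copied data alone supplies a full window of history */
        memcpy(window, copied + used - w_size, w_size);
        *strstart = w_size;
    } else {
        if (*strstart + used >= 2 * w_size) {
            /* slide the window down: drop the oldest w_size bytes */
            memmove(window, window + w_size, *strstart - w_size);
            *strstart -= w_size;
        }
        memcpy(window + *strstart, copied, used);
        *strstart += used;
    }
}
#endif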
1780 
1781 /* ===========================================================================
1782  * Compress as much as possible from the input stream, return the current
1783  * block state.
1784  * This function does not perform lazy evaluation of matches and inserts
1785  * new strings in the dictionary only for unmatched strings or for short
1786  * matches. It is used only for the fast compression options.
1787  */
1788 local block_state deflate_fast(deflate_state *s, int flush) {
1789     IPos hash_head;       /* head of the hash chain */
1790     int bflush;           /* set if current block must be flushed */
1791 
1792     for (;;) {
1793         /* Make sure that we always have enough lookahead, except
1794          * at the end of the input file. We need MAX_MATCH bytes
1795          * for the next match, plus MIN_MATCH bytes to insert the
1796          * string following the next match.
1797          */
1798         if (s->lookahead < MIN_LOOKAHEAD) {
1799             fill_window(s);
1800             if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1801                 return need_more;
1802             }
1803             if (s->lookahead == 0) break; /* flush the current block */
1804         }
1805 
1806         /* Insert the string window[strstart .. strstart + 2] in the
1807          * dictionary, and set hash_head to the head of the hash chain:
1808          */
1809         hash_head = NIL;
1810         if (s->lookahead >= MIN_MATCH) {
1811             INSERT_STRING(s, s->strstart, hash_head);
1812         }
1813 
1814         /* Find the longest match, discarding those <= prev_length.
1815          * At this point we always have match_length < MIN_MATCH
1816          */
1817         if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1818             /* To simplify the code, we prevent matches with the string
1819              * of window index 0 (in particular we have to avoid a match
1820              * of the string with itself at the start of the input file).
1821              */
1822             s->match_length = longest_match (s, hash_head);
1823             /* longest_match() sets match_start */
1824         }
1825         if (s->match_length >= MIN_MATCH) {
1826             check_match(s, s->strstart, s->match_start, s->match_length);
1827 
1828             _tr_tally_dist(s, s->strstart - s->match_start,
1829                            s->match_length - MIN_MATCH, bflush);
1830 
1831             s->lookahead -= s->match_length;
1832 
1833             /* Insert new strings in the hash table only if the match length
1834              * is not too large. This saves time but degrades compression.
1835              */
1836 #ifndef FASTEST
1837             if (s->match_length <= s->max_insert_length &&
1838                 s->lookahead >= MIN_MATCH) {
1839                 s->match_length--; /* string at strstart already in table */
1840                 do {
1841                     s->strstart++;
1842                     INSERT_STRING(s, s->strstart, hash_head);
1843                     /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1844                      * always MIN_MATCH bytes ahead.
1845                      */
1846                 } while (--s->match_length != 0);
1847                 s->strstart++;
1848             } else
1849 #endif
1850             {
1851                 s->strstart += s->match_length;
1852                 s->match_length = 0;
1853                 s->ins_h = s->window[s->strstart];
1854                 UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]);
1855 #if MIN_MATCH != 3
1856                 Call UPDATE_HASH() MIN_MATCH-3 more times
1857 #endif
1858                 /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1859                  * matter since it will be recomputed at next deflate call.
1860                  */
1861             }
1862         } else {
1863             /* No match, output a literal byte */
1864             Tracevv((stderr,"%c", s->window[s->strstart]));
1865             _tr_tally_lit(s, s->window[s->strstart], bflush);
1866             s->lookahead--;
1867             s->strstart++;
1868         }
1869         if (bflush) FLUSH_BLOCK(s, 0);
1870     }
1871     s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
1872     if (flush == Z_FINISH) {
1873         FLUSH_BLOCK(s, 1);
1874         return finish_done;
1875     }
1876     if (s->sym_next)
1877         FLUSH_BLOCK(s, 0);
1878     return block_done;
1879 }
1880 
1881 #ifndef FASTEST
1882 /* ===========================================================================
1883  * Same as above, but achieves better compression. We use a lazy
1884  * evaluation for matches: a match is finally adopted only if there is
1885  * no better match at the next window position.
1886  */
1887 local block_state deflate_slow(deflate_state *s, int flush) {
1888     IPos hash_head;          /* head of hash chain */
1889     int bflush;              /* set if current block must be flushed */
1890 
1891     /* Process the input block. */
1892     for (;;) {
1893         /* Make sure that we always have enough lookahead, except
1894          * at the end of the input file. We need MAX_MATCH bytes
1895          * for the next match, plus MIN_MATCH bytes to insert the
1896          * string following the next match.
1897          */
1898         if (s->lookahead < MIN_LOOKAHEAD) {
1899             fill_window(s);
1900             if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1901                 return need_more;
1902             }
1903             if (s->lookahead == 0) break; /* flush the current block */
1904         }
1905 
1906         /* Insert the string window[strstart .. strstart + 2] in the
1907          * dictionary, and set hash_head to the head of the hash chain:
1908          */
1909         hash_head = NIL;
1910         if (s->lookahead >= MIN_MATCH) {
1911             INSERT_STRING(s, s->strstart, hash_head);
1912         }
1913 
1914         /* Find the longest match, discarding those <= prev_length.
1915          */
1916         s->prev_length = s->match_length, s->prev_match = s->match_start;
1917         s->match_length = MIN_MATCH-1;
1918 
1919         if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1920             s->strstart - hash_head <= MAX_DIST(s)) {
1921             /* To simplify the code, we prevent matches with the string
1922              * of window index 0 (in particular we have to avoid a match
1923              * of the string with itself at the start of the input file).
1924              */
1925             s->match_length = longest_match (s, hash_head);
1926             /* longest_match() sets match_start */
1927 
1928             if (s->match_length <= 5 && (s->strategy == Z_FILTERED
1929 #if TOO_FAR <= 32767
1930                 || (s->match_length == MIN_MATCH &&
1931                     s->strstart - s->match_start > TOO_FAR)
1932 #endif
1933                 )) {
1934 
1935                 /* If prev_match is also MIN_MATCH, match_start is garbage
1936                  * but we will ignore the current match anyway.
1937                  */
1938                 s->match_length = MIN_MATCH-1;
1939             }
1940         }
1941         /* If there was a match at the previous step and the current
1942          * match is not better, output the previous match:
1943          */
1944         if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1945             uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1946             /* Do not insert strings in hash table beyond this. */
1947 
1948             check_match(s, s->strstart - 1, s->prev_match, s->prev_length);
1949 
1950             _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
1951                            s->prev_length - MIN_MATCH, bflush);
1952 
1953             /* Insert in hash table all strings up to the end of the match.
1954              * strstart - 1 and strstart are already inserted. If there is not
1955              * enough lookahead, the last two strings are not inserted in
1956              * the hash table.
1957              */
1958             s->lookahead -= s->prev_length - 1;
1959             s->prev_length -= 2;
1960             do {
1961                 if (++s->strstart <= max_insert) {
1962                     INSERT_STRING(s, s->strstart, hash_head);
1963                 }
1964             } while (--s->prev_length != 0);
1965             s->match_available = 0;
1966             s->match_length = MIN_MATCH-1;
1967             s->strstart++;
1968 
1969             if (bflush) FLUSH_BLOCK(s, 0);
1970 
1971         } else if (s->match_available) {
1972             /* If there was no match at the previous position, output a
1973              * single literal. If there was a match but the current match
1974              * is longer, truncate the previous match to a single literal.
1975              */
1976             Tracevv((stderr,"%c", s->window[s->strstart - 1]));
1977             _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
1978             if (bflush) {
1979                 FLUSH_BLOCK_ONLY(s, 0);
1980             }
1981             s->strstart++;
1982             s->lookahead--;
1983             if (s->strm->avail_out == 0) return need_more;
1984         } else {
1985             /* There is no previous match to compare with, wait for
1986              * the next step to decide.
1987              */
1988             s->match_available = 1;
1989             s->strstart++;
1990             s->lookahead--;
1991         }
1992     }
1993     Assert (flush != Z_NO_FLUSH, "no flush?");
1994     if (s->match_available) {
1995         Tracevv((stderr,"%c", s->window[s->strstart - 1]));
1996         _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
1997         s->match_available = 0;
1998     }
1999     s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
2000     if (flush == Z_FINISH) {
2001         FLUSH_BLOCK(s, 1);
2002         return finish_done;
2003     }
2004     if (s->sym_next)
2005         FLUSH_BLOCK(s, 0);
2006     return block_done;
2007 }
2008 #endif /* FASTEST */
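
#if 0
/* Illustrative sketch (not compiled into zlib): the lazy-evaluation rule that
 * deflate_slow() above implements, reduced to its core.  A match found at
 * position i is adopted only if the match found at position i + 1 is not
 * longer; otherwise position i is demoted to a single literal and the
 * decision is deferred again.  The crude brute-force matcher and the printf
 * output stand in for longest_match() and the _tr_tally_* calls; the
 * max_lazy_match and Z_FILTERED heuristics are omitted.  The names here are
 * examples, not zlib APIs.
 */
#include <stdio.h>

static unsigned brute_match(const unsigned char *in, unsigned i, unsigned n,
                            unsigned *dist) {
    unsigned best = 0, j;
    *dist = 0;
    for (j = 0; j < i; j++) {
        unsigned len = 0;
        while (i + len < n && in[j + len] == in[i + len])
            len++;
        if (len > best)
            best = len, *dist = i - j;
    }
    return best;
}

static void lazy_outline(const unsigned char *in, unsigned n) {
    unsigned i = 0, prev_len = 0, prev_dist = 0;
    int have_prev = 0;
    while (i < n) {
        unsigned dist, len = brute_match(in, i, n, &dist);
        if (have_prev && prev_len >= 3 && len <= prev_len) {
            printf("match: len %u dist %u\n", prev_len, prev_dist);
            i += prev_len - 1;          /* previous match began at i - 1 */
            have_prev = 0;
        } else {
            if (have_prev)
                printf("literal: %d\n", in[i - 1]);
            prev_len = len, prev_dist = dist, have_prev = 1;
            i++;
        }
    }
    if (have_prev)
        printf("literal: %d\n", in[i - 1]);
}
#endif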
2009 
2010 /* ===========================================================================
2011  * For Z_RLE, simply look for runs of bytes, generate matches only of distance
2012  * one.  Do not maintain a hash table.  (It will be regenerated if this run of
2013  * deflate switches away from Z_RLE.)
2014  */
2015 local block_state deflate_rle(deflate_state *s, int flush) {
2016     int bflush;             /* set if current block must be flushed */
2017     uInt prev;              /* byte at distance one to match */
2018     Bytef *scan, *strend;   /* scan goes up to strend for length of run */
2019 
2020     for (;;) {
2021         /* Make sure that we always have enough lookahead, except
2022          * at the end of the input file. We need MAX_MATCH bytes
2023          * for the longest run, plus one for the unrolled loop.
2024          */
2025         if (s->lookahead <= MAX_MATCH) {
2026             fill_window(s);
2027             if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
2028                 return need_more;
2029             }
2030             if (s->lookahead == 0) break; /* flush the current block */
2031         }
2032 
2033         /* See how many times the previous byte repeats */
2034         s->match_length = 0;
2035         if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
2036             scan = s->window + s->strstart - 1;
2037             prev = *scan;
2038             if (prev == *++scan && prev == *++scan && prev == *++scan) {
2039                 strend = s->window + s->strstart + MAX_MATCH;
2040                 do {
2041                 } while (prev == *++scan && prev == *++scan &&
2042                          prev == *++scan && prev == *++scan &&
2043                          prev == *++scan && prev == *++scan &&
2044                          prev == *++scan && prev == *++scan &&
2045                          scan < strend);
2046                 s->match_length = MAX_MATCH - (uInt)(strend - scan);
2047                 if (s->match_length > s->lookahead)
2048                     s->match_length = s->lookahead;
2049             }
2050             Assert(scan <= s->window + (uInt)(s->window_size - 1),
2051                    "wild scan");
2052         }
2053 
2054         /* Emit a match for a run of MIN_MATCH or longer, else a literal */
2055         if (s->match_length >= MIN_MATCH) {
2056             check_match(s, s->strstart, s->strstart - 1, s->match_length);
2057 
2058             _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
2059 
2060             s->lookahead -= s->match_length;
2061             s->strstart += s->match_length;
2062             s->match_length = 0;
2063         } else {
2064             /* No match, output a literal byte */
2065             Tracevv((stderr,"%c", s->window[s->strstart]));
2066             _tr_tally_lit(s, s->window[s->strstart], bflush);
2067             s->lookahead--;
2068             s->strstart++;
2069         }
2070         if (bflush) FLUSH_BLOCK(s, 0);
2071     }
2072     s->insert = 0;
2073     if (flush == Z_FINISH) {
2074         FLUSH_BLOCK(s, 1);
2075         return finish_done;
2076     }
2077     if (s->sym_next)
2078         FLUSH_BLOCK(s, 0);
2079     return block_done;
2080 }
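
#if 0
/* Illustrative sketch (not compiled into zlib): the Z_RLE strategy that
 * deflate_rle() above implements, reduced to its core.  Every match has
 * distance one: the run length is simply how many upcoming bytes equal the
 * byte just before the current position.  printf stands in for the
 * _tr_tally_* calls, and the names here are examples, not zlib APIs.
 */
#include <stdio.h>

static void rle_outline(const unsigned char *in, unsigned n) {
    unsigned i;
    if (n == 0) return;
    printf("literal: %d\n", in[0]);        /* first byte has no history */
    for (i = 1; i < n;) {
        unsigned len = 0;
        while (i + len < n && in[i + len] == in[i - 1] && len < 258)
            len++;                         /* 258 == MAX_MATCH */
        if (len >= 3) {                    /* 3 == MIN_MATCH */
            printf("match: len %u dist 1\n", len);
            i += len;
        } else {
            printf("literal: %d\n", in[i]);
            i++;
        }
    }
}
#endif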
2081 
2082 /* ===========================================================================
2083  * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
2084  * (It will be regenerated if this run of deflate switches away from Huffman.)
2085  */
2086 local block_state deflate_huff(deflate_state *s, int flush) {
2087     int bflush;             /* set if current block must be flushed */
2088 
2089     for (;;) {
2090         /* Make sure that we have a literal to write. */
2091         if (s->lookahead == 0) {
2092             fill_window(s);
2093             if (s->lookahead == 0) {
2094                 if (flush == Z_NO_FLUSH)
2095                     return need_more;
2096                 break;      /* flush the current block */
2097             }
2098         }
2099 
2100         /* Output a literal byte */
2101         s->match_length = 0;
2102         Tracevv((stderr,"%c", s->window[s->strstart]));
2103         _tr_tally_lit(s, s->window[s->strstart], bflush);
2104         s->lookahead--;
2105         s->strstart++;
2106         if (bflush) FLUSH_BLOCK(s, 0);
2107     }
2108     s->insert = 0;
2109     if (flush == Z_FINISH) {
2110         FLUSH_BLOCK(s, 1);
2111         return finish_done;
2112     }
2113     if (s->sym_next)
2114         FLUSH_BLOCK(s, 0);
2115     return block_done;
2116 }
2117